From 80a8240fb9203fd04ecc6233909974de6e0b2ea5 Mon Sep 17 00:00:00 2001 From: Vikash Singh <3116482+vi3k6i5@users.noreply.github.com> Date: Mon, 5 Apr 2021 16:49:57 +0530 Subject: [PATCH 1/3] feat: updated googleapis proto changes for request tags --- .../proto/spanner_database_admin.proto | 2 +- .../services/database_admin/async_client.py | 44 ++- .../database_admin/transports/base.py | 30 +- .../database_admin/transports/grpc.py | 103 +++---- .../database_admin/transports/grpc_asyncio.py | 111 +++---- .../types/__init__.py | 84 ++--- .../types/spanner_database_admin.py | 2 +- .../proto/spanner_instance_admin.proto | 4 +- .../services/instance_admin/async_client.py | 38 ++- .../instance_admin/transports/base.py | 24 +- .../instance_admin/transports/grpc.py | 103 +++---- .../instance_admin/transports/grpc_asyncio.py | 111 +++---- .../types/__init__.py | 32 +- google/cloud/spanner_v1/proto/keys.proto | 2 +- .../cloud/spanner_v1/proto/query_plan.proto | 2 +- .../cloud/spanner_v1/proto/result_set.proto | 2 +- google/cloud/spanner_v1/proto/spanner.proto | 117 ++++++- .../cloud/spanner_v1/proto/transaction.proto | 281 ++++++++++++++++- google/cloud/spanner_v1/proto/type.proto | 2 +- .../services/spanner/async_client.py | 45 ++- .../services/spanner/transports/base.py | 31 +- .../services/spanner/transports/grpc.py | 101 +++--- .../spanner/transports/grpc_asyncio.py | 109 +++---- google/cloud/spanner_v1/types/__init__.py | 78 ++--- google/cloud/spanner_v1/types/spanner.py | 133 +++++++- scripts/fixup_spanner_v1_keywords.py | 14 +- .../spanner_admin_database_v1/__init__.py | 15 + .../test_database_admin.py | 290 +++++++++++++++++- .../spanner_admin_instance_v1/__init__.py | 15 + .../test_instance_admin.py | 176 ++++++++++- tests/unit/gapic/spanner_v1/__init__.py | 15 + tests/unit/gapic/spanner_v1/test_spanner.py | 256 +++++++++++++++- 32 files changed, 1804 insertions(+), 568 deletions(-) diff --git 
a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto index ac771bc061..f09cf073b2 100644 --- a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto +++ b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto @@ -736,7 +736,7 @@ message RestoreDatabaseRequest { // to. If this field is not specified, the restored database will use the same // encryption configuration as the backup by default, namely // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] - // = `USE_CONFIG_DEFAULT_OR_DATABASE_ENCRYPTION`. + // = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. RestoreDatabaseEncryptionConfig encryption_config = 4 [(google.api.field_behavior) = OPTIONAL]; } diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py index 31b97af061..e40e0b1960 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py @@ -103,8 +103,36 @@ class DatabaseAdminAsyncClient: DatabaseAdminClient.parse_common_location_path ) - from_service_account_info = DatabaseAdminClient.from_service_account_info - from_service_account_file = DatabaseAdminClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DatabaseAdminAsyncClient: The constructed client. 
+ """ + return DatabaseAdminClient.from_service_account_info.__func__(DatabaseAdminAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DatabaseAdminAsyncClient: The constructed client. + """ + return DatabaseAdminClient.from_service_account_file.__func__(DatabaseAdminAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property @@ -236,6 +264,7 @@ async def list_databases( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -431,6 +460,7 @@ async def get_database( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -559,6 +589,7 @@ async def update_database_ddl( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -642,6 +673,7 @@ async def drop_database( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -726,6 +758,7 @@ async def get_database_ddl( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -1007,6 +1040,7 @@ async def get_iam_policy( predicate=retries.if_exception_type( 
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -1302,6 +1336,7 @@ async def get_backup( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -1402,6 +1437,7 @@ async def update_backup( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -1481,6 +1517,7 @@ async def delete_backup( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -1566,6 +1603,7 @@ async def list_backups( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -1795,6 +1833,7 @@ async def list_database_operations( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -1897,6 +1936,7 @@ async def list_backup_operations( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py index 779f02e840..0e9a7e50c7 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py @@ -79,10 +79,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. 
quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -90,6 +90,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -99,20 +102,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { @@ -125,6 +125,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -141,6 +142,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -154,6 +156,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -167,6 +170,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -180,6 +184,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -196,6 +201,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -217,6 +223,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -230,6 +237,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -243,6 +251,7 @@ def _prep_wrapped_messages(self, client_info): 
predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -256,6 +265,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -272,6 +282,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -285,6 +296,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py index 665ed4fc15..b695a5a113 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py @@ -118,7 +118,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -126,70 +129,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -197,18 +180,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -222,7 +195,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py index 25229d58cd..cac4b1e2b6 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py @@ -73,7 +73,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -151,10 +151,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
+ Generally, you only need to set this if you're developing your own client library. Raises: @@ -163,7 +163,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -171,70 +174,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -242,18 +225,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/spanner_admin_database_v1/types/__init__.py b/google/cloud/spanner_admin_database_v1/types/__init__.py index 9749add377..a1316e789a 100644 --- a/google/cloud/spanner_admin_database_v1/types/__init__.py +++ b/google/cloud/spanner_admin_database_v1/types/__init__.py @@ -15,80 +15,80 @@ # limitations under the License. 
# -from .common import ( - OperationProgress, - EncryptionConfig, - EncryptionInfo, -) from .backup import ( Backup, - CreateBackupRequest, + BackupInfo, + CreateBackupEncryptionConfig, CreateBackupMetadata, - UpdateBackupRequest, - GetBackupRequest, + CreateBackupRequest, DeleteBackupRequest, - ListBackupsRequest, - ListBackupsResponse, + GetBackupRequest, ListBackupOperationsRequest, ListBackupOperationsResponse, - BackupInfo, - CreateBackupEncryptionConfig, + ListBackupsRequest, + ListBackupsResponse, + UpdateBackupRequest, +) +from .common import ( + EncryptionConfig, + EncryptionInfo, + OperationProgress, ) from .spanner_database_admin import ( - RestoreInfo, - Database, - ListDatabasesRequest, - ListDatabasesResponse, - CreateDatabaseRequest, CreateDatabaseMetadata, - GetDatabaseRequest, - UpdateDatabaseDdlRequest, - UpdateDatabaseDdlMetadata, + CreateDatabaseRequest, + Database, DropDatabaseRequest, GetDatabaseDdlRequest, GetDatabaseDdlResponse, + GetDatabaseRequest, ListDatabaseOperationsRequest, ListDatabaseOperationsResponse, - RestoreDatabaseRequest, + ListDatabasesRequest, + ListDatabasesResponse, + OptimizeRestoredDatabaseMetadata, RestoreDatabaseEncryptionConfig, RestoreDatabaseMetadata, - OptimizeRestoredDatabaseMetadata, + RestoreDatabaseRequest, + RestoreInfo, + UpdateDatabaseDdlMetadata, + UpdateDatabaseDdlRequest, RestoreSourceType, ) __all__ = ( - "OperationProgress", - "EncryptionConfig", - "EncryptionInfo", "Backup", - "CreateBackupRequest", + "BackupInfo", + "CreateBackupEncryptionConfig", "CreateBackupMetadata", - "UpdateBackupRequest", - "GetBackupRequest", + "CreateBackupRequest", "DeleteBackupRequest", - "ListBackupsRequest", - "ListBackupsResponse", + "GetBackupRequest", "ListBackupOperationsRequest", "ListBackupOperationsResponse", - "BackupInfo", - "CreateBackupEncryptionConfig", - "RestoreInfo", - "Database", - "ListDatabasesRequest", - "ListDatabasesResponse", - "CreateDatabaseRequest", + "ListBackupsRequest", + 
"ListBackupsResponse", + "UpdateBackupRequest", + "EncryptionConfig", + "EncryptionInfo", + "OperationProgress", "CreateDatabaseMetadata", - "GetDatabaseRequest", - "UpdateDatabaseDdlRequest", - "UpdateDatabaseDdlMetadata", + "CreateDatabaseRequest", + "Database", "DropDatabaseRequest", "GetDatabaseDdlRequest", "GetDatabaseDdlResponse", + "GetDatabaseRequest", "ListDatabaseOperationsRequest", "ListDatabaseOperationsResponse", - "RestoreDatabaseRequest", + "ListDatabasesRequest", + "ListDatabasesResponse", + "OptimizeRestoredDatabaseMetadata", "RestoreDatabaseEncryptionConfig", "RestoreDatabaseMetadata", - "OptimizeRestoredDatabaseMetadata", + "RestoreDatabaseRequest", + "RestoreInfo", + "UpdateDatabaseDdlMetadata", + "UpdateDatabaseDdlRequest", "RestoreSourceType", ) diff --git a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py index c7309dbbde..278d5e6b95 100644 --- a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py +++ b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py @@ -540,7 +540,7 @@ class RestoreDatabaseRequest(proto.Message): not specified, the restored database will use the same encryption configuration as the backup by default, namely [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] - = ``USE_CONFIG_DEFAULT_OR_DATABASE_ENCRYPTION``. + = ``USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION``. 
""" parent = proto.Field(proto.STRING, number=1) diff --git a/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto b/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto index 54767bf263..69043c1b37 100644 --- a/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto +++ b/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -373,7 +373,7 @@ message Instance { // either omitted or set to `CREATING`. For // [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance], the state must be // either omitted or set to `READY`. - State state = 6; + State state = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; // Cloud Labels are a flexible and lightweight mechanism for organizing cloud // resources into groups that reflect a customer's organizational needs and diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py index a83b1a2c1d..f2a9c36243 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py @@ -106,8 +106,36 @@ class InstanceAdminAsyncClient: InstanceAdminClient.parse_common_location_path ) - from_service_account_info = InstanceAdminClient.from_service_account_info - from_service_account_file = InstanceAdminClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. 
+ kwargs: Additional arguments to pass to the constructor. + + Returns: + InstanceAdminAsyncClient: The constructed client. + """ + return InstanceAdminClient.from_service_account_info.__func__(InstanceAdminAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + InstanceAdminAsyncClient: The constructed client. + """ + return InstanceAdminClient.from_service_account_file.__func__(InstanceAdminAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property @@ -240,6 +268,7 @@ async def list_instance_configs( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -331,6 +360,7 @@ async def get_instance_config( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -416,6 +446,7 @@ async def list_instances( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -504,6 +535,7 @@ async def get_instance( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -883,6 +915,7 @@ async def delete_instance( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, 
client_info=DEFAULT_CLIENT_INFO, @@ -1155,6 +1188,7 @@ async def get_iam_policy( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/base.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/base.py index fa07b95eeb..e3b368c82a 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/base.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/base.py @@ -77,10 +77,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -88,6 +88,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. 
if credentials and credentials_file: @@ -97,20 +100,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { @@ -123,6 +123,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -136,6 +137,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -149,6 +151,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -162,6 +165,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -181,6 +185,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -197,6 +202,7 @@ def _prep_wrapped_messages(self, 
client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py index e896249468..a3e3f39762 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py @@ -131,7 +131,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -139,70 +142,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -210,18 +193,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -235,7 +208,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py index ca7f009071..e4a860874e 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py @@ -86,7 +86,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. 
credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -164,10 +164,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -176,7 +176,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -184,70 +187,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -255,18 +238,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/spanner_admin_instance_v1/types/__init__.py b/google/cloud/spanner_admin_instance_v1/types/__init__.py index 37b771feed..f5ebcd7d5c 100644 --- a/google/cloud/spanner_admin_instance_v1/types/__init__.py +++ b/google/cloud/spanner_admin_instance_v1/types/__init__.py @@ -16,35 +16,35 @@ # from .spanner_instance_admin import ( - ReplicaInfo, - InstanceConfig, + CreateInstanceMetadata, + CreateInstanceRequest, + DeleteInstanceRequest, + GetInstanceConfigRequest, + GetInstanceRequest, Instance, + InstanceConfig, ListInstanceConfigsRequest, ListInstanceConfigsResponse, - GetInstanceConfigRequest, - GetInstanceRequest, - CreateInstanceRequest, ListInstancesRequest, ListInstancesResponse, - UpdateInstanceRequest, - DeleteInstanceRequest, - CreateInstanceMetadata, + ReplicaInfo, UpdateInstanceMetadata, + UpdateInstanceRequest, ) __all__ = ( - "ReplicaInfo", - "InstanceConfig", + "CreateInstanceMetadata", + "CreateInstanceRequest", + "DeleteInstanceRequest", + "GetInstanceConfigRequest", + "GetInstanceRequest", "Instance", + "InstanceConfig", "ListInstanceConfigsRequest", "ListInstanceConfigsResponse", - "GetInstanceConfigRequest", - "GetInstanceRequest", - "CreateInstanceRequest", "ListInstancesRequest", "ListInstancesResponse", - "UpdateInstanceRequest", - "DeleteInstanceRequest", - "CreateInstanceMetadata", + "ReplicaInfo", "UpdateInstanceMetadata", + "UpdateInstanceRequest", ) diff --git a/google/cloud/spanner_v1/proto/keys.proto b/google/cloud/spanner_v1/proto/keys.proto index 267df0d102..d8ce0d6774 100644 --- a/google/cloud/spanner_v1/proto/keys.proto +++ b/google/cloud/spanner_v1/proto/keys.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the 
License. diff --git a/google/cloud/spanner_v1/proto/query_plan.proto b/google/cloud/spanner_v1/proto/query_plan.proto index 974a70e6d1..35f8fe21c5 100644 --- a/google/cloud/spanner_v1/proto/query_plan.proto +++ b/google/cloud/spanner_v1/proto/query_plan.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/google/cloud/spanner_v1/proto/result_set.proto b/google/cloud/spanner_v1/proto/result_set.proto index a87d741fdc..d6bb9a2831 100644 --- a/google/cloud/spanner_v1/proto/result_set.proto +++ b/google/cloud/spanner_v1/proto/result_set.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/google/cloud/spanner_v1/proto/spanner.proto b/google/cloud/spanner_v1/proto/spanner.proto index 8f579e333d..c436227221 100644 --- a/google/cloud/spanner_v1/proto/spanner.proto +++ b/google/cloud/spanner_v1/proto/spanner.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -425,6 +425,63 @@ message DeleteSessionRequest { ]; } +// Common request options for various APIs. +message RequestOptions { + // The relative priority for requests. Note that priority is not applicable + // for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. + // + // The priority acts as a hint to the Cloud Spanner scheduler and does not + // guarantee priority or order of execution. For example: + // + // * Some parts of a write operation always execute at `PRIORITY_HIGH`, + // regardless of the specified priority. 
This may cause you to see an + // increase in high priority workload even when executing a low priority + // request. This can also potentially cause a priority inversion where a + // lower priority request will be fulfilled ahead of a higher priority + // request. + // * If a transaction contains multiple operations with different priorities, + // Cloud Spanner does not guarantee to process the higher priority + // operations first. There may be other constraints to satisfy, such as + // order of operations. + enum Priority { + // `PRIORITY_UNSPECIFIED` is equivalent to `PRIORITY_HIGH`. + PRIORITY_UNSPECIFIED = 0; + + // This specifies that the request is low priority. + PRIORITY_LOW = 1; + + // This specifies that the request is medium priority. + PRIORITY_MEDIUM = 2; + + // This specifies that the request is high priority. + PRIORITY_HIGH = 3; + } + + // Priority for the request. + Priority priority = 1; + + // A per-request tag which can be applied to queries or reads, used for + // statistics collection. + // Both request_tag and transaction_tag can be specified for a read or query + // that belongs to a transaction. + // This field is ignored for requests where it's not applicable (e.g. + // CommitRequest). + // `request_tag` must be a valid identifier of the form: + // `[a-zA-Z][a-zA-Z0-9_\-]` between 2 and 64 characters in length + string request_tag = 2; + + // A tag used for statistics collection about this transaction. + // Both request_tag and transaction_tag can be specified for a read or query + // that belongs to a transaction. + // The value of transaction_tag should be the same for all requests belonging + // to the same transaction. + // If this request doesn’t belong to any transaction, transaction_tag will be + // ignored. 
+ // `transaction_tag` must be a valid identifier of the format: + // `[a-zA-Z][a-zA-Z0-9_\-]{0,49}` + string transaction_tag = 3; +} + // The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. message ExecuteSqlRequest { @@ -435,21 +492,50 @@ message ExecuteSqlRequest { // This parameter allows individual queries to pick different query // optimizer versions. // - // Specifying "latest" as a value instructs Cloud Spanner to use the + // Specifying `latest` as a value instructs Cloud Spanner to use the // latest supported query optimizer version. If not specified, Cloud Spanner - // uses optimizer version set at the database level options. Any other + // uses the optimizer version set at the database level options. Any other // positive integer (from the list of supported optimizer versions) // overrides the default optimizer version for query execution. + // // The list of supported optimizer versions can be queried from - // SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. Executing a SQL statement - // with an invalid optimizer version will fail with a syntax error - // (`INVALID_ARGUMENT`) status. + // SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. + // + // Executing a SQL statement with an invalid optimizer version fails with + // an `INVALID_ARGUMENT` error. + // // See // https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer // for more information on managing the query optimizer. // // The `optimizer_version` statement hint has precedence over this setting. string optimizer_version = 1; + + // An option to control the selection of optimizer statistics package. + // + // This parameter allows individual queries to use a different query + // optimizer statistics package. + // + // Specifying `latest` as a value instructs Cloud Spanner to use the latest + // generated statistics package. 
If not specified, Cloud Spanner uses + // the statistics package set at the database level options, or the latest + // package if the database option is not set. + // + // The statistics package requested by the query has to be exempt from + // garbage collection. This can be achieved with the following DDL + // statement: + // + // ``` + // ALTER STATISTICS SET OPTIONS (allow_gc=false) + // ``` + // + // The list of available statistics packages can be queried from + // `INFORMATION_SCHEMA.SPANNER_STATISTICS`. + // + // Executing a SQL statement with an invalid optimizer statistics package + // or with a statistics package that allows garbage collection fails with + // an `INVALID_ARGUMENT` error. + string optimizer_statistics_package = 2; } // Mode in which the statement must be processed. @@ -547,6 +633,9 @@ message ExecuteSqlRequest { // Query optimizer configuration to use for the given query. QueryOptions query_options = 10; + + // Common options for this request. + RequestOptions request_options = 11; } // The request for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. @@ -613,6 +702,9 @@ message ExecuteBatchDmlRequest { // sequence number, the transaction may be aborted. Replays of previously // handled requests will yield the same response as the first execution. int64 seqno = 4 [(google.api.field_behavior) = REQUIRED]; + + // Common options for this request. + RequestOptions request_options = 5; } // The response for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list @@ -847,6 +939,9 @@ message ReadRequest { // match for the values of fields common to this message and the // PartitionReadRequest message used to create this partition_token. bytes partition_token = 10; + + // Common options for this request. + RequestOptions request_options = 11; } // The request for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. @@ -861,6 +956,13 @@ message BeginTransactionRequest { // Required. 
Options for the new transaction. TransactionOptions options = 2 [(google.api.field_behavior) = REQUIRED]; + + // Common options for this request. + // Priority is ignored for this request. Setting the priority in this + // request_options struct will not do anything. To set the priority for a + // transaction, set it on the reads and writes that are part of this + // transaction instead. + RequestOptions request_options = 3; } // The request for [Commit][google.spanner.v1.Spanner.Commit]. @@ -899,6 +1001,9 @@ message CommitRequest { // the [CommitResponse][google.spanner.v1.CommitResponse.commit_stats]. Default value is // `false`. bool return_commit_stats = 5; + + // Common options for this request. + RequestOptions request_options = 6; } // The response for [Commit][google.spanner.v1.Spanner.Commit]. diff --git a/google/cloud/spanner_v1/proto/transaction.proto b/google/cloud/spanner_v1/proto/transaction.proto index 5c6f494474..30ef9dc84a 100644 --- a/google/cloud/spanner_v1/proto/transaction.proto +++ b/google/cloud/spanner_v1/proto/transaction.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -28,9 +28,284 @@ option java_package = "com.google.spanner.v1"; option php_namespace = "Google\\Cloud\\Spanner\\V1"; option ruby_package = "Google::Cloud::Spanner::V1"; -// TransactionOptions are used to specify different types of transactions. +// # Transactions // -// For more info, see: https://cloud.google.com/spanner/docs/reference/rest/v1/Transaction +// +// Each session can have at most one active transaction at a time (note that +// standalone reads and queries use a transaction internally and do count +// towards the one transaction limit). After the active transaction is +// completed, the session can immediately be re-used for the next transaction. 
+// It is not necessary to create a new session for each transaction. +// +// # Transaction Modes +// +// Cloud Spanner supports three transaction modes: +// +// 1. Locking read-write. This type of transaction is the only way +// to write data into Cloud Spanner. These transactions rely on +// pessimistic locking and, if necessary, two-phase commit. +// Locking read-write transactions may abort, requiring the +// application to retry. +// +// 2. Snapshot read-only. This transaction type provides guaranteed +// consistency across several reads, but does not allow +// writes. Snapshot read-only transactions can be configured to +// read at timestamps in the past. Snapshot read-only +// transactions do not need to be committed. +// +// 3. Partitioned DML. This type of transaction is used to execute +// a single Partitioned DML statement. Partitioned DML partitions +// the key space and runs the DML statement over each partition +// in parallel using separate, internal transactions that commit +// independently. Partitioned DML transactions do not need to be +// committed. +// +// For transactions that only read, snapshot read-only transactions +// provide simpler semantics and are almost always faster. In +// particular, read-only transactions do not take locks, so they do +// not conflict with read-write transactions. As a consequence of not +// taking locks, they also do not abort, so retry loops are not needed. +// +// Transactions may only read/write data in a single database. They +// may, however, read/write data in different tables within that +// database. +// +// ## Locking Read-Write Transactions +// +// Locking transactions may be used to atomically read-modify-write +// data anywhere in a database. This type of transaction is externally +// consistent. +// +// Clients should attempt to minimize the amount of time a transaction +// is active. Faster transactions commit with higher probability +// and cause less contention. 
Cloud Spanner attempts to keep read locks +// active as long as the transaction continues to do reads, and the +// transaction has not been terminated by +// [Commit][google.spanner.v1.Spanner.Commit] or +// [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of +// inactivity at the client may cause Cloud Spanner to release a +// transaction's locks and abort it. +// +// Conceptually, a read-write transaction consists of zero or more +// reads or SQL statements followed by +// [Commit][google.spanner.v1.Spanner.Commit]. At any time before +// [Commit][google.spanner.v1.Spanner.Commit], the client can send a +// [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the +// transaction. +// +// ## Semantics +// +// Cloud Spanner can commit the transaction if all read locks it acquired +// are still valid at commit time, and it is able to acquire write +// locks for all writes. Cloud Spanner can abort the transaction for any +// reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees +// that the transaction has not modified any user data in Cloud Spanner. +// +// Unless the transaction commits, Cloud Spanner makes no guarantees about +// how long the transaction's locks were held for. It is an error to +// use Cloud Spanner locks for any sort of mutual exclusion other than +// between Cloud Spanner transactions themselves. +// +// ## Retrying Aborted Transactions +// +// When a transaction aborts, the application can choose to retry the +// whole transaction again. To maximize the chances of successfully +// committing the retry, the client should execute the retry in the +// same session as the original attempt. The original session's lock +// priority increases with each consecutive abort, meaning that each +// attempt has a slightly better chance of success than the previous. 
+// +// Under some circumstances (e.g., many transactions attempting to +// modify the same row(s)), a transaction can abort many times in a +// short period before successfully committing. Thus, it is not a good +// idea to cap the number of retries a transaction can attempt; +// instead, it is better to limit the total amount of wall time spent +// retrying. +// +// ## Idle Transactions +// +// A transaction is considered idle if it has no outstanding reads or +// SQL queries and has not started a read or SQL query within the last 10 +// seconds. Idle transactions can be aborted by Cloud Spanner so that they +// don't hold on to locks indefinitely. In that case, the commit will +// fail with error `ABORTED`. +// +// If this behavior is undesirable, periodically executing a simple +// SQL query in the transaction (e.g., `SELECT 1`) prevents the +// transaction from becoming idle. +// +// ## Snapshot Read-Only Transactions +// +// Snapshot read-only transactions provides a simpler method than +// locking read-write transactions for doing several consistent +// reads. However, this type of transaction does not support writes. +// +// Snapshot transactions do not take locks. Instead, they work by +// choosing a Cloud Spanner timestamp, then executing all reads at that +// timestamp. Since they do not acquire locks, they do not block +// concurrent read-write transactions. +// +// Unlike locking read-write transactions, snapshot read-only +// transactions never abort. They can fail if the chosen read +// timestamp is garbage collected; however, the default garbage +// collection policy is generous enough that most applications do not +// need to worry about this in practice. +// +// Snapshot read-only transactions do not need to call +// [Commit][google.spanner.v1.Spanner.Commit] or +// [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not +// permitted to do so). 
+// +// To execute a snapshot transaction, the client specifies a timestamp +// bound, which tells Cloud Spanner how to choose a read timestamp. +// +// The types of timestamp bound are: +// +// - Strong (the default). +// - Bounded staleness. +// - Exact staleness. +// +// If the Cloud Spanner database to be read is geographically distributed, +// stale read-only transactions can execute more quickly than strong +// or read-write transaction, because they are able to execute far +// from the leader replica. +// +// Each type of timestamp bound is discussed in detail below. +// +// ## Strong +// +// Strong reads are guaranteed to see the effects of all transactions +// that have committed before the start of the read. Furthermore, all +// rows yielded by a single read are consistent with each other -- if +// any part of the read observes a transaction, all parts of the read +// see the transaction. +// +// Strong reads are not repeatable: two consecutive strong read-only +// transactions might return inconsistent results if there are +// concurrent writes. If consistency across reads is required, the +// reads should be executed within a transaction or at an exact read +// timestamp. +// +// See [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. +// +// ## Exact Staleness +// +// These timestamp bounds execute reads at a user-specified +// timestamp. Reads at a timestamp are guaranteed to see a consistent +// prefix of the global transaction history: they observe +// modifications done by all transactions with a commit timestamp <= +// the read timestamp, and observe none of the modifications done by +// transactions with a larger commit timestamp. They will block until +// all conflicting transactions that may be assigned commit timestamps +// <= the read timestamp have finished. +// +// The timestamp can either be expressed as an absolute Cloud Spanner commit +// timestamp or a staleness relative to the current time. 
+// +// These modes do not require a "negotiation phase" to pick a +// timestamp. As a result, they execute slightly faster than the +// equivalent boundedly stale concurrency modes. On the other hand, +// boundedly stale reads usually return fresher results. +// +// See [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] and +// [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. +// +// ## Bounded Staleness +// +// Bounded staleness modes allow Cloud Spanner to pick the read timestamp, +// subject to a user-provided staleness bound. Cloud Spanner chooses the +// newest timestamp within the staleness bound that allows execution +// of the reads at the closest available replica without blocking. +// +// All rows yielded are consistent with each other -- if any part of +// the read observes a transaction, all parts of the read see the +// transaction. Boundedly stale reads are not repeatable: two stale +// reads, even if they use the same staleness bound, can execute at +// different timestamps and thus return inconsistent results. +// +// Boundedly stale reads execute in two phases: the first phase +// negotiates a timestamp among all replicas needed to serve the +// read. In the second phase, reads are executed at the negotiated +// timestamp. +// +// As a result of the two phase execution, bounded staleness reads are +// usually a little slower than comparable exact staleness +// reads. However, they are typically able to return fresher +// results, and are more likely to execute at the closest replica. +// +// Because the timestamp negotiation requires up-front knowledge of +// which rows will be read, it can only be used with single-use +// read-only transactions. 
+// +// See [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] and +// [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. +// +// ## Old Read Timestamps and Garbage Collection +// +// Cloud Spanner continuously garbage collects deleted and overwritten data +// in the background to reclaim storage space. This process is known +// as "version GC". By default, version GC reclaims versions after they +// are one hour old. Because of this, Cloud Spanner cannot perform reads +// at read timestamps more than one hour in the past. This +// restriction also applies to in-progress reads and/or SQL queries whose +// timestamp become too old while executing. Reads and SQL queries with +// too-old read timestamps fail with the error `FAILED_PRECONDITION`. +// +// ## Partitioned DML Transactions +// +// Partitioned DML transactions are used to execute DML statements with a +// different execution strategy that provides different, and often better, +// scalability properties for large, table-wide operations than DML in a +// ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, +// should prefer using ReadWrite transactions. +// +// Partitioned DML partitions the keyspace and runs the DML statement on each +// partition in separate, internal transactions. These transactions commit +// automatically when complete, and run independently from one another. +// +// To reduce lock contention, this execution strategy only acquires read locks +// on rows that match the WHERE clause of the statement. Additionally, the +// smaller per-partition transactions hold locks for less time. +// +// That said, Partitioned DML is not a drop-in replacement for standard DML used +// in ReadWrite transactions. +// +// - The DML statement must be fully-partitionable. 
Specifically, the statement +// must be expressible as the union of many statements which each access only +// a single row of the table. +// +// - The statement is not applied atomically to all rows of the table. Rather, +// the statement is applied atomically to partitions of the table, in +// independent transactions. Secondary index rows are updated atomically +// with the base table rows. +// +// - Partitioned DML does not guarantee exactly-once execution semantics +// against a partition. The statement will be applied at least once to each +// partition. It is strongly recommended that the DML statement should be +// idempotent to avoid unexpected results. For instance, it is potentially +// dangerous to run a statement such as +// `UPDATE table SET column = column + 1` as it could be run multiple times +// against some rows. +// +// - The partitions are committed automatically - there is no support for +// Commit or Rollback. If the call returns an error, or if the client issuing +// the ExecuteSql call dies, it is possible that some rows had the statement +// executed on them successfully. It is also possible that statement was +// never executed against other rows. +// +// - Partitioned DML transactions may only contain the execution of a single +// DML statement via ExecuteSql or ExecuteStreamingSql. +// +// - If any error is encountered during the execution of the partitioned DML +// operation (for instance, a UNIQUE INDEX violation, division by zero, or a +// value that cannot be stored due to schema constraints), then the +// operation is stopped at that point and an error is returned. It is +// possible that at this point, some partitions have been committed (or even +// committed multiple times), and other partitions have not been run at all. +// +// Given the above, Partitioned DML is good fit for large, database-wide, +// operations that are idempotent, such as deleting old rows from a very large +// table. 
message TransactionOptions { // Message type to initiate a read-write transaction. Currently this // transaction type has no options. diff --git a/google/cloud/spanner_v1/proto/type.proto b/google/cloud/spanner_v1/proto/type.proto index 1b863c0fdf..4a5afd485d 100644 --- a/google/cloud/spanner_v1/proto/type.proto +++ b/google/cloud/spanner_v1/proto/type.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/google/cloud/spanner_v1/services/spanner/async_client.py b/google/cloud/spanner_v1/services/spanner/async_client.py index a4a188bc97..d220c20f6e 100644 --- a/google/cloud/spanner_v1/services/spanner/async_client.py +++ b/google/cloud/spanner_v1/services/spanner/async_client.py @@ -79,8 +79,36 @@ class SpannerAsyncClient: common_location_path = staticmethod(SpannerClient.common_location_path) parse_common_location_path = staticmethod(SpannerClient.parse_common_location_path) - from_service_account_info = SpannerClient.from_service_account_info - from_service_account_file = SpannerClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SpannerAsyncClient: The constructed client. + """ + return SpannerClient.from_service_account_info.__func__(SpannerAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. 
+ args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SpannerAsyncClient: The constructed client. + """ + return SpannerClient.from_service_account_file.__func__(SpannerAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property @@ -222,6 +250,7 @@ async def create_session( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -319,6 +348,7 @@ async def batch_create_sessions( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -398,6 +428,7 @@ async def get_session( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -480,6 +511,7 @@ async def list_sessions( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -561,6 +593,7 @@ async def delete_session( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -631,6 +664,7 @@ async def execute_sql( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -798,6 +832,7 @@ async def execute_batch_dml( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -870,6 +905,7 @@ async def read( maximum=32.0, multiplier=1.3, 
predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -1021,6 +1057,7 @@ async def begin_transaction( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -1158,6 +1195,7 @@ async def commit( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -1250,6 +1288,7 @@ async def rollback( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -1320,6 +1359,7 @@ async def partition_query( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -1394,6 +1434,7 @@ async def partition_read( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, diff --git a/google/cloud/spanner_v1/services/spanner/transports/base.py b/google/cloud/spanner_v1/services/spanner/transports/base.py index 36e3c0cb52..f91b98d6fb 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/base.py +++ b/google/cloud/spanner_v1/services/spanner/transports/base.py @@ -73,10 +73,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -84,6 +84,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -93,20 +96,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { @@ -117,6 +117,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -128,6 +129,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -139,6 +141,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -150,6 +153,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -161,6 +165,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -172,6 +177,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -188,6 +194,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -199,6 +206,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -213,6 +221,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, 
predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -224,6 +233,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -235,6 +245,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -246,6 +257,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -257,6 +269,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, diff --git a/google/cloud/spanner_v1/services/spanner/transports/grpc.py b/google/cloud/spanner_v1/services/spanner/transports/grpc.py index 2ac10fc5b3..0a3ead94e5 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/grpc.py +++ b/google/cloud/spanner_v1/services/spanner/transports/grpc.py @@ -111,7 +111,9 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -119,70 +121,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. 
credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -190,17 +172,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -214,7 +187,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py b/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py index 265f4bb30a..a7c83ef512 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py +++ b/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py @@ -66,7 +66,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -144,10 +144,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. 
Raises: @@ -156,7 +156,9 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -164,70 +166,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -235,17 +217,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/spanner_v1/types/__init__.py b/google/cloud/spanner_v1/types/__init__.py index a71a15855c..7a7ac395e4 100644 --- a/google/cloud/spanner_v1/types/__init__.py +++ b/google/cloud/spanner_v1/types/__init__.py @@ -24,44 +24,45 @@ PlanNode, QueryPlan, ) -from .transaction import ( - TransactionOptions, - Transaction, - TransactionSelector, -) -from .type import ( - Type, - StructType, - TypeCode, -) from .result_set import ( - ResultSet, PartialResultSet, + ResultSet, ResultSetMetadata, ResultSetStats, ) from .spanner import ( - CreateSessionRequest, BatchCreateSessionsRequest, BatchCreateSessionsResponse, - Session, - GetSessionRequest, - ListSessionsRequest, - ListSessionsResponse, + BeginTransactionRequest, + CommitRequest, + CommitResponse, + CreateSessionRequest, DeleteSessionRequest, - ExecuteSqlRequest, ExecuteBatchDmlRequest, ExecuteBatchDmlResponse, + ExecuteSqlRequest, + GetSessionRequest, + ListSessionsRequest, + ListSessionsResponse, + Partition, PartitionOptions, PartitionQueryRequest, PartitionReadRequest, - Partition, PartitionResponse, ReadRequest, - BeginTransactionRequest, - CommitRequest, - CommitResponse, + RequestOptions, RollbackRequest, + Session, +) +from .transaction import ( + Transaction, + TransactionOptions, + TransactionSelector, +) +from .type import ( + StructType, + Type, + TypeCode, ) __all__ = ( @@ -70,35 +71,36 @@ "Mutation", "PlanNode", "QueryPlan", - "TransactionOptions", - "Transaction", - "TransactionSelector", - "Type", - "StructType", - "TypeCode", - "ResultSet", "PartialResultSet", + "ResultSet", "ResultSetMetadata", "ResultSetStats", - "CreateSessionRequest", "BatchCreateSessionsRequest", "BatchCreateSessionsResponse", - "Session", - "GetSessionRequest", - "ListSessionsRequest", - "ListSessionsResponse", + "BeginTransactionRequest", + "CommitRequest", + 
"CommitResponse", + "CreateSessionRequest", "DeleteSessionRequest", - "ExecuteSqlRequest", "ExecuteBatchDmlRequest", "ExecuteBatchDmlResponse", + "ExecuteSqlRequest", + "GetSessionRequest", + "ListSessionsRequest", + "ListSessionsResponse", + "Partition", "PartitionOptions", "PartitionQueryRequest", "PartitionReadRequest", - "Partition", "PartitionResponse", "ReadRequest", - "BeginTransactionRequest", - "CommitRequest", - "CommitResponse", + "RequestOptions", "RollbackRequest", + "Session", + "Transaction", + "TransactionOptions", + "TransactionSelector", + "StructType", + "Type", + "TypeCode", ) diff --git a/google/cloud/spanner_v1/types/spanner.py b/google/cloud/spanner_v1/types/spanner.py index 1dfd8451fe..acb32c8ff9 100644 --- a/google/cloud/spanner_v1/types/spanner.py +++ b/google/cloud/spanner_v1/types/spanner.py @@ -39,6 +39,7 @@ "ListSessionsRequest", "ListSessionsResponse", "DeleteSessionRequest", + "RequestOptions", "ExecuteSqlRequest", "ExecuteBatchDmlRequest", "ExecuteBatchDmlResponse", @@ -240,6 +241,63 @@ class DeleteSessionRequest(proto.Message): name = proto.Field(proto.STRING, number=1) +class RequestOptions(proto.Message): + r"""Common request options for various APIs. + + Attributes: + priority (google.cloud.spanner_v1.types.RequestOptions.Priority): + Priority for the request. + request_tag (str): + A per-request tag which can be applied to queries or reads, + used for statistics collection. Both request_tag and + transaction_tag can be specified for a read or query that + belongs to a transaction. This field is ignored for requests + where it's not applicable (e.g. CommitRequest). + ``request_tag`` must be a valid identifier of the form: + ``[a-zA-Z][a-zA-Z0-9_\-]`` between 2 and 64 characters in + length + transaction_tag (str): + A tag used for statistics collection about this transaction. + Both request_tag and transaction_tag can be specified for a + read or query that belongs to a transaction. 
The value of + transaction_tag should be the same for all requests + belonging to the same transaction. If this request doesn’t + belong to any transaction, transaction_tag will be ignored. + ``transaction_tag`` must be a valid identifier of the + format: ``[a-zA-Z][a-zA-Z0-9_\-]{0,49}`` + """ + + class Priority(proto.Enum): + r"""The relative priority for requests. Note that priority is not + applicable for + [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. + + The priority acts as a hint to the Cloud Spanner scheduler and does + not guarantee priority or order of execution. For example: + + - Some parts of a write operation always execute at + ``PRIORITY_HIGH``, regardless of the specified priority. This may + cause you to see an increase in high priority workload even when + executing a low priority request. This can also potentially cause + a priority inversion where a lower priority request will be + fulfilled ahead of a higher priority request. + - If a transaction contains multiple operations with different + priorities, Cloud Spanner does not guarantee to process the + higher priority operations first. There may be other constraints + to satisfy, such as order of operations. + """ + PRIORITY_UNSPECIFIED = 0 + PRIORITY_LOW = 1 + PRIORITY_MEDIUM = 2 + PRIORITY_HIGH = 3 + + priority = proto.Field(proto.ENUM, number=1, enum=Priority,) + + request_tag = proto.Field(proto.STRING, number=2) + + transaction_tag = proto.Field(proto.STRING, number=3) + + class ExecuteSqlRequest(proto.Message): r"""The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and @@ -335,6 +393,8 @@ class ExecuteSqlRequest(proto.Message): query_options (google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions): Query optimizer configuration to use for the given query. + request_options (google.cloud.spanner_v1.types.RequestOptions): + Common options for this request. 
""" class QueryMode(proto.Enum): @@ -353,25 +413,58 @@ class QueryOptions(proto.Message): This parameter allows individual queries to pick different query optimizer versions. - Specifying "latest" as a value instructs Cloud Spanner to + Specifying ``latest`` as a value instructs Cloud Spanner to use the latest supported query optimizer version. If not - specified, Cloud Spanner uses optimizer version set at the - database level options. Any other positive integer (from the - list of supported optimizer versions) overrides the default - optimizer version for query execution. The list of supported - optimizer versions can be queried from - SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. Executing a SQL - statement with an invalid optimizer version will fail with a - syntax error (``INVALID_ARGUMENT``) status. See + specified, Cloud Spanner uses the optimizer version set at + the database level options. Any other positive integer (from + the list of supported optimizer versions) overrides the + default optimizer version for query execution. + + The list of supported optimizer versions can be queried from + SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. + + Executing a SQL statement with an invalid optimizer version + fails with an ``INVALID_ARGUMENT`` error. + + See https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer for more information on managing the query optimizer. The ``optimizer_version`` statement hint has precedence over this setting. + optimizer_statistics_package (str): + An option to control the selection of optimizer statistics + package. + + This parameter allows individual queries to use a different + query optimizer statistics package. + + Specifying ``latest`` as a value instructs Cloud Spanner to + use the latest generated statistics package. If not + specified, Cloud Spanner uses the statistics package set at + the database level options, or the latest package if the + database option is not set. 
+ + The statistics package requested by the query has to be + exempt from garbage collection. This can be achieved with + the following DDL statement: + + :: + + ALTER STATISTICS SET OPTIONS (allow_gc=false) + + The list of available statistics packages can be queried + from ``INFORMATION_SCHEMA.SPANNER_STATISTICS``. + + Executing a SQL statement with an invalid optimizer + statistics package or with a statistics package that allows + garbage collection fails with an ``INVALID_ARGUMENT`` error. """ optimizer_version = proto.Field(proto.STRING, number=1) + optimizer_statistics_package = proto.Field(proto.STRING, number=2) + session = proto.Field(proto.STRING, number=1) transaction = proto.Field( @@ -396,6 +489,8 @@ class QueryOptions(proto.Message): query_options = proto.Field(proto.MESSAGE, number=10, message=QueryOptions,) + request_options = proto.Field(proto.MESSAGE, number=11, message="RequestOptions",) + class ExecuteBatchDmlRequest(proto.Message): r"""The request for @@ -434,6 +529,8 @@ class ExecuteBatchDmlRequest(proto.Message): sequence number, the transaction may be aborted. Replays of previously handled requests will yield the same response as the first execution. + request_options (google.cloud.spanner_v1.types.RequestOptions): + Common options for this request. """ class Statement(proto.Message): @@ -491,6 +588,8 @@ class Statement(proto.Message): seqno = proto.Field(proto.INT64, number=4) + request_options = proto.Field(proto.MESSAGE, number=5, message="RequestOptions",) + class ExecuteBatchDmlResponse(proto.Message): r"""The response for @@ -835,6 +934,8 @@ class ReadRequest(proto.Message): must be an exact match for the values of fields common to this message and the PartitionReadRequest message used to create this partition_token. + request_options (google.cloud.spanner_v1.types.RequestOptions): + Common options for this request. 
""" session = proto.Field(proto.STRING, number=1) @@ -857,6 +958,8 @@ class ReadRequest(proto.Message): partition_token = proto.Field(proto.BYTES, number=10) + request_options = proto.Field(proto.MESSAGE, number=11, message="RequestOptions",) + class BeginTransactionRequest(proto.Message): r"""The request for @@ -868,6 +971,12 @@ class BeginTransactionRequest(proto.Message): transaction runs. options (google.cloud.spanner_v1.types.TransactionOptions): Required. Options for the new transaction. + request_options (google.cloud.spanner_v1.types.RequestOptions): + Common options for this request. Priority is ignored for + this request. Setting the priority in this request_options + struct will not do anything. To set the priority for a + transaction, set it on the reads and writes that are part of + this transaction instead. """ session = proto.Field(proto.STRING, number=1) @@ -876,6 +985,8 @@ class BeginTransactionRequest(proto.Message): proto.MESSAGE, number=2, message=gs_transaction.TransactionOptions, ) + request_options = proto.Field(proto.MESSAGE, number=3, message="RequestOptions",) + class CommitRequest(proto.Message): r"""The request for [Commit][google.spanner.v1.Spanner.Commit]. @@ -906,6 +1017,8 @@ class CommitRequest(proto.Message): be included in the [CommitResponse][google.spanner.v1.CommitResponse.commit_stats]. Default value is ``false``. + request_options (google.cloud.spanner_v1.types.RequestOptions): + Common options for this request. """ session = proto.Field(proto.STRING, number=1) @@ -923,6 +1036,8 @@ class CommitRequest(proto.Message): return_commit_stats = proto.Field(proto.BOOL, number=5) + request_options = proto.Field(proto.MESSAGE, number=6, message="RequestOptions",) + class CommitResponse(proto.Message): r"""The response for [Commit][google.spanner.v1.Spanner.Commit]. 
diff --git a/scripts/fixup_spanner_v1_keywords.py b/scripts/fixup_spanner_v1_keywords.py index 19e3c0185b..4faf734dcb 100644 --- a/scripts/fixup_spanner_v1_keywords.py +++ b/scripts/fixup_spanner_v1_keywords.py @@ -42,20 +42,20 @@ class spannerCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'batch_create_sessions': ('database', 'session_count', 'session_template', ), - 'begin_transaction': ('session', 'options', ), - 'commit': ('session', 'transaction_id', 'single_use_transaction', 'mutations', 'return_commit_stats', ), + 'begin_transaction': ('session', 'options', 'request_options', ), + 'commit': ('session', 'transaction_id', 'single_use_transaction', 'mutations', 'return_commit_stats', 'request_options', ), 'create_session': ('database', 'session', ), 'delete_session': ('name', ), - 'execute_batch_dml': ('session', 'transaction', 'statements', 'seqno', ), - 'execute_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', ), - 'execute_streaming_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', ), + 'execute_batch_dml': ('session', 'transaction', 'statements', 'seqno', 'request_options', ), + 'execute_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', 'request_options', ), + 'execute_streaming_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', 'request_options', ), 'get_session': ('name', ), 'list_sessions': ('database', 'page_size', 'page_token', 'filter', ), 'partition_query': ('session', 'sql', 'transaction', 'params', 'param_types', 'partition_options', ), 'partition_read': ('session', 'table', 'key_set', 'transaction', 
'index', 'columns', 'partition_options', ), - 'read': ('session', 'table', 'columns', 'key_set', 'transaction', 'index', 'limit', 'resume_token', 'partition_token', ), + 'read': ('session', 'table', 'columns', 'key_set', 'transaction', 'index', 'limit', 'resume_token', 'partition_token', 'request_options', ), 'rollback': ('session', 'transaction_id', ), - 'streaming_read': ('session', 'table', 'columns', 'key_set', 'transaction', 'index', 'limit', 'resume_token', 'partition_token', ), + 'streaming_read': ('session', 'table', 'columns', 'key_set', 'transaction', 'index', 'limit', 'resume_token', 'partition_token', 'request_options', ), } diff --git a/tests/unit/gapic/spanner_admin_database_v1/__init__.py b/tests/unit/gapic/spanner_admin_database_v1/__init__.py index 8b13789179..42ffdf2bc4 100644 --- a/tests/unit/gapic/spanner_admin_database_v1/__init__.py +++ b/tests/unit/gapic/spanner_admin_database_v1/__init__.py @@ -1 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py b/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py index 86eba5e283..1906328473 100644 --- a/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py +++ b/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py @@ -104,15 +104,19 @@ def test__get_default_mtls_endpoint(): ) -def test_database_admin_client_from_service_account_info(): +@pytest.mark.parametrize( + "client_class", [DatabaseAdminClient, DatabaseAdminAsyncClient,] +) +def test_database_admin_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = DatabaseAdminClient.from_service_account_info(info) + client = client_class.from_service_account_info(info) assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "spanner.googleapis.com:443" @@ -128,9 +132,11 @@ def test_database_admin_client_from_service_account_file(client_class): factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "spanner.googleapis.com:443" @@ -493,6 +499,22 @@ def test_list_databases_from_dict(): test_list_databases(request_type=dict) +def test_list_databases_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: + client.list_databases() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.ListDatabasesRequest() + + @pytest.mark.asyncio async def test_list_databases_async( transport: str = "grpc_asyncio", @@ -842,6 +864,22 @@ def test_create_database_from_dict(): test_create_database(request_type=dict) +def test_create_database_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_database), "__call__") as call: + client.create_database() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.CreateDatabaseRequest() + + @pytest.mark.asyncio async def test_create_database_async( transport: str = "grpc_asyncio", @@ -1052,6 +1090,22 @@ def test_get_database_from_dict(): test_get_database(request_type=dict) +def test_get_database_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_database), "__call__") as call: + client.get_database() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.GetDatabaseRequest() + + @pytest.mark.asyncio async def test_get_database_async( transport: str = "grpc_asyncio", @@ -1252,6 +1306,24 @@ def test_update_database_ddl_from_dict(): test_update_database_ddl(request_type=dict) +def test_update_database_ddl_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_database_ddl), "__call__" + ) as call: + client.update_database_ddl() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.UpdateDatabaseDdlRequest() + + @pytest.mark.asyncio async def test_update_database_ddl_async( transport: str = "grpc_asyncio", @@ -1461,6 +1533,22 @@ def test_drop_database_from_dict(): test_drop_database(request_type=dict) +def test_drop_database_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.drop_database), "__call__") as call: + client.drop_database() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.DropDatabaseRequest() + + @pytest.mark.asyncio async def test_drop_database_async( transport: str = "grpc_asyncio", @@ -1647,6 +1735,22 @@ def test_get_database_ddl_from_dict(): test_get_database_ddl(request_type=dict) +def test_get_database_ddl_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database_ddl), "__call__") as call: + client.get_database_ddl() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.GetDatabaseDdlRequest() + + @pytest.mark.asyncio async def test_get_database_ddl_async( transport: str = "grpc_asyncio", @@ -1843,6 +1947,22 @@ def test_set_iam_policy_from_dict(): test_set_iam_policy(request_type=dict) +def test_set_iam_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + client.set_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + @pytest.mark.asyncio async def test_set_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy.SetIamPolicyRequest @@ -2050,6 +2170,22 @@ def test_get_iam_policy_from_dict(): test_get_iam_policy(request_type=dict) +def test_get_iam_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + client.get_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.GetIamPolicyRequest() + + @pytest.mark.asyncio async def test_get_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy.GetIamPolicyRequest @@ -2259,6 +2395,24 @@ def test_test_iam_permissions_from_dict(): test_test_iam_permissions(request_type=dict) +def test_test_iam_permissions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + client.test_iam_permissions() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.TestIamPermissionsRequest() + + @pytest.mark.asyncio async def test_test_iam_permissions_async( transport: str = "grpc_asyncio", request_type=iam_policy.TestIamPermissionsRequest @@ -2487,6 +2641,22 @@ def test_create_backup_from_dict(): test_create_backup(request_type=dict) +def test_create_backup_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + client.create_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == gsad_backup.CreateBackupRequest() + + @pytest.mark.asyncio async def test_create_backup_async( transport: str = "grpc_asyncio", request_type=gsad_backup.CreateBackupRequest @@ -2710,6 +2880,22 @@ def test_get_backup_from_dict(): test_get_backup(request_type=dict) +def test_get_backup_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + client.get_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == backup.GetBackupRequest() + + @pytest.mark.asyncio async def test_get_backup_async( transport: str = "grpc_asyncio", request_type=backup.GetBackupRequest @@ -2925,6 +3111,22 @@ def test_update_backup_from_dict(): test_update_backup(request_type=dict) +def test_update_backup_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + client.update_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == gsad_backup.UpdateBackupRequest() + + @pytest.mark.asyncio async def test_update_backup_async( transport: str = "grpc_asyncio", request_type=gsad_backup.UpdateBackupRequest @@ -3137,6 +3339,22 @@ def test_delete_backup_from_dict(): test_delete_backup(request_type=dict) +def test_delete_backup_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + client.delete_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == backup.DeleteBackupRequest() + + @pytest.mark.asyncio async def test_delete_backup_async( transport: str = "grpc_asyncio", request_type=backup.DeleteBackupRequest @@ -3320,6 +3538,22 @@ def test_list_backups_from_dict(): test_list_backups(request_type=dict) +def test_list_backups_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + client.list_backups() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == backup.ListBackupsRequest() + + @pytest.mark.asyncio async def test_list_backups_async( transport: str = "grpc_asyncio", request_type=backup.ListBackupsRequest @@ -3622,6 +3856,22 @@ def test_restore_database_from_dict(): test_restore_database(request_type=dict) +def test_restore_database_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.restore_database), "__call__") as call: + client.restore_database() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.RestoreDatabaseRequest() + + @pytest.mark.asyncio async def test_restore_database_async( transport: str = "grpc_asyncio", @@ -3839,6 +4089,24 @@ def test_list_database_operations_from_dict(): test_list_database_operations(request_type=dict) +def test_list_database_operations_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_database_operations), "__call__" + ) as call: + client.list_database_operations() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.ListDatabaseOperationsRequest() + + @pytest.mark.asyncio async def test_list_database_operations_async( transport: str = "grpc_asyncio", @@ -4203,6 +4471,24 @@ def test_list_backup_operations_from_dict(): test_list_backup_operations(request_type=dict) +def test_list_backup_operations_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_backup_operations), "__call__" + ) as call: + client.list_backup_operations() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == backup.ListBackupOperationsRequest() + + @pytest.mark.asyncio async def test_list_backup_operations_async( transport: str = "grpc_asyncio", request_type=backup.ListBackupOperationsRequest diff --git a/tests/unit/gapic/spanner_admin_instance_v1/__init__.py b/tests/unit/gapic/spanner_admin_instance_v1/__init__.py index 8b13789179..42ffdf2bc4 100644 --- a/tests/unit/gapic/spanner_admin_instance_v1/__init__.py +++ b/tests/unit/gapic/spanner_admin_instance_v1/__init__.py @@ -1 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py b/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py index e2caceee98..b64c5eca33 100644 --- a/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py +++ b/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py @@ -98,15 +98,19 @@ def test__get_default_mtls_endpoint(): ) -def test_instance_admin_client_from_service_account_info(): +@pytest.mark.parametrize( + "client_class", [InstanceAdminClient, InstanceAdminAsyncClient,] +) +def test_instance_admin_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = InstanceAdminClient.from_service_account_info(info) + client = client_class.from_service_account_info(info) assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "spanner.googleapis.com:443" @@ -122,9 +126,11 @@ def test_instance_admin_client_from_service_account_file(client_class): factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "spanner.googleapis.com:443" @@ -490,6 +496,24 @@ def test_list_instance_configs_from_dict(): test_list_instance_configs(request_type=dict) +def test_list_instance_configs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_instance_configs), "__call__" + ) as call: + client.list_instance_configs() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.ListInstanceConfigsRequest() + + @pytest.mark.asyncio async def test_list_instance_configs_async( transport: str = "grpc_asyncio", @@ -875,6 +899,24 @@ def test_get_instance_config_from_dict(): test_get_instance_config(request_type=dict) +def test_get_instance_config_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_instance_config), "__call__" + ) as call: + client.get_instance_config() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.GetInstanceConfigRequest() + + @pytest.mark.asyncio async def test_get_instance_config_async( transport: str = "grpc_asyncio", @@ -1083,6 +1125,22 @@ def test_list_instances_from_dict(): test_list_instances(request_type=dict) +def test_list_instances_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + client.list_instances() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.ListInstancesRequest() + + @pytest.mark.asyncio async def test_list_instances_async( transport: str = "grpc_asyncio", @@ -1452,6 +1510,22 @@ def test_get_instance_from_dict(): test_get_instance(request_type=dict) +def test_get_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + client.get_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.GetInstanceRequest() + + @pytest.mark.asyncio async def test_get_instance_async( transport: str = "grpc_asyncio", @@ -1658,6 +1732,22 @@ def test_create_instance_from_dict(): test_create_instance(request_type=dict) +def test_create_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + client.create_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.CreateInstanceRequest() + + @pytest.mark.asyncio async def test_create_instance_async( transport: str = "grpc_asyncio", @@ -1867,6 +1957,22 @@ def test_update_instance_from_dict(): test_update_instance(request_type=dict) +def test_update_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + client.update_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.UpdateInstanceRequest() + + @pytest.mark.asyncio async def test_update_instance_async( transport: str = "grpc_asyncio", @@ -2072,6 +2178,22 @@ def test_delete_instance_from_dict(): test_delete_instance(request_type=dict) +def test_delete_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + client.delete_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.DeleteInstanceRequest() + + @pytest.mark.asyncio async def test_delete_instance_async( transport: str = "grpc_asyncio", @@ -2258,6 +2380,22 @@ def test_set_iam_policy_from_dict(): test_set_iam_policy(request_type=dict) +def test_set_iam_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + client.set_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + @pytest.mark.asyncio async def test_set_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy.SetIamPolicyRequest @@ -2465,6 +2603,22 @@ def test_get_iam_policy_from_dict(): test_get_iam_policy(request_type=dict) +def test_get_iam_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + client.get_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.GetIamPolicyRequest() + + @pytest.mark.asyncio async def test_get_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy.GetIamPolicyRequest @@ -2674,6 +2828,24 @@ def test_test_iam_permissions_from_dict(): test_test_iam_permissions(request_type=dict) +def test_test_iam_permissions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + client.test_iam_permissions() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.TestIamPermissionsRequest() + + @pytest.mark.asyncio async def test_test_iam_permissions_async( transport: str = "grpc_asyncio", request_type=iam_policy.TestIamPermissionsRequest diff --git a/tests/unit/gapic/spanner_v1/__init__.py b/tests/unit/gapic/spanner_v1/__init__.py index 8b13789179..42ffdf2bc4 100644 --- a/tests/unit/gapic/spanner_v1/__init__.py +++ b/tests/unit/gapic/spanner_v1/__init__.py @@ -1 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/tests/unit/gapic/spanner_v1/test_spanner.py b/tests/unit/gapic/spanner_v1/test_spanner.py index 56d3818009..37ca9c6deb 100644 --- a/tests/unit/gapic/spanner_v1/test_spanner.py +++ b/tests/unit/gapic/spanner_v1/test_spanner.py @@ -87,15 +87,17 @@ def test__get_default_mtls_endpoint(): assert SpannerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi -def test_spanner_client_from_service_account_info(): +@pytest.mark.parametrize("client_class", [SpannerClient, SpannerAsyncClient,]) +def test_spanner_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = SpannerClient.from_service_account_info(info) + client = client_class.from_service_account_info(info) assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "spanner.googleapis.com:443" @@ -109,9 +111,11 @@ def test_spanner_client_from_service_account_file(client_class): factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "spanner.googleapis.com:443" @@ -448,6 +452,22 @@ def test_create_session_from_dict(): test_create_session(request_type=dict) +def test_create_session_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_session), "__call__") as call: + client.create_session() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.CreateSessionRequest() + + @pytest.mark.asyncio async def test_create_session_async( transport: str = "grpc_asyncio", request_type=spanner.CreateSessionRequest @@ -635,6 +655,24 @@ def test_batch_create_sessions_from_dict(): test_batch_create_sessions(request_type=dict) +def test_batch_create_sessions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_sessions), "__call__" + ) as call: + client.batch_create_sessions() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.BatchCreateSessionsRequest() + + @pytest.mark.asyncio async def test_batch_create_sessions_async( transport: str = "grpc_asyncio", request_type=spanner.BatchCreateSessionsRequest @@ -844,6 +882,22 @@ def test_get_session_from_dict(): test_get_session(request_type=dict) +def test_get_session_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_session), "__call__") as call: + client.get_session() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.GetSessionRequest() + + @pytest.mark.asyncio async def test_get_session_async( transport: str = "grpc_asyncio", request_type=spanner.GetSessionRequest @@ -1033,6 +1087,22 @@ def test_list_sessions_from_dict(): test_list_sessions(request_type=dict) +def test_list_sessions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_sessions), "__call__") as call: + client.list_sessions() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ListSessionsRequest() + + @pytest.mark.asyncio async def test_list_sessions_async( transport: str = "grpc_asyncio", request_type=spanner.ListSessionsRequest @@ -1343,6 +1413,22 @@ def test_delete_session_from_dict(): test_delete_session(request_type=dict) +def test_delete_session_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_session), "__call__") as call: + client.delete_session() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.DeleteSessionRequest() + + @pytest.mark.asyncio async def test_delete_session_async( transport: str = "grpc_asyncio", request_type=spanner.DeleteSessionRequest @@ -1522,6 +1608,22 @@ def test_execute_sql_from_dict(): test_execute_sql(request_type=dict) +def test_execute_sql_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_sql), "__call__") as call: + client.execute_sql() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ExecuteSqlRequest() + + @pytest.mark.asyncio async def test_execute_sql_async( transport: str = "grpc_asyncio", request_type=spanner.ExecuteSqlRequest @@ -1644,6 +1746,24 @@ def test_execute_streaming_sql_from_dict(): test_execute_streaming_sql(request_type=dict) +def test_execute_streaming_sql_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.execute_streaming_sql), "__call__" + ) as call: + client.execute_streaming_sql() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ExecuteSqlRequest() + + @pytest.mark.asyncio async def test_execute_streaming_sql_async( transport: str = "grpc_asyncio", request_type=spanner.ExecuteSqlRequest @@ -1775,6 +1895,24 @@ def test_execute_batch_dml_from_dict(): test_execute_batch_dml(request_type=dict) +def test_execute_batch_dml_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_batch_dml), "__call__" + ) as call: + client.execute_batch_dml() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ExecuteBatchDmlRequest() + + @pytest.mark.asyncio async def test_execute_batch_dml_async( transport: str = "grpc_asyncio", request_type=spanner.ExecuteBatchDmlRequest @@ -1899,6 +2037,22 @@ def test_read_from_dict(): test_read(request_type=dict) +def test_read_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.read), "__call__") as call: + client.read() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ReadRequest() + + @pytest.mark.asyncio async def test_read_async( transport: str = "grpc_asyncio", request_type=spanner.ReadRequest @@ -2017,6 +2171,22 @@ def test_streaming_read_from_dict(): test_streaming_read(request_type=dict) +def test_streaming_read_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.streaming_read), "__call__") as call: + client.streaming_read() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ReadRequest() + + @pytest.mark.asyncio async def test_streaming_read_async( transport: str = "grpc_asyncio", request_type=spanner.ReadRequest @@ -2144,6 +2314,24 @@ def test_begin_transaction_from_dict(): test_begin_transaction(request_type=dict) +def test_begin_transaction_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.begin_transaction), "__call__" + ) as call: + client.begin_transaction() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.BeginTransactionRequest() + + @pytest.mark.asyncio async def test_begin_transaction_async( transport: str = "grpc_asyncio", request_type=spanner.BeginTransactionRequest @@ -2355,6 +2543,22 @@ def test_commit_from_dict(): test_commit(request_type=dict) +def test_commit_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.commit), "__call__") as call: + client.commit() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.CommitRequest() + + @pytest.mark.asyncio async def test_commit_async( transport: str = "grpc_asyncio", request_type=spanner.CommitRequest @@ -2581,6 +2785,22 @@ def test_rollback_from_dict(): test_rollback(request_type=dict) +def test_rollback_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.rollback), "__call__") as call: + client.rollback() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.RollbackRequest() + + @pytest.mark.asyncio async def test_rollback_async( transport: str = "grpc_asyncio", request_type=spanner.RollbackRequest @@ -2774,6 +2994,22 @@ def test_partition_query_from_dict(): test_partition_query(request_type=dict) +def test_partition_query_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.partition_query), "__call__") as call: + client.partition_query() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.PartitionQueryRequest() + + @pytest.mark.asyncio async def test_partition_query_async( transport: str = "grpc_asyncio", request_type=spanner.PartitionQueryRequest @@ -2894,6 +3130,22 @@ def test_partition_read_from_dict(): test_partition_read(request_type=dict) +def test_partition_read_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.partition_read), "__call__") as call:
+        client.partition_read()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == spanner.PartitionReadRequest()
+
+
 @pytest.mark.asyncio
 async def test_partition_read_async(
     transport: str = "grpc_asyncio", request_type=spanner.PartitionReadRequest

From 41045fd98c49a7acf42a684326957ffab12c0853 Mon Sep 17 00:00:00 2001
From: Vikash Singh <3116482+vi3k6i5@users.noreply.github.com>
Date: Thu, 22 Apr 2021 18:38:42 +0530
Subject: [PATCH 2/3] feat: added support for numeric for python decimal value

---
 .../cloud/spanner_v1/proto/transaction.proto  | 279 +-----------------
 1 file changed, 2 insertions(+), 277 deletions(-)

diff --git a/google/cloud/spanner_v1/proto/transaction.proto b/google/cloud/spanner_v1/proto/transaction.proto
index 30ef9dc84a..7082c56258 100644
--- a/google/cloud/spanner_v1/proto/transaction.proto
+++ b/google/cloud/spanner_v1/proto/transaction.proto
@@ -28,284 +28,9 @@ option java_package = "com.google.spanner.v1";
 option php_namespace = "Google\\Cloud\\Spanner\\V1";
 option ruby_package = "Google::Cloud::Spanner::V1";
 
-// # Transactions
+// TransactionOptions are used to specify different types of transactions.
 //
-//
-// Each session can have at most one active transaction at a time (note that
-// standalone reads and queries use a transaction internally and do count
-// towards the one transaction limit). After the active transaction is
-// completed, the session can immediately be re-used for the next transaction.
-// It is not necessary to create a new session for each transaction.
-//
-// # Transaction Modes
-//
-// Cloud Spanner supports three transaction modes:
-//
-//  1. Locking read-write. This type of transaction is the only way
-//     to write data into Cloud Spanner. These transactions rely on
-//     pessimistic locking and, if necessary, two-phase commit.
-// Locking read-write transactions may abort, requiring the -// application to retry. -// -// 2. Snapshot read-only. This transaction type provides guaranteed -// consistency across several reads, but does not allow -// writes. Snapshot read-only transactions can be configured to -// read at timestamps in the past. Snapshot read-only -// transactions do not need to be committed. -// -// 3. Partitioned DML. This type of transaction is used to execute -// a single Partitioned DML statement. Partitioned DML partitions -// the key space and runs the DML statement over each partition -// in parallel using separate, internal transactions that commit -// independently. Partitioned DML transactions do not need to be -// committed. -// -// For transactions that only read, snapshot read-only transactions -// provide simpler semantics and are almost always faster. In -// particular, read-only transactions do not take locks, so they do -// not conflict with read-write transactions. As a consequence of not -// taking locks, they also do not abort, so retry loops are not needed. -// -// Transactions may only read/write data in a single database. They -// may, however, read/write data in different tables within that -// database. -// -// ## Locking Read-Write Transactions -// -// Locking transactions may be used to atomically read-modify-write -// data anywhere in a database. This type of transaction is externally -// consistent. -// -// Clients should attempt to minimize the amount of time a transaction -// is active. Faster transactions commit with higher probability -// and cause less contention. Cloud Spanner attempts to keep read locks -// active as long as the transaction continues to do reads, and the -// transaction has not been terminated by -// [Commit][google.spanner.v1.Spanner.Commit] or -// [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of -// inactivity at the client may cause Cloud Spanner to release a -// transaction's locks and abort it. 
-// -// Conceptually, a read-write transaction consists of zero or more -// reads or SQL statements followed by -// [Commit][google.spanner.v1.Spanner.Commit]. At any time before -// [Commit][google.spanner.v1.Spanner.Commit], the client can send a -// [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the -// transaction. -// -// ## Semantics -// -// Cloud Spanner can commit the transaction if all read locks it acquired -// are still valid at commit time, and it is able to acquire write -// locks for all writes. Cloud Spanner can abort the transaction for any -// reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees -// that the transaction has not modified any user data in Cloud Spanner. -// -// Unless the transaction commits, Cloud Spanner makes no guarantees about -// how long the transaction's locks were held for. It is an error to -// use Cloud Spanner locks for any sort of mutual exclusion other than -// between Cloud Spanner transactions themselves. -// -// ## Retrying Aborted Transactions -// -// When a transaction aborts, the application can choose to retry the -// whole transaction again. To maximize the chances of successfully -// committing the retry, the client should execute the retry in the -// same session as the original attempt. The original session's lock -// priority increases with each consecutive abort, meaning that each -// attempt has a slightly better chance of success than the previous. -// -// Under some circumstances (e.g., many transactions attempting to -// modify the same row(s)), a transaction can abort many times in a -// short period before successfully committing. Thus, it is not a good -// idea to cap the number of retries a transaction can attempt; -// instead, it is better to limit the total amount of wall time spent -// retrying. 
-// -// ## Idle Transactions -// -// A transaction is considered idle if it has no outstanding reads or -// SQL queries and has not started a read or SQL query within the last 10 -// seconds. Idle transactions can be aborted by Cloud Spanner so that they -// don't hold on to locks indefinitely. In that case, the commit will -// fail with error `ABORTED`. -// -// If this behavior is undesirable, periodically executing a simple -// SQL query in the transaction (e.g., `SELECT 1`) prevents the -// transaction from becoming idle. -// -// ## Snapshot Read-Only Transactions -// -// Snapshot read-only transactions provides a simpler method than -// locking read-write transactions for doing several consistent -// reads. However, this type of transaction does not support writes. -// -// Snapshot transactions do not take locks. Instead, they work by -// choosing a Cloud Spanner timestamp, then executing all reads at that -// timestamp. Since they do not acquire locks, they do not block -// concurrent read-write transactions. -// -// Unlike locking read-write transactions, snapshot read-only -// transactions never abort. They can fail if the chosen read -// timestamp is garbage collected; however, the default garbage -// collection policy is generous enough that most applications do not -// need to worry about this in practice. -// -// Snapshot read-only transactions do not need to call -// [Commit][google.spanner.v1.Spanner.Commit] or -// [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not -// permitted to do so). -// -// To execute a snapshot transaction, the client specifies a timestamp -// bound, which tells Cloud Spanner how to choose a read timestamp. -// -// The types of timestamp bound are: -// -// - Strong (the default). -// - Bounded staleness. -// - Exact staleness. 
-// -// If the Cloud Spanner database to be read is geographically distributed, -// stale read-only transactions can execute more quickly than strong -// or read-write transaction, because they are able to execute far -// from the leader replica. -// -// Each type of timestamp bound is discussed in detail below. -// -// ## Strong -// -// Strong reads are guaranteed to see the effects of all transactions -// that have committed before the start of the read. Furthermore, all -// rows yielded by a single read are consistent with each other -- if -// any part of the read observes a transaction, all parts of the read -// see the transaction. -// -// Strong reads are not repeatable: two consecutive strong read-only -// transactions might return inconsistent results if there are -// concurrent writes. If consistency across reads is required, the -// reads should be executed within a transaction or at an exact read -// timestamp. -// -// See [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. -// -// ## Exact Staleness -// -// These timestamp bounds execute reads at a user-specified -// timestamp. Reads at a timestamp are guaranteed to see a consistent -// prefix of the global transaction history: they observe -// modifications done by all transactions with a commit timestamp <= -// the read timestamp, and observe none of the modifications done by -// transactions with a larger commit timestamp. They will block until -// all conflicting transactions that may be assigned commit timestamps -// <= the read timestamp have finished. -// -// The timestamp can either be expressed as an absolute Cloud Spanner commit -// timestamp or a staleness relative to the current time. -// -// These modes do not require a "negotiation phase" to pick a -// timestamp. As a result, they execute slightly faster than the -// equivalent boundedly stale concurrency modes. On the other hand, -// boundedly stale reads usually return fresher results. 
-// -// See [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] and -// [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. -// -// ## Bounded Staleness -// -// Bounded staleness modes allow Cloud Spanner to pick the read timestamp, -// subject to a user-provided staleness bound. Cloud Spanner chooses the -// newest timestamp within the staleness bound that allows execution -// of the reads at the closest available replica without blocking. -// -// All rows yielded are consistent with each other -- if any part of -// the read observes a transaction, all parts of the read see the -// transaction. Boundedly stale reads are not repeatable: two stale -// reads, even if they use the same staleness bound, can execute at -// different timestamps and thus return inconsistent results. -// -// Boundedly stale reads execute in two phases: the first phase -// negotiates a timestamp among all replicas needed to serve the -// read. In the second phase, reads are executed at the negotiated -// timestamp. -// -// As a result of the two phase execution, bounded staleness reads are -// usually a little slower than comparable exact staleness -// reads. However, they are typically able to return fresher -// results, and are more likely to execute at the closest replica. -// -// Because the timestamp negotiation requires up-front knowledge of -// which rows will be read, it can only be used with single-use -// read-only transactions. -// -// See [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] and -// [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. -// -// ## Old Read Timestamps and Garbage Collection -// -// Cloud Spanner continuously garbage collects deleted and overwritten data -// in the background to reclaim storage space. 
This process is known -// as "version GC". By default, version GC reclaims versions after they -// are one hour old. Because of this, Cloud Spanner cannot perform reads -// at read timestamps more than one hour in the past. This -// restriction also applies to in-progress reads and/or SQL queries whose -// timestamp become too old while executing. Reads and SQL queries with -// too-old read timestamps fail with the error `FAILED_PRECONDITION`. -// -// ## Partitioned DML Transactions -// -// Partitioned DML transactions are used to execute DML statements with a -// different execution strategy that provides different, and often better, -// scalability properties for large, table-wide operations than DML in a -// ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, -// should prefer using ReadWrite transactions. -// -// Partitioned DML partitions the keyspace and runs the DML statement on each -// partition in separate, internal transactions. These transactions commit -// automatically when complete, and run independently from one another. -// -// To reduce lock contention, this execution strategy only acquires read locks -// on rows that match the WHERE clause of the statement. Additionally, the -// smaller per-partition transactions hold locks for less time. -// -// That said, Partitioned DML is not a drop-in replacement for standard DML used -// in ReadWrite transactions. -// -// - The DML statement must be fully-partitionable. Specifically, the statement -// must be expressible as the union of many statements which each access only -// a single row of the table. -// -// - The statement is not applied atomically to all rows of the table. Rather, -// the statement is applied atomically to partitions of the table, in -// independent transactions. Secondary index rows are updated atomically -// with the base table rows. -// -// - Partitioned DML does not guarantee exactly-once execution semantics -// against a partition. 
The statement will be applied at least once to each -// partition. It is strongly recommended that the DML statement should be -// idempotent to avoid unexpected results. For instance, it is potentially -// dangerous to run a statement such as -// `UPDATE table SET column = column + 1` as it could be run multiple times -// against some rows. -// -// - The partitions are committed automatically - there is no support for -// Commit or Rollback. If the call returns an error, or if the client issuing -// the ExecuteSql call dies, it is possible that some rows had the statement -// executed on them successfully. It is also possible that statement was -// never executed against other rows. -// -// - Partitioned DML transactions may only contain the execution of a single -// DML statement via ExecuteSql or ExecuteStreamingSql. -// -// - If any error is encountered during the execution of the partitioned DML -// operation (for instance, a UNIQUE INDEX violation, division by zero, or a -// value that cannot be stored due to schema constraints), then the -// operation is stopped at that point and an error is returned. It is -// possible that at this point, some partitions have been committed (or even -// committed multiple times), and other partitions have not been run at all. -// -// Given the above, Partitioned DML is good fit for large, database-wide, -// operations that are idempotent, such as deleting old rows from a very large -// table. +// For more info, see: https://cloud.google.com/spanner/docs/reference/rest/v1/Transaction message TransactionOptions { // Message type to initiate a read-write transaction. Currently this // transaction type has no options. 
From bee083d138bd0be3e3207390fa8daf326b219b1d Mon Sep 17 00:00:00 2001
From: Vikash Singh <3116482+vi3k6i5@users.noreply.github.com>
Date: Fri, 23 Apr 2021 22:20:01 +0530
Subject: [PATCH 3/3] feat: added support for converting decimal field to
 numeric field and removed its conversion to string

---
 google/cloud/spanner_dbapi/parse_utils.py    | 16 +---------------
 tests/unit/spanner_dbapi/test_parse_utils.py | 13 +------------
 2 files changed, 2 insertions(+), 27 deletions(-)

diff --git a/google/cloud/spanner_dbapi/parse_utils.py b/google/cloud/spanner_dbapi/parse_utils.py
index 0a7d505541..1744874764 100644
--- a/google/cloud/spanner_dbapi/parse_utils.py
+++ b/google/cloud/spanner_dbapi/parse_utils.py
@@ -509,25 +509,11 @@ def sql_pyformat_args_to_spanner(sql, params):
                 resolved_value = pyfmt % params
                 named_args[key] = resolved_value
             else:
-                named_args[key] = cast_for_spanner(params[i])
+                named_args[key] = params[i]
 
     return sanitize_literals_for_upload(sql), named_args
 
 
-def cast_for_spanner(value):
-    """Convert the param to its Cloud Spanner equivalent type.
-
-    :type value: Any
-    :param value: The value to convert to a Cloud Spanner type.
-
-    :rtype: Any
-    :returns: The value converted to a Cloud Spanner type.
-    """
-    if isinstance(value, decimal.Decimal):
-        return str(value)
-    return value
-
-
 def get_param_types(params):
     """Determine Cloud Spanner types for the given parameters.
diff --git a/tests/unit/spanner_dbapi/test_parse_utils.py b/tests/unit/spanner_dbapi/test_parse_utils.py index c612659a3e..73277a7de3 100644 --- a/tests/unit/spanner_dbapi/test_parse_utils.py +++ b/tests/unit/spanner_dbapi/test_parse_utils.py @@ -307,7 +307,7 @@ def test_sql_pyformat_args_to_spanner(self): ), ( "SELECT (an.p + @a0) AS np FROM an WHERE (an.p + @a1) = @a2", - {"a0": 1, "a1": 1.0, "a2": str(31)}, + {"a0": 1, "a1": 1.0, "a2": decimal.Decimal("31")}, ), ), ] @@ -339,17 +339,6 @@ def test_sql_pyformat_args_to_spanner_invalid(self): lambda: sql_pyformat_args_to_spanner(sql, params), ) - def test_cast_for_spanner(self): - import decimal - - from google.cloud.spanner_dbapi.parse_utils import cast_for_spanner - - dec = 3 - value = decimal.Decimal(dec) - self.assertEqual(cast_for_spanner(value), str(dec)) - self.assertEqual(cast_for_spanner(5), 5) - self.assertEqual(cast_for_spanner("string"), "string") - @unittest.skipIf(skip_condition, skip_message) def test_get_param_types(self): import datetime