diff --git a/docs/language_v1/types.rst b/docs/language_v1/types.rst index befde156..5dd3769e 100644 --- a/docs/language_v1/types.rst +++ b/docs/language_v1/types.rst @@ -3,3 +3,4 @@ Types for Google Cloud Language v1 API .. automodule:: google.cloud.language_v1.types :members: + :show-inheritance: diff --git a/docs/language_v1beta2/types.rst b/docs/language_v1beta2/types.rst index 5a1c2284..2e834e61 100644 --- a/docs/language_v1beta2/types.rst +++ b/docs/language_v1beta2/types.rst @@ -3,3 +3,4 @@ Types for Google Cloud Language v1beta2 API .. automodule:: google.cloud.language_v1beta2.types :members: + :show-inheritance: diff --git a/google/cloud/language_v1/proto/language_service.proto b/google/cloud/language_v1/proto/language_service.proto index e8e4fd8d..304eab07 100644 --- a/google/cloud/language_v1/proto/language_service.proto +++ b/google/cloud/language_v1/proto/language_service.proto @@ -100,7 +100,7 @@ service LanguageService { } } -// ################################################################ # + // // Represents the input to API methods. 
message Document { diff --git a/google/cloud/language_v1/services/language_service/async_client.py b/google/cloud/language_v1/services/language_service/async_client.py index f7a214e9..0d7fd084 100644 --- a/google/cloud/language_v1/services/language_service/async_client.py +++ b/google/cloud/language_v1/services/language_service/async_client.py @@ -45,9 +45,47 @@ class LanguageServiceAsyncClient: DEFAULT_ENDPOINT = LanguageServiceClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = LanguageServiceClient.DEFAULT_MTLS_ENDPOINT + common_billing_account_path = staticmethod( + LanguageServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + LanguageServiceClient.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(LanguageServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + LanguageServiceClient.parse_common_folder_path + ) + + common_organization_path = staticmethod( + LanguageServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + LanguageServiceClient.parse_common_organization_path + ) + + common_project_path = staticmethod(LanguageServiceClient.common_project_path) + parse_common_project_path = staticmethod( + LanguageServiceClient.parse_common_project_path + ) + + common_location_path = staticmethod(LanguageServiceClient.common_location_path) + parse_common_location_path = staticmethod( + LanguageServiceClient.parse_common_location_path + ) + from_service_account_file = LanguageServiceClient.from_service_account_file from_service_account_json = from_service_account_file + @property + def transport(self) -> LanguageServiceTransport: + """Return the transport used by the client instance. + + Returns: + LanguageServiceTransport: The transport used by the client instance. 
+ """ + return self._client.transport + get_transport_class = functools.partial( type(LanguageServiceClient).get_transport_class, type(LanguageServiceClient) ) @@ -143,7 +181,8 @@ async def analyze_sentiment( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([document, encoding_type]): + has_flattened_params = any([document, encoding_type]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." @@ -168,7 +207,7 @@ async def analyze_sentiment( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -224,7 +263,8 @@ async def analyze_entities( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([document, encoding_type]): + has_flattened_params = any([document, encoding_type]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." @@ -249,7 +289,7 @@ async def analyze_entities( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -308,7 +348,8 @@ async def analyze_entity_sentiment( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- if request is not None and any([document, encoding_type]): + has_flattened_params = any([document, encoding_type]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." @@ -333,7 +374,7 @@ async def analyze_entity_sentiment( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -388,7 +429,8 @@ async def analyze_syntax( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([document, encoding_type]): + has_flattened_params = any([document, encoding_type]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." @@ -413,7 +455,7 @@ async def analyze_syntax( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -462,7 +504,8 @@ async def classify_text( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([document]): + has_flattened_params = any([document]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
@@ -485,7 +528,7 @@ async def classify_text( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -550,7 +593,8 @@ async def annotate_text( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([document, features, encoding_type]): + has_flattened_params = any([document, features, encoding_type]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." @@ -577,7 +621,7 @@ async def annotate_text( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, diff --git a/google/cloud/language_v1/services/language_service/client.py b/google/cloud/language_v1/services/language_service/client.py index 1084acd3..2c4d9504 100644 --- a/google/cloud/language_v1/services/language_service/client.py +++ b/google/cloud/language_v1/services/language_service/client.py @@ -131,6 +131,74 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): from_service_account_json = from_service_account_file + @property + def transport(self) -> LanguageServiceTransport: + """Return the transport used by the client instance. + + Returns: + LanguageServiceTransport: The transport used by the client instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Return a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Return a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Return a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Return a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str,) -> str: + """Return a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + def __init__( self, *, @@ -166,10 +234,10 @@ def __init__( not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: diff --git a/google/cloud/language_v1/services/language_service/transports/base.py b/google/cloud/language_v1/services/language_service/transports/base.py index 79ed44e8..20f77df4 100644 --- a/google/cloud/language_v1/services/language_service/transports/base.py +++ b/google/cloud/language_v1/services/language_service/transports/base.py @@ -114,7 +114,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -127,7 +127,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -140,7 +140,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, 
predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -153,7 +153,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -166,7 +166,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -179,7 +179,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, diff --git a/google/cloud/language_v1/services/language_service/transports/grpc.py b/google/cloud/language_v1/services/language_service/transports/grpc.py index 73608a10..6260c9ec 100644 --- a/google/cloud/language_v1/services/language_service/transports/grpc.py +++ b/google/cloud/language_v1/services/language_service/transports/grpc.py @@ -90,10 +90,10 @@ def __init__( for grpc channel. It is ignored if ``channel`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. 
If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -102,6 +102,8 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._ssl_channel_credentials = ssl_channel_credentials + if channel: # Sanity check: Ensure that channel and credentials are not both # provided. @@ -109,6 +111,7 @@ def __init__( # If a channel was explicitly provided, set it. self._grpc_channel = channel + self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn( "api_mtls_endpoint and client_cert_source are deprecated", @@ -145,6 +148,7 @@ def __init__( scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, ) + self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" @@ -222,12 +226,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. + """Return the channel designed to connect to this service. """ - # Return the channel from cache. return self._grpc_channel @property diff --git a/google/cloud/language_v1/services/language_service/transports/grpc_asyncio.py b/google/cloud/language_v1/services/language_service/transports/grpc_asyncio.py index b55e8c8b..93692457 100644 --- a/google/cloud/language_v1/services/language_service/transports/grpc_asyncio.py +++ b/google/cloud/language_v1/services/language_service/transports/grpc_asyncio.py @@ -147,6 +147,8 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._ssl_channel_credentials = ssl_channel_credentials + if channel: # Sanity check: Ensure that channel and credentials are not both # provided. 
@@ -154,6 +156,7 @@ def __init__( # If a channel was explicitly provided, set it. self._grpc_channel = channel + self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn( "api_mtls_endpoint and client_cert_source are deprecated", @@ -190,6 +193,7 @@ def __init__( scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, ) + self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" diff --git a/google/cloud/language_v1/types/language_service.py b/google/cloud/language_v1/types/language_service.py index 10664a54..4fedc52d 100644 --- a/google/cloud/language_v1/types/language_service.py +++ b/google/cloud/language_v1/types/language_service.py @@ -564,7 +564,7 @@ class Type(proto.Enum): type_ = proto.Field(proto.ENUM, number=2, enum=Type,) - sentiment = proto.Field(proto.MESSAGE, number=3, message=Sentiment,) + sentiment = proto.Field(proto.MESSAGE, number=3, message="Sentiment",) class TextSpan(proto.Message): @@ -615,7 +615,7 @@ class AnalyzeSentimentRequest(proto.Message): calculate sentence offsets. """ - document = proto.Field(proto.MESSAGE, number=1, message=Document,) + document = proto.Field(proto.MESSAGE, number=1, message="Document",) encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",) @@ -637,11 +637,11 @@ class AnalyzeSentimentResponse(proto.Message): document. """ - document_sentiment = proto.Field(proto.MESSAGE, number=1, message=Sentiment,) + document_sentiment = proto.Field(proto.MESSAGE, number=1, message="Sentiment",) language = proto.Field(proto.STRING, number=2) - sentences = proto.RepeatedField(proto.MESSAGE, number=3, message=Sentence,) + sentences = proto.RepeatedField(proto.MESSAGE, number=3, message="Sentence",) class AnalyzeEntitySentimentRequest(proto.Message): @@ -655,7 +655,7 @@ class AnalyzeEntitySentimentRequest(proto.Message): calculate offsets. 
""" - document = proto.Field(proto.MESSAGE, number=1, message=Document,) + document = proto.Field(proto.MESSAGE, number=1, message="Document",) encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",) @@ -675,7 +675,7 @@ class AnalyzeEntitySentimentResponse(proto.Message): field for more details. """ - entities = proto.RepeatedField(proto.MESSAGE, number=1, message=Entity,) + entities = proto.RepeatedField(proto.MESSAGE, number=1, message="Entity",) language = proto.Field(proto.STRING, number=2) @@ -691,7 +691,7 @@ class AnalyzeEntitiesRequest(proto.Message): calculate offsets. """ - document = proto.Field(proto.MESSAGE, number=1, message=Document,) + document = proto.Field(proto.MESSAGE, number=1, message="Document",) encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",) @@ -711,7 +711,7 @@ class AnalyzeEntitiesResponse(proto.Message): field for more details. """ - entities = proto.RepeatedField(proto.MESSAGE, number=1, message=Entity,) + entities = proto.RepeatedField(proto.MESSAGE, number=1, message="Entity",) language = proto.Field(proto.STRING, number=2) @@ -727,7 +727,7 @@ class AnalyzeSyntaxRequest(proto.Message): calculate offsets. """ - document = proto.Field(proto.MESSAGE, number=1, message=Document,) + document = proto.Field(proto.MESSAGE, number=1, message="Document",) encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",) @@ -749,9 +749,9 @@ class AnalyzeSyntaxResponse(proto.Message): field for more details. """ - sentences = proto.RepeatedField(proto.MESSAGE, number=1, message=Sentence,) + sentences = proto.RepeatedField(proto.MESSAGE, number=1, message="Sentence",) - tokens = proto.RepeatedField(proto.MESSAGE, number=2, message=Token,) + tokens = proto.RepeatedField(proto.MESSAGE, number=2, message="Token",) language = proto.Field(proto.STRING, number=3) @@ -764,7 +764,7 @@ class ClassifyTextRequest(proto.Message): Input document. 
""" - document = proto.Field(proto.MESSAGE, number=1, message=Document,) + document = proto.Field(proto.MESSAGE, number=1, message="Document",) class ClassifyTextResponse(proto.Message): @@ -776,7 +776,7 @@ class ClassifyTextResponse(proto.Message): """ categories = proto.RepeatedField( - proto.MESSAGE, number=1, message=ClassificationCategory, + proto.MESSAGE, number=1, message="ClassificationCategory", ) @@ -824,7 +824,7 @@ class Features(proto.Message): classify_text = proto.Field(proto.BOOL, number=6) - document = proto.Field(proto.MESSAGE, number=1, message=Document,) + document = proto.Field(proto.MESSAGE, number=1, message="Document",) features = proto.Field(proto.MESSAGE, number=2, message=Features,) @@ -861,18 +861,18 @@ class AnnotateTextResponse(proto.Message): Categories identified in the input document. """ - sentences = proto.RepeatedField(proto.MESSAGE, number=1, message=Sentence,) + sentences = proto.RepeatedField(proto.MESSAGE, number=1, message="Sentence",) - tokens = proto.RepeatedField(proto.MESSAGE, number=2, message=Token,) + tokens = proto.RepeatedField(proto.MESSAGE, number=2, message="Token",) - entities = proto.RepeatedField(proto.MESSAGE, number=3, message=Entity,) + entities = proto.RepeatedField(proto.MESSAGE, number=3, message="Entity",) - document_sentiment = proto.Field(proto.MESSAGE, number=4, message=Sentiment,) + document_sentiment = proto.Field(proto.MESSAGE, number=4, message="Sentiment",) language = proto.Field(proto.STRING, number=5) categories = proto.RepeatedField( - proto.MESSAGE, number=6, message=ClassificationCategory, + proto.MESSAGE, number=6, message="ClassificationCategory", ) diff --git a/google/cloud/language_v1beta2/proto/language_service.proto b/google/cloud/language_v1beta2/proto/language_service.proto index afca1205..bd4167a3 100644 --- a/google/cloud/language_v1beta2/proto/language_service.proto +++ b/google/cloud/language_v1beta2/proto/language_service.proto @@ -101,7 +101,7 @@ service LanguageService { } } 
-// ################################################################ # + // // Represents the input to API methods. message Document { diff --git a/google/cloud/language_v1beta2/services/language_service/async_client.py b/google/cloud/language_v1beta2/services/language_service/async_client.py index 0c2f1c99..dab4fba9 100644 --- a/google/cloud/language_v1beta2/services/language_service/async_client.py +++ b/google/cloud/language_v1beta2/services/language_service/async_client.py @@ -45,9 +45,47 @@ class LanguageServiceAsyncClient: DEFAULT_ENDPOINT = LanguageServiceClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = LanguageServiceClient.DEFAULT_MTLS_ENDPOINT + common_billing_account_path = staticmethod( + LanguageServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + LanguageServiceClient.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(LanguageServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + LanguageServiceClient.parse_common_folder_path + ) + + common_organization_path = staticmethod( + LanguageServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + LanguageServiceClient.parse_common_organization_path + ) + + common_project_path = staticmethod(LanguageServiceClient.common_project_path) + parse_common_project_path = staticmethod( + LanguageServiceClient.parse_common_project_path + ) + + common_location_path = staticmethod(LanguageServiceClient.common_location_path) + parse_common_location_path = staticmethod( + LanguageServiceClient.parse_common_location_path + ) + from_service_account_file = LanguageServiceClient.from_service_account_file from_service_account_json = from_service_account_file + @property + def transport(self) -> LanguageServiceTransport: + """Return the transport used by the client instance. + + Returns: + LanguageServiceTransport: The transport used by the client instance. 
+ """ + return self._client.transport + get_transport_class = functools.partial( type(LanguageServiceClient).get_transport_class, type(LanguageServiceClient) ) @@ -144,7 +182,8 @@ async def analyze_sentiment( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([document, encoding_type]): + has_flattened_params = any([document, encoding_type]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." @@ -169,7 +208,7 @@ async def analyze_sentiment( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -225,7 +264,8 @@ async def analyze_entities( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([document, encoding_type]): + has_flattened_params = any([document, encoding_type]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." @@ -250,7 +290,7 @@ async def analyze_entities( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -309,7 +349,8 @@ async def analyze_entity_sentiment( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- if request is not None and any([document, encoding_type]): + has_flattened_params = any([document, encoding_type]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." @@ -334,7 +375,7 @@ async def analyze_entity_sentiment( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -389,7 +430,8 @@ async def analyze_syntax( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([document, encoding_type]): + has_flattened_params = any([document, encoding_type]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." @@ -414,7 +456,7 @@ async def analyze_syntax( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -463,7 +505,8 @@ async def classify_text( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([document]): + has_flattened_params = any([document]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
@@ -486,7 +529,7 @@ async def classify_text( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -551,7 +594,8 @@ async def annotate_text( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([document, features, encoding_type]): + has_flattened_params = any([document, features, encoding_type]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." @@ -578,7 +622,7 @@ async def annotate_text( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, diff --git a/google/cloud/language_v1beta2/services/language_service/client.py b/google/cloud/language_v1beta2/services/language_service/client.py index c2d85031..b5346311 100644 --- a/google/cloud/language_v1beta2/services/language_service/client.py +++ b/google/cloud/language_v1beta2/services/language_service/client.py @@ -131,6 +131,74 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): from_service_account_json = from_service_account_file + @property + def transport(self) -> LanguageServiceTransport: + """Return the transport used by the client instance. + + Returns: + LanguageServiceTransport: The transport used by the client instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Return a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Return a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Return a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Return a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str,) -> str: + """Return a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + def __init__( self, *, @@ -166,10 +234,10 @@ def __init__( not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: diff --git a/google/cloud/language_v1beta2/services/language_service/transports/base.py b/google/cloud/language_v1beta2/services/language_service/transports/base.py index aa6eb5d0..4e4f7add 100644 --- a/google/cloud/language_v1beta2/services/language_service/transports/base.py +++ b/google/cloud/language_v1beta2/services/language_service/transports/base.py @@ -114,7 +114,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -127,7 +127,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -140,7 +140,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, 
multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -153,7 +153,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -166,7 +166,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, @@ -179,7 +179,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( - exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), ), default_timeout=600.0, diff --git a/google/cloud/language_v1beta2/services/language_service/transports/grpc.py b/google/cloud/language_v1beta2/services/language_service/transports/grpc.py index dd734bc0..849c6483 100644 --- a/google/cloud/language_v1beta2/services/language_service/transports/grpc.py +++ b/google/cloud/language_v1beta2/services/language_service/transports/grpc.py @@ -90,10 +90,10 @@ def __init__( for grpc channel. It is ignored if ``channel`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. 
If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -102,6 +102,8 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._ssl_channel_credentials = ssl_channel_credentials + if channel: # Sanity check: Ensure that channel and credentials are not both # provided. @@ -109,6 +111,7 @@ def __init__( # If a channel was explicitly provided, set it. self._grpc_channel = channel + self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn( "api_mtls_endpoint and client_cert_source are deprecated", @@ -145,6 +148,7 @@ def __init__( scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, ) + self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" @@ -222,12 +226,8 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. + """Return the channel designed to connect to this service. """ - # Return the channel from cache. return self._grpc_channel @property diff --git a/google/cloud/language_v1beta2/services/language_service/transports/grpc_asyncio.py b/google/cloud/language_v1beta2/services/language_service/transports/grpc_asyncio.py index 7898ec3f..475b78b3 100644 --- a/google/cloud/language_v1beta2/services/language_service/transports/grpc_asyncio.py +++ b/google/cloud/language_v1beta2/services/language_service/transports/grpc_asyncio.py @@ -147,6 +147,8 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._ssl_channel_credentials = ssl_channel_credentials + if channel: # Sanity check: Ensure that channel and credentials are not both # provided. 
@@ -154,6 +156,7 @@ def __init__( # If a channel was explicitly provided, set it. self._grpc_channel = channel + self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn( "api_mtls_endpoint and client_cert_source are deprecated", @@ -190,6 +193,7 @@ def __init__( scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, ) + self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" diff --git a/google/cloud/language_v1beta2/types/language_service.py b/google/cloud/language_v1beta2/types/language_service.py index 411dd8ee..567aca06 100644 --- a/google/cloud/language_v1beta2/types/language_service.py +++ b/google/cloud/language_v1beta2/types/language_service.py @@ -561,7 +561,7 @@ class Type(proto.Enum): type_ = proto.Field(proto.ENUM, number=2, enum=Type,) - sentiment = proto.Field(proto.MESSAGE, number=3, message=Sentiment,) + sentiment = proto.Field(proto.MESSAGE, number=3, message="Sentiment",) class TextSpan(proto.Message): @@ -613,7 +613,7 @@ class AnalyzeSentimentRequest(proto.Message): sentiment. """ - document = proto.Field(proto.MESSAGE, number=1, message=Document,) + document = proto.Field(proto.MESSAGE, number=1, message="Document",) encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",) @@ -635,11 +635,11 @@ class AnalyzeSentimentResponse(proto.Message): document. """ - document_sentiment = proto.Field(proto.MESSAGE, number=1, message=Sentiment,) + document_sentiment = proto.Field(proto.MESSAGE, number=1, message="Sentiment",) language = proto.Field(proto.STRING, number=2) - sentences = proto.RepeatedField(proto.MESSAGE, number=3, message=Sentence,) + sentences = proto.RepeatedField(proto.MESSAGE, number=3, message="Sentence",) class AnalyzeEntitySentimentRequest(proto.Message): @@ -653,7 +653,7 @@ class AnalyzeEntitySentimentRequest(proto.Message): calculate offsets. 
""" - document = proto.Field(proto.MESSAGE, number=1, message=Document,) + document = proto.Field(proto.MESSAGE, number=1, message="Document",) encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",) @@ -673,7 +673,7 @@ class AnalyzeEntitySentimentResponse(proto.Message): field for more details. """ - entities = proto.RepeatedField(proto.MESSAGE, number=1, message=Entity,) + entities = proto.RepeatedField(proto.MESSAGE, number=1, message="Entity",) language = proto.Field(proto.STRING, number=2) @@ -689,7 +689,7 @@ class AnalyzeEntitiesRequest(proto.Message): calculate offsets. """ - document = proto.Field(proto.MESSAGE, number=1, message=Document,) + document = proto.Field(proto.MESSAGE, number=1, message="Document",) encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",) @@ -709,7 +709,7 @@ class AnalyzeEntitiesResponse(proto.Message): field for more details. """ - entities = proto.RepeatedField(proto.MESSAGE, number=1, message=Entity,) + entities = proto.RepeatedField(proto.MESSAGE, number=1, message="Entity",) language = proto.Field(proto.STRING, number=2) @@ -725,7 +725,7 @@ class AnalyzeSyntaxRequest(proto.Message): calculate offsets. """ - document = proto.Field(proto.MESSAGE, number=1, message=Document,) + document = proto.Field(proto.MESSAGE, number=1, message="Document",) encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",) @@ -747,9 +747,9 @@ class AnalyzeSyntaxResponse(proto.Message): field for more details. """ - sentences = proto.RepeatedField(proto.MESSAGE, number=1, message=Sentence,) + sentences = proto.RepeatedField(proto.MESSAGE, number=1, message="Sentence",) - tokens = proto.RepeatedField(proto.MESSAGE, number=2, message=Token,) + tokens = proto.RepeatedField(proto.MESSAGE, number=2, message="Token",) language = proto.Field(proto.STRING, number=3) @@ -762,7 +762,7 @@ class ClassifyTextRequest(proto.Message): Required. Input document. 
""" - document = proto.Field(proto.MESSAGE, number=1, message=Document,) + document = proto.Field(proto.MESSAGE, number=1, message="Document",) class ClassifyTextResponse(proto.Message): @@ -774,7 +774,7 @@ class ClassifyTextResponse(proto.Message): """ categories = proto.RepeatedField( - proto.MESSAGE, number=1, message=ClassificationCategory, + proto.MESSAGE, number=1, message="ClassificationCategory", ) @@ -825,7 +825,7 @@ class Features(proto.Message): classify_text = proto.Field(proto.BOOL, number=6) - document = proto.Field(proto.MESSAGE, number=1, message=Document,) + document = proto.Field(proto.MESSAGE, number=1, message="Document",) features = proto.Field(proto.MESSAGE, number=2, message=Features,) @@ -862,18 +862,18 @@ class AnnotateTextResponse(proto.Message): Categories identified in the input document. """ - sentences = proto.RepeatedField(proto.MESSAGE, number=1, message=Sentence,) + sentences = proto.RepeatedField(proto.MESSAGE, number=1, message="Sentence",) - tokens = proto.RepeatedField(proto.MESSAGE, number=2, message=Token,) + tokens = proto.RepeatedField(proto.MESSAGE, number=2, message="Token",) - entities = proto.RepeatedField(proto.MESSAGE, number=3, message=Entity,) + entities = proto.RepeatedField(proto.MESSAGE, number=3, message="Entity",) - document_sentiment = proto.Field(proto.MESSAGE, number=4, message=Sentiment,) + document_sentiment = proto.Field(proto.MESSAGE, number=4, message="Sentiment",) language = proto.Field(proto.STRING, number=5) categories = proto.RepeatedField( - proto.MESSAGE, number=6, message=ClassificationCategory, + proto.MESSAGE, number=6, message="ClassificationCategory", ) diff --git a/noxfile.py b/noxfile.py index e1a2051c..d1ebf6e0 100644 --- a/noxfile.py +++ b/noxfile.py @@ -28,7 +28,7 @@ DEFAULT_PYTHON_VERSION = "3.8" SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] -UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"] +UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] @nox.session(python=DEFAULT_PYTHON_VERSION) @@ 
-151,7 +151,7 @@ def docs(session): """Build the docs for this library.""" session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") + session.install("sphinx", "alabaster", "recommonmark") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( diff --git a/scripts/fixup_language_v1_keywords.py b/scripts/fixup_language_v1_keywords.py index c7c107ce..3d84959b 100644 --- a/scripts/fixup_language_v1_keywords.py +++ b/scripts/fixup_language_v1_keywords.py @@ -1,3 +1,4 @@ +#! /usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright 2020 Google LLC diff --git a/scripts/fixup_language_v1beta2_keywords.py b/scripts/fixup_language_v1beta2_keywords.py index c7c107ce..3d84959b 100644 --- a/scripts/fixup_language_v1beta2_keywords.py +++ b/scripts/fixup_language_v1beta2_keywords.py @@ -1,3 +1,4 @@ +#! /usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright 2020 Google LLC diff --git a/synth.metadata b/synth.metadata index bc28899b..f6b0e17e 100644 --- a/synth.metadata +++ b/synth.metadata @@ -3,8 +3,16 @@ { "git": { "name": ".", - "remote": "git@github.com:/googleapis/python-language.git", - "sha": "cde50983b6d45fd0b2348eeb552404b391403bc6" + "remote": "https://github.com/googleapis/python-language.git", + "sha": "8c9fdeb8dbf5376dc2ac813e4e9b14a8886ebd51" + } + }, + { + "git": { + "name": "googleapis", + "remote": "https://github.com/googleapis/googleapis.git", + "sha": "14adde91e90011702483e943edf1044549252bd9", + "internalRef": "344906237" } }, { @@ -41,5 +49,116 @@ "generator": "bazel" } } + ], + "generatedFiles": [ + ".flake8", + ".github/CONTRIBUTING.md", + ".github/ISSUE_TEMPLATE/bug_report.md", + ".github/ISSUE_TEMPLATE/feature_request.md", + ".github/ISSUE_TEMPLATE/support_request.md", + ".github/PULL_REQUEST_TEMPLATE.md", + ".github/release-please.yml", + ".github/snippet-bot.yml", + ".gitignore", + ".kokoro/build.sh", + ".kokoro/continuous/common.cfg", + ".kokoro/continuous/continuous.cfg", + 
".kokoro/docker/docs/Dockerfile", + ".kokoro/docker/docs/fetch_gpg_keys.sh", + ".kokoro/docs/common.cfg", + ".kokoro/docs/docs-presubmit.cfg", + ".kokoro/docs/docs.cfg", + ".kokoro/populate-secrets.sh", + ".kokoro/presubmit/common.cfg", + ".kokoro/presubmit/presubmit.cfg", + ".kokoro/publish-docs.sh", + ".kokoro/release.sh", + ".kokoro/release/common.cfg", + ".kokoro/release/release.cfg", + ".kokoro/samples/lint/common.cfg", + ".kokoro/samples/lint/continuous.cfg", + ".kokoro/samples/lint/periodic.cfg", + ".kokoro/samples/lint/presubmit.cfg", + ".kokoro/samples/python3.6/common.cfg", + ".kokoro/samples/python3.6/continuous.cfg", + ".kokoro/samples/python3.6/periodic.cfg", + ".kokoro/samples/python3.6/presubmit.cfg", + ".kokoro/samples/python3.7/common.cfg", + ".kokoro/samples/python3.7/continuous.cfg", + ".kokoro/samples/python3.7/periodic.cfg", + ".kokoro/samples/python3.7/presubmit.cfg", + ".kokoro/samples/python3.8/common.cfg", + ".kokoro/samples/python3.8/continuous.cfg", + ".kokoro/samples/python3.8/periodic.cfg", + ".kokoro/samples/python3.8/presubmit.cfg", + ".kokoro/test-samples.sh", + ".kokoro/trampoline.sh", + ".kokoro/trampoline_v2.sh", + ".trampolinerc", + "CODE_OF_CONDUCT.md", + "CONTRIBUTING.rst", + "LICENSE", + "MANIFEST.in", + "docs/_static/custom.css", + "docs/_templates/layout.html", + "docs/conf.py", + "docs/language_v1/services.rst", + "docs/language_v1/types.rst", + "docs/language_v1beta2/services.rst", + "docs/language_v1beta2/types.rst", + "docs/multiprocessing.rst", + "google/cloud/language/__init__.py", + "google/cloud/language/py.typed", + "google/cloud/language_v1/__init__.py", + "google/cloud/language_v1/proto/language_service.proto", + "google/cloud/language_v1/py.typed", + "google/cloud/language_v1/services/__init__.py", + "google/cloud/language_v1/services/language_service/__init__.py", + "google/cloud/language_v1/services/language_service/async_client.py", + "google/cloud/language_v1/services/language_service/client.py", + 
"google/cloud/language_v1/services/language_service/transports/__init__.py", + "google/cloud/language_v1/services/language_service/transports/base.py", + "google/cloud/language_v1/services/language_service/transports/grpc.py", + "google/cloud/language_v1/services/language_service/transports/grpc_asyncio.py", + "google/cloud/language_v1/types/__init__.py", + "google/cloud/language_v1/types/language_service.py", + "google/cloud/language_v1beta2/__init__.py", + "google/cloud/language_v1beta2/proto/language_service.proto", + "google/cloud/language_v1beta2/py.typed", + "google/cloud/language_v1beta2/services/__init__.py", + "google/cloud/language_v1beta2/services/language_service/__init__.py", + "google/cloud/language_v1beta2/services/language_service/async_client.py", + "google/cloud/language_v1beta2/services/language_service/client.py", + "google/cloud/language_v1beta2/services/language_service/transports/__init__.py", + "google/cloud/language_v1beta2/services/language_service/transports/base.py", + "google/cloud/language_v1beta2/services/language_service/transports/grpc.py", + "google/cloud/language_v1beta2/services/language_service/transports/grpc_asyncio.py", + "google/cloud/language_v1beta2/types/__init__.py", + "google/cloud/language_v1beta2/types/language_service.py", + "mypy.ini", + "noxfile.py", + "renovate.json", + "samples/AUTHORING_GUIDE.md", + "samples/CONTRIBUTING.md", + "samples/snippets/api/noxfile.py", + "samples/snippets/classify_text/noxfile.py", + "samples/snippets/cloud-client/v1/noxfile.py", + "samples/snippets/generated-samples/v1/noxfile.py", + "samples/snippets/sentiment/noxfile.py", + "scripts/decrypt-secrets.sh", + "scripts/fixup_language_v1_keywords.py", + "scripts/fixup_language_v1beta2_keywords.py", + "scripts/readme-gen/readme_gen.py", + "scripts/readme-gen/templates/README.tmpl.rst", + "scripts/readme-gen/templates/auth.tmpl.rst", + "scripts/readme-gen/templates/auth_api_key.tmpl.rst", + 
"scripts/readme-gen/templates/install_deps.tmpl.rst", + "scripts/readme-gen/templates/install_portaudio.tmpl.rst", + "setup.cfg", + "testing/.gitignore", + "tests/unit/gapic/language_v1/__init__.py", + "tests/unit/gapic/language_v1/test_language_service.py", + "tests/unit/gapic/language_v1beta2/__init__.py", + "tests/unit/gapic/language_v1beta2/test_language_service.py" ] } \ No newline at end of file diff --git a/tests/unit/gapic/language_v1/test_language_service.py b/tests/unit/gapic/language_v1/test_language_service.py index 6ccbebf7..1d9dd6b0 100644 --- a/tests/unit/gapic/language_v1/test_language_service.py +++ b/tests/unit/gapic/language_v1/test_language_service.py @@ -95,12 +95,12 @@ def test_language_service_client_from_service_account_file(client_class): ) as factory: factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") - assert client._transport._credentials == creds + assert client.transport._credentials == creds client = client_class.from_service_account_json("dummy/file/path.json") - assert client._transport._credentials == creds + assert client.transport._credentials == creds - assert client._transport._host == "language.googleapis.com:443" + assert client.transport._host == "language.googleapis.com:443" def test_language_service_client_get_transport_class(): @@ -461,7 +461,7 @@ def test_analyze_sentiment( # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client._transport.analyze_sentiment), "__call__" + type(client.transport.analyze_sentiment), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeSentimentResponse( @@ -477,6 +477,7 @@ def test_analyze_sentiment( assert args[0] == language_service.AnalyzeSentimentRequest() # Establish that the response is the type that we expect. 
+ assert isinstance(response, language_service.AnalyzeSentimentResponse) assert response.language == "language_value" @@ -487,18 +488,21 @@ def test_analyze_sentiment_from_dict(): @pytest.mark.asyncio -async def test_analyze_sentiment_async(transport: str = "grpc_asyncio"): +async def test_analyze_sentiment_async( + transport: str = "grpc_asyncio", + request_type=language_service.AnalyzeSentimentRequest, +): client = LanguageServiceAsyncClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = language_service.AnalyzeSentimentRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client._client._transport.analyze_sentiment), "__call__" + type(client.transport.analyze_sentiment), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( @@ -511,7 +515,7 @@ async def test_analyze_sentiment_async(transport: str = "grpc_asyncio"): assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == language_service.AnalyzeSentimentRequest() # Establish that the response is the type that we expect. assert isinstance(response, language_service.AnalyzeSentimentResponse) @@ -519,12 +523,17 @@ async def test_analyze_sentiment_async(transport: str = "grpc_asyncio"): assert response.language == "language_value" +@pytest.mark.asyncio +async def test_analyze_sentiment_async_from_dict(): + await test_analyze_sentiment_async(request_type=dict) + + def test_analyze_sentiment_flattened(): client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( - type(client._transport.analyze_sentiment), "__call__" + type(client.transport.analyze_sentiment), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeSentimentResponse() @@ -571,7 +580,7 @@ async def test_analyze_sentiment_flattened_async(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client._client._transport.analyze_sentiment), "__call__" + type(client.transport.analyze_sentiment), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeSentimentResponse() @@ -628,9 +637,7 @@ def test_analyze_entities( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client._transport.analyze_entities), "__call__" - ) as call: + with mock.patch.object(type(client.transport.analyze_entities), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeEntitiesResponse( language="language_value", @@ -645,6 +652,7 @@ def test_analyze_entities( assert args[0] == language_service.AnalyzeEntitiesRequest() # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeEntitiesResponse) assert response.language == "language_value" @@ -655,19 +663,20 @@ def test_analyze_entities_from_dict(): @pytest.mark.asyncio -async def test_analyze_entities_async(transport: str = "grpc_asyncio"): +async def test_analyze_entities_async( + transport: str = "grpc_asyncio", + request_type=language_service.AnalyzeEntitiesRequest, +): client = LanguageServiceAsyncClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. 
- request = language_service.AnalyzeEntitiesRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client._client._transport.analyze_entities), "__call__" - ) as call: + with mock.patch.object(type(client.transport.analyze_entities), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( language_service.AnalyzeEntitiesResponse(language="language_value",) @@ -679,7 +688,7 @@ async def test_analyze_entities_async(transport: str = "grpc_asyncio"): assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == language_service.AnalyzeEntitiesRequest() # Establish that the response is the type that we expect. assert isinstance(response, language_service.AnalyzeEntitiesResponse) @@ -687,13 +696,16 @@ async def test_analyze_entities_async(transport: str = "grpc_asyncio"): assert response.language == "language_value" +@pytest.mark.asyncio +async def test_analyze_entities_async_from_dict(): + await test_analyze_entities_async(request_type=dict) + + def test_analyze_entities_flattened(): client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client._transport.analyze_entities), "__call__" - ) as call: + with mock.patch.object(type(client.transport.analyze_entities), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeEntitiesResponse() @@ -738,9 +750,7 @@ async def test_analyze_entities_flattened_async(): client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client._client._transport.analyze_entities), "__call__" - ) as call: + with mock.patch.object(type(client.transport.analyze_entities), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeEntitiesResponse() @@ -797,7 +807,7 @@ def test_analyze_entity_sentiment( # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client._transport.analyze_entity_sentiment), "__call__" + type(client.transport.analyze_entity_sentiment), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeEntitySentimentResponse( @@ -813,6 +823,7 @@ def test_analyze_entity_sentiment( assert args[0] == language_service.AnalyzeEntitySentimentRequest() # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeEntitySentimentResponse) assert response.language == "language_value" @@ -823,18 +834,21 @@ def test_analyze_entity_sentiment_from_dict(): @pytest.mark.asyncio -async def test_analyze_entity_sentiment_async(transport: str = "grpc_asyncio"): +async def test_analyze_entity_sentiment_async( + transport: str = "grpc_asyncio", + request_type=language_service.AnalyzeEntitySentimentRequest, +): client = LanguageServiceAsyncClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = language_service.AnalyzeEntitySentimentRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client._client._transport.analyze_entity_sentiment), "__call__" + type(client.transport.analyze_entity_sentiment), "__call__" ) as call: # Designate an appropriate return value for the call. 
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( @@ -847,7 +861,7 @@ async def test_analyze_entity_sentiment_async(transport: str = "grpc_asyncio"): assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == language_service.AnalyzeEntitySentimentRequest() # Establish that the response is the type that we expect. assert isinstance(response, language_service.AnalyzeEntitySentimentResponse) @@ -855,12 +869,17 @@ async def test_analyze_entity_sentiment_async(transport: str = "grpc_asyncio"): assert response.language == "language_value" +@pytest.mark.asyncio +async def test_analyze_entity_sentiment_async_from_dict(): + await test_analyze_entity_sentiment_async(request_type=dict) + + def test_analyze_entity_sentiment_flattened(): client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client._transport.analyze_entity_sentiment), "__call__" + type(client.transport.analyze_entity_sentiment), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeEntitySentimentResponse() @@ -907,7 +926,7 @@ async def test_analyze_entity_sentiment_flattened_async(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client._client._transport.analyze_entity_sentiment), "__call__" + type(client.transport.analyze_entity_sentiment), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeEntitySentimentResponse() @@ -964,7 +983,7 @@ def test_analyze_syntax( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client._transport.analyze_syntax), "__call__") as call: + with mock.patch.object(type(client.transport.analyze_syntax), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeSyntaxResponse( language="language_value", @@ -979,6 +998,7 @@ def test_analyze_syntax( assert args[0] == language_service.AnalyzeSyntaxRequest() # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeSyntaxResponse) assert response.language == "language_value" @@ -989,19 +1009,19 @@ def test_analyze_syntax_from_dict(): @pytest.mark.asyncio -async def test_analyze_syntax_async(transport: str = "grpc_asyncio"): +async def test_analyze_syntax_async( + transport: str = "grpc_asyncio", request_type=language_service.AnalyzeSyntaxRequest +): client = LanguageServiceAsyncClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = language_service.AnalyzeSyntaxRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client._client._transport.analyze_syntax), "__call__" - ) as call: + with mock.patch.object(type(client.transport.analyze_syntax), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( language_service.AnalyzeSyntaxResponse(language="language_value",) @@ -1013,7 +1033,7 @@ async def test_analyze_syntax_async(transport: str = "grpc_asyncio"): assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == language_service.AnalyzeSyntaxRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, language_service.AnalyzeSyntaxResponse) @@ -1021,11 +1041,16 @@ async def test_analyze_syntax_async(transport: str = "grpc_asyncio"): assert response.language == "language_value" +@pytest.mark.asyncio +async def test_analyze_syntax_async_from_dict(): + await test_analyze_syntax_async(request_type=dict) + + def test_analyze_syntax_flattened(): client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client._transport.analyze_syntax), "__call__") as call: + with mock.patch.object(type(client.transport.analyze_syntax), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeSyntaxResponse() @@ -1070,9 +1095,7 @@ async def test_analyze_syntax_flattened_async(): client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client._client._transport.analyze_syntax), "__call__" - ) as call: + with mock.patch.object(type(client.transport.analyze_syntax), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeSyntaxResponse() @@ -1128,7 +1151,7 @@ def test_classify_text( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client._transport.classify_text), "__call__") as call: + with mock.patch.object(type(client.transport.classify_text), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.ClassifyTextResponse() @@ -1141,6 +1164,7 @@ def test_classify_text( assert args[0] == language_service.ClassifyTextRequest() # Establish that the response is the type that we expect. 
+ assert isinstance(response, language_service.ClassifyTextResponse) @@ -1149,19 +1173,19 @@ def test_classify_text_from_dict(): @pytest.mark.asyncio -async def test_classify_text_async(transport: str = "grpc_asyncio"): +async def test_classify_text_async( + transport: str = "grpc_asyncio", request_type=language_service.ClassifyTextRequest +): client = LanguageServiceAsyncClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = language_service.ClassifyTextRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client._client._transport.classify_text), "__call__" - ) as call: + with mock.patch.object(type(client.transport.classify_text), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( language_service.ClassifyTextResponse() @@ -1173,17 +1197,22 @@ async def test_classify_text_async(transport: str = "grpc_asyncio"): assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == language_service.ClassifyTextRequest() # Establish that the response is the type that we expect. assert isinstance(response, language_service.ClassifyTextResponse) +@pytest.mark.asyncio +async def test_classify_text_async_from_dict(): + await test_classify_text_async(request_type=dict) + + def test_classify_text_flattened(): client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client._transport.classify_text), "__call__") as call: + with mock.patch.object(type(client.transport.classify_text), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = language_service.ClassifyTextResponse() @@ -1224,9 +1253,7 @@ async def test_classify_text_flattened_async(): client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client._client._transport.classify_text), "__call__" - ) as call: + with mock.patch.object(type(client.transport.classify_text), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.ClassifyTextResponse() @@ -1278,7 +1305,7 @@ def test_annotate_text( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client._transport.annotate_text), "__call__") as call: + with mock.patch.object(type(client.transport.annotate_text), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnnotateTextResponse( language="language_value", @@ -1293,6 +1320,7 @@ def test_annotate_text( assert args[0] == language_service.AnnotateTextRequest() # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnnotateTextResponse) assert response.language == "language_value" @@ -1303,19 +1331,19 @@ def test_annotate_text_from_dict(): @pytest.mark.asyncio -async def test_annotate_text_async(transport: str = "grpc_asyncio"): +async def test_annotate_text_async( + transport: str = "grpc_asyncio", request_type=language_service.AnnotateTextRequest +): client = LanguageServiceAsyncClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = language_service.AnnotateTextRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client._client._transport.annotate_text), "__call__" - ) as call: + with mock.patch.object(type(client.transport.annotate_text), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( language_service.AnnotateTextResponse(language="language_value",) @@ -1327,7 +1355,7 @@ async def test_annotate_text_async(transport: str = "grpc_asyncio"): assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == language_service.AnnotateTextRequest() # Establish that the response is the type that we expect. assert isinstance(response, language_service.AnnotateTextResponse) @@ -1335,11 +1363,16 @@ async def test_annotate_text_async(transport: str = "grpc_asyncio"): assert response.language == "language_value" +@pytest.mark.asyncio +async def test_annotate_text_async_from_dict(): + await test_annotate_text_async(request_type=dict) + + def test_annotate_text_flattened(): client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client._transport.annotate_text), "__call__") as call: + with mock.patch.object(type(client.transport.annotate_text), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnnotateTextResponse() @@ -1390,9 +1423,7 @@ async def test_annotate_text_flattened_async(): client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client._client._transport.annotate_text), "__call__" - ) as call: + with mock.patch.object(type(client.transport.annotate_text), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = language_service.AnnotateTextResponse() @@ -1478,7 +1509,7 @@ def test_transport_instance(): credentials=credentials.AnonymousCredentials(), ) client = LanguageServiceClient(transport=transport) - assert client._transport is transport + assert client.transport is transport def test_transport_get_channel(): @@ -1514,7 +1545,7 @@ def test_transport_adc(transport_class): def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) - assert isinstance(client._transport, transports.LanguageServiceGrpcTransport,) + assert isinstance(client.transport, transports.LanguageServiceGrpcTransport,) def test_language_service_base_transport_error(): @@ -1622,7 +1653,7 @@ def test_language_service_host_no_port(): api_endpoint="language.googleapis.com" ), ) - assert client._transport._host == "language.googleapis.com:443" + assert client.transport._host == "language.googleapis.com:443" def test_language_service_host_with_port(): @@ -1632,7 +1663,7 @@ def test_language_service_host_with_port(): api_endpoint="language.googleapis.com:8000" ), ) - assert client._transport._host == "language.googleapis.com:8000" + assert client.transport._host == "language.googleapis.com:8000" def test_language_service_grpc_transport_channel(): @@ -1644,6 +1675,7 @@ def test_language_service_grpc_transport_channel(): ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None def test_language_service_grpc_asyncio_transport_channel(): @@ -1655,6 +1687,7 @@ def test_language_service_grpc_asyncio_transport_channel(): ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None @pytest.mark.parametrize( @@ -1705,6 +1738,7 @@ def test_language_service_transport_channel_mtls_with_client_cert_source( 
quota_project_id=None, ) assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred @pytest.mark.parametrize( @@ -1750,6 +1784,107 @@ def test_language_service_transport_channel_mtls_with_adc(transport_class): assert transport.grpc_channel == mock_grpc_channel +def test_common_billing_account_path(): + billing_account = "squid" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = LanguageServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = LanguageServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = LanguageServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "whelk" + + expected = "folders/{folder}".format(folder=folder,) + actual = LanguageServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = LanguageServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = LanguageServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "oyster" + + expected = "organizations/{organization}".format(organization=organization,) + actual = LanguageServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = LanguageServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = LanguageServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "cuttlefish" + + expected = "projects/{project}".format(project=project,) + actual = LanguageServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = LanguageServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = LanguageServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = LanguageServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = LanguageServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = LanguageServiceClient.parse_common_location_path(path) + assert expected == actual + + def test_client_withDEFAULT_CLIENT_INFO(): client_info = gapic_v1.client_info.ClientInfo() diff --git a/tests/unit/gapic/language_v1beta2/test_language_service.py b/tests/unit/gapic/language_v1beta2/test_language_service.py index 5b27952c..6e4df53d 100644 --- a/tests/unit/gapic/language_v1beta2/test_language_service.py +++ b/tests/unit/gapic/language_v1beta2/test_language_service.py @@ -97,12 +97,12 @@ def test_language_service_client_from_service_account_file(client_class): ) as factory: factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") - assert client._transport._credentials == creds + assert client.transport._credentials == creds client = client_class.from_service_account_json("dummy/file/path.json") - assert client._transport._credentials == creds + assert client.transport._credentials == creds - assert client._transport._host == "language.googleapis.com:443" + assert client.transport._host == "language.googleapis.com:443" def test_language_service_client_get_transport_class(): @@ -463,7 +463,7 @@ def test_analyze_sentiment( # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client._transport.analyze_sentiment), "__call__" + type(client.transport.analyze_sentiment), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeSentimentResponse( @@ -479,6 +479,7 @@ def test_analyze_sentiment( assert args[0] == language_service.AnalyzeSentimentRequest() # Establish that the response is the type that we expect. 
+ assert isinstance(response, language_service.AnalyzeSentimentResponse) assert response.language == "language_value" @@ -489,18 +490,21 @@ def test_analyze_sentiment_from_dict(): @pytest.mark.asyncio -async def test_analyze_sentiment_async(transport: str = "grpc_asyncio"): +async def test_analyze_sentiment_async( + transport: str = "grpc_asyncio", + request_type=language_service.AnalyzeSentimentRequest, +): client = LanguageServiceAsyncClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = language_service.AnalyzeSentimentRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client._client._transport.analyze_sentiment), "__call__" + type(client.transport.analyze_sentiment), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( @@ -513,7 +517,7 @@ async def test_analyze_sentiment_async(transport: str = "grpc_asyncio"): assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == language_service.AnalyzeSentimentRequest() # Establish that the response is the type that we expect. assert isinstance(response, language_service.AnalyzeSentimentResponse) @@ -521,12 +525,17 @@ async def test_analyze_sentiment_async(transport: str = "grpc_asyncio"): assert response.language == "language_value" +@pytest.mark.asyncio +async def test_analyze_sentiment_async_from_dict(): + await test_analyze_sentiment_async(request_type=dict) + + def test_analyze_sentiment_flattened(): client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( - type(client._transport.analyze_sentiment), "__call__" + type(client.transport.analyze_sentiment), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeSentimentResponse() @@ -573,7 +582,7 @@ async def test_analyze_sentiment_flattened_async(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client._client._transport.analyze_sentiment), "__call__" + type(client.transport.analyze_sentiment), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeSentimentResponse() @@ -630,9 +639,7 @@ def test_analyze_entities( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client._transport.analyze_entities), "__call__" - ) as call: + with mock.patch.object(type(client.transport.analyze_entities), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeEntitiesResponse( language="language_value", @@ -647,6 +654,7 @@ def test_analyze_entities( assert args[0] == language_service.AnalyzeEntitiesRequest() # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeEntitiesResponse) assert response.language == "language_value" @@ -657,19 +665,20 @@ def test_analyze_entities_from_dict(): @pytest.mark.asyncio -async def test_analyze_entities_async(transport: str = "grpc_asyncio"): +async def test_analyze_entities_async( + transport: str = "grpc_asyncio", + request_type=language_service.AnalyzeEntitiesRequest, +): client = LanguageServiceAsyncClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. 
- request = language_service.AnalyzeEntitiesRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client._client._transport.analyze_entities), "__call__" - ) as call: + with mock.patch.object(type(client.transport.analyze_entities), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( language_service.AnalyzeEntitiesResponse(language="language_value",) @@ -681,7 +690,7 @@ async def test_analyze_entities_async(transport: str = "grpc_asyncio"): assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == language_service.AnalyzeEntitiesRequest() # Establish that the response is the type that we expect. assert isinstance(response, language_service.AnalyzeEntitiesResponse) @@ -689,13 +698,16 @@ async def test_analyze_entities_async(transport: str = "grpc_asyncio"): assert response.language == "language_value" +@pytest.mark.asyncio +async def test_analyze_entities_async_from_dict(): + await test_analyze_entities_async(request_type=dict) + + def test_analyze_entities_flattened(): client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client._transport.analyze_entities), "__call__" - ) as call: + with mock.patch.object(type(client.transport.analyze_entities), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeEntitiesResponse() @@ -740,9 +752,7 @@ async def test_analyze_entities_flattened_async(): client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client._client._transport.analyze_entities), "__call__" - ) as call: + with mock.patch.object(type(client.transport.analyze_entities), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeEntitiesResponse() @@ -799,7 +809,7 @@ def test_analyze_entity_sentiment( # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client._transport.analyze_entity_sentiment), "__call__" + type(client.transport.analyze_entity_sentiment), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeEntitySentimentResponse( @@ -815,6 +825,7 @@ def test_analyze_entity_sentiment( assert args[0] == language_service.AnalyzeEntitySentimentRequest() # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeEntitySentimentResponse) assert response.language == "language_value" @@ -825,18 +836,21 @@ def test_analyze_entity_sentiment_from_dict(): @pytest.mark.asyncio -async def test_analyze_entity_sentiment_async(transport: str = "grpc_asyncio"): +async def test_analyze_entity_sentiment_async( + transport: str = "grpc_asyncio", + request_type=language_service.AnalyzeEntitySentimentRequest, +): client = LanguageServiceAsyncClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = language_service.AnalyzeEntitySentimentRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client._client._transport.analyze_entity_sentiment), "__call__" + type(client.transport.analyze_entity_sentiment), "__call__" ) as call: # Designate an appropriate return value for the call. 
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( @@ -849,7 +863,7 @@ async def test_analyze_entity_sentiment_async(transport: str = "grpc_asyncio"): assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == language_service.AnalyzeEntitySentimentRequest() # Establish that the response is the type that we expect. assert isinstance(response, language_service.AnalyzeEntitySentimentResponse) @@ -857,12 +871,17 @@ async def test_analyze_entity_sentiment_async(transport: str = "grpc_asyncio"): assert response.language == "language_value" +@pytest.mark.asyncio +async def test_analyze_entity_sentiment_async_from_dict(): + await test_analyze_entity_sentiment_async(request_type=dict) + + def test_analyze_entity_sentiment_flattened(): client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client._transport.analyze_entity_sentiment), "__call__" + type(client.transport.analyze_entity_sentiment), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeEntitySentimentResponse() @@ -909,7 +928,7 @@ async def test_analyze_entity_sentiment_flattened_async(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client._client._transport.analyze_entity_sentiment), "__call__" + type(client.transport.analyze_entity_sentiment), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeEntitySentimentResponse() @@ -966,7 +985,7 @@ def test_analyze_syntax( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client._transport.analyze_syntax), "__call__") as call: + with mock.patch.object(type(client.transport.analyze_syntax), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeSyntaxResponse( language="language_value", @@ -981,6 +1000,7 @@ def test_analyze_syntax( assert args[0] == language_service.AnalyzeSyntaxRequest() # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeSyntaxResponse) assert response.language == "language_value" @@ -991,19 +1011,19 @@ def test_analyze_syntax_from_dict(): @pytest.mark.asyncio -async def test_analyze_syntax_async(transport: str = "grpc_asyncio"): +async def test_analyze_syntax_async( + transport: str = "grpc_asyncio", request_type=language_service.AnalyzeSyntaxRequest +): client = LanguageServiceAsyncClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = language_service.AnalyzeSyntaxRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client._client._transport.analyze_syntax), "__call__" - ) as call: + with mock.patch.object(type(client.transport.analyze_syntax), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( language_service.AnalyzeSyntaxResponse(language="language_value",) @@ -1015,7 +1035,7 @@ async def test_analyze_syntax_async(transport: str = "grpc_asyncio"): assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == language_service.AnalyzeSyntaxRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, language_service.AnalyzeSyntaxResponse) @@ -1023,11 +1043,16 @@ async def test_analyze_syntax_async(transport: str = "grpc_asyncio"): assert response.language == "language_value" +@pytest.mark.asyncio +async def test_analyze_syntax_async_from_dict(): + await test_analyze_syntax_async(request_type=dict) + + def test_analyze_syntax_flattened(): client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client._transport.analyze_syntax), "__call__") as call: + with mock.patch.object(type(client.transport.analyze_syntax), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeSyntaxResponse() @@ -1072,9 +1097,7 @@ async def test_analyze_syntax_flattened_async(): client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client._client._transport.analyze_syntax), "__call__" - ) as call: + with mock.patch.object(type(client.transport.analyze_syntax), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnalyzeSyntaxResponse() @@ -1130,7 +1153,7 @@ def test_classify_text( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client._transport.classify_text), "__call__") as call: + with mock.patch.object(type(client.transport.classify_text), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.ClassifyTextResponse() @@ -1143,6 +1166,7 @@ def test_classify_text( assert args[0] == language_service.ClassifyTextRequest() # Establish that the response is the type that we expect. 
+ assert isinstance(response, language_service.ClassifyTextResponse) @@ -1151,19 +1175,19 @@ def test_classify_text_from_dict(): @pytest.mark.asyncio -async def test_classify_text_async(transport: str = "grpc_asyncio"): +async def test_classify_text_async( + transport: str = "grpc_asyncio", request_type=language_service.ClassifyTextRequest +): client = LanguageServiceAsyncClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = language_service.ClassifyTextRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client._client._transport.classify_text), "__call__" - ) as call: + with mock.patch.object(type(client.transport.classify_text), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( language_service.ClassifyTextResponse() @@ -1175,17 +1199,22 @@ async def test_classify_text_async(transport: str = "grpc_asyncio"): assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == language_service.ClassifyTextRequest() # Establish that the response is the type that we expect. assert isinstance(response, language_service.ClassifyTextResponse) +@pytest.mark.asyncio +async def test_classify_text_async_from_dict(): + await test_classify_text_async(request_type=dict) + + def test_classify_text_flattened(): client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client._transport.classify_text), "__call__") as call: + with mock.patch.object(type(client.transport.classify_text), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = language_service.ClassifyTextResponse() @@ -1226,9 +1255,7 @@ async def test_classify_text_flattened_async(): client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client._client._transport.classify_text), "__call__" - ) as call: + with mock.patch.object(type(client.transport.classify_text), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.ClassifyTextResponse() @@ -1280,7 +1307,7 @@ def test_annotate_text( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client._transport.annotate_text), "__call__") as call: + with mock.patch.object(type(client.transport.annotate_text), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnnotateTextResponse( language="language_value", @@ -1295,6 +1322,7 @@ def test_annotate_text( assert args[0] == language_service.AnnotateTextRequest() # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnnotateTextResponse) assert response.language == "language_value" @@ -1305,19 +1333,19 @@ def test_annotate_text_from_dict(): @pytest.mark.asyncio -async def test_annotate_text_async(transport: str = "grpc_asyncio"): +async def test_annotate_text_async( + transport: str = "grpc_asyncio", request_type=language_service.AnnotateTextRequest +): client = LanguageServiceAsyncClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = language_service.AnnotateTextRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client._client._transport.annotate_text), "__call__" - ) as call: + with mock.patch.object(type(client.transport.annotate_text), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( language_service.AnnotateTextResponse(language="language_value",) @@ -1329,7 +1357,7 @@ async def test_annotate_text_async(transport: str = "grpc_asyncio"): assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == language_service.AnnotateTextRequest() # Establish that the response is the type that we expect. assert isinstance(response, language_service.AnnotateTextResponse) @@ -1337,11 +1365,16 @@ async def test_annotate_text_async(transport: str = "grpc_asyncio"): assert response.language == "language_value" +@pytest.mark.asyncio +async def test_annotate_text_async_from_dict(): + await test_annotate_text_async(request_type=dict) + + def test_annotate_text_flattened(): client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client._transport.annotate_text), "__call__") as call: + with mock.patch.object(type(client.transport.annotate_text), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = language_service.AnnotateTextResponse() @@ -1392,9 +1425,7 @@ async def test_annotate_text_flattened_async(): client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client._client._transport.annotate_text), "__call__" - ) as call: + with mock.patch.object(type(client.transport.annotate_text), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = language_service.AnnotateTextResponse() @@ -1480,7 +1511,7 @@ def test_transport_instance(): credentials=credentials.AnonymousCredentials(), ) client = LanguageServiceClient(transport=transport) - assert client._transport is transport + assert client.transport is transport def test_transport_get_channel(): @@ -1516,7 +1547,7 @@ def test_transport_adc(transport_class): def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) - assert isinstance(client._transport, transports.LanguageServiceGrpcTransport,) + assert isinstance(client.transport, transports.LanguageServiceGrpcTransport,) def test_language_service_base_transport_error(): @@ -1624,7 +1655,7 @@ def test_language_service_host_no_port(): api_endpoint="language.googleapis.com" ), ) - assert client._transport._host == "language.googleapis.com:443" + assert client.transport._host == "language.googleapis.com:443" def test_language_service_host_with_port(): @@ -1634,7 +1665,7 @@ def test_language_service_host_with_port(): api_endpoint="language.googleapis.com:8000" ), ) - assert client._transport._host == "language.googleapis.com:8000" + assert client.transport._host == "language.googleapis.com:8000" def test_language_service_grpc_transport_channel(): @@ -1646,6 +1677,7 @@ def test_language_service_grpc_transport_channel(): ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None def test_language_service_grpc_asyncio_transport_channel(): @@ -1657,6 +1689,7 @@ def test_language_service_grpc_asyncio_transport_channel(): ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None @pytest.mark.parametrize( @@ -1707,6 +1740,7 @@ def test_language_service_transport_channel_mtls_with_client_cert_source( 
quota_project_id=None, ) assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred @pytest.mark.parametrize( @@ -1752,6 +1786,107 @@ def test_language_service_transport_channel_mtls_with_adc(transport_class): assert transport.grpc_channel == mock_grpc_channel +def test_common_billing_account_path(): + billing_account = "squid" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = LanguageServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = LanguageServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = LanguageServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "whelk" + + expected = "folders/{folder}".format(folder=folder,) + actual = LanguageServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = LanguageServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = LanguageServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "oyster" + + expected = "organizations/{organization}".format(organization=organization,) + actual = LanguageServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = LanguageServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = LanguageServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "cuttlefish" + + expected = "projects/{project}".format(project=project,) + actual = LanguageServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = LanguageServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = LanguageServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = LanguageServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = LanguageServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = LanguageServiceClient.parse_common_location_path(path) + assert expected == actual + + def test_client_withDEFAULT_CLIENT_INFO(): client_info = gapic_v1.client_info.ClientInfo()