diff --git a/samples/v1/language_classify_gcs.py b/samples/v1/language_classify_gcs.py
index a20789cc..b357a8ae 100644
--- a/samples/v1/language_classify_gcs.py
+++ b/samples/v1/language_classify_gcs.py
@@ -48,7 +48,7 @@ def sample_classify_text(gcs_content_uri):
     # For list of supported languages:
     # https://cloud.google.com/natural-language/docs/languages
     language = "en"
-    document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
+    document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language}
     response = client.classify_text(request = {'document': document})
 
     # Loop through classified categories returned from the API
diff --git a/samples/v1/language_classify_text.py b/samples/v1/language_classify_text.py
index ad55d26c..6fe2aaa4 100644
--- a/samples/v1/language_classify_text.py
+++ b/samples/v1/language_classify_text.py
@@ -46,7 +46,7 @@ def sample_classify_text(text_content):
     # For list of supported languages:
     # https://cloud.google.com/natural-language/docs/languages
     language = "en"
-    document = {"content": text_content, "type": type_, "language": language}
+    document = {"content": text_content, "type_": type_, "language": language}
     response = client.classify_text(request = {'document': document})
 
     # Loop through classified categories returned from the API
diff --git a/samples/v1/language_entities_gcs.py b/samples/v1/language_entities_gcs.py
index d735e885..6bdb8577 100644
--- a/samples/v1/language_entities_gcs.py
+++ b/samples/v1/language_entities_gcs.py
@@ -47,17 +47,17 @@ def sample_analyze_entities(gcs_content_uri):
     # For list of supported languages:
     # https://cloud.google.com/natural-language/docs/languages
     language = "en"
-    document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
+    document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language}
 
     # Available values: NONE, UTF8, UTF16, UTF32
-    encoding_type = language_v1..EncodingType.UTF8
+    encoding_type = language_v1.EncodingType.UTF8
     response = client.analyze_entities(request = {'document': document, 'encoding_type': encoding_type})
 
     # Loop through entitites returned from the API
     for entity in response.entities:
         print(u"Representative name for the entity: {}".format(entity.name))
         # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
-        print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name))
+        print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name))
         # Get the salience score associated with the entity in the [0, 1.0] range
         print(u"Salience score: {}".format(entity.salience))
         # Loop over the metadata associated with entity. For many known entities,
@@ -73,7 +73,7 @@ def sample_analyze_entities(gcs_content_uri):
         print(u"Mention text: {}".format(mention.text.content))
         # Get the mention type, e.g. PROPER for proper noun
         print(
-            u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name)
+            u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name)
         )
 
     # Get the language of the text, which will be the same as
diff --git a/samples/v1/language_entities_text.py b/samples/v1/language_entities_text.py
index db2ad9e2..2cce0015 100644
--- a/samples/v1/language_entities_text.py
+++ b/samples/v1/language_entities_text.py
@@ -46,7 +46,7 @@ def sample_analyze_entities(text_content):
     # For list of supported languages:
     # https://cloud.google.com/natural-language/docs/languages
     language = "en"
-    document = {"content": text_content, "type": type_, "language": language}
+    document = {"content": text_content, "type_": type_, "language": language}
 
     # Available values: NONE, UTF8, UTF16, UTF32
     encoding_type = language_v1.EncodingType.UTF8
@@ -58,7 +58,7 @@ def sample_analyze_entities(text_content):
         print(u"Representative name for the entity: {}".format(entity.name))
 
         # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
-        print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name))
+        print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name))
 
         # Get the salience score associated with the entity in the [0, 1.0] range
         print(u"Salience score: {}".format(entity.salience))
@@ -77,7 +77,7 @@ def sample_analyze_entities(text_content):
 
         # Get the mention type, e.g. PROPER for proper noun
         print(
-            u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name)
+            u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name)
         )
 
     # Get the language of the text, which will be the same as
diff --git a/samples/v1/language_entity_sentiment_gcs.py b/samples/v1/language_entity_sentiment_gcs.py
index 2a4c6ff3..dba3dc1b 100644
--- a/samples/v1/language_entity_sentiment_gcs.py
+++ b/samples/v1/language_entity_sentiment_gcs.py
@@ -47,7 +47,7 @@ def sample_analyze_entity_sentiment(gcs_content_uri):
     # For list of supported languages:
     # https://cloud.google.com/natural-language/docs/languages
     language = "en"
-    document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
+    document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language}
 
     # Available values: NONE, UTF8, UTF16, UTF32
     encoding_type = language_v1.EncodingType.UTF8
@@ -57,7 +57,7 @@ def sample_analyze_entity_sentiment(gcs_content_uri):
     for entity in response.entities:
         print(u"Representative name for the entity: {}".format(entity.name))
         # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
-        print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name))
+        print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name))
         # Get the salience score associated with the entity in the [0, 1.0] range
         print(u"Salience score: {}".format(entity.salience))
         # Get the aggregate sentiment expressed for this entity in the provided document.
@@ -77,7 +77,7 @@ def sample_analyze_entity_sentiment(gcs_content_uri):
         print(u"Mention text: {}".format(mention.text.content))
         # Get the mention type, e.g. PROPER for proper noun
         print(
-            u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name)
+            u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name)
         )
 
     # Get the language of the text, which will be the same as
diff --git a/samples/v1/language_entity_sentiment_text.py b/samples/v1/language_entity_sentiment_text.py
index 20c9dbd8..b28434df 100644
--- a/samples/v1/language_entity_sentiment_text.py
+++ b/samples/v1/language_entity_sentiment_text.py
@@ -46,7 +46,7 @@ def sample_analyze_entity_sentiment(text_content):
     # For list of supported languages:
     # https://cloud.google.com/natural-language/docs/languages
     language = "en"
-    document = {"content": text_content, "type": type_, "language": language}
+    document = {"content": text_content, "type_": type_, "language": language}
 
     # Available values: NONE, UTF8, UTF16, UTF32
     encoding_type = language_v1.EncodingType.UTF8
@@ -56,7 +56,7 @@ def sample_analyze_entity_sentiment(text_content):
     for entity in response.entities:
         print(u"Representative name for the entity: {}".format(entity.name))
         # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
-        print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name))
+        print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name))
         # Get the salience score associated with the entity in the [0, 1.0] range
         print(u"Salience score: {}".format(entity.salience))
         # Get the aggregate sentiment expressed for this entity in the provided document.
@@ -76,7 +76,7 @@ def sample_analyze_entity_sentiment(text_content):
         print(u"Mention text: {}".format(mention.text.content))
         # Get the mention type, e.g. PROPER for proper noun
         print(
-            u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name)
+            u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name)
         )
 
     # Get the language of the text, which will be the same as
diff --git a/samples/v1/language_sentiment_gcs.py b/samples/v1/language_sentiment_gcs.py
index 68839805..f225db1c 100644
--- a/samples/v1/language_sentiment_gcs.py
+++ b/samples/v1/language_sentiment_gcs.py
@@ -47,7 +47,7 @@ def sample_analyze_sentiment(gcs_content_uri):
     # For list of supported languages:
     # https://cloud.google.com/natural-language/docs/languages
     language = "en"
-    document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
+    document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language}
 
     # Available values: NONE, UTF8, UTF16, UTF32
     encoding_type = language_v1.EncodingType.UTF8
diff --git a/samples/v1/language_sentiment_text.py b/samples/v1/language_sentiment_text.py
index 0be2b6cf..d94420a3 100644
--- a/samples/v1/language_sentiment_text.py
+++ b/samples/v1/language_sentiment_text.py
@@ -46,7 +46,7 @@ def sample_analyze_sentiment(text_content):
     # For list of supported languages:
     # https://cloud.google.com/natural-language/docs/languages
     language = "en"
-    document = {"content": text_content, "type": type_, "language": language}
+    document = {"content": text_content, "type_": type_, "language": language}
 
     # Available values: NONE, UTF8, UTF16, UTF32
     encoding_type = language_v1.EncodingType.UTF8
diff --git a/samples/v1/language_syntax_gcs.py b/samples/v1/language_syntax_gcs.py
index e04be406..4e8a5cc4 100644
--- a/samples/v1/language_syntax_gcs.py
+++ b/samples/v1/language_syntax_gcs.py
@@ -47,7 +47,7 @@ def sample_analyze_syntax(gcs_content_uri):
     # For list of supported languages:
     # https://cloud.google.com/natural-language/docs/languages
     language = "en"
-    document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
+    document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language}
 
     # Available values: NONE, UTF8, UTF16, UTF32
     encoding_type = language_v1.EncodingType.UTF8
diff --git a/samples/v1/language_syntax_text.py b/samples/v1/language_syntax_text.py
index 9f37e92c..c3eb9383 100644
--- a/samples/v1/language_syntax_text.py
+++ b/samples/v1/language_syntax_text.py
@@ -46,7 +46,7 @@ def sample_analyze_syntax(text_content):
     # For list of supported languages:
     # https://cloud.google.com/natural-language/docs/languages
     language = "en"
-    document = {"content": text_content, "type": type_, "language": language}
+    document = {"content": text_content, "type_": type_, "language": language}
 
     # Available values: NONE, UTF8, UTF16, UTF32
     encoding_type = language_v1.EncodingType.UTF8