fix: adds underscore to "type" in NL API samples (#49)
* fix: adds underscore to "type" in entity sentiment sample

* fix: other language samples missing type with underscore
telpirion committed Nov 3, 2020
1 parent d5270e1 · commit 36aa320
Showing 10 changed files with 19 additions and 19 deletions.
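Background for the change: the 2.x google-cloud-language client is generated with proto-plus, which appends a trailing underscore to proto fields whose names collide with Python built-ins. Document.type therefore becomes "type_" on the request side, and Entity.type / EntityMention.type become "type_" on the response side. Below is a minimal sketch of the corrected pattern, not part of the diff; it assumes google-cloud-language >= 2.0 is installed, credentials are configured, and the sample text is illustrative only.

# Sketch of the corrected request/response pattern (assumption: google-cloud-language >= 2.0).
from google.cloud import language_v1

client = language_v1.LanguageServiceClient()

# Note the trailing underscore: "type" collides with the Python built-in,
# so the 2.x client renames the field to "type_".
document = {
    "content": "Googleplex is located in Mountain View.",
    "type_": language_v1.Document.Type.PLAIN_TEXT,
    "language": "en",
}

response = client.analyze_entities(
    request={"document": document, "encoding_type": language_v1.EncodingType.UTF8}
)

for entity in response.entities:
    # Response fields are renamed the same way: entity.type_, mention.type_.
    print(entity.name, language_v1.Entity.Type(entity.type_).name)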
2 changes: 1 addition & 1 deletion samples/v1/language_classify_gcs.py
@@ -48,7 +48,7 @@ def sample_classify_text(gcs_content_uri):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language}

response = client.classify_text(request = {'document': document})
# Loop through classified categories returned from the API
2 changes: 1 addition & 1 deletion samples/v1/language_classify_text.py
@@ -46,7 +46,7 @@ def sample_classify_text(text_content):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"content": text_content, "type": type_, "language": language}
document = {"content": text_content, "type_": type_, "language": language}

response = client.classify_text(request = {'document': document})
# Loop through classified categories returned from the API
8 changes: 4 additions & 4 deletions samples/v1/language_entities_gcs.py
@@ -47,17 +47,17 @@ def sample_analyze_entities(gcs_content_uri):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language}

# Available values: NONE, UTF8, UTF16, UTF32
- encoding_type = language_v1..EncodingType.UTF8
+ encoding_type = language_v1.EncodingType.UTF8

response = client.analyze_entities(request = {'document': document, 'encoding_type': encoding_type})
# Loop through entitites returned from the API
for entity in response.entities:
print(u"Representative name for the entity: {}".format(entity.name))
# Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name))
print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name))
# Get the salience score associated with the entity in the [0, 1.0] range
print(u"Salience score: {}".format(entity.salience))
# Loop over the metadata associated with entity. For many known entities,
@@ -73,7 +73,7 @@ def sample_analyze_entities(gcs_content_uri):
print(u"Mention text: {}".format(mention.text.content))
# Get the mention type, e.g. PROPER for proper noun
print(
u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name)
u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name)
)

# Get the language of the text, which will be the same as
6 changes: 3 additions & 3 deletions samples/v1/language_entities_text.py
@@ -46,7 +46,7 @@ def sample_analyze_entities(text_content):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"content": text_content, "type": type_, "language": language}
document = {"content": text_content, "type_": type_, "language": language}

# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = language_v1.EncodingType.UTF8
@@ -58,7 +58,7 @@ def sample_analyze_entities(text_content):
print(u"Representative name for the entity: {}".format(entity.name))

# Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name))
print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name))

# Get the salience score associated with the entity in the [0, 1.0] range
print(u"Salience score: {}".format(entity.salience))
@@ -77,7 +77,7 @@ def sample_analyze_entities(text_content):

# Get the mention type, e.g. PROPER for proper noun
print(
u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name)
u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name)
)

# Get the language of the text, which will be the same as
6 changes: 3 additions & 3 deletions samples/v1/language_entity_sentiment_gcs.py
@@ -47,7 +47,7 @@ def sample_analyze_entity_sentiment(gcs_content_uri):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language}

# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = language_v1.EncodingType.UTF8
@@ -57,7 +57,7 @@ def sample_analyze_entity_sentiment(gcs_content_uri):
for entity in response.entities:
print(u"Representative name for the entity: {}".format(entity.name))
# Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name))
print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name))
# Get the salience score associated with the entity in the [0, 1.0] range
print(u"Salience score: {}".format(entity.salience))
# Get the aggregate sentiment expressed for this entity in the provided document.
@@ -77,7 +77,7 @@ def sample_analyze_entity_sentiment(gcs_content_uri):
print(u"Mention text: {}".format(mention.text.content))
# Get the mention type, e.g. PROPER for proper noun
print(
u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name)
u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name)
)

# Get the language of the text, which will be the same as
6 changes: 3 additions & 3 deletions samples/v1/language_entity_sentiment_text.py
@@ -46,7 +46,7 @@ def sample_analyze_entity_sentiment(text_content):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"content": text_content, "type": type_, "language": language}
document = {"content": text_content, "type_": type_, "language": language}

# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = language_v1.EncodingType.UTF8
@@ -56,7 +56,7 @@ def sample_analyze_entity_sentiment(text_content):
for entity in response.entities:
print(u"Representative name for the entity: {}".format(entity.name))
# Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name))
print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name))
# Get the salience score associated with the entity in the [0, 1.0] range
print(u"Salience score: {}".format(entity.salience))
# Get the aggregate sentiment expressed for this entity in the provided document.
@@ -76,7 +76,7 @@ def sample_analyze_entity_sentiment(text_content):
print(u"Mention text: {}".format(mention.text.content))
# Get the mention type, e.g. PROPER for proper noun
print(
u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name)
u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name)
)

# Get the language of the text, which will be the same as
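The entity sentiment samples follow the same pattern: only the "type" key and the entity/mention type fields gain the underscore, while the sentiment fields keep their names. A small self-contained sketch, not part of the diff, under the same assumptions as the sketch above (sample text illustrative):

# Sketch: per-entity sentiment with the renamed type_ field (assumption: google-cloud-language >= 2.0).
from google.cloud import language_v1

client = language_v1.LanguageServiceClient()
document = {
    "content": "Grapes are good. Bananas are bad.",
    "type_": language_v1.Document.Type.PLAIN_TEXT,
    "language": "en",
}
response = client.analyze_entity_sentiment(
    request={"document": document, "encoding_type": language_v1.EncodingType.UTF8}
)
for entity in response.entities:
    # Sentiment fields are unchanged: score is in [-1.0, 1.0], magnitude is non-negative.
    print(
        entity.name,
        language_v1.Entity.Type(entity.type_).name,
        entity.sentiment.score,
        entity.sentiment.magnitude,
    )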
2 changes: 1 addition & 1 deletion samples/v1/language_sentiment_gcs.py
@@ -47,7 +47,7 @@ def sample_analyze_sentiment(gcs_content_uri):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language}

# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = language_v1.EncodingType.UTF8
2 changes: 1 addition & 1 deletion samples/v1/language_sentiment_text.py
@@ -46,7 +46,7 @@ def sample_analyze_sentiment(text_content):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"content": text_content, "type": type_, "language": language}
document = {"content": text_content, "type_": type_, "language": language}

# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = language_v1.EncodingType.UTF8
2 changes: 1 addition & 1 deletion samples/v1/language_syntax_gcs.py
@@ -47,7 +47,7 @@ def sample_analyze_syntax(gcs_content_uri):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language}

# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = language_v1.EncodingType.UTF8
2 changes: 1 addition & 1 deletion samples/v1/language_syntax_text.py
@@ -46,7 +46,7 @@ def sample_analyze_syntax(text_content):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"content": text_content, "type": type_, "language": language}
document = {"content": text_content, "type_": type_, "language": language}

# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = language_v1.EncodingType.UTF8
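The classify, sentiment, and syntax samples only change on the request side; their response fields (categories, sentences, tokens, part-of-speech tags) do not collide with Python built-ins and keep their names. A short sketch for the syntax case, not part of the diff, under the same assumptions as above:

# Sketch: analyze_syntax with the renamed type_ field (assumption: google-cloud-language >= 2.0).
from google.cloud import language_v1

client = language_v1.LanguageServiceClient()
document = {
    "content": "The quick brown fox jumps over the lazy dog.",
    "type_": language_v1.Document.Type.PLAIN_TEXT,
    "language": "en",
}
response = client.analyze_syntax(
    request={"document": document, "encoding_type": language_v1.EncodingType.UTF8}
)
for token in response.tokens:
    # Only "type" needed renaming; "content" and "tag" are unchanged.
    print(token.text.content, language_v1.PartOfSpeech.Tag(token.part_of_speech.tag).name)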
