The following example performs entity analysis on a text string sent directly to the Natural Language API.
Defining a function that performs entity analysis
We will write a program that extracts the entities from the input text and prints each of them.
from google.cloud import language_v1
from google.cloud.language_v1 import enums
from google.cloud.language_v1 import types


def sample_analyze_entities(text_content):
    """
    Analyzing Entities in a String

    Args:
        text_content: The text content to analyze
    """
    client = language_v1.LanguageServiceClient()

    # Build a plain-text Document from the input string
    type_ = enums.Document.Type.PLAIN_TEXT
    document = types.Document(content=text_content, type=type_)

    response = client.analyze_entities(document=document)

    # Loop through entities returned from the API
    for entity in response.entities:
        print(u"Representative name for the entity: {}".format(entity.name))

        # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
        print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name))

        # Get the salience score associated with the entity in the [0, 1.0] range
        print(u"Salience score: {}".format(entity.salience))

        # Loop over the metadata associated with the entity. For many known entities,
        # the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid).
        # Some entity types may have additional metadata, e.g. ADDRESS entities
        # may have metadata for the address street_name, postal_code, et al.
        for metadata_name, metadata_value in entity.metadata.items():
            print(u"{}: {}".format(metadata_name, metadata_value))

        # Loop over the mentions of this entity in the input document.
        # The API currently supports proper noun mentions.
        for mention in entity.mentions:
            print(u"Mention text: {}".format(mention.text.content))

            # Get the mention type, e.g. PROPER for proper noun
            print(u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name))

    # Get the language of the text, which will be the language specified in the
    # request or, if not specified, the automatically-detected language.
    print(u"Language of the text: {}".format(response.language))