<html><body>
<style>

body, h1, h2, h3, div, span, p, pre, a {
  margin: 0;
  padding: 0;
  border: 0;
  font-weight: inherit;
  font-style: inherit;
  font-size: 100%;
  font-family: inherit;
  vertical-align: baseline;
}

body {
  font-size: 13px;
  padding: 1em;
}

h1 {
  font-size: 26px;
  margin-bottom: 1em;
}

h2 {
  font-size: 24px;
  margin-bottom: 1em;
}

h3 {
  font-size: 20px;
  margin-bottom: 1em;
  margin-top: 1em;
}

pre, code {
  line-height: 1.5;
  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
  margin-top: 0.5em;
}

h1, h2, h3, p {
  font-family: Arial, sans-serif;
}

h1, h2, h3 {
  border-bottom: solid #CCC 1px;
}

.toc_element {
  margin-top: 0.5em;
}

.firstline {
  margin-left: 2em;
}

.method  {
  margin-top: 1em;
  border: solid 1px #CCC;
  padding: 1em;
  background: #EEE;
}

.details {
  font-weight: bold;
  font-size: 14px;
}

</style>

<h1><a href="vision_v1.html">Google Cloud Vision API</a> . <a href="vision_v1.images.html">images</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
  <code><a href="#annotate">annotate(body, x__xgafv=None)</a></code></p>
<p class="firstline">Run image detection and annotation for a batch of images.</p>
<h3>Method Details</h3>
<div class="method">
    <code class="details" id="annotate">annotate(body, x__xgafv=None)</code>
  <pre>Run image detection and annotation for a batch of images.

Args:
  body: object, The request body. (required)
    The object takes the form of:

{ # Multiple image annotation requests are batched into a single service call.
    "requests": [ # Individual image annotation requests for this batch.
      { # Request for performing Google Cloud Vision API tasks over a user-provided
          # image, with user-requested features.
        "imageContext": { # Image context and/or feature-specific parameters. # Additional context that may accompany the image.
          "latLongRect": { # Rectangle determined by min and max `LatLng` pairs. # lat/long rectangle that specifies the location of the image.
            "minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
                # of doubles representing degrees latitude and degrees longitude. Unless
                # specified otherwise, this must conform to the
                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
                # standard</a>. Values must be within normalized ranges.
                #
                # Example of normalization code in Python:
                #
                #     def NormalizeLongitude(longitude):
                #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
                #       q, r = divmod(longitude, 360.0)
                #       if r > 180.0 or (r == 180.0 and q <= -1.0):
                #         return r - 360.0
                #       return r
                #
                #     def NormalizeLatLng(latitude, longitude):
                #       """Wraps decimal degrees latitude and longitude to
                #       [-90.0, 90.0] and [-180.0, 180.0], respectively."""
                #       r = latitude % 360.0
                #       if r <= 90.0:
                #         return r, NormalizeLongitude(longitude)
                #       elif r >= 270.0:
                #         return r - 360, NormalizeLongitude(longitude)
                #       else:
                #         return 180 - r, NormalizeLongitude(longitude + 180.0)
                #
                #     assert 180.0 == NormalizeLongitude(180.0)
                #     assert -180.0 == NormalizeLongitude(-180.0)
                #     assert -179.0 == NormalizeLongitude(181.0)
                #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
                #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
                #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
                #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
                #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
                #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
                #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
                #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
                #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
                #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
              "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
            },
            "maxLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
                # of doubles representing degrees latitude and degrees longitude. Unless
                # specified otherwise, this must conform to the
                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
                # standard</a>. Values must be within normalized ranges.
                #
                # Example of normalization code in Python:
                #
                #     def NormalizeLongitude(longitude):
                #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
                #       q, r = divmod(longitude, 360.0)
                #       if r > 180.0 or (r == 180.0 and q <= -1.0):
                #         return r - 360.0
                #       return r
                #
                #     def NormalizeLatLng(latitude, longitude):
                #       """Wraps decimal degrees latitude and longitude to
                #       [-90.0, 90.0] and [-180.0, 180.0], respectively."""
                #       r = latitude % 360.0
                #       if r <= 90.0:
                #         return r, NormalizeLongitude(longitude)
                #       elif r >= 270.0:
                #         return r - 360, NormalizeLongitude(longitude)
                #       else:
                #         return 180 - r, NormalizeLongitude(longitude + 180.0)
                #
                #     assert 180.0 == NormalizeLongitude(180.0)
                #     assert -180.0 == NormalizeLongitude(-180.0)
                #     assert -179.0 == NormalizeLongitude(181.0)
                #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
                #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
                #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
                #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
                #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
                #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
                #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
                #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
                #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
                #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
              "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
            },
          },
          "languageHints": [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
              # yields the best results since it enables automatic language detection. For
              # languages based on the Latin alphabet, setting `language_hints` is not
              # needed. In rare cases, when the language of the text in the image is known,
              # setting a hint will help get better results (although it will be a
              # significant hindrance if the hint is wrong). Text detection returns an
              # error if one or more of the specified languages is not one of the
              # [supported languages](/vision/docs/languages).
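              # Example (a hedged illustration): setting "languageHints": ["ja"]
              # when the text in the image is known to be Japanese.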
            "A String",
          ],
          "cropHintsParams": { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
            "aspectRatios": [ # Aspect ratios in floats, representing the ratio of the width to the height
                # of the image. For example, if the desired aspect ratio is 4/3, the
                # corresponding float value should be 1.33333.  If not specified, the
                # best possible crop is returned. The number of provided aspect ratios is
                # limited to a maximum of 16; any aspect ratios provided after the 16th are
                # ignored.
              3.14,
            ],
          },
        },
        "image": { # Client image to perform Google Cloud Vision API tasks over. # The image to be processed.
          "content": "A String", # Image content, represented as a stream of bytes.
              # Note: as with all `bytes` fields, protocol buffers use a pure binary
              # representation, whereas JSON representations use base64.
          "source": { # External image source (Google Cloud Storage image location). # Google Cloud Storage image location. If both `content` and `source`
              # are provided for an image, `content` takes precedence and is
              # used to perform the image annotation request.
            "gcsImageUri": "A String", # NOTE: For new code `image_uri` below is preferred.
                # Google Cloud Storage image URI, which must be in the following form:
                # `gs://bucket_name/object_name` (for details, see
                # [Google Cloud Storage Request
                # URIs](https://cloud.google.com/storage/docs/reference-uris)).
                # NOTE: Cloud Storage object versioning is not supported.
            "imageUri": "A String", # Image URI which supports:
                # 1) Google Cloud Storage image URI, which must be in the following form:
                # `gs://bucket_name/object_name` (for details, see
                # [Google Cloud Storage Request
                # URIs](https://cloud.google.com/storage/docs/reference-uris)).
                # NOTE: Cloud Storage object versioning is not supported.
                # 2) Publicly accessible image HTTP/HTTPS URL.
                # This is preferred over the legacy `gcs_image_uri` above. When both
                # `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
                # precedence.
          },
        },
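        # A sketch (hedged, not part of the schema above) of filling `content`
        # from a local file in Python; the JSON representation carries the
        # bytes base64-encoded:
        #
        #     import base64
        #     with open('image.jpg', 'rb') as f:
        #       content = base64.b64encode(f.read()).decode('utf-8')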
        "features": [ # Requested features.
          { # Users describe the type of Google Cloud Vision API tasks to perform over
              # images by using *Feature*s. Each Feature indicates a type of image
              # detection task to perform. Features encode the Cloud Vision API
              # vertical to operate on and the number of top-scoring results to return.
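              # Example (hedged): {"type": "LABEL_DETECTION", "maxResults": 10}
              # asks for the ten top-scoring labels for the image.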
            "type": "A String", # The feature type.
            "maxResults": 42, # Maximum number of results of this type.
          },
        ],
      },
    ],
  }

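  Example (a hedged sketch): a minimal batch request through this library,
  assuming `service` was built with
  googleapiclient.discovery.build('vision', 'v1', ...):

    body = {
      "requests": [{
        "image": {"source": {"imageUri": "gs://bucket_name/object_name"}},
        "features": [{"type": "LABEL_DETECTION", "maxResults": 5}],
      }],
    }
    response = service.images().annotate(body=body).execute()
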
  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # Response to a batch image annotation request.
    "responses": [ # Individual responses to image annotation requests within the batch.
      { # Response to an image annotation request.
        "safeSearchAnnotation": { # Set of features pertaining to the image, computed by computer vision # If present, safe-search annotation has completed successfully.
            # methods over safe-search verticals (for example, adult, spoof, medical,
            # violence).
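            # (Hedged note: each field below is a Likelihood enum string such as
            # "VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", or "VERY_LIKELY".)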
          "medical": "A String", # Likelihood that this is a medical image.
          "spoof": "A String", # Spoof likelihood. The likelihood that a modification
              # was made to the image's canonical version to make it appear
              # funny or offensive.
          "violence": "A String", # Violence likelihood.
          "adult": "A String", # Represents the adult content likelihood for the image.
        },
        "textAnnotations": [ # If present, text (OCR) detection has completed successfully.
          { # Set of detected entity features.
            "confidence": 3.14, # The accuracy of the entity detection in an image.
                # For example, for an image in which the "Eiffel Tower" entity is detected,
                # this field represents the confidence that there is a tower in the query
                # image. Range [0, 1].
            "description": "A String", # Entity textual description, expressed in its `locale` language.
            "locale": "A String", # The language code for the locale in which the entity textual
                # `description` is expressed.
            "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                # image. For example, the relevancy of "tower" is likely higher to an image
                # containing the detected "Eiffel Tower" than to an image containing a
                # detected distant towering building, even though the confidence that
                # there is a tower in each image may be the same. Range [0, 1].
            "mid": "A String", # Opaque entity ID. Some IDs may be available in
                # [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
            "locations": [ # The location information for the detected entity. Multiple
                # `LocationInfo` elements can be present because one location may
                # indicate the location of the scene in the image, and another location
                # may indicate the location of the place where the image was taken.
                # Location information is usually present for landmarks.
              { # Detected entity location information.
                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                    # of doubles representing degrees latitude and degrees longitude. Unless
                    # specified otherwise, this must conform to the
                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
                    # standard</a>. Values must be within normalized ranges.
                    #
                    # Example of normalization code in Python:
                    #
                    #     def NormalizeLongitude(longitude):
                    #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
                    #       q, r = divmod(longitude, 360.0)
                    #       if r > 180.0 or (r == 180.0 and q <= -1.0):
                    #         return r - 360.0
                    #       return r
                    #
                    #     def NormalizeLatLng(latitude, longitude):
                    #       """Wraps decimal degrees latitude and longitude to
                    #       [-90.0, 90.0] and [-180.0, 180.0], respectively."""
                    #       r = latitude % 360.0
                    #       if r <= 90.0:
                    #         return r, NormalizeLongitude(longitude)
                    #       elif r >= 270.0:
                    #         return r - 360, NormalizeLongitude(longitude)
                    #       else:
                    #         return 180 - r, NormalizeLongitude(longitude + 180.0)
                    #
                    #     assert 180.0 == NormalizeLongitude(180.0)
                    #     assert -180.0 == NormalizeLongitude(-180.0)
                    #     assert -179.0 == NormalizeLongitude(181.0)
                    #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
                    #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
                    #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
                    #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
                    #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
                    #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
                    #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
                    #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
                    #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
                    #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                },
              },
            ],
            "score": 3.14, # Overall score of the result. Range [0, 1].
            "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Currently not produced
                # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
                # are produced for the entire text detected in an image region, followed by
                # `boundingPoly`s for each word within the detected text.
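                # (Hedged note: in practice this means the first `textAnnotations`
                # element carries the full detected text, followed by one element
                # per word.)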
              "vertices": [ # The bounding polygon vertices.
                { # A vertex represents a 2D point in the image.
                    # NOTE: the vertex coordinates are in the same scale as the original image.
                  "y": 42, # Y coordinate.
                  "x": 42, # X coordinate.
                },
              ],
            },
            "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
                # fields, such as a score or string that qualifies the entity.
              { # A `Property` consists of a user-supplied name/value pair.
                "uint64Value": "A String", # Value of numeric properties.
                "name": "A String", # Name of the property.
                "value": "A String", # Value of the property.
              },
            ],
          },
        ],
        "webDetection": { # Relevant information for the image from the Internet. # If present, web detection has completed successfully.
          "webEntities": [ # Deduced entities from similar images on the Internet.
            { # Entity deduced from similar images on the Internet.
              "entityId": "A String", # Opaque entity ID.
              "score": 3.14, # Overall relevancy score for the entity.
                  # Not normalized and not comparable across different image queries.
              "description": "A String", # Canonical description of the entity, in English.
            },
          ],
          "pagesWithMatchingImages": [ # Web pages containing the matching images from the Internet.
            { # Metadata for web pages.
              "url": "A String", # The result web page URL.
              "score": 3.14, # Overall relevancy score for the web page.
                  # Not normalized and not comparable across different image queries.
            },
          ],
          "visuallySimilarImages": [ # The visually similar image results.
            { # Metadata for online images.
              "url": "A String", # The result image URL.
              "score": 3.14, # Overall relevancy score for the image.
                  # Not normalized and not comparable across different image queries.
            },
          ],
          "partialMatchingImages": [ # Partial matching images from the Internet.
              # Those images are similar enough to share some key-point features. For
              # example, an original image will likely have partial matching for its crops.
            { # Metadata for online images.
              "url": "A String", # The result image URL.
              "score": 3.14, # Overall relevancy score for the image.
                  # Not normalized and not comparable across different image queries.
            },
          ],
          "fullMatchingImages": [ # Fully matching images from the Internet.
              # Can include resized copies of the query image.
            { # Metadata for online images.
              "url": "A String", # The result image URL.
              "score": 3.14, # Overall relevancy score for the image.
                  # Not normalized and not comparable across different image queries.
            },
          ],
        },
        "fullTextAnnotation": { # TextAnnotation contains a structured representation of OCR extracted text. # If present, text (OCR) detection or document (OCR) text detection has
            # completed successfully.
            # This annotation provides the structural hierarchy for the OCR detected
            # text.
            # The hierarchy of an OCR extracted text structure is like this:
            #     TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
            # Each structural component, starting from Page, may further have its own
            # properties. Properties describe detected languages, breaks, etc. Please
            # refer to the google.cloud.vision.v1.TextAnnotation.TextProperty message
            # definition below for more detail.
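            #
            # A sketch (hedged) of walking this hierarchy in Python, assuming
            # `annotation` is this fullTextAnnotation dict:
            #
            #     for page in annotation.get('pages', []):
            #       for block in page.get('blocks', []):
            #         for paragraph in block.get('paragraphs', []):
            #           for word in paragraph.get('words', []):
            #             print(''.join(s['text'] for s in word.get('symbols', [])))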
          "text": "A String", # UTF-8 text detected on the pages.
          "pages": [ # List of pages detected by OCR.
            { # Detected page from OCR.
              "width": 42, # Page width in pixels.
              "property": { # Additional information detected on the structural component. # Additional information detected on the page.
                "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
                  "isPrefix": True or False, # True if break prepends the element.
                  "type": "A String", # Detected break type.
                },
                "detectedLanguages": [ # A list of detected languages together with confidence.
                  { # Detected language for a structural component.
                    "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
                        # information, see
                        # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                    "confidence": 3.14, # Confidence of detected language. Range [0, 1].
                  },
                ],
              },
              "blocks": [ # List of blocks of text, images, etc. on this page.
                { # Logical element on the page.
                  "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the block.
                      # The vertices are in the order of top-left, top-right, bottom-right,
                      # bottom-left. When a rotation of the bounding box is detected, the
                      # rotation is represented as a rotation around the top-left corner as
                      # defined when the text is read in the 'natural' orientation.
                      # For example:
                      #   * when the text is horizontal it might look like:
                      #      0----1
                      #      |    |
                      #      3----2
                      #   * when it's rotated 180 degrees around the top-left corner it becomes:
                      #      2----3
                      #      |    |
                      #      1----0
                      #   and the vertex order will still be (0, 1, 2, 3).
                    "vertices": [ # The bounding polygon vertices.
                      { # A vertex represents a 2D point in the image.
                          # NOTE: the vertex coordinates are in the same scale as the original image.
                        "y": 42, # Y coordinate.
                        "x": 42, # X coordinate.
                      },
                    ],
                  },
                  "blockType": "A String", # Detected block type (text, image, etc.) for this block.
                  "property": { # Additional information detected on the structural component. # Additional information detected for the block.
                    "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
                      "isPrefix": True or False, # True if break prepends the element.
                      "type": "A String", # Detected break type.
                    },
                    "detectedLanguages": [ # A list of detected languages together with confidence.
                      { # Detected language for a structural component.
                        "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
                            # information, see
                            # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                        "confidence": 3.14, # Confidence of detected language. Range [0, 1].
                      },
                    ],
                  },
                  "paragraphs": [ # List of paragraphs in this block (if this block is of type text).
                    { # Structural unit of text representing a number of words in a certain order.
                      "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the paragraph.
                          # The vertices are in the order of top-left, top-right, bottom-right,
                          # bottom-left. When a rotation of the bounding box is detected, the
                          # rotation is represented as a rotation around the top-left corner as
                          # defined when the text is read in the 'natural' orientation.
                          # For example:
                          #   * when the text is horizontal it might look like:
                          #      0----1
                          #      |    |
                          #      3----2
                          #   * when it's rotated 180 degrees around the top-left corner it becomes:
                          #      2----3
                          #      |    |
                          #      1----0
                          #   and the vertex order will still be (0, 1, 2, 3).
                        "vertices": [ # The bounding polygon vertices.
                          { # A vertex represents a 2D point in the image.
                              # NOTE: the vertex coordinates are in the same scale as the original image.
                            "y": 42, # Y coordinate.
                            "x": 42, # X coordinate.
                          },
                        ],
                      },
                      "property": { # Additional information detected on the structural component. # Additional information detected for the paragraph.
                        "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
                          "isPrefix": True or False, # True if break prepends the element.
                          "type": "A String", # Detected break type.
                        },
                        "detectedLanguages": [ # A list of detected languages together with confidence.
                          { # Detected language for a structural component.
                            "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
                                # information, see
                                # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                            "confidence": 3.14, # Confidence of detected language. Range [0, 1].
                          },
                        ],
                      },
                      "words": [ # List of words in this paragraph.
                        { # A word representation.
                          "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the word.
                              # The vertices are in the order of top-left, top-right, bottom-right,
                              # bottom-left. When a rotation of the bounding box is detected, the
                              # rotation is represented as a rotation around the top-left corner as
                              # defined when the text is read in the 'natural' orientation.
                              # For example:
                              #   * when the text is horizontal it might look like:
                              #      0----1
                              #      |    |
                              #      3----2
                              #   * when it's rotated 180 degrees around the top-left corner it becomes:
                              #      2----3
                              #      |    |
                              #      1----0
                              #   and the vertex order will still be (0, 1, 2, 3).
                            "vertices": [ # The bounding polygon vertices.
                              { # A vertex represents a 2D point in the image.
                                  # NOTE: the vertex coordinates are in the same scale as the original image.
                                "y": 42, # Y coordinate.
                                "x": 42, # X coordinate.
                              },
                            ],
                          },
                          "symbols": [ # List of symbols in the word.
                              # The order of the symbols follows the natural reading order.
                            { # A single symbol representation.
                              "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the symbol.
                                  # The vertices are in the order of top-left, top-right, bottom-right,
                                  # bottom-left. When a rotation of the bounding box is detected, the
                                  # rotation is represented as a rotation around the top-left corner as
                                  # defined when the text is read in the 'natural' orientation.
                                  # For example:
                                  #   * when the text is horizontal it might look like:
                                  #      0----1
                                  #      |    |
                                  #      3----2
                                  #   * when it's rotated 180 degrees around the top-left corner it becomes:
                                  #      2----3
                                  #      |    |
                                  #      1----0
                                  #   and the vertex order will still be (0, 1, 2, 3).
                                "vertices": [ # The bounding polygon vertices.
                                  { # A vertex represents a 2D point in the image.
                                      # NOTE: the vertex coordinates are in the same scale as the original image.
                                    "y": 42, # Y coordinate.
                                    "x": 42, # X coordinate.
                                  },
                                ],
                              },
                              "text": "A String", # The actual UTF-8 representation of the symbol.
                              "property": { # Additional information detected on the structural component. # Additional information detected for the symbol.
                                "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
                                  "isPrefix": True or False, # True if break prepends the element.
                                  "type": "A String", # Detected break type.
                                },
                                "detectedLanguages": [ # A list of detected languages together with confidence.
                                  { # Detected language for a structural component.
                                    "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
                                        # information, see
                                        # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                                    "confidence": 3.14, # Confidence of detected language. Range [0, 1].
                                  },
                                ],
                              },
                            },
                          ],
                          "property": { # Additional information detected on the structural component. # Additional information detected for the word.
                            "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
                              "isPrefix": True or False, # True if break prepends the element.
                              "type": "A String", # Detected break type.
                            },
                            "detectedLanguages": [ # A list of detected languages together with confidence.
                              { # Detected language for a structural component.
                                "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
                                    # information, see
                                    # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                                "confidence": 3.14, # Confidence of detected language. Range [0, 1].
                              },
                            ],
                          },
                        },
                      ],
                    },
                  ],
                },
              ],
              "height": 42, # Page height in pixels.
            },
          ],
        },
        "labelAnnotations": [ # If present, label detection has completed successfully.
          { # Set of detected entity features.
            "confidence": 3.14, # The accuracy of the entity detection in an image.
                # For example, for an image in which the "Eiffel Tower" entity is detected,
                # this field represents the confidence that there is a tower in the query
                # image. Range [0, 1].
            "description": "A String", # Entity textual description, expressed in its `locale` language.
            "locale": "A String", # The language code for the locale in which the entity textual
                # `description` is expressed.
            "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                # image. For example, the relevancy of "tower" is likely higher to an image
                # containing the detected "Eiffel Tower" than to an image containing a
                # detected distant towering building, even though the confidence that
                # there is a tower in each image may be the same. Range [0, 1].
            "mid": "A String", # Opaque entity ID. Some IDs may be available in
                # [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
            "locations": [ # The location information for the detected entity. Multiple
                # `LocationInfo` elements can be present because one location may
                # indicate the location of the scene in the image, and another location
                # may indicate the location of the place where the image was taken.
                # Location information is usually present for landmarks.
              { # Detected entity location information.
                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                    # of doubles representing degrees latitude and degrees longitude. Unless
                    # specified otherwise, this must conform to the
                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
                    # standard</a>. Values must be within normalized ranges.
                    #
                    # Example of normalization code in Python:
                    #
                    #     def NormalizeLongitude(longitude):
                    #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
                    #       q, r = divmod(longitude, 360.0)
                    #       if r > 180.0 or (r == 180.0 and q <= -1.0):
                    #         return r - 360.0
                    #       return r
                    #
                    #     def NormalizeLatLng(latitude, longitude):
                    #       """Wraps decimal degrees latitude and longitude to
                    #       [-90.0, 90.0] and [-180.0, 180.0], respectively."""
                    #       r = latitude % 360.0
                    #       if r <= 90.0:
                    #         return r, NormalizeLongitude(longitude)
                    #       elif r >= 270.0:
                    #         return r - 360, NormalizeLongitude(longitude)
                    #       else:
                    #         return 180 - r, NormalizeLongitude(longitude + 180.0)
                    #
                    #     assert 180.0 == NormalizeLongitude(180.0)
                    #     assert -180.0 == NormalizeLongitude(-180.0)
                    #     assert -179.0 == NormalizeLongitude(181.0)
                    #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
                    #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
                    #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
                    #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
                    #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
                    #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
                    #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
                    #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
                    #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
                    #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                },
              },
            ],
            "score": 3.14, # Overall score of the result. Range [0, 1].
            "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Currently not produced
                # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
                # are produced for the entire text detected in an image region, followed by
                # `boundingPoly`s for each word within the detected text.
              "vertices": [ # The bounding polygon vertices.
                { # A vertex represents a 2D point in the image.
                    # NOTE: the vertex coordinates are in the same scale as the original image.
                  "y": 42, # Y coordinate.
                  "x": 42, # X coordinate.
                },
              ],
            },
            "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
                # fields, such as a score or string that qualifies the entity.
              { # A `Property` consists of a user-supplied name/value pair.
                "uint64Value": "A String", # Value of numeric properties.
                "name": "A String", # Name of the property.
                "value": "A String", # Value of the property.
              },
            ],
          },
        ],
        "imagePropertiesAnnotation": { # Stores image properties, such as dominant colors. # If present, image properties were extracted successfully.
          "dominantColors": { # Set of dominant colors and their corresponding scores. # If present, dominant colors completed successfully.
            "colors": [ # RGB color values with their score and pixel fraction.
              { # Color information consists of RGB channels, score, and the fraction of
                  # the image that the color occupies.
                "color": { # Represents a color in the RGBA color space. This representation is designed # RGB components of the color.
                    # for simplicity of conversion to/from color representations in various
                    # languages over compactness; for example, the fields of this representation
                    # can be trivially provided to the constructor of "java.awt.Color" in Java; it
                    # can also be trivially provided to UIColor's "+colorWithRed:green:blue:alpha"
                    # method in iOS; and, with just a little work, it can be easily formatted into
                    # a CSS "rgba()" string in JavaScript, as well. Here are some examples:
                    #
                    # Example (Java):
                    #
                    #      import com.google.type.Color;
                    #
                    #      // ...
                    #      public static java.awt.Color fromProto(Color protocolor) {
                    #        float alpha = protocolor.hasAlpha()
                    #            ? protocolor.getAlpha().getValue()
                    #            : 1.0f;
                    #
                    #        return new java.awt.Color(
                    #            protocolor.getRed(),
                    #            protocolor.getGreen(),
                    #            protocolor.getBlue(),
                    #            alpha);
                    #      }
                    #
                    #      public static Color toProto(java.awt.Color color) {
                    #        float red = (float) color.getRed();
                    #        float green = (float) color.getGreen();
                    #        float blue = (float) color.getBlue();
                    #        float denominator = 255.0f;
                    #        Color.Builder resultBuilder =
                    #            Color
                    #                .newBuilder()
                    #                .setRed(red / denominator)
                    #                .setGreen(green / denominator)
                    #                .setBlue(blue / denominator);
                    #        int alpha = color.getAlpha();
                    #        if (alpha != 255) {
                    #          resultBuilder.setAlpha(
                    #              FloatValue
                    #                  .newBuilder()
                    #                  .setValue(((float) alpha) / denominator)
                    #                  .build());
                    #        }
                    #        return resultBuilder.build();
                    #      }
                    #      // ...
                    #
                    # Example (iOS / Obj-C):
                    #
                    #      // ...
                    #      static UIColor* fromProto(Color* protocolor) {
                    #         float red = [protocolor red];
                    #         float green = [protocolor green];
                    #         float blue = [protocolor blue];
                    #         FloatValue* alpha_wrapper = [protocolor alpha];
                    #         float alpha = 1.0;
                    #         if (alpha_wrapper != nil) {
                    #           alpha = [alpha_wrapper value];
                    #         }
                    #         return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
                    #      }
                    #
                    #      static Color* toProto(UIColor* color) {
                    #          CGFloat red, green, blue, alpha;
                    #          if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) {
                    #            return nil;
                    #          }
                    #          Color* result = [[Color alloc] init];
                    #          [result setRed:red];
                    #          [result setGreen:green];
                    #          [result setBlue:blue];
                    #          if (alpha <= 0.9999) {
                    #            [result setAlpha:floatWrapperWithValue(alpha)];
                    #          }
                    #          [result autorelease];
                    #          return result;
                    #     }
                    #     // ...
                    #
                    #  Example (JavaScript):
                    #
                    #     // ...
                    #
                    #     var protoToCssColor = function(rgb_color) {
                    #        var redFrac = rgb_color.red || 0.0;
                    #        var greenFrac = rgb_color.green || 0.0;
                    #        var blueFrac = rgb_color.blue || 0.0;
                    #        var red = Math.floor(redFrac * 255);
                    #        var green = Math.floor(greenFrac * 255);
                    #        var blue = Math.floor(blueFrac * 255);
                    #
                    #        if (!('alpha' in rgb_color)) {
                    #           return rgbToCssColor_(red, green, blue);
                    #        }
                    #
                    #        var alphaFrac = rgb_color.alpha.value || 0.0;
                    #        var rgbParams = [red, green, blue].join(',');
                    #        return ['rgba(', rgbParams, ',', alphaFrac, ')'].join('');
                    #     };
                    #
                    #     var rgbToCssColor_ = function(red, green, blue) {
                    #       var rgbNumber = new Number((red << 16) | (green << 8) | blue);
                    #       var hexString = rgbNumber.toString(16);
                    #       var missingZeros = 6 - hexString.length;
                    #       var resultBuilder = ['#'];
                    #       for (var i = 0; i < missingZeros; i++) {
                    #          resultBuilder.push('0');
                    #       }
                    #       resultBuilder.push(hexString);
                    #       return resultBuilder.join('');
                    #     };
                    #
                    #     // ...
                  "blue": 3.14, # The amount of blue in the color as a value in the interval [0, 1].
                  "alpha": 3.14, # The fraction of this color that should be applied to the pixel. That is,
                      # the final pixel color is defined by the equation:
                      #
                      #   pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
                      #
                      # This means that a value of 1.0 corresponds to a solid color, whereas
                      # a value of 0.0 corresponds to a completely transparent color. This
                      # uses a wrapper message rather than a simple float scalar so that it is
                      # possible to distinguish between a default value and the value being unset.
                      # If omitted, this color object is to be rendered as a solid color
                      # (as if the alpha value had been explicitly given with a value of 1.0).
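                      # Worked example (hedged): with alpha = 0.25, this color
                      # pure red (1, 0, 0), and a white background (1, 1, 1),
                      # the rendered pixel is
                      # 0.25*(1, 0, 0) + 0.75*(1, 1, 1) = (1.0, 0.75, 0.75).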
    809                   "green": 3.14, # The amount of green in the color as a value in the interval [0, 1].
    810                   "red": 3.14, # The amount of red in the color as a value in the interval [0, 1].
    811                 },
    812                 "pixelFraction": 3.14, # The fraction of pixels the color occupies in the image.
    813                     # Value in range [0, 1].
    814                 "score": 3.14, # Image-specific score for this color. Value in range [0, 1].
    815               },
    816             ],
    817           },
    818         },
    819         "faceAnnotations": [ # If present, face detection has completed successfully.
    820           { # A face annotation object contains the results of face detection.
    821             "sorrowLikelihood": "A String", # Sorrow likelihood.
    822             "landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
    823             "underExposedLikelihood": "A String", # Under-exposed likelihood.
    824             "detectionConfidence": 3.14, # Detection confidence. Range [0, 1].
    825             "joyLikelihood": "A String", # Joy likelihood.
    826             "landmarks": [ # Detected face landmarks.
    827               { # A face-specific landmark (for example, a face feature).
    828                   # Landmark positions may fall outside the bounds of the image
    829                   # if the face is near one or more edges of the image.
    830                   # Therefore it is NOT guaranteed that `0 <= x < width` or
    831                   # `0 <= y < height`.
    832                 "position": { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
    833                     # A valid Position must have both x and y coordinates.
    834                     # The position coordinates are in the same scale as the original image.
    835                   "y": 3.14, # Y coordinate.
    836                   "x": 3.14, # X coordinate.
    837                   "z": 3.14, # Z coordinate (or depth).
    838                 },
    839                 "type": "A String", # Face landmark type.
    840               },
    841             ],
    842             "surpriseLikelihood": "A String", # Surprise likelihood.
    843             "blurredLikelihood": "A String", # Blurred likelihood.
    844             "tiltAngle": 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
    845                 # pointing relative to the image's horizontal plane. Range [-180,180].
    846             "angerLikelihood": "A String", # Anger likelihood.
    847             "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
    848                 # are in the original image's scale, as returned in `ImageParams`.
    849                 # The bounding box is computed to "frame" the face in accordance with human
    850                 # expectations. It is based on the landmarker results.
    851                 # Note that one or more x and/or y coordinates may not be generated in the
    852                 # `BoundingPoly` (the polygon will be unbounded) if only a partial face
    853                 # appears in the image to be annotated.
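                        #
                        # A sketch of deriving an axis-aligned box from the vertices
                        # (hypothetical helper; assumes at least one vertex, and reads an
                        # absent coordinate as 0):
                        #
                        #     def to_box(poly):
                        #       xs = [v.get('x', 0) for v in poly.get('vertices', [])]
                        #       ys = [v.get('y', 0) for v in poly.get('vertices', [])]
                        #       return min(xs), min(ys), max(xs), max(ys)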
    854               "vertices": [ # The bounding polygon vertices.
    855                 { # A vertex represents a 2D point in the image.
    856                     # NOTE: the vertex coordinates are in the same scale as the original image.
    857                   "y": 42, # Y coordinate.
    858                   "x": 42, # X coordinate.
    859                 },
    860               ],
    861             },
    862             "rollAngle": 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
    863                 # of the face relative to the image vertical about the axis perpendicular to
    864                 # the face. Range [-180,180].
    865             "panAngle": 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
    866                 # pointing relative to the vertical plane perpendicular to the image. Range
    867                 # [-180,180].
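                        #
                        # The three pose angles (roll, pan, tilt) are reported in degrees; a
                        # sketch of converting one for trigonometry (`face` is a hypothetical
                        # face annotation dict):
                        #
                        #     import math
                        #     pan_radians = math.radians(face.get('panAngle', 0.0))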
    868             "headwearLikelihood": "A String", # Headwear likelihood.
    869             "fdBoundingPoly": { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
    870                 # `boundingPoly`, and encloses only the skin part of the face. Typically, it
    871                 # is used to eliminate the face from any image analysis that detects the
    872                 # "amount of skin" visible in an image. It is not based on the
    873                 # landmarker results, only on the initial face detection, hence
    874                 # the <code>fd</code> (face detection) prefix.
    875               "vertices": [ # The bounding polygon vertices.
    876                 { # A vertex represents a 2D point in the image.
    877                     # NOTE: the vertex coordinates are in the same scale as the original image.
    878                   "y": 42, # Y coordinate.
    879                   "x": 42, # X coordinate.
    880                 },
    881               ],
    882             },
    883           },
    884         ],
    885         "logoAnnotations": [ # If present, logo detection has completed successfully.
    886           { # Set of detected entity features.
    887             "confidence": 3.14, # The accuracy of the entity detection in an image.
    888                 # For example, for an image in which the "Eiffel Tower" entity is detected,
    889                 # this field represents the confidence that there is a tower in the query
    890                 # image. Range [0, 1].
    891             "description": "A String", # Entity textual description, expressed in its `locale` language.
    892             "locale": "A String", # The language code for the locale in which the entity textual
    893                 # `description` is expressed.
    894             "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
    895                 # image. For example, the relevancy of "tower" is likely higher to an image
    896                 # containing the detected "Eiffel Tower" than to an image containing a
    897                 # detected distant towering building, even though the confidence that
    898                 # there is a tower in each image may be the same. Range [0, 1].
    899             "mid": "A String", # Opaque entity ID. Some IDs may be available in
    900                 # [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
    901             "locations": [ # The location information for the detected entity. Multiple
    902                 # `LocationInfo` elements can be present because one location may
    903                 # indicate the location of the scene in the image, and another location
    904                 # may indicate the location of the place where the image was taken.
    905                 # Location information is usually present for landmarks.
    906               { # Detected entity location information.
    907                 "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
    908                     # of doubles representing degrees latitude and degrees longitude. Unless
    909                     # specified otherwise, this must conform to the
    910                     # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
    911                     # standard</a>. Values must be within normalized ranges.
    912                     #
    913                     # Example of normalization code in Python:
    914                     #
    915                     #     def NormalizeLongitude(longitude):
    916                     #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
    917                     #       q, r = divmod(longitude, 360.0)
    918                     #       if r > 180.0 or (r == 180.0 and q <= -1.0):
    919                     #         return r - 360.0
    920                     #       return r
    921                     #
    922                     #     def NormalizeLatLng(latitude, longitude):
    923                     #       """Wraps decimal degrees latitude and longitude to
    924                     #       [-90.0, 90.0] and [-180.0, 180.0], respectively."""
    925                     #       r = latitude % 360.0
    926                     #       if r <= 90.0:
    927                     #         return r, NormalizeLongitude(longitude)
    928                     #       elif r >= 270.0:
    929                     #         return r - 360, NormalizeLongitude(longitude)
    930                     #       else:
    931                     #         return 180 - r, NormalizeLongitude(longitude + 180.0)
    932                     #
    933                     #     assert 180.0 == NormalizeLongitude(180.0)
    934                     #     assert -180.0 == NormalizeLongitude(-180.0)
    935                     #     assert -179.0 == NormalizeLongitude(181.0)
    936                     #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
    937                     #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
    938                     #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
    939                     #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
    940                     #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
    941                     #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
    942                     #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
    943                     #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
    944                     #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
    945                     #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
    946                   "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
    947                   "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
    948                 },
    949               },
    950             ],
    951             "score": 3.14, # Overall score of the result. Range [0, 1].
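                        # For example, a caller might keep only confident results (`resp` is a
                        # hypothetical annotation response dict; the 0.7 threshold is purely
                        # illustrative):
                        #
                        #     logos = [e for e in resp.get('logoAnnotations', [])
                        #              if e.get('score', 0.0) >= 0.7]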
    952             "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Currently not produced
    953                 # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
    954                 # are produced for the entire text detected in an image region, followed by
    955                 # `boundingPoly`s for each word within the detected text.
    956               "vertices": [ # The bounding polygon vertices.
    957                 { # A vertex represents a 2D point in the image.
    958                     # NOTE: the vertex coordinates are in the same scale as the original image.
    959                   "y": 42, # Y coordinate.
    960                   "x": 42, # X coordinate.
    961                 },
    962               ],
    963             },
    964             "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
    965                 # fields, such as a score or string that qualifies the entity.
    966               { # A `Property` consists of a user-supplied name/value pair.
    967                 "uint64Value": "A String", # Value of numeric properties.
    968                 "name": "A String", # Name of the property.
    969                 "value": "A String", # Value of the property.
    970               },
    971             ],
    972           },
    973         ],
    974         "landmarkAnnotations": [ # If present, landmark detection has completed successfully.
    975           { # Set of detected entity features.
    976             "confidence": 3.14, # The accuracy of the entity detection in an image.
    977                 # For example, for an image in which the "Eiffel Tower" entity is detected,
    978                 # this field represents the confidence that there is a tower in the query
    979                 # image. Range [0, 1].
    980             "description": "A String", # Entity textual description, expressed in its `locale` language.
    981             "locale": "A String", # The language code for the locale in which the entity textual
    982                 # `description` is expressed.
    983             "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
    984                 # image. For example, the relevancy of "tower" is likely higher to an image
    985                 # containing the detected "Eiffel Tower" than to an image containing a
    986                 # detected distant towering building, even though the confidence that
    987                 # there is a tower in each image may be the same. Range [0, 1].
    988             "mid": "A String", # Opaque entity ID. Some IDs may be available in
    989                 # [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
    990             "locations": [ # The location information for the detected entity. Multiple
    991                 # `LocationInfo` elements can be present because one location may
    992                 # indicate the location of the scene in the image, and another location
    993                 # may indicate the location of the place where the image was taken.
    994                 # Location information is usually present for landmarks.
    995               { # Detected entity location information.
    996                 "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
    997                     # of doubles representing degrees latitude and degrees longitude. Unless
    998                     # specified otherwise, this must conform to the
    999                     # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
   1000                     # standard</a>. Values must be within normalized ranges.
   1001                     #
   1002                     # Example of normalization code in Python:
   1003                     #
   1004                     #     def NormalizeLongitude(longitude):
   1005                     #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
   1006                     #       q, r = divmod(longitude, 360.0)
   1007                     #       if r > 180.0 or (r == 180.0 and q <= -1.0):
   1008                     #         return r - 360.0
   1009                     #       return r
   1010                     #
   1011                     #     def NormalizeLatLng(latitude, longitude):
   1012                     #       """Wraps decimal degrees latitude and longitude to
   1013                     #       [-90.0, 90.0] and [-180.0, 180.0], respectively."""
   1014                     #       r = latitude % 360.0
   1015                     #       if r <= 90.0:
   1016                     #         return r, NormalizeLongitude(longitude)
   1017                     #       elif r >= 270.0:
   1018                     #         return r - 360, NormalizeLongitude(longitude)
   1019                     #       else:
   1020                     #         return 180 - r, NormalizeLongitude(longitude + 180.0)
   1021                     #
   1022                     #     assert 180.0 == NormalizeLongitude(180.0)
   1023                     #     assert -180.0 == NormalizeLongitude(-180.0)
   1024                     #     assert -179.0 == NormalizeLongitude(181.0)
   1025                     #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
   1026                     #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
   1027                     #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
   1028                     #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
   1029                     #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
   1030                     #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
   1031                     #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
   1032                     #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
   1033                     #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
   1034                     #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
   1035                   "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
   1036                   "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
   1037                 },
   1038               },
   1039             ],
   1040             "score": 3.14, # Overall score of the result. Range [0, 1].
   1041             "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Currently not produced
   1042                 # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
   1043                 # are produced for the entire text detected in an image region, followed by
   1044                 # `boundingPoly`s for each word within the detected text.
   1045               "vertices": [ # The bounding polygon vertices.
   1046                 { # A vertex represents a 2D point in the image.
   1047                     # NOTE: the vertex coordinates are in the same scale as the original image.
   1048                   "y": 42, # Y coordinate.
   1049                   "x": 42, # X coordinate.
   1050                 },
   1051               ],
   1052             },
   1053             "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
    1054                 # fields, such as a score or string that qualifies the entity.
   1055               { # A `Property` consists of a user-supplied name/value pair.
   1056                 "uint64Value": "A String", # Value of numeric properties.
   1057                 "name": "A String", # Name of the property.
   1058                 "value": "A String", # Value of the property.
   1059               },
   1060             ],
   1061           },
   1062         ],
    1063         "error": { # If set, represents the error message for the operation. Note that filled-in image annotations are guaranteed to be correct, even when `error` is set.
    1064             # The `Status` type defines a logical error model that is suitable for
    1065             # different programming environments, including REST APIs and RPC APIs.
    1066             # It is used by [gRPC](https://github.com/grpc).
    1067             # The error model is designed to be:
   1068             #
   1069             # - Simple to use and understand for most users
   1070             # - Flexible enough to meet unexpected needs
   1071             #
   1072             # # Overview
   1073             #
   1074             # The `Status` message contains three pieces of data: error code, error message,
   1075             # and error details. The error code should be an enum value of
   1076             # google.rpc.Code, but it may accept additional error codes if needed.  The
   1077             # error message should be a developer-facing English message that helps
   1078             # developers *understand* and *resolve* the error. If a localized user-facing
   1079             # error message is needed, put the localized message in the error details or
   1080             # localize it in the client. The optional error details may contain arbitrary
   1081             # information about the error. There is a predefined set of error detail types
   1082             # in the package `google.rpc` that can be used for common error conditions.
   1083             #
   1084             # # Language mapping
   1085             #
   1086             # The `Status` message is the logical representation of the error model, but it
   1087             # is not necessarily the actual wire format. When the `Status` message is
   1088             # exposed in different client libraries and different wire protocols, it can be
    1089             # mapped differently. For example, it will likely be mapped to exceptions
    1090             # in Java, but to error codes in C.
   1091             #
   1092             # # Other uses
   1093             #
   1094             # The error model and the `Status` message can be used in a variety of
   1095             # environments, either with or without APIs, to provide a
   1096             # consistent developer experience across different environments.
   1097             #
   1098             # Example uses of this error model include:
   1099             #
   1100             # - Partial errors. If a service needs to return partial errors to the client,
   1101             #     it may embed the `Status` in the normal response to indicate the partial
   1102             #     errors.
   1103             #
   1104             # - Workflow errors. A typical workflow has multiple steps. Each step may
   1105             #     have a `Status` message for error reporting.
   1106             #
    1107             # - Batch operations. If a client uses a batch request and batch response, the
    1108             #     `Status` message should be used directly inside the batch response, one for
   1109             #     each error sub-response.
   1110             #
   1111             # - Asynchronous operations. If an API call embeds asynchronous operation
   1112             #     results in its response, the status of those operations should be
   1113             #     represented directly using the `Status` message.
   1114             #
    1115             # - Logging. If some API errors are stored in logs, the `Status` message could
   1116             #     be used directly after any stripping needed for security/privacy reasons.
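                     #
                     # A minimal sketch of handling per-image errors in this method's batch
                     # response (assuming the returned dict carries a `responses` list, one
                     # entry per request):
                     #
                     #     for r in batch.get('responses', []):
                     #       if 'error' in r:
                     #         print('annotation failed: %s (code %s)' % (
                     #             r['error'].get('message'), r['error'].get('code')))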
   1117           "message": "A String", # A developer-facing error message, which should be in English. Any
   1118               # user-facing error message should be localized and sent in the
   1119               # google.rpc.Status.details field, or localized by the client.
   1120           "code": 42, # The status code, which should be an enum value of google.rpc.Code.
   1121           "details": [ # A list of messages that carry the error details.  There will be a
   1122               # common set of message types for APIs to use.
   1123             {
   1124               "a_key": "", # Properties of the object. Contains field @type with type URL.
   1125             },
   1126           ],
   1127         },
   1128         "cropHintsAnnotation": { # Set of crop hints that are used to generate new crops when serving images. # If present, crop hints have completed successfully.
   1129           "cropHints": [ # Crop hint results.
   1130             { # Single crop hint that is used to generate a new crop when serving an image.
   1131               "confidence": 3.14, # Confidence of this being a salient region.  Range [0, 1].
   1132               "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon for the crop region. The coordinates of the bounding
   1133                   # box are in the original image's scale, as returned in `ImageParams`.
   1134                 "vertices": [ # The bounding polygon vertices.
   1135                   { # A vertex represents a 2D point in the image.
   1136                       # NOTE: the vertex coordinates are in the same scale as the original image.
   1137                     "y": 42, # Y coordinate.
   1138                     "x": 42, # X coordinate.
   1139                   },
   1140                 ],
   1141               },
   1142               "importanceFraction": 3.14, # Fraction of importance of this salient region with respect to the original
   1143                   # image.
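                           #
                           # A sketch of picking the most confident hint (hypothetical helper;
                           # `annotation` is the `cropHintsAnnotation` dict above):
                           #
                           #     def best_hint(annotation):
                           #       hints = annotation.get('cropHints', [])
                           #       if not hints:
                           #         return None
                           #       return max(hints, key=lambda h: h.get('confidence', 0.0))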
   1144             },
   1145           ],
   1146         },
   1147       },
   1148     ],
   1149   }</pre>
   1150 </div>
   1151 
   1152 </body></html>