1<html><body>
2<style>
3
4body, h1, h2, h3, div, span, p, pre, a {
5  margin: 0;
6  padding: 0;
7  border: 0;
8  font-weight: inherit;
9  font-style: inherit;
10  font-size: 100%;
11  font-family: inherit;
12  vertical-align: baseline;
13}
14
15body {
16  font-size: 13px;
17  padding: 1em;
18}
19
20h1 {
21  font-size: 26px;
22  margin-bottom: 1em;
23}
24
25h2 {
26  font-size: 24px;
27  margin-bottom: 1em;
28}
29
30h3 {
31  font-size: 20px;
32  margin-bottom: 1em;
33  margin-top: 1em;
34}
35
36pre, code {
37  line-height: 1.5;
38  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
39}
40
41pre {
42  margin-top: 0.5em;
43}
44
45h1, h2, h3, p {
  font-family: Arial, sans-serif;
47}
48
49h1, h2, h3 {
50  border-bottom: solid #CCC 1px;
51}
52
53.toc_element {
54  margin-top: 0.5em;
55}
56
57.firstline {
  margin-left: 2em;
59}
60
61.method  {
62  margin-top: 1em;
63  border: solid 1px #CCC;
64  padding: 1em;
65  background: #EEE;
66}
67
68.details {
69  font-weight: bold;
70  font-size: 14px;
71}
72
73</style>
74
75<h1><a href="vision_v1.html">Cloud Vision API</a> . <a href="vision_v1.images.html">images</a></h1>
76<h2>Instance Methods</h2>
77<p class="toc_element">
78  <code><a href="#annotate">annotate(body, x__xgafv=None)</a></code></p>
79<p class="firstline">Run image detection and annotation for a batch of images.</p>
80<p class="toc_element">
81  <code><a href="#asyncBatchAnnotate">asyncBatchAnnotate(body, x__xgafv=None)</a></code></p>
82<p class="firstline">Run asynchronous image detection and annotation for a list of images.</p>
83<h3>Method Details</h3>
84<div class="method">
85    <code class="details" id="annotate">annotate(body, x__xgafv=None)</code>
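<p>A minimal usage sketch with the generated Python client is shown below. It assumes Application Default Credentials are configured and uses an illustrative, publicly accessible image URL; adjust the feature list to the detections you need.</p>
<pre>
from googleapiclient.discovery import build

# Build the Cloud Vision v1 client (uses Application Default Credentials).
service = build('vision', 'v1')

# One batched request: label detection on a single image (illustrative values).
body = {
    'requests': [{
        'image': {'source': {'imageUri': 'https://example.com/photo.jpg'}},
        'features': [{'type': 'LABEL_DETECTION', 'maxResults': 5}],
    }],
}

response = service.images().annotate(body=body).execute()
for label in response['responses'][0].get('labelAnnotations', []):
    print(label['description'], label['score'])
</pre>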
86  <pre>Run image detection and annotation for a batch of images.
87
88Args:
89  body: object, The request body. (required)
90    The object takes the form of:
91
92{ # Multiple image annotation requests are batched into a single service call.
93    "requests": [ # Individual image annotation requests for this batch.
94      { # Request for performing Google Cloud Vision API tasks over a user-provided
95          # image, with user-requested features, and with context information.
96        "imageContext": { # Image context and/or feature-specific parameters. # Additional context that may accompany the image.
97          "latLongRect": { # Rectangle determined by min and max `LatLng` pairs. # Not used.
98            "minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
99                # of doubles representing degrees latitude and degrees longitude. Unless
100                # specified otherwise, this must conform to the
101                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
102                # standard</a>. Values must be within normalized ranges.
103              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
104              "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
105            },
106            "maxLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
107                # of doubles representing degrees latitude and degrees longitude. Unless
108                # specified otherwise, this must conform to the
109                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
110                # standard</a>. Values must be within normalized ranges.
111              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
112              "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
113            },
114          },
115          "languageHints": [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
116              # yields the best results since it enables automatic language detection. For
117              # languages based on the Latin alphabet, setting `language_hints` is not
118              # needed. In rare cases, when the language of the text in the image is known,
119              # setting a hint will help get better results (although it will be a
120              # significant hindrance if the hint is wrong). Text detection returns an
121              # error if one or more of the specified languages is not one of the
122              # [supported languages](/vision/docs/languages).
123            "A String",
124          ],
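          # For example (illustrative), a hint for mixed English and Hindi text could be
          # provided as: "languageHints": ["en", "hi"].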
125          "productSearchParams": { # Parameters for a product search request. # Parameters for product search.
126            "filter": "A String", # The filtering expression. This can be used to restrict search results based
                # on Product labels. We currently support an AND of ORs of key-value
128                # expressions, where each expression within an OR must have the same key. An
129                # '=' should be used to connect the key and value.
130                #
131                # For example, "(color = red OR color = blue) AND brand = Google" is
132                # acceptable, but "(color = red OR brand = Google)" is not acceptable.
133                # "color: red" is not acceptable because it uses a ':' instead of an '='.
134            "productCategories": [ # The list of product categories to search in. Currently, we only consider
135                # the first category, and either "homegoods-v2", "apparel-v2", or "toys-v2"
136                # should be specified. The legacy categories "homegoods", "apparel", and
137                # "toys" are still supported but will be deprecated. For new products, please
138                # use "homegoods-v2", "apparel-v2", or "toys-v2" for better product search
139                # accuracy. It is recommended to migrate existing products to these
140                # categories as well.
141              "A String",
142            ],
143            "productSet": "A String", # The resource name of a ProductSet to be searched for similar images.
144                #
145                # Format is:
146                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
147            "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
148                # Optional. If it is not specified, system discretion will be applied.
149              "normalizedVertices": [ # The bounding polygon normalized vertices.
150                { # A vertex represents a 2D point in the image.
151                    # NOTE: the normalized vertex coordinates are relative to the original image
152                    # and range from 0 to 1.
153                  "y": 3.14, # Y coordinate.
154                  "x": 3.14, # X coordinate.
155                },
156              ],
157              "vertices": [ # The bounding polygon vertices.
158                { # A vertex represents a 2D point in the image.
159                    # NOTE: the vertex coordinates are in the same scale as the original image.
160                  "y": 42, # Y coordinate.
161                  "x": 42, # X coordinate.
162                },
163              ],
164            },
165          },
166          "cropHintsParams": { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
167            "aspectRatios": [ # Aspect ratios in floats, representing the ratio of the width to the height
168                # of the image. For example, if the desired aspect ratio is 4/3, the
169                # corresponding float value should be 1.33333.  If not specified, the
170                # best possible crop is returned. The number of provided aspect ratios is
171                # limited to a maximum of 16; any aspect ratios provided after the 16th are
172                # ignored.
173              3.14,
174            ],
175          },
176          "webDetectionParams": { # Parameters for web detection request. # Parameters for web detection.
177            "includeGeoResults": True or False, # Whether to include results derived from the geo information in the image.
178          },
179        },
180        "image": { # Client image to perform Google Cloud Vision API tasks over. # The image to be processed.
181          "content": "A String", # Image content, represented as a stream of bytes.
              # Note: As with all `bytes` fields, protocol buffers use a pure binary
183              # representation, whereas JSON representations use base64.
184          "source": { # External image source (Google Cloud Storage or web URL image location). # Google Cloud Storage image location, or publicly-accessible image
185              # URL. If both `content` and `source` are provided for an image, `content`
186              # takes precedence and is used to perform the image annotation request.
187            "gcsImageUri": "A String", # **Use `image_uri` instead.**
188                #
                # The Google Cloud Storage URI of the form
190                # `gs://bucket_name/object_name`. Object versioning is not supported. See
191                # [Google Cloud Storage Request
192                # URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
193            "imageUri": "A String", # The URI of the source image. Can be either:
194                #
195                # 1. A Google Cloud Storage URI of the form
196                #    `gs://bucket_name/object_name`. Object versioning is not supported. See
197                #    [Google Cloud Storage Request
198                #    URIs](https://cloud.google.com/storage/docs/reference-uris) for more
199                #    info.
200                #
201                # 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
202                #    HTTP/HTTPS URLs, Google cannot guarantee that the request will be
203                #    completed. Your request may fail if the specified host denies the
204                #    request (e.g. due to request throttling or DOS prevention), or if Google
205                #    throttles requests to the site for abuse prevention. You should not
206                #    depend on externally-hosted images for production applications.
207                #
208                # When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
209                # precedence.
210          },
211        },
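        # For example (illustrative values), an image can be supplied either inline,
        #   "image": {"content": "...base64-encoded bytes..."},
        # or by reference,
        #   "image": {"source": {"imageUri": "gs://my-bucket/photo.jpg"}}.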
212        "features": [ # Requested features.
213          { # The type of Google Cloud Vision API detection to perform, and the maximum
214              # number of results to return for that type. Multiple `Feature` objects can
215              # be specified in the `features` list.
216            "model": "A String", # Model to use for the feature.
217                # Supported values: "builtin/stable" (the default if unset) and
218                # "builtin/latest".
219            "type": "A String", # The feature type.
220            "maxResults": 42, # Maximum number of results of this type. Does not apply to
221                # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
222          },
223        ],
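        # For example (illustrative), to request both label and text detection:
        #   "features": [{"type": "LABEL_DETECTION", "maxResults": 10},
        #                {"type": "TEXT_DETECTION"}]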
224      },
225    ],
226  }
227
228  x__xgafv: string, V1 error format.
229    Allowed values
230      1 - v1 error format
231      2 - v2 error format
232
233Returns:
234  An object of the form:
235
236    { # Response to a batch image annotation request.
237    "responses": [ # Individual responses to image annotation requests within the batch.
238      { # Response to an image annotation request.
239        "safeSearchAnnotation": { # Set of features pertaining to the image, computed by computer vision # If present, safe-search annotation has completed successfully.
240            # methods over safe-search verticals (for example, adult, spoof, medical,
241            # violence).
          "spoof": "A String", # Spoof likelihood. The likelihood that a modification
243              # was made to the image's canonical version to make it appear
244              # funny or offensive.
245          "violence": "A String", # Likelihood that this image contains violent content.
246          "medical": "A String", # Likelihood that this is a medical image.
247          "adult": "A String", # Represents the adult content likelihood for the image. Adult content may
248              # contain elements such as nudity, pornographic images or cartoons, or
249              # sexual activities.
250          "racy": "A String", # Likelihood that the request image contains racy content. Racy content may
251              # include (but is not limited to) skimpy or sheer clothing, strategically
252              # covered nudity, lewd or provocative poses, or close-ups of sensitive
253              # body areas.
254        },
255        "textAnnotations": [ # If present, text (OCR) detection has completed successfully.
256          { # Set of detected entity features.
257            "confidence": 3.14, # **Deprecated. Use `score` instead.**
258                # The accuracy of the entity detection in an image.
259                # For example, for an image in which the "Eiffel Tower" entity is detected,
260                # this field represents the confidence that there is a tower in the query
261                # image. Range [0, 1].
262            "description": "A String", # Entity textual description, expressed in its `locale` language.
263            "locale": "A String", # The language code for the locale in which the entity textual
264                # `description` is expressed.
265            "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
266                # image. For example, the relevancy of "tower" is likely higher to an image
267                # containing the detected "Eiffel Tower" than to an image containing a
268                # detected distant towering building, even though the confidence that
269                # there is a tower in each image may be the same. Range [0, 1].
270            "locations": [ # The location information for the detected entity. Multiple
271                # `LocationInfo` elements can be present because one location may
272                # indicate the location of the scene in the image, and another location
273                # may indicate the location of the place where the image was taken.
274                # Location information is usually present for landmarks.
275              { # Detected entity location information.
276                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
277                    # of doubles representing degrees latitude and degrees longitude. Unless
278                    # specified otherwise, this must conform to the
279                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
280                    # standard</a>. Values must be within normalized ranges.
281                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
282                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
283                },
284              },
285            ],
286            "mid": "A String", # Opaque entity ID. Some IDs may be available in
287                # [Google Knowledge Graph Search
288                # API](https://developers.google.com/knowledge-graph/).
289            "score": 3.14, # Overall score of the result. Range [0, 1].
290            "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
291                # for `LABEL_DETECTION` features.
292              "normalizedVertices": [ # The bounding polygon normalized vertices.
293                { # A vertex represents a 2D point in the image.
294                    # NOTE: the normalized vertex coordinates are relative to the original image
295                    # and range from 0 to 1.
296                  "y": 3.14, # Y coordinate.
297                  "x": 3.14, # X coordinate.
298                },
299              ],
300              "vertices": [ # The bounding polygon vertices.
301                { # A vertex represents a 2D point in the image.
302                    # NOTE: the vertex coordinates are in the same scale as the original image.
303                  "y": 42, # Y coordinate.
304                  "x": 42, # X coordinate.
305                },
306              ],
307            },
308            "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
                # fields, such as a score or string that qualifies the entity.
310              { # A `Property` consists of a user-supplied name/value pair.
311                "uint64Value": "A String", # Value of numeric properties.
312                "name": "A String", # Name of the property.
313                "value": "A String", # Value of the property.
314              },
315            ],
316          },
317        ],
318        "webDetection": { # Relevant information for the image from the Internet. # If present, web detection has completed successfully.
319          "fullMatchingImages": [ # Fully matching images from the Internet.
320              # Can include resized copies of the query image.
321            { # Metadata for online images.
322              "url": "A String", # The result image URL.
323              "score": 3.14, # (Deprecated) Overall relevancy score for the image.
324            },
325          ],
326          "pagesWithMatchingImages": [ # Web pages containing the matching images from the Internet.
327            { # Metadata for web pages.
              "pageTitle": "A String", # Title for the web page; may contain HTML markup.
329              "url": "A String", # The result web page URL.
330              "score": 3.14, # (Deprecated) Overall relevancy score for the web page.
331              "partialMatchingImages": [ # Partial matching images on the page.
332                  # Those images are similar enough to share some key-point features. For
                  # example, an original image will likely have partial matching for its
334                  # crops.
335                { # Metadata for online images.
336                  "url": "A String", # The result image URL.
337                  "score": 3.14, # (Deprecated) Overall relevancy score for the image.
338                },
339              ],
340              "fullMatchingImages": [ # Fully matching images on the page.
341                  # Can include resized copies of the query image.
342                { # Metadata for online images.
343                  "url": "A String", # The result image URL.
344                  "score": 3.14, # (Deprecated) Overall relevancy score for the image.
345                },
346              ],
347            },
348          ],
349          "visuallySimilarImages": [ # The visually similar image results.
350            { # Metadata for online images.
351              "url": "A String", # The result image URL.
352              "score": 3.14, # (Deprecated) Overall relevancy score for the image.
353            },
354          ],
355          "partialMatchingImages": [ # Partial matching images from the Internet.
356              # Those images are similar enough to share some key-point features. For
              # example, an original image will likely have partial matching for its crops.
358            { # Metadata for online images.
359              "url": "A String", # The result image URL.
360              "score": 3.14, # (Deprecated) Overall relevancy score for the image.
361            },
362          ],
363          "webEntities": [ # Deduced entities from similar images on the Internet.
364            { # Entity deduced from similar images on the Internet.
365              "entityId": "A String", # Opaque entity ID.
366              "score": 3.14, # Overall relevancy score for the entity.
367                  # Not normalized and not comparable across different image queries.
368              "description": "A String", # Canonical description of the entity, in English.
369            },
370          ],
371          "bestGuessLabels": [ # The service's best guess as to the topic of the request image.
372              # Inferred from similar images on the open web.
373            { # Label to provide extra metadata for the web detection.
374              "languageCode": "A String", # The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
375                  # For more information, see
376                  # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
377              "label": "A String", # Label for extra metadata.
378            },
379          ],
380        },
381        "localizedObjectAnnotations": [ # If present, localized object detection has completed successfully.
            # This will be sorted in descending order by confidence score.
383          { # Set of detected objects with bounding boxes.
384            "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
385                # information, see
386                # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
387            "score": 3.14, # Score of the result. Range [0, 1].
388            "mid": "A String", # Object ID that should align with EntityAnnotation mid.
389            "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this object belongs. This must be populated.
390              "normalizedVertices": [ # The bounding polygon normalized vertices.
391                { # A vertex represents a 2D point in the image.
392                    # NOTE: the normalized vertex coordinates are relative to the original image
393                    # and range from 0 to 1.
394                  "y": 3.14, # Y coordinate.
395                  "x": 3.14, # X coordinate.
396                },
397              ],
398              "vertices": [ # The bounding polygon vertices.
399                { # A vertex represents a 2D point in the image.
400                    # NOTE: the vertex coordinates are in the same scale as the original image.
401                  "y": 42, # Y coordinate.
402                  "x": 42, # X coordinate.
403                },
404              ],
405            },
406            "name": "A String", # Object name, expressed in its `language_code` language.
407          },
408        ],
409        "fullTextAnnotation": { # TextAnnotation contains a structured representation of OCR extracted text. # If present, text (OCR) detection or document (OCR) text detection has
410            # completed successfully.
411            # This annotation provides the structural hierarchy for the OCR detected
412            # text.
413            # The hierarchy of an OCR extracted text structure is like this:
414            #     TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
            # Each structural component, starting from Page, may further have its own
            # properties. Properties describe detected languages, breaks, etc. Please refer
417            # to the TextAnnotation.TextProperty message definition below for more
418            # detail.
419          "text": "A String", # UTF-8 text detected on the pages.
420          "pages": [ # List of pages detected by OCR.
421            { # Detected page from OCR.
422              "width": 42, # Page width. For PDFs the unit is points. For images (including
423                  # TIFFs) the unit is pixels.
424              "confidence": 3.14, # Confidence of the OCR results on the page. Range [0, 1].
425              "property": { # Additional information detected on the structural component. # Additional information detected on the page.
426                "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
427                  "isPrefix": True or False, # True if break prepends the element.
428                  "type": "A String", # Detected break type.
429                },
430                "detectedLanguages": [ # A list of detected languages together with confidence.
431                  { # Detected language for a structural component.
432                    "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
433                        # information, see
434                        # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
435                    "confidence": 3.14, # Confidence of detected language. Range [0, 1].
436                  },
437                ],
438              },
              "blocks": [ # List of blocks of text, images, etc. on this page.
440                { # Logical element on the page.
441                  "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the block.
442                      # The vertices are in the order of top-left, top-right, bottom-right,
                      # bottom-left. When a rotation of the bounding box is detected, the rotation
                      # is represented as a rotation around the top-left corner, as defined when
                      # the text is read in the 'natural' orientation.
446                      # For example:
447                      #
448                      # * when the text is horizontal it might look like:
449                      #
450                      #         0----1
451                      #         |    |
452                      #         3----2
453                      #
454                      # * when it's rotated 180 degrees around the top-left corner it becomes:
455                      #
456                      #         2----3
457                      #         |    |
458                      #         1----0
459                      #
460                      #   and the vertex order will still be (0, 1, 2, 3).
461                    "normalizedVertices": [ # The bounding polygon normalized vertices.
462                      { # A vertex represents a 2D point in the image.
463                          # NOTE: the normalized vertex coordinates are relative to the original image
464                          # and range from 0 to 1.
465                        "y": 3.14, # Y coordinate.
466                        "x": 3.14, # X coordinate.
467                      },
468                    ],
469                    "vertices": [ # The bounding polygon vertices.
470                      { # A vertex represents a 2D point in the image.
471                          # NOTE: the vertex coordinates are in the same scale as the original image.
472                        "y": 42, # Y coordinate.
473                        "x": 42, # X coordinate.
474                      },
475                    ],
476                  },
477                  "confidence": 3.14, # Confidence of the OCR results on the block. Range [0, 1].
478                  "property": { # Additional information detected on the structural component. # Additional information detected for the block.
479                    "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
480                      "isPrefix": True or False, # True if break prepends the element.
481                      "type": "A String", # Detected break type.
482                    },
483                    "detectedLanguages": [ # A list of detected languages together with confidence.
484                      { # Detected language for a structural component.
485                        "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
486                            # information, see
487                            # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
488                        "confidence": 3.14, # Confidence of detected language. Range [0, 1].
489                      },
490                    ],
491                  },
                  "blockType": "A String", # Detected block type (text, image, etc.) for this block.
                  "paragraphs": [ # List of paragraphs in this block (if this block is of type text).
494                    { # Structural unit of text representing a number of words in certain order.
495                      "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the paragraph.
496                          # The vertices are in the order of top-left, top-right, bottom-right,
                          # bottom-left. When a rotation of the bounding box is detected, the rotation
                          # is represented as a rotation around the top-left corner, as defined when
                          # the text is read in the 'natural' orientation.
500                          # For example:
501                          #   * when the text is horizontal it might look like:
502                          #      0----1
503                          #      |    |
504                          #      3----2
505                          #   * when it's rotated 180 degrees around the top-left corner it becomes:
506                          #      2----3
507                          #      |    |
508                          #      1----0
509                          #   and the vertex order will still be (0, 1, 2, 3).
510                        "normalizedVertices": [ # The bounding polygon normalized vertices.
511                          { # A vertex represents a 2D point in the image.
512                              # NOTE: the normalized vertex coordinates are relative to the original image
513                              # and range from 0 to 1.
514                            "y": 3.14, # Y coordinate.
515                            "x": 3.14, # X coordinate.
516                          },
517                        ],
518                        "vertices": [ # The bounding polygon vertices.
519                          { # A vertex represents a 2D point in the image.
520                              # NOTE: the vertex coordinates are in the same scale as the original image.
521                            "y": 42, # Y coordinate.
522                            "x": 42, # X coordinate.
523                          },
524                        ],
525                      },
526                      "confidence": 3.14, # Confidence of the OCR results for the paragraph. Range [0, 1].
527                      "property": { # Additional information detected on the structural component. # Additional information detected for the paragraph.
528                        "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
529                          "isPrefix": True or False, # True if break prepends the element.
530                          "type": "A String", # Detected break type.
531                        },
532                        "detectedLanguages": [ # A list of detected languages together with confidence.
533                          { # Detected language for a structural component.
534                            "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
535                                # information, see
536                                # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
537                            "confidence": 3.14, # Confidence of detected language. Range [0, 1].
538                          },
539                        ],
540                      },
541                      "words": [ # List of words in this paragraph.
542                        { # A word representation.
543                          "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the word.
544                              # The vertices are in the order of top-left, top-right, bottom-right,
                              # bottom-left. When a rotation of the bounding box is detected, the rotation
                              # is represented as a rotation around the top-left corner, as defined when
                              # the text is read in the 'natural' orientation.
548                              # For example:
549                              #   * when the text is horizontal it might look like:
550                              #      0----1
551                              #      |    |
552                              #      3----2
553                              #   * when it's rotated 180 degrees around the top-left corner it becomes:
554                              #      2----3
555                              #      |    |
556                              #      1----0
557                              #   and the vertex order will still be (0, 1, 2, 3).
558                            "normalizedVertices": [ # The bounding polygon normalized vertices.
559                              { # A vertex represents a 2D point in the image.
560                                  # NOTE: the normalized vertex coordinates are relative to the original image
561                                  # and range from 0 to 1.
562                                "y": 3.14, # Y coordinate.
563                                "x": 3.14, # X coordinate.
564                              },
565                            ],
566                            "vertices": [ # The bounding polygon vertices.
567                              { # A vertex represents a 2D point in the image.
568                                  # NOTE: the vertex coordinates are in the same scale as the original image.
569                                "y": 42, # Y coordinate.
570                                "x": 42, # X coordinate.
571                              },
572                            ],
573                          },
574                          "symbols": [ # List of symbols in the word.
575                              # The order of the symbols follows the natural reading order.
576                            { # A single symbol representation.
577                              "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the symbol.
578                                  # The vertices are in the order of top-left, top-right, bottom-right,
                                  # bottom-left. When a rotation of the bounding box is detected, the rotation
                                  # is represented as a rotation around the top-left corner, as defined when
                                  # the text is read in the 'natural' orientation.
582                                  # For example:
583                                  #   * when the text is horizontal it might look like:
584                                  #      0----1
585                                  #      |    |
586                                  #      3----2
587                                  #   * when it's rotated 180 degrees around the top-left corner it becomes:
588                                  #      2----3
589                                  #      |    |
590                                  #      1----0
                                  #   and the vertex order will still be (0, 1, 2, 3).
592                                "normalizedVertices": [ # The bounding polygon normalized vertices.
593                                  { # A vertex represents a 2D point in the image.
594                                      # NOTE: the normalized vertex coordinates are relative to the original image
595                                      # and range from 0 to 1.
596                                    "y": 3.14, # Y coordinate.
597                                    "x": 3.14, # X coordinate.
598                                  },
599                                ],
600                                "vertices": [ # The bounding polygon vertices.
601                                  { # A vertex represents a 2D point in the image.
602                                      # NOTE: the vertex coordinates are in the same scale as the original image.
603                                    "y": 42, # Y coordinate.
604                                    "x": 42, # X coordinate.
605                                  },
606                                ],
607                              },
608                              "text": "A String", # The actual UTF-8 representation of the symbol.
609                              "confidence": 3.14, # Confidence of the OCR results for the symbol. Range [0, 1].
610                              "property": { # Additional information detected on the structural component. # Additional information detected for the symbol.
611                                "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
612                                  "isPrefix": True or False, # True if break prepends the element.
613                                  "type": "A String", # Detected break type.
614                                },
615                                "detectedLanguages": [ # A list of detected languages together with confidence.
616                                  { # Detected language for a structural component.
617                                    "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
618                                        # information, see
619                                        # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
620                                    "confidence": 3.14, # Confidence of detected language. Range [0, 1].
621                                  },
622                                ],
623                              },
624                            },
625                          ],
626                          "confidence": 3.14, # Confidence of the OCR results for the word. Range [0, 1].
627                          "property": { # Additional information detected on the structural component. # Additional information detected for the word.
628                            "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
629                              "isPrefix": True or False, # True if break prepends the element.
630                              "type": "A String", # Detected break type.
631                            },
632                            "detectedLanguages": [ # A list of detected languages together with confidence.
633                              { # Detected language for a structural component.
634                                "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
635                                    # information, see
636                                    # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
637                                "confidence": 3.14, # Confidence of detected language. Range [0, 1].
638                              },
639                            ],
640                          },
641                        },
642                      ],
643                    },
644                  ],
645                },
646              ],
647              "height": 42, # Page height. For PDFs the unit is points. For images (including
648                  # TIFFs) the unit is pixels.
649            },
650          ],
651        },
652        "labelAnnotations": [ # If present, label detection has completed successfully.
653          { # Set of detected entity features.
654            "confidence": 3.14, # **Deprecated. Use `score` instead.**
655                # The accuracy of the entity detection in an image.
656                # For example, for an image in which the "Eiffel Tower" entity is detected,
657                # this field represents the confidence that there is a tower in the query
658                # image. Range [0, 1].
659            "description": "A String", # Entity textual description, expressed in its `locale` language.
660            "locale": "A String", # The language code for the locale in which the entity textual
661                # `description` is expressed.
662            "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
663                # image. For example, the relevancy of "tower" is likely higher to an image
664                # containing the detected "Eiffel Tower" than to an image containing a
665                # detected distant towering building, even though the confidence that
666                # there is a tower in each image may be the same. Range [0, 1].
667            "locations": [ # The location information for the detected entity. Multiple
668                # `LocationInfo` elements can be present because one location may
669                # indicate the location of the scene in the image, and another location
670                # may indicate the location of the place where the image was taken.
671                # Location information is usually present for landmarks.
672              { # Detected entity location information.
673                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
674                    # of doubles representing degrees latitude and degrees longitude. Unless
675                    # specified otherwise, this must conform to the
676                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
677                    # standard</a>. Values must be within normalized ranges.
678                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
679                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
680                },
681              },
682            ],
683            "mid": "A String", # Opaque entity ID. Some IDs may be available in
684                # [Google Knowledge Graph Search
685                # API](https://developers.google.com/knowledge-graph/).
686            "score": 3.14, # Overall score of the result. Range [0, 1].
687            "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
688                # for `LABEL_DETECTION` features.
689              "normalizedVertices": [ # The bounding polygon normalized vertices.
690                { # A vertex represents a 2D point in the image.
691                    # NOTE: the normalized vertex coordinates are relative to the original image
692                    # and range from 0 to 1.
693                  "y": 3.14, # Y coordinate.
694                  "x": 3.14, # X coordinate.
695                },
696              ],
697              "vertices": [ # The bounding polygon vertices.
698                { # A vertex represents a 2D point in the image.
699                    # NOTE: the vertex coordinates are in the same scale as the original image.
700                  "y": 42, # Y coordinate.
701                  "x": 42, # X coordinate.
702                },
703              ],
704            },
705            "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
                # fields, such as a score or string that qualifies the entity.
707              { # A `Property` consists of a user-supplied name/value pair.
708                "uint64Value": "A String", # Value of numeric properties.
709                "name": "A String", # Name of the property.
710                "value": "A String", # Value of the property.
711              },
712            ],
713          },
714        ],
715        "imagePropertiesAnnotation": { # Stores image properties, such as dominant colors. # If present, image properties were extracted successfully.
716          "dominantColors": { # Set of dominant colors and their corresponding scores. # If present, dominant colors completed successfully.
717            "colors": [ # RGB color values with their score and pixel fraction.
              { # Color information consists of RGB channels, score, and the fraction of
                  # the image that the color occupies.
720                "color": { # Represents a color in the RGBA color space. This representation is designed # RGB components of the color.
721                    # for simplicity of conversion to/from color representations in various
722                    # languages over compactness; for example, the fields of this representation
723                    # can be trivially provided to the constructor of "java.awt.Color" in Java; it
724                    # can also be trivially provided to UIColor's "+colorWithRed:green:blue:alpha"
725                    # method in iOS; and, with just a little work, it can be easily formatted into
726                    # a CSS "rgba()" string in JavaScript, as well.
727                    #
728                    # Note: this proto does not carry information about the absolute color space
729                    # that should be used to interpret the RGB value (e.g. sRGB, Adobe RGB,
730                    # DCI-P3, BT.2020, etc.). By default, applications SHOULD assume the sRGB color
731                    # space.
732                    #
733                    # Example (Java):
734                    #
735                    #      import com.google.type.Color;
736                    #
737                    #      // ...
738                    #      public static java.awt.Color fromProto(Color protocolor) {
739                    #        float alpha = protocolor.hasAlpha()
740                    #            ? protocolor.getAlpha().getValue()
                    #            : 1.0f;
742                    #
743                    #        return new java.awt.Color(
744                    #            protocolor.getRed(),
745                    #            protocolor.getGreen(),
746                    #            protocolor.getBlue(),
747                    #            alpha);
748                    #      }
749                    #
750                    #      public static Color toProto(java.awt.Color color) {
751                    #        float red = (float) color.getRed();
752                    #        float green = (float) color.getGreen();
753                    #        float blue = (float) color.getBlue();
                    #        float denominator = 255.0f;
755                    #        Color.Builder resultBuilder =
756                    #            Color
757                    #                .newBuilder()
758                    #                .setRed(red / denominator)
759                    #                .setGreen(green / denominator)
760                    #                .setBlue(blue / denominator);
761                    #        int alpha = color.getAlpha();
762                    #        if (alpha != 255) {
                    #          resultBuilder.setAlpha(
764                    #              FloatValue
765                    #                  .newBuilder()
766                    #                  .setValue(((float) alpha) / denominator)
767                    #                  .build());
768                    #        }
769                    #        return resultBuilder.build();
770                    #      }
771                    #      // ...
772                    #
773                    # Example (iOS / Obj-C):
774                    #
775                    #      // ...
776                    #      static UIColor* fromProto(Color* protocolor) {
777                    #         float red = [protocolor red];
778                    #         float green = [protocolor green];
779                    #         float blue = [protocolor blue];
780                    #         FloatValue* alpha_wrapper = [protocolor alpha];
781                    #         float alpha = 1.0;
782                    #         if (alpha_wrapper != nil) {
783                    #           alpha = [alpha_wrapper value];
784                    #         }
785                    #         return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
786                    #      }
787                    #
788                    #      static Color* toProto(UIColor* color) {
789                    #          CGFloat red, green, blue, alpha;
790                    #          if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) {
791                    #            return nil;
792                    #          }
793                    #          Color* result = [[Color alloc] init];
794                    #          [result setRed:red];
795                    #          [result setGreen:green];
796                    #          [result setBlue:blue];
797                    #          if (alpha <= 0.9999) {
798                    #            [result setAlpha:floatWrapperWithValue(alpha)];
799                    #          }
800                    #          [result autorelease];
801                    #          return result;
802                    #     }
803                    #     // ...
804                    #
805                    #  Example (JavaScript):
806                    #
807                    #     // ...
808                    #
809                    #     var protoToCssColor = function(rgb_color) {
810                    #        var redFrac = rgb_color.red || 0.0;
811                    #        var greenFrac = rgb_color.green || 0.0;
812                    #        var blueFrac = rgb_color.blue || 0.0;
813                    #        var red = Math.floor(redFrac * 255);
814                    #        var green = Math.floor(greenFrac * 255);
815                    #        var blue = Math.floor(blueFrac * 255);
816                    #
817                    #        if (!('alpha' in rgb_color)) {
818                    #           return rgbToCssColor_(red, green, blue);
819                    #        }
820                    #
821                    #        var alphaFrac = rgb_color.alpha.value || 0.0;
822                    #        var rgbParams = [red, green, blue].join(',');
823                    #        return ['rgba(', rgbParams, ',', alphaFrac, ')'].join('');
824                    #     };
825                    #
826                    #     var rgbToCssColor_ = function(red, green, blue) {
827                    #       var rgbNumber = new Number((red << 16) | (green << 8) | blue);
828                    #       var hexString = rgbNumber.toString(16);
829                    #       var missingZeros = 6 - hexString.length;
830                    #       var resultBuilder = ['#'];
831                    #       for (var i = 0; i < missingZeros; i++) {
832                    #          resultBuilder.push('0');
833                    #       }
834                    #       resultBuilder.push(hexString);
835                    #       return resultBuilder.join('');
836                    #     };
837                    #
838                    #     // ...
839                  "blue": 3.14, # The amount of blue in the color as a value in the interval [0, 1].
840                  "alpha": 3.14, # The fraction of this color that should be applied to the pixel. That is,
841                      # the final pixel color is defined by the equation:
842                      #
843                      #   pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
844                      #
845                      # This means that a value of 1.0 corresponds to a solid color, whereas
846                      # a value of 0.0 corresponds to a completely transparent color. This
847                      # uses a wrapper message rather than a simple float scalar so that it is
848                      # possible to distinguish between a default value and the value being unset.
849                      # If omitted, this color object is to be rendered as a solid color
850                      # (as if the alpha value had been explicitly given with a value of 1.0).
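                      # For example, with alpha = 0.25 a pure-red foreground (red = 1.0) over a
                      # black background yields a rendered red channel of
                      # 0.25 * 1.0 + 0.75 * 0.0 = 0.25.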
851                  "green": 3.14, # The amount of green in the color as a value in the interval [0, 1].
852                  "red": 3.14, # The amount of red in the color as a value in the interval [0, 1].
853                },
854                "pixelFraction": 3.14, # The fraction of pixels the color occupies in the image.
855                    # Value in range [0, 1].
856                "score": 3.14, # Image-specific score for this color. Value in range [0, 1].
857              },
858            ],
859          },
860        },
861        "faceAnnotations": [ # If present, face detection has completed successfully.
862          { # A face annotation object contains the results of face detection.
863            "panAngle": 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
864                # pointing relative to the vertical plane perpendicular to the image. Range
865                # [-180,180].
866            "underExposedLikelihood": "A String", # Under-exposed likelihood.
867            "detectionConfidence": 3.14, # Detection confidence. Range [0, 1].
868            "joyLikelihood": "A String", # Joy likelihood.
869            "landmarks": [ # Detected face landmarks.
870              { # A face-specific landmark (for example, a face feature).
871                "position": { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
872                    # A valid Position must have both x and y coordinates.
873                    # The position coordinates are in the same scale as the original image.
874                  "y": 3.14, # Y coordinate.
875                  "x": 3.14, # X coordinate.
876                  "z": 3.14, # Z coordinate (or depth).
877                },
878                "type": "A String", # Face landmark type.
879              },
880            ],
881            "sorrowLikelihood": "A String", # Sorrow likelihood.
882            "blurredLikelihood": "A String", # Blurred likelihood.
883            "tiltAngle": 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
884                # pointing relative to the image's horizontal plane. Range [-180,180].
885            "angerLikelihood": "A String", # Anger likelihood.
886            "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
887                # are in the original image's scale.
888                # The bounding box is computed to "frame" the face in accordance with human
889                # expectations. It is based on the landmarker results.
890                # Note that one or more x and/or y coordinates may not be generated in the
891                # `BoundingPoly` (the polygon will be unbounded) if only a partial face
892                # appears in the image to be annotated.
893              "normalizedVertices": [ # The bounding polygon normalized vertices.
894                { # A vertex represents a 2D point in the image.
895                    # NOTE: the normalized vertex coordinates are relative to the original image
896                    # and range from 0 to 1.
897                  "y": 3.14, # Y coordinate.
898                  "x": 3.14, # X coordinate.
899                },
900              ],
901              "vertices": [ # The bounding polygon vertices.
902                { # A vertex represents a 2D point in the image.
903                    # NOTE: the vertex coordinates are in the same scale as the original image.
904                  "y": 42, # Y coordinate.
905                  "x": 42, # X coordinate.
906                },
907              ],
908            },
909            "rollAngle": 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
910                # of the face relative to the image vertical about the axis perpendicular to
911                # the face. Range [-180,180].
912            "headwearLikelihood": "A String", # Headwear likelihood.
913            "surpriseLikelihood": "A String", # Surprise likelihood.
914            "fdBoundingPoly": { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
915                # `boundingPoly`, and encloses only the skin part of the face. Typically, it
916                # is used to eliminate the face from any image analysis that detects the
917                # "amount of skin" visible in an image. It is not based on the
918                # landmarker results, only on the initial face detection, hence
919                # the <code>fd</code> (face detection) prefix.
920              "normalizedVertices": [ # The bounding polygon normalized vertices.
921                { # A vertex represents a 2D point in the image.
922                    # NOTE: the normalized vertex coordinates are relative to the original image
923                    # and range from 0 to 1.
924                  "y": 3.14, # Y coordinate.
925                  "x": 3.14, # X coordinate.
926                },
927              ],
928              "vertices": [ # The bounding polygon vertices.
929                { # A vertex represents a 2D point in the image.
930                    # NOTE: the vertex coordinates are in the same scale as the original image.
931                  "y": 42, # Y coordinate.
932                  "x": 42, # X coordinate.
933                },
934              ],
935            },
936            "landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
937          },
938        ],
939        "productSearchResults": { # Results for a product search request. # If present, product search has completed successfully.
940          "productGroupedResults": [ # List of results grouped by products detected in the query image. Each entry
941              # corresponds to one bounding polygon in the query image, and contains the
942              # matching products specific to that region. There may be duplicate product
943              # matches in the union of all the per-product results.
944            { # Information about the products similar to a single product in a query
945                # image.
946              "results": [ # List of results, one for each product match.
947                { # Information about a product.
948                  "image": "A String", # The resource name of the image from the product that is the closest match
949                      # to the query.
950                  "score": 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
951                      # 1 (full confidence).
952                  "product": { # A Product contains ReferenceImages. # The Product.
953                    "productLabels": [ # Key-value pairs that can be attached to a product. At query time,
954                        # constraints can be specified based on the product_labels.
955                        #
956                        # Note that integer values can be provided as strings, e.g. "1199". Only
957                        # strings with integer values can match a range-based restriction,
958                        # which will be supported soon.
959                        #
960                        # Multiple values can be assigned to the same key. One product may have up to
961                        # 100 product_labels.
962                      { # A product label represented as a key-value pair.
963                        "key": "A String", # The key of the label attached to the product. Cannot be empty and cannot
964                            # exceed 128 bytes.
965                        "value": "A String", # The value of the label attached to the product. Cannot be empty and
966                            # cannot exceed 128 bytes.
967                      },
968                    ],
969                    "displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
970                        # 4096 characters long.
971                    "name": "A String", # The resource name of the product.
972                        #
973                        # Format is:
974                        # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
975                        #
976                        # This field is ignored when creating a product.
977                    "productCategory": "A String", # The category for the product identified by the reference image. This should
978                        # be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
979                        # "homegoods", "apparel", and "toys" are still supported, but these should
980                        # not be used for new products.
981                        #
982                        # This field is immutable.
983                    "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
984                        # characters long.
985                  },
986                },
987              ],
988              "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the product detected in the query image.
989                "normalizedVertices": [ # The bounding polygon normalized vertices.
990                  { # A vertex represents a 2D point in the image.
991                      # NOTE: the normalized vertex coordinates are relative to the original image
992                      # and range from 0 to 1.
993                    "y": 3.14, # Y coordinate.
994                    "x": 3.14, # X coordinate.
995                  },
996                ],
997                "vertices": [ # The bounding polygon vertices.
998                  { # A vertex represents a 2D point in the image.
999                      # NOTE: the vertex coordinates are in the same scale as the original image.
1000                    "y": 42, # Y coordinate.
1001                    "x": 42, # X coordinate.
1002                  },
1003                ],
1004              },
1005            },
1006          ],
1007          "results": [ # List of results, one for each product match.
1008            { # Information about a product.
1009              "image": "A String", # The resource name of the image from the product that is the closest match
1010                  # to the query.
1011              "score": 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
1012                  # 1 (full confidence).
1013              "product": { # A Product contains ReferenceImages. # The Product.
1014                "productLabels": [ # Key-value pairs that can be attached to a product. At query time,
1015                    # constraints can be specified based on the product_labels.
1016                    #
1017                    # Note that integer values can be provided as strings, e.g. "1199". Only
1018                    # strings with integer values can match a range-based restriction,
1019                    # which will be supported soon.
1020                    #
1021                    # Multiple values can be assigned to the same key. One product may have up to
1022                    # 100 product_labels.
1023                  { # A product label represented as a key-value pair.
1024                    "key": "A String", # The key of the label attached to the product. Cannot be empty and cannot
1025                        # exceed 128 bytes.
1026                    "value": "A String", # The value of the label attached to the product. Cannot be empty and
1027                        # cannot exceed 128 bytes.
1028                  },
1029                ],
1030                "displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
1031                    # 4096 characters long.
1032                "name": "A String", # The resource name of the product.
1033                    #
1034                    # Format is:
1035                    # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
1036                    #
1037                    # This field is ignored when creating a product.
1038                "productCategory": "A String", # The category for the product identified by the reference image. This should
1039                    # be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
1040                    # "homegoods", "apparel", and "toys" are still supported, but these should
1041                    # not be used for new products.
1042                    #
1043                    # This field is immutable.
1044                "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
1045                    # characters long.
1046              },
1047            },
1048          ],
1049          "indexTime": "A String", # Timestamp of the index which provided these results. Products added to the
1050              # product set and products removed from the product set after this time are
1051              # not reflected in the current results.
1052        },
1053        "logoAnnotations": [ # If present, logo detection has completed successfully.
1054          { # Set of detected entity features.
1055            "confidence": 3.14, # **Deprecated. Use `score` instead.**
1056                # The accuracy of the entity detection in an image.
1057                # For example, for an image in which the "Eiffel Tower" entity is detected,
1058                # this field represents the confidence that there is a tower in the query
1059                # image. Range [0, 1].
1060            "description": "A String", # Entity textual description, expressed in its `locale` language.
1061            "locale": "A String", # The language code for the locale in which the entity textual
1062                # `description` is expressed.
1063            "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
1064                # image. For example, the relevancy of "tower" is likely higher to an image
1065                # containing the detected "Eiffel Tower" than to an image containing a
1066                # detected distant towering building, even though the confidence that
1067                # there is a tower in each image may be the same. Range [0, 1].
1068            "locations": [ # The location information for the detected entity. Multiple
1069                # `LocationInfo` elements can be present because one location may
1070                # indicate the location of the scene in the image, and another location
1071                # may indicate the location of the place where the image was taken.
1072                # Location information is usually present for landmarks.
1073              { # Detected entity location information.
1074                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
1075                    # of doubles representing degrees latitude and degrees longitude. Unless
1076                    # specified otherwise, this must conform to the
1077                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
1078                    # standard</a>. Values must be within normalized ranges.
1079                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
1080                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
1081                },
1082              },
1083            ],
1084            "mid": "A String", # Opaque entity ID. Some IDs may be available in
1085                # [Google Knowledge Graph Search
1086                # API](https://developers.google.com/knowledge-graph/).
1087            "score": 3.14, # Overall score of the result. Range [0, 1].
1088            "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
1089                # for `LABEL_DETECTION` features.
1090              "normalizedVertices": [ # The bounding polygon normalized vertices.
1091                { # A vertex represents a 2D point in the image.
1092                    # NOTE: the normalized vertex coordinates are relative to the original image
1093                    # and range from 0 to 1.
1094                  "y": 3.14, # Y coordinate.
1095                  "x": 3.14, # X coordinate.
1096                },
1097              ],
1098              "vertices": [ # The bounding polygon vertices.
1099                { # A vertex represents a 2D point in the image.
1100                    # NOTE: the vertex coordinates are in the same scale as the original image.
1101                  "y": 42, # Y coordinate.
1102                  "x": 42, # X coordinate.
1103                },
1104              ],
1105            },
1106            "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
1107                # fields, such as a score or string that qualifies the entity.
1108              { # A `Property` consists of a user-supplied name/value pair.
1109                "uint64Value": "A String", # Value of numeric properties.
1110                "name": "A String", # Name of the property.
1111                "value": "A String", # Value of the property.
1112              },
1113            ],
1114          },
1115        ],
1116        "landmarkAnnotations": [ # If present, landmark detection has completed successfully.
1117          { # Set of detected entity features.
1118            "confidence": 3.14, # **Deprecated. Use `score` instead.**
1119                # The accuracy of the entity detection in an image.
1120                # For example, for an image in which the "Eiffel Tower" entity is detected,
1121                # this field represents the confidence that there is a tower in the query
1122                # image. Range [0, 1].
1123            "description": "A String", # Entity textual description, expressed in its `locale` language.
1124            "locale": "A String", # The language code for the locale in which the entity textual
1125                # `description` is expressed.
1126            "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
1127                # image. For example, the relevancy of "tower" is likely higher to an image
1128                # containing the detected "Eiffel Tower" than to an image containing a
1129                # detected distant towering building, even though the confidence that
1130                # there is a tower in each image may be the same. Range [0, 1].
1131            "locations": [ # The location information for the detected entity. Multiple
1132                # `LocationInfo` elements can be present because one location may
1133                # indicate the location of the scene in the image, and another location
1134                # may indicate the location of the place where the image was taken.
1135                # Location information is usually present for landmarks.
1136              { # Detected entity location information.
1137                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
1138                    # of doubles representing degrees latitude and degrees longitude. Unless
1139                    # specified otherwise, this must conform to the
1140                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
1141                    # standard</a>. Values must be within normalized ranges.
1142                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
1143                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
1144                },
1145              },
1146            ],
1147            "mid": "A String", # Opaque entity ID. Some IDs may be available in
1148                # [Google Knowledge Graph Search
1149                # API](https://developers.google.com/knowledge-graph/).
1150            "score": 3.14, # Overall score of the result. Range [0, 1].
1151            "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
1152                # for `LABEL_DETECTION` features.
1153              "normalizedVertices": [ # The bounding polygon normalized vertices.
1154                { # A vertex represents a 2D point in the image.
1155                    # NOTE: the normalized vertex coordinates are relative to the original image
1156                    # and range from 0 to 1.
1157                  "y": 3.14, # Y coordinate.
1158                  "x": 3.14, # X coordinate.
1159                },
1160              ],
1161              "vertices": [ # The bounding polygon vertices.
1162                { # A vertex represents a 2D point in the image.
1163                    # NOTE: the vertex coordinates are in the same scale as the original image.
1164                  "y": 42, # Y coordinate.
1165                  "x": 42, # X coordinate.
1166                },
1167              ],
1168            },
1169            "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
1170                # fields, such as a score or string that qualifies the entity.
1171              { # A `Property` consists of a user-supplied name/value pair.
1172                "uint64Value": "A String", # Value of numeric properties.
1173                "name": "A String", # Name of the property.
1174                "value": "A String", # Value of the property.
1175              },
1176            ],
1177          },
1178        ],
1179        "context": { # If an image was produced from a file (e.g. a PDF), this message gives # If present, contextual information is needed to understand where this image
1180            # comes from.
1181            # information about the source of that image.
1182          "pageNumber": 42, # If the file was a PDF or TIFF, this field gives the page number within
1183              # the file used to produce the image.
1184          "uri": "A String", # The URI of the file used to produce the image.
1185        },
1186        "error": { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the operation.
1187            # Note that filled-in image annotations are guaranteed to be
1188            # correct, even when `error` is set.
1189            # different programming environments, including REST APIs and RPC APIs. It is
1190            # used by [gRPC](https://github.com/grpc). Each `Status` message contains
1191            # three pieces of data: error code, error message, and error details.
1192            #
1193            # You can find out more about this error model and how to work with it in the
1194            # [API Design Guide](https://cloud.google.com/apis/design/errors).
1195          "message": "A String", # A developer-facing error message, which should be in English. Any
1196              # user-facing error message should be localized and sent in the
1197              # google.rpc.Status.details field, or localized by the client.
1198          "code": 42, # The status code, which should be an enum value of google.rpc.Code.
1199          "details": [ # A list of messages that carry the error details.  There is a common set of
1200              # message types for APIs to use.
1201            {
1202              "a_key": "", # Properties of the object. Contains field @type with type URL.
1203            },
1204          ],
1205        },
1206        "cropHintsAnnotation": { # Set of crop hints that are used to generate new crops when serving images. # If present, crop hints have completed successfully.
1207          "cropHints": [ # Crop hint results.
1208            { # Single crop hint that is used to generate a new crop when serving an image.
1209              "confidence": 3.14, # Confidence of this being a salient region.  Range [0, 1].
1210              "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon for the crop region. The coordinates of the bounding
1211                  # box are in the original image's scale.
1212                "normalizedVertices": [ # The bounding polygon normalized vertices.
1213                  { # A vertex represents a 2D point in the image.
1214                      # NOTE: the normalized vertex coordinates are relative to the original image
1215                      # and range from 0 to 1.
1216                    "y": 3.14, # Y coordinate.
1217                    "x": 3.14, # X coordinate.
1218                  },
1219                ],
1220                "vertices": [ # The bounding polygon vertices.
1221                  { # A vertex represents a 2D point in the image.
1222                      # NOTE: the vertex coordinates are in the same scale as the original image.
1223                    "y": 42, # Y coordinate.
1224                    "x": 42, # X coordinate.
1225                  },
1226                ],
1227              },
1228              "importanceFraction": 3.14, # Fraction of importance of this salient region with respect to the original
1229                  # image.
1230            },
1231          ],
1232        },
1233      },
1234    ],
1235  }</pre>
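<p>A minimal usage sketch (not part of the generated reference): the request body documented above can be built as a plain Python dict and passed to <code>images().annotate()</code> via the google-api-python-client. The credential setup and the <code>gs://</code> image URI below are illustrative assumptions, not values from this document.</p>
<pre># Sketch only: assumes google-api-python-client is installed and credentials
# (Application Default Credentials or an API key) are configured for Cloud Vision.
from googleapiclient.discovery import build

service = build('vision', 'v1')

request_body = {
    'requests': [
        {
            'image': {
                # Hypothetical image location; `content` (base64 bytes) could be used instead.
                'source': {'imageUri': 'gs://your-bucket/your-image.jpg'},
            },
            'features': [
                {'type': 'FACE_DETECTION', 'maxResults': 5},
                {'type': 'LABEL_DETECTION', 'maxResults': 10},
            ],
        },
    ],
}

response = service.images().annotate(body=request_body).execute()

# Each entry in `responses` mirrors the response schema documented above.
for face in response['responses'][0].get('faceAnnotations', []):
    print(face['joyLikelihood'], face['detectionConfidence'])</pre>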
1236</div>
1237
1238<div class="method">
1239    <code class="details" id="asyncBatchAnnotate">asyncBatchAnnotate(body, x__xgafv=None)</code>
1240  <pre>Run asynchronous image detection and annotation for a list of images.
1241
1242Progress and results can be retrieved through the
1243`google.longrunning.Operations` interface.
1244`Operation.metadata` contains `OperationMetadata` (metadata).
1245`Operation.response` contains `AsyncBatchAnnotateImagesResponse` (results).
1246
1247This service writes image annotation outputs to JSON files in the customer's
1248GCS bucket, with each JSON file containing a BatchAnnotateImagesResponse proto.
1249
1250Args:
1251  body: object, The request body. (required)
1252    The object takes the form of:
1253
1254{ # Request for async image annotation for a list of images.
1255    "outputConfig": { # The desired output location and metadata. # Required. The desired output location and metadata (e.g. format).
1256      "batchSize": 42, # The max number of response protos to put into each output JSON file on
1257          # Google Cloud Storage.
1258          # The valid range is [1, 100]. If not specified, the default value is 20.
1259          #
1260          # For example, for one PDF file with 100 pages, 100 response protos will
1261          # be generated. If `batch_size` = 20, then 5 JSON files, each
1262          # containing 20 response protos, will be written under the prefix
1263          # `gcs_destination`.`uri`.
1264          #
1265          # Currently, batch_size only applies to GcsDestination, with potential future
1266          # support for other output configurations.
1267      "gcsDestination": { # The Google Cloud Storage location where the output will be written to. # The Google Cloud Storage location to write the output(s) to.
1268        "uri": "A String", # Google Cloud Storage URI prefix where the results will be stored. Results
1269            # will be in JSON format and preceded by their corresponding input URI prefix.
1270            # This field can represent either a GCS file prefix or a GCS directory. In
1271            # either case, the URI should be unique, because to get all of the
1272            # output files you will need to do a wildcard GCS search on the URI prefix
1273            # you provide.
1274            #
1275            # Examples:
1276            #
1277            # *    File Prefix: gs://bucket-name/here/filenameprefix   The output files
1278            # will be created in gs://bucket-name/here/ and the names of the
1279            # output files will begin with "filenameprefix".
1280            #
1281            # *    Directory Prefix: gs://bucket-name/some/location/   The output files
1282            # will be created in gs://bucket-name/some/location/ and the names of the
1283            # output files could be anything because there was no filename prefix
1284            # specified.
1285            #
1286            # If there are multiple outputs, each response is still an AnnotateFileResponse,
1287            # each of which contains some subset of the full list of AnnotateImageResponse.
1288            # Multiple outputs can happen if, for example, the output JSON is too large
1289            # and overflows into multiple sharded files.
1290      },
1291    },
1292    "requests": [ # Individual image annotation requests for this batch.
1293      { # Request for performing Google Cloud Vision API tasks over a user-provided
1294          # image, with user-requested features, and with context information.
1295        "imageContext": { # Image context and/or feature-specific parameters. # Additional context that may accompany the image.
1296          "latLongRect": { # Rectangle determined by min and max `LatLng` pairs. # Not used.
1297            "minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
1298                # of doubles representing degrees latitude and degrees longitude. Unless
1299                # specified otherwise, this must conform to the
1300                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
1301                # standard</a>. Values must be within normalized ranges.
1302              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
1303              "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
1304            },
1305            "maxLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
1306                # of doubles representing degrees latitude and degrees longitude. Unless
1307                # specified otherwise, this must conform to the
1308                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
1309                # standard</a>. Values must be within normalized ranges.
1310              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
1311              "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
1312            },
1313          },
1314          "languageHints": [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
1315              # yields the best results since it enables automatic language detection. For
1316              # languages based on the Latin alphabet, setting `language_hints` is not
1317              # needed. In rare cases, when the language of the text in the image is known,
1318              # setting a hint will help get better results (although it will be a
1319              # significant hindrance if the hint is wrong). Text detection returns an
1320              # error if one or more of the specified languages is not one of the
1321              # [supported languages](/vision/docs/languages).
1322            "A String",
1323          ],
1324          "productSearchParams": { # Parameters for a product search request. # Parameters for product search.
1325            "filter": "A String", # The filtering expression. This can be used to restrict search results based
1326                # on Product labels. We currently support an AND of ORs of key-value
1327                # expressions, where each expression within an OR must have the same key. An
1328                # '=' should be used to connect the key and value.
1329                #
1330                # For example, "(color = red OR color = blue) AND brand = Google" is
1331                # acceptable, but "(color = red OR brand = Google)" is not acceptable.
1332                # "color: red" is not acceptable because it uses a ':' instead of an '='.
1333            "productCategories": [ # The list of product categories to search in. Currently, we only consider
1334                # the first category, and either "homegoods-v2", "apparel-v2", or "toys-v2"
1335                # should be specified. The legacy categories "homegoods", "apparel", and
1336                # "toys" are still supported but will be deprecated. For new products, please
1337                # use "homegoods-v2", "apparel-v2", or "toys-v2" for better product search
1338                # accuracy. It is recommended to migrate existing products to these
1339                # categories as well.
1340              "A String",
1341            ],
1342            "productSet": "A String", # The resource name of a ProductSet to be searched for similar images.
1343                #
1344                # Format is:
1345                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
1346            "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
1347                # Optional. If it is not specified, system discretion will be applied.
1348              "normalizedVertices": [ # The bounding polygon normalized vertices.
1349                { # A vertex represents a 2D point in the image.
1350                    # NOTE: the normalized vertex coordinates are relative to the original image
1351                    # and range from 0 to 1.
1352                  "y": 3.14, # Y coordinate.
1353                  "x": 3.14, # X coordinate.
1354                },
1355              ],
1356              "vertices": [ # The bounding polygon vertices.
1357                { # A vertex represents a 2D point in the image.
1358                    # NOTE: the vertex coordinates are in the same scale as the original image.
1359                  "y": 42, # Y coordinate.
1360                  "x": 42, # X coordinate.
1361                },
1362              ],
1363            },
1364          },
1365          "cropHintsParams": { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
1366            "aspectRatios": [ # Aspect ratios in floats, representing the ratio of the width to the height
1367                # of the image. For example, if the desired aspect ratio is 4/3, the
1368                # corresponding float value should be 1.33333.  If not specified, the
1369                # best possible crop is returned. The number of provided aspect ratios is
1370                # limited to a maximum of 16; any aspect ratios provided after the 16th are
1371                # ignored.
1372              3.14,
1373            ],
1374          },
1375          "webDetectionParams": { # Parameters for web detection request. # Parameters for web detection.
1376            "includeGeoResults": True or False, # Whether to include results derived from the geo information in the image.
1377          },
1378        },
1379        "image": { # Client image to perform Google Cloud Vision API tasks over. # The image to be processed.
1380          "content": "A String", # Image content, represented as a stream of bytes.
1381              # Note: As with all `bytes` fields, protocol buffers use a pure binary
1382              # representation, whereas JSON representations use base64.
1383          "source": { # External image source (Google Cloud Storage or web URL image location). # Google Cloud Storage image location, or publicly-accessible image
1384              # URL. If both `content` and `source` are provided for an image, `content`
1385              # takes precedence and is used to perform the image annotation request.
1386            "gcsImageUri": "A String", # **Use `image_uri` instead.**
1387                #
1388                # The Google Cloud Storage URI of the form
1389                # `gs://bucket_name/object_name`. Object versioning is not supported. See
1390                # [Google Cloud Storage Request
1391                # URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
1392            "imageUri": "A String", # The URI of the source image. Can be either:
1393                #
1394                # 1. A Google Cloud Storage URI of the form
1395                #    `gs://bucket_name/object_name`. Object versioning is not supported. See
1396                #    [Google Cloud Storage Request
1397                #    URIs](https://cloud.google.com/storage/docs/reference-uris) for more
1398                #    info.
1399                #
1400                # 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
1401                #    HTTP/HTTPS URLs, Google cannot guarantee that the request will be
1402                #    completed. Your request may fail if the specified host denies the
1403                #    request (e.g. due to request throttling or DOS prevention), or if Google
1404                #    throttles requests to the site for abuse prevention. You should not
1405                #    depend on externally-hosted images for production applications.
1406                #
1407                # When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
1408                # precedence.
1409          },
1410        },
1411        "features": [ # Requested features.
1412          { # The type of Google Cloud Vision API detection to perform, and the maximum
1413              # number of results to return for that type. Multiple `Feature` objects can
1414              # be specified in the `features` list.
1415            "model": "A String", # Model to use for the feature.
1416                # Supported values: "builtin/stable" (the default if unset) and
1417                # "builtin/latest".
1418            "type": "A String", # The feature type.
1419            "maxResults": 42, # Maximum number of results of this type. Does not apply to
1420                # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
1421          },
1422        ],
1423      },
1424    ],
1425  }
1426
1427  x__xgafv: string, V1 error format.
1428    Allowed values
1429      1 - v1 error format
1430      2 - v2 error format
1431
1432Returns:
1433  An object of the form:
1434
1435    { # This resource represents a long-running operation that is the result of a
1436      # network API call.
1437    "response": { # The normal response of the operation in case of success.  If the original
1438        # method returns no data on success, such as `Delete`, the response is
1439        # `google.protobuf.Empty`.  If the original method is standard
1440        # `Get`/`Create`/`Update`, the response should be the resource.  For other
1441        # methods, the response should have the type `XxxResponse`, where `Xxx`
1442        # is the original method name.  For example, if the original method name
1443        # is `TakeSnapshot()`, the inferred response type is
1444        # `TakeSnapshotResponse`.
1445      "a_key": "", # Properties of the object. Contains field @type with type URL.
1446    },
1447    "metadata": { # Service-specific metadata associated with the operation.  It typically
1448        # contains progress information and common metadata such as create time.
1449        # Some services might not provide such metadata.  Any method that returns a
1450        # long-running operation should document the metadata type, if any.
1451      "a_key": "", # Properties of the object. Contains field @type with type URL.
1452    },
1453    "done": True or False, # If the value is `false`, it means the operation is still in progress.
1454        # If `true`, the operation is completed, and either `error` or `response` is
1455        # available.
1456    "name": "A String", # The server-assigned name, which is only unique within the same service that
1457        # originally returns it. If you use the default HTTP mapping, the
1458        # `name` should be a resource name ending with `operations/{unique_id}`.
1459    "error": { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
1460        # different programming environments, including REST APIs and RPC APIs. It is
1461        # used by [gRPC](https://github.com/grpc). Each `Status` message contains
1462        # three pieces of data: error code, error message, and error details.
1463        #
1464        # You can find out more about this error model and how to work with it in the
1465        # [API Design Guide](https://cloud.google.com/apis/design/errors).
1466      "message": "A String", # A developer-facing error message, which should be in English. Any
1467          # user-facing error message should be localized and sent in the
1468          # google.rpc.Status.details field, or localized by the client.
1469      "code": 42, # The status code, which should be an enum value of google.rpc.Code.
1470      "details": [ # A list of messages that carry the error details.  There is a common set of
1471          # message types for APIs to use.
1472        {
1473          "a_key": "", # Properties of the object. Contains field @type with type URL.
1474        },
1475      ],
1476    },
1477  }</pre>
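<p>A rough sketch (not part of the generated reference) of driving this method with the google-api-python-client: the bucket names are placeholders, and polling is shown through the top-level <code>operations()</code> collection as one way to use the <code>google.longrunning.Operations</code> interface mentioned above; the exact operations resource to poll depends on the operation name returned.</p>
<pre># Sketch only: assumes credentials are configured and the GCS paths exist.
import time

from googleapiclient.discovery import build

service = build('vision', 'v1')

request_body = {
    'requests': [
        {
            'image': {'source': {'imageUri': 'gs://your-bucket/photo-001.jpg'}},  # placeholder
            'features': [{'type': 'LABEL_DETECTION'}],
        },
    ],
    'outputConfig': {
        'gcsDestination': {'uri': 'gs://your-bucket/vision-output/'},  # placeholder prefix
        'batchSize': 20,
    },
}

operation = service.images().asyncBatchAnnotate(body=request_body).execute()

# Poll until `done` is set; results are then in JSON files under the GCS prefix.
name = operation['name']
while not operation.get('done'):
    time.sleep(5)
    operation = service.operations().get(name=name).execute()

if 'error' in operation:
    print('Operation failed:', operation['error'].get('message'))
else:
    print('Output written under gs://your-bucket/vision-output/')</pre>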
1478</div>
1479
1480</body></html>