<html><body>
<style>

body, h1, h2, h3, div, span, p, pre, a {
  margin: 0;
  padding: 0;
  border: 0;
  font-weight: inherit;
  font-style: inherit;
  font-size: 100%;
  font-family: inherit;
  vertical-align: baseline;
}

body {
  font-size: 13px;
  padding: 1em;
}

h1 {
  font-size: 26px;
  margin-bottom: 1em;
}

h2 {
  font-size: 24px;
  margin-bottom: 1em;
}

h3 {
  font-size: 20px;
  margin-bottom: 1em;
  margin-top: 1em;
}

pre, code {
  line-height: 1.5;
  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
  margin-top: 0.5em;
}

h1, h2, h3, p {
  font-family: Arial, sans-serif;
}

h1, h2, h3 {
  border-bottom: solid #CCC 1px;
}

.toc_element {
  margin-top: 0.5em;
}

.firstline {
  margin-left: 2em;
}

.method {
  margin-top: 1em;
  border: solid 1px #CCC;
  padding: 1em;
  background: #EEE;
}

.details {
  font-weight: bold;
  font-size: 14px;
}

</style>

<h1><a href="dataflow_v1b3.html">Dataflow API</a> . <a href="dataflow_v1b3.projects.html">projects</a> . <a href="dataflow_v1b3.projects.locations.html">locations</a> . <a href="dataflow_v1b3.projects.locations.jobs.html">jobs</a> . <a href="dataflow_v1b3.projects.locations.jobs.workItems.html">workItems</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
  <code><a href="#lease">lease(projectId, location, jobId, body, x__xgafv=None)</a></code></p>
<p class="firstline">Leases a dataflow WorkItem to run.</p>
<p class="toc_element">
  <code><a href="#reportStatus">reportStatus(projectId, location, jobId, body, x__xgafv=None)</a></code></p>
<p class="firstline">Reports the status of dataflow WorkItems leased by a worker.</p>
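<p>For orientation, here is a minimal sketch of how a worker might invoke these methods through the generated Python client (google-api-python-client). The project, location, job, and worker identifiers are placeholder assumptions, not values taken from this page:</p>
<pre>
from googleapiclient.discovery import build

# Build the Dataflow API client; application-default credentials are assumed.
service = build('dataflow', 'v1b3')

work_items = service.projects().locations().jobs().workItems()

# Lease WorkItems for a (hypothetical) job this worker belongs to.
response = work_items.lease(
    projectId='my-project',           # placeholder
    location='us-central1',           # placeholder regional endpoint
    jobId='2019-01-01_00_00_00-123',  # placeholder job ID
    body={
        'workerId': 'harness-0',
        'requestedLeaseDuration': '300s',
    }).execute()

# Report status for each leased WorkItem; the reportStatus body shape is
# documented under reportStatus (an empty status list is shown here only
# to illustrate the call).
for item in response.get('workItems', []):
    work_items.reportStatus(
        projectId='my-project',
        location='us-central1',
        jobId='2019-01-01_00_00_00-123',
        body={'workerId': 'harness-0', 'workItemStatuses': []}).execute()
</pre>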
<h3>Method Details</h3>
<div class="method">
    <code class="details" id="lease">lease(projectId, location, jobId, body, x__xgafv=None)</code>
  <pre>Leases a dataflow WorkItem to run.

Args:
  projectId: string, Identifies the project this worker belongs to. (required)
  location: string, The [regional endpoint]
(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that
contains the WorkItem's job. (required)
  jobId: string, Identifies the workflow job this worker belongs to. (required)
  body: object, The request body. (required)
    The object takes the form of:

{ # Request to lease WorkItems.
    "workItemTypes": [ # Filter for WorkItem type.
      "A String",
    ],
    "workerCapabilities": [ # Worker capabilities. WorkItems might be limited to workers with specific
        # capabilities.
      "A String",
    ],
    "currentWorkerTime": "A String", # The current timestamp at the worker.
    "requestedLeaseDuration": "A String", # The initial lease period.
    "workerId": "A String", # Identifies the worker leasing work -- typically the ID of the
        # virtual machine running the worker.
    "unifiedWorkerRequest": { # Untranslated bag-of-bytes WorkRequest from UnifiedWorker.
      "a_key": "", # Properties of the object. Contains field @type with type URL.
    },
    "location": "A String", # The [regional endpoint]
        # (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that
        # contains the WorkItem's job.
  }

  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format
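
  For illustration only (every value below is an assumption, not a default),
  a populated request body might look like:

    {
      "workerId": "myproject-1014-104817-4c2-harness-0",
      "currentWorkerTime": "2019-01-01T00:00:00Z",
      "requestedLeaseDuration": "300s",
      "workItemTypes": ["map_task"],
      "location": "us-central1",
    }
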
Returns:
  An object of the form:

    { # Response to a request to lease WorkItems.
    "workItems": [ # A list of the leased WorkItems.
      { # WorkItem represents basic information about a WorkItem to be executed
          # in the cloud.
        "packages": [ # Any required packages that need to be fetched in order to execute
            # this WorkItem.
          { # The packages that must be installed in order for a worker to run the
              # steps of the Cloud Dataflow job that will be assigned to its worker
              # pool.
              #
              # This is the mechanism by which the Cloud Dataflow SDK causes code to
              # be loaded onto the workers. For example, the Cloud Dataflow Java SDK
              # might use this to install jars containing the user's code and all of the
              # various dependencies (libraries, data files, etc.) required in order
              # for that code to run.
            "location": "A String", # The resource to read the package from. The supported resource type is:
                #
                # Google Cloud Storage:
                #
                #   storage.googleapis.com/{bucket}
                #   bucket.storage.googleapis.com/
            "name": "A String", # The name of the package.
          },
        ],
        "leaseExpireTime": "A String", # Time when the lease on this Work will expire.
        "seqMapTask": { # Describes a particular function to invoke. # Additional information for SeqMapTask WorkItems.
          "inputs": [ # Information about each of the inputs.
            { # Information about a side input of a DoFn or an input of a SeqDoFn.
              "sources": [ # The source(s) to read element(s) from to get the value of this side input.
                  # If more than one source, then the elements are taken from the
                  # sources, in the specified order if order matters.
                  # At least one source is required.
                { # A source that records can be read and decoded from.
                  "codec": { # The codec to use to decode data read from the source.
                    "a_key": "", # Properties of the object.
                  },
                  "baseSpecs": [ # While splitting, sources may specify the produced bundles
                      # as differences against another source, in order to save backend-side
                      # memory and allow bigger jobs. For details, see SourceSplitRequest.
                      # To support this use case, the full set of parameters of the source
                      # is logically obtained by taking the latest explicitly specified value
                      # of each parameter in the order:
                      # base_specs (later items win), spec (overrides anything in base_specs).
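                      # For example (hypothetical values): with
                      #   base_specs = [{"path": "a", "shard": 1}, {"shard": 2}]
                      # and spec = {"path": "b"}, the effective source parameters
                      # are {"path": "b", "shard": 2} -- later base_specs entries
                      # win, and spec overrides them all.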
                    {
                      "a_key": "", # Properties of the object.
                    },
                  ],
                  "spec": { # The source to read from, plus its parameters.
                    "a_key": "", # Properties of the object.
                  },
                  "doesNotNeedSplitting": True or False, # Setting this value to true hints to the framework that the source
                      # doesn't need splitting, and using SourceSplitRequest on it would
                      # yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
                      #
                      # E.g. a file splitter may set this to true when splitting a single file
                      # into a set of byte ranges of appropriate size, and set this
                      # to false when splitting a filepattern into individual files.
                      # However, for efficiency, a file splitter may decide to produce
                      # file subranges directly from the filepattern to avoid a splitting
                      # round-trip.
                      #
                      # See SourceSplitRequest for an overview of the splitting process.
                      #
                      # This field is meaningful only in the Source objects populated
                      # by the user (e.g. when filling in a DerivedSource).
                      # Source objects supplied by the framework to the user don't have
                      # this field populated.
                  "metadata": { # Metadata about a Source useful for automatically optimizing # Optionally, metadata for this source can be supplied right away,
                      # avoiding a SourceGetMetadataOperation roundtrip
                      # (see SourceOperationRequest).
                      #
                      # This field is meaningful only in the Source objects populated
                      # by the user (e.g. when filling in a DerivedSource).
                      # Source objects supplied by the framework to the user don't have
                      # this field populated.
                      # and tuning the pipeline, etc.
                    "infinite": True or False, # Specifies that the size of this source is known to be infinite
                        # (this is a streaming source).
                    "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
                        # read from this source.  This estimate is in terms of external storage
                        # size, before any decompression or other processing done by the reader.
                    "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
                        # the (encoded) keys in lexicographically sorted order.
                  },
                },
              ],
              "kind": { # How to interpret the source element(s) as a side input value.
                "a_key": "", # Properties of the object.
              },
              "tag": "A String", # The id of the tag the user code will access this side input by;
                  # this should correspond to the tag of some MultiOutputInfo.
            },
          ],
          "name": "A String", # The user-provided name of the SeqDo operation.
          "stageName": "A String", # System-defined name of the stage containing the SeqDo operation.
              # Unique across the workflow.
          "systemName": "A String", # System-defined name of the SeqDo operation.
              # Unique across the workflow.
          "userFn": { # The user function to invoke.
            "a_key": "", # Properties of the object.
          },
          "outputInfos": [ # Information about each of the outputs.
            { # Information about an output of a SeqMapTask.
              "tag": "A String", # The id of the TupleTag the user code will tag the output value by.
              "sink": { # A sink that records can be encoded and written to. # The sink to write the output value to.
                "codec": { # The codec to use to encode data written to the sink.
                  "a_key": "", # Properties of the object.
                },
                "spec": { # The sink to write to, plus its parameters.
                  "a_key": "", # Properties of the object.
                },
              },
            },
          ],
        },
        "projectId": "A String", # Identifies the cloud project this WorkItem belongs to.
        "streamingComputationTask": { # A task which describes what action should be performed for the specified # Additional information for StreamingComputationTask WorkItems.
            # streaming computation ranges.
          "taskType": "A String", # A type of streaming computation task.
          "computationRanges": [ # Contains ranges of a streaming computation this task should apply to.
            { # Describes full or partial data disk assignment information of the computation
                # ranges.
              "rangeAssignments": [ # Data disk assignments for ranges from this computation.
                { # Data disk assignment information for a specific key-range of a sharded
                    # computation.
                    # Currently we only support UTF-8 character splits to simplify encoding into
                    # JSON.
                  "start": "A String", # The start (inclusive) of the key range.
                  "end": "A String", # The end (exclusive) of the key range.
                  "dataDisk": "A String", # The name of the data disk where data for this range is stored.
                      # This name is local to the Google Cloud Platform project and uniquely
                      # identifies the disk within that project, for example
                      # "myproject-1014-104817-4c2-harness-0-disk-1".
                },
              ],
              "computationId": "A String", # The ID of the computation.
            },
          ],
          "dataDisks": [ # Describes the set of data disks this task should apply to.
            { # Describes mounted data disk.
              "dataDisk": "A String", # The name of the data disk.
                  # This name is local to the Google Cloud Platform project and uniquely
                  # identifies the disk within that project, for example
                  # "myproject-1014-104817-4c2-harness-0-disk-1".
            },
          ],
        },
        "initialReportIndex": "A String", # The initial index to use when reporting the status of the WorkItem.
        "mapTask": { # MapTask consists of an ordered set of instructions, each of which # Additional information for MapTask WorkItems.
            # describes one particular low-level operation for the worker to
            # perform in order to accomplish the MapTask's WorkItem.
            #
            # Each instruction must appear in the list before any instructions that
            # depend on its output.
          "systemName": "A String", # System-defined name of this MapTask.
              # Unique across the workflow.
          "counterPrefix": "A String", # Counter prefix that can be used to prefix counters. Not currently used in
              # Dataflow.
          "stageName": "A String", # System-defined name of the stage containing this MapTask.
              # Unique across the workflow.
          "instructions": [ # The instructions in the MapTask.
            { # Describes a particular operation comprising a MapTask.
              "name": "A String", # User-provided name of this operation.
              "read": { # An instruction that reads records. # Additional information for Read instructions.
                  # Takes no inputs, produces one output.
                "source": { # A source that records can be read and decoded from. # The source to read from.
                  "codec": { # The codec to use to decode data read from the source.
                    "a_key": "", # Properties of the object.
                  },
                  "baseSpecs": [ # While splitting, sources may specify the produced bundles
                      # as differences against another source, in order to save backend-side
                      # memory and allow bigger jobs. For details, see SourceSplitRequest.
                      # To support this use case, the full set of parameters of the source
                      # is logically obtained by taking the latest explicitly specified value
                      # of each parameter in the order:
                      # base_specs (later items win), spec (overrides anything in base_specs).
                    {
                      "a_key": "", # Properties of the object.
                    },
                  ],
                  "spec": { # The source to read from, plus its parameters.
                    "a_key": "", # Properties of the object.
                  },
                  "doesNotNeedSplitting": True or False, # Setting this value to true hints to the framework that the source
                      # doesn't need splitting, and using SourceSplitRequest on it would
                      # yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
                      #
                      # E.g. a file splitter may set this to true when splitting a single file
                      # into a set of byte ranges of appropriate size, and set this
                      # to false when splitting a filepattern into individual files.
                      # However, for efficiency, a file splitter may decide to produce
                      # file subranges directly from the filepattern to avoid a splitting
                      # round-trip.
                      #
                      # See SourceSplitRequest for an overview of the splitting process.
                      #
                      # This field is meaningful only in the Source objects populated
                      # by the user (e.g. when filling in a DerivedSource).
                      # Source objects supplied by the framework to the user don't have
                      # this field populated.
                  "metadata": { # Metadata about a Source useful for automatically optimizing # Optionally, metadata for this source can be supplied right away,
                      # avoiding a SourceGetMetadataOperation roundtrip
                      # (see SourceOperationRequest).
                      #
                      # This field is meaningful only in the Source objects populated
                      # by the user (e.g. when filling in a DerivedSource).
                      # Source objects supplied by the framework to the user don't have
                      # this field populated.
                      # and tuning the pipeline, etc.
                    "infinite": True or False, # Specifies that the size of this source is known to be infinite
                        # (this is a streaming source).
                    "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
                        # read from this source.  This estimate is in terms of external storage
                        # size, before any decompression or other processing done by the reader.
                    "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
                        # the (encoded) keys in lexicographically sorted order.
                  },
                },
              },
              "outputs": [ # Describes the outputs of the instruction.
                { # An output of an instruction.
                  "name": "A String", # The user-provided name of this output.
                  "onlyCountKeyBytes": True or False, # For system-generated byte and mean byte metrics, certain instructions
                      # should only report the key size.
                  "codec": { # The codec to use to encode data being written via this output.
                    "a_key": "", # Properties of the object.
                  },
                  "systemName": "A String", # System-defined name of this output.
                      # Unique across the workflow.
                  "originalName": "A String", # System-defined name for this output in the original workflow graph.
                      # Outputs that do not contribute to an original instruction do not set this.
                  "onlyCountValueBytes": True or False, # For system-generated byte and mean byte metrics, certain instructions
                      # should only report the value size.
                },
              ],
              "partialGroupByKey": { # An instruction that does a partial group-by-key. # Additional information for PartialGroupByKey instructions.
                  # One input and one output.
                "sideInputs": [ # Zero or more side inputs.
                  { # Information about a side input of a DoFn or an input of a SeqDoFn.
                    "sources": [ # The source(s) to read element(s) from to get the value of this side input.
                        # If more than one source, then the elements are taken from the
                        # sources, in the specified order if order matters.
                        # At least one source is required.
                      { # A source that records can be read and decoded from.
                        "codec": { # The codec to use to decode data read from the source.
                          "a_key": "", # Properties of the object.
                        },
                        "baseSpecs": [ # While splitting, sources may specify the produced bundles
                            # as differences against another source, in order to save backend-side
                            # memory and allow bigger jobs. For details, see SourceSplitRequest.
                            # To support this use case, the full set of parameters of the source
                            # is logically obtained by taking the latest explicitly specified value
                            # of each parameter in the order:
                            # base_specs (later items win), spec (overrides anything in base_specs).
                          {
                            "a_key": "", # Properties of the object.
                          },
                        ],
                        "spec": { # The source to read from, plus its parameters.
                          "a_key": "", # Properties of the object.
                        },
                        "doesNotNeedSplitting": True or False, # Setting this value to true hints to the framework that the source
                            # doesn't need splitting, and using SourceSplitRequest on it would
                            # yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
                            #
                            # E.g. a file splitter may set this to true when splitting a single file
                            # into a set of byte ranges of appropriate size, and set this
                            # to false when splitting a filepattern into individual files.
                            # However, for efficiency, a file splitter may decide to produce
                            # file subranges directly from the filepattern to avoid a splitting
                            # round-trip.
                            #
                            # See SourceSplitRequest for an overview of the splitting process.
                            #
                            # This field is meaningful only in the Source objects populated
                            # by the user (e.g. when filling in a DerivedSource).
                            # Source objects supplied by the framework to the user don't have
                            # this field populated.
                        "metadata": { # Metadata about a Source useful for automatically optimizing # Optionally, metadata for this source can be supplied right away,
                            # avoiding a SourceGetMetadataOperation roundtrip
                            # (see SourceOperationRequest).
                            #
                            # This field is meaningful only in the Source objects populated
                            # by the user (e.g. when filling in a DerivedSource).
                            # Source objects supplied by the framework to the user don't have
                            # this field populated.
                            # and tuning the pipeline, etc.
                          "infinite": True or False, # Specifies that the size of this source is known to be infinite
                              # (this is a streaming source).
                          "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
                              # read from this source.  This estimate is in terms of external storage
                              # size, before any decompression or other processing done by the reader.
                          "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
                              # the (encoded) keys in lexicographically sorted order.
                        },
                      },
                    ],
                    "kind": { # How to interpret the source element(s) as a side input value.
                      "a_key": "", # Properties of the object.
                    },
                    "tag": "A String", # The id of the tag the user code will access this side input by;
                        # this should correspond to the tag of some MultiOutputInfo.
                  },
                ],
                "originalCombineValuesInputStoreName": "A String", # If this instruction includes a combining function, this is the name of the
                    # intermediate store between the GBK and the CombineValues.
                "originalCombineValuesStepName": "A String", # If this instruction includes a combining function, this is the name of the
                    # CombineValues instruction lifted into this instruction.
                "valueCombiningFn": { # The value combining function to invoke.
                  "a_key": "", # Properties of the object.
                },
                "input": { # An input of an instruction, as a reference to an output of a # Describes the input to the partial group-by-key instruction.
                    # producer instruction.
                  "outputNum": 42, # The output index (origin zero) within the producer.
                  "producerInstructionIndex": 42, # The index (origin zero) of the parallel instruction that produces
                      # the output to be consumed by this input.  This index is relative
                      # to the list of instructions in this input's instruction's
                      # containing MapTask.
                },
                "inputElementCodec": { # The codec to use for interpreting an element in the input PTable.
                  "a_key": "", # Properties of the object.
                },
              },
              "write": { # An instruction that writes records. # Additional information for Write instructions.
                  # Takes one input, produces no outputs.
                "input": { # An input of an instruction, as a reference to an output of a # The input.
                    # producer instruction.
                  "outputNum": 42, # The output index (origin zero) within the producer.
                  "producerInstructionIndex": 42, # The index (origin zero) of the parallel instruction that produces
                      # the output to be consumed by this input.  This index is relative
                      # to the list of instructions in this input's instruction's
                      # containing MapTask.
                },
                "sink": { # A sink that records can be encoded and written to. # The sink to write to.
                  "codec": { # The codec to use to encode data written to the sink.
                    "a_key": "", # Properties of the object.
                  },
                  "spec": { # The sink to write to, plus its parameters.
                    "a_key": "", # Properties of the object.
                  },
                },
              },
              "systemName": "A String", # System-defined name of this operation.
                  # Unique across the workflow.
              "flatten": { # An instruction that copies its inputs (zero or more) to its (single) output. # Additional information for Flatten instructions.
                "inputs": [ # Describes the inputs to the flatten instruction.
                  { # An input of an instruction, as a reference to an output of a
                      # producer instruction.
                    "outputNum": 42, # The output index (origin zero) within the producer.
                    "producerInstructionIndex": 42, # The index (origin zero) of the parallel instruction that produces
                        # the output to be consumed by this input.  This index is relative
                        # to the list of instructions in this input's instruction's
                        # containing MapTask.
                  },
                ],
              },
              "originalName": "A String", # System-defined name for the operation in the original workflow graph.
              "parDo": { # An instruction that does a ParDo operation. # Additional information for ParDo instructions.
                  # Takes one main input and zero or more side inputs, and produces
                  # zero or more outputs.
                  # Runs user code.
                "sideInputs": [ # Zero or more side inputs.
                  { # Information about a side input of a DoFn or an input of a SeqDoFn.
                    "sources": [ # The source(s) to read element(s) from to get the value of this side input.
                        # If more than one source, then the elements are taken from the
                        # sources, in the specified order if order matters.
                        # At least one source is required.
                      { # A source that records can be read and decoded from.
                        "codec": { # The codec to use to decode data read from the source.
                          "a_key": "", # Properties of the object.
                        },
                        "baseSpecs": [ # While splitting, sources may specify the produced bundles
                            # as differences against another source, in order to save backend-side
                            # memory and allow bigger jobs. For details, see SourceSplitRequest.
                            # To support this use case, the full set of parameters of the source
                            # is logically obtained by taking the latest explicitly specified value
                            # of each parameter in the order:
                            # base_specs (later items win), spec (overrides anything in base_specs).
                          {
                            "a_key": "", # Properties of the object.
                          },
                        ],
                        "spec": { # The source to read from, plus its parameters.
                          "a_key": "", # Properties of the object.
                        },
                        "doesNotNeedSplitting": True or False, # Setting this value to true hints to the framework that the source
                            # doesn't need splitting, and using SourceSplitRequest on it would
                            # yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
                            #
                            # E.g. a file splitter may set this to true when splitting a single file
                            # into a set of byte ranges of appropriate size, and set this
                            # to false when splitting a filepattern into individual files.
                            # However, for efficiency, a file splitter may decide to produce
                            # file subranges directly from the filepattern to avoid a splitting
                            # round-trip.
                            #
                            # See SourceSplitRequest for an overview of the splitting process.
                            #
                            # This field is meaningful only in the Source objects populated
                            # by the user (e.g. when filling in a DerivedSource).
                            # Source objects supplied by the framework to the user don't have
                            # this field populated.
                        "metadata": { # Metadata about a Source useful for automatically optimizing # Optionally, metadata for this source can be supplied right away,
                            # avoiding a SourceGetMetadataOperation roundtrip
                            # (see SourceOperationRequest).
                            #
                            # This field is meaningful only in the Source objects populated
                            # by the user (e.g. when filling in a DerivedSource).
                            # Source objects supplied by the framework to the user don't have
                            # this field populated.
                            # and tuning the pipeline, etc.
                          "infinite": True or False, # Specifies that the size of this source is known to be infinite
                              # (this is a streaming source).
                          "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
                              # read from this source.  This estimate is in terms of external storage
                              # size, before any decompression or other processing done by the reader.
                          "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
                              # the (encoded) keys in lexicographically sorted order.
                        },
                      },
                    ],
                    "kind": { # How to interpret the source element(s) as a side input value.
                      "a_key": "", # Properties of the object.
                    },
                    "tag": "A String", # The id of the tag the user code will access this side input by;
                        # this should correspond to the tag of some MultiOutputInfo.
                  },
                ],
                "input": { # An input of an instruction, as a reference to an output of a # The input.
                    # producer instruction.
                  "outputNum": 42, # The output index (origin zero) within the producer.
                  "producerInstructionIndex": 42, # The index (origin zero) of the parallel instruction that produces
                      # the output to be consumed by this input.  This index is relative
                      # to the list of instructions in this input's instruction's
                      # containing MapTask.
                },
                "multiOutputInfos": [ # Information about each of the outputs, if user_fn is a MultiDoFn.
                  { # Information about an output of a multi-output DoFn.
                    "tag": "A String", # The id of the tag the user code will emit to this output by; this
                        # should correspond to the tag of some SideInputInfo.
                  },
                ],
                "numOutputs": 42, # The number of outputs.
                "userFn": { # The user function to invoke.
                  "a_key": "", # Properties of the object.
                },
              },
            },
          ],
        },
        "jobId": "A String", # Identifies the workflow job this WorkItem belongs to.
        "reportStatusInterval": "A String", # Recommended reporting interval.
        "sourceOperationTask": { # A work item that represents the different operations that can be # Additional information for source operation WorkItems.
            # performed on a user-defined Source specification.
          "name": "A String", # User-provided name of the Read instruction for this source.
          "stageName": "A String", # System-defined name of the stage containing the source operation.
              # Unique across the workflow.
          "getMetadata": { # A request to compute the SourceMetadata of a Source. # Information about a request to get metadata about a source.
            "source": { # A source that records can be read and decoded from. # Specification of the source whose metadata should be computed.
              "codec": { # The codec to use to decode data read from the source.
                "a_key": "", # Properties of the object.
              },
              "baseSpecs": [ # While splitting, sources may specify the produced bundles
                  # as differences against another source, in order to save backend-side
                  # memory and allow bigger jobs. For details, see SourceSplitRequest.
                  # To support this use case, the full set of parameters of the source
                  # is logically obtained by taking the latest explicitly specified value
                  # of each parameter in the order:
                  # base_specs (later items win), spec (overrides anything in base_specs).
                {
                  "a_key": "", # Properties of the object.
                },
              ],
              "spec": { # The source to read from, plus its parameters.
                "a_key": "", # Properties of the object.
              },
              "doesNotNeedSplitting": True or False, # Setting this value to true hints to the framework that the source
                  # doesn't need splitting, and using SourceSplitRequest on it would
                  # yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
                  #
                  # E.g. a file splitter may set this to true when splitting a single file
                  # into a set of byte ranges of appropriate size, and set this
                  # to false when splitting a filepattern into individual files.
                  # However, for efficiency, a file splitter may decide to produce
                  # file subranges directly from the filepattern to avoid a splitting
                  # round-trip.
                  #
                  # See SourceSplitRequest for an overview of the splitting process.
                  #
                  # This field is meaningful only in the Source objects populated
                  # by the user (e.g. when filling in a DerivedSource).
                  # Source objects supplied by the framework to the user don't have
                  # this field populated.
              "metadata": { # Metadata about a Source useful for automatically optimizing # Optionally, metadata for this source can be supplied right away,
                  # avoiding a SourceGetMetadataOperation roundtrip
                  # (see SourceOperationRequest).
                  #
                  # This field is meaningful only in the Source objects populated
                  # by the user (e.g. when filling in a DerivedSource).
                  # Source objects supplied by the framework to the user don't have
                  # this field populated.
                  # and tuning the pipeline, etc.
                "infinite": True or False, # Specifies that the size of this source is known to be infinite
                    # (this is a streaming source).
                "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
                    # read from this source.  This estimate is in terms of external storage
                    # size, before any decompression or other processing done by the reader.
                "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
                    # the (encoded) keys in lexicographically sorted order.
              },
            },
          },
          "systemName": "A String", # System-defined name of the Read instruction for this source.
              # Unique across the workflow.
          "split": { # Represents the operation to split a high-level Source specification # Information about a request to split a source.
              # into bundles (parts for parallel processing).
              #
              # At a high level, splitting of a source into bundles happens as follows:
              # SourceSplitRequest is applied to the source. If it returns
              # SOURCE_SPLIT_OUTCOME_USE_CURRENT, no further splitting happens and the source
              # is used "as is". Otherwise, splitting is applied recursively to each
              # produced DerivedSource.
              #
              # As an optimization, for any Source, if its does_not_need_splitting is
              # true, the framework assumes that splitting this source would return
              # SOURCE_SPLIT_OUTCOME_USE_CURRENT, and doesn't initiate a SourceSplitRequest.
              # This applies both to the initial source being split and to bundles
              # produced from it.
            "source": { # A source that records can be read and decoded from. # Specification of the source to be split.
              "codec": { # The codec to use to decode data read from the source.
                "a_key": "", # Properties of the object.
              },
              "baseSpecs": [ # While splitting, sources may specify the produced bundles
                  # as differences against another source, in order to save backend-side
                  # memory and allow bigger jobs. For details, see SourceSplitRequest.
                  # To support this use case, the full set of parameters of the source
                  # is logically obtained by taking the latest explicitly specified value
                  # of each parameter in the order:
                  # base_specs (later items win), spec (overrides anything in base_specs).
                {
                  "a_key": "", # Properties of the object.
                },
              ],
              "spec": { # The source to read from, plus its parameters.
                "a_key": "", # Properties of the object.
              },
              "doesNotNeedSplitting": True or False, # Setting this value to true hints to the framework that the source
                  # doesn't need splitting, and using SourceSplitRequest on it would
                  # yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
                  #
                  # E.g. a file splitter may set this to true when splitting a single file
                  # into a set of byte ranges of appropriate size, and set this
                  # to false when splitting a filepattern into individual files.
                  # However, for efficiency, a file splitter may decide to produce
                  # file subranges directly from the filepattern to avoid a splitting
                  # round-trip.
                  #
                  # See SourceSplitRequest for an overview of the splitting process.
                  #
                  # This field is meaningful only in the Source objects populated
                  # by the user (e.g. when filling in a DerivedSource).
                  # Source objects supplied by the framework to the user don't have
                  # this field populated.
              "metadata": { # Metadata about a Source useful for automatically optimizing # Optionally, metadata for this source can be supplied right away,
                  # avoiding a SourceGetMetadataOperation roundtrip
                  # (see SourceOperationRequest).
                  #
                  # This field is meaningful only in the Source objects populated
                  # by the user (e.g. when filling in a DerivedSource).
                  # Source objects supplied by the framework to the user don't have
                  # this field populated.
                  # and tuning the pipeline, etc.
                "infinite": True or False, # Specifies that the size of this source is known to be infinite
                    # (this is a streaming source).
                "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
                    # read from this source.  This estimate is in terms of external storage
                    # size, before any decompression or other processing done by the reader.
                "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
                    # the (encoded) keys in lexicographically sorted order.
              },
            },
            "options": { # Hints for splitting a Source into bundles (parts for parallel # Hints for tuning the splitting process.
                # processing) using SourceSplitRequest.
              "desiredShardSizeBytes": "A String", # DEPRECATED in favor of desired_bundle_size_bytes.
              "desiredBundleSizeBytes": "A String", # The source should be split into a set of bundles where the estimated size
                  # of each is approximately this many bytes.
            },
          },
          "originalName": "A String", # System-defined name for the Read instruction for this source
              # in the original workflow graph.
        },
        "streamingSetupTask": { # A task which initializes part of a streaming Dataflow job. # Additional information for StreamingSetupTask WorkItems.
          "snapshotConfig": { # Streaming appliance snapshot configuration. # Configures streaming appliance snapshot.
            "snapshotId": "A String", # If set, indicates the snapshot id for the snapshot being performed.
            "importStateEndpoint": "A String", # Indicates which endpoint is used to import appliance state.
          },
          "workerHarnessPort": 42, # The TCP port used by the worker to communicate with the Dataflow
              # worker harness.
          "drain": True or False, # The user has requested drain.
          "streamingComputationTopology": { # Global topology of the streaming Dataflow job, including all # The global topology of the streaming Dataflow job.
              # computations and their sharded locations.
            "computations": [ # The computations associated with a streaming Dataflow job.
              { # All configuration data for a particular Computation.
                "inputs": [ # The inputs to the computation.
                  { # Describes a stream of data, either as input to be processed or as
                      # output of a streaming Dataflow job.
                    "streamingStageLocation": { # Identifies the location of a streaming computation stage, for # The stream is part of another computation within the current
                        # streaming Dataflow job.
                        # stage-to-stage communication.
                      "streamId": "A String", # Identifies the particular stream within the streaming Dataflow
                          # job.
                    },
                    "pubsubLocation": { # Identifies a pubsub location to use for transferring data into or # The stream is a pubsub stream.
                        # out of a streaming Dataflow job.
                      "idLabel": "A String", # If set, contains a pubsub label from which to extract record ids.
                          # If left empty, record deduplication will be strictly best effort.
                      "timestampLabel": "A String", # If set, contains a pubsub label from which to extract record timestamps.
                          # If left empty, record timestamps will be generated upon arrival.
                      "dropLateData": True or False, # Indicates whether the pipeline allows late-arriving data.
                      "topic": "A String", # A pubsub topic, in the form of
                          # "pubsub.googleapis.com/topics/<project-id>/<topic-name>"
                      "trackingSubscription": "A String", # If set, specifies the pubsub subscription that will be used for tracking
                          # custom time timestamps for watermark estimation.
                      "withAttributes": True or False, # If true, then the client has requested to get pubsub attributes.
                      "subscription": "A String", # A pubsub subscription, in the form of
                          # "pubsub.googleapis.com/subscriptions/<project-id>/<subscription-name>"
                    },
                    "customSourceLocation": { # Identifies the location of a custom source. # The stream is a custom source.
                      "stateful": True or False, # Whether this source is stateful.
                    },
                    "sideInputLocation": { # Identifies the location of a streaming side input. # The stream is a streaming side input.
                      "stateFamily": "A String", # Identifies the state family where this side input is stored.
                      "tag": "A String", # Identifies the particular side input within the streaming Dataflow job.
                    },
                  },
                ],
                "outputs": [ # The outputs from the computation.
                  { # Describes a stream of data, either as input to be processed or as
                      # output of a streaming Dataflow job.
                    "streamingStageLocation": { # Identifies the location of a streaming computation stage, for # The stream is part of another computation within the current
                        # streaming Dataflow job.
                        # stage-to-stage communication.
                      "streamId": "A String", # Identifies the particular stream within the streaming Dataflow
                          # job.
                    },
                    "pubsubLocation": { # Identifies a pubsub location to use for transferring data into or # The stream is a pubsub stream.
                        # out of a streaming Dataflow job.
                      "idLabel": "A String", # If set, contains a pubsub label from which to extract record ids.
                          # If left empty, record deduplication will be strictly best effort.
                      "timestampLabel": "A String", # If set, contains a pubsub label from which to extract record timestamps.
                          # If left empty, record timestamps will be generated upon arrival.
                      "dropLateData": True or False, # Indicates whether the pipeline allows late-arriving data.
                      "topic": "A String", # A pubsub topic, in the form of
                          # "pubsub.googleapis.com/topics/<project-id>/<topic-name>"
                      "trackingSubscription": "A String", # If set, specifies the pubsub subscription that will be used for tracking
                          # custom time timestamps for watermark estimation.
                      "withAttributes": True or False, # If true, then the client has requested to get pubsub attributes.
                      "subscription": "A String", # A pubsub subscription, in the form of
                          # "pubsub.googleapis.com/subscriptions/<project-id>/<subscription-name>"
                    },
                    "customSourceLocation": { # Identifies the location of a custom source. # The stream is a custom source.
                      "stateful": True or False, # Whether this source is stateful.
                    },
                    "sideInputLocation": { # Identifies the location of a streaming side input. # The stream is a streaming side input.
                      "stateFamily": "A String", # Identifies the state family where this side input is stored.
                      "tag": "A String", # Identifies the particular side input within the streaming Dataflow job.
                    },
                  },
                ],
                "keyRanges": [ # The key ranges processed by the computation.
                  { # Location information for a specific key-range of a sharded computation.
                      # Currently we only support UTF-8 character splits to simplify encoding into
                      # JSON.
                    "deprecatedPersistentDirectory": "A String", # DEPRECATED. The location of the persistent state for this range, as a
                        # persistent directory in the worker local filesystem.
                    "start": "A String", # The start (inclusive) of the key range.
                    "deliveryEndpoint": "A String", # The physical location of this range assignment to be used for
                        # streaming computation cross-worker message delivery.
                    "end": "A String", # The end (exclusive) of the key range.
                    "dataDisk": "A String", # The name of the data disk where data for this range is stored.
                        # This name is local to the Google Cloud Platform project and uniquely
                        # identifies the disk within that project, for example
                        # "myproject-1014-104817-4c2-harness-0-disk-1".
                  },
                ],
                "computationId": "A String", # The ID of the computation.
                "systemStageName": "A String", # The system stage name.
                "stateFamilies": [ # The state family values.
                  { # State family configuration.
                    "stateFamily": "A String", # The state family value.
                    "isRead": True or False, # If true, this family corresponds to a read operation.
                  },
                ],
              },
            ],
            "dataDiskAssignments": [ # The disks assigned to a streaming Dataflow job.
              { # Data disk assignment for a given VM instance.
                "vmInstance": "A String", # VM instance name the data disks are mounted to, for example
                    # "myproject-1014-104817-4c2-harness-0".
                "dataDisks": [ # Mounted data disks. The order is important: a data disk's 0-based index in
                    # this list defines which persistent directory the disk is mounted to; for
                    # example, the list of { "myproject-1014-104817-4c2-harness-0-disk-0" },
                    # { "myproject-1014-104817-4c2-harness-0-disk-1" }.
828                  "A String",
829                ],
830              },
831            ],
832            "persistentStateVersion": 42, # Version number for persistent state.
833            "userStageToComputationNameMap": { # Maps user stage names to stable computation names.
834              "a_key": "A String",
835            },
836            "forwardingKeyBits": 42, # The size (in bits) of keys that will be assigned to source messages.
837          },
838          "receiveWorkPort": 42, # The TCP port on which the worker should listen for messages from
839              # other streaming computation workers.
840        },
841        "streamingConfigTask": { # A task that carries configuration information for streaming computations. # Additional information for StreamingConfigTask WorkItems.
842          "userStepToStateFamilyNameMap": { # Map from user step names to state families.
843            "a_key": "A String",
844          },
845          "windmillServicePort": "A String", # If present, the worker must use this port to communicate with Windmill
846              # Service dispatchers. Only applicable when windmill_service_endpoint is
847              # specified.
848          "streamingComputationConfigs": [ # Set of computation configuration information.
849            { # Configuration information for a single streaming computation.
              "transformUserNameToStateFamily": { # Map from user names of stateful transforms in this stage to their state
                  # families.
852                "a_key": "A String",
853              },
854              "computationId": "A String", # Unique identifier for this computation.
855              "systemName": "A String", # System defined name for this computation.
856              "stageName": "A String", # Stage name of this computation.
857              "instructions": [ # Instructions that comprise the computation.
858                { # Describes a particular operation comprising a MapTask.
859                  "name": "A String", # User-provided name of this operation.
860                  "read": { # An instruction that reads records. # Additional information for Read instructions.
861                      # Takes no inputs, produces one output.
862                    "source": { # A source that records can be read and decoded from. # The source to read from.
863                      "codec": { # The codec to use to decode data read from the source.
864                        "a_key": "", # Properties of the object.
865                      },
866                      "baseSpecs": [ # While splitting, sources may specify the produced bundles
867                          # as differences against another source, in order to save backend-side
868                          # memory and allow bigger jobs. For details, see SourceSplitRequest.
869                          # To support this use case, the full set of parameters of the source
870                          # is logically obtained by taking the latest explicitly specified value
871                          # of each parameter in the order:
872                          # base_specs (later items win), spec (overrides anything in base_specs).
873                        {
874                          "a_key": "", # Properties of the object.
875                        },
876                      ],
877                      "spec": { # The source to read from, plus its parameters.
878                        "a_key": "", # Properties of the object.
879                      },
880                      "doesNotNeedSplitting": True or False, # Setting this value to true hints to the framework that the source
881                          # doesn't need splitting, and using SourceSplitRequest on it would
882                          # yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
883                          #
884                          # E.g. a file splitter may set this to true when splitting a single file
885                          # into a set of byte ranges of appropriate size, and set this
886                          # to false when splitting a filepattern into individual files.
887                          # However, for efficiency, a file splitter may decide to produce
888                          # file subranges directly from the filepattern to avoid a splitting
889                          # round-trip.
890                          #
891                          # See SourceSplitRequest for an overview of the splitting process.
892                          #
893                          # This field is meaningful only in the Source objects populated
894                          # by the user (e.g. when filling in a DerivedSource).
895                          # Source objects supplied by the framework to the user don't have
896                          # this field populated.
                      "metadata": { # Metadata about a Source useful for automatically optimizing
                          # and tuning the pipeline, etc.
                          #
                          # Optionally, metadata for this source can be supplied right away,
                          # avoiding a SourceGetMetadataOperation roundtrip
                          # (see SourceOperationRequest).
                          #
                          # This field is meaningful only in the Source objects populated
                          # by the user (e.g. when filling in a DerivedSource).
                          # Source objects supplied by the framework to the user don't have
                          # this field populated.
906                        "infinite": True or False, # Specifies that the size of this source is known to be infinite
907                            # (this is a streaming source).
908                        "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
909                            # read from this source.  This estimate is in terms of external storage
910                            # size, before any decompression or other processing done by the reader.
911                        "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
912                            # the (encoded) keys in lexicographically sorted order.
913                      },
914                    },
915                  },
916                  "outputs": [ # Describes the outputs of the instruction.
917                    { # An output of an instruction.
918                      "name": "A String", # The user-provided name of this output.
919                      "onlyCountKeyBytes": True or False, # For system-generated byte and mean byte metrics, certain instructions
920                          # should only report the key size.
921                      "codec": { # The codec to use to encode data being written via this output.
922                        "a_key": "", # Properties of the object.
923                      },
924                      "systemName": "A String", # System-defined name of this output.
925                          # Unique across the workflow.
926                      "originalName": "A String", # System-defined name for this output in the original workflow graph.
927                          # Outputs that do not contribute to an original instruction do not set this.
928                      "onlyCountValueBytes": True or False, # For system-generated byte and mean byte metrics, certain instructions
929                          # should only report the value size.
930                    },
931                  ],
932                  "partialGroupByKey": { # An instruction that does a partial group-by-key. # Additional information for PartialGroupByKey instructions.
933                      # One input and one output.
934                    "sideInputs": [ # Zero or more side inputs.
935                      { # Information about a side input of a DoFn or an input of a SeqDoFn.
                        "sources": [ # The source(s) to read element(s) from to get the value of this side input.
                            # If more than one source is specified, the elements are taken from the
                            # sources in the specified order, if order matters.
                            # At least one source is required.
940                          { # A source that records can be read and decoded from.
941                            "codec": { # The codec to use to decode data read from the source.
942                              "a_key": "", # Properties of the object.
943                            },
944                            "baseSpecs": [ # While splitting, sources may specify the produced bundles
945                                # as differences against another source, in order to save backend-side
946                                # memory and allow bigger jobs. For details, see SourceSplitRequest.
947                                # To support this use case, the full set of parameters of the source
948                                # is logically obtained by taking the latest explicitly specified value
949                                # of each parameter in the order:
950                                # base_specs (later items win), spec (overrides anything in base_specs).
951                              {
952                                "a_key": "", # Properties of the object.
953                              },
954                            ],
955                            "spec": { # The source to read from, plus its parameters.
956                              "a_key": "", # Properties of the object.
957                            },
958                            "doesNotNeedSplitting": True or False, # Setting this value to true hints to the framework that the source
959                                # doesn't need splitting, and using SourceSplitRequest on it would
960                                # yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
961                                #
962                                # E.g. a file splitter may set this to true when splitting a single file
963                                # into a set of byte ranges of appropriate size, and set this
964                                # to false when splitting a filepattern into individual files.
965                                # However, for efficiency, a file splitter may decide to produce
966                                # file subranges directly from the filepattern to avoid a splitting
967                                # round-trip.
968                                #
969                                # See SourceSplitRequest for an overview of the splitting process.
970                                #
971                                # This field is meaningful only in the Source objects populated
972                                # by the user (e.g. when filling in a DerivedSource).
973                                # Source objects supplied by the framework to the user don't have
974                                # this field populated.
                            "metadata": { # Metadata about a Source useful for automatically optimizing
                                # and tuning the pipeline, etc.
                                #
                                # Optionally, metadata for this source can be supplied right away,
                                # avoiding a SourceGetMetadataOperation roundtrip
                                # (see SourceOperationRequest).
                                #
                                # This field is meaningful only in the Source objects populated
                                # by the user (e.g. when filling in a DerivedSource).
                                # Source objects supplied by the framework to the user don't have
                                # this field populated.
984                              "infinite": True or False, # Specifies that the size of this source is known to be infinite
985                                  # (this is a streaming source).
986                              "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
987                                  # read from this source.  This estimate is in terms of external storage
988                                  # size, before any decompression or other processing done by the reader.
989                              "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
990                                  # the (encoded) keys in lexicographically sorted order.
991                            },
992                          },
993                        ],
994                        "kind": { # How to interpret the source element(s) as a side input value.
995                          "a_key": "", # Properties of the object.
996                        },
997                        "tag": "A String", # The id of the tag the user code will access this side input by;
998                            # this should correspond to the tag of some MultiOutputInfo.
999                      },
1000                    ],
                    "originalCombineValuesInputStoreName": "A String", # If this instruction includes a combining function, this is the name of the
                        # intermediate store between the GBK and the CombineValues.
1003                    "originalCombineValuesStepName": "A String", # If this instruction includes a combining function, this is the name of the
1004                        # CombineValues instruction lifted into this instruction.
1005                    "valueCombiningFn": { # The value combining function to invoke.
1006                      "a_key": "", # Properties of the object.
1007                    },
1008                    "input": { # An input of an instruction, as a reference to an output of a # Describes the input to the partial group-by-key instruction.
1009                        # producer instruction.
1010                      "outputNum": 42, # The output index (origin zero) within the producer.
1011                      "producerInstructionIndex": 42, # The index (origin zero) of the parallel instruction that produces
1012                          # the output to be consumed by this input.  This index is relative
1013                          # to the list of instructions in this input's instruction's
1014                          # containing MapTask.
1015                    },
1016                    "inputElementCodec": { # The codec to use for interpreting an element in the input PTable.
1017                      "a_key": "", # Properties of the object.
1018                    },
1019                  },
1020                  "write": { # An instruction that writes records. # Additional information for Write instructions.
1021                      # Takes one input, produces no outputs.
1022                    "input": { # An input of an instruction, as a reference to an output of a # The input.
1023                        # producer instruction.
1024                      "outputNum": 42, # The output index (origin zero) within the producer.
1025                      "producerInstructionIndex": 42, # The index (origin zero) of the parallel instruction that produces
1026                          # the output to be consumed by this input.  This index is relative
1027                          # to the list of instructions in this input's instruction's
1028                          # containing MapTask.
1029                    },
1030                    "sink": { # A sink that records can be encoded and written to. # The sink to write to.
1031                      "codec": { # The codec to use to encode data written to the sink.
1032                        "a_key": "", # Properties of the object.
1033                      },
1034                      "spec": { # The sink to write to, plus its parameters.
1035                        "a_key": "", # Properties of the object.
1036                      },
1037                    },
1038                  },
1039                  "systemName": "A String", # System-defined name of this operation.
1040                      # Unique across the workflow.
1041                  "flatten": { # An instruction that copies its inputs (zero or more) to its (single) output. # Additional information for Flatten instructions.
1042                    "inputs": [ # Describes the inputs to the flatten instruction.
1043                      { # An input of an instruction, as a reference to an output of a
1044                          # producer instruction.
1045                        "outputNum": 42, # The output index (origin zero) within the producer.
1046                        "producerInstructionIndex": 42, # The index (origin zero) of the parallel instruction that produces
1047                            # the output to be consumed by this input.  This index is relative
1048                            # to the list of instructions in this input's instruction's
1049                            # containing MapTask.
1050                      },
1051                    ],
1052                  },
1053                  "originalName": "A String", # System-defined name for the operation in the original workflow graph.
1054                  "parDo": { # An instruction that does a ParDo operation. # Additional information for ParDo instructions.
1055                      # Takes one main input and zero or more side inputs, and produces
1056                      # zero or more outputs.
1057                      # Runs user code.
1058                    "sideInputs": [ # Zero or more side inputs.
1059                      { # Information about a side input of a DoFn or an input of a SeqDoFn.
                        "sources": [ # The source(s) to read element(s) from to get the value of this side input.
                            # If more than one source is specified, the elements are taken from the
                            # sources in the specified order, if order matters.
                            # At least one source is required.
1064                          { # A source that records can be read and decoded from.
1065                            "codec": { # The codec to use to decode data read from the source.
1066                              "a_key": "", # Properties of the object.
1067                            },
1068                            "baseSpecs": [ # While splitting, sources may specify the produced bundles
1069                                # as differences against another source, in order to save backend-side
1070                                # memory and allow bigger jobs. For details, see SourceSplitRequest.
1071                                # To support this use case, the full set of parameters of the source
1072                                # is logically obtained by taking the latest explicitly specified value
1073                                # of each parameter in the order:
1074                                # base_specs (later items win), spec (overrides anything in base_specs).
1075                              {
1076                                "a_key": "", # Properties of the object.
1077                              },
1078                            ],
1079                            "spec": { # The source to read from, plus its parameters.
1080                              "a_key": "", # Properties of the object.
1081                            },
1082                            "doesNotNeedSplitting": True or False, # Setting this value to true hints to the framework that the source
1083                                # doesn't need splitting, and using SourceSplitRequest on it would
1084                                # yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
1085                                #
1086                                # E.g. a file splitter may set this to true when splitting a single file
1087                                # into a set of byte ranges of appropriate size, and set this
1088                                # to false when splitting a filepattern into individual files.
1089                                # However, for efficiency, a file splitter may decide to produce
1090                                # file subranges directly from the filepattern to avoid a splitting
1091                                # round-trip.
1092                                #
1093                                # See SourceSplitRequest for an overview of the splitting process.
1094                                #
1095                                # This field is meaningful only in the Source objects populated
1096                                # by the user (e.g. when filling in a DerivedSource).
1097                                # Source objects supplied by the framework to the user don't have
1098                                # this field populated.
                            "metadata": { # Metadata about a Source useful for automatically optimizing
                                # and tuning the pipeline, etc.
                                #
                                # Optionally, metadata for this source can be supplied right away,
                                # avoiding a SourceGetMetadataOperation roundtrip
                                # (see SourceOperationRequest).
                                #
                                # This field is meaningful only in the Source objects populated
                                # by the user (e.g. when filling in a DerivedSource).
                                # Source objects supplied by the framework to the user don't have
                                # this field populated.
1108                              "infinite": True or False, # Specifies that the size of this source is known to be infinite
1109                                  # (this is a streaming source).
1110                              "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
1111                                  # read from this source.  This estimate is in terms of external storage
1112                                  # size, before any decompression or other processing done by the reader.
1113                              "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
1114                                  # the (encoded) keys in lexicographically sorted order.
1115                            },
1116                          },
1117                        ],
1118                        "kind": { # How to interpret the source element(s) as a side input value.
1119                          "a_key": "", # Properties of the object.
1120                        },
1121                        "tag": "A String", # The id of the tag the user code will access this side input by;
1122                            # this should correspond to the tag of some MultiOutputInfo.
1123                      },
1124                    ],
1125                    "input": { # An input of an instruction, as a reference to an output of a # The input.
1126                        # producer instruction.
1127                      "outputNum": 42, # The output index (origin zero) within the producer.
1128                      "producerInstructionIndex": 42, # The index (origin zero) of the parallel instruction that produces
1129                          # the output to be consumed by this input.  This index is relative
1130                          # to the list of instructions in this input's instruction's
1131                          # containing MapTask.
1132                    },
                    "multiOutputInfos": [ # Information about each of the outputs, if user_fn is a MultiDoFn.
1134                      { # Information about an output of a multi-output DoFn.
1135                        "tag": "A String", # The id of the tag the user code will emit to this output by; this
1136                            # should correspond to the tag of some SideInputInfo.
1137                      },
1138                    ],
1139                    "numOutputs": 42, # The number of outputs.
1140                    "userFn": { # The user function to invoke.
1141                      "a_key": "", # Properties of the object.
1142                    },
1143                  },
1144                },
1145              ],
1146            },
1147          ],
          "maxWorkItemCommitBytes": "A String", # Maximum size for a work item commit supported by the windmill storage layer.
1149          "windmillServiceEndpoint": "A String", # If present, the worker must use this endpoint to communicate with Windmill
1150              # Service dispatchers, otherwise the worker must continue to use whatever
1151              # endpoint it had been using.
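              #
              # For example (illustrative values): a config task carrying
              # windmill_service_endpoint "windmill.example.com" and
              # windmill_service_port "443" directs the worker to reach Windmill
              # Service dispatchers at that address from then on.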
1152        },
1153        "configuration": "A String", # Work item-specific configuration as an opaque blob.
1154        "shellTask": { # A task which consists of a shell command for the worker to execute. # Additional information for ShellTask WorkItems.
1155          "command": "A String", # The shell command to run.
1156          "exitCode": 42, # Exit code for the task.
1157        },
1158        "id": "A String", # Identifies this WorkItem.
1159      },
1160    ],
1161    "unifiedWorkerResponse": { # Untranslated bag-of-bytes WorkResponse for UnifiedWorker.
1162      "a_key": "", # Properties of the object. Contains field @type with type URL.
1163    },
1164  }</pre>
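<p>As a concrete illustration, a worker might lease WorkItems with the google-api-python-client library as sketched below. This is a minimal sketch assuming default application credentials; every ID, the WorkItem type filter, the capability string, and the lease duration are placeholder values, not values mandated by the service.</p>
<pre># Minimal sketch only: all IDs and values below are placeholders.
from googleapiclient.discovery import build

service = build('dataflow', 'v1b3')

lease_response = service.projects().locations().jobs().workItems().lease(
    projectId='my-project',                       # placeholder project ID
    location='us-central1',                       # placeholder regional endpoint
    jobId='2019-01-01_00_00_00-1234567890',       # placeholder job ID
    body={
        'workerId': 'myproject-harness-0',        # typically the worker VM name
        'workItemTypes': ['map_task'],            # illustrative type filter
        'workerCapabilities': ['remote_source'],  # illustrative capability
        'requestedLeaseDuration': '300s',         # illustrative lease period
        'currentWorkerTime': '2019-01-01T00:00:00Z',
    }).execute()

for work_item in lease_response.get('workItems', []):
    print(work_item['id'])  # each leased WorkItem is identified by "id"
</pre>
<p>Each leased WorkItem is then driven to completion by periodic calls to the reportStatus method documented below, following the report index sequencing described there.</p>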
1165</div>
1166
1167<div class="method">
1168    <code class="details" id="reportStatus">reportStatus(projectId, location, jobId, body, x__xgafv=None)</code>
1169  <pre>Reports the status of dataflow WorkItems leased by a worker.
1170
1171Args:
1172  projectId: string, The project which owns the WorkItem's job. (required)
1173  location: string, The [regional endpoint]
1174(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that
1175contains the WorkItem's job. (required)
1176  jobId: string, The job which the WorkItem is part of. (required)
1177  body: object, The request body. (required)
1178    The object takes the form of:
1179
1180{ # Request to report the status of WorkItems.
1181    "workerId": "A String", # The ID of the worker reporting the WorkItem status.  If this
1182        # does not match the ID of the worker which the Dataflow service
1183        # believes currently has the lease on the WorkItem, the report
1184        # will be dropped (with an error response).
1185    "unifiedWorkerRequest": { # Untranslated bag-of-bytes WorkProgressUpdateRequest from UnifiedWorker.
1186      "a_key": "", # Properties of the object. Contains field @type with type URL.
1187    },
1188    "currentWorkerTime": "A String", # The current timestamp at the worker.
1189    "workItemStatuses": [ # The order is unimportant, except that the order of the
1190        # WorkItemServiceState messages in the ReportWorkItemStatusResponse
1191        # corresponds to the order of WorkItemStatus messages here.
1192      { # Conveys a worker's progress through the work described by a WorkItem.
1193        "reportIndex": "A String", # The report index.  When a WorkItem is leased, the lease will
1194            # contain an initial report index.  When a WorkItem's status is
1195            # reported to the system, the report should be sent with
1196            # that report index, and the response will contain the index the
1197            # worker should use for the next report.  Reports received with
1198            # unexpected index values will be rejected by the service.
1199            #
1200            # In order to preserve idempotency, the worker should not alter the
1201            # contents of a report, even if the worker must submit the same
1202            # report multiple times before getting back a response.  The worker
            # should not submit a subsequent report until the response for the
            # previous report has been received from the service.
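            #
            # For example (illustrative values): if the lease assigns an initial
            # report index of 5, the first status report for that WorkItem is
            # sent with report index 5, and the index returned in the response
            # is used for the next report.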
1205        "errors": [ # Specifies errors which occurred during processing.  If errors are
1206            # provided, and completed = true, then the WorkItem is considered
1207            # to have failed.
1208          { # The `Status` type defines a logical error model that is suitable for
1209              # different programming environments, including REST APIs and RPC APIs. It is
1210              # used by [gRPC](https://github.com/grpc). The error model is designed to be:
1211              #
1212              # - Simple to use and understand for most users
1213              # - Flexible enough to meet unexpected needs
1214              #
1215              # # Overview
1216              #
1217              # The `Status` message contains three pieces of data: error code, error
1218              # message, and error details. The error code should be an enum value of
1219              # google.rpc.Code, but it may accept additional error codes if needed.  The
1220              # error message should be a developer-facing English message that helps
1221              # developers *understand* and *resolve* the error. If a localized user-facing
1222              # error message is needed, put the localized message in the error details or
1223              # localize it in the client. The optional error details may contain arbitrary
1224              # information about the error. There is a predefined set of error detail types
1225              # in the package `google.rpc` that can be used for common error conditions.
1226              #
1227              # # Language mapping
1228              #
1229              # The `Status` message is the logical representation of the error model, but it
1230              # is not necessarily the actual wire format. When the `Status` message is
1231              # exposed in different client libraries and different wire protocols, it can be
1232              # mapped differently. For example, it will likely be mapped to some exceptions
1233              # in Java, but more likely mapped to some error codes in C.
1234              #
1235              # # Other uses
1236              #
1237              # The error model and the `Status` message can be used in a variety of
1238              # environments, either with or without APIs, to provide a
1239              # consistent developer experience across different environments.
1240              #
1241              # Example uses of this error model include:
1242              #
1243              # - Partial errors. If a service needs to return partial errors to the client,
1244              #     it may embed the `Status` in the normal response to indicate the partial
1245              #     errors.
1246              #
1247              # - Workflow errors. A typical workflow has multiple steps. Each step may
1248              #     have a `Status` message for error reporting.
1249              #
1250              # - Batch operations. If a client uses batch request and batch response, the
1251              #     `Status` message should be used directly inside batch response, one for
1252              #     each error sub-response.
1253              #
1254              # - Asynchronous operations. If an API call embeds asynchronous operation
1255              #     results in its response, the status of those operations should be
1256              #     represented directly using the `Status` message.
1257              #
1258              # - Logging. If some API errors are stored in logs, the message `Status` could
1259              #     be used directly after any stripping needed for security/privacy reasons.
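              #
              # An illustrative instance (example values only):
              #   { "code": 9, "message": "worker lost its lease", "details": [] }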
1260            "message": "A String", # A developer-facing error message, which should be in English. Any
1261                # user-facing error message should be localized and sent in the
1262                # google.rpc.Status.details field, or localized by the client.
1263            "code": 42, # The status code, which should be an enum value of google.rpc.Code.
1264            "details": [ # A list of messages that carry the error details.  There is a common set of
1265                # message types for APIs to use.
1266              {
1267                "a_key": "", # Properties of the object. Contains field @type with type URL.
1268              },
1269            ],
1270          },
1271        ],
        "sourceOperationResponse": { # The result of a SourceOperationRequest, specified in
            # ReportWorkItemStatusRequest.source_operation when the work item
            # is completed.
            #
            # If the work item represented a SourceOperationRequest, and the work
            # is completed, contains the result of the operation.
1276          "getMetadata": { # The result of a SourceGetMetadataOperation. # A response to a request to get metadata about a source.
            "metadata": { # The computed metadata.
                # Metadata about a Source useful for automatically optimizing
                # and tuning the pipeline, etc.
1279              "infinite": True or False, # Specifies that the size of this source is known to be infinite
1280                  # (this is a streaming source).
1281              "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
1282                  # read from this source.  This estimate is in terms of external storage
1283                  # size, before any decompression or other processing done by the reader.
1284              "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
1285                  # the (encoded) keys in lexicographically sorted order.
1286            },
1287          },
1288          "split": { # The response to a SourceSplitRequest. # A response to a request to split a source.
1289            "outcome": "A String", # Indicates whether splitting happened and produced a list of bundles.
1290                # If this is USE_CURRENT_SOURCE_AS_IS, the current source should
1291                # be processed "as is" without splitting. "bundles" is ignored in this case.
1292                # If this is SPLITTING_HAPPENED, then "bundles" contains a list of
1293                # bundles into which the source was split.
1294            "bundles": [ # If outcome is SPLITTING_HAPPENED, then this is a list of bundles
1295                # into which the source was split. Otherwise this field is ignored.
1296                # This list can be empty, which means the source represents an empty input.
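                #
                # For example (illustrative), a file source split into two byte
                # ranges would be reported with outcome SPLITTING_HAPPENED and
                # two bundles, one per byte range.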
1297              { # Specification of one of the bundles produced as a result of splitting
1298                  # a Source (e.g. when executing a SourceSplitRequest, or when
1299                  # splitting an active task using WorkItemStatus.dynamic_source_split),
1300                  # relative to the source being split.
1301                "derivationMode": "A String", # What source to base the produced source on (if any).
1302                "source": { # A source that records can be read and decoded from. # Specification of the source.
1303                  "codec": { # The codec to use to decode data read from the source.
1304                    "a_key": "", # Properties of the object.
1305                  },
1306                  "baseSpecs": [ # While splitting, sources may specify the produced bundles
1307                      # as differences against another source, in order to save backend-side
1308                      # memory and allow bigger jobs. For details, see SourceSplitRequest.
1309                      # To support this use case, the full set of parameters of the source
1310                      # is logically obtained by taking the latest explicitly specified value
1311                      # of each parameter in the order:
1312                      # base_specs (later items win), spec (overrides anything in base_specs).
1313                    {
1314                      "a_key": "", # Properties of the object.
1315                    },
1316                  ],
1317                  "spec": { # The source to read from, plus its parameters.
1318                    "a_key": "", # Properties of the object.
1319                  },
1320                  "doesNotNeedSplitting": True or False, # Setting this value to true hints to the framework that the source
1321                      # doesn't need splitting, and using SourceSplitRequest on it would
1322                      # yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
1323                      #
1324                      # E.g. a file splitter may set this to true when splitting a single file
1325                      # into a set of byte ranges of appropriate size, and set this
1326                      # to false when splitting a filepattern into individual files.
1327                      # However, for efficiency, a file splitter may decide to produce
1328                      # file subranges directly from the filepattern to avoid a splitting
1329                      # round-trip.
1330                      #
1331                      # See SourceSplitRequest for an overview of the splitting process.
1332                      #
1333                      # This field is meaningful only in the Source objects populated
1334                      # by the user (e.g. when filling in a DerivedSource).
1335                      # Source objects supplied by the framework to the user don't have
1336                      # this field populated.
                  "metadata": { # Metadata about a Source useful for automatically optimizing
                      # and tuning the pipeline, etc.
                      #
                      # Optionally, metadata for this source can be supplied right away,
                      # avoiding a SourceGetMetadataOperation roundtrip
                      # (see SourceOperationRequest).
                      #
                      # This field is meaningful only in the Source objects populated
                      # by the user (e.g. when filling in a DerivedSource).
                      # Source objects supplied by the framework to the user don't have
                      # this field populated.
1346                    "infinite": True or False, # Specifies that the size of this source is known to be infinite
1347                        # (this is a streaming source).
1348                    "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
1349                        # read from this source.  This estimate is in terms of external storage
1350                        # size, before any decompression or other processing done by the reader.
1351                    "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
1352                        # the (encoded) keys in lexicographically sorted order.
1353                  },
1354                },
1355              },
1356            ],
1357            "shards": [ # DEPRECATED in favor of bundles.
1358              { # DEPRECATED in favor of DerivedSource.
1359                "derivationMode": "A String", # DEPRECATED
1360                "source": { # A source that records can be read and decoded from. # DEPRECATED
1361                  "codec": { # The codec to use to decode data read from the source.
1362                    "a_key": "", # Properties of the object.
1363                  },
1364                  "baseSpecs": [ # While splitting, sources may specify the produced bundles
1365                      # as differences against another source, in order to save backend-side
1366                      # memory and allow bigger jobs. For details, see SourceSplitRequest.
1367                      # To support this use case, the full set of parameters of the source
1368                      # is logically obtained by taking the latest explicitly specified value
1369                      # of each parameter in the order:
1370                      # base_specs (later items win), spec (overrides anything in base_specs).
1371                    {
1372                      "a_key": "", # Properties of the object.
1373                    },
1374                  ],
1375                  "spec": { # The source to read from, plus its parameters.
1376                    "a_key": "", # Properties of the object.
1377                  },
1378                  "doesNotNeedSplitting": True or False, # Setting this value to true hints to the framework that the source
1379                      # doesn't need splitting, and using SourceSplitRequest on it would
1380                      # yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
1381                      #
1382                      # E.g. a file splitter may set this to true when splitting a single file
1383                      # into a set of byte ranges of appropriate size, and set this
1384                      # to false when splitting a filepattern into individual files.
1385                      # However, for efficiency, a file splitter may decide to produce
1386                      # file subranges directly from the filepattern to avoid a splitting
1387                      # round-trip.
1388                      #
1389                      # See SourceSplitRequest for an overview of the splitting process.
1390                      #
1391                      # This field is meaningful only in the Source objects populated
1392                      # by the user (e.g. when filling in a DerivedSource).
1393                      # Source objects supplied by the framework to the user don't have
1394                      # this field populated.
                  "metadata": { # Metadata about a Source useful for automatically optimizing
                      # and tuning the pipeline, etc.
                      #
                      # Optionally, metadata for this source can be supplied right away,
                      # avoiding a SourceGetMetadataOperation roundtrip
                      # (see SourceOperationRequest).
                      #
                      # This field is meaningful only in the Source objects populated
                      # by the user (e.g. when filling in a DerivedSource).
                      # Source objects supplied by the framework to the user don't have
                      # this field populated.
1404                    "infinite": True or False, # Specifies that the size of this source is known to be infinite
1405                        # (this is a streaming source).
1406                    "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
1407                        # read from this source.  This estimate is in terms of external storage
1408                        # size, before any decompression or other processing done by the reader.
1409                    "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
1410                        # the (encoded) keys in lexicographically sorted order.
1411                  },
1412                },
1413              },
1414            ],
1415          },
1416        },
        "stopPosition": { # Position defines a position within a collection of data.  The value
            # can be either the end position, a key (used with ordered
            # collections), a byte offset, or a record index.
            #
            # A worker may split an active map task in two parts, "primary" and
            # "residual", continuing to process the primary part and returning the
            # residual part into the pool of available work.
            # This event is called a "dynamic split" and is critical to the dynamic
            # work rebalancing feature. The two obtained sub-tasks are called
            # "parts" of the split.
            # The parts, if concatenated, must represent the same input as would
            # be read by the current task if the split did not happen.
            # The exact way in which the original task is decomposed into the two
            # parts is specified either as a position demarcating them
            # (stop_position), or explicitly as two DerivedSources, if this
            # task consumes a user-defined source type (dynamic_source_split).
            #
            # The "current" task is adjusted as a result of the split: after a task
            # with range [A, B) sends a stop_position update at C, its range is
            # considered to be [A, C), e.g.:
            # * Progress should be interpreted relative to the new range, e.g.
            #   "75% completed" means "75% of [A, C) completed".
            # * The worker should interpret proposed_stop_position relative to the
            #   new range, e.g. "split at 68%" should be interpreted as
            #   "split at 68% of [A, C)".
            # * If the worker chooses to split again using stop_position, only
            #   stop_positions in [A, C) will be accepted.
            # * Etc.
            #
            # dynamic_source_split has similar semantics: e.g., if a task with
            # source S splits using dynamic_source_split into {P, R}
            # (where P and R must be together equivalent to S), then subsequent
            # progress and proposed_stop_position should be interpreted relative
            # to P, and in a potential subsequent dynamic_source_split into {P', R'},
            # P' and R' must be together equivalent to P, etc.
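            #
            # For example (illustrative values): a task reading byte range
            # [0, 1000) that splits at byte 750 reports a stopPosition with
            # byteOffset "750" and continues processing bytes [0, 750).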
1449          "end": True or False, # Position is past all other positions. Also useful for the end
1450              # position of an unbounded range.
1451          "recordIndex": "A String", # Position is a record index.
1452          "byteOffset": "A String", # Position is a byte offset.
1453          "key": "A String", # Position is a string key, ordered lexicographically.
          "concatPosition": { # CloudPosition is a concat position.
              # A position that encapsulates an inner position and an index for the
              # inner position. A ConcatPosition can be used by a reader of a source
              # that encapsulates a set of other sources.
1457            "position": # Object with schema name: Position # Position within the inner source.
1458            "index": 42, # Index of the inner source.
1459          },
1460          "shufflePosition": "A String", # CloudPosition is a base64 encoded BatchShufflePosition (with FIXED
1461              # sharding).
1462        },
1463        "sourceFork": { # DEPRECATED in favor of DynamicSourceSplit. # DEPRECATED in favor of dynamic_source_split.
1464          "residualSource": { # Specification of one of the bundles produced as a result of splitting # DEPRECATED
1465              # a Source (e.g. when executing a SourceSplitRequest, or when
1466              # splitting an active task using WorkItemStatus.dynamic_source_split),
1467              # relative to the source being split.
1468            "derivationMode": "A String", # What source to base the produced source on (if any).
1469            "source": { # A source that records can be read and decoded from. # Specification of the source.
1470              "codec": { # The codec to use to decode data read from the source.
1471                "a_key": "", # Properties of the object.
1472              },
1473              "baseSpecs": [ # While splitting, sources may specify the produced bundles
1474                  # as differences against another source, in order to save backend-side
1475                  # memory and allow bigger jobs. For details, see SourceSplitRequest.
1476                  # To support this use case, the full set of parameters of the source
1477                  # is logically obtained by taking the latest explicitly specified value
1478                  # of each parameter in the order:
1479                  # base_specs (later items win), spec (overrides anything in base_specs).
1480                {
1481                  "a_key": "", # Properties of the object.
1482                },
1483              ],
1484              "spec": { # The source to read from, plus its parameters.
1485                "a_key": "", # Properties of the object.
1486              },
1487              "doesNotNeedSplitting": True or False, # Setting this value to true hints to the framework that the source
1488                  # doesn't need splitting, and using SourceSplitRequest on it would
1489                  # yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
1490                  #
1491                  # E.g. a file splitter may set this to true when splitting a single file
1492                  # into a set of byte ranges of appropriate size, and set this
1493                  # to false when splitting a filepattern into individual files.
1494                  # However, for efficiency, a file splitter may decide to produce
1495                  # file subranges directly from the filepattern to avoid a splitting
1496                  # round-trip.
1497                  #
1498                  # See SourceSplitRequest for an overview of the splitting process.
1499                  #
1500                  # This field is meaningful only in the Source objects populated
1501                  # by the user (e.g. when filling in a DerivedSource).
1502                  # Source objects supplied by the framework to the user don't have
1503                  # this field populated.
              "metadata": { # Metadata about a Source useful for automatically optimizing
                  # and tuning the pipeline, etc.
                  #
                  # Optionally, metadata for this source can be supplied right away,
                  # avoiding a SourceGetMetadataOperation roundtrip
                  # (see SourceOperationRequest).
                  #
                  # This field is meaningful only in the Source objects populated
                  # by the user (e.g. when filling in a DerivedSource).
                  # Source objects supplied by the framework to the user don't have
                  # this field populated.
1513                "infinite": True or False, # Specifies that the size of this source is known to be infinite
1514                    # (this is a streaming source).
1515                "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
1516                    # read from this source.  This estimate is in terms of external storage
1517                    # size, before any decompression or other processing done by the reader.
1518                "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
1519                    # the (encoded) keys in lexicographically sorted order.
1520              },
1521            },
1522          },
1523          "primarySource": { # Specification of one of the bundles produced as a result of splitting # DEPRECATED
1524              # a Source (e.g. when executing a SourceSplitRequest, or when
1525              # splitting an active task using WorkItemStatus.dynamic_source_split),
1526              # relative to the source being split.
1527            "derivationMode": "A String", # What source to base the produced source on (if any).
1528            "source": { # A source that records can be read and decoded from. # Specification of the source.
1529              "codec": { # The codec to use to decode data read from the source.
1530                "a_key": "", # Properties of the object.
1531              },
1532              "baseSpecs": [ # While splitting, sources may specify the produced bundles
1533                  # as differences against another source, in order to save backend-side
1534                  # memory and allow bigger jobs. For details, see SourceSplitRequest.
1535                  # To support this use case, the full set of parameters of the source
1536                  # is logically obtained by taking the latest explicitly specified value
1537                  # of each parameter in the order:
1538                  # base_specs (later items win), spec (overrides anything in base_specs).
1539                {
1540                  "a_key": "", # Properties of the object.
1541                },
1542              ],
1543              "spec": { # The source to read from, plus its parameters.
1544                "a_key": "", # Properties of the object.
1545              },
1546              "doesNotNeedSplitting": True or False, # Setting this value to true hints to the framework that the source
1547                  # doesn't need splitting, and using SourceSplitRequest on it would
1548                  # yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
1549                  #
1550                  # E.g. a file splitter may set this to true when splitting a single file
1551                  # into a set of byte ranges of appropriate size, and set this
1552                  # to false when splitting a filepattern into individual files.
1553                  # However, for efficiency, a file splitter may decide to produce
1554                  # file subranges directly from the filepattern to avoid a splitting
1555                  # round-trip.
1556                  #
1557                  # See SourceSplitRequest for an overview of the splitting process.
1558                  #
1559                  # This field is meaningful only in the Source objects populated
1560                  # by the user (e.g. when filling in a DerivedSource).
1561                  # Source objects supplied by the framework to the user don't have
1562                  # this field populated.
              "metadata": { # Metadata about a Source useful for automatically optimizing
                  # and tuning the pipeline, etc.
                  #
                  # Optionally, metadata for this source can be supplied right away,
                  # avoiding a SourceGetMetadataOperation roundtrip
                  # (see SourceOperationRequest).
                  #
                  # This field is meaningful only in the Source objects populated
                  # by the user (e.g. when filling in a DerivedSource).
                  # Source objects supplied by the framework to the user don't have
                  # this field populated.
1572                "infinite": True or False, # Specifies that the size of this source is known to be infinite
1573                    # (this is a streaming source).
1574                "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
1575                    # read from this source.  This estimate is in terms of external storage
1576                    # size, before any decompression or other processing done by the reader.
1577                "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
1578                    # the (encoded) keys in lexicographically sorted order.
1579              },
1580            },
1581          },
1582          "primary": { # DEPRECATED in favor of DerivedSource. # DEPRECATED
1583            "derivationMode": "A String", # DEPRECATED
1584            "source": { # A source that records can be read and decoded from. # DEPRECATED
1585              "codec": { # The codec to use to decode data read from the source.
1586                "a_key": "", # Properties of the object.
1587              },
1588              "baseSpecs": [ # While splitting, sources may specify the produced bundles
1589                  # as differences against another source, in order to save backend-side
1590                  # memory and allow bigger jobs. For details, see SourceSplitRequest.
1591                  # To support this use case, the full set of parameters of the source
1592                  # is logically obtained by taking the latest explicitly specified value
1593                  # of each parameter in the order:
1594                  # base_specs (later items win), spec (overrides anything in base_specs).
1595                {
1596                  "a_key": "", # Properties of the object.
1597                },
1598              ],
1599              "spec": { # The source to read from, plus its parameters.
1600                "a_key": "", # Properties of the object.
1601              },
1602              "doesNotNeedSplitting": True or False, # Setting this value to true hints to the framework that the source
1603                  # doesn't need splitting, and using SourceSplitRequest on it would
1604                  # yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
1605                  #
1606                  # E.g. a file splitter may set this to true when splitting a single file
1607                  # into a set of byte ranges of appropriate size, and set this
1608                  # to false when splitting a filepattern into individual files.
1609                  # However, for efficiency, a file splitter may decide to produce
1610                  # file subranges directly from the filepattern to avoid a splitting
1611                  # round-trip.
1612                  #
1613                  # See SourceSplitRequest for an overview of the splitting process.
1614                  #
1615                  # This field is meaningful only in the Source objects populated
1616                  # by the user (e.g. when filling in a DerivedSource).
1617                  # Source objects supplied by the framework to the user don't have
1618                  # this field populated.
              "metadata": { # Metadata about a Source useful for automatically optimizing
                  # and tuning the pipeline, etc.
                  #
                  # Optionally, metadata for this source can be supplied right away,
                  # avoiding a SourceGetMetadataOperation roundtrip
                  # (see SourceOperationRequest).
                  #
                  # This field is meaningful only in the Source objects populated
                  # by the user (e.g. when filling in a DerivedSource).
                  # Source objects supplied by the framework to the user don't have
                  # this field populated.
1628                "infinite": True or False, # Specifies that the size of this source is known to be infinite
1629                    # (this is a streaming source).
1630                "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
1631                    # read from this source.  This estimate is in terms of external storage
1632                    # size, before any decompression or other processing done by the reader.
1633                "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
1634                    # the (encoded) keys in lexicographically sorted order.
1635              },
1636            },
1637          },
          "residual": { # DEPRECATED in favor of DerivedSource. # DEPRECATED
            "derivationMode": "A String", # DEPRECATED
            "source": { # A source that records can be read and decoded from. # DEPRECATED
              "codec": { # The codec to use to decode data read from the source.
                "a_key": "", # Properties of the object.
              },
              "baseSpecs": [ # While splitting, sources may specify the produced bundles
                  # as differences against another source, in order to save backend-side
                  # memory and allow bigger jobs. For details, see SourceSplitRequest.
                  # To support this use case, the full set of parameters of the source
                  # is logically obtained by taking the latest explicitly specified value
                  # of each parameter in the order:
                  # base_specs (later items win), spec (overrides anything in base_specs).
                {
                  "a_key": "", # Properties of the object.
                },
              ],
              "spec": { # The source to read from, plus its parameters.
                "a_key": "", # Properties of the object.
              },
              "doesNotNeedSplitting": True or False, # Setting this value to true hints to the framework that the source
                  # doesn't need splitting, and using SourceSplitRequest on it would
                  # yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
                  #
                  # E.g. a file splitter may set this to true when splitting a single file
                  # into a set of byte ranges of appropriate size, and set this
                  # to false when splitting a filepattern into individual files.
                  # However, for efficiency, a file splitter may decide to produce
                  # file subranges directly from the filepattern to avoid a splitting
                  # round-trip.
                  #
                  # See SourceSplitRequest for an overview of the splitting process.
                  #
                  # This field is meaningful only in the Source objects populated
                  # by the user (e.g. when filling in a DerivedSource).
                  # Source objects supplied by the framework to the user don't have
                  # this field populated.
              "metadata": { # Metadata about a Source useful for automatically optimizing and tuning the pipeline, etc. # Optionally, metadata for this source can be supplied right away,
                  # avoiding a SourceGetMetadataOperation roundtrip
                  # (see SourceOperationRequest).
                  #
                  # This field is meaningful only in the Source objects populated
                  # by the user (e.g. when filling in a DerivedSource).
                  # Source objects supplied by the framework to the user don't have
                  # this field populated.
                "infinite": True or False, # Specifies that the size of this source is known to be infinite
                    # (this is a streaming source).
                "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
                    # read from this source.  This estimate is in terms of external storage
                    # size, before any decompression or other processing done by the reader.
                "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
                    # the (encoded) keys in lexicographically sorted order.
              },
            },
          },
        },
        "requestedLeaseDuration": "A String", # Amount of time the worker requests for its lease.
        "completed": True or False, # True if the WorkItem was completed (successfully or unsuccessfully).
        "workItemId": "A String", # Identifies the WorkItem.
        "dynamicSourceSplit": { # When a task splits using WorkItemStatus.dynamic_source_split, this # See documentation of stop_position.
            # message describes the two parts of the split relative to the
            # description of the current task's input.
          "residual": { # Specification of one of the bundles produced as a result of splitting # Residual part (returned to the pool of work). Specified relative to the previously-current source.
              # a Source (e.g. when executing a SourceSplitRequest, or when
              # splitting an active task using WorkItemStatus.dynamic_source_split),
              # relative to the source being split.
            "derivationMode": "A String", # What source to base the produced source on (if any).
            "source": { # A source that records can be read and decoded from. # Specification of the source.
              "codec": { # The codec to use to decode data read from the source.
                "a_key": "", # Properties of the object.
              },
              "baseSpecs": [ # While splitting, sources may specify the produced bundles
                  # as differences against another source, in order to save backend-side
                  # memory and allow bigger jobs. For details, see SourceSplitRequest.
                  # To support this use case, the full set of parameters of the source
                  # is logically obtained by taking the latest explicitly specified value
                  # of each parameter in the order:
                  # base_specs (later items win), spec (overrides anything in base_specs).
                {
                  "a_key": "", # Properties of the object.
                },
              ],
              "spec": { # The source to read from, plus its parameters.
                "a_key": "", # Properties of the object.
              },
              "doesNotNeedSplitting": True or False, # Setting this value to true hints to the framework that the source
                  # doesn't need splitting, and using SourceSplitRequest on it would
                  # yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
                  #
                  # E.g. a file splitter may set this to true when splitting a single file
                  # into a set of byte ranges of appropriate size, and set this
                  # to false when splitting a filepattern into individual files.
                  # However, for efficiency, a file splitter may decide to produce
                  # file subranges directly from the filepattern to avoid a splitting
                  # round-trip.
                  #
                  # See SourceSplitRequest for an overview of the splitting process.
                  #
                  # This field is meaningful only in the Source objects populated
                  # by the user (e.g. when filling in a DerivedSource).
                  # Source objects supplied by the framework to the user don't have
                  # this field populated.
              "metadata": { # Metadata about a Source useful for automatically optimizing and tuning the pipeline, etc. # Optionally, metadata for this source can be supplied right away,
                  # avoiding a SourceGetMetadataOperation roundtrip
                  # (see SourceOperationRequest).
                  #
                  # This field is meaningful only in the Source objects populated
                  # by the user (e.g. when filling in a DerivedSource).
                  # Source objects supplied by the framework to the user don't have
                  # this field populated.
                "infinite": True or False, # Specifies that the size of this source is known to be infinite
                    # (this is a streaming source).
                "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
                    # read from this source.  This estimate is in terms of external storage
                    # size, before any decompression or other processing done by the reader.
                "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
                    # the (encoded) keys in lexicographically sorted order.
              },
            },
          },
          "primary": { # Specification of one of the bundles produced as a result of splitting # Primary part (continued to be processed by worker). Specified relative to the previously-current source. Becomes current.
              # a Source (e.g. when executing a SourceSplitRequest, or when
              # splitting an active task using WorkItemStatus.dynamic_source_split),
              # relative to the source being split.
            "derivationMode": "A String", # What source to base the produced source on (if any).
            "source": { # A source that records can be read and decoded from. # Specification of the source.
              "codec": { # The codec to use to decode data read from the source.
                "a_key": "", # Properties of the object.
              },
              "baseSpecs": [ # While splitting, sources may specify the produced bundles
                  # as differences against another source, in order to save backend-side
                  # memory and allow bigger jobs. For details, see SourceSplitRequest.
                  # To support this use case, the full set of parameters of the source
                  # is logically obtained by taking the latest explicitly specified value
                  # of each parameter in the order:
                  # base_specs (later items win), spec (overrides anything in base_specs).
                {
                  "a_key": "", # Properties of the object.
                },
              ],
              "spec": { # The source to read from, plus its parameters.
                "a_key": "", # Properties of the object.
              },
              "doesNotNeedSplitting": True or False, # Setting this value to true hints to the framework that the source
                  # doesn't need splitting, and using SourceSplitRequest on it would
                  # yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
                  #
                  # E.g. a file splitter may set this to true when splitting a single file
                  # into a set of byte ranges of appropriate size, and set this
                  # to false when splitting a filepattern into individual files.
                  # However, for efficiency, a file splitter may decide to produce
                  # file subranges directly from the filepattern to avoid a splitting
                  # round-trip.
                  #
                  # See SourceSplitRequest for an overview of the splitting process.
                  #
                  # This field is meaningful only in the Source objects populated
                  # by the user (e.g. when filling in a DerivedSource).
                  # Source objects supplied by the framework to the user don't have
                  # this field populated.
              "metadata": { # Metadata about a Source useful for automatically optimizing and tuning the pipeline, etc. # Optionally, metadata for this source can be supplied right away,
                  # avoiding a SourceGetMetadataOperation roundtrip
                  # (see SourceOperationRequest).
                  #
                  # This field is meaningful only in the Source objects populated
                  # by the user (e.g. when filling in a DerivedSource).
                  # Source objects supplied by the framework to the user don't have
                  # this field populated.
                "infinite": True or False, # Specifies that the size of this source is known to be infinite
                    # (this is a streaming source).
                "estimatedSizeBytes": "A String", # An estimate of the total size (in bytes) of the data that would be
                    # read from this source.  This estimate is in terms of external storage
                    # size, before any decompression or other processing done by the reader.
                "producesSortedKeys": True or False, # Whether this source is known to produce key/value pairs with
                    # the (encoded) keys in lexicographically sorted order.
              },
            },
          },
        },
        "totalThrottlerWaitTimeSeconds": 3.14, # Total time the worker spent being throttled by external systems.
        "counterUpdates": [ # Worker output counters for this WorkItem.
          { # An update to a Counter sent from a worker.
            "floatingPointList": { # A metric value representing a list of floating point numbers. # List of floating point numbers, for Set.
              "elements": [ # Elements of the list.
                3.14,
              ],
            },
            "floatingPoint": 3.14, # Floating point value for Sum, Max, Min.
            "integerMean": { # A representation of an integer mean metric contribution. # Integer mean aggregation value for Mean.
              "count": { # A representation of an int64, n, that is immune to precision loss when encoded in JSON. # The number of values being aggregated.
                "lowBits": 42, # The low order bits: n & 0xffffffff.
                "highBits": 42, # The high order bits, including the sign: n >> 32.
              },
              "sum": { # A representation of an int64, n, that is immune to precision loss when encoded in JSON. # The sum of all values being aggregated.
                "lowBits": 42, # The low order bits: n & 0xffffffff.
                "highBits": 42, # The high order bits, including the sign: n >> 32.
              },
            },
            "boolean": True or False, # Boolean value for And, Or.
            "integerList": { # A metric value representing a list of integers. # List of integers, for Set.
              "elements": [ # Elements of the list.
                { # A representation of an int64, n, that is immune to precision loss when encoded in JSON.
                  "lowBits": 42, # The low order bits: n & 0xffffffff.
                  "highBits": 42, # The high order bits, including the sign: n >> 32.
                },
              ],
            },
            "cumulative": True or False, # True if this counter is reported as the total cumulative aggregate
                # value accumulated since the worker started working on this WorkItem.
                # By default this is false, indicating that this counter is reported
                # as a delta.
            "shortId": "A String", # The service-generated short identifier for this counter.
                # The short_id -> (name, metadata) mapping is constant for the lifetime of
                # a job.
            "integerGauge": { # A metric value representing temporal values of a variable. # Gauge data
              "timestamp": "A String", # The time at which this value was measured. Measured as msecs from epoch.
              "value": { # A representation of an int64, n, that is immune to precision loss when encoded in JSON. # The value of the variable represented by this gauge.
                "lowBits": 42, # The low order bits: n & 0xffffffff.
                "highBits": 42, # The high order bits, including the sign: n >> 32.
              },
            },
            "floatingPointMean": { # A representation of a floating point mean metric contribution. # Floating point mean aggregation value for Mean.
              "count": { # A representation of an int64, n, that is immune to precision loss when encoded in JSON. # The number of values being aggregated.
                "lowBits": 42, # The low order bits: n & 0xffffffff.
                "highBits": 42, # The high order bits, including the sign: n >> 32.
              },
              "sum": 3.14, # The sum of all values being aggregated.
            },
            "internal": "", # Value for internally-defined counters used by the Dataflow service.
            "structuredNameAndMetadata": { # A single message which encapsulates structured name and metadata for a given counter. # Counter structured name and metadata.
              "name": { # Identifies a counter within a per-job namespace. Counters whose structured names are the same get merged into a single value for the job. # Structured name of the counter.
                "origin": "A String", # One of the standard Origins defined above.
                "executionStepName": "A String", # Name of the stage. An execution step contains multiple component steps.
                "name": "A String", # Counter name. Not necessarily globally-unique, but unique within the
                    # context of the other fields.
                    # Required.
                "workerId": "A String", # ID of a particular worker.
                "inputIndex": 42, # Index of an input collection that's being read from/written to as a side
                    # input.
                    # The index identifies a step's side inputs starting from 1 (e.g. the first
                    # side input has input_index 1, the third has input_index 3).
                    # Side inputs are identified by a pair of (original_step_name, input_index).
                    # This field helps uniquely identify them.
                "originNamespace": "A String", # A string containing a more specific namespace of the counter's origin.
                "originalRequestingStepName": "A String", # The step name requesting an operation, such as GBK.
                    # I.e. the ParDo causing a read/write from shuffle to occur, or a
                    # read from side inputs.
                "portion": "A String", # Portion of this counter, either key or value.
                "componentStepName": "A String", # Name of the optimized step being executed by the workers.
                "originalStepName": "A String", # System generated name of the original step in the user's graph, before
                    # optimization.
              },
              "metadata": { # CounterMetadata includes all static non-name non-value counter attributes. # Metadata associated with a counter.
                "standardUnits": "A String", # System defined Units, see above enum.
                "kind": "A String", # Counter aggregation kind.
                "description": "A String", # Human-readable description of the counter semantics.
                "otherUnits": "A String", # A string referring to the unit type.
              },
            },
            "nameAndKind": { # Basic metadata about a counter. # Counter name and aggregation type.
              "kind": "A String", # Counter aggregation kind.
              "name": "A String", # Name of the counter.
            },
            "integer": { # A representation of an int64, n, that is immune to precision loss when encoded in JSON. # Integer value for Sum, Max, Min.
              "lowBits": 42, # The low order bits: n & 0xffffffff.
              "highBits": 42, # The high order bits, including the sign: n >> 32.
            },
            "distribution": { # A metric value representing a distribution. # Distribution data
              "count": { # A representation of an int64, n, that is immune to precision loss when encoded in JSON. # The count of the number of elements present in the distribution.
                "lowBits": 42, # The low order bits: n & 0xffffffff.
                "highBits": 42, # The high order bits, including the sign: n >> 32.
              },
              "min": { # A representation of an int64, n, that is immune to precision loss when encoded in JSON. # The minimum value present in the distribution.
                "lowBits": 42, # The low order bits: n & 0xffffffff.
                "highBits": 42, # The high order bits, including the sign: n >> 32.
              },
              "max": { # A representation of an int64, n, that is immune to precision loss when encoded in JSON. # The maximum value present in the distribution.
                "lowBits": 42, # The low order bits: n & 0xffffffff.
                "highBits": 42, # The high order bits, including the sign: n >> 32.
              },
              "sum": { # A representation of an int64, n, that is immune to precision loss when encoded in JSON. # Use an int64 since we'd prefer the added precision. If overflow is a common
                  # problem we can detect it and use an additional int64 or a double.
                "lowBits": 42, # The low order bits: n & 0xffffffff.
                "highBits": 42, # The high order bits, including the sign: n >> 32.
              },
              "histogram": { # Histogram of value counts for a distribution. # (Optional) Histogram of value counts for the distribution.
                  #
                  # Buckets have an inclusive lower bound and exclusive upper bound and use
                  # "1,2,5 bucketing": The first bucket range is from [0,1) and all subsequent
                  # bucket boundaries are powers of ten multiplied by 1, 2, or 5. Thus, bucket
                  # boundaries are 0, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, ...
                  # Negative values are not supported.
                  # (See the worked bucketing example below.)
                "firstBucketOffset": 42, # Starting index of first stored bucket. The non-inclusive upper-bound of
                    # the ith bucket is given by:
                    #   pow(10,(i-first_bucket_offset)/3) * (1,2,5)[(i-first_bucket_offset)%3]
                "bucketCounts": [ # Counts of values in each bucket. For efficiency, prefix and trailing
                    # buckets with count = 0 are elided. Buckets can store the full range of
                    # values of an unsigned long, with ULLONG_MAX falling into the 59th bucket
                    # with range [1e19, 2e19).
                  "A String",
                ],
              },
              "sumOfSquares": 3.14, # Use a double since the sum of squares is likely to overflow int64.
            },
            "stringList": { # A metric value representing a list of strings. # List of strings, for Set.
              "elements": [ # Elements of the list.
                "A String",
              ],
            },
          },
        ],
        "progress": { # Obsolete in favor of ApproximateReportedProgress and ApproximateSplitRequest. # DEPRECATED in favor of reported_progress.
          "position": { # Position defines a position within a collection of data.  The value # Obsolete.
              # can be either the end position, a key (used with ordered
              # collections), a byte offset, or a record index.
            "end": True or False, # Position is past all other positions. Also useful for the end
                # position of an unbounded range.
            "recordIndex": "A String", # Position is a record index.
            "byteOffset": "A String", # Position is a byte offset.
            "key": "A String", # Position is a string key, ordered lexicographically.
            "concatPosition": { # A position that encapsulates an inner position and an index for the inner # CloudPosition is a concat position.
                # position. A ConcatPosition can be used by a reader of a source that
                # encapsulates a set of other sources.
              "position": # Object with schema name: Position # Position within the inner source.
              "index": 42, # Index of the inner source.
            },
            "shufflePosition": "A String", # CloudPosition is a base64 encoded BatchShufflePosition (with FIXED
                # sharding).
          },
          "remainingTime": "A String", # Obsolete.
          "percentComplete": 3.14, # Obsolete.
        },
        "metricUpdates": [ # DEPRECATED in favor of counter_updates.
          { # Describes the state of a metric.
            "meanCount": "", # Worker-computed aggregate value for the "Mean" aggregation kind.
                # This holds the count of the aggregated values and is used in combination
                # with mean_sum below to obtain the actual mean aggregate value.
                # The only possible value type is Long.
            "kind": "A String", # Metric aggregation kind.  The possible metric aggregation kinds are
                # "Sum", "Max", "Min", "Mean", "Set", "And", "Or", and "Distribution".
                # The specified aggregation kind is case-insensitive.
                #
                # If omitted, this is not an aggregated value but instead
                # a single metric sample value.
            "set": "", # Worker-computed aggregate value for the "Set" aggregation kind.  The only
                # possible value type is a list of Values whose type can be Long, Double,
                # or String, according to the metric's type.  All Values in the list must
                # be of the same type.
            "name": { # Identifies a metric, by describing the source which generated the metric. # Name of the metric.
              "origin": "A String", # Origin (namespace) of metric name. May be blank for user-defined metrics;
                  # will be "dataflow" for metrics defined by the Dataflow service or SDK.
              "name": "A String", # Worker-defined metric name.
              "context": { # Zero or more labeled fields which identify the part of the job this
                  # metric is associated with, such as the name of a step or collection.
                  #
                  # For example, built-in counters associated with steps will have
                  # context['step'] = <step-name>. Counters associated with PCollections
                  # in the SDK will have context['pcollection'] = <pcollection-name>.
                "a_key": "A String",
              },
            },
            "meanSum": "", # Worker-computed aggregate value for the "Mean" aggregation kind.
                # This holds the sum of the aggregated values and is used in combination
                # with mean_count above to obtain the actual mean aggregate value.
                # The only possible value types are Long and Double.
            "cumulative": True or False, # True if this metric is reported as the total cumulative aggregate
                # value accumulated since the worker started working on this WorkItem.
                # By default this is false, indicating that this metric is reported
                # as a delta that is not associated with any WorkItem.
            "updateTime": "A String", # Timestamp associated with the metric value. Optional when workers are
                # reporting work progress; it will be filled in responses from the
                # metrics API.
            "scalar": "", # Worker-computed aggregate value for aggregation kinds "Sum", "Max", "Min",
                # "And", and "Or".  The possible value types are Long, Double, and Boolean.
            "internal": "", # Worker-computed aggregate value for internal use by the Dataflow
                # service.
            "gauge": "", # A struct value describing properties of a Gauge.
                # Metrics of gauge type show the value of a metric across time, and are
                # aggregated based on the newest value.
            "distribution": "", # A struct value describing properties of a distribution of numeric values.
          },
        ],
        "reportedProgress": { # A progress measurement of a WorkItem by a worker. # The worker's progress through this WorkItem.
          "fractionConsumed": 3.14, # Completion as fraction of the input consumed, from 0.0 (beginning, nothing
              # consumed), to 1.0 (end of the input, entire input consumed).
          "position": { # Position defines a position within a collection of data.  The value # A Position within the work to represent progress.
              # can be either the end position, a key (used with ordered
              # collections), a byte offset, or a record index.
            "end": True or False, # Position is past all other positions. Also useful for the end
                # position of an unbounded range.
            "recordIndex": "A String", # Position is a record index.
            "byteOffset": "A String", # Position is a byte offset.
            "key": "A String", # Position is a string key, ordered lexicographically.
            "concatPosition": { # A position that encapsulates an inner position and an index for the inner # CloudPosition is a concat position.
                # position. A ConcatPosition can be used by a reader of a source that
                # encapsulates a set of other sources.
              "position": # Object with schema name: Position # Position within the inner source.
              "index": 42, # Index of the inner source.
            },
            "shufflePosition": "A String", # CloudPosition is a base64 encoded BatchShufflePosition (with FIXED
                # sharding).
          },
          "remainingParallelism": { # Represents the level of parallelism in a WorkItem's input, reported by the worker. # Total amount of parallelism in the input of this task that remains,
              # (i.e. can be delegated to this task and any new tasks via dynamic
              # splitting). Always at least 1 for non-finished work items and 0 for
              # finished.
              #
              # "Amount of parallelism" refers to how many non-empty parts of the input
              # can be read in parallel. This does not necessarily equal the number
              # of records. An input that can be read in parallel down to the
              # individual records is called "perfectly splittable".
              # An example of non-perfectly parallelizable input is a block-compressed
              # file format where a block of records has to be read as a whole,
              # but different blocks can be read in parallel.
              #
              # Examples:
              # * If we are processing record #30 (starting at 1) out of 50 in a perfectly
              #   splittable 50-record input, this value should be 21 (20 remaining + 1
              #   current).
              # * If we are reading through block 3 in a block-compressed file consisting
              #   of 5 blocks, this value should be 3 (since blocks 4 and 5 can be
              #   processed in parallel by new tasks via dynamic splitting and the current
              #   task remains processing block 3).
              # * If we are reading through the last block in a block-compressed file,
              #   or reading or processing the last record in a perfectly splittable
              #   input, this value should be 1, because apart from the current task, no
              #   additional remainder can be split off.
              # (These numbers are worked through in a short example below.)
            "isInfinite": True or False, # Specifies whether the parallelism is infinite. If true, "value" is
                # ignored.
                # Infinite parallelism means the service will assume that the work item
                # can always be split into more non-empty work items by dynamic splitting.
                # This is a work-around for lack of support for infinity by the current
                # JSON-based Java RPC stack.
            "value": 3.14, # Specifies the level of parallelism in case it is finite.
          },
          "consumedParallelism": { # Represents the level of parallelism in a WorkItem's input, reported by the worker. # Total amount of parallelism in the portion of input of this task that has
              # already been consumed and is no longer active. In the first two examples
              # above (see remaining_parallelism), the value should be 29 or 2
              # respectively.  The sum of remaining_parallelism and consumed_parallelism
              # should equal the total amount of parallelism in this work item.  If
              # specified, must be finite.
            "isInfinite": True or False, # Specifies whether the parallelism is infinite. If true, "value" is
                # ignored.
                # Infinite parallelism means the service will assume that the work item
                # can always be split into more non-empty work items by dynamic splitting.
                # This is a work-around for lack of support for infinity by the current
                # JSON-based Java RPC stack.
            "value": 3.14, # Specifies the level of parallelism in case it is finite.
          },
        },
      },
    ],
    "location": "A String", # The [regional endpoint]
        # (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that
        # contains the WorkItem's job.
  }

  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # Response from a request to report the status of WorkItems.
    "workItemServiceStates": [ # A set of messages indicating the service-side state for each
        # WorkItem whose status was reported, in the same order as the
        # WorkItemStatus messages in the ReportWorkItemStatusRequest that
        # resulted in this response.
      { # The Dataflow service's idea of the current state of a WorkItem
          # being processed by a worker.
        "reportStatusInterval": "A String", # New recommended reporting interval.
        "suggestedStopPosition": { # Position defines a position within a collection of data.  The value # Obsolete, always empty.
            # can be either the end position, a key (used with ordered
            # collections), a byte offset, or a record index.
          "end": True or False, # Position is past all other positions. Also useful for the end
              # position of an unbounded range.
          "recordIndex": "A String", # Position is a record index.
          "byteOffset": "A String", # Position is a byte offset.
          "key": "A String", # Position is a string key, ordered lexicographically.
          "concatPosition": { # A position that encapsulates an inner position and an index for the inner # CloudPosition is a concat position.
              # position. A ConcatPosition can be used by a reader of a source that
              # encapsulates a set of other sources.
            "position": # Object with schema name: Position # Position within the inner source.
            "index": 42, # Index of the inner source.
          },
          "shufflePosition": "A String", # CloudPosition is a base64 encoded BatchShufflePosition (with FIXED
              # sharding).
        },
        "leaseExpireTime": "A String", # Time at which the current lease will expire.
        "nextReportIndex": "A String", # The index value to use for the next report sent by the worker.
            # Note: If the report call fails for whatever reason, the worker should
            # reuse this index for subsequent report attempts.
        "harnessData": { # Other data returned by the service, specific to the particular
            # worker harness.
          "a_key": "", # Properties of the object.
        },
        "metricShortId": [ # The short ids that workers should use in subsequent metric updates.
            # Workers should strive to use short ids whenever possible, but it is ok
            # to request the short_id again if a worker lost track of it
            # (e.g. if the worker is recovering from a crash).
            # NOTE: it is possible that the response may have short ids for a subset
            # of the metrics.
          { # The metric short id is returned to the user alongside an offset into
              # ReportWorkItemStatusRequest
            "shortId": "A String", # The service-generated short identifier for the metric.
            "metricIndex": 42, # The index of the corresponding metric in
                # the ReportWorkItemStatusRequest. Required.
          },
        ],
        "splitRequest": { # A suggestion by the service to the worker to dynamically split the WorkItem. # The progress point in the WorkItem where the Dataflow service
            # suggests that the worker truncate the task.
          "fractionConsumed": 3.14, # A fraction at which to split the work item, from 0.0 (beginning of the
              # input) to 1.0 (end of the input).
          "position": { # Position defines a position within a collection of data.  The value # A Position at which to split the work item.
              # can be either the end position, a key (used with ordered
              # collections), a byte offset, or a record index.
            "end": True or False, # Position is past all other positions. Also useful for the end
                # position of an unbounded range.
            "recordIndex": "A String", # Position is a record index.
            "byteOffset": "A String", # Position is a byte offset.
            "key": "A String", # Position is a string key, ordered lexicographically.
            "concatPosition": { # A position that encapsulates an inner position and an index for the inner # CloudPosition is a concat position.
                # position. A ConcatPosition can be used by a reader of a source that
                # encapsulates a set of other sources.
              "position": # Object with schema name: Position # Position within the inner source.
              "index": 42, # Index of the inner source.
            },
            "shufflePosition": "A String", # CloudPosition is a base64 encoded BatchShufflePosition (with FIXED
                # sharding).
          },
          "fractionOfRemainder": 3.14, # The fraction of the remainder of work to split the work item at, from 0.0
              # (split at the current position) to 1.0 (end of the input).
              # (See the split arithmetic example below.)
        },
        "suggestedStopPoint": { # Obsolete in favor of ApproximateReportedProgress and ApproximateSplitRequest. # DEPRECATED in favor of split_request.
          "position": { # Position defines a position within a collection of data.  The value # Obsolete.
              # can be either the end position, a key (used with ordered
              # collections), a byte offset, or a record index.
            "end": True or False, # Position is past all other positions. Also useful for the end
                # position of an unbounded range.
            "recordIndex": "A String", # Position is a record index.
            "byteOffset": "A String", # Position is a byte offset.
            "key": "A String", # Position is a string key, ordered lexicographically.
            "concatPosition": { # A position that encapsulates an inner position and an index for the inner # CloudPosition is a concat position.
                # position. A ConcatPosition can be used by a reader of a source that
                # encapsulates a set of other sources.
              "position": # Object with schema name: Position # Position within the inner source.
              "index": 42, # Index of the inner source.
            },
            "shufflePosition": "A String", # CloudPosition is a base64 encoded BatchShufflePosition (with FIXED
                # sharding).
          },
          "remainingTime": "A String", # Obsolete.
          "percentComplete": 3.14, # Obsolete.
        },
      },
    ],
    "unifiedWorkerResponse": { # Untranslated bag-of-bytes WorkProgressUpdateResponse for UnifiedWorker.
      "a_key": "", # Properties of the object. Contains field @type with type URL.
    },
  }</pre>
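<p>Example (not part of the generated reference): a minimal sketch of building one WorkItemStatus and sending it with reportStatus. Field names follow the ReportWorkItemStatusRequest schema above; the project, location, job, work item and counter names are placeholders, and "SUM" is assumed to be an accepted counter aggregation kind. Integer counter values use the SplitInt64 form (lowBits/highBits) described above.</p>
<pre>
from googleapiclient.discovery import build

def to_split_int64(n):
  # lowBits is n & 0xffffffff; highBits is n >> 32 (Python's arithmetic
  # right shift keeps the sign, matching "including the sign" above).
  return {"lowBits": n & 0xffffffff, "highBits": n >> 32}

def from_split_int64(s):
  # Inverse of to_split_int64, for decoding values returned by the service.
  return (s["highBits"] << 32) | s["lowBits"]

assert from_split_int64(to_split_int64(-1)) == -1

service = build("dataflow", "v1b3")  # uses application default credentials

status = {
    "workItemId": "1234567890",  # placeholder WorkItem ID
    "completed": False,
    "counterUpdates": [{
        "nameAndKind": {"name": "records-read", "kind": "SUM"},  # assumed kind
        "cumulative": True,
        "integer": to_split_int64(5 + (7 << 32)),  # lowBits=5, highBits=7
    }],
}

response = service.projects().locations().jobs().workItems().reportStatus(
    projectId="my-project",   # placeholder
    location="us-central1",   # placeholder regional endpoint
    jobId="my-job-id",        # placeholder
    body={"workerId": "worker-0", "workItemStatuses": [status]},
).execute()
</pre>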
</div>
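<p>Example: one consistent reading of the "1,2,5 bucketing" used by the distribution histogram above (a sketch; the helper names are ours, not part of the API). With a 0-based absolute bucket index j, bucket 0 is [0,1) and the exclusive upper bound of bucket j is 10**(j//3) * (1,2,5)[j%3]; under this reading, bucketCounts[k] counts values in absolute bucket firstBucketOffset + k.</p>
<pre>
def bucket_upper_bound(j):
  # Exclusive upper bound of absolute bucket j: 1, 2, 5, 10, 20, 50, 100, ...
  return 10 ** (j // 3) * (1, 2, 5)[j % 3]

def bucket_index(value):
  # Absolute index of the 1,2,5 bucket containing a non-negative value.
  j = 0
  while bucket_upper_bound(j) <= value:
    j += 1
  return j

assert bucket_index(0) == 0           # [0, 1)
assert bucket_index(7) == 3           # boundaries 0, 1, 2, 5, 10 -> [5, 10)
assert bucket_index(2**64 - 1) == 58  # [1e19, 2e19), the 59th bucket
</pre>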
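<p>Example: the arithmetic behind remaining_parallelism, consumed_parallelism and fraction_of_remainder above, using the schema's own numbers. The absolute_split_fraction helper is a natural reading of fraction_of_remainder, not an API call.</p>
<pre>
# Perfectly splittable 50-record input, currently processing record 30
# (1-based): records 30..50 remain (including the current one), 1..29 are done.
remaining_parallelism = 50 - 30 + 1  # 21, as in the schema's first example
consumed_parallelism = 30 - 1        # 29, as under consumed_parallelism
assert remaining_parallelism + consumed_parallelism == 50

def absolute_split_fraction(fraction_consumed, fraction_of_remainder):
  # Map a fraction of the remaining work onto a fraction of the whole input.
  return fraction_consumed + fraction_of_remainder * (1.0 - fraction_consumed)

# 40% of the input consumed plus a suggestion to split off half the remainder
# puts the split point at 70% of the whole input.
assert abs(absolute_split_fraction(0.4, 0.5) - 0.7) < 1e-9
</pre>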
</body></html>