syntax = "proto3";

package tensorflow;

import "tensorflow/core/framework/cost_graph.proto";
import "tensorflow/core/framework/graph.proto";
import "tensorflow/core/framework/step_stats.proto";
import "tensorflow/core/protobuf/cluster.proto";
import "tensorflow/core/protobuf/debug.proto";
import "tensorflow/core/protobuf/rewriter_config.proto";

option cc_enable_arenas = true;
option java_outer_classname = "ConfigProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
message GPUOptions {
  // Fraction of the available GPU memory to allocate for each process.
  // 1 means to allocate all of the GPU memory, 0.5 means the process
  // allocates up to ~50% of the available GPU memory.
  //
  // GPU memory is pre-allocated unless the allow_growth option is enabled.
  //
  // If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
  // the amount of memory available on the GPU device by using host memory as a
  // swap space. Accessing memory not available on the device will be
  // significantly slower as that would require memory transfer between the host
  // and the device. Options to reduce the memory requirement should be
  // considered before enabling this option as this may come with a negative
  // performance impact. Oversubscription using the unified memory requires
  // Pascal class or newer GPUs and it is currently only supported on the Linux
  // operating system. See
  // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
  // for the detailed requirements.
  double per_process_gpu_memory_fraction = 1;
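
  // For illustration, a hedged textproto sketch of a ConfigProto that limits
  // each process to roughly 40% of GPU memory (the value is an example only):
  //
  //   gpu_options {
  //     per_process_gpu_memory_fraction: 0.4
  //   }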

  // If true, the allocator does not pre-allocate the entire specified
  // GPU memory region, instead starting small and growing as needed.
  bool allow_growth = 4;

  // The type of GPU allocation strategy to use.
  //
  // Allowed values:
  // "": The empty string (default) uses a system-chosen default
  //     which may change over time.
  //
  // "BFC": A "Best-fit with coalescing" algorithm, simplified from a
  //        version of dlmalloc.
  string allocator_type = 2;

  // Delay deletion of up to this many bytes to reduce the number of
  // interactions with GPU driver code.  If 0, the system chooses
  // a reasonable default (several MBs).
  int64 deferred_deletion_bytes = 3;

  // A comma-separated list of GPU ids that determines the 'visible'
  // to 'virtual' mapping of GPU devices.  For example, if TensorFlow
  // can see 8 GPU devices in the process, and one wanted to map
  // visible GPU devices 5 and 3 as "/device:GPU:0" and "/device:GPU:1",
  // then one would specify this field as "5,3".  This field is similar in
  // spirit to the CUDA_VISIBLE_DEVICES environment variable, except
  // it applies to the visible GPU devices in the process.
  //
  // NOTE:
  // 1. The GPU driver provides the process with the visible GPUs
  //    in an order which is not guaranteed to have any correlation to
  //    the *physical* GPU id in the machine.  This field is used for
  //    remapping "visible" to "virtual", which means this operates only
  //    after the process starts.  Users are required to use vendor
  //    specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
  //    physical to visible device mapping prior to invoking TensorFlow.
  // 2. In the code, the ids in this list are also called "platform GPU id"s,
  //    and the 'virtual' ids of GPU devices (i.e. the ids in the device
  //    name "/device:GPU:<id>") are also called "TF GPU id"s. Please
  //    refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
  //    for more information.
  string visible_device_list = 5;
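
  // As a hedged illustration of the mapping described above (device ids are
  // examples only), a ConfigProto containing
  //
  //   gpu_options {
  //     visible_device_list: "5,3"
  //   }
  //
  // exposes visible GPU 5 as "/device:GPU:0" and visible GPU 3 as
  // "/device:GPU:1".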

  // In the event polling loop, sleep this many microseconds between
  // PollEvents calls when the queue is not empty.  If the value is not
  // set, or is set to 0, it is replaced with a non-zero default.
  int32 polling_active_delay_usecs = 6;

  // This field is deprecated and ignored.
  int32 polling_inactive_delay_msecs = 7;

  // Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
  // enabling this option forces all CPU tensors to be allocated with CUDA
  // pinned memory. Normally, TensorFlow will infer which tensors should be
  // allocated as pinned memory. But in cases where the inference is
  // incomplete, this option can significantly speed up cross-device memory
  // copy performance as long as the data fits in memory.
  // Note that this option is not something that should be
  // enabled by default for unknown or very large models, since all CUDA pinned
  // memory is unpageable; having too much pinned memory might negatively impact
  // the overall host system performance.
  bool force_gpu_compatible = 8;

  message Experimental {
    // Configuration for breaking down a visible GPU into multiple "virtual"
    // devices.
    message VirtualDevices {
      // Per "virtual" device memory limit, in MB. The number of elements in
      // the list is the number of virtual devices to create on the
      // corresponding visible GPU (see "virtual_devices" below).
      // If empty, it will create a single virtual device taking all available
      // memory from the device.
      //
      // For the concept of "visible" and "virtual" GPU, see the comments for
      // "visible_device_list" above for more information.
      repeated float memory_limit_mb = 1;

      // Priority values to use with the virtual devices. Use the CUDA function
      // cudaDeviceGetStreamPriorityRange to query the valid range of values
      // for priority.
      //
      // On a P4000 GPU with CUDA 10.1, the priority range reported was 0 for
      // least priority and -1 for greatest priority.
      //
      // If this field is not specified, then the virtual devices will be
      // created with the default priority. If this field has values set, then
      // its size must match that of memory_limit_mb above.
      repeated int32 priority = 2;
    }

    // The multi virtual device settings. If empty (not set), it will create
    // a single virtual device on each visible GPU, according to the settings
    // in "visible_device_list" above. Otherwise, the number of elements in the
    // list must be the same as the number of visible GPUs (after
    // "visible_device_list" filtering if it is set), and the string-represented
    // device names (e.g. /device:GPU:<id>) will refer to the virtual
    // devices and have the <id> field assigned sequentially starting from 0,
    // according to the order they appear in this list and the "memory_limit"
    // list inside each element. For example,
    //   visible_device_list = "1,0"
    //   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
    //   virtual_devices {}
    // will create three virtual devices as:
    //   /device:GPU:0 -> visible GPU 1 with 1GB memory
    //   /device:GPU:1 -> visible GPU 1 with 2GB memory
    //   /device:GPU:2 -> visible GPU 0 with all available memory
    //
    // NOTE:
    // 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
    //    at the same time.
    // 2. Currently this setting is per-process, not per-session. Using
    //    different settings in different sessions within the same process will
    //    result in undefined behavior.
    repeated VirtualDevices virtual_devices = 1;

    // If true, uses CUDA unified memory for memory allocations. If
    // per_process_gpu_memory_fraction option is greater than 1.0, then unified
    // memory is used regardless of the value for this field. See comments for
    // per_process_gpu_memory_fraction field for more details and requirements
    // of the unified memory. This option is useful to oversubscribe memory if
    // multiple processes are sharing a single GPU while each individually uses
    // less than a 1.0 per-process memory fraction.
    bool use_unified_memory = 2;

    // If > 1, the number of device-to-device copy streams to create
    // for each GPUDevice.  Default value is 0, which is automatically
    // converted to 1.
    int32 num_dev_to_dev_copy_streams = 3;

    // If non-empty, defines a good GPU ring order on a single worker based on
    // device interconnect.  This assumes that all workers have the same GPU
    // topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
    // This ring order is used by the RingReducer implementation of
    // CollectiveReduce, and serves as an override to automatic ring order
    // generation in OrderTaskDeviceMap() during CollectiveParam resolution.
    string collective_ring_order = 4;

    // If true then extra work is done by GPUDevice and GPUBFCAllocator to
    // keep track of when GPU memory is freed and when kernels actually
    // complete so that we can know when a nominally free memory chunk
    // is really not subject to pending use.
    bool timestamped_allocator = 5;

    // reserved id: 6

    // Parameters for GPUKernelTracker.  By default no kernel tracking is done.
    // Note that timestamped_allocator is only effective if some tracking is
    // specified.
    //
    // If kernel_tracker_max_interval = n > 0, then a tracking event
    // is inserted after every n kernels without an event.
    int32 kernel_tracker_max_interval = 7;
    // If kernel_tracker_max_bytes = n > 0, then a tracking event is
    // inserted after every series of kernels allocating a sum of
    // memory >= n.  If one kernel allocates b * n bytes, then one
    // event will be inserted after it, but it will count as b against
    // the pending limit.
    int32 kernel_tracker_max_bytes = 8;
    // If kernel_tracker_max_pending > 0 then no more than this many
    // tracking events can be outstanding at a time.  An attempt to
    // launch an additional kernel will stall until an event
    // completes.
    int32 kernel_tracker_max_pending = 9;
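
    // As a hedged illustration only (the values are arbitrary examples),
    // enabling timestamped_allocator together with kernel tracking might be
    // expressed as:
    //
    //   experimental {
    //     timestamped_allocator: true
    //     kernel_tracker_max_interval: 32
    //     kernel_tracker_max_pending: 64
    //   }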
  }

  // Everything inside experimental is subject to change and is not subject
  // to API stability guarantees in
  // https://www.tensorflow.org/guide/version_compat.
  Experimental experimental = 9;
}

// Options passed to the graph optimizer
message OptimizerOptions {
  // If true, optimize the graph using common subexpression elimination.
  bool do_common_subexpression_elimination = 1;

  // If true, perform constant folding optimization on the graph.
  bool do_constant_folding = 2;

  // Constant folding optimization replaces tensors whose values can be
  // predetermined with constant nodes. To avoid inserting overly large
  // constants, the size of each constant created can be limited. If this
  // value is zero, a default limit of 10 MiB will be applied. If constant
  // folding optimization is disabled, this value is ignored.
  int64 max_folded_constant_in_bytes = 6;

  // If true, perform function inlining on the graph.
  bool do_function_inlining = 4;

  // Optimization level
  enum Level {
    // L1 is the default level.
    // Optimizations performed at L1:
    // 1. Common subexpression elimination
    // 2. Constant folding
    L1 = 0;

    // No optimizations
    L0 = -1;
  }

  // Overall optimization level. The actual optimizations applied will be the
  // logical OR of the flags that this level implies and any flags already set.
  Level opt_level = 3;

  // Control the use of the compiler/jit.  Experimental.
  enum GlobalJitLevel {
    DEFAULT = 0;  // Default setting ("off" now, but later expected to be "on")
    OFF = -1;
    // The following settings turn on compilation, with higher values being
    // more aggressive.  Higher values may reduce opportunities for parallelism
    // and may use more memory.  (At present, there is no distinction, but this
    // is expected to change.)
    ON_1 = 1;
    ON_2 = 2;
  }
  GlobalJitLevel global_jit_level = 5;
}
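
// As a hedged illustration of the logical-OR semantics described above
// (field values are examples only), the following sketch enables only
// function inlining, since L0 implies no optimizations:
//
//   optimizer_options {
//     opt_level: L0
//     do_function_inlining: true
//   }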

message GraphOptions {
  // Removed, use optimizer_options below.
  reserved "skip_common_subexpression_elimination";
  reserved 1;

  // If true, use control flow to schedule the activation of Recv nodes.
  // (Currently ignored.)
  bool enable_recv_scheduling = 2;

  // Options controlling how graph is optimized.
  OptimizerOptions optimizer_options = 3;

  // The number of steps to run before returning a cost model detailing
  // the memory usage and performance of each node of the graph. 0 means
  // no cost model.
  int64 build_cost_model = 4;

  // The number of steps to skip before collecting statistics for the
  // cost model.
  int64 build_cost_model_after = 9;

  // Annotate each Node with Op output shape data, to the extent it can
  // be statically inferred.
  bool infer_shapes = 5;

  // Only place the subgraphs that are run, rather than the entire graph.
  //
  // This is useful for interactive graph building, where one might
  // produce graphs that cannot be placed during the debugging
  // process.  In particular, it allows the client to continue work in
  // a session after adding a node to a graph whose placement
  // constraints are unsatisfiable.
  bool place_pruned_graph = 6;

  // If true, transfer float values between processes as bfloat16.
  bool enable_bfloat16_sendrecv = 7;

  // If > 0, record a timeline every this many steps.
  // EXPERIMENTAL: This currently has no effect in MasterSession.
  int32 timeline_step = 8;

  // Options that control the type and amount of graph rewriting.
  // Not currently configurable via the public Python API (i.e. there is no API
  // stability guarantee if you import RewriterConfig explicitly).
  RewriterConfig rewrite_options = 10;
}

message ThreadPoolOptionProto {
  // The number of threads in the pool.
  //
  // 0 means the system picks a value based on where this option proto is used
  // (see the declaration of the specific field for more info).
  int32 num_threads = 1;

  // The global name of the threadpool.
  //
  // If empty, then the threadpool is made and used according to the scope it's
  // in - e.g., for a session threadpool, it is used by that session only.
  //
  // If non-empty, then:
  // - a global threadpool associated with this name is looked
  //   up or created. This allows, for example, sharing one threadpool across
  //   many sessions (e.g., like the default behavior, if
  //   inter_op_parallelism_threads is not configured), but still partitioning
  //   into a large and small pool.
  // - if the threadpool for this global_name already exists, then it is an
  //   error if the existing pool was created using a different num_threads
  //   value than is specified on this call.
  // - threadpools created this way are never garbage collected.
  string global_name = 2;
}
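
// As a hedged illustration of pool sharing (the name and size are examples
// only), two sessions that both configure
//
//   session_inter_op_thread_pool {
//     num_threads: 2
//     global_name: "shared_low_priority_pool"
//   }
//
// will look up and share the same two-thread global pool.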

message RPCOptions {
  // If true, always use RPC to contact the session target.
  //
  // If false (the default option), TensorFlow may use an optimized
  // transport for client-master communication that avoids the RPC
  // stack. This option is primarily used for testing the RPC stack.
  bool use_rpc_for_inprocess_master = 1;

  // The compression algorithm to be used. One of "deflate", "gzip".
  string compression_algorithm = 2;

  // If compression_algorithm is set, the compression level to be used.
  // From 0 (no compression), up to 3.
  int32 compression_level = 3;

  // Setting cache_rpc_response to true will enable sender side caching of
  // response for RecvTensorAsync and RecvBufAsync to allow receiver to retry
  // requests. This is only necessary when the network fabric is experiencing a
  // significant error rate.  Without it we'll fail a step on a network error,
  // while with it we'll be able to complete long steps (like complex
  // initializations) in the face of some network errors during RecvTensor.
  bool cache_rpc_response = 4;

  // Disables TCP connection sharing when opening a new RPC channel.
  bool disable_session_connection_sharing = 5;
}
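
// A hedged textproto sketch of RPCOptions (the values are examples only)
// that enables gzip compression for the session's RPC traffic:
//
//   rpc_options {
//     compression_algorithm: "gzip"
//     compression_level: 2
//   }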

// Metadata about the session.
//
// This can be used by the runtime and the Ops for debugging, monitoring, etc.
//
// The (name, version) tuple is expected to be a unique identifier for
// sessions within the same process.
//
// NOTE: This is currently used and propagated only by the direct session.
message SessionMetadata {
  string name = 1;

  // The version is optional. If set, needs to be >= 0.
  int64 version = 2;
}

// Session configuration parameters.
// The system picks appropriate values for fields that are not set.
message ConfigProto {
  // Map from device type name (e.g., "CPU" or "GPU") to maximum
  // number of devices of that type to use.  If a particular device
  // type is not found in the map, the system picks an appropriate
  // number.
  map<string, int32> device_count = 1;
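
  // For example (a hedged sketch; the value is illustrative), a session can
  // be kept off all GPUs with:
  //
  //   device_count { key: "GPU" value: 0 }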

  // The execution of an individual op (for some op types) can be
  // parallelized on a pool of intra_op_parallelism_threads.
  // 0 means the system picks an appropriate number.
  //
  // If you create an ordinary session, e.g., from Python or C++,
  // then there is exactly one intra op thread pool per process.
  // The first session created determines the number of threads in this pool.
  // All subsequent sessions reuse/share this one global pool.
  //
  // There are notable exceptions to the default behavior described above:
  // 1. There is an environment variable for overriding this thread pool,
  //    named TF_OVERRIDE_GLOBAL_THREADPOOL.
  // 2. When connecting to a server, such as a remote `tf.train.Server`
  //    instance, then this option will be ignored altogether.
  int32 intra_op_parallelism_threads = 2;

  // Nodes that perform blocking operations are enqueued on a pool of
  // inter_op_parallelism_threads available in each process.
  //
  // 0 means the system picks an appropriate number.
  // Negative means all operations are performed in the caller's thread.
  //
  // Note that the first Session created in the process sets the
  // number of threads for all future sessions unless use_per_session_threads is
  // true or session_inter_op_thread_pool is configured.
  int32 inter_op_parallelism_threads = 5;

  // If true, use a new set of threads for this session rather than the global
  // pool of threads. Only supported by direct sessions.
  //
  // If false, use the global threads created by the first session, or the
  // per-session thread pools configured by session_inter_op_thread_pool.
  //
  // This option is deprecated. The same effect can be achieved by setting
  // session_inter_op_thread_pool to have one element, whose num_threads equals
  // inter_op_parallelism_threads.
  bool use_per_session_threads = 9;

  // This option is experimental - it may be replaced with a different mechanism
  // in the future.
  //
  // Configures session thread pools. If this is configured, then RunOptions for
  // a Run call can select the thread pool to use.
  //
  // The intended use is for when some session invocations need to run in a
  // background pool limited to a small number of threads:
  // - For example, a session may be configured to have one large pool (for
  // regular compute) and one small pool (for periodic, low priority work);
  // using the small pool is currently the mechanism for limiting the inter-op
  // parallelism of the low priority work.  Note that it does not limit the
  // parallelism of work spawned by a single op kernel implementation.
  // - Using this setting is normally not needed in training, but may help some
  // serving use cases.
  // - It is also generally recommended to set the global_name field of this
  // proto, to avoid creating multiple large pools. It is typically better to
  // run the non-low-priority work, even across sessions, in a single large
  // pool. (See the sketch below for an example.)
  repeated ThreadPoolOptionProto session_inter_op_thread_pool = 12;
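
  // A hedged illustration of the large-pool/small-pool pattern (the names
  // and sizes are examples only):
  //
  //   session_inter_op_thread_pool { global_name: "large_pool" }
  //   session_inter_op_thread_pool {
  //     num_threads: 1
  //     global_name: "small_pool"
  //   }
  //
  // A RunOptions.inter_op_thread_pool value of 1 then selects "small_pool"
  // for that Run call.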

  // Assignment of Nodes to Devices is recomputed every placement_period
  // steps until the system warms up (at which point the recomputation
  // typically slows down automatically).
  int32 placement_period = 3;

  // When any filters are present, sessions will ignore all devices which do
  // not match the filters. Each filter can be partially specified, e.g.
  // "/job:ps", "/job:worker/replica:3", etc.
  repeated string device_filters = 4;

  // Options that apply to all GPUs.
  GPUOptions gpu_options = 6;

  // Whether soft placement is allowed. If allow_soft_placement is true,
  // an op will be placed on CPU if
  //   1. there's no GPU implementation for the op,
  // or
  //   2. no GPU devices are known or registered,
  // or
  //   3. it needs to be co-located with reftype input(s) which are from CPU.
  bool allow_soft_placement = 7;

  // Whether device placements should be logged.
  bool log_device_placement = 8;

  // Options that apply to all graphs.
  GraphOptions graph_options = 10;

  // Global timeout for all blocking operations in this session.  If non-zero,
  // and not overridden on a per-operation basis, this value will be used as the
  // deadline for all blocking operations.
  int64 operation_timeout_in_ms = 11;

  // Options that apply when this session uses the distributed runtime.
  RPCOptions rpc_options = 13;

  // Optional list of all workers to use in this session.
  ClusterDef cluster_def = 14;

  // If true, any resources such as Variables used in the session will not be
  // shared with other sessions. However, when clusterspec propagation is
  // enabled, this field is ignored and sessions are always isolated.
  bool isolate_session_state = 15;

  // When true, WorkerSessions are created with device attributes from the
  // full cluster.
  // This is helpful when a worker wants to partition a graph
  // (for example during a PartitionedCallOp).
  bool share_cluster_devices_in_session = 17;

  // Everything inside Experimental is subject to change and is not subject
  // to API stability guarantees in
  // https://www.tensorflow.org/guide/version_compat.
  message Experimental {
    // Task name for group resolution.
    string collective_group_leader = 1;

    // We removed the flag client_handles_error_formatting. Marking the tag
    // number as reserved.
    // TODO(shikharagarwal): Should we just remove this tag so that it can be
    // used in future for other purpose?
    reserved 2;

    // Which executor to use. The default executor will be used
    // if this is an empty string or "DEFAULT".
    string executor_type = 3;

    // Guidance for the formatting of large RecvBuf fields for transfer.
    // Any positive value sets the max chunk size.  0 defaults to 4096.
    // Any negative value indicates no max, i.e. one chunk only.
    int32 recv_buf_max_chunk = 4;

    // If true, and supported by the platform, the runtime will attempt to
    // use NUMA affinity where applicable.  One consequence will be the
    // existence of as many CPU devices as there are available NUMA nodes.
    bool use_numa_affinity = 5;

    // If true, make collective op execution order sequential and deterministic
    // for potentially concurrent collective instances.
    bool collective_deterministic_sequential_execution = 6;

    // If true, use NCCL for CollectiveOps.  This feature is highly
    // experimental.
    bool collective_nccl = 7;

    // In the following, session state means the value of a variable, elements
    // in a hash table, or any other resource, accessible by worker sessions
    // held by a TF server.
    //
    // When ClusterSpec propagation is enabled, the value of
    // isolate_session_state is ignored when deciding whether to share session
    // states in a TF server (for backwards compatibility reasons).
    // - If share_session_state_in_clusterspec_propagation is true, the session
    // states are shared.
    // - If share_session_state_in_clusterspec_propagation is false, session
    // states are isolated.
    //
    // When clusterspec propagation is not used, the value of
    // share_session_state_in_clusterspec_propagation is ignored when deciding
    // whether to share session states in a TF server.
    // - If isolate_session_state is true, session states are isolated.
    // - If isolate_session_state is false, session states are shared.
    //
    // TODO(b/129330037): Add a single API that consistently treats
    // isolate_session_state and ClusterSpec propagation.
    bool share_session_state_in_clusterspec_propagation = 8;

    // If using a direct session, disable spinning while waiting for work in
    // the thread pool. This may result in higher latency for completing ops,
    // but when there is a lot of spinning it may result in lower CPU usage.
    bool disable_thread_spinning = 9;

    // This was promoted to a non-experimental API. Please use
    // ConfigProto.share_cluster_devices_in_session instead.
    bool share_cluster_devices_in_session = 10;

    // Metadata about the session.
    //
    // If set, this can be used by the runtime and the Ops for debugging,
    // monitoring, etc.
    //
    // NOTE: This is currently used and propagated only by the direct session.
    SessionMetadata session_metadata = 11;

    // If true, the session may treat the graph as being static for optimization
    // purposes.
    //
    // If this option is set to true when a session is created, the full
    // GraphDef must be passed in a single call to Session::Create(), and
    // Session::Extend() may not be supported.
    bool optimize_for_static_graph = 12;

    // This field will eventually be deprecated and replaced by
    // mlir_bridge_rollout (b/166038521).
    //
    // Whether to enable the MLIR-based TF->XLA bridge.
    //
    // This is a replacement for the existing bridge, and is not ready for
    // production usage yet.
    // If this option is set to true when a session is created, MLIR is used to
    // perform the set of graph transformations to put the graph in a form that
    // can be executed with delegation of some computations to an accelerator.
    // This builds on the model of XLA where a subset of the graph is
    // encapsulated and attached to a "compile" operation, whose result is fed
    // to an "execute" operation. The kernel for these operations is responsible
    // for lowering the encapsulated graph to a particular device.
    bool enable_mlir_bridge = 13;

    // An enum that describes the state of the MLIR bridge rollout.
    enum MlirBridgeRollout {
      // If this field is left unspecified, the MLIR bridge may be selectively
      // enabled on a per graph basis.
      MLIR_BRIDGE_ROLLOUT_UNSPECIFIED = 0;
      // Enabling the MLIR bridge enables it for all graphs in this session.
      MLIR_BRIDGE_ROLLOUT_ENABLED = 1;
      // Disabling the MLIR bridge disables it for all graphs in this session.
      MLIR_BRIDGE_ROLLOUT_DISABLED = 2;
      // Enable the MLIR bridge on a per graph basis based on an analysis of
      // the features used in the graph. If the features used by the graph are
      // supported by the MLIR bridge, the MLIR bridge will be used to run the
      // graph.
      MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED = 3;
    }
    // This field is under development; for now use enable_mlir_bridge
    // (b/166038521).
    //
    // Whether to enable the MLIR-based TF->XLA bridge.
    MlirBridgeRollout mlir_bridge_rollout = 17;

    // Whether to enable the MLIR-based Graph optimizations.
    //
    // This will become a part of the standard TensorFlow graph optimization
    // pipeline; currently it is only used for gradual migration and for testing
    // new passes that are replacing existing optimizations in Grappler.
    bool enable_mlir_graph_optimization = 16;

    // If true, the session will not store an additional copy of the graph for
    // each subgraph.
    //
    // If this option is set to true when a session is created, the
    // `RunOptions.output_partition_graphs` options must not be set.
    bool disable_output_partition_graphs = 14;

    // Minimum number of batches run through the XLA graph before XLA fusion
    // autotuner is enabled. Default value of zero disables the autotuner.
    //
    // The XLA fusion autotuner can improve performance by executing a heuristic
    // search on the compiler parameters.
    int64 xla_fusion_autotuner_thresh = 15;

    // Whether runtime execution uses TFRT.
    bool use_tfrt = 18;

    // Next: 19
  }

  Experimental experimental = 16;

  // Next: 18
}

// Options for a single Run() call.
message RunOptions {
  // TODO(pbar) Turn this into a TraceOptions proto which allows
  // tracing to be controlled in a more orthogonal manner?
  enum TraceLevel {
    NO_TRACE = 0;
    SOFTWARE_TRACE = 1;
    HARDWARE_TRACE = 2;
    FULL_TRACE = 3;
  }
  TraceLevel trace_level = 1;

  // Time to wait for operation to complete in milliseconds.
  int64 timeout_in_ms = 2;

  // The thread pool to use, if session_inter_op_thread_pool is configured.
  // To use the caller thread, set this to -1; this executes Session::Run()
  // in the caller thread and thus avoids a context switch. Using the
  // caller thread to execute Session::Run() should be done ONLY for simple
  // graphs, where the overhead of an additional context switch is
  // comparable with the overhead of Session::Run().
  int32 inter_op_thread_pool = 3;
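
  // For example (a hedged sketch; the index is illustrative), a RunOptions
  // textproto selecting the second configured session pool:
  //
  //   inter_op_thread_pool: 1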

  // Whether the partition graph(s) executed by the executor(s) should be
  // output via RunMetadata.
  bool output_partition_graphs = 5;

  // EXPERIMENTAL.  Options used to initialize DebuggerState, if enabled.
  DebugOptions debug_options = 6;

  // When enabled, causes tensor allocation information to be included in
  // the error message when the Run() call fails because the allocator ran
  // out of memory (OOM).
  //
  // Enabling this option can slow down the Run() call.
  bool report_tensor_allocations_upon_oom = 7;

  // Everything inside Experimental is subject to change and is not subject
  // to API stability guarantees in
  // https://www.tensorflow.org/guide/version_compat.
  message Experimental {
    // If non-zero, declares that this graph is going to use collective
    // ops and must synchronize step_ids with any other graph with this
    // same group_key value (in a distributed computation where tasks
    // run disjoint graphs).
    int64 collective_graph_key = 1;
    // If true, then operations (using the inter-op pool) across all
    // Session::Run() calls will be centrally scheduled, optimizing for (median
    // and tail) latency.
    // Consider using this option for CPU-bound workloads like inference.
    bool use_run_handler_pool = 2;
    // Options for run handler thread pool.
    message RunHandlerPoolOptions {
      // Priority of the request. The run handler thread pool will schedule ops
      // based on the priority number. A larger number means a higher priority.
      int64 priority = 1;
    }
    RunHandlerPoolOptions run_handler_pool_options = 3;
  }

  Experimental experimental = 8;

  reserved 4;
}

// Metadata output (i.e., non-Tensor) for a single Run() call.
message RunMetadata {
  // Statistics traced for this step. Populated if tracing is turned on via the
  // "RunOptions" proto.
  // EXPERIMENTAL: The format and set of events may change in future versions.
  StepStats step_stats = 1;

  // The cost graph for the computation defined by the run call.
  CostGraphDef cost_graph = 2;

  // Graphs of the partitions executed by executors.
  repeated GraphDef partition_graphs = 3;

  message FunctionGraphs {
    // TODO(nareshmodi): Include some sort of function/cache-key identifier?
    repeated GraphDef partition_graphs = 1;

    GraphDef pre_optimization_graph = 2;
    GraphDef post_optimization_graph = 3;
  }
  // This is only populated for graphs that are run as functions in TensorFlow
  // V2. There will be an entry below for each function that is traced.
  // The main use cases of the post_optimization_graph and the partition_graphs
  // are to give the caller insight into the graphs that were actually run by
  // the runtime. Additional information (such as those in step_stats) will
  // match these graphs.
  // We also include the pre_optimization_graph since it is usually easier to
  // read, and is helpful in situations where the caller wants to get a high
  // level idea of what the built graph looks like (since the various graph
  // optimization passes might change the structure of the graph significantly).
  repeated FunctionGraphs function_graphs = 4;
}

// Defines a connection between two tensors in a `GraphDef`.
message TensorConnection {
  // A tensor name. The value of this tensor will be substituted for
  // the tensor named in `to_tensor`.
  string from_tensor = 1;

  // A tensor name. The value of this tensor will be bound to the
  // value of the tensor named in `from_tensor`.
  string to_tensor = 2;
}
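
// As a hedged illustration (the tensor names are examples only), substituting
// the output of one node for a placeholder:
//
//   tensor_connection {
//     from_tensor: "W/read:0"
//     to_tensor: "x:0"
//   }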

// Defines a subgraph in another `GraphDef` as a set of feed points and nodes
// to be fetched or executed.
//
// Compare with the arguments to `Session::Run()`.
message CallableOptions {
  // Tensors to be fed in the callable. Each feed is the name of a tensor.
  repeated string feed = 1;

  // Fetches. A list of tensor names. The caller of the callable expects a
  // tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
  // order of specified fetches does not change the execution order.
  repeated string fetch = 2;

  // Target Nodes. A list of node names. The named nodes will be run by the
  // callable but their outputs will not be returned.
  repeated string target = 3;

  // Options that will be applied to each run.
  RunOptions run_options = 4;

  // Tensors to be connected in the callable. Each TensorConnection denotes
  // a pair of tensors in the graph, between which an edge will be created
  // in the callable.
  repeated TensorConnection tensor_connection = 5;

  // The Tensor objects fed in the callable and fetched from the callable
  // are expected to be backed by host (CPU) memory by default.
  //
  // The options below allow changing that - feeding tensors backed by
  // device memory, or returning tensors that are backed by device memory.
  //
  // The maps below map the name of a feed/fetch tensor (which appears in
  // 'feed' or 'fetch' fields above), to the fully qualified name of the device
  // owning the memory backing the contents of the tensor.
  //
  // For example, creating a callable with the following options:
  //
  // CallableOptions {
  //   feed: "a:0"
  //   feed: "b:0"
  //
  //   fetch: "x:0"
  //   fetch: "y:0"
  //
  //   feed_devices: {
  //     "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
  //   }
  //
  //   fetch_devices: {
  //     "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
  //   }
  // }
  //
  // means that the Callable expects:
  // - The first argument ("a:0") is a Tensor backed by GPU memory.
  // - The second argument ("b:0") is a Tensor backed by host memory.
  // and of its return values:
  // - The first output ("x:0") will be backed by host memory.
  // - The second output ("y:0") will be backed by GPU memory.
  //
  // FEEDS:
  // It is the responsibility of the caller to ensure that the memory of the fed
  // tensors will be correctly initialized and synchronized before it is
  // accessed by operations executed during the call to Session::RunCallable().
  //
  // This is typically ensured by using the TensorFlow memory allocators
  // (Device::GetAllocator()) to create the Tensor to be fed.
  //
  // Alternatively, for CUDA-enabled GPU devices, this typically means that the
  // operation that produced the contents of the tensor has completed, i.e., the
  // CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
  // cuStreamSynchronize()).
  map<string, string> feed_devices = 6;
  map<string, string> fetch_devices = 7;

  // By default, RunCallable() will synchronize the GPU stream before returning
  // fetched tensors on a GPU device, to ensure that the values in those tensors
  // have been produced. This simplifies interacting with the tensors, but
  // potentially incurs a performance hit.
  //
  // If this option is set to true, the caller is responsible for ensuring
  // that the values in the fetched tensors have been produced before they are
  // used. The caller can do this by invoking `Device::Sync()` on the underlying
  // device(s), or by feeding the tensors back to the same Session using
  // `feed_devices` with the same corresponding device name.
  bool fetch_skip_sync = 8;
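
  // As a hedged illustration building on the example above (the names are
  // examples only), a caller that performs its own synchronization might set:
  //
  //   fetch_devices: {
  //     "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
  //   }
  //   fetch_skip_sync: true
  //
  // and then invoke Device::Sync() on the GPU device before reading "y:0".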

  // Next: 9
}
