/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_C_EAGER_C_API_H_
#define TENSORFLOW_C_EAGER_C_API_H_

// C API extensions to experiment with eager execution of kernels.
// WARNING: Unlike tensorflow/c/c_api.h, the API here is not guaranteed to be
// stable and can change without notice.

#include "tensorflow/c/c_api.h"

// Macro to control visibility of exported symbols in the shared library (.so,
// .dylib, .dll).
// This duplicates the TF_EXPORT macro definition in
// tensorflow/core/platform/macros.h in order to keep this .h file independent
// of any other includes.
#ifdef SWIG
#define TF_CAPI_EXPORT
#else
#if defined(COMPILER_MSVC)
#ifdef TF_COMPILE_LIBRARY
#define TF_CAPI_EXPORT __declspec(dllexport)
#else
#define TF_CAPI_EXPORT __declspec(dllimport)
#endif  // TF_COMPILE_LIBRARY
#else
#define TF_CAPI_EXPORT __attribute__((visibility("default")))
#endif  // COMPILER_MSVC
#endif  // SWIG

#ifdef __cplusplus
extern "C" {
#endif

typedef struct TFE_ContextOptions TFE_ContextOptions;

// Return a new options object.
TF_CAPI_EXPORT extern TFE_ContextOptions* TFE_NewContextOptions();

// Set the config in TFE_ContextOptions.options.
// 'proto' should point to a serialized tensorflow.ConfigProto of 'proto_len'
// bytes. If it cannot be parsed as a ConfigProto, the error information is
// recorded in *status.
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetConfig(
    TFE_ContextOptions* options, const void* proto, size_t proto_len,
    TF_Status* status);
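
// Example (a minimal sketch; 'config_proto' and 'config_len' are illustrative
// names for a serialized tensorflow.ConfigProto produced elsewhere, e.g. by
// the protobuf library -- they are not part of this API):
//
//   TF_Status* status = TF_NewStatus();
//   TFE_ContextOptions* opts = TFE_NewContextOptions();
//   TFE_ContextOptionsSetConfig(opts, config_proto, config_len, status);
//   if (TF_GetCode(status) != TF_OK) {
//     // 'config_proto' was not a valid serialized ConfigProto.
//   }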

// Controls how to act when we try to run an operation on a given device but
// some input tensors are not on that device.
typedef enum TFE_ContextDevicePlacementPolicy {
  // Running operations with input tensors on the wrong device will fail.
  TFE_DEVICE_PLACEMENT_EXPLICIT = 0,
  // Copy the tensor to the right device but log a warning.
  TFE_DEVICE_PLACEMENT_WARN = 1,
  // Silently copy the tensor, which has a performance cost since the
  // operation will be blocked until the copy completes.
  TFE_DEVICE_PLACEMENT_SILENT = 2,
  // Default placement policy: silently copy int32 tensors, but not tensors of
  // other dtypes.
  TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32 = 3,
} TFE_ContextDevicePlacementPolicy;

TF_CAPI_EXPORT extern void TFE_ContextOptionsSetDevicePlacementPolicy(
    TFE_ContextOptions*, TFE_ContextDevicePlacementPolicy);

// Destroy an options object.
TF_CAPI_EXPORT extern void TFE_DeleteContextOptions(TFE_ContextOptions*);

// "Context" under which operations/functions are executed. It encapsulates
// things like the available devices, resource manager etc.
//
// TODO(ashankar): Merge with TF_Session?
typedef struct TFE_Context TFE_Context;

TF_CAPI_EXPORT extern TFE_Context* TFE_NewContext(
    const TFE_ContextOptions* opts, TF_Status* status);
TF_CAPI_EXPORT extern void TFE_DeleteContext(TFE_Context* ctx,
                                             TF_Status* status);
TF_CAPI_EXPORT extern TF_DeviceList* TFE_ContextListDevices(TFE_Context* ctx,
                                                            TF_Status* status);

// Clears the internal caches in the TFE context. Useful when reseeding random
// ops.
TF_CAPI_EXPORT extern void TFE_ContextClearCaches(TFE_Context* ctx);
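
// Example of the typical setup/teardown sequence (a minimal sketch using only
// functions declared in this header and in tensorflow/c/c_api.h; error
// handling is abbreviated):
//
//   TF_Status* status = TF_NewStatus();
//   TFE_ContextOptions* opts = TFE_NewContextOptions();
//   TFE_ContextOptionsSetDevicePlacementPolicy(opts,
//                                              TFE_DEVICE_PLACEMENT_WARN);
//   TFE_Context* ctx = TFE_NewContext(opts, status);
//   if (TF_GetCode(status) == TF_OK) {
//     TF_DeviceList* devices = TFE_ContextListDevices(ctx, status);
//     // ... inspect with TF_DeviceListCount()/TF_DeviceListName() ...
//     TF_DeleteDeviceList(devices);
//     TFE_DeleteContext(ctx, status);
//   }
//   TFE_DeleteContextOptions(opts);
//   TF_DeleteStatus(status);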

// Sets a thread-local device placement policy. After this call, other calls to
// TFE_Execute in the same thread will use the device policy specified here
// instead of the device policy used to construct the context. This has no
// effect on the device policy used by other program threads.
TF_CAPI_EXPORT extern void TFE_ContextSetThreadLocalDevicePlacementPolicy(
    TFE_Context*, TFE_ContextDevicePlacementPolicy);

// Returns the device placement policy to be used by this context in the
// current thread.
TF_CAPI_EXPORT extern TFE_ContextDevicePlacementPolicy
TFE_ContextGetDevicePlacementPolicy(TFE_Context*);
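
// For example, a thread that wants silent copies regardless of how 'ctx' was
// configured could do the following (sketch; 'ctx' is assumed to be a valid
// TFE_Context*):
//
//   TFE_ContextSetThreadLocalDevicePlacementPolicy(
//       ctx, TFE_DEVICE_PLACEMENT_SILENT);
//   // Subsequent TFE_Execute() calls on this thread use the SILENT policy.
//   TFE_ContextDevicePlacementPolicy p =
//       TFE_ContextGetDevicePlacementPolicy(ctx);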

// A handle to a tensor on a device.
//
// Like a TF_Tensor, a TFE_TensorHandle refers to a tensor with a value, shape,
// type etc. Unlike a TF_Tensor, a TFE_TensorHandle may refer to such tensors
// placed in memory of different devices or remote address spaces.
typedef struct TFE_TensorHandle TFE_TensorHandle;

TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewTensorHandle(TF_Tensor* t,
                                                            TF_Status* status);
TF_CAPI_EXPORT extern void TFE_DeleteTensorHandle(TFE_TensorHandle* h);
TF_CAPI_EXPORT extern TF_DataType TFE_TensorHandleDataType(TFE_TensorHandle* h);
TF_CAPI_EXPORT extern int TFE_TensorHandleNumDims(TFE_TensorHandle* h);
TF_CAPI_EXPORT extern int64_t TFE_TensorHandleDim(TFE_TensorHandle* h,
                                                  int dim_index);
TF_CAPI_EXPORT extern const char* TFE_TensorHandleDeviceName(
    TFE_TensorHandle* h);
TF_CAPI_EXPORT extern TF_Tensor* TFE_TensorHandleResolve(TFE_TensorHandle* h,
                                                         TF_Status* status);
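
// Example (a minimal sketch; 't' is assumed to be a valid TF_Tensor* created
// with the tensor functions from tensorflow/c/c_api.h):
//
//   TF_Status* status = TF_NewStatus();
//   TFE_TensorHandle* h = TFE_NewTensorHandle(t, status);
//   TF_DataType dtype = TFE_TensorHandleDataType(h);
//   int ndims = TFE_TensorHandleNumDims(h);
//   for (int i = 0; i < ndims; ++i) {
//     int64_t d = TFE_TensorHandleDim(h, i);
//   }
//   TF_Tensor* value = TFE_TensorHandleResolve(h, status);  // materialize
//   TF_DeleteTensor(value);
//   TFE_DeleteTensorHandle(h);
//   TF_DeleteStatus(status);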

// Create a new TFE_TensorHandle with the same contents as 'h' but placed
// in the memory of the device named 'device_name'.
// If the source and destination are the same device, this creates a new handle
// that shares the underlying buffer. Otherwise, it currently requires at least
// one of the source or destination devices to be CPU (i.e., for the source or
// destination tensor to be placed in host memory).
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_TensorHandleCopyToDevice(
    TFE_TensorHandle* h, TFE_Context* ctx, const char* device_name,
    TF_Status* status);
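
// Example (sketch; 'gpu_device_name' is an illustrative name assumed to have
// been obtained from TFE_ContextListDevices()/TF_DeviceListName(), and 'h' is
// a valid TFE_TensorHandle*):
//
//   TFE_TensorHandle* h_on_gpu =
//       TFE_TensorHandleCopyToDevice(h, ctx, gpu_device_name, status);
//   if (TF_GetCode(status) == TF_OK) {
//     // 'h' still refers to the original tensor; both handles must
//     // eventually be deleted.
//     TFE_DeleteTensorHandle(h_on_gpu);
//   }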

// Description of the TensorFlow op to execute.
//
// Assumes that the provided 'ctx' outlives the returned TFE_Op, i.e.,
// TFE_DeleteOp() is called before TFE_DeleteContext().
//
// Very similar to TF_OperationDescription with some differences:
// (1) Inputs are TFE_TensorHandle*s (passed to TFE_OpAddInput) instead of
//     TF_Outputs passed to TF_AddInput/TF_AddInputList.
// (2) TF_ColocateWith, TF_AddControlInput etc. do not make sense.
// (3) Implementation detail: Avoid use of NodeBuilder/NodeDefBuilder since
//     the additional sanity checks there seem unnecessary.
typedef struct TFE_Op TFE_Op;

TF_CAPI_EXPORT extern TFE_Op* TFE_NewOp(TFE_Context* ctx,
                                        const char* op_or_function_name,
                                        TF_Status* status);
TF_CAPI_EXPORT extern void TFE_DeleteOp(TFE_Op* op);

TF_CAPI_EXPORT extern void TFE_OpSetDevice(TFE_Op* op, const char* device_name,
                                           TF_Status* status);
// The returned string remains valid throughout the lifetime of 'op'.
TF_CAPI_EXPORT extern const char* TFE_OpGetDevice(TFE_Op* op,
                                                  TF_Status* status);

// When 'enable' is set to 1, and if the TensorFlow library is built with XLA
// support, a subsequent TFE_Execute() call on `op` will run the op via XLA.
//
// If the library is not built with XLA support, this call is a no-op.
TF_CAPI_EXPORT extern void TFE_OpSetXLACompilation(TFE_Op* op,
                                                   unsigned char enable);

TF_CAPI_EXPORT extern void TFE_OpAddInput(TFE_Op* op, TFE_TensorHandle* h,
                                          TF_Status* status);

TF_CAPI_EXPORT extern TF_AttrType TFE_OpGetAttrType(TFE_Op* op,
                                                    const char* attr_name,
                                                    unsigned char* is_list,
                                                    TF_Status* status);
// Get an attribute type given an op name; a fusion of TFE_NewOp and
// TFE_OpGetAttrType for use from Python without the overhead of the individual
// calls and memory management of TFE_Op.
TF_CAPI_EXPORT extern TF_AttrType TFE_OpNameGetAttrType(
    TFE_Context* ctx, const char* op_or_function_name, const char* attr_name,
    unsigned char* is_list, TF_Status* status);
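
// For example, to query the type of the "transpose_a" attribute of the
// "MatMul" op without constructing a TFE_Op (a sketch; MatMul's attribute
// names come from its op registration, not from this header):
//
//   unsigned char is_list = 0;
//   TF_AttrType type = TFE_OpNameGetAttrType(ctx, "MatMul", "transpose_a",
//                                            &is_list, status);
//   // For this attribute, 'type' is TF_ATTR_BOOL and 'is_list' is 0.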

TF_CAPI_EXPORT extern void TFE_OpSetAttrString(TFE_Op* op,
                                               const char* attr_name,
                                               const char* value);
TF_CAPI_EXPORT extern void TFE_OpSetAttrInt(TFE_Op* op, const char* attr_name,
                                            int64_t value);
TF_CAPI_EXPORT extern void TFE_OpSetAttrFloat(TFE_Op* op, const char* attr_name,
                                              float value);
TF_CAPI_EXPORT extern void TFE_OpSetAttrBool(TFE_Op* op, const char* attr_name,
                                             unsigned char value);
TF_CAPI_EXPORT extern void TFE_OpSetAttrType(TFE_Op* op, const char* attr_name,
                                             TF_DataType value);
// If the number of dimensions is unknown, `num_dims` must be set to
// -1 and `dims` can be null.  If a dimension is unknown, the
// corresponding entry in the `dims` array must be -1.
TF_CAPI_EXPORT extern void TFE_OpSetAttrShape(TFE_Op* op, const char* attr_name,
                                              const int64_t* dims,
                                              const int num_dims,
                                              TF_Status* out_status);
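
// For example, using the conventions above (a sketch; the attribute name
// "shape" is only illustrative and depends on the op being configured):
//
//   int64_t dims[] = {-1, -1};
//   TFE_OpSetAttrShape(op, "shape", dims, 2, status);   // rank 2, dims unknown
//   TFE_OpSetAttrShape(op, "shape", NULL, -1, status);  // rank unknown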

// Sets the attribute 'attr_name' to be the function specified by 'value'.
//
// TODO(ashankar,iga): Add this functionality to the C API for graph
// construction. Perhaps we want an AttrValueMap equivalent in the C API?
TF_CAPI_EXPORT extern void TFE_OpSetAttrFunction(TFE_Op* op,
                                                 const char* attr_name,
                                                 const TFE_Op* value);

TF_CAPI_EXPORT extern void TFE_OpSetAttrStringList(TFE_Op* op,
                                                   const char* attr_name,
                                                   const char** value,
                                                   int num_values);
TF_CAPI_EXPORT extern void TFE_OpSetAttrIntList(TFE_Op* op,
                                                const char* attr_name,
                                                const int64_t* values,
                                                int num_values);
TF_CAPI_EXPORT extern void TFE_OpSetAttrFloatList(TFE_Op* op,
                                                  const char* attr_name,
                                                  const float* values,
                                                  int num_values);
TF_CAPI_EXPORT extern void TFE_OpSetAttrBoolList(TFE_Op* op,
                                                 const char* attr_name,
                                                 const unsigned char* values,
                                                 int num_values);
TF_CAPI_EXPORT extern void TFE_OpSetAttrTypeList(TFE_Op* op,
                                                 const char* attr_name,
                                                 const TF_DataType* values,
                                                 int num_values);
TF_CAPI_EXPORT extern void TFE_OpSetAttrShapeList(
    TFE_Op* op, const char* attr_name, const int64_t** dims,
    const int* num_dims, int num_values, TF_Status* out_status);
TF_CAPI_EXPORT extern void TFE_OpSetAttrFunctionList(TFE_Op* op,
                                                     const char* attr_name,
                                                     const TFE_Op** value,
                                                     int num_values);

// Execute the operation defined by 'op' and return handles to computed
// tensors in 'retvals'.
//
// 'retvals' must point to a pre-allocated array of TFE_TensorHandle*
// and '*num_retvals' should be set to the size of this array.
//
// On return, 'num_retvals' will be set to the actual number of outputs
// returned by the operation.
TF_CAPI_EXPORT extern void TFE_Execute(TFE_Op* op, TFE_TensorHandle** retvals,
                                       int* num_retvals, TF_Status* status);
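
// Example: execute a MatMul on two handles (a minimal sketch; 'a' and 'b' are
// assumed to be valid TFE_TensorHandle*s holding matrices of the same dtype,
// and error handling is abbreviated):
//
//   TFE_Op* matmul = TFE_NewOp(ctx, "MatMul", status);
//   TFE_OpAddInput(matmul, a, status);
//   TFE_OpAddInput(matmul, b, status);
//   TFE_OpSetAttrType(matmul, "T", TFE_TensorHandleDataType(a));
//   TFE_OpSetAttrBool(matmul, "transpose_a", 0);
//   TFE_OpSetAttrBool(matmul, "transpose_b", 0);
//   TFE_TensorHandle* retvals[1];
//   int num_retvals = 1;
//   TFE_Execute(matmul, retvals, &num_retvals, status);
//   TFE_DeleteOp(matmul);
//   if (TF_GetCode(status) == TF_OK) {
//     // num_retvals == 1; retvals[0] holds the product.
//     TFE_DeleteTensorHandle(retvals[0]);
//   }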

// Add a function (serialized FunctionDef protocol buffer) to ctx so
// that it can be invoked using TFE_Execute.
TF_CAPI_EXPORT extern void TFE_ContextAddFunctionDef(
    TFE_Context* ctx, const char* serialized_function_def, size_t size,
    TF_Status* status);

// Adds a function (created from TF_GraphToFunction or
// TF_FunctionImportFunctionDef) to the context, allowing it to be executed with
// TFE_Execute by creating an op with the same name as the function.
TF_CAPI_EXPORT extern void TFE_ContextAddFunction(TFE_Context* ctx,
                                                  TF_Function* function,
                                                  TF_Status* status);
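
// For example (a sketch; 'fdef_data', 'fdef_len', and the function name
// "my_fn" are illustrative and assumed to come from a serialized
// tensorflow.FunctionDef produced elsewhere):
//
//   TFE_ContextAddFunctionDef(ctx, fdef_data, fdef_len, status);
//   if (TF_GetCode(status) == TF_OK) {
//     TFE_Op* call = TFE_NewOp(ctx, "my_fn", status);  // invoke the function
//     // ... add inputs and call TFE_Execute() as with any other op ...
//     TFE_DeleteOp(call);
//   }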

// Enables tracing of RunMetadata on the ops executed from this context.
TF_CAPI_EXPORT extern void TFE_ContextEnableRunMetadata(TFE_Context* ctx);

// Disables tracing of RunMetadata on the ops executed from this context.
TF_CAPI_EXPORT extern void TFE_ContextDisableRunMetadata(TFE_Context* ctx);

// Populates the passed-in buffer with a serialized RunMetadata protocol buffer
// containing any run metadata information accumulated so far and clears this
// information.
TF_CAPI_EXPORT extern void TFE_ContextExportRunMetadata(TFE_Context* ctx,
                                                        TF_Buffer* buf,
                                                        TF_Status* status);
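
// Example: collect RunMetadata for a group of op executions (a sketch using
// the TF_Buffer helpers from tensorflow/c/c_api.h):
//
//   TFE_ContextEnableRunMetadata(ctx);
//   // ... run one or more ops with TFE_Execute() ...
//   TF_Buffer* run_metadata = TF_NewBuffer();
//   TFE_ContextExportRunMetadata(ctx, run_metadata, status);
//   // run_metadata->data now holds a serialized tensorflow.RunMetadata proto
//   // of run_metadata->length bytes.
//   TF_DeleteBuffer(run_metadata);
//   TFE_ContextDisableRunMetadata(ctx);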

#ifdef __cplusplus
} /* end extern "C" */
#endif

#ifdef __cplusplus
// A workaround to ease conversion between numpy objects and TFE_TensorHandles.
//
// TODO(ashankar): Figure out an alternative scheme that precludes the need for
// these API-boundary breaking methods.
namespace tensorflow {
class Tensor;
}  // namespace tensorflow

const tensorflow::Tensor* TFE_TensorHandleUnderlyingTensorInHostMemory(
    TFE_TensorHandle* h, TF_Status* status);
TFE_TensorHandle* TFE_NewTensorHandle(const tensorflow::Tensor& t);
#endif

#endif  // TENSORFLOW_C_EAGER_C_API_H_