/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_OUTFEED_THUNK_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_OUTFEED_THUNK_H_

#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"

namespace xla {
namespace gpu {

// A thunk that outfeeds data. The data must already be resident on the
// device. This thunk performs a device-to-host copy from the buffer
// allocated for the outfeed op to the host location.
class OutfeedThunk : public Thunk {
 public:
  // Constructs an OutfeedThunk that copies data to the host-side
  // outfeed queue from the buffers in the given shape tree.
  OutfeedThunk(ShapeTree<BufferAllocation::Slice> outfeed_slices,
               const HloInstruction* hlo_instruction);

  OutfeedThunk(const OutfeedThunk&) = delete;
  OutfeedThunk& operator=(const OutfeedThunk&) = delete;

  Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
                         se::Stream* stream,
                         HloExecutionProfiler* profiler) override;

 private:
  const ShapeTree<BufferAllocation::Slice> outfeed_slices_;
};

}  // namespace gpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_OUTFEED_THUNK_H_
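
// Illustrative usage sketch (not part of the original header, kept as a
// comment so the header itself is unchanged): a caller such as the GPU IR
// emitter is expected to build a ShapeTree of buffer slices for the outfeed
// operand and hand it to the thunk together with the outfeed HLO instruction.
// `PopulateOutfeedSlices` below is a hypothetical helper standing in for the
// real buffer-assignment lookup.
//
//   const HloInstruction* outfeed = ...;  // HLO instruction with opcode kOutfeed.
//   ShapeTree<BufferAllocation::Slice> slices(outfeed->operand(0)->shape());
//   // Hypothetical: fill each leaf of `slices` with the slice assigned to the
//   // corresponding piece of the outfeed operand's buffer.
//   PopulateOutfeedSlices(&slices, outfeed);
//   auto thunk = absl::make_unique<OutfeedThunk>(std::move(slices), outfeed);
//
// At run time the GPU executable would then invoke the thunk on a stream,
// e.g. thunk->ExecuteOnStream(buffer_allocations, stream, profiler), which
// copies the outfeed operand's device buffers to the host-side outfeed queue.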