/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_GPU_DEVICE_CONTEXT_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_GPU_DEVICE_CONTEXT_H_

#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"

namespace stream_executor {
class Stream;
}  // namespace stream_executor

namespace tensorflow {

29 class GPUDeviceContext : public DeviceContext {
30  public:
31   // Does not take ownership of streams.
GPUDeviceContext(int stream_id,se::Stream * stream,se::Stream * nccl_stream,se::Stream * host_to_device_stream,se::Stream * device_to_host_stream,gtl::InlinedVector<se::Stream *,4> device_to_device_stream)32   GPUDeviceContext(int stream_id, se::Stream* stream,
33 #if TENSORFLOW_USE_ROCM
34                    se::Stream* nccl_stream,
35 #endif
36                    se::Stream* host_to_device_stream,
37                    se::Stream* device_to_host_stream,
38                    gtl::InlinedVector<se::Stream*, 4> device_to_device_stream)
39       : stream_id_(stream_id),
40         stream_(stream),
41 #if TENSORFLOW_USE_ROCM
42         nccl_stream_(nccl_stream),
43 #endif
44         host_to_device_stream_(host_to_device_stream),
45         device_to_host_stream_(device_to_host_stream),
46         device_to_device_stream_(device_to_device_stream) {
47   }
48 
~GPUDeviceContext()49   ~GPUDeviceContext() override {}
50 
stream()51   se::Stream* stream() const override { return stream_; }
52 #if TENSORFLOW_USE_ROCM
nccl_stream()53   se::Stream* nccl_stream() const { return nccl_stream_; }
54 #endif
host_to_device_stream()55   se::Stream* host_to_device_stream() const { return host_to_device_stream_; }
device_to_host_stream()56   se::Stream* device_to_host_stream() const { return device_to_host_stream_; }
device_to_device_stream(int index)57   se::Stream* device_to_device_stream(int index) const {
58     return device_to_device_stream_[index % device_to_device_stream_.size()];
59   }
stream_id()60   int stream_id() const { return stream_id_; }
61 
62   void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device,
63                              Tensor* device_tensor, StatusCallback done,
64                              bool sync_dst_compute) const override;
65 
66   void CopyDeviceTensorToCPU(const Tensor* device_tensor, StringPiece edge_name,
67                              Device* device, Tensor* cpu_tensor,
68                              StatusCallback done) override;
69 
70   void CopyTensorInSameDevice(const Tensor* input_tensor, Device* device,
71                               Tensor* output_tensor,
72                               StatusCallback done) const override;
73 
MaintainLifetimeOnStream(const Tensor * t,se::Stream * stream)74   void MaintainLifetimeOnStream(const Tensor* t,
75                                 se::Stream* stream) const override {}
76 
77   Status ThenExecute(Device* device, se::Stream* stream,
78                      std::function<void()> func) override;
79 
80  private:
81   int stream_id_;
82   // The default primary stream to use for this context.
83   // All the memory belongs to this stream.
84   se::Stream* stream_;
85 #if TENSORFLOW_USE_ROCM
86   // The stream to use for nccl operations.
87   se::Stream* nccl_stream_;
88 #endif
89   // The stream to use for copying data from host into GPU.
90   se::Stream* host_to_device_stream_;
91   // The stream to use for copying data from GPU to host.
92   se::Stream* device_to_host_stream_;
93   // Streams to use for copying data between GPUs.
94   gtl::InlinedVector<se::Stream*, 4> device_to_device_stream_;
95 };

}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_COMMON_RUNTIME_GPU_DEVICE_CONTEXT_H_