/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_XFEED_QUEUE_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_XFEED_QUEUE_H_

#include <deque>
#include <functional>
#include <vector>

#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/thread_annotations.h"

namespace xla {
namespace gpu {

// TODO(b/30467474) Once GPU outfeed implementation settles, consider
// folding back the cpu and gpu outfeed implementations into a generic
// one if possible.

// Manages a thread-safe queue of buffers.
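//
// Illustrative usage sketch (not part of the original source; `MyBuffers` is a
// hypothetical instantiation of BufferType, not a type defined in XLA):
//
//   XfeedQueue<MyBuffers> queue;
//   // Producer thread: hand a tree of buffers to the queue.
//   queue.EnqueueDestination(std::move(buffers));
//   // Consumer thread: blocks until a buffer tree is available.
//   MyBuffers next = queue.BlockingGetNextDestination();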
template <typename BufferType>
class XfeedQueue {
 public:
  // Adds a tree of buffers to the queue. The individual buffers correspond to
  // the elements of a tuple and may be nullptr if the buffer is a tuple index
  // buffer.
  void EnqueueDestination(BufferType buffers) {
    tensorflow::mutex_lock l(mu_);
    enqueued_buffers_.push_back(std::move(buffers));
    cv_.notify_one();
  }

  // Blocks until the queue is non-empty, then returns the buffer at the head
  // of the queue.
  BufferType BlockingGetNextDestination() {
    for (const auto& callback : before_get_next_dest_callbacks_) {
      callback();
    }

    bool became_empty;
    BufferType current_buffer;
    {
      tensorflow::mutex_lock l(mu_);
      while (enqueued_buffers_.empty()) {
        cv_.wait(l);
      }
      current_buffer = std::move(enqueued_buffers_.front());
      enqueued_buffers_.pop_front();
      became_empty = enqueued_buffers_.empty();
    }
    if (became_empty) {
      for (const auto& callback : on_empty_callbacks_) {
        callback();
      }
    }
    return current_buffer;
  }

  void RegisterOnEmptyCallback(std::function<void()> callback) {
    on_empty_callbacks_.push_back(std::move(callback));
  }

  void RegisterBeforeGetNextDestinationCallback(
      std::function<void()> callback) {
    before_get_next_dest_callbacks_.push_back(std::move(callback));
  }

 private:
  tensorflow::mutex mu_;

  // Condition variable that is signaled every time a buffer is enqueued.
  tensorflow::condition_variable cv_;

  // The queue of trees of buffers. Buffer* queue contents are not owned.
  std::deque<BufferType> enqueued_buffers_ GUARDED_BY(mu_);

  // List of callbacks which will be called when 'enqueued_buffers_' becomes
  // empty.
  std::vector<std::function<void()>> on_empty_callbacks_;

  // List of callbacks which will be called before BlockingGetNextDestination()
  // is called. This lets you e.g. call EnqueueDestination() for each call to
  // BlockingGetNextDestination().
  std::vector<std::function<void()>> before_get_next_dest_callbacks_;
};

}  // namespace gpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_XFEED_QUEUE_H_