/external/libchrome/base/task/sequence_manager/ |
D | work_queue_unittest.cc |
    41  work_queue_.reset(new WorkQueue(task_queue_.get(), "test",   in SetUp()
    44  work_queue_sets_->AddQueue(work_queue_.get(), 0);   in SetUp()
    47  void TearDown() override { work_queue_sets_->RemoveQueue(work_queue_.get()); }   in TearDown()
    78  std::unique_ptr<WorkQueue> work_queue_;   member in base::sequence_manager::internal::WorkQueueTest
    84  EXPECT_TRUE(work_queue_->Empty());   in TEST_F()
    85  work_queue_->Push(FakeTaskWithEnqueueOrder(1));   in TEST_F()
    86  EXPECT_FALSE(work_queue_->Empty());   in TEST_F()
    90  work_queue_->Push(FakeTaskWithEnqueueOrder(1));   in TEST_F()
    91  work_queue_->InsertFence(EnqueueOrder::blocking_fence());   in TEST_F()
    92  EXPECT_FALSE(work_queue_->Empty());   in TEST_F()
    [all …]
|
/external/tensorflow/tensorflow/core/platform/default/ |
D | unbounded_work_queue.cc |
    36  if (!work_queue_.empty()) {   in ~UnboundedWorkQueue()
    59  work_queue_.push_back(std::move(fn));   in Schedule()
    63  if (work_queue_.size() > num_idle_threads_) {   in Schedule()
    86  while (!cancelled_ && work_queue_.empty()) {   in PooledThreadFunc()
    94  fn = std::move(work_queue_.front());   in PooledThreadFunc()
    95  work_queue_.pop_front();   in PooledThreadFunc()
|
D | unbounded_work_queue.h |
    60  std::deque<WorkFunction> work_queue_ TF_GUARDED_BY(work_queue_mu_);
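The fragments above outline the usual unbounded work-queue shape: a mutex-guarded std::deque<WorkFunction>, a Schedule() that pushes to the back, and pooled threads that block while the queue is empty and stop on cancellation. Below is a minimal self-contained sketch of that shape; SimpleWorkQueue and every name not visible in the fragments are illustrative, not the TensorFlow implementation.

// Minimal sketch of the pattern suggested by the matches above: mutex-guarded
// deque, Schedule() pushing, worker threads blocking while the queue is empty.
#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>
#include <utility>
#include <vector>

class SimpleWorkQueue {
 public:
  using WorkFunction = std::function<void()>;

  explicit SimpleWorkQueue(int num_threads) {
    for (int i = 0; i < num_threads; ++i) {
      threads_.emplace_back([this] { ThreadFunc(); });
    }
  }

  ~SimpleWorkQueue() {
    {
      std::lock_guard<std::mutex> lock(mu_);
      cancelled_ = true;
    }
    cv_.notify_all();
    for (auto& t : threads_) t.join();
  }

  void Schedule(WorkFunction fn) {
    {
      std::lock_guard<std::mutex> lock(mu_);
      work_queue_.push_back(std::move(fn));
    }
    cv_.notify_one();
  }

 private:
  void ThreadFunc() {
    for (;;) {
      WorkFunction fn;
      {
        std::unique_lock<std::mutex> lock(mu_);
        // Block while there is no work, as in PooledThreadFunc() above.
        cv_.wait(lock, [this] { return cancelled_ || !work_queue_.empty(); });
        if (cancelled_ && work_queue_.empty()) return;
        fn = std::move(work_queue_.front());
        work_queue_.pop_front();
      }
      fn();  // Run the closure outside the lock.
    }
  }

  std::mutex mu_;
  std::condition_variable cv_;
  bool cancelled_ = false;
  std::deque<WorkFunction> work_queue_;  // Guarded by mu_.
  std::vector<std::thread> threads_;
};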
|
/external/tensorflow/tensorflow/compiler/xla/pjrt/ |
D | worker_thread.cc |
    27  work_queue_.push(nullptr);   in ~WorkerThread()
    33  work_queue_.push(std::move(fn));   in Schedule()
    36  bool WorkerThread::WorkAvailable() { return !work_queue_.empty(); }   in WorkAvailable()
    44  fn = std::move(work_queue_.front());   in WorkLoop()
    45  work_queue_.pop();   in WorkLoop()
|
D | worker_thread.h |
    47  std::queue<std::function<void()>> work_queue_ TF_GUARDED_BY(mu_);
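These matches show the single-thread variant: a std::queue<std::function<void()>> guarded by a mutex, Schedule() pushing, and a WorkLoop() that ends when it pops the nullptr pushed by the destructor. A minimal sketch of that sentinel-based shutdown follows; SimpleWorkerThread and all details not visible in the fragments are assumptions, not the XLA WorkerThread code.

// Sketch of a single worker thread draining a std::queue of closures, using a
// nullptr sentinel (pushed from the destructor) to stop the loop.
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <utility>

class SimpleWorkerThread {
 public:
  SimpleWorkerThread() : thread_([this] { WorkLoop(); }) {}

  ~SimpleWorkerThread() {
    Schedule(nullptr);  // Sentinel, as in ~WorkerThread() above.
    thread_.join();
  }

  void Schedule(std::function<void()> fn) {
    {
      std::lock_guard<std::mutex> lock(mu_);
      work_queue_.push(std::move(fn));
    }
    cv_.notify_one();
  }

 private:
  void WorkLoop() {
    for (;;) {
      std::function<void()> fn;
      {
        std::unique_lock<std::mutex> lock(mu_);
        cv_.wait(lock, [this] { return !work_queue_.empty(); });
        fn = std::move(work_queue_.front());
        work_queue_.pop();
      }
      if (fn == nullptr) return;  // Sentinel: stop the loop.
      fn();
    }
  }

  std::mutex mu_;
  std::condition_variable cv_;
  std::queue<std::function<void()>> work_queue_;  // Guarded by mu_.
  std::thread thread_;
};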
|
/external/tensorflow/tensorflow/stream_executor/host/ |
D | host_stream.cc |
    45  work_queue_.push(nullptr);   in ~HostStream()
    54  work_queue_.push(std::move(fn));   in EnqueueTask()
    58  bool HostStream::WorkAvailable() { return !work_queue_.empty(); }   in WorkAvailable()
    71  fn = std::move(work_queue_.front());   in WorkLoop()
    72  work_queue_.pop();   in WorkLoop()
|
D | host_stream.h |
    51  std::queue<std::function<void()>> work_queue_ TF_GUARDED_BY(mu_);
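host_stream.cc follows the same queue-plus-nullptr-sentinel shape as worker_thread.cc above. A short usage sketch, reusing the hypothetical SimpleWorkerThread from the previous sketch rather than the real HostStream API: tasks run in FIFO order, so "wait until everything enqueued so far is done" can be expressed by enqueueing a task that signals.

// Usage sketch: FIFO execution plus a block-until-done idiom via a signalling task.
#include <future>
#include <iostream>

int main() {
  SimpleWorkerThread stream;
  int value = 0;
  stream.Schedule([&value] { value = 42; });        // Runs first.
  std::promise<void> done;
  stream.Schedule([&done] { done.set_value(); });   // Runs after the task above.
  done.get_future().wait();  // All earlier tasks have finished here.
  std::cout << value << "\n";  // Prints 42.
  return 0;
}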
|
/external/tensorflow/tensorflow/core/platform/ |
D | unbounded_work_queue_test.cc |
    29  : work_queue_(   in UnboundedWorkQueueTest()
    36  work_queue_->Schedule([this, fn]() {   in RunMultipleCopiesOfClosure()
    52  void ResetQueue() { work_queue_.reset(); }   in ResetQueue()
    63  std::unique_ptr<UnboundedWorkQueue> work_queue_;   member in tensorflow::__anonbe26645e0111::UnboundedWorkQueueTest
|
/external/tensorflow/tensorflow/core/common_runtime/ |
D | base_collective_executor.h |
    107  work_queue_(std::move(work_queue)) {}   in BaseCollectiveExecutor()
    125  work_queue_->Schedule(std::move(closure));   in RunClosure()
    145  std::shared_ptr<UnboundedWorkQueue> work_queue_;   variable
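These matches show the ownership side of the pattern: the executor takes a std::shared_ptr to a work queue and RunClosure() forwards to Schedule(), so every executor created by the manager shares one thread pool. A sketch under those assumptions; SketchCollectiveExecutor and SimpleWorkQueue (from the earlier sketch) are stand-ins, not the TensorFlow classes.

// Sketch: executors share one queue via shared_ptr; RunClosure() delegates.
#include <functional>
#include <memory>
#include <utility>

class SketchCollectiveExecutor {
 public:
  explicit SketchCollectiveExecutor(std::shared_ptr<SimpleWorkQueue> work_queue)
      : work_queue_(std::move(work_queue)) {}

  void RunClosure(std::function<void()> closure) {
    work_queue_->Schedule(std::move(closure));
  }

 private:
  std::shared_ptr<SimpleWorkQueue> work_queue_;  // Shared with the manager.
};

// The manager owns the queue and hands the same shared_ptr to every executor:
//   auto queue = std::make_shared<SimpleWorkQueue>(/*num_threads=*/4);
//   SketchCollectiveExecutor exec_a(queue), exec_b(queue);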
|
D | collective_executor_mgr.cc |
    37  work_queue_(std::make_shared<UnboundedWorkQueue>(Env::Default(),   in CollectiveExecutorMgr()
    66  &gpu_ring_order_, work_queue_);   in Create()
|
D | collective_executor_mgr.h |
    78  std::shared_ptr<UnboundedWorkQueue> work_queue_;   variable
|
D | collective_rma_local_test.cc |
    42  work_queue_ = std::make_shared<UnboundedWorkQueue>(Env::Default(), "test");   in CollectiveRemoteAccessLocalTest()
    60  std::shared_ptr<UnboundedWorkQueue> work_queue_;   member in tensorflow::__anon0d44278e0111::CollectiveRemoteAccessLocalTest
|
D | permuter_test.cc |
    169  work_queue_ = std::make_shared<UnboundedWorkQueue>(Env::Default(), "test");   in Init()
    174  gpu_ring_order_.get(), work_queue_);   in Init()
    434  std::shared_ptr<UnboundedWorkQueue> work_queue_;   member in tensorflow::__anon153c086f0111::PermuterTest
|
D | ring_gatherer_test.cc |
    179  work_queue_ = std::make_shared<UnboundedWorkQueue>(Env::Default(), "test");   in Init()
    184  gpu_ring_order_.get(), work_queue_);   in Init()
    528  std::shared_ptr<UnboundedWorkQueue> work_queue_;   member in tensorflow::RingGathererTest
|
D | ring_reducer_test.cc |
    202  work_queue_ = std::make_shared<UnboundedWorkQueue>(Env::Default(), "test");   in Init()
    207  gpu_ring_order_.get(), work_queue_);   in Init()
    560  std::shared_ptr<UnboundedWorkQueue> work_queue_;   member in tensorflow::RingReducerTest
|
D | hierarchical_tree_broadcaster_test.cc |
    262  work_queue_ = std::make_shared<UnboundedWorkQueue>(Env::Default(), "test");   in Init()
    267  gpu_ring_order_.get(), work_queue_);   in Init()
    717  std::shared_ptr<UnboundedWorkQueue> work_queue_;   member in tensorflow::__anonb5878a120111::HierarchicalTreeBroadcasterTest
|
/external/tensorflow/tensorflow/core/distributed_runtime/ |
D | collective_rma_distributed.h |
    35  work_queue_(std::move(work_queue)),   in CollectiveRemoteAccessDistributed()
    58  std::shared_ptr<UnboundedWorkQueue> work_queue_;   variable
|
D | rpc_collective_executor_mgr.cc |
    53  work_queue_, worker_cache_, step_id,   in Create()
    56  &gpu_ring_order_, work_queue_);   in Create()
|
D | collective_rma_distributed_test.cc |
    185  : work_queue_(   in CollRMADistTest()
    214  device_mgrs_[0], dev_resolvers_[dev0_worker_name], work_queue_, &wc_,   in SetUp()
    288  std::shared_ptr<UnboundedWorkQueue> work_queue_;   member in tensorflow::__anond64f02c00111::CollRMADistTest
|
D | collective_rma_distributed.cc |
    151  work_queue_->Schedule([s, done] { done(s); });   in RecvFromPeer()
|
/external/tensorflow/tensorflow/core/nccl/ |
D | nccl_manager_test.cc |
    76  work_queue_ = new UnboundedWorkQueue(Env::Default(), "nccl_manager_test");   in SetUpTestSuite()
    81  ASSERT_NE(work_queue_, nullptr);   in SetUp()
    101  delete work_queue_;   in TearDownTestSuite()
    354  this->work_queue_->Schedule(node_fn);   in RunMultiNodeAllReduceTest()
    413  this->work_queue_->Schedule(std::move(rank_fn));   in RunMultiNodeBroadcastTest()
    432  static UnboundedWorkQueue* work_queue_;   member in tensorflow::NcclManagerTest
    460  UnboundedWorkQueue* NcclManagerTest<Scalar>::work_queue_ = nullptr;   member in tensorflow::NcclManagerTest<Scalar>
    559  this->work_queue_->Schedule(fn);   in TYPED_TEST()
    905  this->work_queue_->Schedule(   in TYPED_TEST()
    945  this->work_queue_->Schedule(   in TYPED_TEST()
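The nccl_manager_test.cc matches show a queue shared across an entire googletest suite: a static pointer member allocated in SetUpTestSuite(), deleted in TearDownTestSuite(), and defined out of class as nullptr. Below is a minimal, non-templated sketch of that fixture pattern, again with the hypothetical SimpleWorkQueue standing in for UnboundedWorkQueue.

// Sketch: one work queue shared by every test in a googletest suite.
#include "gtest/gtest.h"

class QueueSuiteTest : public ::testing::Test {
 protected:
  static void SetUpTestSuite() {
    work_queue_ = new SimpleWorkQueue(/*num_threads=*/2);
  }
  static void TearDownTestSuite() {
    delete work_queue_;
    work_queue_ = nullptr;
  }
  void SetUp() override { ASSERT_NE(work_queue_, nullptr); }

  static SimpleWorkQueue* work_queue_;  // Shared by every test in the suite.
};

SimpleWorkQueue* QueueSuiteTest::work_queue_ = nullptr;  // Out-of-class definition.

TEST_F(QueueSuiteTest, SchedulesWork) {
  work_queue_->Schedule([] { /* work running on the suite-wide pool */ });
}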
|
/external/tensorflow/tensorflow/core/framework/ |
D | dataset.cc |
    721  work_queue_.push_back(std::move(work_item));   in Schedule()
    732  while (!cancelled_ && work_queue_.empty()) {   in WorkerLoop()
    738  DCHECK(!work_queue_.empty());   in WorkerLoop()
    739  work_item = std::move(work_queue_.front());   in WorkerLoop()
    740  work_queue_.pop_front();   in WorkerLoop()
|
D | dataset.h |
    1231  std::deque<std::function<void()>> work_queue_ TF_GUARDED_BY(mu_);
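The dataset.cc WorkerLoop() waits with the explicit while (!cancelled_ && work_queue_.empty()) loop rather than the predicate overload of condition_variable::wait; both handle spurious wakeups. A sketch of that loop, assuming the member names visible in the fragments (work_queue_, cancelled_, mu_); cv_ and the early return on cancellation are assumptions, not the actual BackgroundWorker code.

// Sketch of a worker loop using an explicit wait loop and a cancellation flag.
#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>
#include <utility>

struct WorkerLoopSketch {
  std::mutex mu_;
  std::condition_variable cv_;             // Hypothetical name.
  bool cancelled_ = false;                 // Guarded by mu_.
  std::deque<std::function<void()>> work_queue_;  // Guarded by mu_.

  void WorkerLoop() {
    while (true) {
      std::function<void()> work_item;
      {
        std::unique_lock<std::mutex> lock(mu_);
        // Re-check the condition on every wakeup (spurious wakeups included).
        while (!cancelled_ && work_queue_.empty()) {
          cv_.wait(lock);
        }
        if (cancelled_) return;  // Assumed shutdown path before the DCHECK.
        work_item = std::move(work_queue_.front());
        work_queue_.pop_front();
      }
      work_item();  // Run outside the lock.
    }
  }
};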
|
/external/tensorflow/tensorflow/core/kernels/ |
D | collective_nccl_test.cc |
    89  work_queue_(std::make_shared<UnboundedWorkQueue>(   in NcclTestBase()
    128  /*gpu_ring_order=*/nullptr, work_queue_);   in Init()
    437  std::shared_ptr<UnboundedWorkQueue> work_queue_;   member in tensorflow::NcclTestBase
|