Searched refs:device_ordinal (Results 1 – 25 of 118) sorted by relevance

/external/tensorflow/tensorflow/compiler/xla/tests/
multiple_devices_on_host_test.cc
37 LocalExecutable* executable, int device_ordinal, LocalClient* client, in CompileAndExecute() argument
43 execute_options.set_device_ordinal(device_ordinal); in CompileAndExecute()
51 results->emplace_back(device_ordinal, std::move(result)); in CompileAndExecute()
74 for (int device_ordinal = 0; device_ordinal < device_count; in TestWithDeviceCount() local
75 device_ordinal++) { in TestWithDeviceCount()
77 tensorflow::ThreadOptions{}, absl::StrCat("thread-", device_ordinal), in TestWithDeviceCount()
78 [&executable, device_ordinal, client, &results_mutex, &results] { in TestWithDeviceCount()
79 CompileAndExecute(executable.get(), device_ordinal, client, in TestWithDeviceCount()
85 for (int device_ordinal = 0; device_ordinal < device_count; in TestWithDeviceCount() local
86 device_ordinal++) { in TestWithDeviceCount()
[all …]
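
The test above runs one LocalExecutable concurrently on every device ordinal, tagging each execution with its ordinal through the run options. A hedged sketch of the same pattern follows; RunOnAllOrdinals is a hypothetical wrapper name, std::thread stands in for tensorflow::Env::StartThread, and the allocator/argument handling is simplified.

#include <mutex>
#include <thread>
#include <utility>
#include <vector>

#include "tensorflow/compiler/xla/client/local_client.h"

// Sketch: execute the same compiled program once per device ordinal, each on
// its own thread, collecting (ordinal, result) pairs under a mutex, as
// TestWithDeviceCount does above.
void RunOnAllOrdinals(xla::LocalExecutable* executable,
                      xla::LocalClient* client, int device_count) {
  std::mutex results_mutex;
  std::vector<std::pair<int, xla::StatusOr<xla::ScopedShapedBuffer>>> results;
  std::vector<std::thread> threads;
  for (int device_ordinal = 0; device_ordinal < device_count;
       ++device_ordinal) {
    threads.emplace_back([&, device_ordinal] {
      xla::ExecutableRunOptions run_options;
      // Pin this execution to one device, as CompileAndExecute does with
      // execute_options.set_device_ordinal(device_ordinal).
      run_options.set_device_ordinal(device_ordinal);
      run_options.set_allocator(client->backend().memory_allocator());
      auto result = executable->Run(/*arguments=*/{}, run_options);
      std::lock_guard<std::mutex> lock(results_mutex);
      results.emplace_back(device_ordinal, std::move(result));
    });
  }
  for (std::thread& t : threads) t.join();
}
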
local_client_test_base.cc
39 StatusOr<OwningDeviceMemory> TestAllocator::Allocate(int device_ordinal, in Allocate() argument
42 VLOG(2) << "Allocate(" << device_ordinal << ", " << size << ")"; in Allocate()
46 device_allocation_count_[device_ordinal]++; in Allocate()
48 return StreamExecutorMemoryAllocator::Allocate(device_ordinal, size, in Allocate()
52 Status TestAllocator::Deallocate(int device_ordinal, se::DeviceMemoryBase mem) { in Deallocate() argument
53 VLOG(2) << "Deallocate(" << device_ordinal << ")"; in Deallocate()
57 device_deallocation_count_[device_ordinal]++; in Deallocate()
59 return StreamExecutorMemoryAllocator::Deallocate(device_ordinal, mem); in Deallocate()
67 int64 TestAllocator::allocation_count(int device_ordinal) const { in allocation_count()
69 auto it = device_allocation_count_.find(device_ordinal); in allocation_count()
[all …]
local_client_test_base.h
49 StatusOr<OwningDeviceMemory> Allocate(int device_ordinal, uint64 size,
51 Status Deallocate(int device_ordinal, se::DeviceMemoryBase mem) override;
55 int64 allocation_count(int device_ordinal) const;
59 int64 deallocation_count(int device_ordinal) const;
/external/tensorflow/tensorflow/compiler/xla/service/
device_memory_allocator.cc
34 int device_ordinal, uint64 size, bool retry_on_failure) { in Allocate() argument
36 GetStreamExecutor(device_ordinal)); in Allocate()
41 tensorflow::strings::HumanReadableNumBytes(size), size, device_ordinal); in Allocate()
45 tensorflow::strings::HumanReadableNumBytes(size), size, device_ordinal, in Allocate()
47 return OwningDeviceMemory(result, device_ordinal, this); in Allocate()
50 Status StreamExecutorMemoryAllocator::Deallocate(int device_ordinal, in Deallocate() argument
54 GetStreamExecutor(device_ordinal)); in Deallocate()
56 mem.opaque(), device_ordinal); in Deallocate()
63 int device_ordinal) { in GetStreamExecutor() argument
64 if (device_ordinal < 0) { in GetStreamExecutor()
[all …]
backend.h
111 StatusOr<se::StreamExecutor*> stream_executor(int device_ordinal) const;
124 StatusOr<StreamPool::Ptr> BorrowStream(int device_ordinal);
131 return [this](int device_ordinal) { return BorrowStream(device_ordinal); }; in StreamBorrower()
135 bool device_ordinal_supported(int device_ordinal) const { in device_ordinal_supported() argument
136 return (device_ordinal >= 0 && device_ordinal < device_count() && in device_ordinal_supported()
137 stream_executors_[device_ordinal] != nullptr); in device_ordinal_supported()
141 string device_name(int device_ordinal) const { in device_name() argument
142 return absl::StrCat(platform_->Name(), ":", device_ordinal); in device_name()
backend.cc
130 StatusOr<StreamPool::Ptr> Backend::BorrowStream(int device_ordinal) { in BorrowStream() argument
131 TF_ASSIGN_OR_RETURN(auto executor, stream_executor(device_ordinal)); in BorrowStream()
175 return default_stream_executor()->device_ordinal(); in default_device_ordinal()
194 int device_ordinal) const { in stream_executor()
195 if (device_ordinal < 0 || in stream_executor()
196 device_ordinal > stream_executors_.back()->device_ordinal()) { in stream_executor()
199 device_ordinal, stream_executors_.back()->device_ordinal()); in stream_executor()
202 if (executor->device_ordinal() == device_ordinal) { in stream_executor()
207 device_name(device_ordinal)); in stream_executor()
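
backend.h and backend.cc above show that a Backend validates an ordinal with device_ordinal_supported() and hands out per-ordinal streams through BorrowStream(). A usage sketch built only from those signatures; StreamForOrdinal is a hypothetical helper, and include paths and error plumbing are assumptions.

// Sketch: borrow a stream bound to a specific device ordinal, rejecting
// ordinals the backend has no StreamExecutor for.
xla::StatusOr<xla::StreamPool::Ptr> StreamForOrdinal(xla::Backend* backend,
                                                     int device_ordinal) {
  if (!backend->device_ordinal_supported(device_ordinal)) {
    return tensorflow::errors::InvalidArgument(
        "no stream executor for ", backend->device_name(device_ordinal));
  }
  // The pooled stream is returned to the per-ordinal pool when the
  // StreamPool::Ptr goes out of scope.
  return backend->BorrowStream(device_ordinal);
}
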
device_memory_allocator.h
50 virtual StatusOr<OwningDeviceMemory> Allocate(int device_ordinal, uint64 size,
58 StatusOr<OwningDeviceMemory> Allocate(int device_ordinal, uint64 size) { in Allocate() argument
59 return Allocate(device_ordinal, size, /*retry_on_failure=*/true); in Allocate()
63 virtual Status Deallocate(int device_ordinal, se::DeviceMemoryBase mem) = 0;
85 StatusOr<OwningDeviceMemory> Allocate(int device_ordinal, uint64 size,
91 Status Deallocate(int device_ordinal, se::DeviceMemoryBase mem) override;
96 StatusOr<se::StreamExecutor*> GetStreamExecutor(int device_ordinal);
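
device_memory_allocator.h above is the interface the rest of this listing passes a device ordinal into: Allocate and Deallocate are both keyed by it, and the returned OwningDeviceMemory remembers which ordinal it came from. A small hedged sketch of round-tripping a buffer through that interface; TouchDevice is a hypothetical helper, and the TF_ASSIGN_OR_RETURN and VLOG macros are assumed to be available.

// Sketch: allocate a buffer on one device ordinal and let the owning handle
// return it to the same ordinal's allocator when it goes out of scope.
xla::Status TouchDevice(xla::DeviceMemoryAllocator* allocator,
                        int device_ordinal, tensorflow::uint64 size) {
  // The two-argument overload shown above defaults retry_on_failure to true.
  TF_ASSIGN_OR_RETURN(xla::OwningDeviceMemory memory,
                      allocator->Allocate(device_ordinal, size));
  VLOG(2) << "allocated " << size << " bytes on ordinal " << device_ordinal;
  // ~OwningDeviceMemory hands the buffer back to `allocator` with the same
  // device_ordinal, matching the Deallocate(device_ordinal, mem) signature.
  return xla::Status::OK();
}
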
shaped_buffer.cc
36 const se::Platform* platform, int device_ordinal) in ShapedBuffer() argument
40 device_ordinal_(device_ordinal), in ShapedBuffer()
79 "ShapedBuffer(", platform_->Name(), ":", device_ordinal(), in ToString()
109 int device_ordinal) in ScopedShapedBuffer() argument
111 device_ordinal), in ScopedShapedBuffer()
155 TF_CHECK_OK(allocator_->Deallocate(device_ordinal(), memory_base)); in Deallocate()
167 memory_allocator(), device_ordinal()); in TakeSubTree()
service_executable_run_options.h
47 int device_ordinal() const { return run_options_.device_ordinal(); } in device_ordinal() function
51 StatusOr<StreamPool::Ptr> BorrowStream(int device_ordinal) const { in BorrowStream() argument
53 ? borrow_stream_(device_ordinal) in BorrowStream()
allocation_tracker.cc
88 shaped_buffer.device_ordinal()); in RegisterInternal()
119 shaped_buffer->device_ordinal())); in Unregister()
164 shaped_buffer->platform(), shaped_buffer->device_ordinal()); in DeconstructTuple()
219 se::DeviceMemoryBase device_memory, int device_ordinal) { in AddAllocationOrIncrementRefCount() argument
220 AllocationMap& allocation_map = opaque_to_allocation_map_[device_ordinal]; in AddAllocationOrIncrementRefCount()
224 OwningDeviceMemory(device_memory, device_ordinal, in AddAllocationOrIncrementRefCount()
233 int device_ordinal) { in DecrementRefCount() argument
234 AllocationMap& allocation_map = opaque_to_allocation_map_[device_ordinal]; in DecrementRefCount()
shaped_buffer.h
46 const se::Platform* platform, int device_ordinal);
69 int device_ordinal() const { return device_ordinal_; } in device_ordinal() function
139 int device_ordinal);
165 CHECK_EQ(buffer.device_ordinal(), device_ordinal()); in set_buffer()
/external/tensorflow/tensorflow/compiler/xla/service/cpu/
cpu_runtime.cc
33 XfeedManager* GetXfeedManager(int device_ordinal) { in GetXfeedManager() argument
38 auto it = managers->find(device_ordinal); in GetXfeedManager()
40 it = managers->emplace(device_ordinal, new XfeedManager()).first; in GetXfeedManager()
111 int device_ordinal = in __xla_cpu_runtime_AcquireInfeedBufferForDequeue() local
112 run_options ? run_options->stream()->parent()->device_ordinal() : 0; in __xla_cpu_runtime_AcquireInfeedBufferForDequeue()
116 << device_ordinal; in __xla_cpu_runtime_AcquireInfeedBufferForDequeue()
119 xla::cpu::runtime::GetXfeedManager(device_ordinal); in __xla_cpu_runtime_AcquireInfeedBufferForDequeue()
135 int device_ordinal = in __xla_cpu_runtime_ReleaseInfeedBufferAfterDequeue() local
136 run_options ? run_options->stream()->parent()->device_ordinal() : 0; in __xla_cpu_runtime_ReleaseInfeedBufferAfterDequeue()
140 << device_ordinal; in __xla_cpu_runtime_ReleaseInfeedBufferAfterDequeue()
[all …]
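
GetXfeedManager above keeps one XfeedManager per device ordinal in a map filled on first use, and the infeed entry points fall back to ordinal 0 when no run options are supplied. The registry pattern itself is generic; here is a standalone C++ sketch of it (Manager and GetManagerForOrdinal are illustrative names, not XLA symbols).

#include <map>
#include <memory>
#include <mutex>

// Standalone sketch of a per-device-ordinal registry: one Manager per ordinal,
// created lazily the first time that ordinal is requested.
struct Manager {
  // Per-device state (queues, buffers, ...) would live here.
};

Manager* GetManagerForOrdinal(int device_ordinal) {
  static std::mutex mu;
  static auto* managers = new std::map<int, std::unique_ptr<Manager>>();
  std::lock_guard<std::mutex> lock(mu);
  auto it = managers->find(device_ordinal);
  if (it == managers->end()) {
    // First request for this ordinal: create and cache its manager.
    it = managers->emplace(device_ordinal, std::make_unique<Manager>()).first;
  }
  return it->second.get();
}
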
/external/tensorflow/tensorflow/compiler/xrt/
xrt_state.cc
86 xla::Backend* backend, int device_ordinal, const xla::Shape& shape, in AllocateScopedShapedBuffer() argument
90 TF_ASSIGN_OR_RETURN(auto stream, backend->BorrowStream(device_ordinal)); in AllocateScopedShapedBuffer()
110 shape, on_device_shape, allocator, device_ordinal); in AllocateScopedShapedBuffer()
117 allocator->Allocate(device_ordinal, size, /*retry_on_failure=*/false)); in AllocateScopedShapedBuffer()
133 int device_ordinal, in XRTBufferAllocation() argument
137 device_ordinal_(device_ordinal), in XRTBufferAllocation()
167 XRTTupleAllocation::XRTTupleAllocation(int device_ordinal, in XRTTupleAllocation() argument
171 : device_ordinal_(device_ordinal), in XRTTupleAllocation()
184 const xla::LiteralBase& literal, xla::Backend* backend, int device_ordinal, in CreateAndTransfer() argument
191 backend, device_ordinal, literal.shape(), &scoped_buffer)); in CreateAndTransfer()
[all …]
xrt_state.h
48 int device_ordinal,
92 xla::Backend* backend, int device_ordinal,
97 xla::Backend* backend, int device_ordinal,
128 static Status MakeTuple(xla::Backend* backend, int device_ordinal,
149 Status ToLiteral(xla::Backend* backend, int device_ordinal,
160 int device_ordinal();
196 XRTTupleAllocation(int device_ordinal, xla::DeviceMemoryAllocator* allocator,
204 int device_ordinal);
212 const xla::ShapeTree<ExpandedTupleInput>& elements, int device_ordinal,
xrt_device.cc
34 OpKernelContext* ctx, int device_ordinal, ScopedRef* scoped_ref) { in InitScopedRef() argument
37 if (device_ordinal != metadata->device_ordinal()) { in InitScopedRef()
38 return errors::Internal("XRT device ordinal requested ", device_ordinal, in InitScopedRef()
40 metadata->device_ordinal()); in InitScopedRef()
/external/tensorflow/tensorflow/compiler/xla/client/
local_client.cc
35 StatusOr<StreamPool::Ptr> BorrowStreamForDevice(int device_ordinal, in BorrowStreamForDevice() argument
37 if (device_ordinal < 0) { in BorrowStreamForDevice()
38 device_ordinal = backend->default_device_ordinal(); in BorrowStreamForDevice()
40 return backend->BorrowStream(device_ordinal); in BorrowStreamForDevice()
50 CHECK_GE(build_options_.device_ordinal(), 0) in LocalExecutable()
97 if (run_options.device_ordinal() != -1) { in ValidateExecutionOptions()
106 int run_device_ordinal = run_options.device_ordinal(); in ValidateExecutionOptions()
109 ? run_options.stream()->parent()->device_ordinal() in ValidateExecutionOptions()
114 run_device_ordinal, build_options_.device_ordinal())); in ValidateExecutionOptions()
156 stream, BorrowStreamForDevice(run_options.device_ordinal(), backend_)); in Run()
[all …]
executable_build_options.cc
35 int device_ordinal) { in set_device_ordinal() argument
36 CHECK_GE(device_ordinal, 0); in set_device_ordinal()
37 device_ordinal_ = device_ordinal; in set_device_ordinal()
41 int ExecutableBuildOptions::device_ordinal() const { return device_ordinal_; } in device_ordinal() function in xla::ExecutableBuildOptions
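
Together, local_client.cc and executable_build_options.cc above encode the ordinal-resolution rule: set_device_ordinal() only accepts ordinals >= 0, a run ordinal of -1 falls back to the backend's default, and an explicit run ordinal has to agree with the ordinal the executable was built for. A simplified sketch of that check; ResolveRunOrdinal is a hypothetical helper, and only the option accessors and default_device_ordinal() come from the snippets.

// Sketch: resolve the device ordinal an execution should use, mirroring the
// validation visible in ValidateExecutionOptions/BorrowStreamForDevice above.
xla::StatusOr<int> ResolveRunOrdinal(
    const xla::ExecutableBuildOptions& build_options,
    const xla::ExecutableRunOptions& run_options, const xla::Backend& backend) {
  int run_ordinal = run_options.device_ordinal();
  if (run_ordinal == -1) {
    // -1 is the "unset" value; fall back to the backend's default device.
    run_ordinal = backend.default_device_ordinal();
  }
  if (run_ordinal != build_options.device_ordinal()) {
    return tensorflow::errors::InvalidArgument(
        "executable built for ordinal ", build_options.device_ordinal(),
        " but asked to run on ordinal ", run_ordinal);
  }
  return run_ordinal;
}
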
local_client.h
92 int build_device_ordinal() const { return build_options_.device_ordinal(); } in build_device_ordinal()
129 const Literal& literal, int device_ordinal,
149 Status TransferToInfeedLocal(const Literal& literal, int device_ordinal);
157 int device_ordinal);
182 bool device_ordinal_supported(int device_ordinal) const;
/external/tensorflow/tensorflow/python/tpu/ops/
tpu_ops.py
283 device_ordinal, argument
307 device_ordinal=device_ordinal,
320 device_ordinal, argument
363 device_ordinal=device_ordinal,
378 device_ordinal, argument
429 device_ordinal=device_ordinal,
/external/tensorflow/tensorflow/stream_executor/rocm/
rocm_driver.cc
58 int device_ordinal() const { return device_ordinal_; } in device_ordinal() function in stream_executor::gpu::GpuContext
192 if (context->device_ordinal() == tls->current_device_ordinal) { in ScopedActivateContext()
193 DCHECK_EQ(CurrentDeviceOrDie(), context->device_ordinal()); in ScopedActivateContext()
198 << tls->current_device_ordinal << " to " << context->device_ordinal(); in ScopedActivateContext()
202 tensorflow::wrap::hipSetDevice(context->device_ordinal())); in ScopedActivateContext()
203 tls->current_device_ordinal = context->device_ordinal(); in ScopedActivateContext()
220 if (to_restore_->device_ordinal() == tls->current_device_ordinal) { in ~ScopedActivateContext()
221 DCHECK_EQ(CurrentDeviceOrDie(), to_restore_->device_ordinal()); in ~ScopedActivateContext()
227 << to_restore_->device_ordinal(); in ~ScopedActivateContext()
231 tensorflow::wrap::hipSetDevice(to_restore_->device_ordinal())); in ~ScopedActivateContext()
[all …]
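
rocm_driver.cc above tracks the thread's current device ordinal and only calls hipSetDevice when ScopedActivateContext actually needs to switch, restoring the previous ordinal in the destructor. A standalone RAII sketch of that idea against the plain HIP runtime; ScopedDevice is a hypothetical class, only hipGetDevice/hipSetDevice from HIP are used, and error handling is reduced to a log line.

#include <hip/hip_runtime.h>

#include <iostream>

// Standalone sketch of the scoped-activation pattern: make one device ordinal
// current for this thread for the lifetime of the object, then restore the
// ordinal that was current before, skipping redundant hipSetDevice calls.
class ScopedDevice {
 public:
  explicit ScopedDevice(int device_ordinal) : target_ordinal_(device_ordinal) {
    if (hipGetDevice(&previous_ordinal_) != hipSuccess) {
      std::cerr << "hipGetDevice failed\n";
    }
    if (previous_ordinal_ != target_ordinal_ &&
        hipSetDevice(target_ordinal_) != hipSuccess) {
      std::cerr << "hipSetDevice(" << target_ordinal_ << ") failed\n";
    }
  }
  ~ScopedDevice() {
    if (previous_ordinal_ != target_ordinal_) {
      hipSetDevice(previous_ordinal_);  // restore, as ~ScopedActivateContext does
    }
  }

 private:
  int previous_ordinal_ = 0;
  int target_ordinal_ = 0;
};
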
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
buffer_comparator.cc
107 auto device_ordinal = stream_exec->device_ordinal(); in Create() local
110 allocator->Allocate(device_ordinal, ref_buffer.size())); in Create()
115 ScopedShapedBuffer ret(shape, shape, allocator, device_ordinal); in Create()
137 auto device_ordinal = stream_exec->device_ordinal(); in CompareEqualImpl() local
139 device_ordinal); in CompareEqualImpl()
142 run_options.set_device_ordinal(stream_exec->device_ordinal()); in CompareEqualImpl()
nccl_all_reduce_thunk.cc
51 int64 device_ordinal; member
68 replica_count, element_count, device_ordinal, generation_counter, in ToString()
227 << participant.device_ordinal; in SubmitParticipant()
265 ordinals.push_back(data.device_ordinal); in InitializeCommunicationChannels()
289 << " on device: " << participant.device_ordinal; in DoAllReduce()
302 << participant.device_ordinal; in DoAllReduce()
322 participant.device_ordinal = stream->parent()->device_ordinal(); in ExecuteOnStream()
buffer_allocations.h
52 const BufferAssignment* buffer_assignment, int device_ordinal,
66 int device_ordinal() const { return device_ordinal_; } in device_ordinal() function
86 BufferAllocations(BufferAllocation::Index buffer_count, int device_ordinal, in BufferAllocations() argument
90 device_ordinal_(device_ordinal), in BufferAllocations()
/external/tensorflow/tensorflow/compiler/jit/
xla_device.cc
69 const xla::Backend* backend, int device_ordinal);
95 const xla::Backend* backend, int device_ordinal) { in GetOrCreateXlaDeviceAllocator() argument
99 auto it = state.allocators_.find({backend, device_ordinal}); in GetOrCreateXlaDeviceAllocator()
106 backend->stream_executors()[device_ordinal]); in GetOrCreateXlaDeviceAllocator()
108 state.allocators_[{backend, device_ordinal}] = std::move(alloc); in GetOrCreateXlaDeviceAllocator()
131 int device_ordinal) { in BuildXlaDeviceAttributes() argument
133 absl::StrCat(name_prefix, "/device:", device_name, ":", device_ordinal), in BuildXlaDeviceAttributes()
141 int device_ordinal, se::Platform* platform, const DeviceType& device_type, in Metadata() argument
144 : device_ordinal_(device_ordinal), in Metadata()
151 int XlaDevice::Metadata::device_ordinal() const { return device_ordinal_; } in device_ordinal() function in tensorflow::XlaDevice::Metadata
[all …]
/external/tensorflow/tensorflow/compiler/tf2xla/
side_effect_util.cc
29 Status SetDeviceOrdinalAttributeForNode(Node* node, int device_ordinal) { in SetDeviceOrdinalAttributeForNode() argument
39 node->AddAttr("device_ordinal", device_ordinal); in SetDeviceOrdinalAttributeForNode()
42 device_ordinal_value.set_i(device_ordinal); in SetDeviceOrdinalAttributeForNode()
53 device_ordinal_value.set_i(device_ordinal); in SetDeviceOrdinalAttributeForNode()
64 node->AddAttr("device_ordinal", device_ordinal); in SetDeviceOrdinalAttributeForNode()
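
side_effect_util.cc above propagates a device ordinal into graph nodes either by adding a plain "device_ordinal" attribute or by filling an AttrValue with set_i() for the cases where the ordinal lives inside another attribute. A minimal sketch of those two moves; TagNodeWithOrdinal and OrdinalAttrValue are hypothetical wrappers around the AddAttr and set_i calls shown above.

// Sketch: the direct-attribute path taken by SetDeviceOrdinalAttributeForNode.
void TagNodeWithOrdinal(tensorflow::Node* node, int device_ordinal) {
  node->AddAttr("device_ordinal", device_ordinal);
}

// Sketch: build an AttrValue carrying the ordinal, as the set_i() calls do.
tensorflow::AttrValue OrdinalAttrValue(int device_ordinal) {
  tensorflow::AttrValue device_ordinal_value;
  device_ordinal_value.set_i(device_ordinal);
  return device_ordinal_value;
}
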
