/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_PYTHON_PY_CLIENT_H_
#define TENSORFLOW_COMPILER_XLA_PYTHON_PY_CLIENT_H_

#include <memory>
#include <string>
#include <vector>

#include "absl/types/optional.h"
#include "pybind11/pybind11.h"
#include "tensorflow/compiler/xla/pjrt/pjrt_client.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"

namespace xla {

class PyBuffer;
class PyClient;
class PyExecutable;

// Custom holder types.
//
// We must keep the PyClient object alive as long as any of the runtime
// objects are alive. Since we don't have a lot of control over Python
// destructor ordering, we keep the PyClient object as a std::shared_ptr<>,
// and ensure that each Python runtime object holds a reference to the
// PyClient. An alternative design would be to keep a single global
// singleton PyClient, although this seems less flexible, especially for
// writing tests.
//
// To maintain PyClient references, we define pybind11 holder classes that
// are custom smart pointers that also keep a reference to a PyClient.
// pybind11 has a `keep_alive` feature that has a similar goal, but it doesn't
// seem sufficiently flexible to describe ownership relationships in cases
// where the ownership doesn't pertain to a direct argument or return value of
// a function. Another alternative to the holder classes would be to create
// proxy objects that contain both a reference and a runtime class; holder
// classes seem less tedious to define.

// A pair of a PyClient reference and an unowned pointer to T.
template <typename T>
struct ClientAndPtr {
  ClientAndPtr() = default;
  // pybind11 requires that we define a constructor that takes a raw pointer,
  // but it should be unreachable.
  explicit ClientAndPtr(T*) {
    LOG(FATAL) << "ClientAndPtr should be constructed via WrapWithClient.";
  }

  ClientAndPtr(const ClientAndPtr&) = default;
  ClientAndPtr(ClientAndPtr&&) = default;
  ClientAndPtr& operator=(const ClientAndPtr&) = default;
  ClientAndPtr& operator=(ClientAndPtr&&) = default;

  std::shared_ptr<PyClient> client;
  T* contents;

  T* get() const { return contents; }

  T* operator->() const { return contents; }
  T& operator*() const { return *contents; }
};

// By defining a templated helper function, we can use return type deduction
// and avoid specifying types at the caller.
template <typename T>
ClientAndPtr<T> WrapWithClient(std::shared_ptr<PyClient> client, T* contents) {
  ClientAndPtr<T> result;
  result.client = std::move(client);
  result.contents = contents;
  return result;
}
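// Illustrative usage sketch (comment only, not part of this header's API):
// inside a PyClient method, a raw runtime pointer is typically handed back to
// Python wrapped together with the owning client, e.g.
//
//   // `device` is a hypothetical PjRtDevice* owned by pjrt_client_.
//   ClientAndPtr<PjRtDevice> wrapped =
//       WrapWithClient(shared_from_this(), device);
//
// pybind11 then uses ClientAndPtr<PjRtDevice> as the holder type (see the
// PYBIND11_DECLARE_HOLDER_TYPE declaration at the end of this file), so the
// PyClient stays alive for as long as Python holds the wrapped device.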
// Python wrapper around PjRtClient.
// We use a wrapper class to add Python-specific functionality.
class PyClient : public std::enable_shared_from_this<PyClient> {
 public:
  explicit PyClient(std::unique_ptr<PjRtClient> pjrt_client);
  explicit PyClient(std::shared_ptr<PjRtClient> pjrt_client);

  PjRtClient* pjrt_client() const { return pjrt_client_.get(); }
  std::shared_ptr<PjRtClient> shared_pjrt_client() { return pjrt_client_; }

  absl::string_view platform_name() const {
    return pjrt_client_->platform_name();
  }
  int addressable_device_count() const {
    return pjrt_client_->addressable_device_count();
  }
  int device_count() const { return pjrt_client_->device_count(); }
  int task_id() const { return pjrt_client_->task_id(); }

  std::vector<ClientAndPtr<PjRtDevice>> Devices();
  std::vector<ClientAndPtr<PjRtDevice>> LocalDevices();
  std::vector<ClientAndPtr<PyBuffer>> LiveBuffers();

  StatusOr<std::vector<std::vector<ClientAndPtr<PjRtDevice>>>>
  GetDefaultDeviceAssignment(int num_replicas, int num_partitions);

  // TODO(skye): delete after all callers can handle 2D output
  StatusOr<std::vector<ClientAndPtr<PjRtDevice>>> GetDefaultDeviceAssignment1D(
      int num_replicas);

  StatusOr<ChannelHandle> CreateChannelHandle() {
    return pjrt_client_->CreateChannelHandle();
  }
  StatusOr<ChannelHandle> CreateDeviceToHostChannelHandle() {
    return pjrt_client_->CreateDeviceToHostChannelHandle();
  }
  StatusOr<ChannelHandle> CreateHostToDeviceChannelHandle() {
    return pjrt_client_->CreateHostToDeviceChannelHandle();
  }

  StatusOr<std::unique_ptr<PjRtBuffer>> PjRtBufferFromPyval(
      pybind11::handle argument, PjRtDevice* device, bool force_copy,
      PjRtClient::HostBufferSemantics host_buffer_semantics);
  StatusOr<std::unique_ptr<PyBuffer>> BufferFromPyval(
      pybind11::handle argument, PjRtDevice* device, bool force_copy,
      PjRtClient::HostBufferSemantics host_buffer_semantics);

  StatusOr<std::unique_ptr<PyExecutable>> Compile(
      const XlaComputation& computation, CompileOptions options);

  pybind11::bytes HeapProfile();

 private:
  friend class PyBuffer;
  friend class PyExecutable;

  std::shared_ptr<PjRtClient> pjrt_client_;

  // Pointers to intrusive doubly-linked lists of buffers and executables, used
  // to iterate over all known objects when heap profiling. The list structure
  // is protected by the GIL.
  PyBuffer* buffers_ = nullptr;
  PyExecutable* executables_ = nullptr;
};

}  // namespace xla

PYBIND11_DECLARE_HOLDER_TYPE(T, xla::ClientAndPtr<T>);

#endif  // TENSORFLOW_COMPILER_XLA_PYTHON_PY_CLIENT_H_