/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_SCOPED_ALLOCATOR_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_SCOPED_ALLOCATOR_H_

#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/platform/mutex.h"

namespace tensorflow {
class ScopedAllocatorContainer;
class ScopedAllocatorInstance;

// Manages a single backing tensor and a collection of aliases.
class ScopedAllocator {
 public:
  static constexpr int32 kInvalidId = 0;
  static constexpr size_t kMaxAlignment = 64;

  // A subrange of the TensorBuffer associated with this object that
  // will be the backing memory for one aliased tensor.
  struct Field {
    int32 scope_id;
    size_t offset;
    size_t bytes_requested;
    size_t bytes_allocated;
  };
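
  // Illustrative layout (hypothetical numbers, not taken from production
  // code): two fields carved out of one 1024-byte backing buffer, where
  // bytes_allocated may exceed bytes_requested because each field's extent
  // is padded up to kMaxAlignment.
  //
  //   Field f0{/*scope_id=*/10, /*offset=*/0,   /*bytes_requested=*/100,
  //            /*bytes_allocated=*/128};
  //   Field f1{/*scope_id=*/11, /*offset=*/128, /*bytes_requested=*/896,
  //            /*bytes_allocated=*/896};
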
  // Field index that refers to backing tensor, not any aliased field.
  static constexpr int32 kBackingIndex = -1;

  // backing_tensor is expected to be newly allocated by a ScopedAllocatorOp
  // instance.  It must be large enough to back all of the specified
  // (offset, byte) ranges of the fields.
  ScopedAllocator(const Tensor& backing_tensor, int32 scope_id,
                  const std::string& name, const gtl::ArraySlice<Field> fields,
                  int32 expected_call_count,
                  ScopedAllocatorContainer* container);
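
  // Construction sketch (hypothetical values; in practice a
  // ScopedAllocatorOp allocates the backing tensor and the resulting
  // object is owned by a ScopedAllocatorContainer):
  //
  //   Tensor backing(DT_FLOAT, TensorShape({256}));  // 1024 bytes
  //   std::vector<Field> fields = {f0, f1};          // layout example above
  //   auto* sa = new ScopedAllocator(backing, /*scope_id=*/9, "sa_demo",
  //                                  fields, /*expected_call_count=*/2,
  //                                  container);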

  // Automatically deletes when last use expires, or when
  // ScopedAllocatorContainer decides to delete.
  ~ScopedAllocator() TF_LOCKS_EXCLUDED(mu_);

  // For debugging: returns true iff p is a pointer that could have
  // been returned by AllocateRaw.
  bool VerifyPointer(const void* p);
  bool VerifyTensor(const Tensor* t);

  const Tensor& tensor() const { return backing_tensor_; }

  const std::string& name() const { return name_; }

 private:
  friend class ScopedAllocatorInstance;
  // Only ScopedAllocatorInstances can call AllocateRaw and DeallocateRaw on a
  // ScopedAllocator.
  void* AllocateRaw(int32 field_index, size_t num_bytes) TF_LOCKS_EXCLUDED(mu_);
  void DeallocateRaw(void* p) TF_LOCKS_EXCLUDED(mu_);
  Tensor backing_tensor_;
  TensorBuffer* tbuf_;
  int32 id_;
  std::string name_;
  ScopedAllocatorContainer* container_;
  std::vector<Field> fields_;
  mutex mu_;
  int32 expected_call_count_ TF_GUARDED_BY(mu_);
  int32 live_alloc_count_ TF_GUARDED_BY(mu_);
};

// An Allocator that will return a pointer into the backing buffer of
// a previously allocated tensor, allowing creation of an alias
// tensor.  There is a one-to-one mapping between the fields of a
// ScopedAllocator and ScopedAllocatorInstances.  There is also a one-to-one
// mapping between scope_ids and ScopedAllocatorInstances.  Each instance
// should be discarded immediately after a single use.
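//
// Usage sketch (hypothetical; instances are normally created by the
// runtime and handed out through the ScopedAllocatorContainer):
//
//   ScopedAllocatorInstance* inst =
//       new ScopedAllocatorInstance(sa, /*field_index=*/0);
//   void* p = inst->AllocateRaw(ScopedAllocator::kMaxAlignment,
//                               /*num_bytes=*/100);
//   ...  // wrap p in an alias Tensor and use it exactly once
//   inst->DeallocateRaw(p);  // the instance may delete itself after this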
class ScopedAllocatorInstance : public Allocator {
 public:
  explicit ScopedAllocatorInstance(ScopedAllocator* sa, int32 field_index);

 private:
  ~ScopedAllocatorInstance() override {
    VLOG(1) << "~ScopedAllocatorInstance " << this;
  }

 public:
  // When a ScopedAllocatorContainer "Drops" a scope_id, it calls DropFromTable
  // on the underlying ScopedAllocatorInstance.  If this instance has already
  // deallocated the tensor slice, we can safely delete this.
  void DropFromTable() TF_LOCKS_EXCLUDED(mu_);
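  // Lifetime sketch (a reading of the comment above, stated as an
  // assumption; the actual logic lives in the .cc file): the instance
  // deletes itself once both of the following have happened, in either
  // order:
  //   1. DeallocateRaw() has returned its slice, and
  //   2. DropFromTable() has been called by the container.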
  void* AllocateRaw(size_t alignment, size_t num_bytes)
      TF_LOCKS_EXCLUDED(mu_) override;
  void* AllocateRaw(size_t alignment, size_t num_bytes,
                    const AllocationAttributes& allocator_attr) override {
    return AllocateRaw(alignment, num_bytes);
  }
  void DeallocateRaw(void* p) TF_LOCKS_EXCLUDED(mu_) override;
  bool TracksAllocationSizes() const override { return false; }
  size_t RequestedSize(const void* ptr) const override { return 0; }
  size_t AllocatedSize(const void* ptr) const override { return 0; }
  int64 AllocationId(const void* ptr) const override { return 0; }
  size_t AllocatedSizeSlow(const void* ptr) const override { return 0; }
  std::string Name() override;

 private:
  mutex mu_;
  ScopedAllocator* scoped_allocator_;
  int32 field_index_;
  bool allocated_ TF_GUARDED_BY(mu_);
  bool deallocated_ TF_GUARDED_BY(mu_);
  bool in_table_ TF_GUARDED_BY(mu_);
};

}  // namespace tensorflow
#endif  // TENSORFLOW_CORE_COMMON_RUNTIME_SCOPED_ALLOCATOR_H_