//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine caches a specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue reaches a specified
// threshold, the oldest memory is recycled.
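//
// A hedged usage sketch (ToolCallback, ToolChunk, thread_local_cache and the
// size constants are hypothetical placeholders, not part of this file):
//
//   typedef Quarantine<ToolCallback, ToolChunk> ToolQuarantine;
//   static ToolQuarantine quarantine(LINKER_INITIALIZED);
//   quarantine.Init(/*size=*/16 << 20, /*cache_size=*/256 << 10);
//   // On deallocation, stash the chunk instead of releasing it right away:
//   quarantine.Put(&thread_local_cache, ToolCallback(), chunk, chunk_size);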
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Node> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;  // Intrusive list link.
  uptr size;   // Bytes accounted to this batch (including batch overhead).
  uptr count;  // Number of pointers currently stored in batch[].
  void *batch[kSize];
};

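// With 8-byte pointers the header fields (next, size, count) take 3 * 8 = 24
// bytes and the 1021 batch slots take 1021 * 8 = 8168 bytes, so a batch is
// exactly 24 + 8168 = 8192 bytes; the check below keeps it within 8 KB.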
COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8 KB.

// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *Callback::Allocate(uptr size);
// void Callback::Deallocate(void *ptr);
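//
// A minimal conforming callback might look like this (a sketch only; the
// name ToolCallback and how it releases memory are placeholders, not part of
// this header):
//
//   struct ToolCallback {
//     void Recycle(ToolChunk *ptr);  // Actually free the quarantined chunk.
//     void *Allocate(uptr size);     // Storage for QuarantineBatch objects.
//     void Deallocate(void *ptr);    // Free a drained QuarantineBatch.
//   };
//
// The callback is passed by value, so it should be cheap to copy.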
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    atomic_store(&max_size_, size, memory_order_release);
    atomic_store(&min_size_, size / 10 * 9,
                 memory_order_release);  // 90% of max size.
    max_cache_size_ = cache_size;
  }

  uptr GetSize() const { return atomic_load(&max_size_, memory_order_acquire); }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    c->Enqueue(cb, ptr, size);
    if (c->Size() > max_cache_size_)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    // Only one thread recycles at a time; Recycle() releases recycle_mutex_.
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(cb);
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  uptr max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    uptr min_size = atomic_load(&min_size_, memory_order_acquire);
    {
      SpinMutexLock l(&cache_mutex_);
      while (cache_.Size() > min_size) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    // recycle_mutex_ was acquired with TryLock() in Drain().
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= QuarantineBatch::kSize);
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        // Prefetch ahead only within the populated part of the batch.
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      AllocBatch(cb);
      size += sizeof(QuarantineBatch);  // Count the batch in Quarantine size.
    }
    QuarantineBatch *b = list_.back();
    CHECK(b);
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }

  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

 private:
  IntrusiveList<QuarantineBatch> list_;
  atomic_uintptr_t size_;

  // size_ is only written by the cache owner (the owning thread for
  // per-thread caches, or under cache_mutex_ for the global cache), so a
  // plain load + store suffices; the relaxed atomics merely keep concurrent
  // readers of Size() well-defined.
  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    CHECK(b);
    b->count = 0;
    b->size = 0;
    list_.push_back(b);
    return b;
  }
};
} // namespace __sanitizer

#endif // SANITIZER_QUARANTINE_H