//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#ifdef _MSC_VER
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {
struct StackTrace;
struct AddressInfo;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

#if defined(__powerpc__) || defined(__powerpc64__)
  const uptr kCacheLineSize = 128;
#else
  const uptr kCacheLineSize = 64;
#endif

const uptr kMaxPathLength = 4096;

// 16K loaded modules should be enough for everyone.
static const uptr kMaxNumberOfModules = 1 << 14;

const uptr kMaxThreadStackSize = 1 << 30;  // 1 GiB

static const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
INLINE void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
INLINE int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

uptr GetPageSize();
uptr GetPageSizeCached();
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
// Threads
uptr GetTid();
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
void *MmapFixedNoReserve(uptr fixed_addr, uptr size,
                         const char *name = nullptr);
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
void *MmapNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
// Maps an aligned chunk of address space; size and alignment are powers of two.
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
// Disallow access to a memory range.  Use MmapNoAccess to allocate
// inaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
void FlushUnneededShadowMemory(uptr addr, uptr size);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void NoHugePagesInRegion(uptr addr, uptr length);
void DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();

// InternalScopedBuffer can be used instead of large stack arrays to
// keep frame size low.
// FIXME: use InternalAlloc instead of MmapOrDie once
// InternalAlloc is made libc-free.
template<typename T>
class InternalScopedBuffer {
 public:
  explicit InternalScopedBuffer(uptr cnt) {
    cnt_ = cnt;
    ptr_ = (T*)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
  }
  ~InternalScopedBuffer() {
    UnmapOrDie(ptr_, cnt_ * sizeof(T));
  }
  T &operator[](uptr i) { return ptr_[i]; }
  T *data() { return ptr_; }
  uptr size() { return cnt_ * sizeof(T); }

 private:
  T *ptr_;
  uptr cnt_;
  // Disallow evil constructors.
  InternalScopedBuffer(const InternalScopedBuffer&);
  void operator=(const InternalScopedBuffer&);
};
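// Usage sketch (illustrative only, not part of the interface): a scratch
// buffer that is mmapped on construction and unmapped on scope exit. Note
// that size() returns the size in bytes, not the element count.
//   InternalScopedBuffer<char> filename(kMaxPathLength);
//   uptr len = ReadBinaryName(filename.data(), kMaxPathLength);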

class InternalScopedString : public InternalScopedBuffer<char> {
 public:
  explicit InternalScopedString(uptr max_length)
      : InternalScopedBuffer<char>(max_length), length_(0) {
    (*this)[0] = '\0';
  }
  uptr length() { return length_; }
  void clear() {
    (*this)[0] = '\0';
    length_ = 0;
  }
  void append(const char *format, ...);

 private:
  uptr length_;
};
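// Usage sketch (illustrative only): accumulate a message with printf-style
// append() calls and emit it in a single write.
//   InternalScopedString str(kMaxSummaryLength);
//   str.append("SUMMARY: %s: ", SanitizerToolName);
//   str.append("%s", error_message);  // 'error_message' is a placeholder
//   Printf("%s\n", str.data());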

// Simple low-level (mmap-based) allocator for internal use. Doesn't have
// a constructor, so all instances of LowLevelAllocator should be
// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);
 private:
  char *allocated_end_;
  char *allocated_current_;
};
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows registering a tool-specific callback for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
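// Usage sketch (illustrative only): tools typically keep a static,
// linker-initialized instance and construct long-lived internal objects in it
// with the placement operator new declared at the end of this header.
// Allocations are never freed.
//   static LowLevelAllocator internal_alloc;  // zero-initialized by the linker
//   // ... while holding an external lock:
//   //   SomeInternalType *t = new (internal_alloc) SomeInternalType(...);
//   //   ('SomeInternalType' is a placeholder type)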

// IO
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                              \
  do {                                                                   \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__);               \
  } while (0)
#define VPrintf(level, ...)                                              \
  do {                                                                   \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__);               \
  } while (0)
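// Usage sketch (illustrative only): verbosity-gated logging. With verbosity=1
// only the first line below is printed; with verbosity>=2 both are printed.
// ('shadow_beg' and 'module_name' are placeholders.)
//   VReport(1, "%s: shadow memory set up at %p\n", SanitizerToolName,
//           (void *)shadow_beg);
//   VPrintf(2, "loaded module %s\n", module_name);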

// Can be used to prevent mixing error reports from different sanitizers.
extern StaticSpinMutex CommonSanitizerReportMutex;

struct ReportFile {
  void Write(const char *buffer, uptr length);
  bool SupportsColors();
  void SetReportPath(const char *path);

  // Don't use fields directly. They are only declared public to allow
  // aggregate initialization.

  // Protects fields below.
  StaticSpinMutex *mu;
  // Opened file descriptor. Defaults to stderr. It may be equal to
  // kInvalidFd, in which case a new file will be opened when necessary.
  fd_t fd;
  // Path prefix of the report file, set via __sanitizer_set_report_path.
  char path_prefix[kMaxPathLength];
  // Full path to the report, obtained as <path_prefix>.PID
  char full_path[kMaxPathLength];
  // PID of the process that opened fd. If a fork() occurs,
  // the PID of the child will be different from fd_pid.
  uptr fd_pid;

 private:
  void ReopenIfNecessary();
};
extern ReportFile report_file;

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

enum FileAccessMode {
  RdOnly,
  WrOnly,
  RdWr
};

// Returns kInvalidFd on error.
fd_t OpenFile(const char *filename, FileAccessMode mode,
              error_t *errno_p = nullptr);
void CloseFile(fd_t);

// Return true on success, false on error.
bool ReadFromFile(fd_t fd, void *buff, uptr buff_size,
                  uptr *bytes_read = nullptr, error_t *error_p = nullptr);
bool WriteToFile(fd_t fd, const void *buff, uptr buff_size,
                 uptr *bytes_written = nullptr, error_t *error_p = nullptr);

bool RenameFile(const char *oldpath, const char *newpath,
                error_t *error_p = nullptr);

// Scoped file handle closer.
struct FileCloser {
  explicit FileCloser(fd_t fd) : fd(fd) {}
  ~FileCloser() { CloseFile(fd); }
  fd_t fd;
};
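// Usage sketch (illustrative only): open, read, and let FileCloser close the
// descriptor on scope exit.
//   fd_t fd = OpenFile("/proc/self/maps", RdOnly);
//   if (fd != kInvalidFd) {
//     FileCloser closer(fd);
//     char buf[512];
//     uptr bytes_read = 0;
//     if (ReadFromFile(fd, buf, sizeof(buf), &bytes_read))
//       Printf("read %zu bytes\n", bytes_read);
//   }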

bool SupportsColoredOutput(fd_t fd);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmapped and stored in '*buff'.
// The size of the mmapped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if the file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = 1 << 26,
                      error_t *errno_p = nullptr);
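// Usage sketch (illustrative only): the buffer returned by ReadFileToBuffer
// is mmapped, so release it with UnmapOrDie once it is no longer needed.
//   char *buff;
//   uptr buff_size, read_len;
//   if (ReadFileToBuffer("/proc/self/cmdline", &buff, &buff_size, &read_len)) {
//     // ... use buff[0 .. read_len) ...
//     UnmapOrDie(buff, buff_size);
//   }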
// Maps the given file to virtual memory, and returns a pointer to it
// (or NULL if mapping fails). Stores the size of the mmapped region
// in '*buff_size'.
void *MapFileToMemory(const char *file_name, uptr *buff_size);
void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset);

bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
bool FileExists(const char *filename);
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);
const char *GetPwd();
char *FindPathToBinary(const char *name);
bool IsPathSeparator(const char c);
bool IsAbsolutePath(const char *path);

u32 GetUid();
void ReExec();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

void CoverageUpdateMapping();
void CovBeforeFork();
void CovAfterFork(int child_pid);

void InitializeCoverage(bool enabled, const char *coverage_dir);
void ReInitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);

// Set the name of the current thread to 'name', return true on success.
// The name may be truncated to a system-dependent limit.
bool SanitizerSetThreadName(const char *name);
// Get the name of the current thread (no more than max_len bytes),
// return true on success. 'name' should have space for at least
// max_len+1 bytes.
bool SanitizerGetThreadName(char *name, int max_len);

// Specific tools may override behavior of "Die" and "CheckFailed" functions
// to perform tool-specific work.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that will be run when "Die" is
// called. The callbacks are run in reverse order of registration. Tools are
// strongly encouraged to set up all callbacks during initialization, while
// there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);
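// Usage sketch (illustrative only): register a cleanup hook that runs when
// the tool dies. ('FlushLogsOnDeath' is a placeholder.)
//   static void FlushLogsOnDeath() { /* tool-specific cleanup */ }
//   // during single-threaded initialization:
//   AddDieCallback(FlushLogsOnDeath);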

typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
                                       u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);

// The callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if RSS drops back below the limit
// (exceeded==false).
// The callback should be registered once at tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
bool IsDeadlySignal(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);
// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// We don't want the error summary to be too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
void ReportErrorSummary(const char *error_message);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info);
// Same as above, but obtains AddressInfo by symbolizing the top stack trace
// frame.
void ReportErrorSummary(const char *error_type, StackTrace *trace);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);  // NOLINT
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);  // NOLINT
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);  // NOLINT
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);  // NOLINT
#endif
}
#endif

INLINE uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

INLINE uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}

INLINE bool IsPowerOfTwo(uptr x) {
  return (x & (x - 1)) == 0;
}

INLINE uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK(size < (1ULL << (up + 1)));
  CHECK(size > (1ULL << up));
  return 1ULL << (up + 1);
}

INLINE uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

INLINE uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

INLINE bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

INLINE uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
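// Worked example (illustrative only), assuming a 4096-byte boundary:
//   RoundUpTo(5000, 4096)     == 8192
//   RoundDownTo(5000, 4096)   == 4096
//   IsAligned(8192, 4096)     == true
//   Log2(4096)                == 12
//   RoundUpToPowerOfTwo(5000) == 8192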

// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template<class T> T Min(T a, T b) { return a < b ? a : b; }
template<class T> T Max(T a, T b) { return a > b ? a : b; }
template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
INLINE bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
INLINE bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
INLINE int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}

// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  void Initialize(uptr initial_capacity) {
    capacity_ = Max(initial_capacity, (uptr)1);
    size_ = 0;
    data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVectorNoCtor");
  }
  void Destroy() {
    UnmapOrDie(data_, capacity_ * sizeof(T));
  }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity_);
    if (size_ == capacity_) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Resize(new_capacity);
    }
    data_[size_++] = element;
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const {
    return capacity_;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

 private:
  void Resize(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    T *new_data = (T *)MmapOrDie(new_capacity * sizeof(T),
                                 "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    T *old_data = data_;
    data_ = new_data;
    UnmapOrDie(old_data, capacity_ * sizeof(T));
    capacity_ = new_capacity;
  }

  T *data_;
  uptr capacity_;
  uptr size_;
};

template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  explicit InternalMmapVector(uptr initial_capacity) {
    InternalMmapVectorNoCtor<T>::Initialize(initial_capacity);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow evil constructors.
  InternalMmapVector(const InternalMmapVector&);
  void operator=(const InternalMmapVector&);
};
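// Usage sketch (illustrative only): an owning, growable array of POD values
// backed by mmap.
//   InternalMmapVector<uptr> frames(16);
//   frames.push_back(0x1000);
//   frames.push_back(0x2000);
//   for (uptr i = 0; i < frames.size(); i++)
//     Printf("0x%zx\n", frames[i]);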

// HeapSort for arrays and InternalMmapVector.
template<class Container, class Compare>
void InternalSort(Container *v, uptr size, Compare comp) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp((*v)[p], (*v)[j]))
        Swap((*v)[j], (*v)[p]);
      else
        break;
    }
  }
  // Stage 2: swap the largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap((*v)[0], (*v)[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp((*v)[max_ind], (*v)[left]))
        max_ind = left;
      if (right < i && comp((*v)[max_ind], (*v)[right]))
        max_ind = right;
      if (max_ind != j)
        Swap((*v)[j], (*v)[max_ind]);
      else
        break;
    }
  }
}
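// Usage sketch (illustrative only): sort ascending by passing a strict
// less-than comparator. ('UptrLess' is a placeholder helper.)
//   INLINE bool UptrLess(uptr a, uptr b) { return a < b; }
//   // ...
//   InternalSort(&frames, frames.size(), UptrLess);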

template<class Container, class Value, class Compare>
uptr InternalBinarySearch(const Container &v, uptr first, uptr last,
                          const Value &val, Compare comp) {
  uptr not_found = last + 1;
  while (last >= first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else if (comp(val, v[mid]))
      last = mid - 1;
    else
      return mid;
  }
  return not_found;
}
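// Usage sketch (illustrative only, assuming a non-empty container): the range
// [first, last] must already be sorted with the same comparator; last + 1 is
// returned when 'val' is absent.
//   uptr idx = InternalBinarySearch(frames, 0, frames.size() - 1,
//                                   (uptr)0x2000, UptrLess);
//   if (idx == frames.size()) { /* not found */ }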

// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule() : full_name_(nullptr), base_address_(0) { ranges_.clear(); }
  void set(const char *module_name, uptr base_address);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;

    AddressRange(uptr beg, uptr end, bool executable)
        : next(nullptr), beg(beg), end(end), executable(executable) {}
  };

  typedef IntrusiveList<AddressRange>::ConstIterator Iterator;
  Iterator ranges() const { return Iterator(&ranges_); }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  IntrusiveList<AddressRange> ranges_;
};

// OS-dependent function that fills the array with descriptions of at most
// "max_modules" currently loaded modules. Returns the number of
// initialized modules. If filter is nonzero, ignores modules for which
// filter(full_name) is false.
typedef bool (*string_predicate_t)(const char *);
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
                      string_predicate_t filter);
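// Usage sketch (illustrative only): enumerate loaded modules and find the one
// containing a given address. ('pc' is a placeholder.)
//   InternalScopedBuffer<LoadedModule> modules(kMaxNumberOfModules);
//   uptr n = GetListOfModules(modules.data(), kMaxNumberOfModules,
//                             /*filter*/ nullptr);
//   for (uptr i = 0; i < n; i++)
//     if (modules[i].containsAddress(pc))
//       Printf("%p is inside %s\n", (void *)pc, modules[i].full_name());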

// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if SANITIZER_MAC
void LogFullErrorReport(const char *buffer);
#else
INLINE void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
#else
INLINE void WriteOneLineToSyslog(const char *s) {}
#endif

#if SANITIZER_LINUX
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
bool ShouldLogAfterPrintf();
#else
INLINE void AndroidLogInit() {}
INLINE bool ShouldLogAfterPrintf() { return false; }
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

INLINE uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
// Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if _MSC_VER && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
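// Usage sketch (illustrative only): placing the barrier inside a hand-rolled
// zeroing loop keeps the compiler from recognising the idiom and emitting a
// real call to memset(). ('p' and 'size' are placeholders.)
//   for (uptr i = 0; i < size; i++) {
//     ((char *)p)[i] = 0;
//     SanitizerBreakOptimization(p);
//   }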

struct SignalContext {
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;

  SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp) :
      context(context), addr(addr), pc(pc), sp(sp), bp(bp) {
  }

  // Creates signal context in a platform-specific manner.
  static SignalContext Create(void *siginfo, void *context);
};
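// Usage sketch (illustrative only): inside a deadly-signal handler installed
// via InstallDeadlySignalHandlers, build a SignalContext to recover the
// faulting address and register state. ('MyHandler' is a placeholder.)
//   static void MyHandler(int signo, void *siginfo, void *context) {
//     SignalContext sig = SignalContext::Create(siginfo, context);
//     Report("ERROR: signal %d at %p (pc %p sp %p bp %p)\n", signo,
//            (void *)sig.addr, (void *)sig.pc, (void *)sig.sp, (void *)sig.bp);
//   }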

void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);

void DisableReexec();
void MaybeReexec();

}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

#endif  // SANITIZER_COMMON_H