//===- Unix/Memory.cpp - Generic UNIX System Configuration ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines some functions for various memory management utilities.
//
//===----------------------------------------------------------------------===//

#include "Unix.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef __APPLE__
#include <mach/mach.h>
#endif

#if defined(__mips__)
#  if defined(__OpenBSD__)
#    include <mips64/sysarch.h>
#  else
#    include <sys/cachectl.h>
#  endif
#endif

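// Darwin exposes sys_icache_invalidate() for flushing the instruction cache;
// other platforms rely on the compiler runtime's __clear_cache() builtin.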
#ifdef __APPLE__
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
#else
extern "C" void __clear_cache(void *, void *);
#endif

namespace {

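// Translate llvm::sys::Memory protection flags into the equivalent POSIX
// PROT_* bits for mmap() and mprotect(). Only the combinations handled
// below are legal; anything else is a caller error.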
int getPosixProtectionFlags(unsigned Flags) {
  switch (Flags) {
  case llvm::sys::Memory::MF_READ:
    return PROT_READ;
  case llvm::sys::Memory::MF_WRITE:
    return PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
    return PROT_READ | PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_EXEC;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE |
      llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_EXEC:
#if defined(__FreeBSD__)
    // On PowerPC, having an executable page that has no read permission
    // can have unintended consequences.  InvalidateInstructionCache uses
    // the dcbf and icbi instructions, both of which are treated by the
    // processor as loads, so executing them on a page without read
    // permission triggers a segmentation fault.  This problem does not
    // appear on Linux, but it does happen on FreeBSD.
    return PROT_READ | PROT_EXEC;
#else
    return PROT_EXEC;
#endif
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PROT_NONE;
}

} // anonymous namespace

namespace llvm {
namespace sys {
MemoryBlock
Memory::allocateMappedMemory(size_t NumBytes,
                             const MemoryBlock *const NearBlock,
                             unsigned PFlags,
                             std::error_code &EC) {
  EC = std::error_code();
  if (NumBytes == 0)
    return MemoryBlock();

  static const size_t PageSize = Process::getPageSize();
  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;

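  // Request anonymous, process-private pages. Older BSD-derived systems
  // spell MAP_ANONYMOUS as MAP_ANON, hence the #ifdef below.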
  int MMFlags = MAP_PRIVATE |
#ifdef MAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ; // Ends statement above

  int Protect = getPosixProtectionFlags(PFlags);

  // Use any near hint and the page size to set a page-aligned starting
  // address.
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                      NearBlock->size() : 0;
  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;

  void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
                      Protect, MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint.
      return allocateMappedMemory(NumBytes, nullptr, PFlags, EC);

    EC = std::error_code(errno, std::generic_category());
    return MemoryBlock();
  }

  MemoryBlock Result;
  Result.Address = Addr;
  Result.Size = NumPages*PageSize;

  if (PFlags & MF_EXEC)
    Memory::InvalidateInstructionCache(Result.Address, Result.Size);

  return Result;
}

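/// Unmap a block previously returned by allocateMappedMemory(). On success
/// the block is zeroed out, so releasing the same block twice is a harmless
/// no-op.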
std::error_code
Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == nullptr || M.Size == 0)
    return std::error_code();

  if (0 != ::munmap(M.Address, M.Size))
    return std::error_code(errno, std::generic_category());

  M.Address = nullptr;
  M.Size = 0;

  return std::error_code();
}

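/// Change the protection of an existing mapping. The range is widened
/// outward to page boundaries, since mprotect() operates on whole pages.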
std::error_code
Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
  static const size_t PageSize = Process::getPageSize();
  if (M.Address == nullptr || M.Size == 0)
    return std::error_code();

  if (!Flags)
    return std::error_code(EINVAL, std::generic_category());

  int Protect = getPosixProtectionFlags(Flags);

  uintptr_t Start = alignAddr((uint8_t *)M.Address - PageSize + 1, PageSize);
  uintptr_t End = alignAddr((uint8_t *)M.Address + M.Size, PageSize);
  int Result = ::mprotect((void *)Start, End - Start, Protect);

  if (Result != 0)
    return std::error_code(errno, std::generic_category());

  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(M.Address, M.Size);

  return std::error_code();
}

/// AllocateRWX - Allocate a slab of memory with read/write/execute
/// permissions.  This is typically used for JIT applications where we want
/// to emit code to the memory and then jump to it.  Getting this type of
/// memory is very OS specific.
///
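/// A typical call sequence, as a hypothetical sketch (real JIT clients add
/// error handling):
/// \code
///   std::string Err;
///   sys::MemoryBlock MB = sys::Memory::AllocateRWX(4096, nullptr, &Err);
///   if (MB.base()) {
///     // ... emit machine code into MB.base() ...
///     sys::Memory::InvalidateInstructionCache(MB.base(), MB.size());
///     // ... call into the emitted code ...
///     sys::Memory::ReleaseRWX(MB, &Err);
///   }
/// \endcode
///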
MemoryBlock
Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
                    std::string *ErrMsg) {
  if (NumBytes == 0) return MemoryBlock();

  static const size_t PageSize = Process::getPageSize();
  size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;

  int flags = MAP_PRIVATE |
#ifdef MAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ;

  void* start = NearBlock ? (unsigned char*)NearBlock->base() +
                            NearBlock->size() : nullptr;

#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_EXEC,
                    flags, fd, 0);
#else
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_WRITE|PROT_EXEC,
                    flags, fd, 0);
#endif
  if (pa == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint.
      return AllocateRWX(NumBytes, nullptr, ErrMsg);

    MakeErrMsg(ErrMsg, "Can't allocate RWX Memory");
    return MemoryBlock();
  }

#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
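  // Darwin on ARM enforces W^X: a page cannot be writable and executable at
  // the same time. The two vm_protect() calls below first make the fresh
  // mapping r-x (VM_PROT_COPY appears to be needed so the pages can later be
  // made writable again), then drop it to rw- so code can be written;
  // setExecutable() flips it back before the code is run.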
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                                (vm_size_t)(PageSize*NumPages), 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect max RX failed");
    return MemoryBlock();
  }

  kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                  (vm_size_t)(PageSize*NumPages), 0,
                  VM_PROT_READ | VM_PROT_WRITE);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect RW failed");
    return MemoryBlock();
  }
#endif

  MemoryBlock result;
  result.Address = pa;
  result.Size = NumPages*PageSize;

  return result;
}

bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == nullptr || M.Size == 0) return false;
  if (0 != ::munmap(M.Address, M.Size))
    return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
  return false;
}

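// On most hosts setWritable/setExecutable are no-ops that simply return
// true; only Darwin ARM targets, where W^X is enforced, need to flip pages
// between the writable and executable states with vm_protect().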
bool Memory::setWritable(MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  if (M.Address == nullptr || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setExecutable(MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == nullptr || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setRangeWritable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted, it must invalidate the instruction cache on some
/// platforms.
void Memory::InvalidateInstructionCache(const void *Addr,
                                        size_t Len) {

// icache invalidation for PPC and ARM.
#if defined(__APPLE__)

#  if (defined(__POWERPC__) || defined(__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC) || defined(__arm__) || \
       defined(__arm64__))
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
#  endif

#else

#  if (defined(__POWERPC__) || defined(__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
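  // PowerPC needs an explicit two-pass sequence: dcbf pushes each data-cache
  // line back to memory, sync orders the writebacks, icbi invalidates the
  // matching instruction-cache lines, and isync discards stale prefetched
  // instructions. A 32-byte cache line is assumed below, which is common on
  // PPC but not guaranteed on every implementation.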
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
#  elif (defined(__arm__) || defined(__aarch64__) || defined(__mips__)) && \
        defined(__GNUC__)
  // FIXME: Can we safely always call this for __GNUC__ everywhere?
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
#  endif

#endif  // end apple

  ValgrindDiscardTranslations(Addr, Len);
}

} // namespace sys
} // namespace llvm