/*
 * Copyright 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_memory_region.h"

#include <fcntl.h>
#include <unistd.h>

#include <android-base/unique_fd.h>
#include <log/log.h>
#include "base/bit_utils.h"  // For RoundDown, RoundUp.
#include "base/globals.h"
#include "base/logging.h"  // For VLOG.
#include "base/membarrier.h"
#include "base/memfd.h"
#include "base/systrace.h"
#include "gc/allocator/dlmalloc.h"
#include "jit/jit_scoped_code_cache_write.h"
#include "oat_quick_method_header.h"
#include "palette/palette.h"

using android::base::unique_fd;

namespace art {
namespace jit {

// Data cache will be half of the capacity.
// Code cache will be the other half of the capacity.
// TODO: Make this variable?
static constexpr size_t kCodeAndDataCapacityDivider = 2;
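// For example, with a 64 MiB max capacity, 32 MiB is reserved for data and
// 32 MiB for code (see the data_capacity / exec_capacity split in Initialize).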

bool JitMemoryRegion::Initialize(size_t initial_capacity,
                                 size_t max_capacity,
                                 bool rwx_memory_allowed,
                                 bool is_zygote,
                                 std::string* error_msg) {
  ScopedTrace trace(__PRETTY_FUNCTION__);

  CHECK_GE(max_capacity, initial_capacity);
  CHECK(max_capacity <= 1 * GB) << "The max supported size for JIT code cache is 1GB";
  // Align both capacities to a multiple of two pages: the capacity is split evenly
  // between data and code, and each half must stay page-aligned (the unit mspaces use).
  initial_capacity_ = RoundDown(initial_capacity, 2 * kPageSize);
  max_capacity_ = RoundDown(max_capacity, 2 * kPageSize);
  current_capacity_ = initial_capacity;
  data_end_ = initial_capacity / kCodeAndDataCapacityDivider;
  exec_end_ = initial_capacity - data_end_;

  const size_t capacity = max_capacity_;
  const size_t data_capacity = capacity / kCodeAndDataCapacityDivider;
  const size_t exec_capacity = capacity - data_capacity;

  // File descriptor enabling dual-view mapping of code section.
  unique_fd mem_fd;

  // The memory mappings we are going to create.
  MemMap data_pages;
  MemMap exec_pages;
  MemMap non_exec_pages;
  MemMap writable_data_pages;

  if (is_zygote) {
    android_errorWriteLog(0x534e4554, "200284993");  // Report to SafetyNet.
    // Because we are not going to GC code generated by the zygote, just use all available.
    current_capacity_ = max_capacity;
    mem_fd = unique_fd(CreateZygoteMemory(capacity, error_msg));
    if (mem_fd.get() < 0) {
      return false;
    }
  } else {
    // Bionic supports memfd_create, but the call may fail on older kernels.
    mem_fd = unique_fd(art::memfd_create("jit-cache", /* flags= */ 0));
    if (mem_fd.get() < 0) {
      std::ostringstream oss;
      oss << "Failed to initialize dual view JIT. memfd_create() error: " << strerror(errno);
      if (!rwx_memory_allowed) {
        // Without RWX page permissions, the JIT cannot fall back to a single mapping, as
        // updates would require transitioning the code pages to RWX.
        *error_msg = oss.str();
        return false;
      }
      VLOG(jit) << oss.str();
    } else if (ftruncate(mem_fd, capacity) != 0) {
      std::ostringstream oss;
      oss << "Failed to initialize memory file: " << strerror(errno);
      *error_msg = oss.str();
      return false;
    }
  }

  // Map names specific for android_os_Debug.cpp accounting.
  std::string data_cache_name = is_zygote ? "zygote-data-code-cache" : "data-code-cache";
  std::string exec_cache_name = is_zygote ? "zygote-jit-code-cache" : "jit-code-cache";

  std::string error_str;
  int base_flags;
  if (mem_fd.get() >= 0) {
    // Dual view of JIT code cache case. Create an initial mapping of data pages large enough
    // for data and non-writable view of JIT code pages. We use the memory file descriptor to
    // enable dual mapping - we'll create a second mapping using the descriptor below. The
    // mappings will look like:
    //
    //       VA                  PA
    //
    //       +---------------+
    //       | non exec code |\
    //       +---------------+ \
    //       | writable data |\ \
    //       +---------------+ \ \
    //       :               :\ \ \
    //       +---------------+.\.\.+---------------+
    //       |  exec code    |  \ \|     code      |
    //       +---------------+...\.+---------------+
    //       | readonly data |    \|     data      |
    //       +---------------+.....+---------------+
    //
    // In this configuration code updates are written to the non-executable view of the code
    // cache, and the executable view of the code cache has fixed RX memory protections.
    //
    // This memory needs to be mapped shared as the code portions will have two mappings.
    //
    // Additionally, the zygote will create a dual view of the data portion of
    // the cache. This mapping will be read-only, whereas the second mapping
    // will be writable.

    base_flags = MAP_SHARED;

    // Create the writable mappings now, so that in case of the zygote, we can
    // prevent any future writable mappings through sealing.
    if (exec_capacity > 0) {
      // For dual view, create the secondary view of code memory used for updating code. This view
      // is never executable.
      std::string name = exec_cache_name + "-rw";
      non_exec_pages = MemMap::MapFile(exec_capacity,
                                       kIsDebugBuild ? kProtR : kProtRW,
                                       base_flags,
                                       mem_fd,
                                       /* start= */ data_capacity,
                                       /* low_4gb= */ false,
                                       name.c_str(),
                                       &error_str);
      if (!non_exec_pages.IsValid()) {
        // This is unexpected.
        *error_msg = "Failed to map non-executable view of JIT code cache";
        return false;
      }
      // Create a dual view of the data cache.
      name = data_cache_name + "-rw";
      writable_data_pages = MemMap::MapFile(data_capacity,
                                            kProtRW,
                                            base_flags,
                                            mem_fd,
                                            /* start= */ 0,
                                            /* low_4gb= */ false,
                                            name.c_str(),
                                            &error_str);
      if (!writable_data_pages.IsValid()) {
        std::ostringstream oss;
        oss << "Failed to create dual data view: " << error_str;
        *error_msg = oss.str();
        return false;
      }
      if (writable_data_pages.MadviseDontFork() != 0) {
        *error_msg = "Failed to MadviseDontFork the writable data view";
        return false;
      }
      if (non_exec_pages.MadviseDontFork() != 0) {
        *error_msg = "Failed to MadviseDontFork the writable code view";
        return false;
      }
      // Now that we have created the writable and executable mappings, prevent creating any new
      // ones.
      if (is_zygote && !ProtectZygoteMemory(mem_fd.get(), error_msg)) {
        return false;
      }
    }

    // Map in low 4gb to simplify accessing root tables for x86_64.
    // We could do PC-relative addressing to avoid this problem, but that
    // would require reserving code and data area before submitting, which
    // means more windows for the code memory to be RWX.
    data_pages = MemMap::MapFile(
        data_capacity + exec_capacity,
        kProtR,
        base_flags,
        mem_fd,
        /* start= */ 0,
        /* low_4gb= */ true,
        data_cache_name.c_str(),
        &error_str);
  } else {
    // Single view of JIT code cache case. Create an initial mapping of data pages large enough
    // for data and JIT code pages. The mappings will look like:
    //
    //       VA                  PA
    //
    //       +---------------+...+---------------+
    //       |  exec code    |   |     code      |
    //       +---------------+...+---------------+
    //       |      data     |   |     data      |
    //       +---------------+...+---------------+
    //
    // In this configuration code updates are written to the executable view of the code cache,
    // and the executable view of the code cache transitions RX to RWX for the update and then
    // back to RX after the update.
    base_flags = MAP_PRIVATE | MAP_ANON;
    data_pages = MemMap::MapAnonymous(
        data_cache_name.c_str(),
        data_capacity + exec_capacity,
        kProtRW,
        /* low_4gb= */ true,
        &error_str);
  }

  if (!data_pages.IsValid()) {
    std::ostringstream oss;
    oss << "Failed to create read write cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return false;
  }

  if (exec_capacity > 0) {
    uint8_t* const divider = data_pages.Begin() + data_capacity;
    // Set initial permission for executable view to catch any SELinux permission problems early
    // (for processes that cannot map WX pages). Otherwise, this region does not need to be
    // executable as there is no code in the cache yet.
    exec_pages = data_pages.RemapAtEnd(divider,
                                       exec_cache_name.c_str(),
                                       kProtRX,
                                       base_flags | MAP_FIXED,
                                       mem_fd.get(),
                                       (mem_fd.get() >= 0) ? data_capacity : 0,
                                       &error_str);
    if (!exec_pages.IsValid()) {
      std::ostringstream oss;
      oss << "Failed to create read execute code cache: " << error_str << " size=" << capacity;
      *error_msg = oss.str();
      return false;
    }
  } else {
    // Profiling only. No memory for code required.
  }

  data_pages_ = std::move(data_pages);
  exec_pages_ = std::move(exec_pages);
  non_exec_pages_ = std::move(non_exec_pages);
  writable_data_pages_ = std::move(writable_data_pages);

  VLOG(jit) << "Created JitMemoryRegion"
            << ": data_pages=" << reinterpret_cast<void*>(data_pages_.Begin())
            << ", exec_pages=" << reinterpret_cast<void*>(exec_pages_.Begin())
            << ", non_exec_pages=" << reinterpret_cast<void*>(non_exec_pages_.Begin())
            << ", writable_data_pages=" << reinterpret_cast<void*>(writable_data_pages_.Begin());

  // Now that the pages are initialized, initialize the spaces.

  // Initialize the data heap.
  data_mspace_ = create_mspace_with_base(
      HasDualDataMapping() ? writable_data_pages_.Begin() : data_pages_.Begin(),
      data_end_,
      /* locked= */ false);
  CHECK(data_mspace_ != nullptr) << "create_mspace_with_base (data) failed";

  // Allow mspace to use the full data capacity.
  // It will still only use as little memory as possible and ask for MoreCore as needed.
  CHECK(IsAlignedParam(data_capacity, kPageSize));
  mspace_set_footprint_limit(data_mspace_, data_capacity);

  // Initialize the code heap.
  MemMap* code_heap = nullptr;
  if (non_exec_pages_.IsValid()) {
    code_heap = &non_exec_pages_;
  } else if (exec_pages_.IsValid()) {
    code_heap = &exec_pages_;
  }
  if (code_heap != nullptr) {
    // Make all pages reserved for the code heap writable. The mspace allocator that manages
    // the heap will take and initialize pages in create_mspace_with_base().
    {
      ScopedCodeCacheWrite scc(*this);
      exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, /* locked= */ false);
    }
    CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
    SetFootprintLimit(current_capacity_);
  } else {
    exec_mspace_ = nullptr;
    SetFootprintLimit(current_capacity_);
  }
  return true;
}

void JitMemoryRegion::SetFootprintLimit(size_t new_footprint) {
  size_t data_space_footprint = new_footprint / kCodeAndDataCapacityDivider;
  DCHECK(IsAlignedParam(data_space_footprint, kPageSize));
  DCHECK_EQ(data_space_footprint * kCodeAndDataCapacityDivider, new_footprint);
  if (HasCodeMapping()) {
    ScopedCodeCacheWrite scc(*this);
    mspace_set_footprint_limit(exec_mspace_, new_footprint - data_space_footprint);
  }
}

bool JitMemoryRegion::IncreaseCodeCacheCapacity() {
  if (current_capacity_ == max_capacity_) {
    return false;
  }

  // Double the capacity if we're below 1MB, or increase it by 1MB if
  // we're above.
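  // For example, starting from 64 KiB: 64 KiB -> 128 KiB -> 256 KiB -> 512 KiB
  // -> 1 MiB -> 2 MiB -> 3 MiB -> ..., clamped to max_capacity_.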
  if (current_capacity_ < 1 * MB) {
    current_capacity_ *= 2;
  } else {
    current_capacity_ += 1 * MB;
  }
  if (current_capacity_ > max_capacity_) {
    current_capacity_ = max_capacity_;
  }

  VLOG(jit) << "Increasing code cache capacity to " << PrettySize(current_capacity_);

  SetFootprintLimit(current_capacity_);

  return true;
}

// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
// is already held.
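// Note: `increment` is the number of extra bytes the mspace needs. It is satisfied
// linearly out of the pre-reserved mapping by bumping the matching end cursor
// (exec_end_ or data_end_), so no new pages are mapped here.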
void* JitMemoryRegion::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
  if (mspace == exec_mspace_) {
    CHECK(exec_mspace_ != nullptr);
    const MemMap* const code_pages = GetUpdatableCodeMapping();
    void* result = code_pages->Begin() + exec_end_;
    exec_end_ += increment;
    return result;
  } else {
    CHECK_EQ(data_mspace_, mspace);
    const MemMap* const writable_data_pages = GetWritableDataMapping();
    void* result = writable_data_pages->Begin() + data_end_;
    data_end_ += increment;
    return result;
  }
}

const uint8_t* JitMemoryRegion::CommitCode(ArrayRef<const uint8_t> reserved_code,
                                           ArrayRef<const uint8_t> code,
                                           const uint8_t* stack_map,
                                           bool has_should_deoptimize_flag) {
  DCHECK(IsInExecSpace(reserved_code.data()));
  ScopedCodeCacheWrite scc(*this);

  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  size_t header_size = OatQuickMethodHeader::InstructionAlignedSize();
  size_t total_size = header_size + code.size();

  // Each allocation should be on its own set of cache lines.
  // `total_size` covers the OatQuickMethodHeader, the JIT generated machine code,
  // and any alignment padding.
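  // The committed region is thus laid out as [ OatQuickMethodHeader | code ]:
  // `result` below points at the first code byte, and the header is written
  // immediately before it via OatQuickMethodHeader::FromCodePointer.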
  DCHECK_GT(total_size, header_size);
  DCHECK_LE(total_size, reserved_code.size());
  uint8_t* x_memory = const_cast<uint8_t*>(reserved_code.data());
  uint8_t* w_memory = const_cast<uint8_t*>(GetNonExecutableAddress(x_memory));
  // Ensure the header ends up at expected instruction alignment.
  DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(w_memory + header_size), alignment);
  const uint8_t* result = x_memory + header_size;

  // Write the code.
  std::copy(code.begin(), code.end(), w_memory + header_size);

  // Write the header.
  OatQuickMethodHeader* method_header =
      OatQuickMethodHeader::FromCodePointer(w_memory + header_size);
  new (method_header) OatQuickMethodHeader((stack_map != nullptr) ? result - stack_map : 0u);
  if (has_should_deoptimize_flag) {
    method_header->SetHasShouldDeoptimizeFlag();
  }

  // Both instruction and data caches need flushing to the point of unification where both share
  // a common view of memory. Flushing the data cache ensures the dirty cachelines from the
  // newly added code are written out to the point of unification. Flushing the instruction
  // cache ensures the newly written code will be fetched from the point of unification before
  // use. Memory in the code cache is re-cycled as code is added and removed. The flushes
  // prevent stale code from residing in the instruction cache.
  //
  // Caches are flushed before write permission is removed because some ARMv8 Qualcomm kernels
  // may trigger a segfault if a page fault occurs when requesting a cache maintenance
  // operation. This is a kernel bug that we need to work around until affected devices
  // (e.g. Nexus 5X and 6P) stop being supported or their kernels are fixed.
  //
  // For reference, this behavior is caused by this commit:
  // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
  //
  bool cache_flush_success = true;
  if (HasDualCodeMapping()) {
    // Flush d-cache for the non-executable mapping.
    cache_flush_success = FlushCpuCaches(w_memory, w_memory + total_size);
  }

  // Invalidate i-cache for the executable mapping.
  if (cache_flush_success) {
    cache_flush_success = FlushCpuCaches(x_memory, x_memory + total_size);
  }

  // If flushing the cache has failed, reject the allocation because we can't guarantee
  // correctness of the instructions present in the processor caches.
  if (!cache_flush_success) {
    PLOG(ERROR) << "Cache flush failed triggering code allocation failure";
    return nullptr;
  }

  // Ensure CPU instruction pipelines are flushed for all cores. This is necessary for
  // correctness as code may still be in instruction pipelines despite the i-cache flush. It is
  // not safe to assume that changing permissions with mprotect (RX->RWX->RX) will cause a TLB
  // shootdown (incidentally invalidating the CPU pipelines by sending an IPI to all cores to
  // notify them of the TLB invalidation). Some architectures, notably ARM and ARM64, have
  // hardware support that broadcasts TLB invalidations and so their kernels have no software
  // based TLB shootdown. The sync-core flavor of membarrier was introduced in Linux 4.16 to
  // address this (see membarrier(2)). The membarrier here will fail on prior kernels and on
  // platforms lacking the appropriate support.
  art::membarrier(art::MembarrierCommand::kPrivateExpeditedSyncCore);

  return result;
}

static void FillRootTable(uint8_t* roots_data, const std::vector<Handle<mirror::Object>>& roots)
    REQUIRES(Locks::jit_lock_)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  GcRoot<mirror::Object>* gc_roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
  const uint32_t length = roots.size();
  // Put all roots in `roots_data`.
  for (uint32_t i = 0; i < length; ++i) {
    ObjPtr<mirror::Object> object = roots[i].Get();
    gc_roots[i] = GcRoot<mirror::Object>(object);
  }
  // Store the length of the table at the end. This will allow fetching it from a stack_map
  // pointer.
  reinterpret_cast<uint32_t*>(roots_data)[length] = length;
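  // Resulting layout is [ root_0 | ... | root_{n-1} | n ]: since a GcRoot<> holds a
  // 32-bit compressed reference, the length slot lands directly after the last root.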
}

bool JitMemoryRegion::CommitData(ArrayRef<const uint8_t> reserved_data,
                                 const std::vector<Handle<mirror::Object>>& roots,
                                 ArrayRef<const uint8_t> stack_map) {
  DCHECK(IsInDataSpace(reserved_data.data()));
  uint8_t* roots_data = GetWritableDataAddress(reserved_data.data());
  size_t root_table_size = ComputeRootTableSize(roots.size());
  uint8_t* stack_map_data = roots_data + root_table_size;
  DCHECK_LE(root_table_size + stack_map.size(), reserved_data.size());
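  // `reserved_data` is consumed as [ root table | stack map ].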
  FillRootTable(roots_data, roots);
  memcpy(stack_map_data, stack_map.data(), stack_map.size());
  // Flush data cache, as compiled code references literals in it.
  // TODO(oth): establish whether this is necessary.
  if (UNLIKELY(!FlushCpuCaches(roots_data, roots_data + root_table_size + stack_map.size()))) {
    VLOG(jit) << "Failed to flush data in CommitData";
    return false;
  }
  return true;
}

const uint8_t* JitMemoryRegion::AllocateCode(size_t size) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  void* result = mspace_memalign(exec_mspace_, alignment, size);
  if (UNLIKELY(result == nullptr)) {
    return nullptr;
  }
  used_memory_for_code_ += mspace_usable_size(result);
  return reinterpret_cast<uint8_t*>(GetExecutableAddress(result));
}

void JitMemoryRegion::FreeCode(const uint8_t* code) {
  code = GetNonExecutableAddress(code);
  used_memory_for_code_ -= mspace_usable_size(code);
  mspace_free(exec_mspace_, const_cast<uint8_t*>(code));
}

const uint8_t* JitMemoryRegion::AllocateData(size_t data_size) {
  void* result = mspace_malloc(data_mspace_, data_size);
  if (UNLIKELY(result == nullptr)) {
    return nullptr;
  }
  used_memory_for_data_ += mspace_usable_size(result);
  return reinterpret_cast<uint8_t*>(GetNonWritableDataAddress(result));
}

void JitMemoryRegion::FreeData(const uint8_t* data) {
  FreeWritableData(GetWritableDataAddress(data));
}

void JitMemoryRegion::FreeWritableData(uint8_t* writable_data) REQUIRES(Locks::jit_lock_) {
  used_memory_for_data_ -= mspace_usable_size(writable_data);
  mspace_free(data_mspace_, writable_data);
}

#if defined(__BIONIC__) && defined(ART_TARGET)
// The code below only works on bionic on target.

int JitMemoryRegion::CreateZygoteMemory(size_t capacity, std::string* error_msg) {
  if (CacheOperationsMaySegFault()) {
    // Zygote JIT requires dual code mappings by design. We can only do this if the cache flush
    // and invalidate instructions work without raising faults.
    *error_msg = "Zygote memory only works with dual mappings";
    return -1;
  }
  // Check if kernel support exists, otherwise fall back to ashmem.
  static const char* kRegionName = "jit-zygote-cache";
  if (art::IsSealFutureWriteSupported()) {
    int fd = art::memfd_create(kRegionName, MFD_ALLOW_SEALING);
    if (fd == -1) {
      std::ostringstream oss;
      oss << "Failed to create zygote mapping: " << strerror(errno);
      *error_msg = oss.str();
      return -1;
    }

    if (ftruncate(fd, capacity) != 0) {
      std::ostringstream oss;
      oss << "Failed to create zygote mapping: " << strerror(errno);
      *error_msg = oss.str();
      close(fd);  // Avoid leaking the descriptor on the error path.
      return -1;
    }

    return fd;
  }

  LOG(INFO) << "Falling back to ashmem implementation for JIT zygote mapping";

  int fd;
  palette_status_t status = PaletteAshmemCreateRegion(kRegionName, capacity, &fd);
  if (status != PALETTE_STATUS_OK) {
    CHECK_EQ(status, PALETTE_STATUS_CHECK_ERRNO);
    std::ostringstream oss;
    oss << "Failed to create zygote mapping: " << strerror(errno);
    *error_msg = oss.str();
    return -1;
  }
  return fd;
}

bool JitMemoryRegion::ProtectZygoteMemory(int fd, std::string* error_msg) {
  if (art::IsSealFutureWriteSupported()) {
    if (fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL | F_SEAL_FUTURE_WRITE)
            == -1) {
      std::ostringstream oss;
      oss << "Failed to protect zygote mapping: " << strerror(errno);
      *error_msg = oss.str();
      return false;
    }
  } else {
    palette_status_t status = PaletteAshmemSetProtRegion(fd, PROT_READ | PROT_EXEC);
    if (status != PALETTE_STATUS_OK) {
      CHECK_EQ(status, PALETTE_STATUS_CHECK_ERRNO);
      std::ostringstream oss;
      oss << "Failed to protect zygote mapping: " << strerror(errno);
      *error_msg = oss.str();
      return false;
    }
  }
  return true;
}

#else

int JitMemoryRegion::CreateZygoteMemory(size_t capacity, std::string* error_msg) {
  // To simplify host building, we don't rely on the latest memfd features.
  LOG(WARNING) << "Returning un-sealable region on non-bionic";
  static const char* kRegionName = "/jit-zygote-cache";
  int fd = art::memfd_create(kRegionName, 0);
  if (fd == -1) {
    std::ostringstream oss;
    oss << "Failed to create zygote mapping: " << strerror(errno);
    *error_msg = oss.str();
    return -1;
  }
  if (ftruncate(fd, capacity) != 0) {
    std::ostringstream oss;
    oss << "Failed to create zygote mapping: " << strerror(errno);
    *error_msg = oss.str();
    close(fd);  // Avoid leaking the descriptor on the error path.
    return -1;
  }
  return fd;
}

bool JitMemoryRegion::ProtectZygoteMemory(int fd ATTRIBUTE_UNUSED,
                                          std::string* error_msg ATTRIBUTE_UNUSED) {
  return true;
}

#endif

}  // namespace jit
}  // namespace art