1 /*
2  * Copyright (C) 2019 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define LOG_TAG "CachedAppOptimizer"
18 //#define LOG_NDEBUG 0
19 #define ATRACE_TAG ATRACE_TAG_ACTIVITY_MANAGER
20 #define ATRACE_COMPACTION_TRACK "Compaction"
21 
22 #include <android-base/file.h>
23 #include <android-base/logging.h>
24 #include <android-base/stringprintf.h>
25 #include <android-base/unique_fd.h>
26 #include <android_runtime/AndroidRuntime.h>
27 #include <binder/IPCThreadState.h>
28 #include <cutils/compiler.h>
29 #include <dirent.h>
30 #include <jni.h>
31 #include <linux/errno.h>
32 #include <linux/time.h>
33 #include <log/log.h>
34 #include <meminfo/procmeminfo.h>
35 #include <meminfo/sysmeminfo.h>
36 #include <nativehelper/JNIHelp.h>
37 #include <processgroup/processgroup.h>
38 #include <stddef.h>
39 #include <stdio.h>
40 #include <sys/mman.h>
41 #include <sys/pidfd.h>
42 #include <sys/stat.h>
43 #include <sys/syscall.h>
44 #include <sys/sysinfo.h>
45 #include <sys/types.h>
46 #include <unistd.h>
47 #include <utils/Timers.h>
48 #include <utils/Trace.h>
49 
50 #include <algorithm>
51 
52 using android::base::StringPrintf;
53 using android::base::WriteStringToFile;
54 using android::meminfo::ProcMemInfo;
55 using namespace android::meminfo;
56 
57 static const size_t kPageSize = getpagesize();
58 static const size_t kPageMask = ~(kPageSize - 1);
59 
60 #define COMPACT_ACTION_FILE_FLAG 1
61 #define COMPACT_ACTION_ANON_FLAG 2
62 
63 using VmaToAdviseFunc = std::function<int(const Vma&)>;
64 using android::base::unique_fd;
65 
66 #define SYNC_RECEIVED_WHILE_FROZEN (1)
67 #define ASYNC_RECEIVED_WHILE_FROZEN (2)
68 #define TXNS_PENDING_WHILE_FROZEN (4)
69 
70 #define MAX_RW_COUNT (INT_MAX & kPageMask)
71 
72 // Defines the maximum amount of VMAs we can send per process_madvise syscall.
73 // Currently this is set to UIO_MAXIOV which is the maximum segments allowed by
74 // iovec implementation used by process_madvise syscall
75 #define MAX_VMAS_PER_BATCH UIO_MAXIOV
76 
77 // Maximum bytes that we can send per process_madvise syscall once this limit
78 // is reached we split the remaining VMAs into another syscall. The MAX_RW_COUNT
79 // limit is imposed by iovec implementation. However, if you want to use a smaller
80 // limit, it has to be a page aligned value.
81 #define MAX_BYTES_PER_BATCH MAX_RW_COUNT
82 
83 // Selected a high enough number to avoid clashing with linux errno codes
84 #define ERROR_COMPACTION_CANCELLED -1000
85 
86 namespace android {
87 
88 // Signal happening in separate thread that would bail out compaction
89 // before starting next VMA batch
90 static std::atomic<bool> cancelRunningCompaction;
91 
92 // A VmaBatch represents a set of VMAs that can be processed
93 // as VMAs are processed by client code it is expected that the
94 // VMAs get consumed which means they are discarded as they are
95 // processed so that the first element always is the next element
96 // to be sent
struct VmaBatch {
    // Pointer to the first not-yet-consumed iovec entry; advanced as entries
    // are fully processed by consumeBytes().
    struct iovec* vmas;
    // total amount of VMAs to reach the end of iovec
    size_t totalVmas;
    // total amount of bytes that are remaining within iovec
    uint64_t totalBytes;
};
104 
105 // Advances the iterator by the specified amount of bytes.
106 // This is used to remove already processed or no longer
107 // needed parts of the batch.
108 // Returns total bytes consumed
consumeBytes(VmaBatch & batch,uint64_t bytesToConsume)109 uint64_t consumeBytes(VmaBatch& batch, uint64_t bytesToConsume) {
110     if (CC_UNLIKELY(bytesToConsume) < 0) {
111         LOG(ERROR) << "Cannot consume negative bytes for VMA batch !";
112         return 0;
113     }
114 
115     if (CC_UNLIKELY(bytesToConsume > batch.totalBytes)) {
116         // Avoid consuming more bytes than available
117         bytesToConsume = batch.totalBytes;
118     }
119 
120     uint64_t bytesConsumed = 0;
121     while (bytesConsumed < bytesToConsume) {
122         if (CC_UNLIKELY(batch.totalVmas == 0)) {
123             // No more vmas to consume
124             break;
125         }
126         if (CC_UNLIKELY(bytesConsumed + batch.vmas[0].iov_len > bytesToConsume)) {
127             // This vma can't be fully consumed, do it partially.
128             uint64_t bytesLeftToConsume = bytesToConsume - bytesConsumed;
129             bytesConsumed += bytesLeftToConsume;
130             batch.vmas[0].iov_base = (void*)((uint64_t)batch.vmas[0].iov_base + bytesLeftToConsume);
131             batch.vmas[0].iov_len -= bytesLeftToConsume;
132             batch.totalBytes -= bytesLeftToConsume;
133             return bytesConsumed;
134         }
135         // This vma can be fully consumed
136         bytesConsumed += batch.vmas[0].iov_len;
137         batch.totalBytes -= batch.vmas[0].iov_len;
138         --batch.totalVmas;
139         ++batch.vmas;
140     }
141 
142     return bytesConsumed;
143 }
144 
// given a source of vmas this class will act as a factory
// of VmaBatch objects and it will allow generating batches
// until there are no more left in the source vector.
// Note: the class does not actually modify the given
// vmas vector, instead it iterates on it until the end.
class VmaBatchCreator {
    // Source VMAs; not owned and never modified, only iterated.
    const std::vector<Vma>* sourceVmas;
    // Number of valid entries in sourceVmas to batch. May be fewer than
    // sourceVmas->size() because callers reuse oversized vectors.
    const int totalVmasInSource;
    // This is the destination array where batched VMAs will be stored
    // it gets encapsulated into a VmaBatch which is the object
    // meant to be used by client code.
    struct iovec* destVmas;

    // Parameters to keep track of the iterator on the source vmas
    int currentIndex_;        // next source VMA to batch
    uint64_t currentOffset_;  // byte offset into that VMA (non-zero after a partial batch)

public:
    VmaBatchCreator(const std::vector<Vma>* vmasToBatch, struct iovec* destVmasVec,
                    int vmasInSource)
          : sourceVmas(vmasToBatch),
            totalVmasInSource(vmasInSource),
            destVmas(destVmasVec),
            currentIndex_(0),
            currentOffset_(0) {}

    int currentIndex() { return currentIndex_; }
    uint64_t currentOffset() { return currentOffset_; }

    // Generates a batch and moves the iterator on the source vmas
    // past the last VMA in the batch.
    // Returns true on success, false on failure
    bool createNextBatch(VmaBatch& batch) {
        if (currentIndex_ >= MAX_VMAS_PER_BATCH && currentIndex_ >= sourceVmas->size()) {
            return false;
        }

        const std::vector<Vma>& vmas = *sourceVmas;
        batch.vmas = destVmas;
        uint64_t totalBytesInBatch = 0;
        int indexInBatch = 0;

        // Add VMAs to the batch up until we consumed all the VMAs or
        // reached any imposed limit of VMAs per batch.
        while (indexInBatch < MAX_VMAS_PER_BATCH && currentIndex_ < totalVmasInSource) {
            // Resume mid-VMA if the previous batch ended partway through it.
            uint64_t vmaStart = vmas[currentIndex_].start + currentOffset_;
            uint64_t vmaSize = vmas[currentIndex_].end - vmaStart;
            uint64_t bytesAvailableInBatch = MAX_BYTES_PER_BATCH - totalBytesInBatch;

            batch.vmas[indexInBatch].iov_base = (void*)vmaStart;

            if (vmaSize > bytesAvailableInBatch) {
                // VMA would exceed the max available bytes in batch
                // clamp with available bytes and finish batch.
                vmaSize = bytesAvailableInBatch;
                currentOffset_ += bytesAvailableInBatch;
            }

            batch.vmas[indexInBatch].iov_len = vmaSize;
            totalBytesInBatch += vmaSize;

            ++indexInBatch;
            if (totalBytesInBatch >= MAX_BYTES_PER_BATCH) {
                // Reached max bytes quota so this marks
                // the end of the batch
                if (CC_UNLIKELY(vmaSize == (vmas[currentIndex_].end - vmaStart))) {
                    // we reached max bytes exactly at the end of the vma
                    // so advance to next one
                    currentOffset_ = 0;
                    ++currentIndex_;
                }
                break;
            }
            // Fully finished current VMA, move to next one
            currentOffset_ = 0;
            ++currentIndex_;
        }
        batch.totalVmas = indexInBatch;
        batch.totalBytes = totalBytesInBatch;
        if (batch.totalVmas == 0 || batch.totalBytes == 0) {
            // This is an empty batch, mark as failed creating.
            return false;
        }
        return true;
    }
};
231 
// Madvise a set of VMAs given in a batch for a specific process
// The total number of bytes successfully madvised will be set on
// outBytesProcessed.
// Returns 0 on success and standard linux -errno code returned by
// process_madvise on failure
int madviseVmasFromBatch(unique_fd& pidfd, VmaBatch& batch, int madviseType,
                         uint64_t* outBytesProcessed) {
    if (batch.totalVmas == 0 || batch.totalBytes == 0) {
        // No VMAs in Batch, skip.
        *outBytesProcessed = 0;
        return 0;
    }

    ATRACE_BEGIN(StringPrintf("Madvise %d: %zu VMAs.", madviseType, batch.totalVmas).c_str());
    int64_t bytesProcessedInSend =
            process_madvise(pidfd, batch.vmas, batch.totalVmas, madviseType, 0);
    ATRACE_END();
    if (CC_UNLIKELY(bytesProcessedInSend == -1)) {
        bytesProcessedInSend = 0;
        // EINVAL is treated as recoverable (a single bad VMA); any other
        // errno aborts the whole compaction.
        if (errno != EINVAL) {
            // Forward irrecoverable errors and bail out compaction
            *outBytesProcessed = 0;
            return -errno;
        }
    }
    if (bytesProcessedInSend == 0) {
        // When we find a VMA with error, fully consume it as it
        // is extremely expensive to iterate on its pages one by one
        bytesProcessedInSend = batch.vmas[0].iov_len;
    } else if (bytesProcessedInSend < batch.totalBytes) {
        // Partially processed the bytes requested
        // skip last page which is where it failed.
        bytesProcessedInSend += kPageSize;
    }
    // Drop the processed prefix from the batch so the caller can retry
    // with whatever remains.
    bytesProcessedInSend = consumeBytes(batch, bytesProcessedInSend);

    *outBytesProcessed = bytesProcessedInSend;
    return 0;
}
271 
272 // Legacy method for compacting processes, any new code should
273 // use compactProcess instead.
compactProcessProcfs(int pid,const std::string & compactionType)274 static inline void compactProcessProcfs(int pid, const std::string& compactionType) {
275     std::string reclaim_path = StringPrintf("/proc/%d/reclaim", pid);
276     WriteStringToFile(compactionType, reclaim_path);
277 }
278 
// Compacts a set of VMAs for pid using an madviseType accepted by process_madvise syscall
// Returns the total bytes that where madvised.
//
// If any VMA fails compaction due to -EINVAL it will be skipped and continue.
// However, if it fails for any other reason, it will bail out and forward the error
static int64_t compactMemory(const std::vector<Vma>& vmas, int pid, int madviseType,
                             int totalVmas) {
    if (totalVmas == 0) {
        return 0;
    }

    unique_fd pidfd(pidfd_open(pid, 0));
    if (pidfd < 0) {
        // Skip compaction if failed to open pidfd with any error
        return -errno;
    }

    // Scratch iovec array reused for every batch (stack-allocated).
    struct iovec destVmas[MAX_VMAS_PER_BATCH];

    VmaBatch batch;
    VmaBatchCreator batcher(&vmas, destVmas, totalVmas);

    int64_t totalBytesProcessed = 0;
    // Outer loop: one iteration per batch. Inner loop: keep madvising the
    // same batch until it is fully consumed (process_madvise may process
    // it only partially).
    while (batcher.createNextBatch(batch)) {
        uint64_t bytesProcessedInSend;
        ScopedTrace batchTrace(ATRACE_TAG, "VMA Batch");
        do {
            // Cancellation flag is checked between sends so a concurrent
            // cancelCompaction() call takes effect promptly.
            if (CC_UNLIKELY(cancelRunningCompaction.load())) {
                // There could be a significant delay between when a compaction
                // is requested and when it is handled during this time our
                // OOM adjust could have improved.
                LOG(DEBUG) << "Cancelled running compaction for " << pid;
                ATRACE_INSTANT_FOR_TRACK(ATRACE_COMPACTION_TRACK,
                                         StringPrintf("Cancelled compaction for %d", pid).c_str());
                return ERROR_COMPACTION_CANCELLED;
            }
            int error = madviseVmasFromBatch(pidfd, batch, madviseType, &bytesProcessedInSend);
            if (error < 0) {
                // Returns standard linux errno code
                return error;
            }
            if (CC_UNLIKELY(bytesProcessedInSend == 0)) {
                // This means there was a problem consuming bytes,
                // bail out since no forward progress can be made with this batch
                break;
            }
            totalBytesProcessed += bytesProcessedInSend;
        } while (batch.totalBytes > 0 && batch.totalVmas > 0);
    }

    return totalBytesProcessed;
}
331 
getFilePageAdvice(const Vma & vma)332 static int getFilePageAdvice(const Vma& vma) {
333     if (vma.inode > 0 && !vma.is_shared) {
334         return MADV_COLD;
335     }
336     return -1;
337 }
getAnonPageAdvice(const Vma & vma)338 static int getAnonPageAdvice(const Vma& vma) {
339     bool hasReadFlag = (vma.flags & PROT_READ) > 0;
340     bool hasWriteFlag = (vma.flags & PROT_WRITE) > 0;
341     bool hasExecuteFlag = (vma.flags & PROT_EXEC) > 0;
342     if ((hasReadFlag || hasWriteFlag) && !hasExecuteFlag && !vma.is_shared) {
343         return MADV_PAGEOUT;
344     }
345     return -1;
346 }
getAnyPageAdvice(const Vma & vma)347 static int getAnyPageAdvice(const Vma& vma) {
348     if (vma.inode == 0 && !vma.is_shared) {
349         return MADV_PAGEOUT;
350     }
351     return MADV_COLD;
352 }
353 
// Perform a full process compaction using process_madvise syscall
// using the madvise behavior defined by vmaToAdviseFunc per VMA.
//
// Currently supported behaviors are MADV_COLD and MADV_PAGEOUT.
//
// Returns the total number of bytes compacted on success. On error
// returns process_madvise errno code or if compaction was cancelled
// it returns ERROR_COMPACTION_CANCELLED.
//
// Not thread safe. We reuse vectors so we assume this is called only
// on one thread at most.
static int64_t compactProcess(int pid, VmaToAdviseFunc vmaToAdviseFunc) {
    // Reset any cancellation left over from a previous run.
    cancelRunningCompaction.store(false);
    // Static buffers reused across calls to avoid reallocating per process
    // (this is why the function is documented as not thread safe).
    static std::string mapsBuffer;
    ATRACE_BEGIN("CollectVmas");
    ProcMemInfo meminfo(pid);
    static std::vector<Vma> pageoutVmas(2000), coldVmas(2000);
    int coldVmaIndex = 0;
    int pageoutVmaIndex = 0;
    // Bucket each VMA into the pageout or cold list according to the
    // advice function; indices track the valid prefix of the reused vectors.
    auto vmaCollectorCb = [&vmaToAdviseFunc, &pageoutVmaIndex, &coldVmaIndex](const Vma& vma) {
        int advice = vmaToAdviseFunc(vma);
        switch (advice) {
            case MADV_COLD:
                if (coldVmaIndex < coldVmas.size()) {
                    coldVmas[coldVmaIndex] = vma;
                } else {
                    coldVmas.push_back(vma);
                }
                ++coldVmaIndex;
                break;
            case MADV_PAGEOUT:
#ifdef DEBUG_COMPACTION
                ALOGE("Adding to compact vma=%s", vma.name.c_str());
#endif
                if (pageoutVmaIndex < pageoutVmas.size()) {
                    pageoutVmas[pageoutVmaIndex] = vma;
                } else {
                    pageoutVmas.push_back(vma);
                }
                ++pageoutVmaIndex;
                break;
        }
        return true;
    };
    meminfo.ForEachVmaFromMaps(vmaCollectorCb, mapsBuffer);
    ATRACE_END();
#ifdef DEBUG_COMPACTION
    ALOGE("Total VMAs sent for compaction anon=%d file=%d", pageoutVmaIndex,
            coldVmaIndex);
#endif

    int64_t pageoutBytes = compactMemory(pageoutVmas, pid, MADV_PAGEOUT, pageoutVmaIndex);
    if (pageoutBytes < 0) {
        // Error, just forward it.
        cancelRunningCompaction.store(false);
        return pageoutBytes;
    }

    int64_t coldBytes = compactMemory(coldVmas, pid, MADV_COLD, coldVmaIndex);
    if (coldBytes < 0) {
        // Error, just forward it.
        cancelRunningCompaction.store(false);
        return coldBytes;
    }

    return pageoutBytes + coldBytes;
}
421 
422 // Compact process using process_madvise syscall or fallback to procfs in
423 // case syscall does not exist.
compactProcessOrFallback(int pid,int compactionFlags)424 static void compactProcessOrFallback(int pid, int compactionFlags) {
425     if ((compactionFlags & (COMPACT_ACTION_ANON_FLAG | COMPACT_ACTION_FILE_FLAG)) == 0) return;
426 
427     bool compactAnon = compactionFlags & COMPACT_ACTION_ANON_FLAG;
428     bool compactFile = compactionFlags & COMPACT_ACTION_FILE_FLAG;
429 
430     // Set when the system does not support process_madvise syscall to avoid
431     // gathering VMAs in subsequent calls prior to falling back to procfs
432     static bool shouldForceProcFs = false;
433     std::string compactionType;
434     VmaToAdviseFunc vmaToAdviseFunc;
435 
436     if (compactAnon) {
437         if (compactFile) {
438             compactionType = "all";
439             vmaToAdviseFunc = getAnyPageAdvice;
440         } else {
441             compactionType = "anon";
442             vmaToAdviseFunc = getAnonPageAdvice;
443         }
444     } else {
445         compactionType = "file";
446         vmaToAdviseFunc = getFilePageAdvice;
447     }
448 
449     if (shouldForceProcFs || compactProcess(pid, vmaToAdviseFunc) == -ENOSYS) {
450         shouldForceProcFs = true;
451         compactProcessProcfs(pid, compactionType);
452     }
453 }
454 
455 // This performs per-process reclaim on all processes belonging to non-app UIDs.
456 // For the most part, these are non-zygote processes like Treble HALs, but it
457 // also includes zygote-derived processes that run in system UIDs, like bluetooth
458 // or potentially some mainline modules. The only process that should definitely
459 // not be compacted is system_server, since compacting system_server around the
460 // time of BOOT_COMPLETE could result in perceptible issues.
com_android_server_am_CachedAppOptimizer_compactSystem(JNIEnv *,jobject)461 static void com_android_server_am_CachedAppOptimizer_compactSystem(JNIEnv *, jobject) {
462     std::unique_ptr<DIR, decltype(&closedir)> proc(opendir("/proc"), closedir);
463     struct dirent* current;
464     while ((current = readdir(proc.get()))) {
465         if (current->d_type != DT_DIR) {
466             continue;
467         }
468 
469         // don't compact system_server, rely on persistent compaction during screen off
470         // in order to avoid mmap_sem-related stalls
471         if (atoi(current->d_name) == getpid()) {
472             continue;
473         }
474 
475         std::string status_name = StringPrintf("/proc/%s/status", current->d_name);
476         struct stat status_info;
477 
478         if (stat(status_name.c_str(), &status_info) != 0) {
479             // must be some other directory that isn't a pid
480             continue;
481         }
482 
483         // android.os.Process.FIRST_APPLICATION_UID
484         if (status_info.st_uid >= 10000) {
485             continue;
486         }
487 
488         int pid = atoi(current->d_name);
489 
490         compactProcessOrFallback(pid, COMPACT_ACTION_ANON_FLAG | COMPACT_ACTION_FILE_FLAG);
491     }
492 }
493 
com_android_server_am_CachedAppOptimizer_cancelCompaction(JNIEnv *,jobject)494 static void com_android_server_am_CachedAppOptimizer_cancelCompaction(JNIEnv*, jobject) {
495     cancelRunningCompaction.store(true);
496     ATRACE_INSTANT_FOR_TRACK(ATRACE_COMPACTION_TRACK, "Cancel compaction");
497 }
498 
com_android_server_am_CachedAppOptimizer_threadCpuTimeNs(JNIEnv *,jobject)499 static jlong com_android_server_am_CachedAppOptimizer_threadCpuTimeNs(JNIEnv*, jobject) {
500     int64_t currentCpuTime = systemTime(CLOCK_THREAD_CPUTIME_ID);
501 
502     return currentCpuTime;
503 }
504 
com_android_server_am_CachedAppOptimizer_getFreeSwapPercent(JNIEnv *,jobject)505 static jdouble com_android_server_am_CachedAppOptimizer_getFreeSwapPercent(JNIEnv*, jobject) {
506     struct sysinfo memoryInfo;
507     int error = sysinfo(&memoryInfo);
508     if(error == -1) {
509         LOG(ERROR) << "Could not check free swap space";
510         return 0;
511     }
512     return (double)memoryInfo.freeswap / (double)memoryInfo.totalswap;
513 }
514 
com_android_server_am_CachedAppOptimizer_getUsedZramMemory()515 static jlong com_android_server_am_CachedAppOptimizer_getUsedZramMemory() {
516     android::meminfo::SysMemInfo sysmeminfo;
517     return sysmeminfo.mem_zram_kb();
518 }
519 
com_android_server_am_CachedAppOptimizer_getMemoryFreedCompaction()520 static jlong com_android_server_am_CachedAppOptimizer_getMemoryFreedCompaction() {
521     android::meminfo::SysMemInfo sysmeminfo;
522     return sysmeminfo.mem_compacted_kb("/sys/block/zram0/");
523 }
524 
// JNI entry point for compacting a single process; delegates to
// compactProcessOrFallback, which uses process_madvise when available
// and falls back to the procfs reclaim interface otherwise.
static void com_android_server_am_CachedAppOptimizer_compactProcess(JNIEnv*, jobject, jint pid,
                                                                    jint compactionFlags) {
    compactProcessOrFallback(pid, compactionFlags);
}
529 
com_android_server_am_CachedAppOptimizer_freezeBinder(JNIEnv * env,jobject clazz,jint pid,jboolean freeze,jint timeout_ms)530 static jint com_android_server_am_CachedAppOptimizer_freezeBinder(JNIEnv* env, jobject clazz,
531                                                                   jint pid, jboolean freeze,
532                                                                   jint timeout_ms) {
533     jint retVal = IPCThreadState::freeze(pid, freeze, timeout_ms);
534     if (retVal != 0 && retVal != -EAGAIN) {
535         jniThrowException(env, "java/lang/RuntimeException", "Unable to freeze/unfreeze binder");
536     }
537 
538     return retVal;
539 }
540 
com_android_server_am_CachedAppOptimizer_getBinderFreezeInfo(JNIEnv * env,jobject clazz,jint pid)541 static jint com_android_server_am_CachedAppOptimizer_getBinderFreezeInfo(JNIEnv *env,
542         jobject clazz, jint pid) {
543     uint32_t syncReceived = 0, asyncReceived = 0;
544 
545     int error = IPCThreadState::getProcessFreezeInfo(pid, &syncReceived, &asyncReceived);
546 
547     if (error < 0) {
548         jniThrowException(env, "java/lang/RuntimeException", strerror(error));
549     }
550 
551     jint retVal = 0;
552 
553     // bit 0 of sync_recv goes to bit 0 of retVal
554     retVal |= syncReceived & SYNC_RECEIVED_WHILE_FROZEN;
555     // bit 0 of async_recv goes to bit 1 of retVal
556     retVal |= (asyncReceived << 1) & ASYNC_RECEIVED_WHILE_FROZEN;
557     // bit 1 of sync_recv goes to bit 2 of retVal
558     retVal |= (syncReceived << 1) & TXNS_PENDING_WHILE_FROZEN;
559 
560     return retVal;
561 }
562 
com_android_server_am_CachedAppOptimizer_getFreezerCheckPath(JNIEnv * env,jobject clazz)563 static jstring com_android_server_am_CachedAppOptimizer_getFreezerCheckPath(JNIEnv* env,
564                                                                             jobject clazz) {
565     std::string path;
566 
567     if (!getAttributePathForTask("FreezerState", getpid(), &path)) {
568         path = "";
569     }
570 
571     return env->NewStringUTF(path.c_str());
572 }
573 
com_android_server_am_CachedAppOptimizer_isFreezerProfileValid(JNIEnv * env)574 static jboolean com_android_server_am_CachedAppOptimizer_isFreezerProfileValid(JNIEnv* env) {
575     uid_t uid = getuid();
576     pid_t pid = getpid();
577 
578     return isProfileValidForProcess("Frozen", uid, pid) &&
579             isProfileValidForProcess("Unfrozen", uid, pid);
580 }
581 
// JNI method table mapping the native method declarations in
// com.android.server.am.CachedAppOptimizer to the implementations above.
static const JNINativeMethod sMethods[] = {
        /* name, signature, funcPtr */
        {"cancelCompaction", "()V",
         (void*)com_android_server_am_CachedAppOptimizer_cancelCompaction},
        {"threadCpuTimeNs", "()J", (void*)com_android_server_am_CachedAppOptimizer_threadCpuTimeNs},
        {"getFreeSwapPercent", "()D",
         (void*)com_android_server_am_CachedAppOptimizer_getFreeSwapPercent},
        {"getUsedZramMemory", "()J",
         (void*)com_android_server_am_CachedAppOptimizer_getUsedZramMemory},
        {"getMemoryFreedCompaction", "()J",
         (void*)com_android_server_am_CachedAppOptimizer_getMemoryFreedCompaction},
        {"compactSystem", "()V", (void*)com_android_server_am_CachedAppOptimizer_compactSystem},
        {"compactProcess", "(II)V", (void*)com_android_server_am_CachedAppOptimizer_compactProcess},
        {"freezeBinder", "(IZI)I", (void*)com_android_server_am_CachedAppOptimizer_freezeBinder},
        {"getBinderFreezeInfo", "(I)I",
         (void*)com_android_server_am_CachedAppOptimizer_getBinderFreezeInfo},
        {"getFreezerCheckPath", "()Ljava/lang/String;",
         (void*)com_android_server_am_CachedAppOptimizer_getFreezerCheckPath},
        {"isFreezerProfileValid", "()Z",
         (void*)com_android_server_am_CachedAppOptimizer_isFreezerProfileValid}};
602 
// Registers the sMethods table with the Java class
// com.android.server.am.CachedAppOptimizer. Returns the
// jniRegisterNativeMethods status (0 on success).
int register_android_server_am_CachedAppOptimizer(JNIEnv* env)
{
    return jniRegisterNativeMethods(env, "com/android/server/am/CachedAppOptimizer",
                                    sMethods, NELEM(sMethods));
}
608 
609 }
610