/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "JankTracker.h"

#include <errno.h>
#include <inttypes.h>
#include <sys/mman.h>

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <limits>

#include <cutils/ashmem.h>
#include <log/log.h>

#include "Properties.h"
#include "utils/TimeUtils.h"

namespace android {
namespace uirenderer {

static const char* JANK_TYPE_NAMES[] = {
        "Missed Vsync",
        "High input latency",
        "Slow UI thread",
        "Slow bitmap uploads",
        "Slow issue draw commands",
};

struct Comparison {
    FrameInfoIndex start;
    FrameInfoIndex end;
};

static const Comparison COMPARISONS[] = {
        {FrameInfoIndex::IntendedVsync, FrameInfoIndex::Vsync},
        {FrameInfoIndex::OldestInputEvent, FrameInfoIndex::Vsync},
        {FrameInfoIndex::Vsync, FrameInfoIndex::SyncStart},
        {FrameInfoIndex::SyncStart, FrameInfoIndex::IssueDrawCommandsStart},
        {FrameInfoIndex::IssueDrawCommandsStart, FrameInfoIndex::FrameCompleted},
};

// If the event exceeds 10 seconds, throw it away; this isn't a jank event,
// it's an ANR and will be handled as such
static const int64_t IGNORE_EXCEEDING = seconds_to_nanoseconds(10);

/*
 * We don't track direct drawing via Surface.lockHardwareCanvas()
 * for now
 *
 * TODO: kSurfaceCanvas can negatively impact other drawing by using up
 * time on the RenderThread, figure out how to attribute that as a jank-causer
 */
static const int64_t EXEMPT_FRAMES_FLAGS = FrameInfoFlags::SurfaceCanvas;

// These constants control the bucketing algorithm, so to speak
// If a frame is <= this it goes in bucket 0
static const uint32_t kBucketMinThreshold = 5;
// If a frame is > this, start counting in increments of 2ms
static const uint32_t kBucket2msIntervals = 32;
// If a frame is > this, start counting in increments of 4ms
static const uint32_t kBucket4msIntervals = 48;

// For testing purposes, to try to eliminate test infra overhead we will
// consider any unknown delay of frame start as part of the test infrastructure
// and filter it out of the frame profile data
static FrameInfoIndex sFrameStart = FrameInfoIndex::IntendedVsync;

// The interval of the slow frame histogram
static const uint32_t kSlowFrameBucketIntervalMs = 50;
// The start point of the slow frame bucket in ms
static const uint32_t kSlowFrameBucketStartMs = 150;

// This will be called every frame, performance sensitive
// Uses bit twiddling to avoid branching while achieving the packing desired
static uint32_t frameCountIndexForFrameTime(nsecs_t frameTime) {
    uint32_t index = static_cast<uint32_t>(ns2ms(frameTime));
    // If index > kBucketMinThreshold mask will be 0xFFFFFFFF as a result
    // of negating 1 (two's complement, yaay) else mask will be 0
    uint32_t mask = -(index > kBucketMinThreshold);
    // If index > threshold, this will essentially perform:
    // amountAboveThreshold = index - threshold;
    // index = threshold + (amountAboveThreshold / 2)
    // However if index is <= the threshold this will do nothing. It will
    // underflow, do a right shift by 0 (no-op), then overflow back to the
    // original value
    index = ((index - kBucket4msIntervals) >> (index > kBucket4msIntervals))
            + kBucket4msIntervals;
    index = ((index - kBucket2msIntervals) >> (index > kBucket2msIntervals))
            + kBucket2msIntervals;
    // If index was < minThreshold at the start of all this it's going to
    // be a pretty garbage value right now. However, mask is 0 so we'll end
    // up with the desired result of 0.
    index = (index - kBucketMinThreshold) & mask;
    return index;
}
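
// Worked example (illustrative, using the constants above): a 7ms frame gives
// index = 7 and mask = 0xFFFFFFFF; neither interval threshold is crossed, so
// the result is (7 - 5) & mask = bucket 2. A 40ms frame becomes
// ((40 - 32) >> 1) + 32 = 36 and ends up in bucket 36 - 5 = 31. A 3ms frame
// computes a garbage intermediate value but has mask = 0, so it lands in
// bucket 0 as intended.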

// Only called when dumping stats, less performance sensitive
int32_t JankTracker::frameTimeForFrameCountIndex(uint32_t index) {
    index = index + kBucketMinThreshold;
    if (index > kBucket2msIntervals) {
        index += (index - kBucket2msIntervals);
    }
    if (index > kBucket4msIntervals) {
        // This works because it was already doubled by the above if
        // 1 is added to shift slightly more towards the middle of the bucket
        index += (index - kBucket4msIntervals) + 1;
    }
    return index;
}
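
// Round-trip sketch (illustrative): bucket 31 maps back to 31 + 5 = 36, and
// since 36 > kBucket2msIntervals the result is 36 + (36 - 32) = 40ms, matching
// the 40ms frame bucketed above. Buckets past kBucket4msIntervals additionally
// get the +1 nudge toward the middle of their wider range.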

int32_t JankTracker::frameTimeForSlowFrameCountIndex(uint32_t index) {
    return (index * kSlowFrameBucketIntervalMs) + kSlowFrameBucketStartMs;
}

JankTracker::JankTracker(const DisplayInfo& displayInfo) {
    // By default this will use malloc memory. It may be moved later to ashmem
    // if there is shared space for it and a request comes in to do that.
    mData = new ProfileData;
    reset();
    nsecs_t frameIntervalNanos = static_cast<nsecs_t>(1_s / displayInfo.fps);
#if USE_HWC2
    nsecs_t sfOffset = frameIntervalNanos - (displayInfo.presentationDeadline - 1_ms);
    nsecs_t offsetDelta = sfOffset - displayInfo.appVsyncOffset;
    // There are two different offset cases. If the offsetDelta is positive
    // and small, then the intention is to give apps extra time by leveraging
    // pipelining between the UI & RT threads. If the offsetDelta is large or
    // negative, the intention is to subtract time from the total duration
    // in which case we can't afford to wait for dequeueBuffer blockage.
    if (offsetDelta <= 4_ms && offsetDelta >= 0) {
        // SF will begin composition at VSYNC-app + offsetDelta. If we are triple
        // buffered, this is the expected time at which dequeueBuffer will
        // return due to the staggering of VSYNC-app & VSYNC-sf.
        mDequeueTimeForgiveness = offsetDelta + 4_ms;
    }
#endif
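    // Illustrative numbers only (the real offsets are device-specific): if the
    // reported presentationDeadline works out to sfOffset = 4_ms and
    // appVsyncOffset is 1_ms, then offsetDelta = 3_ms, which is within
    // [0, 4_ms], so mDequeueTimeForgiveness becomes 3_ms + 4_ms = 7_ms. A
    // large or negative offsetDelta leaves mDequeueTimeForgiveness unset and
    // addFrame() forgives no dequeueBuffer time.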
    setFrameInterval(frameIntervalNanos);
}

JankTracker::~JankTracker() {
    freeData();
}

void JankTracker::freeData() {
    if (mIsMapped) {
        munmap(mData, sizeof(ProfileData));
    } else {
        delete mData;
    }
    mIsMapped = false;
    mData = nullptr;
}

void JankTracker::rotateStorage() {
    // If we are mapped we want to stop using the ashmem backend and switch to malloc.
    // We are expecting a switchStorageToAshmem call to follow this, but it's not guaranteed.
    // If we aren't sitting on top of ashmem then just do a reset(), as it's functionally
    // equivalent to a free, malloc, reset.
    if (mIsMapped) {
        freeData();
        mData = new ProfileData;
    }
    reset();
}

void JankTracker::switchStorageToAshmem(int ashmemfd) {
    int regionSize = ashmem_get_size_region(ashmemfd);
    if (regionSize < 0) {
        int err = errno;
        ALOGW("Failed to get ashmem region size from fd %d, err %d %s", ashmemfd, err, strerror(err));
        return;
    }
    if (regionSize < static_cast<int>(sizeof(ProfileData))) {
        ALOGW("Ashmem region is too small! Received %d, required %u",
                regionSize, static_cast<unsigned int>(sizeof(ProfileData)));
        return;
    }
    ProfileData* newData = reinterpret_cast<ProfileData*>(
            mmap(NULL, sizeof(ProfileData), PROT_READ | PROT_WRITE,
            MAP_SHARED, ashmemfd, 0));
    if (newData == MAP_FAILED) {
        int err = errno;
        ALOGW("Failed to move profile data to ashmem fd %d, error = %d",
                ashmemfd, err);
        return;
    }

    // The new buffer may have historical data that we want to build on top of,
    // but let's make sure we don't overflow Just In Case
    uint32_t divider = 0;
    if (newData->totalFrameCount > (1 << 24)) {
        divider = 4;
    }
    for (size_t i = 0; i < mData->jankTypeCounts.size(); i++) {
        newData->jankTypeCounts[i] >>= divider;
        newData->jankTypeCounts[i] += mData->jankTypeCounts[i];
    }
    for (size_t i = 0; i < mData->frameCounts.size(); i++) {
        newData->frameCounts[i] >>= divider;
        newData->frameCounts[i] += mData->frameCounts[i];
    }
    newData->jankFrameCount >>= divider;
    newData->jankFrameCount += mData->jankFrameCount;
    newData->totalFrameCount >>= divider;
    newData->totalFrameCount += mData->totalFrameCount;
    if (newData->statStartTime > mData->statStartTime
            || newData->statStartTime == 0) {
        newData->statStartTime = mData->statStartTime;
    }

    freeData();
    mData = newData;
    mIsMapped = true;
}
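
// Merge sketch (illustrative): if the inherited ashmem region already holds
// more than 1 << 24 (~16.7 million) frames of history, every inherited counter
// is first divided by 16 (>> 4) before this process's counts are added, so the
// 32-bit fields can't overflow; otherwise the counts are summed as-is.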

void JankTracker::setFrameInterval(nsecs_t frameInterval) {
    mFrameInterval = frameInterval;
    mThresholds[kMissedVsync] = 1;
    /*
     * Due to interpolation and sample rate differences between the touch
     * panel and the display (for example, an 85hz touch panel driving a 60hz
     * display) we call high latency 1.5 * frameInterval
     *
     * NOTE: Be careful when tuning this! A theoretical 1,000hz touch panel
     * on a 60hz display will show kOldestInputEvent - kIntendedVsync deltas
     * of 15ms. Thus this must always be larger than frameInterval, or it
     * will fail
     */
    mThresholds[kHighInputLatency] = static_cast<int64_t>(1.5 * frameInterval);

    // Note that these do not add up to 1. This is intentional. It's to deal
    // with variance in values, and should be sort of an upper-bound on what
    // is reasonable to expect.
    mThresholds[kSlowUI] = static_cast<int64_t>(.5 * frameInterval);
    mThresholds[kSlowSync] = static_cast<int64_t>(.2 * frameInterval);
    mThresholds[kSlowRT] = static_cast<int64_t>(.75 * frameInterval);
}
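
// Example thresholds (illustrative) for a 60Hz display, where frameInterval is
// roughly 16.67ms: kMissedVsync = 1ns, kHighInputLatency ~= 25ms,
// kSlowUI ~= 8.3ms, kSlowSync ~= 3.3ms, kSlowRT ~= 12.5ms.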

void JankTracker::addFrame(const FrameInfo& frame) {
    mData->totalFrameCount++;
    // Fast-path for jank-free frames
    int64_t totalDuration = frame.duration(sFrameStart, FrameInfoIndex::FrameCompleted);
    if (mDequeueTimeForgiveness
            && frame[FrameInfoIndex::DequeueBufferDuration] > 500_us) {
        nsecs_t expectedDequeueDuration =
                mDequeueTimeForgiveness + frame[FrameInfoIndex::Vsync]
                - frame[FrameInfoIndex::IssueDrawCommandsStart];
        if (expectedDequeueDuration > 0) {
            // Forgive only up to the expected amount, but not more than
            // the actual time spent blocked.
            nsecs_t forgiveAmount = std::min(expectedDequeueDuration,
                    frame[FrameInfoIndex::DequeueBufferDuration]);
            LOG_ALWAYS_FATAL_IF(forgiveAmount >= totalDuration,
                    "Impossible dequeue duration! dequeue duration reported %" PRId64
                    ", total duration %" PRId64, forgiveAmount, totalDuration);
            totalDuration -= forgiveAmount;
        }
    }
    LOG_ALWAYS_FATAL_IF(totalDuration <= 0, "Impossible totalDuration %" PRId64, totalDuration);
    uint32_t framebucket = frameCountIndexForFrameTime(totalDuration);
    LOG_ALWAYS_FATAL_IF(framebucket < 0, "framebucket < 0 (%u)", framebucket);
    // Keep the fast path as fast as possible.
    if (CC_LIKELY(totalDuration < mFrameInterval)) {
        mData->frameCounts[framebucket]++;
        return;
    }

    // Only things like Surface.lockHardwareCanvas() are exempt from tracking
    if (frame[FrameInfoIndex::Flags] & EXEMPT_FRAMES_FLAGS) {
        return;
    }

    // Anything past the end of the regular histogram goes to the slow histogram
    if (framebucket < mData->frameCounts.size()) {
        mData->frameCounts[framebucket]++;
    } else {
        framebucket = (ns2ms(totalDuration) - kSlowFrameBucketStartMs)
                / kSlowFrameBucketIntervalMs;
        framebucket = std::min(framebucket,
                static_cast<uint32_t>(mData->slowFrameCounts.size() - 1));
        framebucket = std::max(framebucket, 0u);
        mData->slowFrameCounts[framebucket]++;
    }

    mData->jankFrameCount++;

    for (int i = 0; i < NUM_BUCKETS; i++) {
        int64_t delta = frame.duration(COMPARISONS[i].start, COMPARISONS[i].end);
        if (delta >= mThresholds[i] && delta < IGNORE_EXCEEDING) {
            mData->jankTypeCounts[i]++;
        }
    }
}
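
// Bucketing sketch (illustrative): a janky frame slow enough to overflow the
// regular histogram, say 230ms, lands in slow bucket (230 - 150) / 50 = 1,
// which dumpData() reports as the 200ms bin.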

void JankTracker::dumpData(int fd, const ProfileDataDescription* description, const ProfileData* data) {
    if (description) {
        switch (description->type) {
            case JankTrackerType::Generic:
                break;
            case JankTrackerType::Package:
                dprintf(fd, "\nPackage: %s", description->name.c_str());
                break;
            case JankTrackerType::Window:
                dprintf(fd, "\nWindow: %s", description->name.c_str());
                break;
        }
    }
    if (sFrameStart != FrameInfoIndex::IntendedVsync) {
        dprintf(fd, "\nNote: Data has been filtered!");
    }
    dprintf(fd, "\nStats since: %" PRIu64 "ns", data->statStartTime);
    dprintf(fd, "\nTotal frames rendered: %u", data->totalFrameCount);
    dprintf(fd, "\nJanky frames: %u (%.2f%%)", data->jankFrameCount,
            (float) data->jankFrameCount / (float) data->totalFrameCount * 100.0f);
    dprintf(fd, "\n50th percentile: %ums", findPercentile(data, 50));
    dprintf(fd, "\n90th percentile: %ums", findPercentile(data, 90));
    dprintf(fd, "\n95th percentile: %ums", findPercentile(data, 95));
    dprintf(fd, "\n99th percentile: %ums", findPercentile(data, 99));
    for (int i = 0; i < NUM_BUCKETS; i++) {
        dprintf(fd, "\nNumber %s: %u", JANK_TYPE_NAMES[i], data->jankTypeCounts[i]);
    }
    dprintf(fd, "\nHISTOGRAM:");
    for (size_t i = 0; i < data->frameCounts.size(); i++) {
        dprintf(fd, " %ums=%u", frameTimeForFrameCountIndex(i),
                data->frameCounts[i]);
    }
    for (size_t i = 0; i < data->slowFrameCounts.size(); i++) {
        dprintf(fd, " %ums=%u", frameTimeForSlowFrameCountIndex(i),
                data->slowFrameCounts[i]);
    }
    dprintf(fd, "\n");
}

void JankTracker::reset() {
    mData->jankTypeCounts.fill(0);
    mData->frameCounts.fill(0);
    mData->slowFrameCounts.fill(0);
    mData->totalFrameCount = 0;
    mData->jankFrameCount = 0;
    mData->statStartTime = systemTime(CLOCK_MONOTONIC);
    sFrameStart = Properties::filterOutTestOverhead
            ? FrameInfoIndex::HandleInputStart
            : FrameInfoIndex::IntendedVsync;
}

uint32_t JankTracker::findPercentile(const ProfileData* data, int percentile) {
    int pos = percentile * data->totalFrameCount / 100;
    int remaining = data->totalFrameCount - pos;
    for (int i = data->slowFrameCounts.size() - 1; i >= 0; i--) {
        remaining -= data->slowFrameCounts[i];
        if (remaining <= 0) {
            return (i * kSlowFrameBucketIntervalMs) + kSlowFrameBucketStartMs;
        }
    }
    for (int i = data->frameCounts.size() - 1; i >= 0; i--) {
        remaining -= data->frameCounts[i];
        if (remaining <= 0) {
            return frameTimeForFrameCountIndex(i);
        }
    }
    return 0;
}
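
// Walkthrough (illustrative): with 200 total frames and percentile = 90,
// pos = 180 and remaining = 20. The loops scan from the slowest buckets toward
// the fastest, subtracting counts until 20 frames have been passed, and report
// that bucket's frame time as the 90th percentile.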

} /* namespace uirenderer */
} /* namespace android */