1 /*
2 * Copyright 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
18 #undef LOG_TAG
19 #define LOG_TAG "VSyncReactor"
20 //#define LOG_NDEBUG 0
21 #include "VSyncReactor.h"
22 #include <cutils/properties.h>
23 #include <log/log.h>
24 #include <utils/Trace.h>
25 #include "../TracedOrdinal.h"
26 #include "TimeKeeper.h"
27 #include "VSyncDispatch.h"
28 #include "VSyncTracker.h"
29
30 namespace android::scheduler {
31 using base::StringAppendF;
32
// Out-of-line defaulted destructor anchors Clock's vtable in this translation unit.
Clock::~Clock() = default;
now() const34 nsecs_t SystemClock::now() const {
35 return systemTime(SYSTEM_TIME_MONOTONIC);
36 }
37
38 class PredictedVsyncTracer {
39 public:
PredictedVsyncTracer(VSyncDispatch & dispatch)40 PredictedVsyncTracer(VSyncDispatch& dispatch)
41 : mRegistration(dispatch,
42 std::bind(&PredictedVsyncTracer::callback, this, std::placeholders::_1,
43 std::placeholders::_2),
44 "PredictedVsyncTracer") {
45 mRegistration.schedule(0, 0);
46 }
47
48 private:
49 TracedOrdinal<bool> mParity = {"VSYNC-predicted", 0};
50 VSyncCallbackRegistration mRegistration;
51
callback(nsecs_t,nsecs_t)52 void callback(nsecs_t /*vsyncTime*/, nsecs_t /*targetWakeupTim*/) {
53 mParity = !mParity;
54 mRegistration.schedule(0, 0);
55 }
56 };
57
VSyncReactor(std::unique_ptr<Clock> clock,std::unique_ptr<VSyncDispatch> dispatch,std::unique_ptr<VSyncTracker> tracker,size_t pendingFenceLimit,bool supportKernelIdleTimer)58 VSyncReactor::VSyncReactor(std::unique_ptr<Clock> clock, std::unique_ptr<VSyncDispatch> dispatch,
59 std::unique_ptr<VSyncTracker> tracker, size_t pendingFenceLimit,
60 bool supportKernelIdleTimer)
61 : mClock(std::move(clock)),
62 mTracker(std::move(tracker)),
63 mDispatch(std::move(dispatch)),
64 mPendingLimit(pendingFenceLimit),
65 mPredictedVsyncTracer(property_get_bool("debug.sf.show_predicted_vsync", false)
66 ? std::make_unique<PredictedVsyncTracer>(*mDispatch)
67 : nullptr),
68 mSupportKernelIdleTimer(supportKernelIdleTimer) {}
69
// Defaulted out-of-line so unique_ptr members are destroyed where their types are complete.
VSyncReactor::~VSyncReactor() = default;
71
72 // The DispSync interface has a 'repeat this callback at rate' semantic. This object adapts
73 // VSyncDispatch's individually-scheduled callbacks so as to meet DispSync's existing semantic
74 // for now.
class CallbackRepeater {
public:
    // Registers with |dispatch| under |name|. |period| and |offset| express
    // DispSync's "offset after the previous vsync" phase; calculateWorkload()
    // converts that into VSyncDispatch's "time before the target vsync" form.
    // |notBefore| seeds mLastCallTime so the first schedule anchors correctly.
    CallbackRepeater(VSyncDispatch& dispatch, DispSync::Callback* cb, const char* name,
                     nsecs_t period, nsecs_t offset, nsecs_t notBefore)
          : mName(name),
            mCallback(cb),
            mRegistration(dispatch,
                          std::bind(&CallbackRepeater::callback, this, std::placeholders::_1,
                                    std::placeholders::_2),
                          mName),
            mPeriod(period),
            mOffset(offset),
            mLastCallTime(notBefore) {}

    ~CallbackRepeater() {
        std::lock_guard<std::mutex> lk(mMutex);
        mRegistration.cancel();
    }

    // (Re)starts the repeating callback with a new DispSync-style |offset|.
    void start(nsecs_t offset) {
        std::lock_guard<std::mutex> lk(mMutex);
        mStopped = false;
        mOffset = offset;

        // Anchor on the last vsync we fired for so a restart does not deliver
        // an extra, too-early callback.
        auto const schedule_result = mRegistration.schedule(calculateWorkload(), mLastCallTime);
        LOG_ALWAYS_FATAL_IF((schedule_result != ScheduleResult::Scheduled),
                            "Error scheduling callback: rc %X", schedule_result);
    }

    // Updates the period used to derive the workload; no-op if unchanged.
    void setPeriod(nsecs_t period) {
        std::lock_guard<std::mutex> lk(mMutex);
        if (period == mPeriod) {
            return;
        }
        mPeriod = period;
    }

    // Cancels further callbacks until start() is called again. Calling stop()
    // twice in a row is a fatal interface misuse.
    void stop() {
        std::lock_guard<std::mutex> lk(mMutex);
        LOG_ALWAYS_FATAL_IF(mStopped, "DispSyncInterface misuse: callback already stopped");
        mStopped = true;
        mRegistration.cancel();
    }

    // Appends a one-line human-readable state summary to |result|.
    void dump(std::string& result) const {
        std::lock_guard<std::mutex> lk(mMutex);
        StringAppendF(&result, "\t%s: mPeriod=%.2f last vsync time %.2fms relative to now (%s)\n",
                      mName.c_str(), mPeriod / 1e6f, (mLastCallTime - systemTime()) / 1e6f,
                      mStopped ? "stopped" : "running");
    }

private:
    // Invoked by VSyncDispatch. The lock is deliberately dropped around
    // onDispSyncEvent() — presumably so the listener can re-enter this object
    // (e.g. call stop()) without deadlocking; TODO(review): confirm.
    void callback(nsecs_t vsynctime, nsecs_t wakeupTime) {
        {
            std::lock_guard<std::mutex> lk(mMutex);
            mLastCallTime = vsynctime;
        }

        mCallback->onDispSyncEvent(wakeupTime, vsynctime);

        {
            std::lock_guard<std::mutex> lk(mMutex);
            if (mStopped) {
                return;
            }
            // Re-arm for the next vsync, anchored at the one just delivered.
            auto const schedule_result = mRegistration.schedule(calculateWorkload(), vsynctime);
            LOG_ALWAYS_FATAL_IF((schedule_result != ScheduleResult::Scheduled),
                                "Error rescheduling callback: rc %X", schedule_result);
        }
    }

    // DispSync offsets are defined as time after the vsync before presentation.
    // VSyncReactor workloads are defined as time before the intended presentation vsync.
    // Note change in sign between the two definitions.
    nsecs_t calculateWorkload() REQUIRES(mMutex) { return mPeriod - mOffset; }

    const std::string mName;
    DispSync::Callback* const mCallback;

    std::mutex mutable mMutex;
    VSyncCallbackRegistration mRegistration GUARDED_BY(mMutex);
    bool mStopped GUARDED_BY(mMutex) = false;
    nsecs_t mPeriod GUARDED_BY(mMutex);
    nsecs_t mOffset GUARDED_BY(mMutex);
    nsecs_t mLastCallTime GUARDED_BY(mMutex);
};
161
addPresentFence(const std::shared_ptr<FenceTime> & fence)162 bool VSyncReactor::addPresentFence(const std::shared_ptr<FenceTime>& fence) {
163 if (!fence) {
164 return false;
165 }
166
167 nsecs_t const signalTime = fence->getCachedSignalTime();
168 if (signalTime == Fence::SIGNAL_TIME_INVALID) {
169 return true;
170 }
171
172 std::lock_guard<std::mutex> lk(mMutex);
173 if (mExternalIgnoreFences || mInternalIgnoreFences) {
174 return true;
175 }
176
177 bool timestampAccepted = true;
178 for (auto it = mUnfiredFences.begin(); it != mUnfiredFences.end();) {
179 auto const time = (*it)->getCachedSignalTime();
180 if (time == Fence::SIGNAL_TIME_PENDING) {
181 it++;
182 } else if (time == Fence::SIGNAL_TIME_INVALID) {
183 it = mUnfiredFences.erase(it);
184 } else {
185 timestampAccepted &= mTracker->addVsyncTimestamp(time);
186
187 it = mUnfiredFences.erase(it);
188 }
189 }
190
191 if (signalTime == Fence::SIGNAL_TIME_PENDING) {
192 if (mPendingLimit == mUnfiredFences.size()) {
193 mUnfiredFences.erase(mUnfiredFences.begin());
194 }
195 mUnfiredFences.push_back(fence);
196 } else {
197 timestampAccepted &= mTracker->addVsyncTimestamp(signalTime);
198 }
199
200 if (!timestampAccepted) {
201 mMoreSamplesNeeded = true;
202 setIgnorePresentFencesInternal(true);
203 mPeriodConfirmationInProgress = true;
204 }
205
206 return mMoreSamplesNeeded;
207 }
208
setIgnorePresentFences(bool ignoration)209 void VSyncReactor::setIgnorePresentFences(bool ignoration) {
210 std::lock_guard<std::mutex> lk(mMutex);
211 mExternalIgnoreFences = ignoration;
212 updateIgnorePresentFencesInternal();
213 }
214
// Internal toggle for ignoring present fences. Takes no lock — callers are
// expected to already hold mMutex (all call sites in this file do).
void VSyncReactor::setIgnorePresentFencesInternal(bool ignoration) {
    mInternalIgnoreFences = ignoration;
    updateIgnorePresentFencesInternal();
}
219
updateIgnorePresentFencesInternal()220 void VSyncReactor::updateIgnorePresentFencesInternal() {
221 if (mExternalIgnoreFences || mInternalIgnoreFences) {
222 mUnfiredFences.clear();
223 }
224 }
225
computeNextRefresh(int periodOffset,nsecs_t now) const226 nsecs_t VSyncReactor::computeNextRefresh(int periodOffset, nsecs_t now) const {
227 auto const currentPeriod = periodOffset ? mTracker->currentPeriod() : 0;
228 return mTracker->nextAnticipatedVSyncTimeFrom(now + periodOffset * currentPeriod);
229 }
230
// Returns the tracker's predicted vsync at or after |now|.
nsecs_t VSyncReactor::expectedPresentTime(nsecs_t now) {
    return mTracker->nextAnticipatedVSyncTimeFrom(now);
}
234
startPeriodTransition(nsecs_t newPeriod)235 void VSyncReactor::startPeriodTransition(nsecs_t newPeriod) {
236 mPeriodConfirmationInProgress = true;
237 mPeriodTransitioningTo = newPeriod;
238 mMoreSamplesNeeded = true;
239 setIgnorePresentFencesInternal(true);
240 }
241
endPeriodTransition()242 void VSyncReactor::endPeriodTransition() {
243 setIgnorePresentFencesInternal(false);
244 mMoreSamplesNeeded = false;
245 mPeriodTransitioningTo.reset();
246 mPeriodConfirmationInProgress = false;
247 mLastHwVsync.reset();
248 }
249
setPeriod(nsecs_t period)250 void VSyncReactor::setPeriod(nsecs_t period) {
251 ATRACE_INT64("VSR-setPeriod", period);
252 std::lock_guard lk(mMutex);
253 mLastHwVsync.reset();
254
255 if (!mSupportKernelIdleTimer && period == getPeriod()) {
256 endPeriodTransition();
257 } else {
258 startPeriodTransition(period);
259 }
260 }
261
// Returns the tracker's current best estimate of the vsync period.
nsecs_t VSyncReactor::getPeriod() {
    return mTracker->currentPeriod();
}
265
// Starts a resync pass by discarding the tracker's accumulated model.
void VSyncReactor::beginResync() {
    mTracker->resetModel();
}
269
// Intentionally a no-op: nothing to tear down when a resync pass ends.
void VSyncReactor::endResync() {}
271
periodConfirmed(nsecs_t vsync_timestamp,std::optional<nsecs_t> HwcVsyncPeriod)272 bool VSyncReactor::periodConfirmed(nsecs_t vsync_timestamp, std::optional<nsecs_t> HwcVsyncPeriod) {
273 if (!mPeriodConfirmationInProgress) {
274 return false;
275 }
276
277 if (!mLastHwVsync && !HwcVsyncPeriod) {
278 return false;
279 }
280
281 const bool periodIsChanging =
282 mPeriodTransitioningTo && (*mPeriodTransitioningTo != getPeriod());
283 if (mSupportKernelIdleTimer && !periodIsChanging) {
284 // Clear out the Composer-provided period and use the allowance logic below
285 HwcVsyncPeriod = {};
286 }
287
288 auto const period = mPeriodTransitioningTo ? *mPeriodTransitioningTo : getPeriod();
289 static constexpr int allowancePercent = 10;
290 static constexpr std::ratio<allowancePercent, 100> allowancePercentRatio;
291 auto const allowance = period * allowancePercentRatio.num / allowancePercentRatio.den;
292 if (HwcVsyncPeriod) {
293 return std::abs(*HwcVsyncPeriod - period) < allowance;
294 }
295
296 auto const distance = vsync_timestamp - *mLastHwVsync;
297 return std::abs(distance - period) < allowance;
298 }
299
addResyncSample(nsecs_t timestamp,std::optional<nsecs_t> hwcVsyncPeriod,bool * periodFlushed)300 bool VSyncReactor::addResyncSample(nsecs_t timestamp, std::optional<nsecs_t> hwcVsyncPeriod,
301 bool* periodFlushed) {
302 assert(periodFlushed);
303
304 std::lock_guard<std::mutex> lk(mMutex);
305 if (periodConfirmed(timestamp, hwcVsyncPeriod)) {
306 if (mPeriodTransitioningTo) {
307 mTracker->setPeriod(*mPeriodTransitioningTo);
308 for (auto& entry : mCallbacks) {
309 entry.second->setPeriod(*mPeriodTransitioningTo);
310 }
311 *periodFlushed = true;
312 }
313 endPeriodTransition();
314 } else if (mPeriodConfirmationInProgress) {
315 mLastHwVsync = timestamp;
316 mMoreSamplesNeeded = true;
317 *periodFlushed = false;
318 } else {
319 mMoreSamplesNeeded = false;
320 *periodFlushed = false;
321 }
322
323 mTracker->addVsyncTimestamp(timestamp);
324 return mMoreSamplesNeeded;
325 }
326
addEventListener(const char * name,nsecs_t phase,DispSync::Callback * callback,nsecs_t)327 status_t VSyncReactor::addEventListener(const char* name, nsecs_t phase,
328 DispSync::Callback* callback,
329 nsecs_t /* lastCallbackTime */) {
330 std::lock_guard<std::mutex> lk(mMutex);
331 auto it = mCallbacks.find(callback);
332 if (it == mCallbacks.end()) {
333 // TODO (b/146557561): resolve lastCallbackTime semantics in DispSync i/f.
334 static auto constexpr maxListeners = 4;
335 if (mCallbacks.size() >= maxListeners) {
336 ALOGE("callback %s not added, exceeded callback limit of %i (currently %zu)", name,
337 maxListeners, mCallbacks.size());
338 return NO_MEMORY;
339 }
340
341 auto const period = mTracker->currentPeriod();
342 auto repeater = std::make_unique<CallbackRepeater>(*mDispatch, callback, name, period,
343 phase, mClock->now());
344 it = mCallbacks.emplace(std::pair(callback, std::move(repeater))).first;
345 }
346
347 it->second->start(phase);
348 return NO_ERROR;
349 }
350
removeEventListener(DispSync::Callback * callback,nsecs_t *)351 status_t VSyncReactor::removeEventListener(DispSync::Callback* callback,
352 nsecs_t* /* outLastCallback */) {
353 std::lock_guard<std::mutex> lk(mMutex);
354 auto const it = mCallbacks.find(callback);
355 LOG_ALWAYS_FATAL_IF(it == mCallbacks.end(), "callback %p not registered", callback);
356
357 it->second->stop();
358 return NO_ERROR;
359 }
360
changePhaseOffset(DispSync::Callback * callback,nsecs_t phase)361 status_t VSyncReactor::changePhaseOffset(DispSync::Callback* callback, nsecs_t phase) {
362 std::lock_guard<std::mutex> lk(mMutex);
363 auto const it = mCallbacks.find(callback);
364 LOG_ALWAYS_FATAL_IF(it == mCallbacks.end(), "callback was %p not registered", callback);
365
366 it->second->start(phase);
367 return NO_ERROR;
368 }
369
dump(std::string & result) const370 void VSyncReactor::dump(std::string& result) const {
371 std::lock_guard<std::mutex> lk(mMutex);
372 StringAppendF(&result, "VsyncReactor in use\n");
373 StringAppendF(&result, "Has %zu unfired fences\n", mUnfiredFences.size());
374 StringAppendF(&result, "mInternalIgnoreFences=%d mExternalIgnoreFences=%d\n",
375 mInternalIgnoreFences, mExternalIgnoreFences);
376 StringAppendF(&result, "mMoreSamplesNeeded=%d mPeriodConfirmationInProgress=%d\n",
377 mMoreSamplesNeeded, mPeriodConfirmationInProgress);
378 if (mPeriodTransitioningTo) {
379 StringAppendF(&result, "mPeriodTransitioningTo=%" PRId64 "\n", *mPeriodTransitioningTo);
380 } else {
381 StringAppendF(&result, "mPeriodTransitioningTo=nullptr\n");
382 }
383
384 if (mLastHwVsync) {
385 StringAppendF(&result, "Last HW vsync was %.2fms ago\n",
386 (mClock->now() - *mLastHwVsync) / 1e6f);
387 } else {
388 StringAppendF(&result, "No Last HW vsync\n");
389 }
390
391 StringAppendF(&result, "CallbackRepeaters:\n");
392 for (const auto& [callback, repeater] : mCallbacks) {
393 repeater->dump(result);
394 }
395
396 StringAppendF(&result, "VSyncTracker:\n");
397 mTracker->dump(result);
398 StringAppendF(&result, "VSyncDispatch:\n");
399 mDispatch->dump(result);
400 }
401
// Intentionally a no-op for this DispSync implementation.
void VSyncReactor::reset() {}
403
404 } // namespace android::scheduler
405