1 /*
2 * Copyright 2014 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include <ctype.h>
9
10 #include "nanobench.h"
11
12 #include "AndroidCodecBench.h"
13 #include "Benchmark.h"
14 #include "BitmapRegionDecoderBench.h"
15 #include "CodecBench.h"
16 #include "CodecBenchPriv.h"
17 #include "ColorCodecBench.h"
18 #include "CrashHandler.h"
19 #include "GMBench.h"
20 #include "ProcStats.h"
21 #include "ResultsWriter.h"
22 #include "RecordingBench.h"
23 #include "SKPAnimationBench.h"
24 #include "SKPBench.h"
25 #include "Stats.h"
26 #include "ios_utils.h"
27
28 #include "SkAndroidCodec.h"
29 #include "SkAutoMalloc.h"
30 #include "SkBBoxHierarchy.h"
31 #include "SkBitmapRegionDecoder.h"
32 #include "SkCanvas.h"
33 #include "SkCodec.h"
34 #include "SkCommonFlags.h"
35 #include "SkCommonFlagsConfig.h"
36 #include "SkCommonFlagsPathRenderer.h"
37 #include "SkData.h"
38 #include "SkGraphics.h"
39 #include "SkLeanWindows.h"
40 #include "SkOSFile.h"
41 #include "SkOSPath.h"
42 #include "SkPictureRecorder.h"
43 #include "SkSVGDOM.h"
44 #include "SkScan.h"
45 #include "SkString.h"
46 #include "SkSurface.h"
47 #include "SkTaskGroup.h"
48 #include "SkThreadUtils.h"
49 #include "ThermalManager.h"
50
51 #include <stdlib.h>
52
53 #ifndef SK_BUILD_FOR_WIN32
54 #include <unistd.h>
55 #endif
56
57 #if SK_SUPPORT_GPU
58 #include "gl/GrGLDefines.h"
59 #include "GrCaps.h"
60 #include "GrContextFactory.h"
61 #include "gl/GrGLUtil.h"
62 using sk_gpu_test::GrContextFactory;
63 using sk_gpu_test::TestContext;
64 std::unique_ptr<GrContextFactory> gGrFactory;
65 #endif
66
67 struct GrContextOptions;
68
69 static const int kAutoTuneLoops = 0;
70
71 #if !defined(__has_feature)
72 #define __has_feature(x) 0
73 #endif
74
75 static const int kDefaultLoops =
76 #if defined(SK_DEBUG) || __has_feature(address_sanitizer)
77 1;
78 #else
79 kAutoTuneLoops;
80 #endif
81
82 static SkString loops_help_txt() {
83 SkString help;
84 help.printf("Number of times to run each bench. Set this to %d to auto-"
85 "tune for each bench. Timings are only reported when auto-tuning.",
86 kAutoTuneLoops);
87 return help;
88 }
89
90 static SkString to_string(int n) {
91 SkString str;
92 str.appendS32(n);
93 return str;
94 }
95
96 DEFINE_int32(loops, kDefaultLoops, loops_help_txt().c_str());
97
98 DEFINE_int32(samples, 10, "Number of samples to measure for each bench.");
99 DEFINE_int32(ms, 0, "If >0, run each bench for this many ms instead of obeying --samples.");
100 DEFINE_int32(overheadLoops, 100000, "Loops to estimate timer overhead.");
101 DEFINE_double(overheadGoal, 0.0001,
102 "Loop until timer overhead is at most this fraction of our measurments.");
103 DEFINE_double(gpuMs, 5, "Target bench time in milliseconds for GPU.");
104 DEFINE_int32(gpuFrameLag, 5, "Estimated maximum number of frames the GPU allows to lag, used when the real value is unknown.");
105
106 DEFINE_string(outResultsFile, "", "If given, write results here as JSON.");
107 DEFINE_int32(maxCalibrationAttempts, 3,
108 "Try up to this many times to guess loops for a bench, or skip the bench.");
109 DEFINE_int32(maxLoops, 1000000, "Never run a bench more times than this.");
110 DEFINE_string(clip, "0,0,1000,1000", "Clip for SKPs.");
111 DEFINE_string(scales, "1.0", "Space-separated scales for SKPs.");
112 DEFINE_string(zoom, "1.0,0", "Comma-separated zoomMax,zoomPeriodMs factors for a periodic SKP zoom "
113 "function that ping-pongs between 1.0 and zoomMax.");
114 DEFINE_bool(bbh, true, "Build a BBH for SKPs?");
115 DEFINE_bool(lite, false, "Use SkLiteRecorder in recording benchmarks?");
116 DEFINE_bool(mpd, true, "Use MultiPictureDraw for the SKPs?");
117 DEFINE_bool(loopSKP, true, "Loop SKPs like we do for micro benches?");
118 DEFINE_int32(flushEvery, 10, "Flush --outResultsFile every Nth run.");
119 DEFINE_bool(resetGpuContext, true, "Reset the GrContext before running each test.");
120 DEFINE_bool(gpuStats, false, "Print GPU stats after each gpu benchmark?");
121 DEFINE_bool(gpuStatsDump, false, "Dump GPU stats after each benchmark to JSON.");
122 DEFINE_bool(keepAlive, false, "Print a message every so often so that we don't time out");
123 DEFINE_string(useThermalManager, "0,1,10,1000", "enabled,threshold,sleepTimeMs,timeoutMs for "
124 "the thermal manager.");
125
126 DEFINE_string(sourceType, "",
127 "Apply usual --match rules to source type: bench, gm, skp, image, etc.");
128 DEFINE_string(benchType, "",
129 "Apply usual --match rules to bench type: micro, recording, piping, playback, skcodec, etc.");
130
131 #if SK_SUPPORT_GPU
132 DEFINE_pathrenderer_flag;
133 #endif
134
135 static double now_ms() { return SkTime::GetNSecs() * 1e-6; }
136
137 static SkString humanize(double ms) {
138 if (FLAGS_verbose) return SkStringPrintf("%llu", (uint64_t)(ms*1e6));
139 return HumanizeMs(ms);
140 }
141 #define HUMANIZE(ms) humanize(ms).c_str()
142
143 bool Target::init(SkImageInfo info, Benchmark* bench) {
144 if (Benchmark::kRaster_Backend == config.backend) {
145 this->surface = SkSurface::MakeRaster(info);
146 if (!this->surface) {
147 return false;
148 }
149 }
150 return true;
151 }
152 bool Target::capturePixels(SkBitmap* bmp) {
153 SkCanvas* canvas = this->getCanvas();
154 if (!canvas) {
155 return false;
156 }
157 bmp->setInfo(canvas->imageInfo());
158 if (!canvas->readPixels(bmp, 0, 0)) {
159 SkDebugf("Can't read canvas pixels.\n");
160 return false;
161 }
162 return true;
163 }
164
165 #if SK_SUPPORT_GPU
166 struct GPUTarget : public Target {
167 explicit GPUTarget(const Config& c) : Target(c), context(nullptr) { }
168 TestContext* context;
169
170 void setup() override {
171 this->context->makeCurrent();
172 // Make sure we're done with whatever came before.
173 this->context->finish();
174 }
175 void endTiming() override {
176 if (this->context) {
177 this->context->waitOnSyncOrSwap();
178 }
179 }
180 void fence() override {
181 this->context->finish();
182 }
183
184 bool needsFrameTiming(int* maxFrameLag) const override {
185 if (!this->context->getMaxGpuFrameLag(maxFrameLag)) {
186 // Frame lag is unknown.
187 *maxFrameLag = FLAGS_gpuFrameLag;
188 }
189 return true;
190 }
191 bool init(SkImageInfo info, Benchmark* bench) override {
192 uint32_t flags = this->config.useDFText ? SkSurfaceProps::kUseDeviceIndependentFonts_Flag :
193 0;
194 SkSurfaceProps props(flags, SkSurfaceProps::kLegacyFontHost_InitType);
195 this->surface = SkSurface::MakeRenderTarget(gGrFactory->get(this->config.ctxType,
196 this->config.ctxOverrides),
197 SkBudgeted::kNo, info,
198 this->config.samples, &props);
199 this->context = gGrFactory->getContextInfo(this->config.ctxType,
200 this->config.ctxOverrides).testContext();
201 if (!this->surface.get()) {
202 return false;
203 }
204 if (!this->context->fenceSyncSupport()) {
205 SkDebugf("WARNING: GL context for config \"%s\" does not support fence sync. "
206 "Timings might not be accurate.\n", this->config.name.c_str());
207 }
208 return true;
209 }
210 void fillOptions(ResultsWriter* log) override {
211 const GrGLubyte* version;
212 if (this->context->backend() == kOpenGL_GrBackend) {
213 const GrGLInterface* gl =
214 reinterpret_cast<const GrGLInterface*>(this->context->backendContext());
215 GR_GL_CALL_RET(gl, version, GetString(GR_GL_VERSION));
216 log->configOption("GL_VERSION", (const char*)(version));
217
218 GR_GL_CALL_RET(gl, version, GetString(GR_GL_RENDERER));
219 log->configOption("GL_RENDERER", (const char*) version);
220
221 GR_GL_CALL_RET(gl, version, GetString(GR_GL_VENDOR));
222 log->configOption("GL_VENDOR", (const char*) version);
223
224 GR_GL_CALL_RET(gl, version, GetString(GR_GL_SHADING_LANGUAGE_VERSION));
225 log->configOption("GL_SHADING_LANGUAGE_VERSION", (const char*) version);
226 }
227 }
228 };
229
230 #endif
231
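// Times a single sample: clears the canvas, runs the bench body `loops` times
// between one start/stop of the wall clock (flushing, and for GPU targets
// syncing in endTiming()), and returns the elapsed time in milliseconds.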
232 static double time(int loops, Benchmark* bench, Target* target) {
233 SkCanvas* canvas = target->getCanvas();
234 if (canvas) {
235 canvas->clear(SK_ColorWHITE);
236 }
237 bench->preDraw(canvas);
238 double start = now_ms();
239 canvas = target->beginTiming(canvas);
240 bench->draw(loops, canvas);
241 if (canvas) {
242 canvas->flush();
243 }
244 target->endTiming();
245 double elapsed = now_ms() - start;
246 bench->postDraw(canvas);
247 return elapsed;
248 }
249
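// Estimates the fixed cost of reading the timer by averaging back-to-back
// now_ms() calls; setup_cpu_bench() uses this to choose a loop count large
// enough to make that cost negligible.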
250 static double estimate_timer_overhead() {
251 double overhead = 0;
252 for (int i = 0; i < FLAGS_overheadLoops; i++) {
253 double start = now_ms();
254 overhead += now_ms() - start;
255 }
256 return overhead / FLAGS_overheadLoops;
257 }
258
259 static int detect_forever_loops(int loops) {
260 // look for a magic run-forever value
261 if (loops < 0) {
262 loops = SK_MaxS32;
263 }
264 return loops;
265 }
266
267 static int clamp_loops(int loops) {
268 if (loops < 1) {
269 SkDebugf("ERROR: clamping loops from %d to 1. "
270 "There's probably something wrong with the bench.\n", loops);
271 return 1;
272 }
273 if (loops > FLAGS_maxLoops) {
274 SkDebugf("WARNING: clamping loops from %d to FLAGS_maxLoops, %d.\n", loops, FLAGS_maxLoops);
275 return FLAGS_maxLoops;
276 }
277 return loops;
278 }
279
280 static bool write_canvas_png(Target* target, const SkString& filename) {
281
282 if (filename.isEmpty()) {
283 return false;
284 }
285 if (target->getCanvas() &&
286 kUnknown_SkColorType == target->getCanvas()->imageInfo().colorType()) {
287 return false;
288 }
289
290 SkBitmap bmp;
291
292 if (!target->capturePixels(&bmp)) {
293 return false;
294 }
295
296 SkString dir = SkOSPath::Dirname(filename.c_str());
297 if (!sk_mkdir(dir.c_str())) {
298 SkDebugf("Can't make dir %s.\n", dir.c_str());
299 return false;
300 }
301 SkFILEWStream stream(filename.c_str());
302 if (!stream.isValid()) {
303 SkDebugf("Can't write %s.\n", filename.c_str());
304 return false;
305 }
306 if (!SkEncodeImage(&stream, bmp, SkEncodedImageFormat::kPNG, 100)) {
307 SkDebugf("Can't encode a PNG.\n");
308 return false;
309 }
310 return true;
311 }
312
313 static int kFailedLoops = -2;
314 static int setup_cpu_bench(const double overhead, Target* target, Benchmark* bench) {
315 // First figure out approximately how many loops of bench it takes to make overhead negligible.
316 double bench_plus_overhead = 0.0;
317 int round = 0;
318 int loops = bench->calculateLoops(FLAGS_loops);
319 if (kAutoTuneLoops == loops) {
320 while (bench_plus_overhead < overhead) {
321 if (round++ == FLAGS_maxCalibrationAttempts) {
322 SkDebugf("WARNING: Can't estimate loops for %s (%s vs. %s); skipping.\n",
323 bench->getUniqueName(), HUMANIZE(bench_plus_overhead), HUMANIZE(overhead));
324 return kFailedLoops;
325 }
326 bench_plus_overhead = time(1, bench, target);
327 }
328 }
329
330 // Later we'll just start and stop the timer once but loop N times.
331 // We'll pick N to make timer overhead negligible:
332 //
333 // overhead
334 // ------------------------- < FLAGS_overheadGoal
335 // overhead + N * Bench Time
336 //
337 // where bench_plus_overhead ~=~ overhead + Bench Time.
338 //
339 // Doing some math, we get:
340 //
341 // (overhead / FLAGS_overheadGoal) - overhead
342 // ------------------------------------------ < N
343 // bench_plus_overhead - overhead
344 //
345 // Luckily, this also works well in practice. :)
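// For instance (numbers purely illustrative): with overhead ~0.001ms, the
// default FLAGS_overheadGoal of 0.0001, and bench_plus_overhead ~0.05ms,
// N > (0.001/0.0001 - 0.001) / (0.05 - 0.001) ~= 204, so we'd run ~205 loops.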
346 if (kAutoTuneLoops == loops) {
347 const double numer = overhead / FLAGS_overheadGoal - overhead;
348 const double denom = bench_plus_overhead - overhead;
349 loops = (int)ceil(numer / denom);
350 loops = clamp_loops(loops);
351 } else {
352 loops = detect_forever_loops(loops);
353 }
354
355 return loops;
356 }
357
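// Calibrates GPU benches by doubling the loop count until one sample takes at
// least FLAGS_gpuMs, scaling back linearly to hit that target, then warming up
// maxGpuFrameLag-1 frames so we time steady-state pipelined work.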
358 static int setup_gpu_bench(Target* target, Benchmark* bench, int maxGpuFrameLag) {
359 // First, figure out how many loops it'll take to get a frame up to FLAGS_gpuMs.
360 int loops = bench->calculateLoops(FLAGS_loops);
361 if (kAutoTuneLoops == loops) {
362 loops = 1;
363 double elapsed = 0;
364 do {
365 if (1<<30 == loops) {
366 // We're about to wrap. Something's wrong with the bench.
367 loops = 0;
368 break;
369 }
370 loops *= 2;
371 // If the GPU lets frames lag at all, we need to make sure we're timing
372 // _this_ round, not still timing last round.
373 for (int i = 0; i < maxGpuFrameLag; i++) {
374 elapsed = time(loops, bench, target);
375 }
376 } while (elapsed < FLAGS_gpuMs);
377
378 // We've overshot at least a little. Scale back linearly.
379 loops = (int)ceil(loops * FLAGS_gpuMs / elapsed);
380 loops = clamp_loops(loops);
381
382 // Make sure we're not still timing our calibration.
383 target->fence();
384 } else {
385 loops = detect_forever_loops(loops);
386 }
387
388 // Pretty much the same deal as the calibration: do some warmup to make
389 // sure we're timing steady-state pipelined frames.
390 for (int i = 0; i < maxGpuFrameLag - 1; i++) {
391 time(loops, bench, target);
392 }
393
394 return loops;
395 }
396
397 #if SK_SUPPORT_GPU
398 #define kBogusContextType GrContextFactory::kGL_ContextType
399 #define kBogusContextOverrides GrContextFactory::ContextOverrides::kNone
400 #else
401 #define kBogusContextType 0
402 #define kBogusContextOverrides 0
403 #endif
404
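// Translates one parsed --config entry into a Config for the run loop: GPU
// configs are checked against the available context (and its max sample
// count), while CPU tags such as nonrendering/8888/565/srgb/f16 map onto the
// CPU backends.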
405 static void create_config(const SkCommandLineConfig* config, SkTArray<Config>* configs) {
406
407 #if SK_SUPPORT_GPU
408 if (const auto* gpuConfig = config->asConfigGpu()) {
409 if (!FLAGS_gpu)
410 return;
411
412 const auto ctxType = gpuConfig->getContextType();
413 const auto ctxOverrides = gpuConfig->getContextOverrides();
414 const auto sampleCount = gpuConfig->getSamples();
415
416 if (const GrContext* ctx = gGrFactory->get(ctxType, ctxOverrides)) {
417 const auto maxSampleCount = ctx->caps()->maxSampleCount();
418 if (sampleCount > maxSampleCount) {
419 SkDebugf("Configuration sample count %d exceeds maximum %d.\n",
420 sampleCount, maxSampleCount);
421 return;
422 }
423 } else {
424 SkDebugf("No context was available matching config type and options.\n");
425 return;
426 }
427
428 Config target = {
429 gpuConfig->getTag(),
430 Benchmark::kGPU_Backend,
431 gpuConfig->getColorType(),
432 kPremul_SkAlphaType,
433 sk_ref_sp(gpuConfig->getColorSpace()),
434 sampleCount,
435 ctxType,
436 ctxOverrides,
437 gpuConfig->getUseDIText()
438 };
439
440 configs->push_back(target);
441 return;
442 }
443 #endif
444
445 #define CPU_CONFIG(name, backend, color, alpha, colorSpace) \
446 if (config->getTag().equals(#name)) { \
447 Config config = { \
448 SkString(#name), Benchmark::backend, color, alpha, colorSpace, \
449 0, kBogusContextType, kBogusContextOverrides, false \
450 }; \
451 configs->push_back(config); \
452 return; \
453 }
454
455 if (FLAGS_cpu) {
456 CPU_CONFIG(nonrendering, kNonRendering_Backend,
457 kUnknown_SkColorType, kUnpremul_SkAlphaType, nullptr)
458
459 CPU_CONFIG(8888, kRaster_Backend,
460 kN32_SkColorType, kPremul_SkAlphaType, nullptr)
461 CPU_CONFIG(565, kRaster_Backend,
462 kRGB_565_SkColorType, kOpaque_SkAlphaType, nullptr)
463 auto srgbColorSpace = SkColorSpace::MakeSRGB();
464 CPU_CONFIG(srgb, kRaster_Backend,
465 kN32_SkColorType, kPremul_SkAlphaType, srgbColorSpace)
466 auto srgbLinearColorSpace = SkColorSpace::MakeSRGBLinear();
467 CPU_CONFIG(f16, kRaster_Backend,
468 kRGBA_F16_SkColorType, kPremul_SkAlphaType, srgbLinearColorSpace)
469 }
470
471 #undef CPU_CONFIG
472 }
473
474 // Append all configs that are enabled and supported.
475 void create_configs(SkTArray<Config>* configs) {
476 SkCommandLineConfigArray array;
477 ParseConfigs(FLAGS_config, &array);
478 for (int i = 0; i < array.count(); ++i) {
479 create_config(array[i].get(), configs);
480 }
481 }
482
483 // disable warning : switch statement contains default but no 'case' labels
484 #if defined _WIN32
485 #pragma warning ( push )
486 #pragma warning ( disable : 4065 )
487 #endif
488
489 // If bench is enabled for config, returns a Target* for it, otherwise nullptr.
490 static Target* is_enabled(Benchmark* bench, const Config& config) {
491 if (!bench->isSuitableFor(config.backend)) {
492 return nullptr;
493 }
494
495 SkImageInfo info = SkImageInfo::Make(bench->getSize().fX, bench->getSize().fY,
496 config.color, config.alpha, config.colorSpace);
497
498 Target* target = nullptr;
499
500 switch (config.backend) {
501 #if SK_SUPPORT_GPU
502 case Benchmark::kGPU_Backend:
503 target = new GPUTarget(config);
504 break;
505 #endif
506 default:
507 target = new Target(config);
508 break;
509 }
510
511 if (!target->init(info, bench)) {
512 delete target;
513 return nullptr;
514 }
515 return target;
516 }
517
518 #if defined _WIN32
519 #pragma warning ( pop )
520 #endif
521
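// Returns true only if the encoded image supports region decoding and is large
// enough to supply a (sampleSize*minOutputSize)^2 subset; on success the full
// image dimensions are reported through width/height.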
522 static bool valid_brd_bench(sk_sp<SkData> encoded, SkColorType colorType, uint32_t sampleSize,
523 uint32_t minOutputSize, int* width, int* height) {
524 std::unique_ptr<SkBitmapRegionDecoder> brd(
525 SkBitmapRegionDecoder::Create(encoded, SkBitmapRegionDecoder::kAndroidCodec_Strategy));
526 if (nullptr == brd.get()) {
527 // This indicates that subset decoding is not supported for a particular image format.
528 return false;
529 }
530
531 if (sampleSize * minOutputSize > (uint32_t) brd->width() || sampleSize * minOutputSize >
532 (uint32_t) brd->height()) {
533 // This indicates that the image is not large enough to decode a
534 // minOutputSize x minOutputSize subset at the given sampleSize.
535 return false;
536 }
537
538 // Set the image width and height. The calling code will use this to choose subsets to decode.
539 *width = brd->width();
540 *height = brd->height();
541 return true;
542 }
543
544 static void cleanup_run(Target* target) {
545 delete target;
546 #if SK_SUPPORT_GPU
547 if (FLAGS_abandonGpuContext) {
548 gGrFactory->abandonContexts();
549 }
550 if (FLAGS_resetGpuContext || FLAGS_abandonGpuContext) {
551 gGrFactory->destroyContexts();
552 }
553 #endif
554 }
555
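// Expands each entry in paths: a file that already ends in ext is added
// directly, anything else is treated as a directory and scanned for files
// with that extension.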
556 static void collect_files(const SkCommandLineFlags::StringArray& paths, const char* ext,
557 SkTArray<SkString>* list) {
558 for (int i = 0; i < paths.count(); ++i) {
559 if (SkStrEndsWith(paths[i], ext)) {
560 list->push_back(SkString(paths[i]));
561 } else {
562 SkOSFile::Iter it(paths[i], ext);
563 SkString path;
564 while (it.next(&path)) {
565 list->push_back(SkOSPath::Join(paths[i], path.c_str()));
566 }
567 }
568 }
569 }
570
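// Produces every requested benchmark in turn: registered micro benches, GMs
// run as benches, SKP recording/piping/playback (per scale and MPD mode), SVG
// playback, animated-zoom SKPs, then codec, Android codec, BRD, and color
// codec image benches.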
571 class BenchmarkStream {
572 public:
573 BenchmarkStream() : fBenches(BenchRegistry::Head())
574 , fGMs(skiagm::GMRegistry::Head())
575 , fCurrentRecording(0)
576 , fCurrentPiping(0)
577 , fCurrentScale(0)
578 , fCurrentSKP(0)
579 , fCurrentSVG(0)
580 , fCurrentUseMPD(0)
581 , fCurrentCodec(0)
582 , fCurrentAndroidCodec(0)
583 , fCurrentBRDImage(0)
584 , fCurrentColorImage(0)
585 , fCurrentColorType(0)
586 , fCurrentAlphaType(0)
587 , fCurrentSubsetType(0)
588 , fCurrentSampleSize(0)
589 , fCurrentAnimSKP(0) {
590 collect_files(FLAGS_skps, ".skp", &fSKPs);
591 collect_files(FLAGS_svgs, ".svg", &fSVGs);
592
593 if (4 != sscanf(FLAGS_clip[0], "%d,%d,%d,%d",
594 &fClip.fLeft, &fClip.fTop, &fClip.fRight, &fClip.fBottom)) {
595 SkDebugf("Can't parse %s from --clip as an SkIRect.\n", FLAGS_clip[0]);
596 exit(1);
597 }
598
599 for (int i = 0; i < FLAGS_scales.count(); i++) {
600 if (1 != sscanf(FLAGS_scales[i], "%f", &fScales.push_back())) {
601 SkDebugf("Can't parse %s from --scales as an SkScalar.\n", FLAGS_scales[i]);
602 exit(1);
603 }
604 }
605
606 if (2 != sscanf(FLAGS_zoom[0], "%f,%lf", &fZoomMax, &fZoomPeriodMs)) {
607 SkDebugf("Can't parse %s from --zoom as a zoomMax,zoomPeriodMs.\n", FLAGS_zoom[0]);
608 exit(1);
609 }
610
611 if (FLAGS_mpd) {
612 fUseMPDs.push_back() = true;
613 }
614 fUseMPDs.push_back() = false;
615
616 // Prepare the images for decoding
617 if (!CollectImages(FLAGS_images, &fImages)) {
618 exit(1);
619 }
620 if (!CollectImages(FLAGS_colorImages, &fColorImages)) {
621 exit(1);
622 }
623
624 // Choose the candidate color types for image decoding
625 fColorTypes.push_back(kN32_SkColorType);
626 if (!FLAGS_simpleCodec) {
627 fColorTypes.push_back(kRGB_565_SkColorType);
628 fColorTypes.push_back(kAlpha_8_SkColorType);
629 fColorTypes.push_back(kIndex_8_SkColorType);
630 fColorTypes.push_back(kGray_8_SkColorType);
631 }
632 }
633
634 static sk_sp<SkPicture> ReadPicture(const char* path) {
635 // Not strictly necessary, as it will be checked again later,
636 // but helps to avoid a lot of pointless work if we're going to skip it.
637 if (SkCommandLineFlags::ShouldSkip(FLAGS_match, SkOSPath::Basename(path).c_str())) {
638 return nullptr;
639 }
640
641 std::unique_ptr<SkStream> stream = SkStream::MakeFromFile(path);
642 if (!stream) {
643 SkDebugf("Could not read %s.\n", path);
644 return nullptr;
645 }
646
647 return SkPicture::MakeFromStream(stream.get());
648 }
649
650 static sk_sp<SkPicture> ReadSVGPicture(const char* path) {
651 SkFILEStream stream(path);
652 if (!stream.isValid()) {
653 SkDebugf("Could not read %s.\n", path);
654 return nullptr;
655 }
656
657 sk_sp<SkSVGDOM> svgDom = SkSVGDOM::MakeFromStream(stream);
658 if (!svgDom) {
659 SkDebugf("Could not parse %s.\n", path);
660 return nullptr;
661 }
662
663 // Use the intrinsic SVG size if available, otherwise fall back to a default value.
664 static const SkSize kDefaultContainerSize = SkSize::Make(128, 128);
665 if (svgDom->containerSize().isEmpty()) {
666 svgDom->setContainerSize(kDefaultContainerSize);
667 }
668
669 SkPictureRecorder recorder;
670 svgDom->render(recorder.beginRecording(svgDom->containerSize().width(),
671 svgDom->containerSize().height()));
672 return recorder.finishRecordingAsPicture();
673 }
674
675 Benchmark* next() {
676 std::unique_ptr<Benchmark> bench;
677 do {
678 bench.reset(this->rawNext());
679 if (!bench) {
680 return nullptr;
681 }
682 } while(SkCommandLineFlags::ShouldSkip(FLAGS_sourceType, fSourceType) ||
683 SkCommandLineFlags::ShouldSkip(FLAGS_benchType, fBenchType));
684 return bench.release();
685 }
686
687 Benchmark* rawNext() {
688 if (fBenches) {
689 Benchmark* bench = fBenches->factory()(nullptr);
690 fBenches = fBenches->next();
691 fSourceType = "bench";
692 fBenchType = "micro";
693 return bench;
694 }
695
696 while (fGMs) {
697 std::unique_ptr<skiagm::GM> gm(fGMs->factory()(nullptr));
698 fGMs = fGMs->next();
699 if (gm->runAsBench()) {
700 fSourceType = "gm";
701 fBenchType = "micro";
702 return new GMBench(gm.release());
703 }
704 }
705
706 // First add all .skps as RecordingBenches.
707 while (fCurrentRecording < fSKPs.count()) {
708 const SkString& path = fSKPs[fCurrentRecording++];
709 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
710 if (!pic) {
711 continue;
712 }
713 SkString name = SkOSPath::Basename(path.c_str());
714 fSourceType = "skp";
715 fBenchType = "recording";
716 fSKPBytes = static_cast<double>(pic->approximateBytesUsed());
717 fSKPOps = pic->approximateOpCount();
718 return new RecordingBench(name.c_str(), pic.get(), FLAGS_bbh, FLAGS_lite);
719 }
720
721 // Add all .skps as PipeBenches.
722 while (fCurrentPiping < fSKPs.count()) {
723 const SkString& path = fSKPs[fCurrentPiping++];
724 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
725 if (!pic) {
726 continue;
727 }
728 SkString name = SkOSPath::Basename(path.c_str());
729 fSourceType = "skp";
730 fBenchType = "piping";
731 fSKPBytes = static_cast<double>(pic->approximateBytesUsed());
732 fSKPOps = pic->approximateOpCount();
733 return new PipingBench(name.c_str(), pic.get());
734 }
735
736 // Then once each for each scale as SKPBenches (playback).
737 while (fCurrentScale < fScales.count()) {
738 while (fCurrentSKP < fSKPs.count()) {
739 const SkString& path = fSKPs[fCurrentSKP];
740 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
741 if (!pic) {
742 fCurrentSKP++;
743 continue;
744 }
745
746 while (fCurrentUseMPD < fUseMPDs.count()) {
747 if (FLAGS_bbh) {
748 // The SKP we read off disk doesn't have a BBH. Re-record so it grows one.
749 SkRTreeFactory factory;
750 SkPictureRecorder recorder;
751 pic->playback(recorder.beginRecording(pic->cullRect().width(),
752 pic->cullRect().height(),
753 &factory,
754 0));
755 pic = recorder.finishRecordingAsPicture();
756 }
757 SkString name = SkOSPath::Basename(path.c_str());
758 fSourceType = "skp";
759 fBenchType = "playback";
760 return new SKPBench(name.c_str(), pic.get(), fClip, fScales[fCurrentScale],
761 fUseMPDs[fCurrentUseMPD++], FLAGS_loopSKP);
762 }
763 fCurrentUseMPD = 0;
764 fCurrentSKP++;
765 }
766
767 while (fCurrentSVG++ < fSVGs.count()) {
768 const char* path = fSVGs[fCurrentSVG - 1].c_str();
769 if (sk_sp<SkPicture> pic = ReadSVGPicture(path)) {
770 fSourceType = "svg";
771 fBenchType = "playback";
772 return new SKPBench(SkOSPath::Basename(path).c_str(), pic.get(), fClip,
773 fScales[fCurrentScale], false, FLAGS_loopSKP);
774 }
775 }
776
777 fCurrentSKP = 0;
778 fCurrentSVG = 0;
779 fCurrentScale++;
780 }
781
782 // Now loop over each skp again if we have an animation
783 if (fZoomMax != 1.0f && fZoomPeriodMs > 0) {
784 while (fCurrentAnimSKP < fSKPs.count()) {
785 const SkString& path = fSKPs[fCurrentAnimSKP];
786 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
787 if (!pic) {
788 fCurrentAnimSKP++;
789 continue;
790 }
791
792 fCurrentAnimSKP++;
793 SkString name = SkOSPath::Basename(path.c_str());
794 sk_sp<SKPAnimationBench::Animation> animation(
795 SKPAnimationBench::CreateZoomAnimation(fZoomMax, fZoomPeriodMs));
796 return new SKPAnimationBench(name.c_str(), pic.get(), fClip, animation.get(),
797 FLAGS_loopSKP);
798 }
799 }
800
801 for (; fCurrentCodec < fImages.count(); fCurrentCodec++) {
802 fSourceType = "image";
803 fBenchType = "skcodec";
804 const SkString& path = fImages[fCurrentCodec];
805 if (SkCommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
806 continue;
807 }
808 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
809 std::unique_ptr<SkCodec> codec(SkCodec::NewFromData(encoded));
810 if (!codec) {
811 // Nothing to time.
812 SkDebugf("Cannot find codec for %s\n", path.c_str());
813 continue;
814 }
815
816 while (fCurrentColorType < fColorTypes.count()) {
817 const SkColorType colorType = fColorTypes[fCurrentColorType];
818
819 SkAlphaType alphaType = codec->getInfo().alphaType();
820 if (FLAGS_simpleCodec) {
821 if (kUnpremul_SkAlphaType == alphaType) {
822 alphaType = kPremul_SkAlphaType;
823 }
824
825 fCurrentColorType++;
826 } else {
827 switch (alphaType) {
828 case kOpaque_SkAlphaType:
829 // We only need to test one alpha type (opaque).
830 fCurrentColorType++;
831 break;
832 case kUnpremul_SkAlphaType:
833 case kPremul_SkAlphaType:
834 if (0 == fCurrentAlphaType) {
835 // Test unpremul first.
836 alphaType = kUnpremul_SkAlphaType;
837 fCurrentAlphaType++;
838 } else {
839 // Test premul.
840 alphaType = kPremul_SkAlphaType;
841 fCurrentAlphaType = 0;
842 fCurrentColorType++;
843 }
844 break;
845 default:
846 SkASSERT(false);
847 fCurrentColorType++;
848 break;
849 }
850 }
851
852 // Make sure we can decode to this color type and alpha type.
853 SkImageInfo info =
854 codec->getInfo().makeColorType(colorType).makeAlphaType(alphaType);
855 const size_t rowBytes = info.minRowBytes();
856 SkAutoMalloc storage(info.getSafeSize(rowBytes));
857
858 // Used if fCurrentColorType is kIndex_8_SkColorType
859 int colorCount = 256;
860 SkPMColor colors[256];
861
862 const SkCodec::Result result = codec->getPixels(
863 info, storage.get(), rowBytes, nullptr, colors,
864 &colorCount);
865 switch (result) {
866 case SkCodec::kSuccess:
867 case SkCodec::kIncompleteInput:
868 return new CodecBench(SkOSPath::Basename(path.c_str()),
869 encoded.get(), colorType, alphaType);
870 case SkCodec::kInvalidConversion:
871 // This is okay. Not all conversions are valid.
872 break;
873 default:
874 // This represents some sort of failure.
875 SkASSERT(false);
876 break;
877 }
878 }
879 fCurrentColorType = 0;
880 }
881
882 // Run AndroidCodecBenches
883 const int sampleSizes[] = { 2, 4, 8 };
884 for (; fCurrentAndroidCodec < fImages.count(); fCurrentAndroidCodec++) {
885 fSourceType = "image";
886 fBenchType = "skandroidcodec";
887
888 const SkString& path = fImages[fCurrentAndroidCodec];
889 if (SkCommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
890 continue;
891 }
892 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
893 std::unique_ptr<SkAndroidCodec> codec(SkAndroidCodec::NewFromData(encoded));
894 if (!codec) {
895 // Nothing to time.
896 SkDebugf("Cannot find codec for %s\n", path.c_str());
897 continue;
898 }
899
900 while (fCurrentSampleSize < (int) SK_ARRAY_COUNT(sampleSizes)) {
901 int sampleSize = sampleSizes[fCurrentSampleSize];
902 fCurrentSampleSize++;
903 if (10 * sampleSize > SkTMin(codec->getInfo().width(), codec->getInfo().height())) {
904 // Avoid benchmarking scaled decodes of already small images.
905 break;
906 }
907
908 return new AndroidCodecBench(SkOSPath::Basename(path.c_str()),
909 encoded.get(), sampleSize);
910 }
911 fCurrentSampleSize = 0;
912 }
913
914 // Run the BRDBenches
915 // We intend to create benchmarks that model the use cases in
916 // android/libraries/social/tiledimage. In this library, an image is decoded in 512x512
917 // tiles. The image can be translated freely, so the location of a tile may be anywhere in
918 // the image. For that reason, we will benchmark decodes in five representative locations
919 // in the image. Additionally, this use case utilizes power of two scaling, so we will
920 // test on power of two sample sizes. The output tile is always 512x512, so, when a
921 // sampleSize is used, the size of the subset that is decoded is always
922 // (sampleSize*512)x(sampleSize*512).
923 // There are a few good reasons to only test on power of two sample sizes at this time:
924 // All use cases we are aware of only scale by powers of two.
925 // PNG decodes use the indicated sampling strategy regardless of the sample size, so
926 // these tests are sufficient to provide good coverage of our scaling options.
927 const uint32_t brdSampleSizes[] = { 1, 2, 4, 8, 16 };
928 const uint32_t minOutputSize = 512;
929 for (; fCurrentBRDImage < fImages.count(); fCurrentBRDImage++) {
930 fSourceType = "image";
931 fBenchType = "BRD";
932
933 const SkString& path = fImages[fCurrentBRDImage];
934 if (SkCommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
935 continue;
936 }
937
938 while (fCurrentColorType < fColorTypes.count()) {
939 while (fCurrentSampleSize < (int) SK_ARRAY_COUNT(brdSampleSizes)) {
940 while (fCurrentSubsetType <= kLastSingle_SubsetType) {
941
942 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
943 const SkColorType colorType = fColorTypes[fCurrentColorType];
944 uint32_t sampleSize = brdSampleSizes[fCurrentSampleSize];
945 int currentSubsetType = fCurrentSubsetType++;
946
947 int width = 0;
948 int height = 0;
949 if (!valid_brd_bench(encoded, colorType, sampleSize, minOutputSize,
950 &width, &height)) {
951 break;
952 }
953
954 SkString basename = SkOSPath::Basename(path.c_str());
955 SkIRect subset;
956 const uint32_t subsetSize = sampleSize * minOutputSize;
957 switch (currentSubsetType) {
958 case kTopLeft_SubsetType:
959 basename.append("_TopLeft");
960 subset = SkIRect::MakeXYWH(0, 0, subsetSize, subsetSize);
961 break;
962 case kTopRight_SubsetType:
963 basename.append("_TopRight");
964 subset = SkIRect::MakeXYWH(width - subsetSize, 0, subsetSize,
965 subsetSize);
966 break;
967 case kMiddle_SubsetType:
968 basename.append("_Middle");
969 subset = SkIRect::MakeXYWH((width - subsetSize) / 2,
970 (height - subsetSize) / 2, subsetSize, subsetSize);
971 break;
972 case kBottomLeft_SubsetType:
973 basename.append("_BottomLeft");
974 subset = SkIRect::MakeXYWH(0, height - subsetSize, subsetSize,
975 subsetSize);
976 break;
977 case kBottomRight_SubsetType:
978 basename.append("_BottomRight");
979 subset = SkIRect::MakeXYWH(width - subsetSize,
980 height - subsetSize, subsetSize, subsetSize);
981 break;
982 default:
983 SkASSERT(false);
984 }
985
986 return new BitmapRegionDecoderBench(basename.c_str(), encoded.get(),
987 colorType, sampleSize, subset);
988 }
989 fCurrentSubsetType = 0;
990 fCurrentSampleSize++;
991 }
992 fCurrentSampleSize = 0;
993 fCurrentColorType++;
994 }
995 fCurrentColorType = 0;
996 }
997
998 while (fCurrentColorImage < fColorImages.count()) {
999 fSourceType = "colorimage";
1000 fBenchType = "skcolorcodec";
1001 const SkString& path = fColorImages[fCurrentColorImage];
1002 fCurrentColorImage++;
1003 sk_sp<SkData> encoded = SkData::MakeFromFileName(path.c_str());
1004 if (encoded) {
1005 return new ColorCodecBench(SkOSPath::Basename(path.c_str()).c_str(),
1006 std::move(encoded));
1007 } else {
1008 SkDebugf("Could not read file %s.\n", path.c_str());
1009 }
1010 }
1011
1012 return nullptr;
1013 }
1014
1015 void fillCurrentOptions(ResultsWriter* log) const {
1016 log->configOption("source_type", fSourceType);
1017 log->configOption("bench_type", fBenchType);
1018 if (0 == strcmp(fSourceType, "skp")) {
1019 log->configOption("clip",
1020 SkStringPrintf("%d %d %d %d", fClip.fLeft, fClip.fTop,
1021 fClip.fRight, fClip.fBottom).c_str());
1022 SkASSERT_RELEASE(fCurrentScale < fScales.count()); // debugging paranoia
1023 log->configOption("scale", SkStringPrintf("%.2g", fScales[fCurrentScale]).c_str());
1024 if (fCurrentUseMPD > 0) {
1025 SkASSERT(1 == fCurrentUseMPD || 2 == fCurrentUseMPD);
1026 log->configOption("multi_picture_draw", fUseMPDs[fCurrentUseMPD-1] ? "true" : "false");
1027 }
1028 }
1029 if (0 == strcmp(fBenchType, "recording")) {
1030 log->metric("bytes", fSKPBytes);
1031 log->metric("ops", fSKPOps);
1032 }
1033 }
1034
1035 private:
1036 enum SubsetType {
1037 kTopLeft_SubsetType = 0,
1038 kTopRight_SubsetType = 1,
1039 kMiddle_SubsetType = 2,
1040 kBottomLeft_SubsetType = 3,
1041 kBottomRight_SubsetType = 4,
1042 kTranslate_SubsetType = 5,
1043 kZoom_SubsetType = 6,
1044 kLast_SubsetType = kZoom_SubsetType,
1045 kLastSingle_SubsetType = kBottomRight_SubsetType,
1046 };
1047
1048 const BenchRegistry* fBenches;
1049 const skiagm::GMRegistry* fGMs;
1050 SkIRect fClip;
1051 SkTArray<SkScalar> fScales;
1052 SkTArray<SkString> fSKPs;
1053 SkTArray<SkString> fSVGs;
1054 SkTArray<bool> fUseMPDs;
1055 SkTArray<SkString> fImages;
1056 SkTArray<SkString> fColorImages;
1057 SkTArray<SkColorType, true> fColorTypes;
1058 SkScalar fZoomMax;
1059 double fZoomPeriodMs;
1060
1061 double fSKPBytes, fSKPOps;
1062
1063 const char* fSourceType; // What we're benching: bench, GM, SKP, ...
1064 const char* fBenchType; // How we bench it: micro, recording, playback, ...
1065 int fCurrentRecording;
1066 int fCurrentPiping;
1067 int fCurrentScale;
1068 int fCurrentSKP;
1069 int fCurrentSVG;
1070 int fCurrentUseMPD;
1071 int fCurrentCodec;
1072 int fCurrentAndroidCodec;
1073 int fCurrentBRDImage;
1074 int fCurrentColorImage;
1075 int fCurrentColorType;
1076 int fCurrentAlphaType;
1077 int fCurrentSubsetType;
1078 int fCurrentSampleSize;
1079 int fCurrentAnimSKP;
1080 };
1081
1082 // Some runs (mostly, Valgrind) are so slow that the bot framework thinks we've hung.
1083 // This prints something every once in a while so that it knows we're still working.
1084 static void start_keepalive() {
1085 struct Loop {
1086 static void forever(void*) {
1087 for (;;) {
1088 static const int kSec = 1200;
1089 #if defined(SK_BUILD_FOR_WIN)
1090 Sleep(kSec * 1000);
1091 #else
1092 sleep(kSec);
1093 #endif
1094 SkDebugf("\nBenchmarks still running...\n");
1095 }
1096 }
1097 };
1098 static SkThread* intentionallyLeaked = new SkThread(Loop::forever);
1099 intentionallyLeaked->start();
1100 }
1101
1102 int main(int argc, char** argv) {
1103 SkCommandLineFlags::Parse(argc, argv);
1104 #if defined(SK_BUILD_FOR_IOS)
1105 cd_Documents();
1106 #endif
1107 SetupCrashHandler();
1108 SkAutoGraphics ag;
1109 SkTaskGroup::Enabler enabled(FLAGS_threads);
1110
1111 #if SK_SUPPORT_GPU
1112 GrContextOptions grContextOpts;
1113 grContextOpts.fGpuPathRenderers = CollectGpuPathRenderersFromFlags();
1114 gGrFactory.reset(new GrContextFactory(grContextOpts));
1115 #endif
1116
1117 if (FLAGS_veryVerbose) {
1118 FLAGS_verbose = true;
1119 }
1120
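// A fixed --loops count disables auto-tuning: one sample is enough and there
// is no GPU frame lag to prime.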
1121 if (kAutoTuneLoops != FLAGS_loops) {
1122 FLAGS_samples = 1;
1123 FLAGS_gpuFrameLag = 0;
1124 }
1125
1126 if (!FLAGS_writePath.isEmpty()) {
1127 SkDebugf("Writing files to %s.\n", FLAGS_writePath[0]);
1128 if (!sk_mkdir(FLAGS_writePath[0])) {
1129 SkDebugf("Could not create %s. Files won't be written.\n", FLAGS_writePath[0]);
1130 FLAGS_writePath.set(0, nullptr);
1131 }
1132 }
1133
1134 std::unique_ptr<ResultsWriter> log(new ResultsWriter);
1135 if (!FLAGS_outResultsFile.isEmpty()) {
1136 #if defined(SK_RELEASE)
1137 log.reset(new NanoJSONResultsWriter(FLAGS_outResultsFile[0]));
1138 #else
1139 SkDebugf("I'm ignoring --outResultsFile because this is a Debug build.");
1140 return 1;
1141 #endif
1142 }
1143
1144 if (1 == FLAGS_properties.count() % 2) {
1145 SkDebugf("ERROR: --properties must be passed with an even number of arguments.\n");
1146 return 1;
1147 }
1148 for (int i = 1; i < FLAGS_properties.count(); i += 2) {
1149 log->property(FLAGS_properties[i-1], FLAGS_properties[i]);
1150 }
1151
1152 if (1 == FLAGS_key.count() % 2) {
1153 SkDebugf("ERROR: --key must be passed with an even number of arguments.\n");
1154 return 1;
1155 }
1156 for (int i = 1; i < FLAGS_key.count(); i += 2) {
1157 log->key(FLAGS_key[i-1], FLAGS_key[i]);
1158 }
1159
1160 const double overhead = estimate_timer_overhead();
1161 SkDebugf("Timer overhead: %s\n", HUMANIZE(overhead));
1162
1163 SkTArray<double> samples;
1164
1165 if (kAutoTuneLoops != FLAGS_loops) {
1166 SkDebugf("Fixed number of loops; times would only be misleading so we won't print them.\n");
1167 } else if (FLAGS_quiet) {
1168 SkDebugf("! -> high variance, ? -> moderate variance\n");
1169 SkDebugf(" micros \tbench\n");
1170 } else if (FLAGS_ms) {
1171 SkDebugf("curr/maxrss\tloops\tmin\tmedian\tmean\tmax\tstddev\tsamples\tconfig\tbench\n");
1172 } else {
1173 SkDebugf("curr/maxrss\tloops\tmin\tmedian\tmean\tmax\tstddev\t%-*s\tconfig\tbench\n",
1174 FLAGS_samples, "samples");
1175 }
1176
1177 SkTArray<Config> configs;
1178 create_configs(&configs);
1179
1180 #ifdef THERMAL_MANAGER_SUPPORTED
1181 int tmEnabled, tmThreshold, tmSleepTimeMs, tmTimeoutMs;
1182 if (4 != sscanf(FLAGS_useThermalManager[0], "%d,%d,%d,%d",
1183 &tmEnabled, &tmThreshold, &tmSleepTimeMs, &tmTimeoutMs)) {
1184 SkDebugf("Can't parse %s from --useThermalManager.\n", FLAGS_useThermalManager[0]);
1185 exit(1);
1186 }
1187 ThermalManager tm(tmThreshold, tmSleepTimeMs, tmTimeoutMs);
1188 #endif
1189
1190 if (FLAGS_keepAlive) {
1191 start_keepalive();
1192 }
1193
1194 gSkUseAnalyticAA = FLAGS_analyticAA;
1195
1196 if (FLAGS_forceAnalyticAA) {
1197 gSkForceAnalyticAA = true;
1198 }
1199
1200 int runs = 0;
1201 BenchmarkStream benchStream;
1202 while (Benchmark* b = benchStream.next()) {
1203 std::unique_ptr<Benchmark> bench(b);
1204 if (SkCommandLineFlags::ShouldSkip(FLAGS_match, bench->getUniqueName())) {
1205 continue;
1206 }
1207
1208 if (!configs.empty()) {
1209 log->bench(bench->getUniqueName(), bench->getSize().fX, bench->getSize().fY);
1210 bench->delayedSetup();
1211 }
1212 for (int i = 0; i < configs.count(); ++i) {
1213 #ifdef THERMAL_MANAGER_SUPPORTED
1214 if (tmEnabled && !tm.coolOffIfNecessary()) {
1215 SkDebugf("Could not cool off, timings will be throttled\n");
1216 }
1217 #endif
1218 Target* target = is_enabled(b, configs[i]);
1219 if (!target) {
1220 continue;
1221 }
1222
1223 // During HWUI output this canvas may be nullptr.
1224 SkCanvas* canvas = target->getCanvas();
1225 const char* config = target->config.name.c_str();
1226
1227 if (FLAGS_pre_log || FLAGS_dryRun) {
1228 SkDebugf("Running %s\t%s\n"
1229 , bench->getUniqueName()
1230 , config);
1231 if (FLAGS_dryRun) {
1232 continue;
1233 }
1234 }
1235
1236 target->setup();
1237 bench->perCanvasPreDraw(canvas);
1238
1239 int maxFrameLag;
1240 int loops = target->needsFrameTiming(&maxFrameLag)
1241 ? setup_gpu_bench(target, bench.get(), maxFrameLag)
1242 : setup_cpu_bench(overhead, target, bench.get());
1243
1244 if (FLAGS_ms) {
1245 samples.reset();
1246 auto stop = now_ms() + FLAGS_ms;
1247 do {
1248 samples.push_back(time(loops, bench.get(), target) / loops);
1249 } while (now_ms() < stop);
1250 } else {
1251 samples.reset(FLAGS_samples);
1252 for (int s = 0; s < FLAGS_samples; s++) {
1253 samples[s] = time(loops, bench.get(), target) / loops;
1254 }
1255 }
1256
1257 #if SK_SUPPORT_GPU
1258 SkTArray<SkString> keys;
1259 SkTArray<double> values;
1260 bool gpuStatsDump = FLAGS_gpuStatsDump && Benchmark::kGPU_Backend == configs[i].backend;
1261 if (gpuStatsDump) {
1262 // TODO cache stats
1263 bench->getGpuStats(canvas, &keys, &values);
1264 }
1265 #endif
1266
1267 bench->perCanvasPostDraw(canvas);
1268
1269 if (Benchmark::kNonRendering_Backend != target->config.backend &&
1270 !FLAGS_writePath.isEmpty() && FLAGS_writePath[0]) {
1271 SkString pngFilename = SkOSPath::Join(FLAGS_writePath[0], config);
1272 pngFilename = SkOSPath::Join(pngFilename.c_str(), bench->getUniqueName());
1273 pngFilename.append(".png");
1274 write_canvas_png(target, pngFilename);
1275 }
1276
1277 if (kFailedLoops == loops) {
1278 // Can't be timed. A warning note has already been printed.
1279 cleanup_run(target);
1280 continue;
1281 }
1282
1283 Stats stats(samples);
1284 log->config(config);
1285 log->configOption("name", bench->getName());
1286 benchStream.fillCurrentOptions(log.get());
1287 target->fillOptions(log.get());
1288 log->metric("min_ms", stats.min);
1289 log->metrics("samples", samples);
1290 #if SK_SUPPORT_GPU
1291 if (gpuStatsDump) {
1292 // dump to json, only SKPBench currently returns valid keys / values
1293 SkASSERT(keys.count() == values.count());
1294 for (int i = 0; i < keys.count(); i++) {
1295 log->metric(keys[i].c_str(), values[i]);
1296 }
1297 }
1298 #endif
1299
1300 if (runs++ % FLAGS_flushEvery == 0) {
1301 log->flush();
1302 }
1303
1304 if (kAutoTuneLoops != FLAGS_loops) {
1305 if (configs.count() == 1) {
1306 config = ""; // Only print the config if we run the same bench on more than one.
1307 }
1308 SkDebugf("%4d/%-4dMB\t%s\t%s\n"
1309 , sk_tools::getCurrResidentSetSizeMB()
1310 , sk_tools::getMaxResidentSetSizeMB()
1311 , bench->getUniqueName()
1312 , config);
1313 } else if (FLAGS_quiet) {
1314 const char* mark = " ";
1315 const double stddev_percent = 100 * sqrt(stats.var) / stats.mean;
1316 if (stddev_percent > 5) mark = "?";
1317 if (stddev_percent > 10) mark = "!";
1318
1319 SkDebugf("%10.2f %s\t%s\t%s\n",
1320 stats.median*1e3, mark, bench->getUniqueName(), config);
1321 } else {
1322 const double stddev_percent = 100 * sqrt(stats.var) / stats.mean;
1323 SkDebugf("%4d/%-4dMB\t%d\t%s\t%s\t%s\t%s\t%.0f%%\t%s\t%s\t%s\n"
1324 , sk_tools::getCurrResidentSetSizeMB()
1325 , sk_tools::getMaxResidentSetSizeMB()
1326 , loops
1327 , HUMANIZE(stats.min)
1328 , HUMANIZE(stats.median)
1329 , HUMANIZE(stats.mean)
1330 , HUMANIZE(stats.max)
1331 , stddev_percent
1332 , FLAGS_ms ? to_string(samples.count()).c_str() : stats.plot.c_str()
1333 , config
1334 , bench->getUniqueName()
1335 );
1336 }
1337
1338 #if SK_SUPPORT_GPU
1339 if (FLAGS_gpuStats && Benchmark::kGPU_Backend == configs[i].backend) {
1340 GrContext* context = gGrFactory->get(configs[i].ctxType,
1341 configs[i].ctxOverrides);
1342 context->printCacheStats();
1343 context->printGpuStats();
1344 }
1345 #endif
1346
1347 if (FLAGS_verbose) {
1348 SkDebugf("Samples: ");
1349 for (int i = 0; i < samples.count(); i++) {
1350 SkDebugf("%s ", HUMANIZE(samples[i]));
1351 }
1352 SkDebugf("%s\n", bench->getUniqueName());
1353 }
1354 cleanup_run(target);
1355 }
1356 }
1357
1358 SkGraphics::PurgeAllCaches();
1359
1360 log->bench("memory_usage", 0,0);
1361 log->config("meta");
1362 log->metric("max_rss_mb", sk_tools::getMaxResidentSetSizeMB());
1363
1364 #if SK_SUPPORT_GPU
1365 // Make sure we clean up the global GrContextFactory here, otherwise we might race with the
1366 // SkEventTracer destructor
1367 gGrFactory.reset(nullptr);
1368 #endif
1369
1370 return 0;
1371 }
1372