1/**
2 * Command line application to run CanvasKit benchmarks in webpages using puppeteer. Different
3 * webpages can be specified to measure different aspects. The HTML page run contains the JS code
4 * to run the scenario and either 1) produce the perf output as a JSON object or 2) defer to
5 * puppeteer reading the tracing data.
6 *
7 */
8const puppeteer = require('puppeteer');
9const express = require('express');
10const fs = require('fs');
11const commandLineArgs = require('command-line-args');
12const commandLineUsage= require('command-line-usage');
13
// Flag definitions consumed by command-line-args (parsing) and
// command-line-usage (--help rendering).
const opts = [
  {
    name: 'bench_html',
    typeLabel: '{underline file}',
    description: 'An HTML file containing the bench harness.',
  },
  {
    name: 'canvaskit_js',
    typeLabel: '{underline file}',
    description: '(required) The path to canvaskit.js.',
  },
  {
    name: 'canvaskit_wasm',
    typeLabel: '{underline file}',
    description: '(required) The path to canvaskit.wasm.',
  },
  {
    name: 'input_lottie',
    typeLabel: '{underline file}',
    description: 'The Lottie JSON file to process.',
  },
  {
    name: 'input_skp',
    typeLabel: '{underline file}',
    description: 'The SKP file to process.',
  },
  {
    name: 'assets',
    typeLabel: '{underline file}',
    description: 'A directory containing any assets needed by lottie files or tests (e.g. images/fonts).',
  },
  {
    name: 'output',
    typeLabel: '{underline file}',
    description: 'The perf file to write. Defaults to perf.json',
  },
  {
    name: 'chromium_executable_path',
    typeLabel: '{underline file}',
    description: 'The chromium executable to be used by puppeteer to run tests',
  },
  {
    name: 'merge_output_as',
    typeLabel: String,
    description: 'Overwrites a json property in an existing output file.',
  },
  {
    name: 'use_gpu',
    description: 'Whether we should run in non-headless mode with GPU.',
    type: Boolean,
  },
  {
    name: 'use_tracing',
    description: 'If non-empty, will be interpreted as the tracing categories that should be ' +
      'measured and returned in the output JSON. Example: "blink,cc,gpu"',
    type: String,
  },
  {
    name: 'enable_simd',
    description: 'enable execution of wasm SIMD operations in chromium',
    type: Boolean,
  },
  {
    name: 'port',
    description: 'The port number to use, defaults to 8081.',
    type: Number,
  },
  {
    name: 'query_params',
    // BUG FIX: the two halves of this description were joined without a
    // separating space, so --help rendered "passingoptions".
    description: 'The query params to be added to the testing page URL. Useful for passing ' +
      'options to the perf html page.',
    type: String,
    multiple: true,
  },
  {
    name: 'help',
    alias: 'h',
    type: Boolean,
    description: 'Print this usage guide.',
  },
  {
    name: 'timeout',
    description: 'Number of seconds to allow test to run.',
    type: Number,
  },
];

// Sections fed to command-line-usage when --help is requested.
const usage = [
  {
    header: 'Skia Web-based Performance Metrics of CanvasKit',
    content: 'Command line application to capture performance metrics from a browser.',
  },
  {
    header: 'Options',
    optionList: opts,
  },
];
111
// Parse the command line flags, then fill in defaults for the optional ones.
const options = commandLineArgs(opts);

options.output = options.output || 'perf.json';
options.port = options.port || 8081;
options.timeout = options.timeout || 60; // seconds

// --help short-circuits everything else.
if (options.help) {
  console.log(commandLineUsage(usage));
  process.exit(0);
}

// The bench page is the one truly required positional-ish input.
if (!options.bench_html) {
  console.error('You must supply the bench_html file to run.');
  console.log(commandLineUsage(usage));
  process.exit(1);
}
const driverHTML = fs.readFileSync(options.bench_html, 'utf8');

// This express webserver will serve the HTML file running the benchmark and any additional assets
// needed to run the tests.
const app = express();
app.get('/', (req, res) => res.send(driverHTML));

if (!options.canvaskit_js) {
  console.error('You must supply path to canvaskit.js.');
  console.log(commandLineUsage(usage));
  process.exit(1);
}

if (!options.canvaskit_wasm) {
  console.error('You must supply path to canvaskit.wasm.');
  console.log(commandLineUsage(usage));
  process.exit(1);
}

const benchmarkJS = fs.readFileSync('benchmark.js', 'utf8');
const canvasPerfJS = fs.readFileSync('canvas_perf.js', 'utf8');
const canvasKitJS = fs.readFileSync(options.canvaskit_js, 'utf8');
// BUG FIX: read the wasm as a raw Buffer (no encoding) instead of a 'binary'
// string that was round-tripped through the deprecated, unsafe `new Buffer()`
// constructor (removed-in-spirit since Node 6; see Buffer.from docs).
const canvasKitWASM = fs.readFileSync(options.canvaskit_wasm);

app.get('/static/benchmark.js', (req, res) => res.send(benchmarkJS));
app.get('/static/canvas_perf.js', (req, res) => res.send(canvasPerfJS));
app.get('/static/canvaskit.js', (req, res) => res.send(canvasKitJS));
app.get('/static/canvaskit.wasm', (req, res) => {
  // Set the MIME type so the browser can compile it efficiently (streaming).
  res.type('application/wasm');
  res.send(canvasKitWASM);
});
167
168
// Optional content the benchmark page may request.
if (options.input_lottie) {
  const lottieJSON = fs.readFileSync(options.input_lottie, 'utf8');
  app.get('/static/lottie.json', (req, res) => res.send(lottieJSON));
}
if (options.input_skp) {
  // BUG FIX: read the SKP as a raw Buffer (no encoding) rather than a 'binary'
  // string fed into the deprecated `new Buffer()` constructor.
  const skpBytes = fs.readFileSync(options.input_skp);
  app.get('/static/test.skp', (req, res) => res.send(skpBytes));
}
if (options.assets) {
  app.use('/static/assets/', express.static(options.assets));
  console.log('assets served from', options.assets);
}

app.listen(options.port, () => console.log('- Local web server started.'));

// The URL fragment tells the bench harness which backend to exercise.
let hash = '#cpu';
if (options.use_gpu) {
  hash = '#gpu';
}
// Pass any raw "key=value" strings from --query_params through to the page.
let query_param_string = '?';
if (options.query_params) {
  for (const string of options.query_params) {
    query_param_string += string + '&';
  }
}
const targetURL = `http://localhost:${options.port}/${query_param_string}${hash}`;
const viewPort = {width: 1000, height: 1000};
198
199// Drive chrome to load the web page from the server we have running.
200async function driveBrowser() {
201  console.log('- Launching chrome for ' + options.input);
202  let browser;
203  let page;
204  const headless = !options.use_gpu;
205  let browser_args = [
206      '--no-sandbox',
207      '--disable-setuid-sandbox',
208      '--window-size=' + viewPort.width + ',' + viewPort.height,
209      // The following two params allow Chrome to run at an unlimited fps. Note, if there is
210      // already a chrome instance running, these arguments will have NO EFFECT, as the existing
211      // Chrome instance will be used instead of puppeteer spinning up a new one.
212      '--disable-frame-rate-limit',
213      '--disable-gpu-vsync',
214  ];
215  if (options.enable_simd) {
216    browser_args.push('--enable-features=WebAssemblySimd');
217  }
218  if (options.use_gpu) {
219    browser_args.push('--ignore-gpu-blacklist');
220    browser_args.push('--ignore-gpu-blocklist');
221    browser_args.push('--enable-gpu-rasterization');
222  }
223  console.log("Running with headless: " + headless + " args: " + browser_args);
224  try {
225    browser = await puppeteer.launch({
226      headless: headless,
227      args: browser_args,
228      executablePath: options.chromium_executable_path
229    });
230    page = await browser.newPage();
231    await page.setViewport(viewPort);
232  } catch (e) {
233    console.log('Could not open the browser.', e);
234    process.exit(1);
235  }
236  console.log("Loading " + targetURL);
237  try {
238    await page.goto(targetURL, {
239      timeout: 60000,
240      waitUntil: 'networkidle0'
241    });
242
243    // Page is mostly loaded, wait for benchmark page to report itself ready.
244    console.log('Waiting 15s for benchmark to be ready');
245    await page.waitForFunction(`(window._perfReady === true) || window._error`, {
246      timeout: 15000,
247    });
248
249    let err = await page.evaluate('window._error');
250    if (err) {
251      console.log(`ERROR: ${err}`);
252      process.exit(1);
253    }
254
255    // Start trace if requested
256    if (options.use_tracing) {
257      const categories = options.use_tracing.split(',');
258      console.log('Collecting tracing data for categories', categories);
259      await page.tracing.start({
260        path: options.output,
261        screenshots: false,
262        categories: categories,
263      });
264    }
265
266    // Benchmarks should have a button with id #start_bench to click (this also makes manual
267    // debugging easier).
268    await page.click('#start_bench');
269
270    console.log(`Waiting ${options.timeout}s for run to be done`);
271    await page.waitForFunction(`(window._perfDone === true) || window._error`, {
272      timeout: options.timeout*1000,
273    });
274
275    err = await page.evaluate('window._error');
276    if (err) {
277      console.log(`ERROR: ${err}`);
278      process.exit(1);
279    }
280
281    if (options.use_tracing) {
282      // Stop Trace.
283      await page.tracing.stop();
284    } else {
285      const perfResults = await page.evaluate('window._perfData');
286      console.debug('Perf results: ', perfResults);
287
288      if (options.merge_output_as) {
289        const existing_output_file_contents = fs.readFileSync(options.output, 'utf8');
290        let existing_dataset = {};
291        try {
292          existing_dataset = JSON.parse(existing_output_file_contents);
293        } catch (e) {}
294
295        existing_dataset[options.merge_output_as] = perfResults;
296        fs.writeFileSync(options.output, JSON.stringify(existing_dataset));
297      } else {
298        fs.writeFileSync(options.output, JSON.stringify(perfResults));
299      }
300    }
301
302  } catch(e) {
303    console.log('Timed out while loading or drawing.', e);
304    await browser.close();
305    process.exit(1);
306  }
307
308  await browser.close();
309  // Need to call exit() because the web server is still running.
310  process.exit(0);
311}
312
// Kick off the run. Attach a rejection handler so an error that escapes
// driveBrowser's own try/catch blocks still exits non-zero (the web server
// would otherwise keep the process alive) instead of leaving a floating promise.
driveBrowser().catch((e) => {
  console.error('Unexpected error while driving the browser.', e);
  process.exit(1);
});