/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <time.h>
#include <pthread.h>

#include "igt.h"
#include "igt_sysfs.h"

#define LOCAL_I915_EXEC_NO_RELOC (1<<11)
#define LOCAL_I915_EXEC_HANDLE_LUT (1<<12)

#define LOCAL_I915_EXEC_BSD_SHIFT (13)
#define LOCAL_I915_EXEC_BSD_MASK (3 << LOCAL_I915_EXEC_BSD_SHIFT)

#define MAX_PRIO LOCAL_I915_CONTEXT_MAX_USER_PRIORITY
#define MIN_PRIO LOCAL_I915_CONTEXT_MIN_USER_PRIORITY

#define ENGINE_MASK (I915_EXEC_RING_MASK | LOCAL_I915_EXEC_BSD_MASK)

IGT_TEST_DESCRIPTION("Basic check of ring<->ring write synchronisation.");

/*
 * Testcase: Basic check of sync
 *
 * Extremely efficient at catching missed irqs
 */

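/*
 * A minimal usage sketch, assuming the standard IGT build in which this
 * file produces the gem_sync binary: each subtest drives one engine (or
 * all engines) in a tight submit/wait loop and reports the mean cycle
 * time, e.g.
 *
 *   ./gem_sync --run-subtest basic-each
 *   ./gem_sync --run-subtest store-render
 *
 * A missed interrupt shows up as an inflated cycle time and is flagged by
 * intel_detect_and_clear_missed_interrupts().
 */

/*
 * Return a monotonic timestamp in seconds, preferring the raw/coarse
 * clocks where available and sticking with the first clock that works.
 */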
static double gettime(void)
{
        static clockid_t clock = -1;
        struct timespec ts;

        /* Stay on the same clock for consistency. */
        if (clock != (clockid_t)-1) {
                if (clock_gettime(clock, &ts))
                        goto error;
                goto out;
        }

#ifdef CLOCK_MONOTONIC_RAW
        if (!clock_gettime(clock = CLOCK_MONOTONIC_RAW, &ts))
                goto out;
#endif
#ifdef CLOCK_MONOTONIC_COARSE
        if (!clock_gettime(clock = CLOCK_MONOTONIC_COARSE, &ts))
                goto out;
#endif
        if (!clock_gettime(clock = CLOCK_MONOTONIC, &ts))
                goto out;
error:
        igt_warn("Could not read monotonic time: %s\n",
                 strerror(errno));
        igt_assert(0);
        return 0;

out:
        return ts.tv_sec + 1e-9*ts.tv_nsec;
}

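/*
 * sync_ring: measure the round-trip latency of submitting a trivial batch
 * and waiting for its completion. With ALL_ENGINES, the child count is
 * multiplied by the number of engines and the children are spread
 * round-robin across them.
 */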
static void
sync_ring(int fd, unsigned ring, int num_children, int timeout)
{
        unsigned engines[16];
        const char *names[16];
        int num_engines = 0;

        if (ring == ALL_ENGINES) {
                for_each_physical_engine(fd, ring) {
                        names[num_engines] = e__->name;
                        engines[num_engines++] = ring;
                        if (num_engines == ARRAY_SIZE(engines))
                                break;
                }

                num_children *= num_engines;
        } else {
                gem_require_ring(fd, ring);
                names[num_engines] = NULL;
                engines[num_engines++] = ring;
        }

        intel_detect_and_clear_missed_interrupts(fd);
        igt_fork(child, num_children) {
                const uint32_t bbe = MI_BATCH_BUFFER_END;
                struct drm_i915_gem_exec_object2 object;
                struct drm_i915_gem_execbuffer2 execbuf;
                double start, elapsed;
                unsigned long cycles;

                memset(&object, 0, sizeof(object));
                object.handle = gem_create(fd, 4096);
                gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));

                memset(&execbuf, 0, sizeof(execbuf));
                execbuf.buffers_ptr = to_user_pointer(&object);
                execbuf.buffer_count = 1;
                execbuf.flags = engines[child % num_engines];
                gem_execbuf(fd, &execbuf);
                gem_sync(fd, object.handle);

                start = gettime();
                cycles = 0;
                do {
                        do {
                                gem_execbuf(fd, &execbuf);
                                gem_sync(fd, object.handle);
                        } while (++cycles & 1023);
                } while ((elapsed = gettime() - start) < timeout);
                igt_info("%s%sompleted %ld cycles: %.3f us\n",
                         names[child % num_engines] ?: "",
                         names[child % num_engines] ? " c" : "C",
                         cycles, elapsed*1e6/cycles);

                gem_close(fd, object.handle);
        }
        igt_waitchildren_timeout(timeout+10, NULL);
        igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}

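/*
 * idle_ring: as sync_ring, but wait for the whole GPU to idle after each
 * submission (gem_quiescent_gpu) rather than just the batch, exercising
 * the idle path.
 */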
static void
idle_ring(int fd, unsigned ring, int timeout)
{
        const uint32_t bbe = MI_BATCH_BUFFER_END;
        struct drm_i915_gem_exec_object2 object;
        struct drm_i915_gem_execbuffer2 execbuf;
        double start, elapsed;
        unsigned long cycles;

        gem_require_ring(fd, ring);

        memset(&object, 0, sizeof(object));
        object.handle = gem_create(fd, 4096);
        gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));

        memset(&execbuf, 0, sizeof(execbuf));
        execbuf.buffers_ptr = to_user_pointer(&object);
        execbuf.buffer_count = 1;
        execbuf.flags = ring;
        gem_execbuf(fd, &execbuf);
        gem_sync(fd, object.handle);

        intel_detect_and_clear_missed_interrupts(fd);
        start = gettime();
        cycles = 0;
        do {
                do {
                        gem_execbuf(fd, &execbuf);
                        gem_quiescent_gpu(fd);
                } while (++cycles & 1023);
        } while ((elapsed = gettime() - start) < timeout);
        igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);

        igt_info("Completed %ld cycles: %.3f us\n",
                 cycles, elapsed*1e6/cycles);

        gem_close(fd, object.handle);
}

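/*
 * wakeup_ring: measure the latency of waking a sleeping waiter. A spinning
 * batch keeps the engine busy while the CPU sleeps in gem_sync(); ending
 * the spinner triggers the wakeup being timed. The cost of ending the
 * spinner itself is measured first and subtracted as a baseline; wlen
 * extra batches are queued behind the spinner in the measured phase.
 */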
static void
wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
{
        unsigned engines[16];
        const char *names[16];
        int num_engines = 0;

        if (ring == ALL_ENGINES) {
                for_each_physical_engine(fd, ring) {
                        if (!gem_can_store_dword(fd, ring))
                                continue;

                        names[num_engines] = e__->name;
                        engines[num_engines++] = ring;
                        if (num_engines == ARRAY_SIZE(engines))
                                break;
                }
                igt_require(num_engines);
        } else {
                gem_require_ring(fd, ring);
                igt_require(gem_can_store_dword(fd, ring));
                names[num_engines] = NULL;
                engines[num_engines++] = ring;
        }

        intel_detect_and_clear_missed_interrupts(fd);
        igt_fork(child, num_engines) {
                const uint32_t bbe = MI_BATCH_BUFFER_END;
                struct drm_i915_gem_exec_object2 object;
                struct drm_i915_gem_execbuffer2 execbuf;
                double end, this, elapsed, now, baseline;
                unsigned long cycles;
                igt_spin_t *spin;

                memset(&object, 0, sizeof(object));
                object.handle = gem_create(fd, 4096);
                gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));

                memset(&execbuf, 0, sizeof(execbuf));
                execbuf.buffers_ptr = to_user_pointer(&object);
                execbuf.buffer_count = 1;
                execbuf.flags = engines[child % num_engines];

                spin = __igt_spin_new(fd,
                                      .engine = execbuf.flags,
                                      .flags = (IGT_SPIN_POLL_RUN |
                                                IGT_SPIN_FAST));
                igt_assert(igt_spin_has_poll(spin));

                gem_execbuf(fd, &execbuf);

                igt_spin_end(spin);
                gem_sync(fd, object.handle);

                for (int warmup = 0; warmup <= 1; warmup++) {
                        end = gettime() + timeout/10.;
                        elapsed = 0;
                        cycles = 0;
                        do {
                                igt_spin_reset(spin);

                                gem_execbuf(fd, &spin->execbuf);
                                igt_spin_busywait_until_started(spin);

                                this = gettime();
                                igt_spin_end(spin);
                                gem_sync(fd, spin->handle);
                                now = gettime();

                                elapsed += now - this;
                                cycles++;
                        } while (now < end);
                        baseline = elapsed / cycles;
                }
                igt_info("%s%saseline %ld cycles: %.3f us\n",
                         names[child % num_engines] ?: "",
                         names[child % num_engines] ? " b" : "B",
                         cycles, elapsed*1e6/cycles);

                end = gettime() + timeout;
                elapsed = 0;
                cycles = 0;
                do {
                        igt_spin_reset(spin);

                        gem_execbuf(fd, &spin->execbuf);
                        igt_spin_busywait_until_started(spin);

                        for (int n = 0; n < wlen; n++)
                                gem_execbuf(fd, &execbuf);

                        this = gettime();
                        igt_spin_end(spin);
                        gem_sync(fd, object.handle);
                        now = gettime();

                        elapsed += now - this;
                        cycles++;
                } while (now < end);
                elapsed -= cycles * baseline;

                igt_info("%s%sompleted %ld cycles: %.3f + %.3f us\n",
                         names[child % num_engines] ?: "",
                         names[child % num_engines] ? " c" : "C",
                         cycles, 1e6*baseline, elapsed*1e6/cycles);

                igt_spin_free(fd, spin);
                gem_close(fd, object.handle);
        }
        igt_waitchildren_timeout(2*timeout, NULL);
        igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}

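/*
 * active_ring: ping-pong between two spinning batches so the engine never
 * goes idle, timing how quickly one batch can be retired and resubmitted
 * while its partner keeps the ring busy.
 */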
static void active_ring(int fd, unsigned ring, int timeout)
{
        unsigned engines[16];
        const char *names[16];
        int num_engines = 0;

        if (ring == ALL_ENGINES) {
                for_each_physical_engine(fd, ring) {
                        if (!gem_can_store_dword(fd, ring))
                                continue;

                        names[num_engines] = e__->name;
                        engines[num_engines++] = ring;
                        if (num_engines == ARRAY_SIZE(engines))
                                break;
                }
                igt_require(num_engines);
        } else {
                gem_require_ring(fd, ring);
                igt_require(gem_can_store_dword(fd, ring));
                names[num_engines] = NULL;
                engines[num_engines++] = ring;
        }

        intel_detect_and_clear_missed_interrupts(fd);
        igt_fork(child, num_engines) {
                double start, end, elapsed;
                unsigned long cycles;
                igt_spin_t *spin[2];

                /* Use each child's own engine; the loop variable 'ring'
                 * only holds the last engine found above. */
                spin[0] = __igt_spin_new(fd,
                                         .engine = engines[child % num_engines],
                                         .flags = IGT_SPIN_FAST);

                spin[1] = __igt_spin_new(fd,
                                         .engine = engines[child % num_engines],
                                         .flags = IGT_SPIN_FAST);

                start = gettime();
                end = start + timeout;
                cycles = 0;
                do {
                        for (int loop = 0; loop < 1024; loop++) {
                                igt_spin_t *s = spin[loop & 1];

                                igt_spin_end(s);
                                gem_sync(fd, s->handle);

                                igt_spin_reset(s);

                                gem_execbuf(fd, &s->execbuf);
                        }
                        cycles += 1024;
                } while ((elapsed = gettime()) < end);
                igt_spin_free(fd, spin[1]);
                igt_spin_free(fd, spin[0]);

                igt_info("%s%sompleted %ld cycles: %.3f us\n",
                         names[child % num_engines] ?: "",
                         names[child % num_engines] ? " c" : "C",
                         cycles, (elapsed - start)*1e6/cycles);
        }
        igt_waitchildren_timeout(2*timeout, NULL);
        igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}

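/*
 * active_wakeup_ring: combine wakeup_ring and active_ring; measure wakeup
 * latency while a second spinner keeps the engine continuously busy.
 */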
static void
active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
{
        unsigned engines[16];
        const char *names[16];
        int num_engines = 0;

        if (ring == ALL_ENGINES) {
                for_each_physical_engine(fd, ring) {
                        if (!gem_can_store_dword(fd, ring))
                                continue;

                        names[num_engines] = e__->name;
                        engines[num_engines++] = ring;
                        if (num_engines == ARRAY_SIZE(engines))
                                break;
                }
                igt_require(num_engines);
        } else {
                gem_require_ring(fd, ring);
                igt_require(gem_can_store_dword(fd, ring));
                names[num_engines] = NULL;
                engines[num_engines++] = ring;
        }

        intel_detect_and_clear_missed_interrupts(fd);
        igt_fork(child, num_engines) {
                const uint32_t bbe = MI_BATCH_BUFFER_END;
                struct drm_i915_gem_exec_object2 object;
                struct drm_i915_gem_execbuffer2 execbuf;
                double end, this, elapsed, now, baseline;
                unsigned long cycles;
                igt_spin_t *spin[2];

                memset(&object, 0, sizeof(object));
                object.handle = gem_create(fd, 4096);
                gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));

                memset(&execbuf, 0, sizeof(execbuf));
                execbuf.buffers_ptr = to_user_pointer(&object);
                execbuf.buffer_count = 1;
                execbuf.flags = engines[child % num_engines];

                spin[0] = __igt_spin_new(fd,
                                         .engine = execbuf.flags,
                                         .flags = (IGT_SPIN_POLL_RUN |
                                                   IGT_SPIN_FAST));
                igt_assert(igt_spin_has_poll(spin[0]));

                spin[1] = __igt_spin_new(fd,
                                         .engine = execbuf.flags,
                                         .flags = (IGT_SPIN_POLL_RUN |
                                                   IGT_SPIN_FAST));

                gem_execbuf(fd, &execbuf);

                igt_spin_end(spin[1]);
                igt_spin_end(spin[0]);
                gem_sync(fd, object.handle);

                for (int warmup = 0; warmup <= 1; warmup++) {
                        igt_spin_reset(spin[0]);

                        gem_execbuf(fd, &spin[0]->execbuf);

                        end = gettime() + timeout/10.;
                        elapsed = 0;
                        cycles = 0;
                        do {
                                igt_spin_busywait_until_started(spin[0]);

                                igt_spin_reset(spin[1]);

                                gem_execbuf(fd, &spin[1]->execbuf);

                                this = gettime();
                                igt_spin_end(spin[0]);
                                gem_sync(fd, spin[0]->handle);
                                now = gettime();

                                elapsed += now - this;
                                cycles++;
                                igt_swap(spin[0], spin[1]);
                        } while (now < end);
                        igt_spin_end(spin[0]);
                        baseline = elapsed / cycles;
                }
                igt_info("%s%saseline %ld cycles: %.3f us\n",
                         names[child % num_engines] ?: "",
                         names[child % num_engines] ? " b" : "B",
                         cycles, elapsed*1e6/cycles);

                igt_spin_reset(spin[0]);

                gem_execbuf(fd, &spin[0]->execbuf);

                end = gettime() + timeout;
                elapsed = 0;
                cycles = 0;
                do {
                        igt_spin_busywait_until_started(spin[0]);

                        for (int n = 0; n < wlen; n++)
                                gem_execbuf(fd, &execbuf);

                        igt_spin_reset(spin[1]);

                        gem_execbuf(fd, &spin[1]->execbuf);

                        this = gettime();
                        igt_spin_end(spin[0]);
                        gem_sync(fd, object.handle);
                        now = gettime();

                        elapsed += now - this;
                        cycles++;
                        igt_swap(spin[0], spin[1]);
                } while (now < end);
                igt_spin_end(spin[0]);
                elapsed -= cycles * baseline;

                igt_info("%s%sompleted %ld cycles: %.3f + %.3f us\n",
                         names[child % num_engines] ?: "",
                         names[child % num_engines] ? " c" : "C",
                         cycles, 1e6*baseline, elapsed*1e6/cycles);

                igt_spin_free(fd, spin[1]);
                igt_spin_free(fd, spin[0]);
                gem_close(fd, object.handle);
        }
        igt_waitchildren_timeout(2*timeout, NULL);
        igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}

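/*
 * store_ring: as sync_ring, but each batch performs 1024
 * MI_STORE_DWORD_IMM writes into a target buffer, so completion covers
 * real write traffic and not just an empty batch.
 */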
static void
store_ring(int fd, unsigned ring, int num_children, int timeout)
{
        const int gen = intel_gen(intel_get_drm_devid(fd));
        unsigned engines[16];
        const char *names[16];
        int num_engines = 0;

        if (ring == ALL_ENGINES) {
                for_each_physical_engine(fd, ring) {
                        if (!gem_can_store_dword(fd, ring))
                                continue;

                        names[num_engines] = e__->name;
                        engines[num_engines++] = ring;
                        if (num_engines == ARRAY_SIZE(engines))
                                break;
                }

                num_children *= num_engines;
        } else {
                gem_require_ring(fd, ring);
                igt_require(gem_can_store_dword(fd, ring));
                names[num_engines] = NULL;
                engines[num_engines++] = ring;
        }

        intel_detect_and_clear_missed_interrupts(fd);
        igt_fork(child, num_children) {
                const uint32_t bbe = MI_BATCH_BUFFER_END;
                struct drm_i915_gem_exec_object2 object[2];
                struct drm_i915_gem_relocation_entry reloc[1024];
                struct drm_i915_gem_execbuffer2 execbuf;
                double start, elapsed;
                unsigned long cycles;
                uint32_t *batch, *b;

                memset(&execbuf, 0, sizeof(execbuf));
                execbuf.buffers_ptr = to_user_pointer(object);
                execbuf.flags = engines[child % num_engines];
                execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC;
                execbuf.flags |= LOCAL_I915_EXEC_HANDLE_LUT;
                if (gen < 6)
                        execbuf.flags |= I915_EXEC_SECURE;

                memset(object, 0, sizeof(object));
                object[0].handle = gem_create(fd, 4096);
                gem_write(fd, object[0].handle, 0, &bbe, sizeof(bbe));
                execbuf.buffer_count = 1;
                gem_execbuf(fd, &execbuf);

                object[0].flags |= EXEC_OBJECT_WRITE;
                object[1].handle = gem_create(fd, 20*1024);

                object[1].relocs_ptr = to_user_pointer(reloc);
                object[1].relocation_count = 1024;

                batch = gem_mmap__cpu(fd, object[1].handle, 0, 20*1024,
                                      PROT_WRITE | PROT_READ);
                gem_set_domain(fd, object[1].handle,
                               I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

                memset(reloc, 0, sizeof(reloc));
                b = batch;
                for (int i = 0; i < 1024; i++) {
                        uint64_t offset;

                        reloc[i].presumed_offset = object[0].offset;
                        reloc[i].offset = (b - batch + 1) * sizeof(*batch);
                        reloc[i].delta = i * sizeof(uint32_t);
                        reloc[i].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
                        reloc[i].write_domain = I915_GEM_DOMAIN_INSTRUCTION;

                        offset = object[0].offset + reloc[i].delta;
                        *b++ = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
                        if (gen >= 8) {
                                *b++ = offset;
                                *b++ = offset >> 32;
                        } else if (gen >= 4) {
                                *b++ = 0;
                                *b++ = offset;
                                reloc[i].offset += sizeof(*batch);
                        } else {
                                b[-1] -= 1;
                                *b++ = offset;
                        }
                        *b++ = i;
                }
                *b++ = MI_BATCH_BUFFER_END;
                igt_assert((b - batch)*sizeof(uint32_t) < 20*1024);
                munmap(batch, 20*1024);
                execbuf.buffer_count = 2;
                gem_execbuf(fd, &execbuf);
                gem_sync(fd, object[1].handle);

                start = gettime();
                cycles = 0;
                do {
                        do {
                                gem_execbuf(fd, &execbuf);
                                gem_sync(fd, object[1].handle);
                        } while (++cycles & 1023);
                } while ((elapsed = gettime() - start) < timeout);
                igt_info("%s%sompleted %ld cycles: %.3f us\n",
                         names[child % num_engines] ?: "",
                         names[child % num_engines] ? " c" : "C",
                         cycles, elapsed*1e6/cycles);

                gem_close(fd, object[1].handle);
                gem_close(fd, object[0].handle);
        }
        igt_waitchildren_timeout(timeout+10, NULL);
        igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}

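/*
 * switch_ring: build two contexts on the same engine, one with an empty
 * batch and one with 1024 stores, submit both back-to-back, and compare
 * the wait for the trailing lightweight batch (which implies a context
 * switch behind the heavyweight one) against a baseline of draining both
 * in submission order.
 */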
static void
switch_ring(int fd, unsigned ring, int num_children, int timeout)
{
        const int gen = intel_gen(intel_get_drm_devid(fd));
        unsigned engines[16];
        const char *names[16];
        int num_engines = 0;

        gem_require_contexts(fd);

        if (ring == ALL_ENGINES) {
                for_each_physical_engine(fd, ring) {
                        if (!gem_can_store_dword(fd, ring))
                                continue;

                        names[num_engines] = e__->name;
                        engines[num_engines++] = ring;
                        if (num_engines == ARRAY_SIZE(engines))
                                break;
                }

                num_children *= num_engines;
        } else {
                gem_require_ring(fd, ring);
                igt_require(gem_can_store_dword(fd, ring));
                names[num_engines] = NULL;
                engines[num_engines++] = ring;
        }

        intel_detect_and_clear_missed_interrupts(fd);
        igt_fork(child, num_children) {
                struct context {
                        struct drm_i915_gem_exec_object2 object[2];
                        struct drm_i915_gem_relocation_entry reloc[1024];
                        struct drm_i915_gem_execbuffer2 execbuf;
                } contexts[2];
                double elapsed, baseline;
                unsigned long cycles;

                for (int i = 0; i < ARRAY_SIZE(contexts); i++) {
                        const uint32_t bbe = MI_BATCH_BUFFER_END;
                        const uint32_t sz = 32 << 10;
                        struct context *c = &contexts[i];
                        uint32_t *batch, *b;

                        memset(&c->execbuf, 0, sizeof(c->execbuf));
                        c->execbuf.buffers_ptr = to_user_pointer(c->object);
                        c->execbuf.flags = engines[child % num_engines];
                        c->execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC;
                        c->execbuf.flags |= LOCAL_I915_EXEC_HANDLE_LUT;
                        if (gen < 6)
                                c->execbuf.flags |= I915_EXEC_SECURE;
                        c->execbuf.rsvd1 = gem_context_create(fd);

                        memset(c->object, 0, sizeof(c->object));
                        c->object[0].handle = gem_create(fd, 4096);
                        gem_write(fd, c->object[0].handle, 0, &bbe, sizeof(bbe));
                        c->execbuf.buffer_count = 1;
                        gem_execbuf(fd, &c->execbuf);

                        c->object[0].flags |= EXEC_OBJECT_WRITE;
                        c->object[1].handle = gem_create(fd, sz);

                        c->object[1].relocs_ptr = to_user_pointer(c->reloc);
                        c->object[1].relocation_count = 1024 * i;

                        batch = gem_mmap__cpu(fd, c->object[1].handle, 0, sz,
                                              PROT_WRITE | PROT_READ);
                        gem_set_domain(fd, c->object[1].handle,
                                       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

                        memset(c->reloc, 0, sizeof(c->reloc));
                        b = batch;
                        for (int r = 0; r < c->object[1].relocation_count; r++) {
                                uint64_t offset;

                                c->reloc[r].presumed_offset = c->object[0].offset;
                                c->reloc[r].offset = (b - batch + 1) * sizeof(*batch);
                                c->reloc[r].delta = r * sizeof(uint32_t);
                                c->reloc[r].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
                                c->reloc[r].write_domain = I915_GEM_DOMAIN_INSTRUCTION;

                                offset = c->object[0].offset + c->reloc[r].delta;
                                *b++ = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
                                if (gen >= 8) {
                                        *b++ = offset;
                                        *b++ = offset >> 32;
                                } else if (gen >= 4) {
                                        *b++ = 0;
                                        *b++ = offset;
                                        c->reloc[r].offset += sizeof(*batch);
                                } else {
                                        b[-1] -= 1;
                                        *b++ = offset;
                                }
                                *b++ = r;
                                *b++ = 0x5 << 23;
                        }
                        *b++ = MI_BATCH_BUFFER_END;
                        igt_assert((b - batch)*sizeof(uint32_t) < sz);
                        munmap(batch, sz);
                        c->execbuf.buffer_count = 2;
                        gem_execbuf(fd, &c->execbuf);
                        gem_sync(fd, c->object[1].handle);
                }

                cycles = 0;
                baseline = 0;
                igt_until_timeout(timeout) {
                        do {
                                double this;

                                gem_execbuf(fd, &contexts[1].execbuf);
                                gem_execbuf(fd, &contexts[0].execbuf);

                                this = gettime();
                                gem_sync(fd, contexts[1].object[1].handle);
                                gem_sync(fd, contexts[0].object[1].handle);
                                baseline += gettime() - this;
                        } while (++cycles & 1023);
                }
                baseline /= cycles;

                cycles = 0;
                elapsed = 0;
                igt_until_timeout(timeout) {
                        do {
                                double this;

                                gem_execbuf(fd, &contexts[1].execbuf);
                                gem_execbuf(fd, &contexts[0].execbuf);

                                this = gettime();
                                gem_sync(fd, contexts[0].object[1].handle);
                                elapsed += gettime() - this;

                                gem_sync(fd, contexts[1].object[1].handle);
                        } while (++cycles & 1023);
                }
                elapsed /= cycles;

                igt_info("%s%sompleted %ld cycles: %.3f us, baseline %.3f us\n",
                         names[child % num_engines] ?: "",
                         names[child % num_engines] ? " c" : "C",
                         cycles, elapsed*1e6, baseline*1e6);

                for (int i = 0; i < ARRAY_SIZE(contexts); i++) {
                        gem_close(fd, contexts[i].object[1].handle);
                        gem_close(fd, contexts[i].object[0].handle);
                        gem_context_destroy(fd, contexts[i].execbuf.rsvd1);
                }
        }
        igt_waitchildren_timeout(timeout+10, NULL);
        igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}

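/* Swap helper for igt_permute_array(). */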
static void xchg(void *array, unsigned i, unsigned j)
{
        uint32_t *u32 = array;
        uint32_t tmp = u32[i];
        u32[i] = u32[j];
        u32[j] = tmp;
}

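/*
 * Per-thread state for store_many: each waiter handshakes with the
 * submitter over a condvar. 'ready' is raised by the submitter once fresh
 * batches have been queued, and cleared by the thread when it has finished
 * waiting on its own batch plus the permuted set of all handles; a
 * negative '*done' tells the thread to exit.
 */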
struct waiter {
        pthread_t thread;
        pthread_mutex_t mutex;
        pthread_cond_t cond;

        int ready;
        volatile int *done;

        int fd;
        struct drm_i915_gem_exec_object2 object;
        uint32_t handles[64];
};

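/* Waiter thread body: sleep until kicked, then sync on all the handles. */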
static void *waiter(void *arg)
{
        struct waiter *w = arg;

        do {
                pthread_mutex_lock(&w->mutex);
                w->ready = 0;
                pthread_cond_signal(&w->cond);
                while (!w->ready)
                        pthread_cond_wait(&w->cond, &w->mutex);
                pthread_mutex_unlock(&w->mutex);
                if (*w->done < 0)
                        return NULL;

                gem_sync(w->fd, w->object.handle);
                for (int n = 0; n < ARRAY_SIZE(w->handles); n++)
                        gem_sync(w->fd, w->handles[n]);
        } while (1);
}

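/*
 * __store_many: one submitter feeding 64 waiter threads. Each iteration
 * queues a store batch per thread in a random order, then each thread
 * waits on a randomly permuted set of all the batches, stressing many
 * concurrent waiters on shared requests.
 */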
static void
__store_many(int fd, unsigned ring, int timeout, unsigned long *cycles)
{
        const int gen = intel_gen(intel_get_drm_devid(fd));
        const uint32_t bbe = MI_BATCH_BUFFER_END;
        struct drm_i915_gem_exec_object2 object[2];
        struct drm_i915_gem_execbuffer2 execbuf;
        struct drm_i915_gem_relocation_entry reloc[1024];
        struct waiter threads[64];
        int order[64];
        uint32_t *batch, *b;
        int done;

        memset(&execbuf, 0, sizeof(execbuf));
        execbuf.buffers_ptr = to_user_pointer(object);
        execbuf.flags = ring;
        execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC;
        execbuf.flags |= LOCAL_I915_EXEC_HANDLE_LUT;
        if (gen < 6)
                execbuf.flags |= I915_EXEC_SECURE;

        memset(object, 0, sizeof(object));
        object[0].handle = gem_create(fd, 4096);
        gem_write(fd, object[0].handle, 0, &bbe, sizeof(bbe));
        execbuf.buffer_count = 1;
        gem_execbuf(fd, &execbuf);
        object[0].flags |= EXEC_OBJECT_WRITE;

        object[1].relocs_ptr = to_user_pointer(reloc);
        object[1].relocation_count = 1024;
        execbuf.buffer_count = 2;

        memset(reloc, 0, sizeof(reloc));
        b = batch = malloc(20*1024);
        for (int i = 0; i < 1024; i++) {
                uint64_t offset;

                reloc[i].presumed_offset = object[0].offset;
                reloc[i].offset = (b - batch + 1) * sizeof(*batch);
                reloc[i].delta = i * sizeof(uint32_t);
                reloc[i].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
                reloc[i].write_domain = I915_GEM_DOMAIN_INSTRUCTION;

                offset = object[0].offset + reloc[i].delta;
                *b++ = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
                if (gen >= 8) {
                        *b++ = offset;
                        *b++ = offset >> 32;
                } else if (gen >= 4) {
                        *b++ = 0;
                        *b++ = offset;
                        reloc[i].offset += sizeof(*batch);
                } else {
                        b[-1] -= 1;
                        *b++ = offset;
                }
                *b++ = i;
        }
        *b++ = MI_BATCH_BUFFER_END;
        igt_assert((b - batch)*sizeof(uint32_t) < 20*1024);

        done = 0;
        for (int i = 0; i < ARRAY_SIZE(threads); i++) {
                threads[i].fd = fd;
                threads[i].object = object[1];
                threads[i].object.handle = gem_create(fd, 20*1024);
                gem_write(fd, threads[i].object.handle, 0, batch, 20*1024);

                pthread_cond_init(&threads[i].cond, NULL);
                pthread_mutex_init(&threads[i].mutex, NULL);
                threads[i].done = &done;
                threads[i].ready = 0;

                pthread_create(&threads[i].thread, NULL, waiter, &threads[i]);
                order[i] = i;
        }
        free(batch);

        for (int i = 0; i < ARRAY_SIZE(threads); i++) {
                for (int j = 0; j < ARRAY_SIZE(threads); j++)
                        threads[i].handles[j] = threads[j].object.handle;
        }

        igt_until_timeout(timeout) {
                for (int i = 0; i < ARRAY_SIZE(threads); i++) {
                        pthread_mutex_lock(&threads[i].mutex);
                        while (threads[i].ready)
                                pthread_cond_wait(&threads[i].cond,
                                                  &threads[i].mutex);
                        pthread_mutex_unlock(&threads[i].mutex);
                        igt_permute_array(threads[i].handles,
                                          ARRAY_SIZE(threads[i].handles),
                                          xchg);
                }

                igt_permute_array(order, ARRAY_SIZE(threads), xchg);
                for (int i = 0; i < ARRAY_SIZE(threads); i++) {
                        object[1] = threads[i].object;
                        gem_execbuf(fd, &execbuf);
                        threads[i].object = object[1];
                }
                ++*cycles;

                for (int i = 0; i < ARRAY_SIZE(threads); i++) {
                        struct waiter *w = &threads[order[i]];

                        w->ready = 1;
                        pthread_cond_signal(&w->cond);
                }
        }

        for (int i = 0; i < ARRAY_SIZE(threads); i++) {
                pthread_mutex_lock(&threads[i].mutex);
                while (threads[i].ready)
                        pthread_cond_wait(&threads[i].cond, &threads[i].mutex);
                pthread_mutex_unlock(&threads[i].mutex);
        }
        done = -1;
        for (int i = 0; i < ARRAY_SIZE(threads); i++) {
                threads[i].ready = 1;
                pthread_cond_signal(&threads[i].cond);
                pthread_join(threads[i].thread, NULL);
                gem_close(fd, threads[i].object.handle);
        }

        gem_close(fd, object[0].handle);
}

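/*
 * store_many: run __store_many on one engine, or fork one child per
 * capable engine, with cycle counts returned through a shared mmap page.
 */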
static void
store_many(int fd, unsigned ring, int timeout)
{
        unsigned long *shared;
        const char *names[16];
        int n = 0;

        shared = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
        igt_assert(shared != MAP_FAILED);

        intel_detect_and_clear_missed_interrupts(fd);

        if (ring == ALL_ENGINES) {
                for_each_physical_engine(fd, ring) {
                        if (!gem_can_store_dword(fd, ring))
                                continue;

                        igt_fork(child, 1)
                                __store_many(fd,
                                             ring,
                                             timeout,
                                             &shared[n]);

                        names[n++] = e__->name;
                }
                igt_waitchildren();
        } else {
                gem_require_ring(fd, ring);
                igt_require(gem_can_store_dword(fd, ring));
                __store_many(fd, ring, timeout, &shared[n]);
                names[n++] = NULL;
        }

        for (int i = 0; i < n; i++) {
                igt_info("%s%sompleted %ld cycles\n",
                         names[i] ?: "", names[i] ? " c" : "C", shared[i]);
        }
        igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
        munmap(shared, 4096);
}

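/*
 * sync_all: submit the same batch to every physical engine and then issue
 * a single wait, which must not return until all engines are done.
 */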
static void
sync_all(int fd, int num_children, int timeout)
{
        unsigned engines[16], engine;
        int num_engines = 0;

        for_each_physical_engine(fd, engine) {
                engines[num_engines++] = engine;
                if (num_engines == ARRAY_SIZE(engines))
                        break;
        }
        igt_require(num_engines);

        intel_detect_and_clear_missed_interrupts(fd);
        igt_fork(child, num_children) {
                const uint32_t bbe = MI_BATCH_BUFFER_END;
                struct drm_i915_gem_exec_object2 object;
                struct drm_i915_gem_execbuffer2 execbuf;
                double start, elapsed;
                unsigned long cycles;

                memset(&object, 0, sizeof(object));
                object.handle = gem_create(fd, 4096);
                gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));

                memset(&execbuf, 0, sizeof(execbuf));
                execbuf.buffers_ptr = to_user_pointer(&object);
                execbuf.buffer_count = 1;
                gem_execbuf(fd, &execbuf);
                gem_sync(fd, object.handle);

                start = gettime();
                cycles = 0;
                do {
                        do {
                                for (int n = 0; n < num_engines; n++) {
                                        execbuf.flags = engines[n];
                                        gem_execbuf(fd, &execbuf);
                                }
                                gem_sync(fd, object.handle);
                        } while (++cycles & 1023);
                } while ((elapsed = gettime() - start) < timeout);
                igt_info("Completed %ld cycles: %.3f us\n",
                         cycles, elapsed*1e6/cycles);

                gem_close(fd, object.handle);
        }
        igt_waitchildren_timeout(timeout+10, NULL);
        igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}

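/*
 * store_all: as sync_all, but with a store batch and a randomised engine
 * submission order, checking write synchronisation across all engines.
 */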
static void
store_all(int fd, int num_children, int timeout)
{
        const int gen = intel_gen(intel_get_drm_devid(fd));
        unsigned engines[16];
        int num_engines = 0;
        unsigned int ring;

        for_each_physical_engine(fd, ring) {
                if (!gem_can_store_dword(fd, ring))
                        continue;

                engines[num_engines++] = ring;
                if (num_engines == ARRAY_SIZE(engines))
                        break;
        }
        igt_require(num_engines);

        intel_detect_and_clear_missed_interrupts(fd);
        igt_fork(child, num_children) {
                const uint32_t bbe = MI_BATCH_BUFFER_END;
                struct drm_i915_gem_exec_object2 object[2];
                struct drm_i915_gem_relocation_entry reloc[1024];
                struct drm_i915_gem_execbuffer2 execbuf;
                double start, elapsed;
                unsigned long cycles;
                uint32_t *batch, *b;

                memset(&execbuf, 0, sizeof(execbuf));
                execbuf.buffers_ptr = to_user_pointer(object);
                execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC;
                execbuf.flags |= LOCAL_I915_EXEC_HANDLE_LUT;
                if (gen < 6)
                        execbuf.flags |= I915_EXEC_SECURE;

                memset(object, 0, sizeof(object));
                object[0].handle = gem_create(fd, 4096);
                gem_write(fd, object[0].handle, 0, &bbe, sizeof(bbe));
                execbuf.buffer_count = 1;
                gem_execbuf(fd, &execbuf);

                object[0].flags |= EXEC_OBJECT_WRITE;
                object[1].handle = gem_create(fd, 1024*16 + 4096);

                object[1].relocs_ptr = to_user_pointer(reloc);
                object[1].relocation_count = 1024;

                batch = gem_mmap__cpu(fd, object[1].handle, 0, 16*1024 + 4096,
                                      PROT_WRITE | PROT_READ);
                gem_set_domain(fd, object[1].handle,
                               I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

                memset(reloc, 0, sizeof(reloc));
                b = batch;
                for (int i = 0; i < 1024; i++) {
                        uint64_t offset;

                        reloc[i].presumed_offset = object[0].offset;
                        reloc[i].offset = (b - batch + 1) * sizeof(*batch);
                        reloc[i].delta = i * sizeof(uint32_t);
                        reloc[i].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
                        reloc[i].write_domain = I915_GEM_DOMAIN_INSTRUCTION;

                        offset = object[0].offset + reloc[i].delta;
                        *b++ = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
                        if (gen >= 8) {
                                *b++ = offset;
                                *b++ = offset >> 32;
                        } else if (gen >= 4) {
                                *b++ = 0;
                                *b++ = offset;
                                reloc[i].offset += sizeof(*batch);
                        } else {
                                b[-1] -= 1;
                                *b++ = offset;
                        }
                        *b++ = i;
                }
                *b++ = MI_BATCH_BUFFER_END;
                igt_assert((b - batch)*sizeof(uint32_t) < 20*1024);
                munmap(batch, 16*1024 + 4096);
                execbuf.buffer_count = 2;
                gem_execbuf(fd, &execbuf);
                gem_sync(fd, object[1].handle);

                start = gettime();
                cycles = 0;
                do {
                        do {
                                igt_permute_array(engines, num_engines, xchg);
                                for (int n = 0; n < num_engines; n++) {
                                        execbuf.flags &= ~ENGINE_MASK;
                                        execbuf.flags |= engines[n];
                                        gem_execbuf(fd, &execbuf);
                                }
                                gem_sync(fd, object[1].handle);
                        } while (++cycles & 1023);
                } while ((elapsed = gettime() - start) < timeout);
                igt_info("Completed %ld cycles: %.3f us\n",
                         cycles, elapsed*1e6/cycles);

                gem_close(fd, object[1].handle);
                gem_close(fd, object[0].handle);
        }
        igt_waitchildren_timeout(timeout+10, NULL);
        igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}

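/*
 * preempt: measure sync latency for a maximum-priority context while a
 * minimum-priority spinner occupies the engine, relying on preemption to
 * keep the high-priority cycles fast.
 */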
static void
preempt(int fd, unsigned ring, int num_children, int timeout)
{
        unsigned engines[16];
        const char *names[16];
        int num_engines = 0;
        uint32_t ctx[2];

        if (ring == ALL_ENGINES) {
                for_each_physical_engine(fd, ring) {
                        names[num_engines] = e__->name;
                        engines[num_engines++] = ring;
                        if (num_engines == ARRAY_SIZE(engines))
                                break;
                }

                num_children *= num_engines;
        } else {
                gem_require_ring(fd, ring);
                names[num_engines] = NULL;
                engines[num_engines++] = ring;
        }

        ctx[0] = gem_context_create(fd);
        gem_context_set_priority(fd, ctx[0], MIN_PRIO);

        ctx[1] = gem_context_create(fd);
        gem_context_set_priority(fd, ctx[1], MAX_PRIO);

        intel_detect_and_clear_missed_interrupts(fd);
        igt_fork(child, num_children) {
                const uint32_t bbe = MI_BATCH_BUFFER_END;
                struct drm_i915_gem_exec_object2 object;
                struct drm_i915_gem_execbuffer2 execbuf;
                double start, elapsed;
                unsigned long cycles;

                memset(&object, 0, sizeof(object));
                object.handle = gem_create(fd, 4096);
                gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));

                memset(&execbuf, 0, sizeof(execbuf));
                execbuf.buffers_ptr = to_user_pointer(&object);
                execbuf.buffer_count = 1;
                execbuf.flags = engines[child % num_engines];
                execbuf.rsvd1 = ctx[1];
                gem_execbuf(fd, &execbuf);
                gem_sync(fd, object.handle);

                start = gettime();
                cycles = 0;
                do {
                        igt_spin_t *spin =
                                __igt_spin_new(fd,
                                               .ctx = ctx[0],
                                               .engine = execbuf.flags);

                        do {
                                gem_execbuf(fd, &execbuf);
                                gem_sync(fd, object.handle);
                        } while (++cycles & 1023);

                        igt_spin_free(fd, spin);
                } while ((elapsed = gettime() - start) < timeout);
                igt_info("%s%sompleted %ld cycles: %.3f us\n",
                         names[child % num_engines] ?: "",
                         names[child % num_engines] ? " c" : "C",
                         cycles, elapsed*1e6/cycles);

                gem_close(fd, object.handle);
        }
        igt_waitchildren_timeout(timeout+10, NULL);
        igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);

        gem_context_destroy(fd, ctx[1]);
        gem_context_destroy(fd, ctx[0]);
}

igt_main
{
        const struct intel_execution_engine *e;
        const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
        int fd = -1;

        igt_skip_on_simulation();

        igt_fixture {
                fd = drm_open_driver(DRIVER_INTEL);
                igt_require_gem(fd);
                gem_submission_print_method(fd);
                gem_scheduler_print_capability(fd);

                igt_fork_hang_detector(fd);
        }

        for (e = intel_execution_engines; e->name; e++) {
                igt_subtest_f("%s", e->name)
                        sync_ring(fd, e->exec_id | e->flags, 1, 150);
                igt_subtest_f("idle-%s", e->name)
                        idle_ring(fd, e->exec_id | e->flags, 150);
                igt_subtest_f("active-%s", e->name)
                        active_ring(fd, e->exec_id | e->flags, 150);
                igt_subtest_f("wakeup-%s", e->name)
                        wakeup_ring(fd, e->exec_id | e->flags, 150, 1);
                igt_subtest_f("active-wakeup-%s", e->name)
                        active_wakeup_ring(fd, e->exec_id | e->flags, 150, 1);
                igt_subtest_f("double-wakeup-%s", e->name)
                        wakeup_ring(fd, e->exec_id | e->flags, 150, 2);
                igt_subtest_f("store-%s", e->name)
                        store_ring(fd, e->exec_id | e->flags, 1, 150);
                igt_subtest_f("switch-%s", e->name)
                        switch_ring(fd, e->exec_id | e->flags, 1, 150);
                igt_subtest_f("forked-switch-%s", e->name)
                        switch_ring(fd, e->exec_id | e->flags, ncpus, 150);
                igt_subtest_f("many-%s", e->name)
                        store_many(fd, e->exec_id | e->flags, 150);
                igt_subtest_f("forked-%s", e->name)
                        sync_ring(fd, e->exec_id | e->flags, ncpus, 150);
                igt_subtest_f("forked-store-%s", e->name)
                        store_ring(fd, e->exec_id | e->flags, ncpus, 150);
        }

        igt_subtest("basic-each")
                sync_ring(fd, ALL_ENGINES, 1, 5);
        igt_subtest("basic-store-each")
                store_ring(fd, ALL_ENGINES, 1, 5);
        igt_subtest("basic-many-each")
                store_many(fd, ALL_ENGINES, 5);
        igt_subtest("switch-each")
                switch_ring(fd, ALL_ENGINES, 1, 150);
        igt_subtest("forked-switch-each")
                switch_ring(fd, ALL_ENGINES, ncpus, 150);
        igt_subtest("forked-each")
                sync_ring(fd, ALL_ENGINES, ncpus, 150);
        igt_subtest("forked-store-each")
                store_ring(fd, ALL_ENGINES, ncpus, 150);
        igt_subtest("active-each")
                active_ring(fd, ALL_ENGINES, 150);
        igt_subtest("wakeup-each")
                wakeup_ring(fd, ALL_ENGINES, 150, 1);
        igt_subtest("active-wakeup-each")
                active_wakeup_ring(fd, ALL_ENGINES, 150, 1);
        igt_subtest("double-wakeup-each")
                wakeup_ring(fd, ALL_ENGINES, 150, 2);

        igt_subtest("basic-all")
                sync_all(fd, 1, 5);
        igt_subtest("basic-store-all")
                store_all(fd, 1, 5);

        igt_subtest("all")
                sync_all(fd, 1, 150);
        igt_subtest("store-all")
                store_all(fd, 1, 150);
        igt_subtest("forked-all")
                sync_all(fd, ncpus, 150);
        igt_subtest("forked-store-all")
                store_all(fd, ncpus, 150);

        igt_subtest_group {
                igt_fixture {
                        gem_require_contexts(fd);
                        igt_require(gem_scheduler_has_ctx_priority(fd));
                        igt_require(gem_scheduler_has_preemption(fd));
                }

                igt_subtest("preempt-all")
                        preempt(fd, ALL_ENGINES, 1, 20);

                for (e = intel_execution_engines; e->name; e++) {
                        igt_subtest_f("preempt-%s", e->name)
                                preempt(fd, e->exec_id | e->flags, ncpus, 150);
                }
        }

        igt_fixture {
                igt_stop_hang_detector();
                close(fd);
        }
}