/*
 * Copyright © 2017-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Lionel Landwerlin <lionel.g.landwerlin@intel.com>
 *
 */

#include "igt.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <errno.h>
#include <time.h>
#include <sys/mman.h>
#include <sys/wait.h>

#include "igt_dummyload.h"
#include "igt_perf.h"
#include "igt_sysfs.h"
#include "ioctl_wrappers.h"

IGT_TEST_DESCRIPTION("Test context render powergating programming.");

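/*
 * A minimal sketch of the uAPI flow exercised throughout this test:
 * fetch a context's current SSEU configuration, adjust the requested
 * powergating masks, then write the configuration back.
 *
 *	struct drm_i915_gem_context_param_sseu sseu = { };
 *	struct drm_i915_gem_context_param arg = {
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.ctx_id = ctx,
 *		.size = sizeof(sseu),
 *		.value = to_user_pointer(&sseu),
 *	};
 *
 *	gem_context_get_param(fd, &arg);	// read device defaults
 *	sseu.slice_mask = ...;			// tweak the request
 *	gem_context_set_param(fd, &arg);	// apply to the context
 */
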
static unsigned int __intel_gen__, __intel_devid__;
static uint64_t __slice_mask__, __subslice_mask__;
static unsigned int __slice_count__, __subslice_count__;

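/* Return the mask with its lowest set bit cleared. */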
static uint64_t mask_minus_one(uint64_t mask)
{
	unsigned int i;

	for (i = 0; i < (sizeof(mask) * 8 - 1); i++) {
		if ((1ULL << i) & mask)
			return mask & ~(1ULL << i);
	}

	igt_assert(0);
	return 0;
}

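/* Return the mask with its lowest clear bit set. */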
static uint64_t mask_plus_one(uint64_t mask)
{
	unsigned int i;

	for (i = 0; i < (sizeof(mask) * 8 - 1); i++) {
		if (((1ULL << i) & mask) == 0)
			return mask | (1ULL << i);
	}

	igt_assert(0);
	return 0;
}

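/* Clear the n lowest set bits of the mask. */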
static uint64_t mask_minus(uint64_t mask, int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		mask = mask_minus_one(mask);

	return mask;
}

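/* Set the n lowest clear bits of the mask. */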
static uint64_t mask_plus(uint64_t mask, int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		mask = mask_plus_one(mask);

	return mask;
}

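/*
 * Per-context SSEU is supported when the parameter can be read and the
 * default configuration written back unchanged; -ENODEV or -EINVAL from
 * set_param means the kernel lacks (re)programming support here.
 */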
static bool
kernel_has_per_context_sseu_support(int fd)
{
	struct drm_i915_gem_context_param_sseu sseu = { };
	struct drm_i915_gem_context_param arg = {
		.param = I915_CONTEXT_PARAM_SSEU,
		.size = sizeof(sseu),
		.value = to_user_pointer(&sseu),
	};
	int ret;

	if (__gem_context_get_param(fd, &arg))
		return false;

	arg.value = to_user_pointer(&sseu);

	ret = __gem_context_set_param(fd, &arg);

	igt_assert(ret == 0 || ret == -ENODEV || ret == -EINVAL);

	return ret == 0;
}

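/* Use the i915 PMU busy counter to probe whether an engine exists. */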
static bool has_engine(int fd, unsigned int class, unsigned int instance)
{
	int pmu = perf_i915_open(I915_PMU_ENGINE_BUSY(class, instance));

	if (pmu >= 0)
		close(pmu);

	return pmu >= 0;
}

/*
 * Verify that invalid engines are rejected and valid ones are accepted.
 */
static void test_engines(int fd)
{
	struct drm_i915_gem_context_param_sseu sseu = { };
	struct drm_i915_gem_context_param arg = {
		.param = I915_CONTEXT_PARAM_SSEU,
		.ctx_id = gem_context_create(fd),
		.size = sizeof(sseu),
		.value = to_user_pointer(&sseu)
	};
	unsigned int class, instance;
	int last_with_engines;

	/* get_param */

	sseu.engine.engine_instance = -1; /* Assumed invalid. */
	igt_assert_eq(__gem_context_get_param(fd, &arg), -EINVAL);

	sseu.engine.engine_class = I915_ENGINE_CLASS_INVALID; /* Both invalid. */
	igt_assert_eq(__gem_context_get_param(fd, &arg), -EINVAL);

	sseu.engine.engine_instance = 0; /* Class invalid. */
	igt_assert_eq(__gem_context_get_param(fd, &arg), -EINVAL);
	sseu.engine.engine_class = I915_ENGINE_CLASS_RENDER;

	last_with_engines = -1;
	for (class = 0; class < ~0; class++) {
		for (instance = 0; instance < ~0; instance++) {
			int ret;

			sseu.engine.engine_class = class;
			sseu.engine.engine_instance = instance;

			ret = __gem_context_get_param(fd, &arg);

			if (has_engine(fd, class, instance)) {
				igt_assert_eq(ret, 0);
				last_with_engines = class;
			} else {
				igt_assert_eq(ret, -EINVAL);
				if (instance > 8) /* Skip over some instance holes. */
					break;
			}
		}

		if (class - last_with_engines > 8) /* Skip over some class holes. */
			break;
	}

	/*
	 * Get some proper values before trying to reprogram them onto
	 * an invalid engine.
	 */
	sseu.engine.engine_class = 0;
	sseu.engine.engine_instance = 0;
	gem_context_get_param(fd, &arg);

	/* set_param */

	sseu.engine.engine_instance = -1; /* Assumed invalid. */
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);

	sseu.engine.engine_class = I915_ENGINE_CLASS_INVALID; /* Both invalid. */
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);

	sseu.engine.engine_instance = 0; /* Class invalid. */
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);

	last_with_engines = -1;
	for (class = 0; class < ~0; class++) {
		for (instance = 0; instance < ~0; instance++) {
			int ret;

			sseu.engine.engine_class = class;
			sseu.engine.engine_instance = instance;

			ret = __gem_context_set_param(fd, &arg);

			if (has_engine(fd, class, instance)) {
				igt_assert(ret == 0 || ret == -ENODEV);
				last_with_engines = class;
			} else {
				igt_assert_eq(ret, -EINVAL);
				if (instance > 8) /* Skip over some instance holes. */
					break;
			}
		}

		if (class - last_with_engines > 8) /* Skip over some class holes. */
			break;
	}

	gem_context_destroy(fd, arg.ctx_id);
}

/*
 * Verify that invalid arguments are rejected.
 */
static void
test_invalid_args(int fd)
{
	struct drm_i915_gem_context_param arg = {
		.param = I915_CONTEXT_PARAM_SSEU,
		.ctx_id = gem_context_create(fd),
	};
	struct drm_i915_gem_context_param_sseu sseu = { };
	unsigned char *page[2];
	unsigned char *addr;
	unsigned int sz;

	/* get param */

	/* Invalid size. */
	arg.size = 1;
	igt_assert_eq(__gem_context_get_param(fd, &arg), -EINVAL);

	/* Query size. */
	arg.size = 0;
	igt_assert_eq(__gem_context_get_param(fd, &arg), 0);
	sz = arg.size;

	/* Bad pointers. */
	igt_assert_eq(__gem_context_get_param(fd, &arg), -EFAULT);
	arg.value = -1;
	igt_assert_eq(__gem_context_get_param(fd, &arg), -EFAULT);
	arg.value = 1;
	igt_assert_eq(__gem_context_get_param(fd, &arg), -EFAULT);

	/* Unmapped. */
	page[0] = mmap(0, 4096, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
	igt_assert(page[0] != MAP_FAILED);
	memset(page[0], 0, sizeof(sseu));
	munmap(page[0], 4096);
	arg.value = to_user_pointer(page[0]);
	igt_assert_eq(__gem_context_get_param(fd, &arg), -EFAULT);

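	/*
	 * For the straddle tests below, reserve two adjacent pages and
	 * remap them individually with MAP_FIXED, so the struct can be
	 * placed across a boundary into an unmapped (or read-only) page.
	 */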
	/* Straddle into unmapped area. */
	page[0] = mmap(0, 8192, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
	igt_assert(page[0] != MAP_FAILED);
	munmap(page[0], 8192);
	page[0] = mmap(page[0], 4096,
		       PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
	igt_assert(page[0] != MAP_FAILED);
	memset(page[0], 0, sizeof(sseu));
	page[1] = mmap((void *)((unsigned long)page[0] + 4096), 4096,
		       PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
	igt_assert(page[1] != MAP_FAILED);
	memset(page[1], 0, sizeof(sseu));
	munmap(page[1], 4096);
	arg.value = to_user_pointer(page[1]) -
		    sizeof(struct drm_i915_gem_context_param_sseu) + 4;
	igt_assert_eq(__gem_context_get_param(fd, &arg), -EFAULT);
	munmap(page[0], 4096);

	/* Straddle into read-only area. */
	page[0] = mmap(0, 8192, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
	igt_assert(page[0] != MAP_FAILED);
	munmap(page[0], 8192);
	page[0] = mmap(page[0], 4096,
		       PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
	igt_assert(page[0] != MAP_FAILED);
	memset(page[0], 0, sizeof(sseu));
	page[1] = mmap((void *)((unsigned long)page[0] + 4096), 4096,
		       PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
	igt_assert(page[1] != MAP_FAILED);
	memset(page[1], 0, sizeof(sseu));
	igt_assert(mprotect(page[1], 4096, PROT_READ) == 0);
	arg.value = to_user_pointer(page[1] - sizeof(sseu) + 4);
	igt_assert_eq(__gem_context_get_param(fd, &arg), -EFAULT);
	munmap(page[0], 4096);
	munmap(page[1], 4096);

	/* set param */

	/* Invalid sizes. */
	arg.size = 1;
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);

	arg.size = 0;
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
	arg.size = sz;

	/* Bad pointers. */
	arg.value = 0;
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EFAULT);
	arg.value = -1;
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EFAULT);
	arg.value = 1;
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EFAULT);

	/* Get valid SSEU. */
	arg.value = to_user_pointer(&sseu);
	igt_assert_eq(__gem_context_get_param(fd, &arg), 0);

	/* Invalid MBZ. */
	sseu.flags = -1;
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
	sseu.rsvd = -1;
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
	sseu.flags = 0;
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL); /* rsvd still set. */
	sseu.rsvd = 0;

	/* Unmapped. */
	page[0] = mmap(0, 4096, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
	igt_assert(page[0] != MAP_FAILED);
	memcpy(page[0], &sseu, sizeof(sseu));
	munmap(page[0], 4096);
	arg.value = to_user_pointer(page[0]);
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EFAULT);

	/* Straddle into unmapped area. */
	page[0] = mmap(0, 8192, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
	igt_assert(page[0] != MAP_FAILED);
	munmap(page[0], 8192);
	page[0] = mmap(page[0], 4096,
		       PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
	igt_assert(page[0] != MAP_FAILED);
	page[1] = mmap((void *)((unsigned long)page[0] + 4096), 4096,
		       PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
	igt_assert(page[1] != MAP_FAILED);
	addr = page[1] - sizeof(sseu) + 4;
	memcpy(addr, &sseu, sizeof(sseu));
	munmap(page[1], 4096);
	arg.value = to_user_pointer(addr);
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EFAULT);
	munmap(page[0], 4096);

	gem_context_destroy(fd, arg.ctx_id);
}

/*
 * Verify that a GGTT-mapped area can be used as the SSEU pointer.
 */
static void
test_ggtt_args(int fd)
{
	struct drm_i915_gem_context_param_sseu *sseu;
	struct drm_i915_gem_context_param arg = {
		.param = I915_CONTEXT_PARAM_SSEU,
		.ctx_id = gem_context_create(fd),
		.size = sizeof(*sseu),
	};
	uint32_t bo;

	bo = gem_create(fd, 4096);
	arg.value = to_user_pointer(gem_mmap__gtt(fd, bo, 4096,
						  PROT_READ | PROT_WRITE));

	igt_assert_eq(__gem_context_get_param(fd, &arg), 0);
	igt_assert_eq(__gem_context_set_param(fd, &arg), 0);

	munmap((void *)(uintptr_t)arg.value, 4096);
	gem_close(fd, bo);
	gem_context_destroy(fd, arg.ctx_id);
}

/*
 * Verify that invalid SSEU values are rejected.
 */
static void
test_invalid_sseu(int fd)
{
	struct drm_i915_gem_context_param_sseu device_sseu = { };
	struct drm_i915_gem_context_param_sseu sseu = { };
	struct drm_i915_gem_context_param arg = {
		.param = I915_CONTEXT_PARAM_SSEU,
		.ctx_id = gem_context_create(fd),
		.size = sizeof(sseu),
	};
	unsigned int i;

	/* Fetch the device defaults. */
	arg.value = to_user_pointer(&device_sseu);
	gem_context_get_param(fd, &arg);

	arg.value = to_user_pointer(&sseu);

	/* Try all slice masks known to be invalid. */
	sseu = device_sseu;
	for (i = 1; i <= (8 - __slice_count__); i++) {
		sseu.slice_mask = mask_plus(__slice_mask__, i);
		igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
	}

	/* 0 slices. */
	sseu.slice_mask = 0;
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);

	/* Try all subslice masks known to be invalid. */
	sseu = device_sseu;
	for (i = 1; i <= (8 - __subslice_count__); i++) {
		sseu.subslice_mask = mask_plus(__subslice_mask__, i);
		igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
	}

	/* 0 subslices. */
	sseu.subslice_mask = 0;
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);

	/* Try EU counts greater than the maximum available. */
	sseu = device_sseu;
	sseu.min_eus_per_subslice = device_sseu.max_eus_per_subslice + 1;
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);

	sseu = device_sseu;
	sseu.max_eus_per_subslice = device_sseu.max_eus_per_subslice + 1;
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);

	/* Try to program 0 max EUs. */
	sseu = device_sseu;
	sseu.max_eus_per_subslice = 0;
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);

	/* Min > max */
	sseu = device_sseu;
	sseu.min_eus_per_subslice = sseu.max_eus_per_subslice;
	sseu.max_eus_per_subslice = 1;
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);

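	/*
	 * The checks below are Gen11-specific: there the kernel only
	 * accepts a restricted set of subslice configurations (see the
	 * VME note below), so all other gens skip out here.
	 */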
	if (__intel_gen__ != 11)
		goto out;

	/* A subset of subslices but a slice mask greater than one. */
	if (__slice_count__ > 1) {
		sseu = device_sseu;
		sseu.subslice_mask = mask_minus_one(sseu.subslice_mask);
		igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
	}

	/* An odd number of subslices, above four. */
	sseu = device_sseu;
	sseu.slice_mask = 0x1;
	sseu.subslice_mask = mask_minus_one(sseu.subslice_mask);
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);

	/* More than half of the subslices with one slice. */
	sseu = device_sseu;
	sseu.slice_mask = 0x1;
	sseu.subslice_mask = mask_minus(sseu.subslice_mask,
					__subslice_count__ / 2 - 1);
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);

	/*
	 * VME (Video Motion Estimation): on Gen11 the only reduced
	 * configuration expected to be accepted is one slice with exactly
	 * half the subslices; probe invalid configurations around it.
	 */

	/* A slice count between one and the maximum. */
	if (__slice_count__ > 2) {
		sseu = device_sseu;
		sseu.slice_mask = mask_minus_one(sseu.slice_mask);
		igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
	}

	/* Fewer than half of the subslices with one slice. */
	sseu = device_sseu;
	sseu.slice_mask = 0x1;
	sseu.subslice_mask = mask_minus(sseu.subslice_mask,
					__subslice_count__ / 2 + 1);
	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);

out:
	gem_context_destroy(fd, arg.ctx_id);
}

igt_main
{
	int fd;

	igt_fixture {
		fd = drm_open_driver(DRIVER_INTEL);
		igt_require_gem(fd);

		__intel_devid__ = intel_get_drm_devid(fd);
		__intel_gen__ = intel_gen(__intel_devid__);

		igt_require(kernel_has_per_context_sseu_support(fd));
	}

	igt_subtest_group {
		igt_fixture {
			drm_i915_getparam_t gp;

			gp.param = I915_PARAM_SLICE_MASK;
			gp.value = (int *) &__slice_mask__;
			do_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
			__slice_count__ = __builtin_popcount(__slice_mask__);

			gp.param = I915_PARAM_SUBSLICE_MASK;
			gp.value = (int *) &__subslice_mask__;
			do_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
			__subslice_count__ =
				__builtin_popcount(__subslice_mask__);
		}

		igt_subtest("invalid-args")
			test_invalid_args(fd);

		igt_subtest("invalid-sseu")
			test_invalid_sseu(fd);

		igt_subtest("ggtt-args")
			test_ggtt_args(fd);

		igt_subtest("engines")
			test_engines(fd);
	}

	igt_fixture {
		close(fd);
	}
}