1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Ben Widawsky <ben@bwidawsk.net>
25 *
26 */
27 #include "igt.h"
28 #include <limits.h>
29 #include <unistd.h>
30 #include <stdlib.h>
31 #include <stdint.h>
32 #include <stdio.h>
33 #include <string.h>
34 #include <fcntl.h>
35 #include <inttypes.h>
36 #include <errno.h>
37 #include <sys/stat.h>
38 #include <sys/ioctl.h>
39 #include <sys/time.h>
40
41 #include <drm.h>
42
43
44 IGT_TEST_DESCRIPTION("Test context batch buffer execution.");
45
46 /* Copied from gem_exec_nop.c */
exec(int fd,uint32_t handle,int ring,int ctx_id)47 static int exec(int fd, uint32_t handle, int ring, int ctx_id)
48 {
49 struct drm_i915_gem_exec_object2 obj = { .handle = handle };
50 struct drm_i915_gem_execbuffer2 execbuf = {
51 .buffers_ptr = to_user_pointer(&obj),
52 .buffer_count = 1,
53 .flags = ring,
54 };
55
56 i915_execbuffer2_set_context_id(execbuf, ctx_id);
57
58 return __gem_execbuf(fd, &execbuf);
59 }
60
big_exec(int fd,uint32_t handle,int ring)61 static void big_exec(int fd, uint32_t handle, int ring)
62 {
63 int num_buffers = gem_global_aperture_size(fd) / 4096;
64 struct drm_i915_gem_execbuffer2 execbuf = {
65 .buffer_count = num_buffers,
66 .flags = ring,
67 };
68 struct drm_i915_gem_exec_object2 *gem_exec;
69 uint32_t ctx_id1, ctx_id2;
70 int i;
71
72 /* Make sure we only fill half of RAM with gem objects. */
73 igt_require(intel_get_total_ram_mb() * 1024 / 2 > num_buffers * 4);
74
75 gem_exec = calloc(num_buffers + 1, sizeof(*gem_exec));
76 igt_assert(gem_exec);
77 memset(gem_exec, 0, (num_buffers + 1) * sizeof(*gem_exec));
78
79 ctx_id1 = gem_context_create(fd);
80 ctx_id2 = gem_context_create(fd);
81
82 gem_exec[0].handle = handle;
83
84 execbuf.buffers_ptr = to_user_pointer(gem_exec);
85
86 execbuf.buffer_count = 1;
87 i915_execbuffer2_set_context_id(execbuf, ctx_id1);
88 gem_execbuf(fd, &execbuf);
89
90 for (i = 0; i < num_buffers; i++)
91 gem_exec[i].handle = gem_create(fd, 4096);
92 gem_exec[i].handle = handle;
93 execbuf.buffer_count = i + 1;
94
95 /* figure out how many buffers we can exactly fit */
96 while (__gem_execbuf(fd, &execbuf) != 0) {
97 i--;
98 gem_close(fd, gem_exec[i].handle);
99 gem_exec[i].handle = handle;
100 execbuf.buffer_count--;
101 igt_info("trying buffer count %i\n", i - 1);
102 }
103
104 igt_info("reduced buffer count to %i from %i\n", i - 1, num_buffers);
105
106 /* double check that it works */
107 gem_execbuf(fd, &execbuf);
108
109 i915_execbuffer2_set_context_id(execbuf, ctx_id2);
110 gem_execbuf(fd, &execbuf);
111 gem_sync(fd, handle);
112 }
113
invalid_context(int fd,const struct intel_execution_engine2 * e,uint32_t handle)114 static void invalid_context(int fd, const struct intel_execution_engine2 *e,
115 uint32_t handle)
116 {
117 struct drm_i915_gem_exec_object2 obj = {
118 .handle = handle,
119 };
120 struct drm_i915_gem_execbuffer2 execbuf = {
121 .buffers_ptr = to_user_pointer(&obj),
122 .buffer_count = 1,
123 .flags = e->flags,
124 };
125 unsigned int i;
126 uint32_t ctx;
127
128 /* Verify everything works. */
129 i915_execbuffer2_set_context_id(execbuf, 0);
130 gem_execbuf(fd, &execbuf);
131
132 ctx = gem_context_create(fd);
133 i915_execbuffer2_set_context_id(execbuf, ctx);
134 gem_execbuf(fd, &execbuf);
135
136 gem_context_destroy(fd, ctx);
137
138 /* Go through the non-existent context id's. */
139 for (i = 0; i < 32; i++) {
140 i915_execbuffer2_set_context_id(execbuf, 1UL << i);
141 igt_assert_eq(__gem_execbuf(fd, &execbuf), -ENOENT);
142 }
143
144 i915_execbuffer2_set_context_id(execbuf, INT_MAX);
145 igt_assert_eq(__gem_execbuf(fd, &execbuf), -ENOENT);
146
147 i915_execbuffer2_set_context_id(execbuf, UINT_MAX);
148 igt_assert_eq(__gem_execbuf(fd, &execbuf), -ENOENT);
149 }
150
has_recoverable_param(int i915)151 static bool has_recoverable_param(int i915)
152 {
153 struct drm_i915_gem_context_param param = {
154 .param = I915_CONTEXT_PARAM_RECOVERABLE
155 };
156
157 return __gem_context_get_param(i915, ¶m) == 0;
158 }
159
norecovery(int i915)160 static void norecovery(int i915)
161 {
162 igt_hang_t hang;
163
164 igt_require(has_recoverable_param(i915));
165 hang = igt_allow_hang(i915, 0, 0);
166
167 for (int pass = 1; pass >= 0; pass--) {
168 struct drm_i915_gem_context_param param = {
169 .ctx_id = gem_context_create(i915),
170 .param = I915_CONTEXT_PARAM_RECOVERABLE,
171 .value = pass,
172 };
173 int expect = pass == 0 ? -EIO : 0;
174 igt_spin_t *spin;
175
176 gem_context_set_param(i915, ¶m);
177
178 param.value = !pass;
179 gem_context_get_param(i915, ¶m);
180 igt_assert_eq(param.value, pass);
181
182 spin = __igt_spin_new(i915,
183 .ctx = param.ctx_id,
184 .flags = IGT_SPIN_POLL_RUN);
185 igt_spin_busywait_until_started(spin);
186
187 igt_force_gpu_reset(i915);
188
189 igt_spin_end(spin);
190 igt_assert_eq(__gem_execbuf(i915, &spin->execbuf), expect);
191 igt_spin_free(i915, spin);
192
193 gem_context_destroy(i915, param.ctx_id);
194 }
195
196 igt_disallow_hang(i915, hang);
197 }
198
igt_main
{
	/* Trivial batch: a no-op dword followed by MI_BATCH_BUFFER_END. */
	const uint32_t batch[2] = { 0, MI_BATCH_BUFFER_END };
	const struct intel_execution_engine2 *e;
	uint32_t handle;
	uint32_t ctx_id;
	int fd;

	igt_fixture {
		fd = drm_open_driver_render(DRIVER_INTEL);
		igt_require_gem(fd);

		/* All subtests need context support in the kernel. */
		gem_require_contexts(fd);

		/* Shared batch buffer reused by every subtest below. */
		handle = gem_create(fd, 4096);
		gem_write(fd, handle, 0, batch, sizeof(batch));
	}

	igt_subtest("basic") {
		/* Create/execute/destroy twice to exercise id reuse. */
		ctx_id = gem_context_create(fd);
		igt_assert(exec(fd, handle, 0, ctx_id) == 0);
		gem_sync(fd, handle);
		gem_context_destroy(fd, ctx_id);

		ctx_id = gem_context_create(fd);
		igt_assert(exec(fd, handle, 0, ctx_id) == 0);
		gem_sync(fd, handle);
		gem_context_destroy(fd, ctx_id);

		/* ctx_id was just destroyed: submitting on it must fail. */
		igt_assert(exec(fd, handle, 0, ctx_id) < 0);
		gem_sync(fd, handle);
	}

	__for_each_physical_engine(fd, e)
		igt_subtest_f("basic-invalid-context-%s", e->name)
			invalid_context(fd, e, handle);

	igt_subtest("eviction")
		big_exec(fd, handle, 0);

	igt_subtest("basic-norecovery")
		norecovery(fd);

	igt_subtest("reset-pin-leak") {
		int i;

		igt_skip_on_simulation();

		/*
		 * Use an explicit context to isolate the test from
		 * any major code changes related to the per-file
		 * default context (eg. if they would be eliminated).
		 */
		ctx_id = gem_context_create(fd);

		/*
		 * Iterate enough times that the kernel will
		 * become unhappy if the ggtt pin count for
		 * the last context is leaked at every reset.
		 */
		for (i = 0; i < 20; i++) {
			igt_hang_t hang = igt_hang_ring(fd, 0);

			/* Run on both the default and explicit context
			 * around each simulated hang/reset cycle.
			 */
			igt_assert_eq(exec(fd, handle, 0, 0), 0);
			igt_assert_eq(exec(fd, handle, 0, ctx_id), 0);
			igt_post_hang_ring(fd, hang);
		}

		gem_context_destroy(fd, ctx_id);
	}
}
270