/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#include <xf86drm.h>
#include <xf86atomic.h>
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"

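/* Record of a single kernel submission: the buffer references, relocations
 * and push segments that will be handed to one DRM_NOUVEAU_GEM_PUSHBUF
 * ioctl, plus running totals of the VRAM/GART space they account for.
 */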
struct nouveau_pushbuf_krec {
	struct nouveau_pushbuf_krec *next;
	struct drm_nouveau_gem_pushbuf_bo buffer[NOUVEAU_GEM_MAX_BUFFERS];
	struct drm_nouveau_gem_pushbuf_reloc reloc[NOUVEAU_GEM_MAX_RELOCS];
	struct drm_nouveau_gem_pushbuf_push push[NOUVEAU_GEM_MAX_PUSH];
	int nr_buffer;
	int nr_reloc;
	int nr_push;
	uint64_t vram_used;
	uint64_t gart_used;
};

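/* Private state behind the public nouveau_pushbuf: the list of pending
 * kernel submission records, the ring of backing buffer objects, and the
 * current write position within the mapped buffer.
 */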
struct nouveau_pushbuf_priv {
	struct nouveau_pushbuf base;
	struct nouveau_pushbuf_krec *list;
	struct nouveau_pushbuf_krec *krec;
	struct nouveau_list bctx_list;
	struct nouveau_bo *bo;
	uint32_t type;
	uint32_t suffix0;
	uint32_t suffix1;
	uint32_t *ptr;
	uint32_t *bgn;
	int bo_next;
	int bo_nr;
	struct nouveau_bo *bos[];
};

static inline struct nouveau_pushbuf_priv *
nouveau_pushbuf(struct nouveau_pushbuf *push)
{
	return (struct nouveau_pushbuf_priv *)push;
}

static int pushbuf_validate(struct nouveau_pushbuf *, bool);
static int pushbuf_flush(struct nouveau_pushbuf *);

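/* Check whether a new buffer reference fits within the current VRAM/GART
 * limits, migrating already-referenced VRAM|GART buffers to VRAM where that
 * frees up enough room.  Returns false if the caller must flush before the
 * buffer can be referenced.
 */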
static bool
pushbuf_kref_fits(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		  uint32_t *domains)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_device *dev = push->client->device;
	struct nouveau_bo *kbo;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int i;

	/* VRAM is the only valid domain.  GART and VRAM|GART buffers
	 * are all accounted to GART, so if this doesn't fit in VRAM
	 * straight up, a flush is needed.
	 */
	if (*domains == NOUVEAU_GEM_DOMAIN_VRAM) {
		if (krec->vram_used + bo->size > dev->vram_limit)
			return false;
		krec->vram_used += bo->size;
		return true;
	}

	/* GART or VRAM|GART buffer.  Account both of these buffer types
	 * to GART only for the moment, which simplifies things.  If the
	 * buffer can fit already, we're done here.
	 */
	if (krec->gart_used + bo->size <= dev->gart_limit) {
		krec->gart_used += bo->size;
		return true;
	}

	/* Ran out of GART space, if it's a VRAM|GART buffer and it'll
	 * fit into available VRAM, turn it into a VRAM buffer
	 */
	if ((*domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    krec->vram_used + bo->size <= dev->vram_limit) {
		*domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->vram_used += bo->size;
		return true;
	}

	/* Still couldn't fit the buffer in anywhere, so as a last resort;
	 * scan the buffer list for VRAM|GART buffers and turn them into
	 * VRAM buffers until we have enough space in GART for this one
	 */
	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			continue;

		kbo = (void *)(unsigned long)kref->user_priv;
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) ||
		    krec->vram_used + kbo->size > dev->vram_limit)
			continue;

		kref->valid_domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->gart_used -= kbo->size;
		krec->vram_used += kbo->size;
		if (krec->gart_used + bo->size <= dev->gart_limit) {
			krec->gart_used += bo->size;
			return true;
		}
	}

	/* Couldn't resolve a placement, need to force a flush */
	return false;
}

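/* Look up or create the kernel buffer reference for a bo on this pushbuf,
 * merging in the requested memory domains and read/write access.  Returns
 * NULL when the reference cannot be added without flushing first.
 */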
static struct drm_nouveau_gem_pushbuf_bo *
pushbuf_kref(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t flags)
{
	struct nouveau_device *dev = push->client->device;
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_pushbuf *fpush;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t domains, domains_wr, domains_rd;

	domains = 0;
	if (flags & NOUVEAU_BO_VRAM)
		domains |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (flags & NOUVEAU_BO_GART)
		domains |= NOUVEAU_GEM_DOMAIN_GART;
	domains_wr = domains * !!(flags & NOUVEAU_BO_WR);
	domains_rd = domains * !!(flags & NOUVEAU_BO_RD);

	/* if buffer is referenced on another pushbuf that is owned by the
	 * same client, we need to flush the other pushbuf first to ensure
	 * the correct ordering of commands
	 */
	fpush = cli_push_get(push->client, bo);
	if (fpush && fpush != push)
		pushbuf_flush(fpush);

	kref = cli_kref_get(push->client, bo);
	if (kref) {
		/* possible conflict in memory types - flush and retry */
		if (!(kref->valid_domains & domains))
			return NULL;

		/* VRAM|GART buffer turning into a VRAM buffer.  Make sure
		 * it'll fit in VRAM and force a flush if not.
		 */
		if ((kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
		    (domains == NOUVEAU_GEM_DOMAIN_VRAM)) {
			if (krec->vram_used + bo->size > dev->vram_limit)
				return NULL;
			krec->vram_used += bo->size;
			krec->gart_used -= bo->size;
		}

		kref->valid_domains &= domains;
		kref->write_domains |= domains_wr;
		kref->read_domains |= domains_rd;
	} else {
		if (krec->nr_buffer == NOUVEAU_GEM_MAX_BUFFERS ||
		    !pushbuf_kref_fits(push, bo, &domains))
			return NULL;

		kref = &krec->buffer[krec->nr_buffer++];
		kref->user_priv = (unsigned long)bo;
		kref->handle = bo->handle;
		kref->valid_domains = domains;
		kref->write_domains = domains_wr;
		kref->read_domains = domains_rd;
		kref->presumed.valid = 1;
		kref->presumed.offset = bo->offset;
		if (bo->flags & NOUVEAU_BO_VRAM)
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
		else
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;

		cli_kref_set(push->client, bo, kref, push);
		atomic_inc(&nouveau_bo(bo)->refcnt);
	}

	return kref;
}

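/* Append a relocation entry for the current pushbuf word and return the
 * value to write there now, computed from the bo's presumed offset.  The
 * kernel patches the word later if the buffer ends up placed elsewhere.
 */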
static uint32_t
pushbuf_krel(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_bo *pkref;
	struct drm_nouveau_gem_pushbuf_bo *bkref;
	uint32_t reloc = data;

	pkref = cli_kref_get(push->client, nvpb->bo);
	bkref = cli_kref_get(push->client, bo);
	krel = &krec->reloc[krec->nr_reloc++];

	krel->reloc_bo_index = pkref - krec->buffer;
	krel->reloc_bo_offset = (push->cur - nvpb->ptr) * 4;
	krel->bo_index = bkref - krec->buffer;
	krel->flags = 0;
	krel->data = data;
	krel->vor = vor;
	krel->tor = tor;

	if (flags & NOUVEAU_BO_LOW) {
		reloc = (bkref->presumed.offset + data);
		krel->flags |= NOUVEAU_GEM_RELOC_LOW;
	} else
	if (flags & NOUVEAU_BO_HIGH) {
		reloc = (bkref->presumed.offset + data) >> 32;
		krel->flags |= NOUVEAU_GEM_RELOC_HIGH;
	}
	if (flags & NOUVEAU_BO_OR) {
		if (bkref->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM)
			reloc |= vor;
		else
			reloc |= tor;
		krel->flags |= NOUVEAU_GEM_RELOC_OR;
	}

	return reloc;
}

static void
pushbuf_dump(struct nouveau_pushbuf_krec *krec, int krec_id, int chid)
{
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bo *bo;
	uint32_t *bgn, *end;
	int i;

	err("ch%d: krec %d pushes %d bufs %d relocs %d\n", chid,
	    krec_id, krec->nr_push, krec->nr_buffer, krec->nr_reloc);

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		err("ch%d: buf %08x %08x %08x %08x %08x\n", chid, i,
		    kref->handle, kref->valid_domains,
		    kref->read_domains, kref->write_domains);
	}

	krel = krec->reloc;
	for (i = 0; i < krec->nr_reloc; i++, krel++) {
		err("ch%d: rel %08x %08x %08x %08x %08x %08x %08x\n",
		    chid, krel->reloc_bo_index, krel->reloc_bo_offset,
		    krel->bo_index, krel->flags, krel->data,
		    krel->vor, krel->tor);
	}

	kpsh = krec->push;
	for (i = 0; i < krec->nr_push; i++, kpsh++) {
		kref = krec->buffer + kpsh->bo_index;
		bo = (void *)(unsigned long)kref->user_priv;
		bgn = (uint32_t *)((char *)bo->map + kpsh->offset);
		end = bgn + (kpsh->length / 4);

		err("ch%d: psh %08x %010llx %010llx\n", chid, kpsh->bo_index,
		    (unsigned long long)kpsh->offset,
		    (unsigned long long)(kpsh->offset + kpsh->length));
		while (bgn < end)
			err("\t0x%08x\n", *bgn++);
	}
}

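/* Hand every accumulated krec to the kernel via DRM_NOUVEAU_GEM_PUSHBUF,
 * then propagate the kernel's updated buffer placements and the access
 * flags back into the userspace bo structures.
 */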
static int
pushbuf_submit(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->list;
	struct nouveau_device *dev = push->client->device;
	struct drm_nouveau_gem_pushbuf_bo_presumed *info;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct drm_nouveau_gem_pushbuf req;
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_bo *bo;
	int krec_id = 0;
	int ret = 0, i;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	if (push->kick_notify)
		push->kick_notify(push);

	nouveau_pushbuf_data(push, NULL, 0, 0);

	while (krec && krec->nr_push) {
		req.channel = fifo->channel;
		req.nr_buffers = krec->nr_buffer;
		req.buffers = (uint64_t)(unsigned long)krec->buffer;
		req.nr_relocs = krec->nr_reloc;
		req.nr_push = krec->nr_push;
		req.relocs = (uint64_t)(unsigned long)krec->reloc;
		req.push = (uint64_t)(unsigned long)krec->push;
		req.suffix0 = nvpb->suffix0;
		req.suffix1 = nvpb->suffix1;
		req.vram_available = 0; /* for valgrind */
		req.gart_available = 0;

		if (dbg_on(0))
			pushbuf_dump(krec, krec_id++, fifo->channel);

#ifndef SIMULATE
		ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_GEM_PUSHBUF,
					  &req, sizeof(req));
		nvpb->suffix0 = req.suffix0;
		nvpb->suffix1 = req.suffix1;
		dev->vram_limit = (req.vram_available *
				   nouveau_device(dev)->vram_limit_percent) / 100;
		dev->gart_limit = (req.gart_available *
				   nouveau_device(dev)->gart_limit_percent) / 100;
#else
		if (dbg_on(31))
			ret = -EINVAL;
#endif

		if (ret) {
			err("kernel rejected pushbuf: %s\n", strerror(-ret));
			pushbuf_dump(krec, krec_id++, fifo->channel);
			break;
		}

		kref = krec->buffer;
		for (i = 0; i < krec->nr_buffer; i++, kref++) {
			bo = (void *)(unsigned long)kref->user_priv;

			info = &kref->presumed;
			if (!info->valid) {
				bo->flags &= ~NOUVEAU_BO_APER;
				if (info->domain == NOUVEAU_GEM_DOMAIN_VRAM)
					bo->flags |= NOUVEAU_BO_VRAM;
				else
					bo->flags |= NOUVEAU_BO_GART;
				bo->offset = info->offset;
			}

			if (kref->write_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_WR;
			if (kref->read_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_RD;
		}

		krec = krec->next;
	}

	return ret;
}

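/* Submit (for immediate-mode pushbufs) or queue the current krec, drop the
 * buffer references it held, reset the accounting, and move any attached
 * bufctx resources from their current list back to pending.
 */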
static int
pushbuf_flush(struct nouveau_pushbuf *push)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx, *btmp;
	struct nouveau_bo *bo;
	int ret = 0, i;

	if (push->channel) {
		ret = pushbuf_submit(push, push->channel);
	} else {
		nouveau_pushbuf_data(push, NULL, 0, 0);
		krec->next = malloc(sizeof(*krec));
		nvpb->krec = krec->next;
	}

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		if (push->channel)
			nouveau_bo_ref(NULL, &bo);
	}

	krec = nvpb->krec;
	krec->vram_used = 0;
	krec->gart_used = 0;
	krec->nr_buffer = 0;
	krec->nr_reloc = 0;
	krec->nr_push = 0;

	DRMLISTFOREACHENTRYSAFE(bctx, btmp, &nvpb->bctx_list, head) {
		DRMLISTJOIN(&bctx->current, &bctx->pending);
		DRMINITLISTHEAD(&bctx->current);
		DRMLISTDELINIT(&bctx->head);
	}

	return ret;
}

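/* Undo buffer references added since the saved marks (sref/srel), used to
 * roll back after a partial failure so the caller can flush and retry.
 */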
static void
pushbuf_refn_fail(struct nouveau_pushbuf *push, int sref, int srel)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	kref = krec->buffer + sref;
	while (krec->nr_buffer-- > sref) {
		struct nouveau_bo *bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		nouveau_bo_ref(NULL, &bo);
		kref++;
	}
	krec->nr_buffer = sref;
	krec->nr_reloc = srel;
}

static int
pushbuf_refn(struct nouveau_pushbuf *push, bool retry,
	     struct nouveau_pushbuf_refn *refs, int nr)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int sref = krec->nr_buffer;
	int ret = 0, i;

	for (i = 0; i < nr; i++) {
		kref = pushbuf_kref(push, refs[i].bo, refs[i].flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}
	}

	if (ret) {
		pushbuf_refn_fail(push, sref, krec->nr_reloc);
		if (retry) {
			pushbuf_flush(push);
			nouveau_pushbuf_space(push, 0, 0, 0);
			return pushbuf_refn(push, false, refs, nr);
		}
	}

	return ret;
}

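/* Reference every buffer on the attached bufctx's pending list, emitting a
 * pair of relocations for entries that carry a packet.  On -ENOSPC the
 * additions are rolled back and, if retry is set, everything is flushed
 * and the validation attempted once more.
 */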
static int
pushbuf_validate(struct nouveau_pushbuf *push, bool retry)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx = push->bufctx;
	struct nouveau_bufref *bref;
	int relocs = bctx ? bctx->relocs * 2 : 0;
	int sref, srel, ret;

	ret = nouveau_pushbuf_space(push, relocs, relocs, 0);
	if (ret || bctx == NULL)
		return ret;

	sref = krec->nr_buffer;
	srel = krec->nr_reloc;

	DRMLISTDEL(&bctx->head);
	DRMLISTADD(&bctx->head, &nvpb->bctx_list);

	DRMLISTFOREACHENTRY(bref, &bctx->pending, thead) {
		kref = pushbuf_kref(push, bref->bo, bref->flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}

		if (bref->packet) {
			pushbuf_krel(push, bref->bo, bref->packet, 0, 0, 0);
			*push->cur++ = 0;
			pushbuf_krel(push, bref->bo, bref->data, bref->flags,
				     bref->vor, bref->tor);
			*push->cur++ = 0;
		}
	}

	DRMLISTJOIN(&bctx->pending, &bctx->current);
	DRMINITLISTHEAD(&bctx->pending);

	if (ret) {
		pushbuf_refn_fail(push, sref, srel);
		if (retry) {
			pushbuf_flush(push);
			return pushbuf_validate(push, false);
		}
	}

	return ret;
}

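/* Create a new pushbuf for the given FIFO channel.  A typical caller
 * sequence (illustrative sketch only, not taken from this file; the method
 * header value is device-specific and hypothetical) looks like:
 *
 *	struct nouveau_pushbuf *push;
 *	nouveau_pushbuf_new(client, chan, 4, 32 * 1024, true, &push);
 *	nouveau_pushbuf_space(push, 64, 0, 0);
 *	*push->cur++ = 0x20010000;
 *	nouveau_pushbuf_kick(push, push->channel);
 */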
drm_public int
nouveau_pushbuf_new(struct nouveau_client *client, struct nouveau_object *chan,
		    int nr, uint32_t size, bool immediate,
		    struct nouveau_pushbuf **ppush)
{
	struct nouveau_device *dev = client->device;
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_pushbuf_priv *nvpb;
	struct nouveau_pushbuf *push;
	struct drm_nouveau_gem_pushbuf req = {};
	int ret;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	/* nop pushbuf call, to get the current "return to main" sequence
	 * we need to append to the pushbuf on early chipsets
	 */
	req.channel = fifo->channel;
	req.nr_push = 0;
	ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_GEM_PUSHBUF,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvpb = calloc(1, sizeof(*nvpb) + nr * sizeof(*nvpb->bos));
	if (!nvpb)
		return -ENOMEM;

#ifndef SIMULATE
	nvpb->suffix0 = req.suffix0;
	nvpb->suffix1 = req.suffix1;
#else
	nvpb->suffix0 = 0xffffffff;
	nvpb->suffix1 = 0xffffffff;
#endif
	nvpb->krec = calloc(1, sizeof(*nvpb->krec));
	nvpb->list = nvpb->krec;
	if (!nvpb->krec) {
		free(nvpb);
		return -ENOMEM;
	}

	push = &nvpb->base;
	push->client = client;
	push->channel = immediate ? chan : NULL;
	push->flags = NOUVEAU_BO_RD;
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_GART) {
		push->flags |= NOUVEAU_BO_GART;
		nvpb->type = NOUVEAU_BO_GART;
	} else
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_VRAM) {
		push->flags |= NOUVEAU_BO_VRAM;
		nvpb->type = NOUVEAU_BO_VRAM;
	}
	nvpb->type |= NOUVEAU_BO_MAP;

	for (nvpb->bo_nr = 0; nvpb->bo_nr < nr; nvpb->bo_nr++) {
		ret = nouveau_bo_new(client->device, nvpb->type, 0, size,
				     NULL, &nvpb->bos[nvpb->bo_nr]);
		if (ret) {
			nouveau_pushbuf_del(&push);
			return ret;
		}
	}

	DRMINITLISTHEAD(&nvpb->bctx_list);
	*ppush = push;
	return 0;
}

drm_public void
nouveau_pushbuf_del(struct nouveau_pushbuf **ppush)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(*ppush);
	if (nvpb) {
		struct drm_nouveau_gem_pushbuf_bo *kref;
		struct nouveau_pushbuf_krec *krec;
		while ((krec = nvpb->list)) {
			kref = krec->buffer;
			while (krec->nr_buffer--) {
				unsigned long priv = kref++->user_priv;
				struct nouveau_bo *bo = (void *)priv;
				cli_kref_set(nvpb->base.client, bo, NULL, NULL);
				nouveau_bo_ref(NULL, &bo);
			}
			nvpb->list = krec->next;
			free(krec);
		}
		while (nvpb->bo_nr--)
			nouveau_bo_ref(NULL, &nvpb->bos[nvpb->bo_nr]);
		nouveau_bo_ref(NULL, &nvpb->bo);
		free(nvpb);
	}
	*ppush = NULL;
}

drm_public struct nouveau_bufctx *
nouveau_pushbuf_bufctx(struct nouveau_pushbuf *push, struct nouveau_bufctx *ctx)
{
	struct nouveau_bufctx *prev = push->bufctx;
	push->bufctx = ctx;
	return prev;
}

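/* Ensure there is room for the requested number of dwords, relocations and
 * push segments, switching to (or allocating) a new backing buffer and/or
 * flushing as required before the caller writes any commands.
 */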
drm_public int
nouveau_pushbuf_space(struct nouveau_pushbuf *push,
		      uint32_t dwords, uint32_t relocs, uint32_t pushes)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_client *client = push->client;
	struct nouveau_bo *bo = NULL;
	bool flushed = false;
	int ret = 0;

	/* switch to next buffer if insufficient space in the current one */
	if (push->cur + dwords >= push->end) {
		if (nvpb->bo_next < nvpb->bo_nr) {
			nouveau_bo_ref(nvpb->bos[nvpb->bo_next++], &bo);
			if (nvpb->bo_next == nvpb->bo_nr && push->channel)
				nvpb->bo_next = 0;
		} else {
			ret = nouveau_bo_new(client->device, nvpb->type, 0,
					     nvpb->bos[0]->size, NULL, &bo);
			if (ret)
				return ret;
		}
	}

	/* make sure there's always enough space to queue up the pending
	 * data in the pushbuf proper
	 */
	pushes++;

	/* need to flush if we've run out of space on an immediate pushbuf,
	 * if the new buffer won't fit, or if the kernel push/reloc limits
	 * have been hit
	 */
	if ((bo && (push->channel ||
		    !pushbuf_kref(push, bo, push->flags))) ||
	    krec->nr_reloc + relocs >= NOUVEAU_GEM_MAX_RELOCS ||
	    krec->nr_push + pushes >= NOUVEAU_GEM_MAX_PUSH) {
		if (nvpb->bo && krec->nr_buffer)
			pushbuf_flush(push);
		flushed = true;
	}

	/* if necessary, switch to new buffer */
	if (bo) {
		ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, push->client);
		if (ret)
			return ret;

		nouveau_pushbuf_data(push, NULL, 0, 0);
		nouveau_bo_ref(bo, &nvpb->bo);
		nouveau_bo_ref(NULL, &bo);

		nvpb->bgn = nvpb->bo->map;
		nvpb->ptr = nvpb->bgn;
		push->cur = nvpb->bgn;
		push->end = push->cur + (nvpb->bo->size / 4);
		push->end -= 2 + push->rsvd_kick; /* space for suffix */
	}

	pushbuf_kref(push, nvpb->bo, push->flags);
	return flushed ? pushbuf_validate(push, false) : 0;
}

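/* Queue a push segment for submission.  When called with a different bo
 * (including NULL) it first closes off the data written to the current
 * internal buffer, appending the chipset's suffix words when the kernel
 * provided them.
 */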
drm_public void
nouveau_pushbuf_data(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		     uint64_t offset, uint64_t length)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	if (bo != nvpb->bo && nvpb->bgn != push->cur) {
		if (nvpb->suffix0 || nvpb->suffix1) {
			*push->cur++ = nvpb->suffix0;
			*push->cur++ = nvpb->suffix1;
		}

		nouveau_pushbuf_data(push, nvpb->bo,
				     (nvpb->bgn - nvpb->ptr) * 4,
				     (push->cur - nvpb->bgn) * 4);
		nvpb->bgn = push->cur;
	}

	if (bo) {
		kref = cli_kref_get(push->client, bo);
		kpsh = &krec->push[krec->nr_push++];
		kpsh->bo_index = kref - krec->buffer;
		kpsh->offset = offset;
		kpsh->length = length;
	}
}

drm_public int
nouveau_pushbuf_refn(struct nouveau_pushbuf *push,
		     struct nouveau_pushbuf_refn *refs, int nr)
{
	return pushbuf_refn(push, true, refs, nr);
}

drm_public void
nouveau_pushbuf_reloc(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		      uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	*push->cur = pushbuf_krel(push, bo, data, flags, vor, tor);
	push->cur++;
}

drm_public int
nouveau_pushbuf_validate(struct nouveau_pushbuf *push)
{
	return pushbuf_validate(push, true);
}

drm_public uint32_t
nouveau_pushbuf_refd(struct nouveau_pushbuf *push, struct nouveau_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t flags = 0;

	if (cli_push_get(push->client, bo) == push) {
		kref = cli_kref_get(push->client, bo);
		if (kref->read_domains)
			flags |= NOUVEAU_BO_RD;
		if (kref->write_domains)
			flags |= NOUVEAU_BO_WR;
	}

	return flags;
}

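/* Submit everything accumulated so far.  Deferred pushbufs are submitted
 * directly; immediate ones are flushed and then re-validated so the caller
 * can keep writing commands afterwards.
 */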
drm_public int
nouveau_pushbuf_kick(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	if (!push->channel)
		return pushbuf_submit(push, chan);
	pushbuf_flush(push);
	return pushbuf_validate(push, false);
}