/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <stdbool.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>

#include <xf86drm.h>
#include <xf86atomic.h>
#include "libdrm.h"
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"
#ifdef DEBUG
uint32_t nouveau_debug = 0;

static void
debug_init(char *args)
{
        if (args) {
                int n = strtol(args, NULL, 0);
                if (n >= 0)
                        nouveau_debug = n;
        }
}
#endif
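
/* NOUVEAU_LIBDRM_DEBUG is parsed with strtol(..., 0) above, so decimal,
 * octal and hex all work; an illustrative invocation (debug builds only):
 *
 *      NOUVEAU_LIBDRM_DEBUG=0xf application
 */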

/* This is the old libdrm's version of nouveau_device_wrap(). The symbol
 * is kept here to prevent AIGLX from crashing if the DDX is linked against
 * the new libdrm, but the DRI driver against the old one.
 */
drm_public int
nouveau_device_open_existing(struct nouveau_device **pdev, int close, int fd,
                             drm_context_t ctx)
{
        return -EACCES;
}

drm_public int
nouveau_device_wrap(int fd, int close, struct nouveau_device **pdev)
{
        struct nouveau_device_priv *nvdev = calloc(1, sizeof(*nvdev));
        struct nouveau_device *dev;
        uint64_t chipset, vram, gart, bousage;
        drmVersionPtr ver;
        int ret;
        char *tmp;

#ifdef DEBUG
        debug_init(getenv("NOUVEAU_LIBDRM_DEBUG"));
#endif

        if (!nvdev)
                return -ENOMEM;
        dev = &nvdev->base;

        ret = pthread_mutex_init(&nvdev->lock, NULL);
        if (ret) {
                free(nvdev);
                /* pthread returns positive error numbers; negate for
                 * consistency with the rest of this API */
                return -ret;
        }

        nvdev->base.fd = fd;

        ver = drmGetVersion(fd);
        if (ver)
                dev->drm_version = (ver->version_major << 24) |
                                   (ver->version_minor << 8) |
                                    ver->version_patchlevel;
        drmFreeVersion(ver);

        /* accept kernel interface 0.0.16, or anything in the 1.x series */
        if (dev->drm_version != 0x00000010 &&
            (dev->drm_version < 0x01000000 ||
             dev->drm_version >= 0x02000000)) {
                nouveau_device_del(&dev);
                return -EINVAL;
        }

        ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_CHIPSET_ID, &chipset);
        if (ret == 0)
                ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_FB_SIZE, &vram);
        if (ret == 0)
                ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_AGP_SIZE, &gart);
        if (ret) {
                nouveau_device_del(&dev);
                return ret;
        }

        ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_HAS_BO_USAGE, &bousage);
        if (ret == 0)
                nvdev->have_bo_usage = (bousage != 0);

        nvdev->close = close;

        tmp = getenv("NOUVEAU_LIBDRM_VRAM_LIMIT_PERCENT");
        if (tmp)
                nvdev->vram_limit_percent = atoi(tmp);
        else
                nvdev->vram_limit_percent = 80;
        tmp = getenv("NOUVEAU_LIBDRM_GART_LIMIT_PERCENT");
        if (tmp)
                nvdev->gart_limit_percent = atoi(tmp);
        else
                nvdev->gart_limit_percent = 80;
        DRMINITLISTHEAD(&nvdev->bo_list);
        nvdev->base.object.oclass = NOUVEAU_DEVICE_CLASS;
        nvdev->base.lib_version = 0x01000000;
        nvdev->base.chipset = chipset;
        nvdev->base.vram_size = vram;
        nvdev->base.gart_size = gart;
        nvdev->base.vram_limit =
                (nvdev->base.vram_size * nvdev->vram_limit_percent) / 100;
        nvdev->base.gart_limit =
                (nvdev->base.gart_size * nvdev->gart_limit_percent) / 100;

        *pdev = &nvdev->base;
        return 0;
}

drm_public int
nouveau_device_open(const char *busid, struct nouveau_device **pdev)
{
        int ret = -ENODEV, fd = drmOpen("nouveau", busid);
        if (fd >= 0) {
                ret = nouveau_device_wrap(fd, 1, pdev);
                if (ret)
                        drmClose(fd);
        }
        return ret;
}
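
/* Example: typical setup with nouveau_device_wrap()/nouveau_device_del()
 * (a sketch; the device path and error handling are illustrative, not part
 * of this library):
 *
 *      struct nouveau_device *dev;
 *      int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
 *      if (fd >= 0 && nouveau_device_wrap(fd, 0, &dev) == 0) {
 *              printf("chipset: nv%02x\n", dev->chipset);
 *              nouveau_device_del(&dev);
 *      }
 *      if (fd >= 0)
 *              close(fd);      (close=0 above: the fd stays caller-owned)
 */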

drm_public void
nouveau_device_del(struct nouveau_device **pdev)
{
        struct nouveau_device_priv *nvdev = nouveau_device(*pdev);
        if (nvdev) {
                if (nvdev->close)
                        drmClose(nvdev->base.fd);
                free(nvdev->client);
                pthread_mutex_destroy(&nvdev->lock);
                free(nvdev);
                *pdev = NULL;
        }
}

drm_public int
nouveau_getparam(struct nouveau_device *dev, uint64_t param, uint64_t *value)
{
        struct drm_nouveau_getparam r = { param, 0 };
        int fd = dev->fd;
        int ret = drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &r, sizeof(r));
        *value = r.value;
        return ret;
}
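
/* Example: querying a device property (a sketch; the parameter name comes
 * from nouveau_drm.h, and inttypes.h would be needed for PRIx64):
 *
 *      uint64_t val;
 *      if (nouveau_getparam(dev, NOUVEAU_GETPARAM_PCI_DEVICE, &val) == 0)
 *              printf("pci device id: 0x%04" PRIx64 "\n", val);
 */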

drm_public int
nouveau_setparam(struct nouveau_device *dev, uint64_t param, uint64_t value)
{
        struct drm_nouveau_setparam r = { param, value };
        return drmCommandWrite(dev->fd, DRM_NOUVEAU_SETPARAM, &r, sizeof(r));
}

drm_public int
nouveau_client_new(struct nouveau_device *dev, struct nouveau_client **pclient)
{
        struct nouveau_device_priv *nvdev = nouveau_device(dev);
        struct nouveau_client_priv *pcli;
        int id = 0, i, ret = -ENOMEM;
        uint32_t *clients;

        pthread_mutex_lock(&nvdev->lock);

        /* find a free client id; a cleared bit in the bitmap means free */
        for (i = 0; i < nvdev->nr_client; i++) {
                id = ffs(~nvdev->client[i]) - 1;
                if (id >= 0)
                        goto out;
        }

        /* all words full (or none allocated yet): grow by one word */
        clients = realloc(nvdev->client, sizeof(uint32_t) * (i + 1));
        if (!clients)
                goto unlock;
        nvdev->client = clients;
        nvdev->client[i] = 0;
        nvdev->nr_client++;
        id = 0;

out:
        pcli = calloc(1, sizeof(*pcli));
        if (pcli) {
                nvdev->client[i] |= (1U << id);
                pcli->base.device = dev;
                pcli->base.id = (i * 32) + id;
                *pclient = &pcli->base;
                ret = 0;
        }

unlock:
        pthread_mutex_unlock(&nvdev->lock);
        return ret;
}

drm_public void
nouveau_client_del(struct nouveau_client **pclient)
{
        struct nouveau_client_priv *pcli = nouveau_client(*pclient);
        struct nouveau_device_priv *nvdev;
        if (pcli) {
                int id = pcli->base.id;
                nvdev = nouveau_device(pcli->base.device);
                pthread_mutex_lock(&nvdev->lock);
                nvdev->client[id / 32] &= ~(1U << (id % 32));
                pthread_mutex_unlock(&nvdev->lock);
                free(pcli->kref);
                free(pcli);
        }
}
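
/* Example: the intended new/del pairing for clients (a sketch; pushbuf
 * setup between the two calls is up to the caller):
 *
 *      struct nouveau_client *client;
 *      if (nouveau_client_new(dev, &client) == 0) {
 *              ... create pushbufs, submit work ...
 *              nouveau_client_del(&client);
 *      }
 */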

drm_public int
nouveau_object_new(struct nouveau_object *parent, uint64_t handle,
                   uint32_t oclass, void *data, uint32_t length,
                   struct nouveau_object **pobj)
{
        struct nouveau_device *dev;
        struct nouveau_object *obj;
        int ret = -EINVAL;

        if (length == 0)
                length = sizeof(struct nouveau_object *);
        obj = malloc(sizeof(*obj) + length);
        if (!obj)
                return -ENOMEM;
        obj->parent = parent;
        obj->handle = handle;
        obj->oclass = oclass;
        obj->length = length;
        obj->data = obj + 1;
        if (data)
                memcpy(obj->data, data, length);
        *(struct nouveau_object **)obj->data = obj;

        dev = nouveau_object_find(obj, NOUVEAU_DEVICE_CLASS);
        switch (parent->oclass) {
        case NOUVEAU_DEVICE_CLASS:
                switch (obj->oclass) {
                case NOUVEAU_FIFO_CHANNEL_CLASS:
                        if (dev->chipset < 0xc0)
                                ret = abi16_chan_nv04(obj);
                        else
                        if (dev->chipset < 0xe0)
                                ret = abi16_chan_nvc0(obj);
                        else
                                ret = abi16_chan_nve0(obj);
                        break;
                default:
                        break;
                }
                break;
        case NOUVEAU_FIFO_CHANNEL_CLASS:
                switch (obj->oclass) {
                case NOUVEAU_NOTIFIER_CLASS:
                        ret = abi16_ntfy(obj);
                        break;
                default:
                        ret = abi16_engobj(obj);
                        break;
                }
                break;
        default:
                break;
        }

        if (ret) {
                free(obj);
                return ret;
        }

        *pobj = obj;
        return 0;
}
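
/* Example: creating a FIFO channel on the device (a sketch mirroring the
 * chipset branches above; the nv04_fifo/nvc0_fifo descriptors are declared
 * in nouveau.h, and the vram/gart cookie values are illustrative):
 *
 *      struct nouveau_object *chan;
 *      struct nv04_fifo nv04_data = { .vram = 0xbeef0201,
 *                                     .gart = 0xbeef0202 };
 *      struct nvc0_fifo nvc0_data = { };
 *      int ret;
 *
 *      if (dev->chipset < 0xc0)
 *              ret = nouveau_object_new(&dev->object, 0,
 *                                       NOUVEAU_FIFO_CHANNEL_CLASS,
 *                                       &nv04_data, sizeof(nv04_data), &chan);
 *      else
 *              ret = nouveau_object_new(&dev->object, 0,
 *                                       NOUVEAU_FIFO_CHANNEL_CLASS,
 *                                       &nvc0_data, sizeof(nvc0_data), &chan);
 */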

drm_public void
nouveau_object_del(struct nouveau_object **pobj)
{
        struct nouveau_object *obj = *pobj;
        struct nouveau_device *dev;
        if (obj) {
                dev = nouveau_object_find(obj, NOUVEAU_DEVICE_CLASS);
                if (obj->oclass == NOUVEAU_FIFO_CHANNEL_CLASS) {
                        struct drm_nouveau_channel_free req;
                        req.channel = obj->handle;
                        drmCommandWrite(dev->fd, DRM_NOUVEAU_CHANNEL_FREE,
                                        &req, sizeof(req));
                } else {
                        struct drm_nouveau_gpuobj_free req;
                        req.channel = obj->parent->handle;
                        req.handle = obj->handle;
                        drmCommandWrite(dev->fd, DRM_NOUVEAU_GPUOBJ_FREE,
                                        &req, sizeof(req));
                }
        }
        free(obj);
        *pobj = NULL;
}

drm_public void *
nouveau_object_find(struct nouveau_object *obj, uint32_t pclass)
{
        while (obj && obj->oclass != pclass) {
                obj = obj->parent;
                if (pclass == NOUVEAU_PARENT_CLASS)
                        break;
        }
        return obj;
}

static void
nouveau_bo_del(struct nouveau_bo *bo)
{
        struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
        struct drm_gem_close req = { bo->handle };

        pthread_mutex_lock(&nvdev->lock);
        if (nvbo->name) {
                if (atomic_read(&nvbo->refcnt)) {
                        /*
                         * bo has been revived by a race with
                         * nouveau_bo_prime_handle_ref, or nouveau_bo_name_ref.
                         *
                         * In theory there's still a race possible with
                         * nouveau_bo_wrap, but when using this function
                         * the lifetime of the handle is probably already
                         * handled in another way. If there are races
                         * you're probably using nouveau_bo_wrap wrong.
                         */
                        pthread_mutex_unlock(&nvdev->lock);
                        return;
                }
                DRMLISTDEL(&nvbo->head);
                /*
                 * This bo has to be closed with the lock held because gem
                 * handles are not refcounted. If a shared bo is closed and
                 * re-opened in another thread a race against
                 * DRM_IOCTL_GEM_OPEN or drmPrimeFDToHandle might cause the
                 * bo to be closed accidentally while re-importing.
                 */
                drmIoctl(bo->device->fd, DRM_IOCTL_GEM_CLOSE, &req);
                pthread_mutex_unlock(&nvdev->lock);
        } else {
                DRMLISTDEL(&nvbo->head);
                pthread_mutex_unlock(&nvdev->lock);
                drmIoctl(bo->device->fd, DRM_IOCTL_GEM_CLOSE, &req);
        }
        if (bo->map)
                drm_munmap(bo->map, bo->size);
        free(nvbo);
}

drm_public int
nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, uint32_t align,
               uint64_t size, union nouveau_bo_config *config,
               struct nouveau_bo **pbo)
{
        struct nouveau_device_priv *nvdev = nouveau_device(dev);
        struct nouveau_bo_priv *nvbo = calloc(1, sizeof(*nvbo));
        struct nouveau_bo *bo;
        int ret;

        if (!nvbo)
                return -ENOMEM;
        bo = &nvbo->base;
        atomic_set(&nvbo->refcnt, 1);
        bo->device = dev;
        bo->flags = flags;
        bo->size = size;

        ret = abi16_bo_init(bo, align, config);
        if (ret) {
                free(nvbo);
                return ret;
        }

        pthread_mutex_lock(&nvdev->lock);
        DRMLISTADD(&nvbo->head, &nvdev->bo_list);
        pthread_mutex_unlock(&nvdev->lock);

        *pbo = bo;
        return 0;
}
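
/* Example: allocating and releasing a mappable scratch buffer (a sketch;
 * the flags, alignment and size are illustrative):
 *
 *      struct nouveau_bo *bo = NULL;
 *      int ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM | NOUVEAU_BO_MAP,
 *                               0x1000, 65536, NULL, &bo);
 *      if (ret == 0)
 *              nouveau_bo_ref(NULL, &bo);      (drops the initial reference)
 */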

static int
nouveau_bo_wrap_locked(struct nouveau_device *dev, uint32_t handle,
                       struct nouveau_bo **pbo)
{
        struct nouveau_device_priv *nvdev = nouveau_device(dev);
        struct drm_nouveau_gem_info req = { .handle = handle };
        struct nouveau_bo_priv *nvbo;
        int ret;

        DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
                if (nvbo->base.handle == handle) {
                        *pbo = NULL;
                        nouveau_bo_ref(&nvbo->base, pbo);
                        return 0;
                }
        }

        ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_GEM_INFO,
                                  &req, sizeof(req));
        if (ret)
                return ret;

        nvbo = calloc(1, sizeof(*nvbo));
        if (nvbo) {
                atomic_set(&nvbo->refcnt, 1);
                nvbo->base.device = dev;
                abi16_bo_info(&nvbo->base, &req);
                DRMLISTADD(&nvbo->head, &nvdev->bo_list);
                *pbo = &nvbo->base;
                return 0;
        }

        return -ENOMEM;
}

drm_public int
nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle,
                struct nouveau_bo **pbo)
{
        struct nouveau_device_priv *nvdev = nouveau_device(dev);
        int ret;
        pthread_mutex_lock(&nvdev->lock);
        ret = nouveau_bo_wrap_locked(dev, handle, pbo);
        pthread_mutex_unlock(&nvdev->lock);
        return ret;
}

drm_public int
nouveau_bo_name_ref(struct nouveau_device *dev, uint32_t name,
                    struct nouveau_bo **pbo)
{
        struct nouveau_device_priv *nvdev = nouveau_device(dev);
        struct nouveau_bo_priv *nvbo;
        struct drm_gem_open req = { .name = name };
        int ret;

        pthread_mutex_lock(&nvdev->lock);
        DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
                if (nvbo->name == name) {
                        *pbo = NULL;
                        nouveau_bo_ref(&nvbo->base, pbo);
                        pthread_mutex_unlock(&nvdev->lock);
                        return 0;
                }
        }

        ret = drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req);
        if (ret == 0) {
                ret = nouveau_bo_wrap_locked(dev, req.handle, pbo);
                if (ret == 0)
                        nouveau_bo(*pbo)->name = name;
        }

        pthread_mutex_unlock(&nvdev->lock);
        return ret;
}

drm_public int
nouveau_bo_name_get(struct nouveau_bo *bo, uint32_t *name)
{
        struct drm_gem_flink req = { .handle = bo->handle };
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

        *name = nvbo->name;
        if (!*name || *name == ~0U) {
                int ret = drmIoctl(bo->device->fd, DRM_IOCTL_GEM_FLINK, &req);
                if (ret) {
                        *name = 0;
                        return ret;
                }
                nvbo->name = *name = req.name;
        }
        return 0;
}
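
/* Example: sharing a bo through a global flink name (a sketch; how the name
 * travels between processes is up to the caller):
 *
 *      exporter:
 *              uint32_t name;
 *              if (nouveau_bo_name_get(bo, &name) == 0)
 *                      send name to the other process
 *
 *      importer:
 *              struct nouveau_bo *shared = NULL;
 *              if (nouveau_bo_name_ref(dev, name, &shared) == 0)
 *                      use shared, then nouveau_bo_ref(NULL, &shared)
 */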

drm_public void
nouveau_bo_ref(struct nouveau_bo *bo, struct nouveau_bo **pref)
{
        struct nouveau_bo *ref = *pref;
        if (bo)
                atomic_inc(&nouveau_bo(bo)->refcnt);
        if (ref) {
                if (atomic_dec_and_test(&nouveau_bo(ref)->refcnt))
                        nouveau_bo_del(ref);
        }
        *pref = bo;
}
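
/* nouveau_bo_ref() follows the usual "reference into a pointer slot"
 * pattern; a minimal usage sketch:
 *
 *      struct nouveau_bo *copy = NULL;
 *      nouveau_bo_ref(bo, &copy);      (takes a reference on bo)
 *      nouveau_bo_ref(NULL, &copy);    (drops it and clears the slot)
 */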

drm_public int
nouveau_bo_prime_handle_ref(struct nouveau_device *dev, int prime_fd,
                            struct nouveau_bo **bo)
{
        struct nouveau_device_priv *nvdev = nouveau_device(dev);
        int ret;
        unsigned int handle;

        nouveau_bo_ref(NULL, bo);

        pthread_mutex_lock(&nvdev->lock);
        ret = drmPrimeFDToHandle(dev->fd, prime_fd, &handle);
        if (ret == 0) {
                ret = nouveau_bo_wrap_locked(dev, handle, bo);
                if (!ret) {
                        struct nouveau_bo_priv *nvbo = nouveau_bo(*bo);
                        if (!nvbo->name) {
                                /*
                                 * XXX: Force locked DRM_IOCTL_GEM_CLOSE
                                 * to rule out race conditions
                                 */
                                nvbo->name = ~0;
                        }
                }
        }
        pthread_mutex_unlock(&nvdev->lock);
        return ret;
}

drm_public int
nouveau_bo_set_prime(struct nouveau_bo *bo, int *prime_fd)
{
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
        int ret;

        ret = drmPrimeHandleToFD(bo->device->fd, nvbo->base.handle,
                                 DRM_CLOEXEC, prime_fd);
        if (ret)
                return ret;
        if (!nvbo->name)
                nvbo->name = ~0;
        return 0;
}
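
/* Example: dma-buf (PRIME) sharing (a sketch; the fd is normally passed to
 * the importing process over a unix socket):
 *
 *      exporter:
 *              int prime_fd;
 *              if (nouveau_bo_set_prime(bo, &prime_fd) == 0)
 *                      hand prime_fd to the importer
 *
 *      importer:
 *              struct nouveau_bo *imported = NULL;
 *              if (nouveau_bo_prime_handle_ref(dev, prime_fd, &imported) == 0)
 *                      use imported, then nouveau_bo_ref(NULL, &imported)
 */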

drm_public int
nouveau_bo_wait(struct nouveau_bo *bo, uint32_t access,
                struct nouveau_client *client)
{
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
        struct drm_nouveau_gem_cpu_prep req;
        struct nouveau_pushbuf *push;
        int ret = 0;

        if (!(access & NOUVEAU_BO_RDWR))
                return 0;

        push = cli_push_get(client, bo);
        if (push && push->channel)
                nouveau_pushbuf_kick(push, push->channel);

        if (!nvbo->name && !(nvbo->access & NOUVEAU_BO_WR) &&
            !(access & NOUVEAU_BO_WR))
                return 0;

        req.handle = bo->handle;
        req.flags = 0;
        if (access & NOUVEAU_BO_WR)
                req.flags |= NOUVEAU_GEM_CPU_PREP_WRITE;
        if (access & NOUVEAU_BO_NOBLOCK)
                req.flags |= NOUVEAU_GEM_CPU_PREP_NOWAIT;

        ret = drmCommandWrite(bo->device->fd, DRM_NOUVEAU_GEM_CPU_PREP,
                              &req, sizeof(req));
        if (ret == 0)
                nvbo->access = 0;
        return ret;
}

drm_public int
nouveau_bo_map(struct nouveau_bo *bo, uint32_t access,
               struct nouveau_client *client)
{
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
        if (bo->map == NULL) {
                bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, bo->device->fd,
                                   nvbo->map_handle);
                if (bo->map == MAP_FAILED) {
                        bo->map = NULL;
                        return -errno;
                }
        }
        return nouveau_bo_wait(bo, access, client);
}
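
/* Example: CPU access to a buffer (a sketch; assumes the bo was created
 * with NOUVEAU_BO_MAP and that blocking on the GPU is acceptable):
 *
 *      if (nouveau_bo_map(bo, NOUVEAU_BO_WR, client) == 0)
 *              memset(bo->map, 0, bo->size);
 */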