/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <stdbool.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>

#include <xf86drm.h>
#include <xf86atomic.h>
#include "libdrm_macros.h"
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"

#include "nvif/class.h"
#include "nvif/cl0080.h"
#include "nvif/ioctl.h"
#include "nvif/unpack.h"

#ifdef DEBUG
drm_private uint32_t nouveau_debug = 0;

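/*
 * Parse the NOUVEAU_LIBDRM_DEBUG environment variable (any base accepted
 * by strtol()) into the library-wide debug level; negative values are
 * ignored.
 */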
static void
debug_init(char *args)
{
	if (args) {
		int n = strtol(args, NULL, 0);
		if (n >= 0)
			nouveau_debug = n;
	}
}
#endif

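/*
 * Fill in the routing fields of the nvif_ioctl_v0 header (objects created
 * through the nvif path are addressed by pointer with route 0x00, abi16
 * objects by their handle with route 0xff) and submit the request via
 * DRM_NOUVEAU_NVIF.
 */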
static int
nouveau_object_ioctl(struct nouveau_object *obj, void *data, uint32_t size)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	union {
		struct nvif_ioctl_v0 v0;
	} *args = data;
	uint32_t argc = size;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
		if (!obj->length) {
			if (obj != &drm->client)
				args->v0.object = (unsigned long)(void *)obj;
			else
				args->v0.object = 0;
			args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
			args->v0.route = 0x00;
		} else {
			args->v0.route = 0xff;
			args->v0.token = obj->handle;
		}
	} else
		return ret;

	return drmCommandWriteRead(drm->fd, DRM_NOUVEAU_NVIF, args, argc);
}

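/*
 * Issue an NVIF_IOCTL_V0_MTHD request against an object.  The method
 * arguments in 'data' are copied into the ioctl buffer and copied back
 * afterwards, so 'data' carries both input and output.  Requests small
 * enough to fit in a 128-byte stack buffer avoid a heap allocation.
 *
 * A usage sketch (this is how nouveau_device_new() below queries device
 * info):
 *
 *	struct nv_device_info_v0 info = { .version = 0 };
 *	ret = nouveau_object_mthd(&dev->object, NV_DEVICE_V0_INFO,
 *				  &info, sizeof(info));
 */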
int
nouveau_object_mthd(struct nouveau_object *obj,
		    uint32_t mthd, void *data, uint32_t size)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_mthd_v0 mthd;
	} *args;
	uint32_t argc = sizeof(*args) + size;
	uint8_t stack[128];
	int ret;

	if (!drm->nvif)
		return -ENOSYS;

	if (argc > sizeof(stack)) {
		if (!(args = malloc(argc)))
			return -ENOMEM;
	} else {
		args = (void *)stack;
	}
	args->ioctl.version = 0;
	args->ioctl.type = NVIF_IOCTL_V0_MTHD;
	args->mthd.version = 0;
	args->mthd.method = mthd;

	memcpy(args->mthd.data, data, size);
	ret = nouveau_object_ioctl(obj, args, argc);
	memcpy(data, args->mthd.data, size);
	if (args != (void *)stack)
		free(args);
	return ret;
}

void
nouveau_object_sclass_put(struct nouveau_sclass **psclass)
{
	free(*psclass);
	*psclass = NULL;
}

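/*
 * Query the list of classes supported by an object.  The kernel is asked
 * repeatedly: each pass reports how many entries are needed, and the
 * buffer is reallocated until the reply fits.  Returns the number of
 * classes on success (the caller frees the list with
 * nouveau_object_sclass_put()), or a negative error code.
 */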
int
nouveau_object_sclass_get(struct nouveau_object *obj,
			  struct nouveau_sclass **psclass)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_sclass_v0 sclass;
	} *args = NULL;
	struct nouveau_sclass *sclass;
	int ret, cnt = 0, i;
	uint32_t size;

	if (!drm->nvif)
		return abi16_sclass(obj, psclass);

	while (1) {
		size = sizeof(*args) + cnt * sizeof(args->sclass.oclass[0]);
		if (!(args = malloc(size)))
			return -ENOMEM;
		args->ioctl.version = 0;
		args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
		args->sclass.version = 0;
		args->sclass.count = cnt;

		ret = nouveau_object_ioctl(obj, args, size);
		if (ret == 0 && args->sclass.count <= cnt)
			break;
		cnt = args->sclass.count;
		free(args);
		if (ret != 0)
			return ret;
	}

	if ((sclass = calloc(args->sclass.count, sizeof(*sclass)))) {
		for (i = 0; i < args->sclass.count; i++) {
			sclass[i].oclass = args->sclass.oclass[i].oclass;
			sclass[i].minver = args->sclass.oclass[i].minver;
			sclass[i].maxver = args->sclass.oclass[i].maxver;
		}
		*psclass = sclass;
		ret = args->sclass.count;
	} else {
		ret = -ENOMEM;
	}

	free(args);
	return ret;
}

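/*
 * Find the first entry of a caller-supplied, zero-terminated class list
 * that the object supports (matching class number and version range).
 * Returns the index of the matching entry, or -ENODEV if none match.
 *
 * A usage sketch (the class/version values are purely illustrative):
 *
 *	static const struct nouveau_mclass mclass[] = {
 *		{ 0x906f, 0 },
 *		{ 0x826f, 0 },
 *		{}
 *	};
 *	int idx = nouveau_object_mclass(obj, mclass);
 */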
int
nouveau_object_mclass(struct nouveau_object *obj,
		      const struct nouveau_mclass *mclass)
{
	struct nouveau_sclass *sclass;
	int ret = -ENODEV;
	int cnt, i, j;

	cnt = nouveau_object_sclass_get(obj, &sclass);
	if (cnt < 0)
		return cnt;

	for (i = 0; ret < 0 && mclass[i].oclass; i++) {
		for (j = 0; j < cnt; j++) {
			if (mclass[i].oclass  == sclass[j].oclass &&
			    mclass[i].version >= sclass[j].minver &&
			    mclass[i].version <= sclass[j].maxver) {
				ret = i;
				break;
			}
		}
	}

	nouveau_object_sclass_put(&sclass);
	return ret;
}

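/*
 * Destroy the kernel-side object.  abi16 objects (those carrying
 * obj->data) are torn down through the legacy path; nvif objects are
 * deleted with an NVIF_IOCTL_V0_DEL request.
 */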
static void
nouveau_object_fini(struct nouveau_object *obj)
{
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_del del;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_DEL,
	};

	if (obj->data) {
		abi16_delete(obj);
		free(obj->data);
		obj->data = NULL;
		return;
	}

	nouveau_object_ioctl(obj, &args, sizeof(args));
}

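/*
 * Initialize an already-allocated nouveau_object.  Objects of a class
 * that abi16_object() does not emulate are created with an
 * NVIF_IOCTL_V0_NEW request (when the kernel interface supports it);
 * abi16-emulated classes are constructed by the function abi16_object()
 * returns, with obj->data/obj->length holding the legacy object state.
 */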
static int
nouveau_object_init(struct nouveau_object *parent, uint32_t handle,
		    int32_t oclass, void *data, uint32_t size,
		    struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(parent);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_new_v0 new;
	} *args;
	uint32_t argc = sizeof(*args) + size;
	int (*func)(struct nouveau_object *);
	int ret = -ENOSYS;

	obj->parent = parent;
	obj->handle = handle;
	obj->oclass = oclass;
	obj->length = 0;
	obj->data = NULL;

	if (!abi16_object(obj, &func) && drm->nvif) {
		if (!(args = malloc(argc)))
			return -ENOMEM;
		args->ioctl.version = 0;
		args->ioctl.type = NVIF_IOCTL_V0_NEW;
		args->new.version = 0;
		args->new.route = NVIF_IOCTL_V0_ROUTE_NVIF;
		args->new.token = (unsigned long)(void *)obj;
		args->new.object = (unsigned long)(void *)obj;
		args->new.handle = handle;
		args->new.oclass = oclass;
		memcpy(args->new.data, data, size);
		ret = nouveau_object_ioctl(parent, args, argc);
		memcpy(data, args->new.data, size);
		free(args);
	} else
	if (func) {
		obj->length = size ? size : sizeof(struct nouveau_object *);
		if (!(obj->data = malloc(obj->length)))
			return -ENOMEM;
		if (data)
			memcpy(obj->data, data, obj->length);
		*(struct nouveau_object **)obj->data = obj;

		ret = func(obj);
	}

	if (ret) {
		nouveau_object_fini(obj);
		return ret;
	}

	return 0;
}

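/*
 * Allocate and initialize a child object of 'parent'.  On success the
 * new object is returned through 'pobj'; on failure the allocation is
 * released and the error from nouveau_object_init() is returned.
 *
 * A usage sketch ('oclass' and 'args' are placeholders for a real class
 * number and its argument struct):
 *
 *	struct nouveau_object *obj;
 *	ret = nouveau_object_new(&dev->object, 0, oclass,
 *				 &args, sizeof(args), &obj);
 *	...
 *	nouveau_object_del(&obj);
 */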
int
nouveau_object_new(struct nouveau_object *parent, uint64_t handle,
		   uint32_t oclass, void *data, uint32_t length,
		   struct nouveau_object **pobj)
{
	struct nouveau_object *obj;
	int ret;

	if (!(obj = malloc(sizeof(*obj))))
		return -ENOMEM;

	ret = nouveau_object_init(parent, handle, oclass, data, length, obj);
	if (ret) {
		free(obj);
		return ret;
	}

	*pobj = obj;
	return 0;
}

void
nouveau_object_del(struct nouveau_object **pobj)
{
	struct nouveau_object *obj = *pobj;
	if (obj) {
		nouveau_object_fini(obj);
		free(obj);
		*pobj = NULL;
	}
}

void
nouveau_drm_del(struct nouveau_drm **pdrm)
{
	free(*pdrm);
	*pdrm = NULL;
}

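/*
 * Wrap a DRM file descriptor.  The driver version is packed as
 * (major << 24) | (minor << 8) | patchlevel; the nvif object interface
 * is used when the kernel reports at least 1.3.1 (0x01000301).
 */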
int
nouveau_drm_new(int fd, struct nouveau_drm **pdrm)
{
	struct nouveau_drm *drm;
	drmVersionPtr ver;

#ifdef DEBUG
	debug_init(getenv("NOUVEAU_LIBDRM_DEBUG"));
#endif

	if (!(drm = calloc(1, sizeof(*drm))))
		return -ENOMEM;
	drm->fd = fd;

	if (!(ver = drmGetVersion(fd))) {
		nouveau_drm_del(&drm);
		return -EINVAL;
	}
	*pdrm = drm;

	drm->version = (ver->version_major << 24) |
		       (ver->version_minor << 8) |
		        ver->version_patchlevel;
	drm->nvif = (drm->version >= 0x01000301);
	drmFreeVersion(ver);
	return 0;
}

/* this is the old libdrm's version of nouveau_device_wrap(), the symbol
 * is kept here to prevent AIGLX from crashing if the DDX is linked against
 * the new libdrm, but the DRI driver against the old
 */
int
nouveau_device_open_existing(struct nouveau_device **pdev, int close, int fd,
			     drm_context_t ctx)
{
	return -EACCES;
}

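/*
 * Create a device object.  On an nvif-capable kernel the device is a
 * real nvif object and its chipset comes from NV_DEVICE_V0_INFO;
 * otherwise the legacy path (only device == ~0ULL is accepted) uses
 * getparam for the chipset and BO-usage capability.  VRAM/GART sizes are
 * always queried through getparam, and the optional
 * NOUVEAU_LIBDRM_VRAM_LIMIT_PERCENT / NOUVEAU_LIBDRM_GART_LIMIT_PERCENT
 * environment variables (default 80) cap the per-device memory limits.
 */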
int
nouveau_device_new(struct nouveau_object *parent, int32_t oclass,
		   void *data, uint32_t size, struct nouveau_device **pdev)
{
	struct nv_device_info_v0 info = {};
	union {
		struct nv_device_v0 v0;
	} *args = data;
	uint32_t argc = size;
	struct nouveau_drm *drm = nouveau_drm(parent);
	struct nouveau_device_priv *nvdev;
	struct nouveau_device *dev;
	uint64_t v;
	char *tmp;
	int ret = -ENOSYS;

	if (oclass != NV_DEVICE ||
	    nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))
		return ret;

	if (!(nvdev = calloc(1, sizeof(*nvdev))))
		return -ENOMEM;
	dev = *pdev = &nvdev->base;
	dev->fd = -1;

	if (drm->nvif) {
		ret = nouveau_object_init(parent, 0, oclass, args, argc,
					  &dev->object);
		if (ret)
			goto done;

		info.version = 0;

		ret = nouveau_object_mthd(&dev->object, NV_DEVICE_V0_INFO,
					  &info, sizeof(info));
		if (ret)
			goto done;

		nvdev->base.chipset = info.chipset;
		nvdev->have_bo_usage = true;
	} else
	if (args->v0.device == ~0ULL) {
		nvdev->base.object.parent = &drm->client;
		nvdev->base.object.handle = ~0ULL;
		nvdev->base.object.oclass = NOUVEAU_DEVICE_CLASS;
		nvdev->base.object.length = ~0;

		ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_CHIPSET_ID, &v);
		if (ret)
			goto done;
		nvdev->base.chipset = v;

		ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_HAS_BO_USAGE, &v);
		if (ret == 0)
			nvdev->have_bo_usage = (v != 0);
	} else
		return -ENOSYS;

	ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_FB_SIZE, &v);
	if (ret)
		goto done;
	nvdev->base.vram_size = v;

	ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_AGP_SIZE, &v);
	if (ret)
		goto done;
	nvdev->base.gart_size = v;

	tmp = getenv("NOUVEAU_LIBDRM_VRAM_LIMIT_PERCENT");
	if (tmp)
		nvdev->vram_limit_percent = atoi(tmp);
	else
		nvdev->vram_limit_percent = 80;

	nvdev->base.vram_limit =
		(nvdev->base.vram_size * nvdev->vram_limit_percent) / 100;

	tmp = getenv("NOUVEAU_LIBDRM_GART_LIMIT_PERCENT");
	if (tmp)
		nvdev->gart_limit_percent = atoi(tmp);
	else
		nvdev->gart_limit_percent = 80;

	nvdev->base.gart_limit =
		(nvdev->base.gart_size * nvdev->gart_limit_percent) / 100;

	ret = pthread_mutex_init(&nvdev->lock, NULL);
	DRMINITLISTHEAD(&nvdev->bo_list);
done:
	if (ret)
		nouveau_device_del(pdev);
	return ret;
}

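/*
 * Wrap an already-open DRM file descriptor in a nouveau_device.  The
 * nvif flag is cleared, so devices created this way always take the
 * legacy getparam/abi16 paths.  'close' controls whether
 * nouveau_device_del() will close the fd.
 *
 * A usage sketch (the fd is assumed to come from the caller, e.g. from
 * drmOpen() or a display server):
 *
 *	struct nouveau_device *dev;
 *	ret = nouveau_device_wrap(fd, 0, &dev);
 */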
int
nouveau_device_wrap(int fd, int close, struct nouveau_device **pdev)
{
	struct nouveau_drm *drm;
	struct nouveau_device_priv *nvdev;
	int ret;

	ret = nouveau_drm_new(fd, &drm);
	if (ret)
		return ret;
	drm->nvif = false;

	ret = nouveau_device_new(&drm->client, NV_DEVICE,
				 &(struct nv_device_v0) {
					.device = ~0ULL,
				 }, sizeof(struct nv_device_v0), pdev);
	if (ret) {
		nouveau_drm_del(&drm);
		return ret;
	}

	nvdev = nouveau_device(*pdev);
	nvdev->base.fd = drm->fd;
	nvdev->base.drm_version = drm->version;
	nvdev->close = close;
	return 0;
}

int
nouveau_device_open(const char *busid, struct nouveau_device **pdev)
{
	int ret = -ENODEV, fd = drmOpen("nouveau", busid);
	if (fd >= 0) {
		ret = nouveau_device_wrap(fd, 1, pdev);
		if (ret)
			drmClose(fd);
	}
	return ret;
}

void
nouveau_device_del(struct nouveau_device **pdev)
{
	struct nouveau_device_priv *nvdev = nouveau_device(*pdev);
	if (nvdev) {
		free(nvdev->client);
		pthread_mutex_destroy(&nvdev->lock);
		if (nvdev->base.fd >= 0) {
			struct nouveau_drm *drm =
				nouveau_drm(&nvdev->base.object);
			nouveau_drm_del(&drm);
			if (nvdev->close)
				drmClose(nvdev->base.fd);
		}
		free(nvdev);
		*pdev = NULL;
	}
}

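/*
 * Read a 64-bit driver parameter via DRM_NOUVEAU_GETPARAM.  Note that
 * *value is written from the reply even when the ioctl fails, so callers
 * must check the return code before trusting it.
 *
 * A usage sketch (mirroring the chipset query in nouveau_device_new()):
 *
 *	uint64_t v;
 *	if (!nouveau_getparam(dev, NOUVEAU_GETPARAM_CHIPSET_ID, &v))
 *		dev->chipset = v;
 */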
int
nouveau_getparam(struct nouveau_device *dev, uint64_t param, uint64_t *value)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_getparam r = { .param = param };
	int fd = drm->fd, ret =
		drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &r, sizeof(r));
	*value = r.value;
	return ret;
}

int
nouveau_setparam(struct nouveau_device *dev, uint64_t param, uint64_t value)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_setparam r = { .param = param, .value = value };
	return drmCommandWrite(drm->fd, DRM_NOUVEAU_SETPARAM, &r, sizeof(r));
}

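/*
 * Allocate a per-device client handle.  Ids are tracked in a bitfield
 * (32 per uint32_t in nvdev->client) under nvdev->lock, growing the
 * array by one word when needed; the client's id is (word * 32) + bit.
 */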
int
nouveau_client_new(struct nouveau_device *dev, struct nouveau_client **pclient)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_client_priv *pcli;
	int id = 0, i, ret = -ENOMEM;
	uint32_t *clients;

	pthread_mutex_lock(&nvdev->lock);

	for (i = 0; i < nvdev->nr_client; i++) {
		id = ffs(nvdev->client[i]) - 1;
		if (id >= 0)
			goto out;
	}

	clients = realloc(nvdev->client, sizeof(uint32_t) * (i + 1));
	if (!clients)
		goto unlock;
	nvdev->client = clients;
	nvdev->client[i] = 0;
	nvdev->nr_client++;

out:
	pcli = calloc(1, sizeof(*pcli));
	if (pcli) {
		nvdev->client[i] |= (1 << id);
		pcli->base.device = dev;
		pcli->base.id = (i * 32) + id;
		ret = 0;
	}

	*pclient = &pcli->base;

unlock:
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

void
nouveau_client_del(struct nouveau_client **pclient)
{
	struct nouveau_client_priv *pcli = nouveau_client(*pclient);
	struct nouveau_device_priv *nvdev;
	if (pcli) {
		int id = pcli->base.id;
		nvdev = nouveau_device(pcli->base.device);
		pthread_mutex_lock(&nvdev->lock);
		nvdev->client[id / 32] &= ~(1 << (id % 32));
		pthread_mutex_unlock(&nvdev->lock);
		free(pcli->kref);
		free(pcli);
	}
}

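/*
 * Final teardown of a buffer object once its refcount has dropped to
 * zero.  Globally visible bos (those on nvdev->bo_list) are removed and
 * closed with the device lock held; see the in-line comment below for
 * why the GEM_CLOSE must not race with a concurrent re-import.
 */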
static void
nouveau_bo_del(struct nouveau_bo *bo)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_gem_close req = { .handle = bo->handle };

	if (nvbo->head.next) {
		pthread_mutex_lock(&nvdev->lock);
		if (atomic_read(&nvbo->refcnt) == 0) {
			DRMLISTDEL(&nvbo->head);
			/*
			 * This bo has to be closed with the lock held because
			 * gem handles are not refcounted. If a shared bo is
			 * closed and re-opened in another thread a race
			 * against DRM_IOCTL_GEM_OPEN or drmPrimeFDToHandle
			 * might cause the bo to be closed accidentally while
			 * re-importing.
			 */
			drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &req);
		}
		pthread_mutex_unlock(&nvdev->lock);
	} else {
		drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}
	if (bo->map)
		drm_munmap(bo->map, bo->size);
	free(nvbo);
}

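/*
 * Allocate a new buffer object through the abi16 interface.
 *
 * A usage sketch (flags, alignment and size are illustrative only):
 *
 *	struct nouveau_bo *bo;
 *	ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM | NOUVEAU_BO_MAP,
 *			     0, 65536, NULL, &bo);
 *	...
 *	nouveau_bo_ref(NULL, &bo);
 */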
int
nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, uint32_t align,
	       uint64_t size, union nouveau_bo_config *config,
	       struct nouveau_bo **pbo)
{
	struct nouveau_bo_priv *nvbo = calloc(1, sizeof(*nvbo));
	struct nouveau_bo *bo = &nvbo->base;
	int ret;

	if (!nvbo)
		return -ENOMEM;
	atomic_set(&nvbo->refcnt, 1);
	bo->device = dev;
	bo->flags = flags;
	bo->size = size;

	ret = abi16_bo_init(bo, align, config);
	if (ret) {
		free(nvbo);
		return ret;
	}

	*pbo = bo;
	return 0;
}

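/*
 * Look up or create a nouveau_bo for an existing GEM handle.  Must be
 * called with nvdev->lock held: the device bo_list is searched first so
 * that a handle shared between threads resolves to a single nouveau_bo,
 * and a bo whose refcount already dropped to zero is unlinked and
 * replaced by a fresh wrapper (see the in-line comment below).
 */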
static int
nouveau_bo_wrap_locked(struct nouveau_device *dev, uint32_t handle,
		       struct nouveau_bo **pbo, int name)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct drm_nouveau_gem_info req = { .handle = handle };
	struct nouveau_bo_priv *nvbo;
	int ret;

	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
		if (nvbo->base.handle == handle) {
			if (atomic_inc_return(&nvbo->refcnt) == 1) {
				/*
				 * Uh oh, this bo is dead and someone else
				 * will free it, but because refcnt is
				 * now non-zero fortunately they won't
				 * call the ioctl to close the bo.
				 *
				 * Remove this bo from the list so other
				 * calls to nouveau_bo_wrap_locked will
				 * see our replacement nvbo.
				 */
				DRMLISTDEL(&nvbo->head);
				if (!name)
					name = nvbo->name;
				break;
			}

			*pbo = &nvbo->base;
			return 0;
		}
	}

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_INFO,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvbo = calloc(1, sizeof(*nvbo));
	if (nvbo) {
		atomic_set(&nvbo->refcnt, 1);
		nvbo->base.device = dev;
		abi16_bo_info(&nvbo->base, &req);
		nvbo->name = name;
		DRMLISTADD(&nvbo->head, &nvdev->bo_list);
		*pbo = &nvbo->base;
		return 0;
	}

	return -ENOMEM;
}

static void
nouveau_bo_make_global(struct nouveau_bo_priv *nvbo)
{
	if (!nvbo->head.next) {
		struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
		pthread_mutex_lock(&nvdev->lock);
		if (!nvbo->head.next)
			DRMLISTADD(&nvbo->head, &nvdev->bo_list);
		pthread_mutex_unlock(&nvdev->lock);
	}
}

int
nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle,
		struct nouveau_bo **pbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	int ret;
	pthread_mutex_lock(&nvdev->lock);
	ret = nouveau_bo_wrap_locked(dev, handle, pbo, 0);
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

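/*
 * Import a buffer object by its flink name.  Named bos already known to
 * this device are reused from bo_list; otherwise DRM_IOCTL_GEM_OPEN
 * turns the name into a handle, which is then wrapped under the same
 * lock.
 */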
int
nouveau_bo_name_ref(struct nouveau_device *dev, uint32_t name,
		    struct nouveau_bo **pbo)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_bo_priv *nvbo;
	struct drm_gem_open req = { .name = name };
	int ret;

	pthread_mutex_lock(&nvdev->lock);
	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
		if (nvbo->name == name) {
			ret = nouveau_bo_wrap_locked(dev, nvbo->base.handle,
						     pbo, name);
			pthread_mutex_unlock(&nvdev->lock);
			return ret;
		}
	}

	ret = drmIoctl(drm->fd, DRM_IOCTL_GEM_OPEN, &req);
	if (ret == 0) {
		ret = nouveau_bo_wrap_locked(dev, req.handle, pbo, name);
	}

	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

int
nouveau_bo_name_get(struct nouveau_bo *bo, uint32_t *name)
{
	struct drm_gem_flink req = { .handle = bo->handle };
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	*name = nvbo->name;
	if (!*name) {
		int ret = drmIoctl(drm->fd, DRM_IOCTL_GEM_FLINK, &req);

		if (ret) {
			*name = 0;
			return ret;
		}
		nvbo->name = *name = req.name;

		nouveau_bo_make_global(nvbo);
	}
	return 0;
}

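/*
 * Replace the reference held in *pref with 'bo': the new bo (if any)
 * gains a reference, and the previously referenced bo is destroyed once
 * its refcount reaches zero.  Passing bo == NULL simply drops the old
 * reference.
 */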
void
nouveau_bo_ref(struct nouveau_bo *bo, struct nouveau_bo **pref)
{
	struct nouveau_bo *ref = *pref;
	if (bo) {
		atomic_inc(&nouveau_bo(bo)->refcnt);
	}
	if (ref) {
		if (atomic_dec_and_test(&nouveau_bo(ref)->refcnt))
			nouveau_bo_del(ref);
	}
	*pref = bo;
}

int
nouveau_bo_prime_handle_ref(struct nouveau_device *dev, int prime_fd,
			    struct nouveau_bo **bo)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	int ret;
	unsigned int handle;

	nouveau_bo_ref(NULL, bo);

	pthread_mutex_lock(&nvdev->lock);
	ret = drmPrimeFDToHandle(drm->fd, prime_fd, &handle);
	if (ret == 0) {
		ret = nouveau_bo_wrap_locked(dev, handle, bo, 0);
	}
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

int
nouveau_bo_set_prime(struct nouveau_bo *bo, int *prime_fd)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	int ret;

	ret = drmPrimeHandleToFD(drm->fd, nvbo->base.handle, DRM_CLOEXEC, prime_fd);
	if (ret)
		return ret;

	nouveau_bo_make_global(nvbo);
	return 0;
}

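/*
 * Wait until the GPU is finished with a buffer before CPU access.  Any
 * pending pushbuf that references the bo is flushed first, read-only
 * access to a non-shared bo with no pending GPU writes returns
 * immediately, and the actual wait is performed by
 * DRM_NOUVEAU_GEM_CPU_PREP (non-blocking if NOUVEAU_BO_NOBLOCK is set).
 */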
int
nouveau_bo_wait(struct nouveau_bo *bo, uint32_t access,
		struct nouveau_client *client)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_cpu_prep req;
	struct nouveau_pushbuf *push;
	int ret = 0;

	if (!(access & NOUVEAU_BO_RDWR))
		return 0;

	push = cli_push_get(client, bo);
	if (push && push->channel)
		nouveau_pushbuf_kick(push, push->channel);

	if (!nvbo->head.next && !(nvbo->access & NOUVEAU_BO_WR) &&
				!(access & NOUVEAU_BO_WR))
		return 0;

	req.handle = bo->handle;
	req.flags = 0;
	if (access & NOUVEAU_BO_WR)
		req.flags |= NOUVEAU_GEM_CPU_PREP_WRITE;
	if (access & NOUVEAU_BO_NOBLOCK)
		req.flags |= NOUVEAU_GEM_CPU_PREP_NOWAIT;

	ret = drmCommandWrite(drm->fd, DRM_NOUVEAU_GEM_CPU_PREP,
			      &req, sizeof(req));
	if (ret == 0)
		nvbo->access = 0;
	return ret;
}

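/*
 * Map a buffer into the CPU's address space (lazily, on first use) and
 * wait for any conflicting GPU access.
 *
 * A usage sketch (the access flags are illustrative):
 *
 *	if (!nouveau_bo_map(bo, NOUVEAU_BO_WR, client))
 *		memset(bo->map, 0, bo->size);
 */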
int
nouveau_bo_map(struct nouveau_bo *bo, uint32_t access,
	       struct nouveau_client *client)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	if (bo->map == NULL) {
		bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
			       MAP_SHARED, drm->fd, nvbo->map_handle);
		if (bo->map == MAP_FAILED) {
			bo->map = NULL;
			return -errno;
		}
	}
	return nouveau_bo_wait(bo, access, client);
}