/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2011 Texas Instruments, Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <rob@ti.com>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <errno.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <pthread.h>

#include <libdrm.h>
#include <xf86drm.h>
#include <xf86atomic.h>

#include "omap_drm.h"
#include "omap_drmif.h"

#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define PAGE_SIZE 4096

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void * dev_table;

struct omap_device {
	int fd;
	atomic_t refcnt;

	/* The handle_table is used to track GEM bo handles associated w/
	 * this fd.  This is needed, in particular, when importing
	 * dmabuf's because we don't want multiple 'struct omap_bo's
	 * floating around with the same handle.  Otherwise, when the
	 * first one is omap_bo_del()'d the handle becomes no longer
	 * valid, and the remaining 'struct omap_bo's are left pointing
	 * to an invalid handle (and possibly a GEM bo that is already
	 * free'd).
	 */
	void *handle_table;
};

/* a GEM buffer object allocated from the DRM device */
struct omap_bo {
	struct omap_device *dev;
	void *map;		/* userspace mmap'ing (if there is one) */
	uint32_t size;
	uint32_t handle;
	uint32_t name;		/* flink global handle (DRI2 name) */
	uint64_t offset;	/* offset to mmap() */
	int fd;			/* dmabuf handle */
	atomic_t refcnt;
};

static struct omap_device * omap_device_new_impl(int fd)
{
	struct omap_device *dev = calloc(sizeof(*dev), 1);
	if (!dev)
		return NULL;
	dev->fd = fd;
	atomic_set(&dev->refcnt, 1);
	dev->handle_table = drmHashCreate();
	return dev;
}

drm_public struct omap_device * omap_device_new(int fd)
{
	struct omap_device *dev = NULL;

	pthread_mutex_lock(&table_lock);

	if (!dev_table)
		dev_table = drmHashCreate();

	if (drmHashLookup(dev_table, fd, (void **)&dev)) {
		/* not found, create new device */
		dev = omap_device_new_impl(fd);
		drmHashInsert(dev_table, fd, dev);
	} else {
		/* found, just incr refcnt */
		dev = omap_device_ref(dev);
	}

	pthread_mutex_unlock(&table_lock);

	return dev;
}

drm_public struct omap_device * omap_device_ref(struct omap_device *dev)
{
	atomic_inc(&dev->refcnt);
	return dev;
}

drm_public void omap_device_del(struct omap_device *dev)
{
	if (!atomic_dec_and_test(&dev->refcnt))
		return;
	pthread_mutex_lock(&table_lock);
	drmHashDestroy(dev->handle_table);
	drmHashDelete(dev_table, dev->fd);
	pthread_mutex_unlock(&table_lock);
	free(dev);
}

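/* Illustrative sketch, not part of the library: a typical caller opens the
 * DRM fd itself and wraps it with omap_device_new().  Devices are tracked
 * per-fd, so a second omap_device_new() on the same fd returns the same
 * device with its refcount bumped.  The device node path below is only an
 * assumption for the example; the fd stays owned by the caller.
 *
 *	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
 *	struct omap_device *dev = omap_device_new(fd);
 *	if (dev) {
 *		... allocate and use buffers ...
 *		omap_device_del(dev);
 *	}
 *	close(fd);	// the device does not close the fd for us
 */
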
drm_public int
omap_get_param(struct omap_device *dev, uint64_t param, uint64_t *value)
{
	struct drm_omap_param req = {
			.param = param,
	};
	int ret;

	ret = drmCommandWriteRead(dev->fd, DRM_OMAP_GET_PARAM, &req, sizeof(req));
	if (ret) {
		return ret;
	}

	*value = req.value;

	return 0;
}

drm_public int
omap_set_param(struct omap_device *dev, uint64_t param, uint64_t value)
{
	struct drm_omap_param req = {
			.param = param,
			.value = value,
	};
	return drmCommandWrite(dev->fd, DRM_OMAP_SET_PARAM, &req, sizeof(req));
}

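/* Illustrative sketch, not part of the library: querying a driver parameter.
 * OMAP_PARAM_CHIPSET_ID is taken from omap_drm.h; treat the exact parameter
 * used here as an assumption of the example.
 *
 *	uint64_t chipset = 0;
 *	if (omap_get_param(dev, OMAP_PARAM_CHIPSET_ID, &chipset) == 0)
 *		printf("chipset id: 0x%08x\n", (uint32_t)chipset);
 */
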
/* lookup a buffer from its handle, call w/ table_lock held: */
static struct omap_bo * lookup_bo(struct omap_device *dev,
		uint32_t handle)
{
	struct omap_bo *bo = NULL;
	if (!drmHashLookup(dev->handle_table, handle, (void **)&bo)) {
		/* found, incr refcnt and return: */
		bo = omap_bo_ref(bo);
	}
	return bo;
}

/* allocate a new buffer object, call w/ table_lock held */
static struct omap_bo * bo_from_handle(struct omap_device *dev,
		uint32_t handle)
{
	struct omap_bo *bo = calloc(sizeof(*bo), 1);
	if (!bo) {
		struct drm_gem_close req = {
				.handle = handle,
		};
		drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
		return NULL;
	}
	bo->dev = omap_device_ref(dev);
	bo->handle = handle;
	atomic_set(&bo->refcnt, 1);
	/* add ourselves to the handle table: */
	drmHashInsert(dev->handle_table, handle, bo);
	return bo;
}

/* allocate a new buffer object */
static struct omap_bo * omap_bo_new_impl(struct omap_device *dev,
		union omap_gem_size size, uint32_t flags)
{
	struct omap_bo *bo = NULL;
	struct drm_omap_gem_new req = {
			.size = size,
			.flags = flags,
	};

	if (size.bytes == 0) {
		goto fail;
	}

	if (drmCommandWriteRead(dev->fd, DRM_OMAP_GEM_NEW, &req, sizeof(req))) {
		goto fail;
	}

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, req.handle);
	pthread_mutex_unlock(&table_lock);

	if (flags & OMAP_BO_TILED) {
		bo->size = round_up(size.tiled.width, PAGE_SIZE) * size.tiled.height;
	} else {
		bo->size = size.bytes;
	}

	return bo;

fail:
	free(bo);
	return NULL;
}


/* allocate a new (un-tiled) buffer object */
drm_public struct omap_bo *
omap_bo_new(struct omap_device *dev, uint32_t size, uint32_t flags)
{
	union omap_gem_size gsize = {
			.bytes = size,
	};
	if (flags & OMAP_BO_TILED) {
		return NULL;
	}
	return omap_bo_new_impl(dev, gsize, flags);
}

/* allocate a new (tiled) buffer object */
drm_public struct omap_bo *
omap_bo_new_tiled(struct omap_device *dev, uint32_t width,
		uint32_t height, uint32_t flags)
{
	union omap_gem_size gsize = {
			.tiled = {
				.width = width,
				.height = height,
			},
	};
	if (!(flags & OMAP_BO_TILED)) {
		return NULL;
	}
	return omap_bo_new_impl(dev, gsize, flags);
}

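/* Illustrative sketch, not part of the library: allocating buffers.
 * omap_bo_new() takes a size in bytes and must not be passed a tiled flag;
 * omap_bo_new_tiled() takes width/height dimensions and requires one of the
 * OMAP_BO_TILED_* flags.  Flag names come from omap_drm.h; the dimensions
 * and caching mode below are just example values.
 *
 *	struct omap_bo *buf   = omap_bo_new(dev, 4 * 1024 * 1024, OMAP_BO_WC);
 *	struct omap_bo *tiled = omap_bo_new_tiled(dev, 1920, 1080,
 *			OMAP_BO_TILED_16 | OMAP_BO_WC);
 */
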
drm_public struct omap_bo *omap_bo_ref(struct omap_bo *bo)
{
	atomic_inc(&bo->refcnt);
	return bo;
}

/* get buffer info */
static int get_buffer_info(struct omap_bo *bo)
{
	struct drm_omap_gem_info req = {
			.handle = bo->handle,
	};
	int ret = drmCommandWriteRead(bo->dev->fd, DRM_OMAP_GEM_INFO,
			&req, sizeof(req));
	if (ret) {
		return ret;
	}

	/* really all we need for now is mmap offset */
	bo->offset = req.offset;
	bo->size = req.size;

	return 0;
}

/* import a buffer object from DRI2 name */
drm_public struct omap_bo *
omap_bo_from_name(struct omap_device *dev, uint32_t name)
{
	struct omap_bo *bo = NULL;
	struct drm_gem_open req = {
			.name = name,
	};

	pthread_mutex_lock(&table_lock);

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		goto fail;
	}

	bo = lookup_bo(dev, req.handle);
	if (!bo) {
		bo = bo_from_handle(dev, req.handle);
		bo->name = name;
	}

	pthread_mutex_unlock(&table_lock);

	return bo;

fail:
	pthread_mutex_unlock(&table_lock);
	free(bo);
	return NULL;
}

/* import a buffer from dmabuf fd, does not take ownership of the
 * fd so caller should close() the fd when it is otherwise done
 * with it (even if it is still using the 'struct omap_bo *')
 */
drm_public struct omap_bo *
omap_bo_from_dmabuf(struct omap_device *dev, int fd)
{
	struct omap_bo *bo = NULL;
	struct drm_prime_handle req = {
			.fd = fd,
	};
	int ret;

	pthread_mutex_lock(&table_lock);

	ret = drmIoctl(dev->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &req);
	if (ret) {
		goto fail;
	}

	bo = lookup_bo(dev, req.handle);
	if (!bo) {
		bo = bo_from_handle(dev, req.handle);
	}

	pthread_mutex_unlock(&table_lock);

	return bo;

fail:
	pthread_mutex_unlock(&table_lock);
	free(bo);
	return NULL;
}

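/* Illustrative sketch, not part of the library: importing a dmabuf.  As the
 * comment above notes, the fd stays owned by the caller, so it can (and
 * should) be closed once the caller is otherwise done with it; the bo keeps
 * the underlying buffer alive on its own.
 *
 *	struct omap_bo *bo = omap_bo_from_dmabuf(dev, dmabuf_fd);
 *	close(dmabuf_fd);	// does not invalidate bo
 *	... use bo ...
 *	omap_bo_del(bo);
 */
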
/* destroy a buffer object */
drm_public void omap_bo_del(struct omap_bo *bo)
{
	if (!bo) {
		return;
	}

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	if (bo->map) {
		munmap(bo->map, bo->size);
	}

	if (bo->fd) {
		close(bo->fd);
	}

	if (bo->handle) {
		struct drm_gem_close req = {
				.handle = bo->handle,
		};
		pthread_mutex_lock(&table_lock);
		drmHashDelete(bo->dev->handle_table, bo->handle);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
		pthread_mutex_unlock(&table_lock);
	}

	omap_device_del(bo->dev);

	free(bo);
}

/* get the global flink/DRI2 buffer name */
drm_public int omap_bo_get_name(struct omap_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		bo->name = req.name;
	}

	*name = bo->name;

	return 0;
}

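/* Illustrative sketch, not part of the library: sharing a buffer between
 * processes via its global flink/DRI2 name (as used by DRI2).
 *
 *	uint32_t name;
 *	if (omap_bo_get_name(bo, &name) == 0) {
 *		// pass 'name' to another process, which can then do:
 *		// struct omap_bo *imported = omap_bo_from_name(dev, name);
 *	}
 */
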
drm_public uint32_t omap_bo_handle(struct omap_bo *bo)
{
	return bo->handle;
}

/* caller owns the dmabuf fd that is returned and is responsible
 * to close() it when done
 */
drm_public int omap_bo_dmabuf(struct omap_bo *bo)
{
	if (!bo->fd) {
		struct drm_prime_handle req = {
				.handle = bo->handle,
				.flags = DRM_CLOEXEC,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req);
		if (ret) {
			return ret;
		}

		bo->fd = req.fd;
	}
	return dup(bo->fd);
}

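/* Illustrative sketch, not part of the library: exporting a dmabuf.  Each
 * call returns a dup()'d fd, so every fd handed back belongs to the caller
 * and must be close()'d when no longer needed.
 *
 *	int fd = omap_bo_dmabuf(bo);
 *	if (fd >= 0) {
 *		... hand fd to another device or process ...
 *		close(fd);
 *	}
 */
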
drm_public uint32_t omap_bo_size(struct omap_bo *bo)
{
	if (!bo->size) {
		get_buffer_info(bo);
	}
	return bo->size;
}

drm_public void *omap_bo_map(struct omap_bo *bo)
{
	if (!bo->map) {
		if (!bo->offset) {
			get_buffer_info(bo);
		}

		bo->map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
				MAP_SHARED, bo->dev->fd, bo->offset);
		if (bo->map == MAP_FAILED) {
			bo->map = NULL;
		}
	}
	return bo->map;
}

drm_public int omap_bo_cpu_prep(struct omap_bo *bo, enum omap_gem_op op)
{
	struct drm_omap_gem_cpu_prep req = {
			.handle = bo->handle,
			.op = op,
	};
	return drmCommandWrite(bo->dev->fd,
			DRM_OMAP_GEM_CPU_PREP, &req, sizeof(req));
}

drm_public int omap_bo_cpu_fini(struct omap_bo *bo, enum omap_gem_op op)
{
	struct drm_omap_gem_cpu_fini req = {
			.handle = bo->handle,
			.op = op,
			.nregions = 0,
	};
	return drmCommandWrite(bo->dev->fd,
			DRM_OMAP_GEM_CPU_FINI, &req, sizeof(req));
}
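
/* Illustrative sketch, not part of the library: a typical CPU access pattern,
 * bracketing writes through the mmap'ing with cpu_prep/cpu_fini so the kernel
 * can synchronize with any pending GPU/display access.  OMAP_GEM_WRITE comes
 * from enum omap_gem_op in omap_drm.h.
 *
 *	void *ptr = omap_bo_map(bo);
 *	if (ptr && omap_bo_cpu_prep(bo, OMAP_GEM_WRITE) == 0) {
 *		memset(ptr, 0, omap_bo_size(bo));
 *		omap_bo_cpu_fini(bo, OMAP_GEM_WRITE);
 *	}
 */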