/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, Tx., USA
 * All Rights Reserved.
 * Copyright 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xf86drm.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <sys/mman.h>
#include <drm/psb_ttm_placement_user.h>
#include "wsbm_pool.h"
#include "wsbm_priv.h"
#include "wsbm_manager.h"

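/*
 * Retry wrappers for DRM commands: the kernel returns -EAGAIN or
 * -ERESTART when an ioctl is interrupted before completion, in which
 * case the command is simply reissued.
 */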
#define DRMRESTARTCOMMANDWRITE(_fd, _val, _arg, _ret)			\
	do {								\
		(_ret) = drmCommandWrite(_fd, _val, &(_arg), sizeof(_arg)); \
	} while ((_ret) == -EAGAIN || (_ret) == -ERESTART)

#define DRMRESTARTCOMMANDWRITEREAD(_fd, _val, _arg, _ret)		\
	do {								\
		(_ret) = drmCommandWriteRead(_fd, _val, &(_arg), sizeof(_arg)); \
	} while ((_ret) == -EAGAIN || (_ret) == -ERESTART)

/*
 * Buffer pool implementation using DRM buffer objects as wsbm buffer objects.
 */
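/*
 * Example usage (a sketch, not part of the library): fd is an open DRM
 * file descriptor; devOffset and the placement value are device-specific
 * and assumed here.
 *
 *     struct _WsbmBufferPool *pool = wsbmTTMPoolInit(fd, devOffset);
 *     struct _WsbmBufStorage *storage =
 *         pool->create(pool, 4096, placement, 4096);
 *     void *virt;
 *
 *     if (storage != NULL && pool->map(storage, 0, &virt) == 0) {
 *         ... CPU access; bracket with pool->syncforcpu() and
 *         pool->releasefromcpu() ...
 *         pool->unmap(storage);
 *     }
 *     pool->destroy(&storage);
 *     pool->takeDown(pool);
 */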

struct _TTMBuffer
{
    struct _WsbmBufStorage buf;
    struct _WsbmCond event;

    /*
     * Remains constant after creation.
     */

    uint64_t requestedSize;
    uint64_t mapHandle;
    uint64_t realSize;

    /*
     * Protected by the kernel lock.
     */

    struct _WsbmKernelBuf kBuf;

    /*
     * Protected by the mutex.
     */

    void *virtual;
    int syncInProgress;
    unsigned readers;
    unsigned writers;
};

struct _TTMPool
{
    struct _WsbmBufferPool pool;
    unsigned int pageSize;
    unsigned int devOffset;
};

static inline struct _TTMPool *
ttmGetPool(struct _TTMBuffer *dBuf)
{
    return containerOf(dBuf->buf.pool, struct _TTMPool, pool);
}

static inline struct _TTMBuffer *
ttmBuffer(struct _WsbmBufStorage *buf)
{
    return containerOf(buf, struct _TTMBuffer, buf);
}

static struct _WsbmBufStorage *
pool_create(struct _WsbmBufferPool *pool,
	    unsigned long size, uint32_t placement, unsigned alignment)
{
    struct _TTMBuffer *dBuf = (struct _TTMBuffer *)
	calloc(1, sizeof(*dBuf));
    struct _TTMPool *ttmPool = containerOf(pool, struct _TTMPool, pool);
    int ret;
    unsigned pageSize = ttmPool->pageSize;
    union ttm_pl_create_arg arg;

    if (!dBuf)
	return NULL;

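    /*
     * The kernel takes the alignment in whole pages (see
     * arg.req.page_alignment below), so alignments larger than one
     * page must be page multiples.
     */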
    if ((alignment > pageSize) && (alignment % pageSize))
	goto out_err0;

    ret = wsbmBufStorageInit(&dBuf->buf, pool);
    if (ret)
	goto out_err0;

    ret = WSBM_COND_INIT(&dBuf->event);
    if (ret)
	goto out_err1;

    arg.req.size = size;
    arg.req.placement = placement;
    arg.req.page_alignment = alignment / pageSize;

    DRMRESTARTCOMMANDWRITEREAD(pool->fd, ttmPool->devOffset + TTM_PL_CREATE,
			       arg, ret);

    if (ret)
	goto out_err2;

    dBuf->requestedSize = size;
    dBuf->kBuf.gpuOffset = arg.rep.gpu_offset;
    dBuf->mapHandle = arg.rep.map_handle;
    dBuf->realSize = arg.rep.bo_size;
    dBuf->kBuf.placement = arg.rep.placement;
    dBuf->kBuf.handle = arg.rep.handle;

    return &dBuf->buf;

  out_err2:
    WSBM_COND_FREE(&dBuf->event);
  out_err1:
    wsbmBufStorageTakedown(&dBuf->buf);
  out_err0:
    free(dBuf);
    return NULL;
}

static struct _WsbmBufStorage *
pool_reference(struct _WsbmBufferPool *pool, unsigned handle)
{
    struct _TTMBuffer *dBuf = (struct _TTMBuffer *)calloc(1, sizeof(*dBuf));
    struct _TTMPool *ttmPool = containerOf(pool, struct _TTMPool, pool);
    union ttm_pl_reference_arg arg;
    int ret;

    if (!dBuf)
	return NULL;

    ret = wsbmBufStorageInit(&dBuf->buf, pool);
    if (ret)
	goto out_err0;

    ret = WSBM_COND_INIT(&dBuf->event);
    if (ret)
	goto out_err1;

    arg.req.handle = handle;
    ret = drmCommandWriteRead(pool->fd, ttmPool->devOffset + TTM_PL_REFERENCE,
			      &arg, sizeof(arg));

    if (ret)
	goto out_err2;

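    /*
     * The size originally requested for a referenced buffer is not
     * known here, so report the kernel's bo_size for both sizes.
     */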
    dBuf->requestedSize = arg.rep.bo_size;
    dBuf->kBuf.gpuOffset = arg.rep.gpu_offset;
    dBuf->mapHandle = arg.rep.map_handle;
    dBuf->realSize = arg.rep.bo_size;
    dBuf->kBuf.placement = arg.rep.placement;
    dBuf->kBuf.handle = arg.rep.handle;
    dBuf->kBuf.fence_type_mask = arg.rep.sync_object_arg;

    return &dBuf->buf;

  out_err2:
    WSBM_COND_FREE(&dBuf->event);
  out_err1:
    wsbmBufStorageTakedown(&dBuf->buf);
  out_err0:
    free(dBuf);
    return NULL;
}

static void
pool_destroy(struct _WsbmBufStorage **buf)
{
    struct _TTMBuffer *dBuf = ttmBuffer(*buf);
    struct _TTMPool *ttmPool = ttmGetPool(dBuf);
    struct ttm_pl_reference_req arg;

    *buf = NULL;
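    /*
     * Tear down the CPU mapping cached by pool_map() before
     * unreferencing the kernel object.
     */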
    if (dBuf->virtual != NULL) {
	(void)munmap(dBuf->virtual, dBuf->requestedSize);
	dBuf->virtual = NULL;
    }
    arg.handle = dBuf->kBuf.handle;
    (void)drmCommandWrite(dBuf->buf.pool->fd,
			  ttmPool->devOffset + TTM_PL_UNREF,
			  &arg, sizeof(arg));

    WSBM_COND_FREE(&dBuf->event);
    wsbmBufStorageTakedown(&dBuf->buf);
    free(dBuf);
}

static int
syncforcpu_locked(struct _WsbmBufStorage *buf, unsigned mode)
{
    uint32_t kmode = 0;
    struct _TTMBuffer *dBuf = ttmBuffer(buf);
    struct _TTMPool *ttmPool = ttmGetPool(dBuf);
    unsigned int readers;
    unsigned int writers;
    int ret = 0;

    while (dBuf->syncInProgress)
	WSBM_COND_WAIT(&dBuf->event, &buf->mutex);

    readers = dBuf->readers;
    writers = dBuf->writers;

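    /*
     * Snapshot the counters so they can be rolled back if the kernel
     * call fails. Only the first reader and the first writer actually
     * take the kernel synccpu lock.
     */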
    if ((mode & WSBM_SYNCCPU_READ) && (++dBuf->readers == 1))
	kmode |= TTM_PL_SYNCCPU_MODE_READ;

    if ((mode & WSBM_SYNCCPU_WRITE) && (++dBuf->writers == 1))
	kmode |= TTM_PL_SYNCCPU_MODE_WRITE;

    if (kmode) {
	struct ttm_pl_synccpu_arg arg;

	if (mode & WSBM_SYNCCPU_DONT_BLOCK)
	    kmode |= TTM_PL_SYNCCPU_MODE_NO_BLOCK;

	dBuf->syncInProgress = 1;

	/*
	 * This might be a lengthy wait, so
	 * release the mutex.
	 */

	WSBM_MUTEX_UNLOCK(&buf->mutex);

	arg.handle = dBuf->kBuf.handle;
	arg.access_mode = kmode;
	arg.op = TTM_PL_SYNCCPU_OP_GRAB;

	DRMRESTARTCOMMANDWRITE(dBuf->buf.pool->fd,
			       ttmPool->devOffset + TTM_PL_SYNCCPU, arg, ret);

	WSBM_MUTEX_LOCK(&buf->mutex);
	dBuf->syncInProgress = 0;
	WSBM_COND_BROADCAST(&dBuf->event);

	if (ret) {
	    dBuf->readers = readers;
	    dBuf->writers = writers;
	}
    }

    return ret;
}

static int
releasefromcpu_locked(struct _WsbmBufStorage *buf, unsigned mode)
{
    uint32_t kmode = 0;
    struct _TTMBuffer *dBuf = ttmBuffer(buf);
    struct _TTMPool *ttmPool = ttmGetPool(dBuf);
    int ret = 0;

    while (dBuf->syncInProgress)
	WSBM_COND_WAIT(&dBuf->event, &buf->mutex);

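    /*
     * The kernel synccpu lock is dropped only when the last reader
     * and/or the last writer releases its reference.
     */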
    if ((mode & WSBM_SYNCCPU_READ) && (--dBuf->readers == 0))
	kmode |= TTM_PL_SYNCCPU_MODE_READ;

    if ((mode & WSBM_SYNCCPU_WRITE) && (--dBuf->writers == 0))
	kmode |= TTM_PL_SYNCCPU_MODE_WRITE;

    if (kmode) {
	struct ttm_pl_synccpu_arg arg;

	arg.handle = dBuf->kBuf.handle;
	arg.access_mode = kmode;
	arg.op = TTM_PL_SYNCCPU_OP_RELEASE;

	DRMRESTARTCOMMANDWRITE(dBuf->buf.pool->fd,
			       ttmPool->devOffset + TTM_PL_SYNCCPU, arg, ret);
    }

    return ret;
}

static int
pool_syncforcpu(struct _WsbmBufStorage *buf, unsigned mode)
{
    int ret;

    WSBM_MUTEX_LOCK(&buf->mutex);
    ret = syncforcpu_locked(buf, mode);
    WSBM_MUTEX_UNLOCK(&buf->mutex);
    return ret;
}

static void
pool_releasefromcpu(struct _WsbmBufStorage *buf, unsigned mode)
{
    WSBM_MUTEX_LOCK(&buf->mutex);
    (void)releasefromcpu_locked(buf, mode);
    WSBM_MUTEX_UNLOCK(&buf->mutex);
}

#ifdef ANDROID

/*
 * Bionic provides no header prototype for the mmap2 syscall wrapper.
 * __mmap2() takes its file offset in 4096-byte units rather than bytes,
 * which lets a 32-bit process map at offsets beyond the 32-bit range.
 */
void *__mmap2(void *, size_t, int, int, int, size_t);

#define MMAP2_SHIFT 12		/* 2**12 == 4096 */

static void *
_temp_mmap(void *addr, size_t size, int prot, int flags, int fd,
	   long long offset)
{
    return __mmap2(addr, size, prot, flags, fd,
		   (unsigned long)(offset >> MMAP2_SHIFT));
}

#endif

static int
pool_map(struct _WsbmBufStorage *buf, unsigned mode __attribute__ ((unused)),
	 void **virtual)
{
    struct _TTMBuffer *dBuf = ttmBuffer(buf);
    void *virt;
    int ret = 0;

    WSBM_MUTEX_LOCK(&buf->mutex);

    /*
     * mmaps are expensive, so we only really unmap if
     * we destroy the buffer.
     */

    if (dBuf->virtual == NULL) {
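	/*
	 * mapHandle is a 64-bit offset; on 32-bit builds it goes
	 * through the __mmap2() wrapper above, since plain mmap()
	 * may not accept a 64-bit offset there.
	 */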
#if defined(__LP64__) || defined(_LP64) || defined(__LP64)
	virt = mmap(0, dBuf->requestedSize,
		    PROT_READ | PROT_WRITE, MAP_SHARED,
		    buf->pool->fd, dBuf->mapHandle);
#else
	virt = _temp_mmap(0, dBuf->requestedSize,
		    PROT_READ | PROT_WRITE, MAP_SHARED,
		    buf->pool->fd, dBuf->mapHandle);
#endif
	if (virt == MAP_FAILED) {
	    ret = -errno;
	    goto out_unlock;
	}
	dBuf->virtual = virt;
    }

    *virtual = dBuf->virtual;
  out_unlock:
    WSBM_MUTEX_UNLOCK(&buf->mutex);

    return ret;
}

static void
pool_unmap(struct _WsbmBufStorage *buf __attribute__ ((unused)))
{
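    /*
     * Intentionally empty: mappings are cached and only torn down
     * when the buffer is destroyed (see the comment in pool_map()).
     */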
}

static unsigned long
pool_offset(struct _WsbmBufStorage *buf)
{
    struct _TTMBuffer *dBuf = ttmBuffer(buf);

    return dBuf->kBuf.gpuOffset;
}

static unsigned long
pool_poolOffset(struct _WsbmBufStorage *buf __attribute__ ((unused)))
{
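    /*
     * TTM buffers are stand-alone kernel objects rather than
     * sub-allocations from a larger pool buffer, so the in-pool
     * offset is always zero.
     */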
    return 0;
}

static uint32_t
pool_placement(struct _WsbmBufStorage *buf)
{
    struct _TTMBuffer *dBuf = ttmBuffer(buf);

    return dBuf->kBuf.placement;
}

static unsigned long
pool_size(struct _WsbmBufStorage *buf)
{
    struct _TTMBuffer *dBuf = ttmBuffer(buf);

    return dBuf->realSize;
}

static void
pool_fence(struct _WsbmBufStorage *buf __attribute__ ((unused)),
	   struct _WsbmFenceObject *fence __attribute__ ((unused)))
{
    /*
     * Noop. The kernel handles all fencing.
     */
}

static int
pool_waitIdle(struct _WsbmBufStorage *buf, int lazy)
{
    struct _TTMBuffer *dBuf = ttmBuffer(buf);
    struct _TTMPool *ttmPool = ttmGetPool(dBuf);
    struct ttm_pl_waitidle_arg req;
    struct _WsbmBufferPool *pool = buf->pool;
    int ret;

    req.handle = dBuf->kBuf.handle;
    req.mode = (lazy) ? TTM_PL_WAITIDLE_MODE_LAZY : 0;

    DRMRESTARTCOMMANDWRITE(pool->fd, ttmPool->devOffset + TTM_PL_WAITIDLE,
			   req, ret);

    return ret;
}

static void
pool_takedown(struct _WsbmBufferPool *pool)
{
    struct _TTMPool *ttmPool = containerOf(pool, struct _TTMPool, pool);

    free(ttmPool);
}

static int
pool_setStatus(struct _WsbmBufStorage *buf, uint32_t set_placement,
	       uint32_t clr_placement)
{
    struct _TTMBuffer *dBuf = ttmBuffer(buf);
    struct _TTMPool *ttmPool = ttmGetPool(dBuf);
    union ttm_pl_setstatus_arg arg;
    struct ttm_pl_setstatus_req *req = &arg.req;
    struct ttm_pl_rep *rep = &arg.rep;
    struct _WsbmBufferPool *pool = buf->pool;
    int ret;

    req->handle = dBuf->kBuf.handle;
    req->set_placement = set_placement;
    req->clr_placement = clr_placement;

    DRMRESTARTCOMMANDWRITEREAD(pool->fd,
			       ttmPool->devOffset + TTM_PL_SETSTATUS,
			       arg, ret);

    if (!ret) {
	dBuf->kBuf.gpuOffset = rep->gpu_offset;
	dBuf->kBuf.placement = rep->placement;
    }

    return ret;
}

static struct _WsbmKernelBuf *
pool_kernel(struct _WsbmBufStorage *buf)
{
    return (void *)&ttmBuffer(buf)->kBuf;
}

struct _WsbmBufferPool *
wsbmTTMPoolInit(int fd, unsigned int devOffset)
{
    struct _TTMPool *ttmPool;
    struct _WsbmBufferPool *pool;

    ttmPool = (struct _TTMPool *)calloc(1, sizeof(*ttmPool));

    if (!ttmPool)
	return NULL;

    ttmPool->pageSize = getpagesize();
    ttmPool->devOffset = devOffset;
    pool = &ttmPool->pool;

    pool->fd = fd;
    pool->map = &pool_map;
    pool->unmap = &pool_unmap;
    pool->syncforcpu = &pool_syncforcpu;
    pool->releasefromcpu = &pool_releasefromcpu;
    pool->destroy = &pool_destroy;
    pool->offset = &pool_offset;
    pool->poolOffset = &pool_poolOffset;
    pool->placement = &pool_placement;
    pool->size = &pool_size;
    pool->create = &pool_create;
    pool->fence = &pool_fence;
    pool->kernel = &pool_kernel;
    pool->validate = NULL;
    pool->unvalidate = NULL;
    pool->waitIdle = &pool_waitIdle;
    pool->takeDown = &pool_takedown;
    pool->createByReference = &pool_reference;
    pool->setStatus = &pool_setStatus;
    return pool;
}

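/*
 * Create a TTM buffer object backed by existing user memory rather
 * than freshly allocated storage: user_ptr and fd are handed to the
 * kernel, which wraps the caller's pages in a buffer object.
 */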
struct _WsbmBufStorage *
ttm_pool_ub_create(struct _WsbmBufferPool *pool, unsigned long size,
		   uint32_t placement, unsigned alignment,
		   const unsigned long *user_ptr, int fd)
{
    struct _TTMBuffer *dBuf = (struct _TTMBuffer *)
	calloc(1, sizeof(*dBuf));
    struct _TTMPool *ttmPool = containerOf(pool, struct _TTMPool, pool);
    int ret;
    unsigned pageSize = ttmPool->pageSize;
    union ttm_pl_create_ub_arg arg;

    if (!dBuf)
	return NULL;

    if ((alignment > pageSize) && (alignment % pageSize))
	goto out_err0;

    ret = wsbmBufStorageInit(&dBuf->buf, pool);
    if (ret)
	goto out_err0;

    ret = WSBM_COND_INIT(&dBuf->event);
    if (ret)
	goto out_err1;

    arg.req.size = size;
    arg.req.placement = placement;
    arg.req.page_alignment = alignment / pageSize;
    arg.req.user_address = (unsigned long)user_ptr;
    arg.req.fd = fd;

    DRMRESTARTCOMMANDWRITEREAD(pool->fd, ttmPool->devOffset + TTM_PL_CREATE_UB,
			       arg, ret);
    if (ret)
	goto out_err2;

    dBuf->requestedSize = size;
    dBuf->kBuf.gpuOffset = arg.rep.gpu_offset;
    dBuf->mapHandle = arg.rep.map_handle;
    dBuf->realSize = arg.rep.bo_size;
    dBuf->kBuf.placement = arg.rep.placement;
    dBuf->kBuf.handle = arg.rep.handle;

    return &dBuf->buf;

  out_err2:
    WSBM_COND_FREE(&dBuf->event);
  out_err1:
    wsbmBufStorageTakedown(&dBuf->buf);
  out_err0:
    free(dBuf);
    return NULL;
}