/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
 */
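
/*
 * A fenced buffer wraps a buffer obtained from a provider pb_manager and
 * associates it with a fence object. While its fence is pending, a buffer
 * sits on the manager's fenced list and holds an extra reference on behalf
 * of the fence; once the fence expires it moves back to the unfenced list.
 * When GPU storage cannot be allocated, buffer contents may be temporarily
 * held in malloc'ed CPU memory until validation requires GPU storage again.
 */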


#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"
#include "pb_bufmgr.h"


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct fenced_manager
{
   struct pb_manager base;
   struct pb_manager *provider;
   struct pb_fence_ops *ops;

   /**
    * Maximum buffer size that can be safely allocated.
    */
   pb_size max_buffer_size;

   /**
    * Maximum CPU memory we can allocate before we start waiting for the
    * GPU to idle.
    */
   pb_size max_cpu_total_size;

   /**
    * Following members are mutable and protected by this mutex.
    */
   pipe_mutex mutex;

   /**
    * Fenced buffer list.
    *
    * All fenced buffers are placed in this list, ordered from the oldest
    * fence to the newest fence.
    */
   struct list_head fenced;
   pb_size num_fenced;

   struct list_head unfenced;
   pb_size num_unfenced;

   /**
    * How much temporary CPU memory is being used to hold unvalidated buffers.
    */
   pb_size cpu_total_size;
};


/**
 * Fenced buffer.
 *
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   /*
    * Immutable members.
    */

   struct pb_buffer base;
   struct fenced_manager *mgr;

   /*
    * Following members are mutable and protected by fenced_manager::mutex.
    */

   struct list_head head;

   /**
    * Buffer with storage.
    */
   struct pb_buffer *buffer;
   pb_size size;
   struct pb_desc desc;

   /**
    * Temporary CPU storage data. Used when there isn't enough GPU memory to
    * store the buffer.
    */
   void *data;

   /**
    * A bitmask of PB_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;

   struct pb_validate *vl;
   unsigned validation_flags;

   struct pipe_fence_handle *fence;
};


static INLINE struct fenced_manager *
fenced_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct fenced_manager *)mgr;
}


static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct fenced_buffer *)buf;
}


static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf);

static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait);

static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf);

/**
 * Dump the fenced buffer list.
 *
 * Useful to understand failures to allocate buffers.
 */
static void
fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
{
#ifdef DEBUG
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   debug_printf("%10s %7s %8s %7s %10s %s\n",
                "buffer", "size", "refcount", "storage", "fence", "signalled");

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u %8u %7s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.size,
                   p_atomic_read(&fenced_buf->base.reference.count),
                   fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none"));
      curr = next;
      next = curr->next;
   }

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(fenced_buf->buffer);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %7u %8u %7s %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.size,
                   p_atomic_read(&fenced_buf->base.reference.count),
                   "gpu",
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }
#else
   (void)fenced_mgr;
#endif
}

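/**
 * Final destruction of a fenced buffer: unlink it from the unfenced list,
 * release its GPU and CPU storage, and free the structure itself.
 *
 * The buffer must have no fence and no remaining references.
 */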
static INLINE void
fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
                             struct fenced_buffer *fenced_buf)
{
   assert(!pipe_is_referenced(&fenced_buf->base.reference));

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
   fenced_buffer_destroy_cpu_storage_locked(fenced_buf);

   FREE(fenced_buf);
}


/**
 * Add the buffer to the fenced list.
 *
 * The caller must already hold a reference; an additional reference is
 * taken here on behalf of the fence.
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->flags & PB_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   p_atomic_inc(&fenced_buf->base.reference.count);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}

/**
 * Remove the buffer from the fenced list, and potentially destroy the buffer
 * if the reference count reaches zero.
 *
 * Returns TRUE if the buffer was destroyed.
 */
static INLINE boolean
fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->mgr == fenced_mgr);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_fenced);
   --fenced_mgr->num_fenced;

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   if (p_atomic_dec_zero(&fenced_buf->base.reference.count)) {
      fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
      return TRUE;
   }

   return FALSE;
}

/**
 * Wait for the fence to expire, and remove it from the fenced list.
 *
 * This function will release and re-acquire the mutex, so any copy of mutable
 * state must be discarded after calling it.
 */
static INLINE enum pipe_error
fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   enum pipe_error ret = PIPE_ERROR;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->fence);

   if(fenced_buf->fence) {
      struct pipe_fence_handle *fence = NULL;
      int finished;
      boolean proceed;

      ops->fence_reference(ops, &fence, fenced_buf->fence);

      pipe_mutex_unlock(fenced_mgr->mutex);

      finished = ops->fence_finish(ops, fenced_buf->fence, 0);

      pipe_mutex_lock(fenced_mgr->mutex);

      assert(pipe_is_referenced(&fenced_buf->base.reference));

      /*
       * Only proceed if the fence object didn't change in the meantime.
       * Otherwise assume the work has already been carried out by another
       * thread that re-acquired the lock before us.
       */
      proceed = fence == fenced_buf->fence ? TRUE : FALSE;

      ops->fence_reference(ops, &fence, NULL);

      if(proceed && finished == 0) {
         /*
          * Remove from the fenced list.
          */

         boolean destroyed;

         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

         /* TODO: remove subsequent buffers with the same fence? */

         assert(!destroyed);

         fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   return ret;
}

/**
 * Remove as many fenced buffers from the fenced list as possible.
 *
 * Returns TRUE if at least one buffer was removed.
 */
static boolean
fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
                                      boolean wait)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;
   boolean ret = FALSE;

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if(fenced_buf->fence != prev_fence) {
         int signaled;

         if (wait) {
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);

            /*
             * Don't return just now. Instead preemptively check if the
             * following buffers' fences already expired, without further waits.
             */
            wait = FALSE;
         }
         else {
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         }

         if (signaled != 0) {
            return ret;
         }

         prev_fence = fenced_buf->fence;
      }
      else {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

      ret = TRUE;

      curr = next;
      next = curr->next;
   }

   return ret;
}

/**
 * Try to free some GPU memory by backing it up into CPU memory.
 *
 * Returns TRUE if at least one buffer was freed.
 */
static boolean
fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
{
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      /*
       * We can only move storage if the buffer is not mapped and not
       * validated.
       */
      if(fenced_buf->buffer &&
         !fenced_buf->mapcount &&
         !fenced_buf->vl) {
         enum pipe_error ret;

         ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
         if(ret == PIPE_OK) {
            ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
            if(ret == PIPE_OK) {
               fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
               return TRUE;
            }
            fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
         }
      }

      curr = next;
      next = curr->next;
   }

   return FALSE;
}

/**
 * Destroy CPU storage for this buffer.
 */
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->data) {
      align_free(fenced_buf->data);
      fenced_buf->data = NULL;
      assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size);
      fenced_buf->mgr->cpu_total_size -= fenced_buf->size;
   }
}


/**
 * Create CPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf)
{
   assert(!fenced_buf->data);
   if(fenced_buf->data)
      return PIPE_OK;

   if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
   if(!fenced_buf->data)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_mgr->cpu_total_size += fenced_buf->size;

   return PIPE_OK;
}


/**
 * Destroy the GPU storage.
 */
static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->buffer) {
      pb_reference(&fenced_buf->buffer, NULL);
   }
}


/**
 * Try to create GPU storage for this buffer.
 *
 * This function is a shorthand around pb_manager::create_buffer for
 * fenced_buffer_create_gpu_storage_locked()'s benefit.
 */
static INLINE boolean
fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                            struct fenced_buffer *fenced_buf)
{
   struct pb_manager *provider = fenced_mgr->provider;

   assert(!fenced_buf->buffer);

   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
                                                fenced_buf->size,
                                                &fenced_buf->desc);
   return fenced_buf->buffer ? TRUE : FALSE;
}

/**
 * Create GPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait)
{
   assert(!fenced_buf->buffer);

   /*
    * Check for signaled buffers before trying to allocate.
    */
   fenced_manager_check_signalled_locked(fenced_mgr, FALSE);

   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);

   /*
    * Keep trying while there is some sort of progress:
    * - fences are expiring,
    * - or buffers are being swapped out from GPU memory into CPU memory.
    */
   while(!fenced_buf->buffer &&
         (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
          fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
      fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
   }

   if(!fenced_buf->buffer && wait) {
      /*
       * Same as before, but this time around, wait to free buffers if
       * necessary.
       */
      while(!fenced_buf->buffer &&
            (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
             fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
         fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
      }
   }

   if(!fenced_buf->buffer) {
      if(0)
         fenced_manager_dump_locked(fenced_mgr);

      /* give up */
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   return PIPE_OK;
}

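/**
 * Copy the contents of the temporary CPU storage into the GPU buffer.
 */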
static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
{
   uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_WRITE, NULL);
   if(!map)
      return PIPE_ERROR;

   memcpy(map, fenced_buf->data, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}

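/**
 * Copy the contents of the GPU buffer back into the temporary CPU storage.
 */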
static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
{
   const uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_READ, NULL);
   if(!map)
      return PIPE_ERROR;

   memcpy(fenced_buf->data, map, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}

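/**
 * Destroy callback for the buffer vtbl: called once the reference count has
 * dropped to zero.
 */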
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   assert(!pipe_is_referenced(&fenced_buf->base.reference));

   pipe_mutex_lock(fenced_mgr->mutex);

   fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);

   pipe_mutex_unlock(fenced_mgr->mutex);
}

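/**
 * Map callback for the buffer vtbl.
 *
 * Maps the GPU storage, or returns the temporary CPU storage when the buffer
 * has no GPU storage. Conflicting GPU access is waited for first, unless
 * PB_USAGE_UNSYNCHRONIZED is given; with PB_USAGE_DONTBLOCK, NULL is
 * returned instead of waiting.
 */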
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags, void *flush_ctx)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(!(flags & PB_USAGE_GPU_READ_WRITE));

   /*
    * Serialize writes.
    */
   while((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
         ((fenced_buf->flags & PB_USAGE_GPU_READ) &&
          (flags & PB_USAGE_CPU_WRITE))) {

      /*
       * Don't wait for the GPU to finish accessing the buffer if blocking
       * is forbidden.
       */
      if((flags & PB_USAGE_DONTBLOCK) &&
          ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         goto done;
      }

      if (flags & PB_USAGE_UNSYNCHRONIZED) {
         break;
      }

      /*
       * Wait for the GPU to finish accessing the buffer. This will release
       * and re-acquire the mutex, so all copies of mutable state must be
       * discarded.
       */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
   }

   if(fenced_buf->buffer) {
      map = pb_map(fenced_buf->buffer, flags, flush_ctx);
   }
   else {
      assert(fenced_buf->data);
      map = fenced_buf->data;
   }

   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PB_USAGE_CPU_READ_WRITE;
   }

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return map;
}

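/**
 * Unmap callback for the buffer vtbl: decrement the map count and clear the
 * CPU usage flags once the last mapping is gone.
 */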
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      if (fenced_buf->buffer)
         pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}

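/**
 * Validate callback for the buffer vtbl.
 *
 * Ensures the buffer has GPU storage (copying any CPU-only contents into it)
 * and adds it to the given validation list. A NULL list invalidates the
 * buffer.
 */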
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   enum pipe_error ret;

   pipe_mutex_lock(fenced_mgr->mutex);

   if(!vl) {
      /* invalidate */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   assert(flags & PB_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PB_USAGE_GPU_READ_WRITE));
   flags &= PB_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists */
   if(fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated */
      ret = PIPE_OK;
      goto done;
   }

   /*
    * Create and update GPU storage.
    */
   if(!fenced_buf->buffer) {
      assert(!fenced_buf->mapcount);

      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
      if(ret != PIPE_OK) {
         goto done;
      }

      ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
      if(ret != PIPE_OK) {
         fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
         goto done;
      }

      if(fenced_buf->mapcount) {
         debug_printf("warning: validating a buffer while it is still mapped\n");
      }
      else {
         fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
      }
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if (ret != PIPE_OK)
      goto done;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return ret;
}

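/**
 * Fence callback for the buffer vtbl.
 *
 * Associates a new fence with the buffer after validation. Any previous
 * fence is dropped, and with a non-NULL fence the buffer is moved onto the
 * fenced list, holding an extra reference until the fence expires.
 */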
static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->buffer);

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         boolean destroyed;
         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         assert(!destroyed);
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}

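/**
 * get_base_buffer callback for the buffer vtbl: relay to the underlying GPU
 * buffer.
 */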
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * This should only be called when the buffer is validated. Typically
    * when processing relocations.
    */
   assert(fenced_buf->vl);
   assert(fenced_buf->buffer);

   if(fenced_buf->buffer)
      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
   else {
      *base_buf = buf;
      *offset = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}


static const struct pb_vtbl
fenced_buffer_vtbl = {
      fenced_buffer_destroy,
      fenced_buffer_map,
      fenced_buffer_unmap,
      fenced_buffer_validate,
      fenced_buffer_fence,
      fenced_buffer_get_base_buffer
};

/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   /*
    * Don't stall the GPU, waste time evicting buffers, or waste memory
    * trying to create a buffer that will most likely never fit into the
    * graphics aperture.
    */
   if(size > fenced_mgr->max_buffer_size) {
      goto no_buffer;
   }

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if(!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.reference, 1);
   fenced_buf->base.alignment = desc->alignment;
   fenced_buf->base.usage = desc->usage;
   fenced_buf->base.size = size;
   fenced_buf->size = size;
   fenced_buf->desc = *desc;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * Try to create GPU storage without stalling.
    */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);

   /*
    * Attempt to use CPU memory to avoid stalling the GPU.
    */
   if(ret != PIPE_OK) {
      ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
   }

   /*
    * Create GPU storage, waiting for some to be available.
    */
   if(ret != PIPE_OK) {
      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
   }

   /*
    * Give up.
    */
   if(ret != PIPE_OK) {
      goto no_storage;
   }

   assert(fenced_buf->buffer || fenced_buf->data);

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   pipe_mutex_unlock(fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   pipe_mutex_unlock(fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}

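/**
 * Flush callback for the manager: wait for all outstanding fences to expire
 * and then flush the provider.
 */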
static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);
   while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
      ;
   pipe_mutex_unlock(fenced_mgr->mutex);

   assert(fenced_mgr->provider->flush);
   if(fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
}

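/**
 * Destroy callback for the manager: wait for all fenced buffers to be
 * released, then destroy the provider and fence ops along with the manager
 * itself.
 */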
static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);

   /* Wait on outstanding fences */
   while (fenced_mgr->num_fenced) {
      pipe_mutex_unlock(fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_mgr->mutex);
      while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         ;
   }

#ifdef DEBUG
   /*assert(!fenced_mgr->num_unfenced);*/
#endif

   pipe_mutex_unlock(fenced_mgr->mutex);
   pipe_mutex_destroy(fenced_mgr->mutex);

   if(fenced_mgr->provider)
      fenced_mgr->provider->destroy(fenced_mgr->provider);

   fenced_mgr->ops->destroy(fenced_mgr->ops);

   FREE(fenced_mgr);
}

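/**
 * Create a fenced buffer manager on top of the given provider.
 *
 * The returned manager takes ownership of both the provider and the fence
 * ops; they are destroyed together with it.
 *
 * max_buffer_size is the largest buffer size that will be accepted, and
 * max_cpu_total_size bounds how much malloc'ed CPU memory may be used to
 * back buffers that cannot be given GPU storage.
 */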
struct pb_manager *
fenced_bufmgr_create(struct pb_manager *provider,
                     struct pb_fence_ops *ops,
                     pb_size max_buffer_size,
                     pb_size max_cpu_total_size)
{
   struct fenced_manager *fenced_mgr;

   if(!provider)
      return NULL;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if (!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;
   fenced_mgr->max_buffer_size = max_buffer_size;
   fenced_mgr->max_cpu_total_size = max_cpu_total_size;

   LIST_INITHEAD(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   LIST_INITHEAD(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   pipe_mutex_init(fenced_mgr->mutex);

   return &fenced_mgr->base;
}