/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/


#include "os/os_thread.h"
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_cmd.h"
#include "svga_context.h"
#include "svga_debug.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
#include "svga_screen.h"
#include "svga_winsys.h"


/**
 * Allocate a winsys_buffer (i.e. DMA, aka GMR memory).
 *
 * It will flush and retry in case the first attempt to create a DMA buffer
 * fails, so it must not be called from any function involved in flushing,
 * to avoid recursion.
 */
struct svga_winsys_buffer *
svga_winsys_buffer_create( struct svga_context *svga,
                           unsigned alignment,
                           unsigned usage,
                           unsigned size )
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_winsys_buffer *buf;

   /* Just try */
   buf = sws->buffer_create(sws, alignment, usage, size);
   if (!buf) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing context to find %u bytes GMR\n",
               size);

      /* Try flushing all pending DMAs */
      svga_context_flush(svga, NULL);
      buf = sws->buffer_create(sws, alignment, usage, size);
   }

   return buf;
}


void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;

   assert(!sbuf->map.count);
   assert(sbuf->hwbuf);
   if (sbuf->hwbuf) {
      sws->buffer_destroy(sws, sbuf->hwbuf);
      sbuf->hwbuf = NULL;
   }
}



/**
 * Allocate DMA'ble storage for the buffer.
 *
 * Called before mapping a buffer.
 */
enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
                              struct svga_buffer *sbuf)
{
   assert(!sbuf->user);

   if (!sbuf->hwbuf) {
      struct svga_winsys_screen *sws = ss->sws;
      unsigned alignment = 16;
      unsigned usage = 0;
      unsigned size = sbuf->b.b.width0;

      sbuf->hwbuf = sws->buffer_create(sws, alignment, usage, size);
      if (!sbuf->hwbuf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      assert(!sbuf->dma.pending);
   }

   return PIPE_OK;
}



enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf)
{
   assert(!sbuf->user);

   if (!sbuf->handle) {
      sbuf->key.flags = 0;

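      /* Describe the buffer as a plain SVGA3D_BUFFER surface, and translate
       * the gallium bind flags into SVGA3D usage hints so the host can place
       * the surface appropriately.
       */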
      sbuf->key.format = SVGA3D_BUFFER;
      if (sbuf->b.b.bind & PIPE_BIND_VERTEX_BUFFER)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
      if (sbuf->b.b.bind & PIPE_BIND_INDEX_BUFFER)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;

      sbuf->key.size.width = sbuf->b.b.width0;
      sbuf->key.size.height = 1;
      sbuf->key.size.depth = 1;

      sbuf->key.numFaces = 1;
      sbuf->key.numMipLevels = 1;
      sbuf->key.cachable = 1;

      SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n", sbuf->b.b.width0);

      sbuf->handle = svga_screen_surface_create(ss, &sbuf->key);
      if (!sbuf->handle)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Always set the discard flag the first time the buffer is written
       * to, as svga_screen_surface_create might have returned a recycled
       * host buffer.
       */
      sbuf->dma.flags.discard = TRUE;

      SVGA_DBG(DEBUG_DMA, "   --> got sid %p sz %d (buffer)\n", sbuf->handle, sbuf->b.b.width0);
   }

   return PIPE_OK;
}


void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   if (sbuf->handle) {
      SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n", sbuf->handle, sbuf->b.b.width0);
      svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
   }
}


/**
 * Variant of SVGA3D_BufferDMA which leaves the copy boxes temporarily blank.
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga,
                           struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hwbuf;
   struct svga_winsys_surface *host = sbuf->handle;
   SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dCmdSurfaceDMA *cmd;
   uint32 numBoxes = sbuf->map.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_resource *dummy;

   if (transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = SVGA_RELOC_READ;
      surface_flags = SVGA_RELOC_WRITE;
   }
   else if (transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = SVGA_RELOC_WRITE;
      surface_flags = SVGA_RELOC_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

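   /* Reserve FIFO space for the DMA command, one copy box per dirty range,
    * and the suffix, declaring two relocations: the guest memory region and
    * the host surface patched in below.
    */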
   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
                            2);
   if (!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

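   /* The copy boxes follow the command in the FIFO. They are left blank here
    * and patched with the final ranges by svga_buffer_upload_flush().
    */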
   sbuf->dma.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->dma.svga = svga;

   /* Increment the reference count, to keep the buffer alive until the
    * command is flushed; svga_buffer_upload_flush() releases it again.
    */
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);

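   /* The suffix sits after the copy boxes. It bounds the transfer to the
    * buffer size and carries the discard/unsynchronized flags.
    */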
   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t*)cmd + sizeof *cmd + numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->b.b.width0;
   pSuffix->flags = sbuf->dma.flags;

   SVGA_FIFOCommitAll(swc);

   sbuf->dma.flags.discard = FALSE;

   return PIPE_OK;
}


/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   SVGA3dCopyBox *boxes;
   unsigned i;
   struct pipe_resource *dummy;

   if (!sbuf->dma.pending) {
      return;
   }

   assert(sbuf->handle);
   assert(sbuf->hwbuf);
   assert(sbuf->map.num_ranges);
   assert(sbuf->dma.svga == svga);
   assert(sbuf->dma.boxes);

   /*
    * Patch the DMA command with the final copy boxes.
    */

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   boxes = sbuf->dma.boxes;
   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
               sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

      boxes[i].x = sbuf->map.ranges[i].start;
      boxes[i].y = 0;
      boxes[i].z = 0;
      boxes[i].w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
      boxes[i].h = 1;
      boxes[i].d = 1;
      boxes[i].srcx = sbuf->map.ranges[i].start;
      boxes[i].srcy = 0;
      boxes[i].srcz = 0;
   }

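   /* The ranges are now encoded in the command, so clear them and take the
    * buffer off the context's dirty list.
    */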
   sbuf->map.num_ranges = 0;

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);
#ifdef DEBUG
   sbuf->head.next = sbuf->head.prev = NULL;
#endif
   sbuf->dma.pending = FALSE;
   sbuf->dma.flags.discard = FALSE;
   sbuf->dma.flags.unsynchronized = FALSE;

   sbuf->dma.svga = NULL;
   sbuf->dma.boxes = NULL;

   /* Decrement reference count (and potentially destroy) */
   dummy = &sbuf->b.b;
   pipe_resource_reference(&dummy, NULL);
}


/**
 * Note a dirty range.
 *
 * This function only notes the range down. It doesn't actually emit a DMA
 * upload command. That only happens when a context tries to refer to this
 * buffer, and the DMA upload command is added to that context's command
 * buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
 */
void
svga_buffer_add_range(struct svga_buffer *sbuf,
                      unsigned start,
                      unsigned end)
{
   unsigned i;
   unsigned nearest_range;
   unsigned nearest_dist;

   assert(end > start);

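   /* If there is room for another range, default to appending one; otherwise
    * a zero nearest_dist forces coalescing into the last range below.
    */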
   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      nearest_range = sbuf->map.num_ranges;
      nearest_dist = ~0;
   } else {
      nearest_range = SVGA_BUFFER_MAX_RANGES - 1;
      nearest_dist = 0;
   }

   /*
    * Try to grow one of the ranges.
    */

   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      int left_dist;
      int right_dist;
      int dist;

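      /* Compute the signed gap between [start, end) and this range; a
       * non-positive maximum gap means the intervals touch or overlap.
       */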
      left_dist = start - sbuf->map.ranges[i].end;
      right_dist = sbuf->map.ranges[i].start - end;
      dist = MAX2(left_dist, right_dist);

      if (dist <= 0) {
         /*
          * Ranges are contiguous or overlapping -- extend this one and return.
          *
          * Note that it is not this function's task to prevent overlapping
          * ranges, as the GMR was already given so it is too late to do
          * anything.  If the ranges overlap here it must surely be because
          * PIPE_TRANSFER_UNSYNCHRONIZED was set.
          */

         sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
         sbuf->map.ranges[i].end   = MAX2(sbuf->map.ranges[i].end,   end);
         return;
      }
      else {
         /*
          * Discontiguous ranges -- keep track of the nearest range.
          */

         if (dist < nearest_dist) {
            nearest_range = i;
            nearest_dist = dist;
         }
      }
   }

   /*
    * We cannot add a new range to an existing DMA command, so patch-up the
    * pending DMA upload and start clean.
    */

   svga_buffer_upload_flush(sbuf->dma.svga, sbuf);

   assert(!sbuf->dma.pending);
   assert(!sbuf->dma.svga);
   assert(!sbuf->dma.boxes);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      /*
       * Add a new range.
       */

      sbuf->map.ranges[sbuf->map.num_ranges].start = start;
      sbuf->map.ranges[sbuf->map.num_ranges].end = end;
      ++sbuf->map.num_ranges;
   } else {
      /*
       * Everything else failed, so just extend the nearest range.
       *
       * It is OK to do this because we always keep a local copy of the
       * host buffer data, for SW TNL, and the host never modifies the buffer.
       */

      assert(nearest_range < SVGA_BUFFER_MAX_RANGES);
      assert(nearest_range < sbuf->map.num_ranges);
      sbuf->map.ranges[nearest_range].start = MIN2(sbuf->map.ranges[nearest_range].start, start);
      sbuf->map.ranges[nearest_range].end   = MAX2(sbuf->map.ranges[nearest_range].end,   end);
   }
}



/**
 * Copy the contents of the malloc buffer to a hardware buffer.
 */
static enum pipe_error
svga_buffer_update_hw(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   assert(!sbuf->user);
   if (!sbuf->hwbuf) {
      enum pipe_error ret;
      void *map;

      assert(sbuf->swbuf);
      if (!sbuf->swbuf)
         return PIPE_ERROR;

      ret = svga_buffer_create_hw_storage(ss, sbuf);
      if (ret != PIPE_OK)
         return ret;

      pipe_mutex_lock(ss->swc_mutex);
      map = ss->sws->buffer_map(ss->sws, sbuf->hwbuf, PIPE_TRANSFER_WRITE);
      assert(map);
      if (!map) {
         pipe_mutex_unlock(ss->swc_mutex);
         svga_buffer_destroy_hw_storage(ss, sbuf);
         return PIPE_ERROR;
      }

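      /* Copy the shadow (malloc) buffer into the new hardware buffer. */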
      memcpy(map, sbuf->swbuf, sbuf->b.b.width0);
      ss->sws->buffer_unmap(ss->sws, sbuf->hwbuf);

      /* This user/malloc buffer is now indistinguishable from a gpu buffer */
      assert(!sbuf->map.count);
      if (!sbuf->map.count) {
         if (sbuf->user)
            sbuf->user = FALSE;
         else
            align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return PIPE_OK;
}


/**
 * Upload the buffer to the host in a piecewise fashion.
 *
 * Used when the buffer is too big to fit in the GMR aperture.
 */
static enum pipe_error
svga_buffer_upload_piecewise(struct svga_screen *ss,
                             struct svga_context *svga,
                             struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;
   const unsigned alignment = sizeof(void *);
   const unsigned usage = 0;
   unsigned i;

   assert(sbuf->map.num_ranges);
   assert(!sbuf->dma.pending);

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      struct svga_buffer_range *range = &sbuf->map.ranges[i];
      unsigned offset = range->start;
      unsigned size = range->end - range->start;

      while (offset < range->end) {
         struct svga_winsys_buffer *hwbuf;
         uint8_t *map;
         enum pipe_error ret;

         if (offset + size > range->end)
            size = range->end - offset;

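         /* Allocate a staging GMR buffer for this piece, halving the piece
          * size on failure until an allocation succeeds or nothing is left.
          */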
         hwbuf = sws->buffer_create(sws, alignment, usage, size);
         while (!hwbuf) {
            size /= 2;
            if (!size)
               return PIPE_ERROR_OUT_OF_MEMORY;
            hwbuf = sws->buffer_create(sws, alignment, usage, size);
         }

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  offset, offset + size);

         map = sws->buffer_map(sws, hwbuf,
                               PIPE_TRANSFER_WRITE |
                               PIPE_TRANSFER_DISCARD_RANGE);
         assert(map);
         if (map) {
            /* Copy from the piece's offset within the shadow buffer, since
             * the DMA below uploads this staging buffer to host offset
             * 'offset'.
             */
            memcpy(map, (const uint8_t *) sbuf->swbuf + offset, size);
            sws->buffer_unmap(sws, hwbuf);
         }

         ret = SVGA3D_BufferDMA(svga->swc,
                                hwbuf, sbuf->handle,
                                SVGA3D_WRITE_HOST_VRAM,
                                size, 0, offset, sbuf->dma.flags);
         if (ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = SVGA3D_BufferDMA(svga->swc,
                                   hwbuf, sbuf->handle,
                                   SVGA3D_WRITE_HOST_VRAM,
                                   size, 0, offset, sbuf->dma.flags);
            assert(ret == PIPE_OK);
         }

         sbuf->dma.flags.discard = FALSE;

         sws->buffer_destroy(sws, hwbuf);

         offset += size;
      }
   }

   sbuf->map.num_ranges = 0;

   return PIPE_OK;
}




/* Get (or create/upload) the winsys surface handle so that we can
 * refer to this buffer in fifo commands.
 */
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga,
                   struct pipe_resource *buf)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if (!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   assert(!sbuf->map.count);
   assert(!sbuf->user);

   if (!sbuf->handle) {
      ret = svga_buffer_create_host_surface(ss, sbuf);
      if (ret != PIPE_OK)
         return NULL;
   }

   assert(sbuf->handle);

   if (sbuf->map.num_ranges) {
      if (!sbuf->dma.pending) {
         /*
          * No pending DMA upload yet, so insert a DMA upload command now.
          */

         /*
          * Migrate the data from swbuf -> hwbuf if necessary.
          */
         ret = svga_buffer_update_hw(ss, sbuf);
         if (ret == PIPE_OK) {
            /*
             * Queue a dma command.
             */

            ret = svga_buffer_upload_command(svga, sbuf);
            if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
               svga_context_flush(svga, NULL);
               ret = svga_buffer_upload_command(svga, sbuf);
               assert(ret == PIPE_OK);
            }
            if (ret == PIPE_OK) {
               sbuf->dma.pending = TRUE;
               assert(!sbuf->head.prev && !sbuf->head.next);
               LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
            }
         }
         else if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
            /*
             * The buffer is too big to fit in the GMR aperture, so break it
             * into smaller pieces.
             */
            ret = svga_buffer_upload_piecewise(ss, svga, sbuf);
         }

         if (ret != PIPE_OK) {
            /*
             * Something unexpected happened above. There is very little that
             * we can do other than proceeding while ignoring the dirty ranges.
             */
            assert(0);
            sbuf->map.num_ranges = 0;
         }
      }
      else {
         /*
          * There is already a pending DMA. Make sure it came from this
          * context.
          */
         assert(sbuf->dma.svga == svga);
      }
   }

   assert(!sbuf->map.num_ranges || sbuf->dma.pending);

   return sbuf->handle;
}



void
svga_context_flush_buffers(struct svga_context *svga)
{
   struct list_head *curr, *next;
   struct svga_buffer *sbuf;

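   /* svga_buffer_upload_flush() unlinks the buffer from this list, so the
    * next pointer must be saved before flushing each entry.
    */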
   curr = svga->dirty_buffers.next;
   next = curr->next;
   while (curr != &svga->dirty_buffers) {
      sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      assert(p_atomic_read(&sbuf->b.b.reference.count) != 0);
      assert(sbuf->dma.pending);

      svga_buffer_upload_flush(svga, sbuf);

      curr = next;
      next = curr->next;
   }
}