/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <stdio.h>

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/u_math.h"

#include "r300_screen_buffer.h"
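
/* Upload user index data into a GPU-visible buffer via the upload manager.
 * On return, *index_buffer points at the upload buffer and *start has been
 * rewritten as an index offset into that buffer. */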
void r300_upload_index_buffer(struct r300_context *r300,
                              struct pipe_resource **index_buffer,
                              unsigned index_size, unsigned *start,
                              unsigned count, const uint8_t *ptr)
{
    unsigned index_offset;

    *index_buffer = NULL;

    u_upload_data(r300->uploader,
                  0, count * index_size, 4,
                  ptr + (*start * index_size),
                  &index_offset,
                  index_buffer);

    *start = index_offset / index_size;
}

static void r300_buffer_destroy(struct pipe_screen *screen,
                                struct pipe_resource *buf)
{
    struct r300_resource *rbuf = r300_resource(buf);

    align_free(rbuf->malloced_buffer);

    if (rbuf->buf)
        pb_reference(&rbuf->buf, NULL);

    FREE(rbuf);
}

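/* Map a buffer for CPU access. Malloced (CPU-side) buffers are returned
 * directly; winsys buffers may first have their storage replaced (see the
 * discard path below) so that mapping does not stall on a busy GPU buffer. */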
static void *
r300_buffer_transfer_map( struct pipe_context *context,
                          struct pipe_resource *resource,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer )
{
    struct r300_context *r300 = r300_context(context);
    struct radeon_winsys *rws = r300->screen->rws;
    struct r300_resource *rbuf = r300_resource(resource);
    struct pipe_transfer *transfer;
    uint8_t *map;

    transfer = slab_alloc(&r300->pool_transfers);
    transfer->resource = resource;
    transfer->level = level;
    transfer->usage = usage;
    transfer->box = *box;
    transfer->stride = 0;
    transfer->layer_stride = 0;

    if (rbuf->malloced_buffer) {
        *ptransfer = transfer;
        return rbuf->malloced_buffer + box->x;
    }

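    /* If the caller discards the whole buffer and allows synchronization,
     * try to replace the storage with a fresh winsys buffer instead of
     * waiting for the GPU to finish with the old one. */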
    if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE &&
        !(usage & PIPE_MAP_UNSYNCHRONIZED)) {
        assert(usage & PIPE_MAP_WRITE);

        /* Check if mapping this buffer would cause waiting for the GPU. */
        if (r300->rws->cs_is_buffer_referenced(r300->cs, rbuf->buf, RADEON_USAGE_READWRITE) ||
            !r300->rws->buffer_wait(rbuf->buf, 0, RADEON_USAGE_READWRITE)) {
            unsigned i;
            struct pb_buffer *new_buf;

            /* Create a new one in the same pipe_resource. */
            new_buf = r300->rws->buffer_create(r300->rws, rbuf->b.b.width0,
                                               R300_BUFFER_ALIGNMENT,
                                               rbuf->domain,
                                               RADEON_FLAG_NO_INTERPROCESS_SHARING);
            if (new_buf) {
                /* Discard the old buffer. */
                pb_reference(&rbuf->buf, NULL);
                rbuf->buf = new_buf;

                /* The storage changed; mark the vertex arrays dirty so the
                 * new buffer gets re-bound wherever the old one was bound. */
                for (i = 0; i < r300->nr_vertex_buffers; i++) {
                    if (r300->vertex_buffer[i].buffer.resource == &rbuf->b.b) {
                        r300->vertex_arrays_dirty = TRUE;
                        break;
                    }
                }
            }
        }
    }

    /* The GPU never writes to these buffers, therefore mapping for read can
     * be unsynchronized. */
    if (!(usage & PIPE_MAP_WRITE)) {
        usage |= PIPE_MAP_UNSYNCHRONIZED;
    }

    map = rws->buffer_map(rbuf->buf, r300->cs, usage);

    if (!map) {
        slab_free(&r300->pool_transfers, transfer);
        return NULL;
    }

    *ptransfer = transfer;
    return map + box->x;
}

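/* Nothing needs to be unmapped for buffers; only release the transfer. */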
static void r300_buffer_transfer_unmap( struct pipe_context *pipe,
                                        struct pipe_transfer *transfer )
{
    struct r300_context *r300 = r300_context(pipe);

    slab_free(&r300->pool_transfers, transfer);
}

static const struct u_resource_vtbl r300_buffer_vtbl =
{
   NULL,                               /* get_handle */
   r300_buffer_destroy,                /* resource_destroy */
   r300_buffer_transfer_map,           /* transfer_map */
   NULL,                               /* transfer_flush_region */
   r300_buffer_transfer_unmap,         /* transfer_unmap */
};

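/* Create a buffer resource. Constant buffers and SWTCL vertex/index buffers
 * get plain malloced storage; everything else gets a GTT winsys buffer. */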
struct pipe_resource *r300_buffer_create(struct pipe_screen *screen,
                                         const struct pipe_resource *templ)
{
    struct r300_screen *r300screen = r300_screen(screen);
    struct r300_resource *rbuf;

    rbuf = MALLOC_STRUCT(r300_resource);
    if (!rbuf)
        return NULL;

    rbuf->b.b = *templ;
    rbuf->b.vtbl = &r300_buffer_vtbl;
    pipe_reference_init(&rbuf->b.b.reference, 1);
    rbuf->b.b.screen = screen;
    rbuf->domain = RADEON_DOMAIN_GTT;
    rbuf->buf = NULL;
    rbuf->malloced_buffer = NULL;

    /* Allocate constant buffers and SWTCL vertex and index buffers in RAM.
     * Note that uploaded index buffers use the flag PIPE_BIND_CUSTOM, so that
     * we can distinguish them from user-created buffers.
     */
    if (templ->bind & PIPE_BIND_CONSTANT_BUFFER ||
        (!r300screen->caps.has_tcl && !(templ->bind & PIPE_BIND_CUSTOM))) {
        rbuf->malloced_buffer = align_malloc(templ->width0, 64);
        if (!rbuf->malloced_buffer) {
            FREE(rbuf);
            return NULL;
        }
        return &rbuf->b.b;
    }

    rbuf->buf =
        r300screen->rws->buffer_create(r300screen->rws, rbuf->b.b.width0,
                                       R300_BUFFER_ALIGNMENT,
                                       rbuf->domain,
                                       RADEON_FLAG_NO_INTERPROCESS_SHARING);
    if (!rbuf->buf) {
        FREE(rbuf);
        return NULL;
    }
    return &rbuf->b.b;
}