/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


34 #include "intel_batchbuffer.h"
35 #include "brw_context.h"
36 #include "brw_state.h"
37 #include "brw_defines.h"
38
39 #define VS 0
40 #define GS 1
41 #define CLP 2
42 #define SF 3
43 #define CS 4
44
/** @file brw_urb.c
 *
 * Manages the division of the URB space between the various fixed-function
 * units.
 *
 * See the Thread Initiation Management section of the GEN4 B-Spec, and
 * the individual *_STATE structures for restrictions on numbers of
 * entries and threads.
 */

/*
 * Generally, a unit requires a min_nr_entries based on how many entries
 * it produces before the downstream unit gets unblocked and can use and
 * dereference some of its handles.
 *
 * The SF unit preallocates a PUE at the start of thread dispatch, and only
 * uses that one.  So it requires one entry per thread.
 *
 * For CLIP, the SF unit will hold the previous primitive while the
 * next is getting assembled, meaning that linestrips require 3 CLIP VUEs
 * (vertices) to ensure continued processing, trifans require 4, and tristrips
 * require 5.  There can be 1 or 2 threads, and each has the same requirement.
 *
 * GS has the same requirement as CLIP, but it never handles tristrips,
 * so we can lower the minimum to 4 for the POLYGONs (trifans) it produces.
 * We only run it single-threaded.
 *
 * For VS, the number of entries may be 8, 12, 16, or 32 (or 64 on G4X).
 * Each thread processes 2 preallocated VUEs (vertices) at a time, and they
 * get streamed down as soon as threads processing earlier vertices get
 * theirs accepted.
 *
 * Each unit will take the number of URB entries we give it (based on the
 * entry size calculated in brw_vs_emit.c for VUEs, brw_sf_emit.c for PUEs,
 * and brw_curbe.c for the CURBEs) and decide its maximum number of
 * threads it can support based on that, in brw_*_state.c.
 *
 * XXX: Are the min_entry_size numbers useful?
 * XXX: Verify min_nr_entries, esp for VS.
 * XXX: Verify SF min_entry_size.
 */
86 static const struct {
87 GLuint min_nr_entries;
88 GLuint preferred_nr_entries;
89 GLuint min_entry_size;
90 GLuint max_entry_size;
91 } limits[CS+1] = {
92 { 16, 32, 1, 5 }, /* vs */
93 { 4, 8, 1, 5 }, /* gs */
94 { 5, 10, 1, 5 }, /* clp */
95 { 1, 8, 1, 12 }, /* sf */
96 { 1, 4, 1, 32 } /* cs */
97 };
98
99
check_urb_layout(struct brw_context * brw)100 static bool check_urb_layout(struct brw_context *brw)
101 {
102 brw->urb.vs_start = 0;
103 brw->urb.gs_start = brw->urb.nr_vs_entries * brw->urb.vsize;
104 brw->urb.clip_start = brw->urb.gs_start + brw->urb.nr_gs_entries * brw->urb.vsize;
105 brw->urb.sf_start = brw->urb.clip_start + brw->urb.nr_clip_entries * brw->urb.vsize;
106 brw->urb.cs_start = brw->urb.sf_start + brw->urb.nr_sf_entries * brw->urb.sfsize;
107
108 return brw->urb.cs_start + brw->urb.nr_cs_entries *
109 brw->urb.csize <= brw->urb.size;
110 }
111
112 /* Most minimal update, forces re-emit of URB fence packet after GS
113 * unit turned on/off.
114 */
recalculate_urb_fence(struct brw_context * brw)115 static void recalculate_urb_fence( struct brw_context *brw )
116 {
117 struct intel_context *intel = &brw->intel;
118 GLuint csize = brw->curbe.total_size;
119 GLuint vsize = brw->vs.prog_data->urb_entry_size;
120 GLuint sfsize = brw->sf.prog_data->urb_entry_size;
121
122 if (csize < limits[CS].min_entry_size)
123 csize = limits[CS].min_entry_size;
124
125 if (vsize < limits[VS].min_entry_size)
126 vsize = limits[VS].min_entry_size;
127
128 if (sfsize < limits[SF].min_entry_size)
129 sfsize = limits[SF].min_entry_size;
130
131 if (brw->urb.vsize < vsize ||
132 brw->urb.sfsize < sfsize ||
133 brw->urb.csize < csize ||
134 (brw->urb.constrained && (brw->urb.vsize > vsize ||
135 brw->urb.sfsize > sfsize ||
136 brw->urb.csize > csize))) {
137
138
139 brw->urb.csize = csize;
140 brw->urb.sfsize = sfsize;
141 brw->urb.vsize = vsize;
142
143 brw->urb.nr_vs_entries = limits[VS].preferred_nr_entries;
144 brw->urb.nr_gs_entries = limits[GS].preferred_nr_entries;
145 brw->urb.nr_clip_entries = limits[CLP].preferred_nr_entries;
146 brw->urb.nr_sf_entries = limits[SF].preferred_nr_entries;
147 brw->urb.nr_cs_entries = limits[CS].preferred_nr_entries;
148
149 brw->urb.constrained = 0;
150
151 if (intel->gen == 5) {
152 brw->urb.nr_vs_entries = 128;
153 brw->urb.nr_sf_entries = 48;
154 if (check_urb_layout(brw)) {
155 goto done;
156 } else {
157 brw->urb.constrained = 1;
158 brw->urb.nr_vs_entries = limits[VS].preferred_nr_entries;
159 brw->urb.nr_sf_entries = limits[SF].preferred_nr_entries;
160 }
161 } else if (intel->is_g4x) {
162 brw->urb.nr_vs_entries = 64;
163 if (check_urb_layout(brw)) {
164 goto done;
165 } else {
166 brw->urb.constrained = 1;
167 brw->urb.nr_vs_entries = limits[VS].preferred_nr_entries;
168 }
169 }
170
171 if (!check_urb_layout(brw)) {
172 brw->urb.nr_vs_entries = limits[VS].min_nr_entries;
173 brw->urb.nr_gs_entries = limits[GS].min_nr_entries;
174 brw->urb.nr_clip_entries = limits[CLP].min_nr_entries;
175 brw->urb.nr_sf_entries = limits[SF].min_nr_entries;
176 brw->urb.nr_cs_entries = limits[CS].min_nr_entries;
177
178 /* Mark us as operating with constrained nr_entries, so that next
179 * time we recalculate we'll resize the fences in the hope of
180 * escaping constrained mode and getting back to normal performance.
181 */
182 brw->urb.constrained = 1;
183
184 if (!check_urb_layout(brw)) {
185 /* This is impossible, given the maximal sizes of urb
186 * entries and the values for minimum nr of entries
187 * provided above.
188 */
189 printf("couldn't calculate URB layout!\n");
190 exit(1);
191 }
192
193 if (unlikely(INTEL_DEBUG & (DEBUG_URB|DEBUG_PERF)))
194 printf("URB CONSTRAINED\n");
195 }
196
197 done:
198 if (unlikely(INTEL_DEBUG & DEBUG_URB))
199 printf("URB fence: %d ..VS.. %d ..GS.. %d ..CLP.. %d ..SF.. %d ..CS.. %d\n",
200 brw->urb.vs_start,
201 brw->urb.gs_start,
202 brw->urb.clip_start,
203 brw->urb.sf_start,
204 brw->urb.cs_start,
205 brw->urb.size);
206
207 brw->state.dirty.brw |= BRW_NEW_URB_FENCE;
208 }
209 }
210
211
212 const struct brw_tracked_state brw_recalculate_urb_fence = {
213 .dirty = {
214 .mesa = 0,
215 .brw = BRW_NEW_CURBE_OFFSETS,
216 .cache = (CACHE_NEW_VS_PROG |
217 CACHE_NEW_SF_PROG)
218 },
219 .emit = recalculate_urb_fence
220 };
221
222
223
224
225
brw_upload_urb_fence(struct brw_context * brw)226 void brw_upload_urb_fence(struct brw_context *brw)
227 {
228 struct brw_urb_fence uf;
229 memset(&uf, 0, sizeof(uf));
230
231 uf.header.opcode = CMD_URB_FENCE;
232 uf.header.length = sizeof(uf)/4-2;
233 uf.header.vs_realloc = 1;
234 uf.header.gs_realloc = 1;
235 uf.header.clp_realloc = 1;
236 uf.header.sf_realloc = 1;
237 uf.header.vfe_realloc = 1;
238 uf.header.cs_realloc = 1;
239
240 /* The ordering below is correct, not the layout in the
241 * instruction.
242 *
243 * There are 256/384 urb reg pairs in total.
244 */
245 uf.bits0.vs_fence = brw->urb.gs_start;
246 uf.bits0.gs_fence = brw->urb.clip_start;
247 uf.bits0.clp_fence = brw->urb.sf_start;
248 uf.bits1.sf_fence = brw->urb.cs_start;
249 uf.bits1.cs_fence = brw->urb.size;
250
251 /* erratum: URB_FENCE must not cross a 64byte cacheline */
252 if ((brw->intel.batch.used & 15) > 12) {
253 int pad = 16 - (brw->intel.batch.used & 15);
254 do
255 brw->intel.batch.map[brw->intel.batch.used++] = MI_NOOP;
256 while (--pad);
257 }
258
259 BRW_BATCH_STRUCT(brw, &uf);
260 }
261