/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keith@tungstengraphics.com>
  */



#include "intel_fbo.h"
#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

/***********************************************************************
 * WM unit - fragment programs and rasterization
 */

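/**
 * Return true if the bound fragment program writes to at least one color
 * draw buffer whose color write mask has a channel enabled.
 */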
bool
brw_color_buffer_write_enabled(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   const struct gl_fragment_program *fp = brw->fragment_program;
   int i;

   /* _NEW_BUFFERS */
   for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
      struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];

      /* _NEW_COLOR */
      if (rb &&
	  (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_COLOR) ||
	   fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DATA0 + i)) &&
	  (ctx->Color.ColorMask[i][0] ||
	   ctx->Color.ColorMask[i][1] ||
	   ctx->Color.ColorMask[i][2] ||
	   ctx->Color.ColorMask[i][3])) {
	 return true;
      }
   }

   return false;
}

/**
 * Set up WM hardware state.  See page 225 of Volume 2.
 */
static void
brw_upload_wm_unit(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   const struct gl_fragment_program *fp = brw->fragment_program;
   struct brw_wm_unit_state *wm;

   wm = brw_state_batch(brw, AUB_TRACE_WM_STATE,
			sizeof(*wm), 32, &brw->wm.state_offset);
   memset(wm, 0, sizeof(*wm));

   if (brw->wm.prog_data->prog_offset_16) {
      /* These two fields should be the same pre-gen6, which is why we
       * only have one hardware field to program for both dispatch
       * widths.
       */
      assert(brw->wm.prog_data->first_curbe_grf ==
	     brw->wm.prog_data->first_curbe_grf_16);
   }

   /* BRW_NEW_PROGRAM_CACHE | CACHE_NEW_WM_PROG */
   wm->thread0.grf_reg_count = brw->wm.prog_data->reg_blocks;
   wm->wm9.grf_reg_count_2 = brw->wm.prog_data->reg_blocks_16;

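   /* The kernel start pointer shares its dword with grf_reg_count, so the
    * relocation delta passed to brw_program_reloc() folds grf_reg_count back
    * in (the << 1 below matches its position in the dword); on pre-gen5 the
    * relocation rewrites the whole dword and would otherwise clobber it.
    */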
   wm->thread0.kernel_start_pointer =
      brw_program_reloc(brw,
			brw->wm.state_offset +
			offsetof(struct brw_wm_unit_state, thread0),
			brw->wm.prog_offset +
			(wm->thread0.grf_reg_count << 1)) >> 6;

   wm->wm9.kernel_start_pointer_2 =
      brw_program_reloc(brw,
			brw->wm.state_offset +
			offsetof(struct brw_wm_unit_state, wm9),
			brw->wm.prog_offset +
			brw->wm.prog_data->prog_offset_16 +
			(wm->wm9.grf_reg_count_2 << 1)) >> 6;

   wm->thread1.depth_coef_urb_read_offset = 1;
   wm->thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;

   wm->thread1.binding_table_entry_count = 0;

   if (brw->wm.prog_data->total_scratch != 0) {
      wm->thread2.scratch_space_base_pointer =
	 brw->wm.scratch_bo->offset >> 10; /* reloc */
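      /* per_thread_scratch_space is a power-of-two encoding in 1KB units:
       * 0 = 1KB per thread, 1 = 2KB, and so on.  total_scratch is expected
       * to be a power of two of at least 1KB here, so e.g. a 4KB allocation
       * gives ffs(4096) - 11 = 2.
       */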
      wm->thread2.per_thread_scratch_space =
	 ffs(brw->wm.prog_data->total_scratch) - 11;
   } else {
      wm->thread2.scratch_space_base_pointer = 0;
      wm->thread2.per_thread_scratch_space = 0;
   }

   wm->thread3.dispatch_grf_start_reg = brw->wm.prog_data->first_curbe_grf;
   wm->thread3.urb_entry_read_length = brw->wm.prog_data->urb_read_length;
   wm->thread3.urb_entry_read_offset = 0;
   wm->thread3.const_urb_entry_read_length =
      brw->wm.prog_data->curb_read_length;
   /* BRW_NEW_CURBE_OFFSETS */
   wm->thread3.const_urb_entry_read_offset = brw->curbe.wm_start * 2;

   if (intel->gen == 5)
      wm->wm4.sampler_count = 0; /* hardware requirement */
   else {
      /* CACHE_NEW_SAMPLER */
      wm->wm4.sampler_count = (brw->sampler.count + 1) / 4;
   }

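   /* The sampler state pointer is stored 32-byte aligned (hence the >> 5).
    * intel->batch.bo->offset is only the presumed address; the relocation
    * emitted near the end of this function patches in the real one if the
    * buffer moves.
    */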
   if (brw->sampler.count) {
      /* reloc */
      wm->wm4.sampler_state_pointer = (intel->batch.bo->offset +
				       brw->sampler.offset) >> 5;
   } else {
      wm->wm4.sampler_state_pointer = 0;
   }

   /* BRW_NEW_FRAGMENT_PROGRAM */
   wm->wm5.program_uses_depth = (fp->Base.InputsRead &
				 (1 << FRAG_ATTRIB_WPOS)) != 0;
   wm->wm5.program_computes_depth = (fp->Base.OutputsWritten &
				     BITFIELD64_BIT(FRAG_RESULT_DEPTH)) != 0;
   /* _NEW_BUFFERS
    * Override for NULL depthbuffer case, required by the Pixel Shader Computed
    * Depth field.
    */
   if (!intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH))
      wm->wm5.program_computes_depth = 0;

   /* _NEW_COLOR */
   wm->wm5.program_uses_killpixel = fp->UsesKill || ctx->Color.AlphaEnabled;

   /* BRW_NEW_FRAGMENT_PROGRAM
    *
    * If using the fragment shader backend, the program is always
    * 8-wide.  If not, it's always 16.
    */
   if (ctx->Shader._CurrentFragmentProgram) {
      struct brw_shader *shader = (struct brw_shader *)
	 ctx->Shader._CurrentFragmentProgram->_LinkedShaders[MESA_SHADER_FRAGMENT];

      if (shader != NULL && shader->ir != NULL) {
	 wm->wm5.enable_8_pix = 1;
	 if (brw->wm.prog_data->prog_offset_16)
	    wm->wm5.enable_16_pix = 1;
      }
   }
   if (!wm->wm5.enable_8_pix)
      wm->wm5.enable_16_pix = 1;

   wm->wm5.max_threads = brw->max_wm_threads - 1;

   /* _NEW_BUFFERS | _NEW_COLOR */
   if (brw_color_buffer_write_enabled(brw) ||
       wm->wm5.program_uses_killpixel ||
       wm->wm5.program_computes_depth) {
      wm->wm5.thread_dispatch_enable = 1;
   }

   wm->wm5.legacy_line_rast = 0;
   wm->wm5.legacy_global_depth_bias = 0;
   wm->wm5.early_depth_test = 1;	        /* never need to disable */
   wm->wm5.line_aa_region_width = 0;
   wm->wm5.line_endcap_aa_region_width = 1;

   /* _NEW_POLYGONSTIPPLE */
   wm->wm5.polygon_stipple = ctx->Polygon.StippleFlag;

   /* _NEW_POLYGON */
   if (ctx->Polygon.OffsetFill) {
      wm->wm5.depth_offset = 1;
      /* Something weird going on with legacy_global_depth_bias,
       * offset_constant, scaling and MRD.  This value passes glean
       * but gives some odd results elsewhere (e.g. the
       * quad-offset-units test).
       */
      wm->global_depth_offset_constant = ctx->Polygon.OffsetUnits * 2;

      /* This is the only value that passes glean:
       */
      wm->global_depth_offset_scale = ctx->Polygon.OffsetFactor;
   }

   /* _NEW_LINE */
   wm->wm5.line_stipple = ctx->Line.StippleFlag;

   /* _NEW_DEPTH */
   if (unlikely(INTEL_DEBUG & DEBUG_STATS) || intel->stats_wm)
      wm->wm4.stats_enable = 1;

   /* Emit scratch space relocation */
   if (brw->wm.prog_data->total_scratch != 0) {
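      /* The relocation delta carries the per_thread_scratch_space bits of
       * the thread2 dword, so they are preserved when the kernel rewrites
       * that dword with the scratch BO's address.
       */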
      drm_intel_bo_emit_reloc(intel->batch.bo,
			      brw->wm.state_offset +
			      offsetof(struct brw_wm_unit_state, thread2),
			      brw->wm.scratch_bo,
			      wm->thread2.per_thread_scratch_space,
			      I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
   }

   /* Emit sampler state relocation */
   if (brw->sampler.count != 0) {
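      /* As with the scratch reloc above, OR the non-pointer bits of wm4
       * (stats_enable and sampler_count) into the delta so the relocated
       * dword keeps them.
       */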
      drm_intel_bo_emit_reloc(intel->batch.bo,
			      brw->wm.state_offset +
			      offsetof(struct brw_wm_unit_state, wm4),
			      intel->batch.bo, (brw->sampler.offset |
						wm->wm4.stats_enable |
						(wm->wm4.sampler_count << 2)),
			      I915_GEM_DOMAIN_INSTRUCTION, 0);
   }

   brw->state.dirty.cache |= CACHE_NEW_WM_UNIT;
}

const struct brw_tracked_state brw_wm_unit = {
   .dirty = {
      .mesa = (_NEW_POLYGON |
	       _NEW_POLYGONSTIPPLE |
	       _NEW_LINE |
	       _NEW_COLOR |
	       _NEW_DEPTH |
	       _NEW_BUFFERS),

      .brw = (BRW_NEW_BATCH |
	      BRW_NEW_PROGRAM_CACHE |
	      BRW_NEW_FRAGMENT_PROGRAM |
	      BRW_NEW_CURBE_OFFSETS),

      .cache = (CACHE_NEW_WM_PROG |
		CACHE_NEW_SAMPLER)
   },
   .emit = brw_upload_wm_unit,
};