/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Mika Kuoppala <mika.kuoppala@intel.com>
 */

#include "intel_renderstate.h"
#include <lib/gen6_render.h>
#include <lib/intel_reg.h>
#include <string.h>

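/*
 * Precompiled GEN6 EU instructions for a no-mask, affine-sampling pixel
 * shader kernel; the trailing 0x0000007e dwords look like NOP padding.
 */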
static const uint32_t ps_kernel_nomask_affine[][4] = {
	{ 0x0060005a, 0x204077be, 0x000000c0, 0x008d0040 },
	{ 0x0060005a, 0x206077be, 0x000000c0, 0x008d0080 },
	{ 0x0060005a, 0x208077be, 0x000000d0, 0x008d0040 },
	{ 0x0060005a, 0x20a077be, 0x000000d0, 0x008d0080 },
	{ 0x00000201, 0x20080061, 0x00000000, 0x00000000 },
	{ 0x00600001, 0x20200022, 0x008d0000, 0x00000000 },
	{ 0x02800031, 0x21c01cc9, 0x00000020, 0x0a8a0001 },
	{ 0x00600001, 0x204003be, 0x008d01c0, 0x00000000 },
	{ 0x00600001, 0x206003be, 0x008d01e0, 0x00000000 },
	{ 0x00600001, 0x208003be, 0x008d0200, 0x00000000 },
	{ 0x00600001, 0x20a003be, 0x008d0220, 0x00000000 },
	{ 0x00600001, 0x20c003be, 0x008d0240, 0x00000000 },
	{ 0x00600001, 0x20e003be, 0x008d0260, 0x00000000 },
	{ 0x00600001, 0x210003be, 0x008d0280, 0x00000000 },
	{ 0x00600001, 0x212003be, 0x008d02a0, 0x00000000 },
	{ 0x05800031, 0x24001cc8, 0x00000040, 0x90019000 },
	{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
};

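/* Emit a zeroed SURFACE_STATE, i.e. a null surface binding entry. */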
static uint32_t
gen6_bind_buf_null(struct intel_batchbuffer *batch)
{
	struct gen6_surface_state ss;
	memset(&ss, 0, sizeof(ss));

	return OUT_STATE_STRUCT(ss, 32);
}

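/*
 * Allocate a 32-byte binding table and point both of its entries at
 * null surface state.
 */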
static uint32_t
gen6_bind_surfaces(struct intel_batchbuffer *batch)
{
	unsigned offset;

	offset = intel_batch_state_alloc(batch, 32, 32, "bind surfaces");

	bb_area_emit_offset(batch->state, offset, gen6_bind_buf_null(batch), STATE_OFFSET, "bind 1");
	bb_area_emit_offset(batch->state, offset + 4, gen6_bind_buf_null(batch), STATE_OFFSET, "bind 2");

	return offset;
}

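/* STATE_SIP: leave the system instruction pointer at zero. */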
static void
gen6_emit_sip(struct intel_batchbuffer *batch)
{
	OUT_BATCH(GEN4_STATE_SIP | 0);
	OUT_BATCH(0);
}

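/*
 * 3DSTATE_URB: give the VS the GEN6 minimum of 24 URB entries and leave
 * the GS with none, since no GS threads are used.
 */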
static void
gen6_emit_urb(struct intel_batchbuffer *batch)
{
	OUT_BATCH(GEN6_3DSTATE_URB | (3 - 2));
	OUT_BATCH((1 - 1) << GEN6_3DSTATE_URB_VS_SIZE_SHIFT |
		  24 << GEN6_3DSTATE_URB_VS_ENTRIES_SHIFT); /* at least 24 on GEN6 */
	OUT_BATCH(0 << GEN6_3DSTATE_URB_GS_SIZE_SHIFT |
		  0 << GEN6_3DSTATE_URB_GS_ENTRIES_SHIFT); /* no GS thread */
}

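/*
 * STATE_BASE_ADDRESS: the state base addresses are set up via
 * relocations against the generated buffer; the upper-bound fields are
 * left disabled.
 */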
static void
gen6_emit_state_base_address(struct intel_batchbuffer *batch)
{
	OUT_BATCH(GEN4_STATE_BASE_ADDRESS | (10 - 2));
	OUT_BATCH(0); /* general */
	OUT_RELOC(batch,
		  I915_GEM_DOMAIN_INSTRUCTION, 0,
		  BASE_ADDRESS_MODIFY);
	OUT_RELOC(batch, /* instruction */
		  I915_GEM_DOMAIN_INSTRUCTION, 0,
		  BASE_ADDRESS_MODIFY);
	OUT_BATCH(0); /* indirect */
	OUT_RELOC(batch,
		  I915_GEM_DOMAIN_INSTRUCTION, 0,
		  BASE_ADDRESS_MODIFY);

	/* upper bounds, disable */
	OUT_BATCH(0);
	OUT_BATCH(BASE_ADDRESS_MODIFY);
	OUT_BATCH(0);
	OUT_BATCH(BASE_ADDRESS_MODIFY);
}

static void
gen6_emit_viewports(struct intel_batchbuffer *batch, uint32_t cc_vp)
{
	OUT_BATCH(GEN6_3DSTATE_VIEWPORT_STATE_POINTERS |
		  GEN6_3DSTATE_VIEWPORT_STATE_MODIFY_CC |
		  (4 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH_STATE_OFFSET(cc_vp);
}

static void
gen6_emit_vs(struct intel_batchbuffer *batch)
{
	/* disable VS constant buffer */
	OUT_BATCH(GEN6_3DSTATE_CONSTANT_VS | (5 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);

	OUT_BATCH(GEN6_3DSTATE_VS | (6 - 2));
	OUT_BATCH(0); /* no VS kernel */
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0); /* pass-through */
}

static void
gen6_emit_gs(struct intel_batchbuffer *batch)
{
	/* disable GS constant buffer */
	OUT_BATCH(GEN6_3DSTATE_CONSTANT_GS | (5 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);

	OUT_BATCH(GEN6_3DSTATE_GS | (7 - 2));
	OUT_BATCH(0); /* no GS kernel */
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0); /* pass-through */
}

static void
gen6_emit_clip(struct intel_batchbuffer *batch)
{
	OUT_BATCH(GEN6_3DSTATE_CLIP | (4 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0); /* pass-through */
	OUT_BATCH(0);
}

static void
gen6_emit_wm_constants(struct intel_batchbuffer *batch)
{
	/* disable WM constant buffer */
	OUT_BATCH(GEN6_3DSTATE_CONSTANT_PS | (5 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
}

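/* 3DSTATE_DEPTH_BUFFER: a null depth buffer, plus zeroed clear params. */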
static void
gen6_emit_null_depth_buffer(struct intel_batchbuffer *batch)
{
	OUT_BATCH(GEN4_3DSTATE_DEPTH_BUFFER | (7 - 2));
	OUT_BATCH(SURFACE_NULL << GEN4_3DSTATE_DEPTH_BUFFER_TYPE_SHIFT |
		  GEN4_DEPTHFORMAT_D32_FLOAT << GEN4_3DSTATE_DEPTH_BUFFER_FORMAT_SHIFT);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);

	OUT_BATCH(GEN4_3DSTATE_CLEAR_PARAMS | (2 - 2));
	OUT_BATCH(0);
}

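/*
 * One-time invariant state: select the 3D pipeline and program
 * single-sample multisampling with a sample mask of 1.
 */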
static void
gen6_emit_invariant(struct intel_batchbuffer *batch)
{
	OUT_BATCH(G4X_PIPELINE_SELECT | PIPELINE_SELECT_3D);

	OUT_BATCH(GEN6_3DSTATE_MULTISAMPLE | (3 - 2));
	OUT_BATCH(GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
		  GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
	OUT_BATCH(0);

	OUT_BATCH(GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
	OUT_BATCH(1);
}

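/*
 * CC state pointers: blend state comes from the state buffer; bit 0 of
 * each pointer dword appears to flag it as valid.
 */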
static void
gen6_emit_cc(struct intel_batchbuffer *batch, uint32_t blend)
{
	OUT_BATCH(GEN6_3DSTATE_CC_STATE_POINTERS | (4 - 2));
	OUT_BATCH_STATE_OFFSET(blend | 1);
	OUT_BATCH(1024 | 1);
	OUT_BATCH(1024 | 1);
}

static void
gen6_emit_sampler(struct intel_batchbuffer *batch, uint32_t state)
{
	OUT_BATCH(GEN6_3DSTATE_SAMPLER_STATE_POINTERS |
		  GEN6_3DSTATE_SAMPLER_STATE_MODIFY_PS |
		  (4 - 2));
	OUT_BATCH(0); /* VS */
	OUT_BATCH(0); /* GS */
	OUT_BATCH_STATE_OFFSET(state);
}

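/* 3DSTATE_SF: a single setup output, no culling, defaults elsewhere. */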
static void
gen6_emit_sf(struct intel_batchbuffer *batch)
{
	OUT_BATCH(GEN6_3DSTATE_SF | (20 - 2));
	OUT_BATCH(1 << GEN6_3DSTATE_SF_NUM_OUTPUTS_SHIFT |
		  1 << GEN6_3DSTATE_SF_URB_ENTRY_READ_LENGTH_SHIFT |
		  1 << GEN6_3DSTATE_SF_URB_ENTRY_READ_OFFSET_SHIFT);
	OUT_BATCH(0);
	OUT_BATCH(GEN6_3DSTATE_SF_CULL_NONE);
	OUT_BATCH(2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT); /* DW4 */
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0); /* DW9 */
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0); /* DW14 */
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0); /* DW19 */
}

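/*
 * 3DSTATE_WM: point at the pixel shader kernel, one sampler and two
 * binding table entries, SIMD16 dispatch with perspective pixel
 * barycentric interpolation.
 */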
static void
gen6_emit_wm(struct intel_batchbuffer *batch, int kernel)
{
	OUT_BATCH(GEN6_3DSTATE_WM | (9 - 2));
	OUT_BATCH_STATE_OFFSET(kernel);
	OUT_BATCH(1 << GEN6_3DSTATE_WM_SAMPLER_COUNT_SHIFT |
		  2 << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT);
	OUT_BATCH(0);
	OUT_BATCH(6 << GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT); /* DW4 */
	OUT_BATCH((40 - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT |
		  GEN6_3DSTATE_WM_DISPATCH_ENABLE |
		  GEN6_3DSTATE_WM_16_DISPATCH_ENABLE);
	OUT_BATCH(1 << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT |
		  GEN6_3DSTATE_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
	OUT_BATCH(0);
	OUT_BATCH(0);
}

static void
gen6_emit_binding_table(struct intel_batchbuffer *batch, uint32_t wm_table)
{
	OUT_BATCH(GEN4_3DSTATE_BINDING_TABLE_POINTERS |
		  GEN6_3DSTATE_BINDING_TABLE_MODIFY_PS |
		  (4 - 2));
	OUT_BATCH(0);		/* vs */
	OUT_BATCH(0);		/* gs */
	OUT_BATCH_STATE_OFFSET(wm_table);
}

static void
gen6_emit_drawing_rectangle(struct intel_batchbuffer *batch)
{
	OUT_BATCH(GEN4_3DSTATE_DRAWING_RECTANGLE | (4 - 2));
	OUT_BATCH(0xffffffff);
	OUT_BATCH(0 | 0);
	OUT_BATCH(0);
}

static void
gen6_emit_vertex_elements(struct intel_batchbuffer *batch)
{
	/* The VUE layout
	 *    dword 0-3: pad (0.0, 0.0, 0.0, 0.0)
	 *    dword 4-7: position (x, y, 1.0, 1.0),
	 *    dword 8-11: texture coordinate 0 (u0, v0, 0, 0)
	 *
	 * dword 4-11 are fetched from vertex buffer
	 */
	OUT_BATCH(GEN4_3DSTATE_VERTEX_ELEMENTS | (2 * 3 + 1 - 2));

	OUT_BATCH(0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN6_VE0_VALID |
		  SURFACEFORMAT_R32G32B32A32_FLOAT << VE0_FORMAT_SHIFT |
		  0 << VE0_OFFSET_SHIFT);
	OUT_BATCH(GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_0_SHIFT |
		  GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT |
		  GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
		  GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_3_SHIFT);

	/* x,y */
	OUT_BATCH(0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN6_VE0_VALID |
		  SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
		  0 << VE0_OFFSET_SHIFT); /* offsets vb in bytes */
	OUT_BATCH(GEN4_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
		  GEN4_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
		  GEN4_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT |
		  GEN4_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);

	/* u0, v0 */
	OUT_BATCH(0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN6_VE0_VALID |
		  SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT |
		  4 << VE0_OFFSET_SHIFT);	/* offset vb in bytes */
	OUT_BATCH(GEN4_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
		  GEN4_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
		  GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
		  GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_3_SHIFT);
}

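/*
 * CC viewport with an effectively unbounded depth range, so depth
 * clamping never affects the output.
 */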
static uint32_t
gen6_create_cc_viewport(struct intel_batchbuffer *batch)
{
	struct gen4_cc_viewport vp;

	memset(&vp, 0, sizeof(vp));

	vp.min_depth = -1.e35;
	vp.max_depth = 1.e35;

	return OUT_STATE_STRUCT(vp, 32);
}

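/*
 * Blend state: ONE/ZERO with ADD, i.e. a plain source copy, with
 * pre- and post-blend clamping enabled.
 */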
static uint32_t
gen6_create_cc_blend(struct intel_batchbuffer *batch)
{
	struct gen6_blend_state blend;

	memset(&blend, 0, sizeof(blend));

	blend.blend0.dest_blend_factor = GEN6_BLENDFACTOR_ZERO;
	blend.blend0.source_blend_factor = GEN6_BLENDFACTOR_ONE;
	blend.blend0.blend_func = GEN6_BLENDFUNCTION_ADD;
	blend.blend0.blend_enable = 1;

	blend.blend1.post_blend_clamp_enable = 1;
	blend.blend1.pre_blend_clamp_enable = 1;

	return OUT_STATE_STRUCT(blend, 64);
}

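/* Copy the pixel shader kernel into the state buffer, 64-byte aligned. */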
static uint32_t
gen6_create_kernel(struct intel_batchbuffer *batch)
{
	return intel_batch_state_copy(batch, ps_kernel_nomask_affine,
				      sizeof(ps_kernel_nomask_affine),
				      64, "ps_kernel");
}

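/* Build a SAMPLER_STATE matching the requested filter and extend (wrap) modes. */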
static uint32_t
gen6_create_sampler(struct intel_batchbuffer *batch,
		    sampler_filter_t filter,
		    sampler_extend_t extend)
{
	struct gen6_sampler_state ss;

	memset(&ss, 0, sizeof(ss));

	ss.ss0.lod_preclamp = 1;	/* GL mode */

	/* We use the legacy mode to get the semantics specified by
	 * the Render extension. */
	ss.ss0.border_color_mode = GEN4_BORDER_COLOR_MODE_LEGACY;

	switch (filter) {
	default:
	case SAMPLER_FILTER_NEAREST:
		ss.ss0.min_filter = GEN4_MAPFILTER_NEAREST;
		ss.ss0.mag_filter = GEN4_MAPFILTER_NEAREST;
		break;
	case SAMPLER_FILTER_BILINEAR:
		ss.ss0.min_filter = GEN4_MAPFILTER_LINEAR;
		ss.ss0.mag_filter = GEN4_MAPFILTER_LINEAR;
		break;
	}

	switch (extend) {
	default:
	case SAMPLER_EXTEND_NONE:
		ss.ss1.r_wrap_mode = GEN4_TEXCOORDMODE_CLAMP_BORDER;
		ss.ss1.s_wrap_mode = GEN4_TEXCOORDMODE_CLAMP_BORDER;
		ss.ss1.t_wrap_mode = GEN4_TEXCOORDMODE_CLAMP_BORDER;
		break;
	case SAMPLER_EXTEND_REPEAT:
		ss.ss1.r_wrap_mode = GEN4_TEXCOORDMODE_WRAP;
		ss.ss1.s_wrap_mode = GEN4_TEXCOORDMODE_WRAP;
		ss.ss1.t_wrap_mode = GEN4_TEXCOORDMODE_WRAP;
		break;
	case SAMPLER_EXTEND_PAD:
		ss.ss1.r_wrap_mode = GEN4_TEXCOORDMODE_CLAMP;
		ss.ss1.s_wrap_mode = GEN4_TEXCOORDMODE_CLAMP;
		ss.ss1.t_wrap_mode = GEN4_TEXCOORDMODE_CLAMP;
		break;
	case SAMPLER_EXTEND_REFLECT:
		ss.ss1.r_wrap_mode = GEN4_TEXCOORDMODE_MIRROR;
		ss.ss1.s_wrap_mode = GEN4_TEXCOORDMODE_MIRROR;
		ss.ss1.t_wrap_mode = GEN4_TEXCOORDMODE_MIRROR;
		break;
	}

	return OUT_STATE_STRUCT(ss, 32);
}

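/* A minimal 4-byte vertex buffer holding a single zeroed x,y pair. */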
static uint32_t
gen6_create_vertex_buffer(struct intel_batchbuffer *batch)
{
	uint16_t v[2];

	v[0] = 0;
	v[1] = 0;

	return intel_batch_state_copy(batch, v, sizeof(v), 8, "vertex buffer");
}

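/*
 * 3DSTATE_VERTEX_BUFFERS: a single null vertex buffer whose start and
 * end addresses both point at the tiny buffer created above.
 */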
static void gen6_emit_vertex_buffer(struct intel_batchbuffer *batch)
{
	uint32_t offset;

	offset = gen6_create_vertex_buffer(batch);

	OUT_BATCH(GEN4_3DSTATE_VERTEX_BUFFERS | 3);
	OUT_BATCH(GEN6_VB0_VERTEXDATA |
		  0 << GEN6_VB0_BUFFER_INDEX_SHIFT |
		  VB0_NULL_VERTEX_BUFFER |
		  0 << VB0_BUFFER_PITCH_SHIFT);
	OUT_RELOC_STATE(batch, I915_GEM_DOMAIN_VERTEX, 0, offset);
	OUT_RELOC_STATE(batch, I915_GEM_DOMAIN_VERTEX, 0, offset);
	OUT_BATCH(0);
}

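/*
 * Entry point: build the complete null render state for GEN6, with every
 * fixed-function stage programmed to benign defaults, and terminate the
 * batch with MI_BATCH_BUFFER_END.
 */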
void gen6_setup_null_render_state(struct intel_batchbuffer *batch)
{
	uint32_t wm_state, wm_kernel, wm_table;
	uint32_t cc_vp, cc_blend;

	wm_table  = gen6_bind_surfaces(batch);
	wm_kernel = gen6_create_kernel(batch);
	wm_state  = gen6_create_sampler(batch,
					SAMPLER_FILTER_NEAREST,
					SAMPLER_EXTEND_NONE);

	cc_vp = gen6_create_cc_viewport(batch);
	cc_blend = gen6_create_cc_blend(batch);

	gen6_emit_invariant(batch);
	gen6_emit_state_base_address(batch);

	gen6_emit_sip(batch);
	gen6_emit_urb(batch);

	gen6_emit_viewports(batch, cc_vp);
	gen6_emit_vs(batch);
	gen6_emit_gs(batch);
	gen6_emit_clip(batch);
	gen6_emit_wm_constants(batch);
	gen6_emit_null_depth_buffer(batch);

	gen6_emit_drawing_rectangle(batch);
	gen6_emit_cc(batch, cc_blend);
	gen6_emit_sampler(batch, wm_state);
	gen6_emit_sf(batch);
	gen6_emit_wm(batch, wm_kernel);
	gen6_emit_vertex_elements(batch);
	gen6_emit_binding_table(batch, wm_table);

	gen6_emit_vertex_buffer(batch);

	OUT_BATCH(MI_BATCH_BUFFER_END);
}