/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_dual_blend.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_helpers.h"

#include "freedreno_state.h"
#include "freedreno_context.h"
#include "freedreno_resource.h"
#include "freedreno_texture.h"
#include "freedreno_gmem.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"

/* All the generic state handling.  Even for CSOs that are specific to
 * a GPU generation, the bind and delete hooks can live here when they
 * are common across generations.
 */
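
/* (e.g. the a3xx backend installs its own fd3_blend_state_create() for
 * the generation-specific CSO creation, while reusing the common
 * fd_blend_state_bind()/fd_blend_state_delete() defined below)
 */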

static void
fd_set_blend_color(struct pipe_context *pctx,
		const struct pipe_blend_color *blend_color)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->blend_color = *blend_color;
	ctx->dirty |= FD_DIRTY_BLEND_COLOR;
}

static void
fd_set_stencil_ref(struct pipe_context *pctx,
		const struct pipe_stencil_ref *stencil_ref)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->stencil_ref = *stencil_ref;
	ctx->dirty |= FD_DIRTY_STENCIL_REF;
}

static void
fd_set_clip_state(struct pipe_context *pctx,
		const struct pipe_clip_state *clip)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->ucp = *clip;
	ctx->dirty |= FD_DIRTY_UCP;
}

static void
fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->sample_mask = (uint16_t)sample_mask;
	ctx->dirty |= FD_DIRTY_SAMPLE_MASK;
}

static void
fd_set_min_samples(struct pipe_context *pctx, unsigned min_samples)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->min_samples = min_samples;
	ctx->dirty |= FD_DIRTY_MIN_SAMPLES;
}

/* notes from calim on #dri-devel:
 * index==0 will be non-UBO (ie. glUniformXYZ()) all packed together padded
 * out to vec4's
 * I should be able to consider that I own the user_ptr until the next
 * set_constant_buffer() call, at which point I don't really care about the
 * previous values.
 * index>0 will be UBO's.. well, I'll worry about that later
 */
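/* (so, e.g., a glUniform3f() value lands in cb0 padded out to a vec4
 * slot, while actual UBOs show up at index 1 and above)
 */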
static void
fd_set_constant_buffer(struct pipe_context *pctx,
		enum pipe_shader_type shader, uint index,
		const struct pipe_constant_buffer *cb)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];

	util_copy_constant_buffer(&so->cb[index], cb);

	/* Note that gallium frontends can unbind constant buffers by
	 * passing NULL here.
	 */
	if (unlikely(!cb)) {
		so->enabled_mask &= ~(1 << index);
		return;
	}

	so->enabled_mask |= 1 << index;
	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_CONST;
	ctx->dirty |= FD_DIRTY_CONST;

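	/* track that cb->buffer is referenced as a constbuf, so that state
	 * referencing it can be re-dirtied if its backing storage changes:
	 */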
	fd_resource_set_usage(cb->buffer, FD_DIRTY_CONST);
}

static void
fd_set_shader_buffers(struct pipe_context *pctx,
		enum pipe_shader_type shader,
		unsigned start, unsigned count,
		const struct pipe_shader_buffer *buffers,
		unsigned writable_bitmask)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[shader];
	const unsigned modified_bits = u_bit_consecutive(start, count);

	so->enabled_mask &= ~modified_bits;
	so->writable_mask &= ~modified_bits;
	so->writable_mask |= writable_bitmask << start;
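	/* (e.g. start=1, count=2, writable_bitmask=0x2 clears bits 1..2
	 * above and then re-marks only slot 2 as writable)
	 */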

	for (unsigned i = 0; i < count; i++) {
		unsigned n = i + start;
		struct pipe_shader_buffer *buf = &so->sb[n];

		if (buffers && buffers[i].buffer) {
			if ((buf->buffer == buffers[i].buffer) &&
					(buf->buffer_offset == buffers[i].buffer_offset) &&
					(buf->buffer_size == buffers[i].buffer_size))
				continue;

			buf->buffer_offset = buffers[i].buffer_offset;
			buf->buffer_size = buffers[i].buffer_size;
			pipe_resource_reference(&buf->buffer, buffers[i].buffer);

			fd_resource_set_usage(buffers[i].buffer, FD_DIRTY_SSBO);

			so->enabled_mask |= BIT(n);
		} else {
			pipe_resource_reference(&buf->buffer, NULL);
		}
	}

	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_SSBO;
	ctx->dirty |= FD_DIRTY_SSBO;
}

void
fd_set_shader_images(struct pipe_context *pctx,
		enum pipe_shader_type shader,
		unsigned start, unsigned count,
		const struct pipe_image_view *images)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_shaderimg_stateobj *so = &ctx->shaderimg[shader];

	unsigned mask = 0;

	if (images) {
		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_image_view *buf = &so->si[n];

			if ((buf->resource == images[i].resource) &&
					(buf->format == images[i].format) &&
					(buf->access == images[i].access) &&
					!memcmp(&buf->u, &images[i].u, sizeof(buf->u)))
				continue;

			mask |= BIT(n);
			util_copy_image_view(buf, &images[i]);

			if (buf->resource) {
				fd_resource_set_usage(buf->resource, FD_DIRTY_IMAGE);
				so->enabled_mask |= BIT(n);
			} else {
				so->enabled_mask &= ~BIT(n);
			}
		}
	} else {
		mask = (BIT(count) - 1) << start;
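		/* (e.g. start=2, count=3 gives mask = 0b11100) */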

		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_image_view *img = &so->si[n];

			pipe_resource_reference(&img->resource, NULL);
		}

		so->enabled_mask &= ~mask;
	}

	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_IMAGE;
	ctx->dirty |= FD_DIRTY_IMAGE;
}

static void
fd_set_framebuffer_state(struct pipe_context *pctx,
		const struct pipe_framebuffer_state *framebuffer)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *cso;

	DBG("%ux%u, %u layers, %u samples",
		framebuffer->width, framebuffer->height,
		framebuffer->layers, framebuffer->samples);

	fd_context_switch_from(ctx);

	cso = &ctx->framebuffer;

	if (util_framebuffer_state_equal(cso, framebuffer))
		return;

	util_copy_framebuffer_state(cso, framebuffer);

	cso->samples = util_framebuffer_get_num_samples(cso);

	if (ctx->screen->reorder) {
		struct fd_batch *old_batch = NULL;

		fd_batch_reference(&old_batch, ctx->batch);

		if (likely(old_batch))
			fd_batch_set_stage(old_batch, FD_STAGE_NULL);

		fd_batch_reference(&ctx->batch, NULL);
		fd_context_all_dirty(ctx);

		if (old_batch && old_batch->blit && !old_batch->back_blit) {
			/* for blits, there is not really much point in hanging on
			 * to the uncommitted batch (ie. you probably don't blit
			 * multiple times to the same surface), so we might as
			 * well go ahead and flush this one:
			 */
			fd_batch_flush(old_batch);
		}

		fd_batch_reference(&old_batch, NULL);
	} else if (ctx->batch) {
		DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
				framebuffer->cbufs[0], framebuffer->zsbuf);
		fd_batch_flush(ctx->batch);
	}

	ctx->dirty |= FD_DIRTY_FRAMEBUFFER;

	ctx->disabled_scissor.minx = 0;
	ctx->disabled_scissor.miny = 0;
	ctx->disabled_scissor.maxx = cso->width;
	ctx->disabled_scissor.maxy = cso->height;

	ctx->dirty |= FD_DIRTY_SCISSOR;
}

static void
fd_set_polygon_stipple(struct pipe_context *pctx,
		const struct pipe_poly_stipple *stipple)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->stipple = *stipple;
	ctx->dirty |= FD_DIRTY_STIPPLE;
}

static void
fd_set_scissor_states(struct pipe_context *pctx,
		unsigned start_slot,
		unsigned num_scissors,
		const struct pipe_scissor_state *scissor)
{
	struct fd_context *ctx = fd_context(pctx);

	ctx->scissor = *scissor;
	ctx->dirty |= FD_DIRTY_SCISSOR;
}

static void
fd_set_viewport_states(struct pipe_context *pctx,
		unsigned start_slot,
		unsigned num_viewports,
		const struct pipe_viewport_state *viewport)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_scissor_state *scissor = &ctx->viewport_scissor;
	float minx, miny, maxx, maxy;

	ctx->viewport = *viewport;

	/* see si_get_scissor_from_viewport(): */

	/* Convert (-1, -1) and (1, 1) from clip space into window space. */
	minx = -viewport->scale[0] + viewport->translate[0];
	miny = -viewport->scale[1] + viewport->translate[1];
	maxx = viewport->scale[0] + viewport->translate[0];
	maxy = viewport->scale[1] + viewport->translate[1];
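	/* (e.g. a full-screen 800x600 viewport has scale = (400, 300) and
	 * translate = (400, 300), yielding minx/miny = 0, maxx = 800,
	 * maxy = 600)
	 */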

	/* Handle inverted viewports. */
	if (minx > maxx) {
		swap(minx, maxx);
	}
	if (miny > maxy) {
		swap(miny, maxy);
	}

	debug_assert(miny >= 0);
	debug_assert(maxy >= 0);

	/* Convert to integer and round up the max bounds. */
	scissor->minx = minx;
	scissor->miny = miny;
	scissor->maxx = ceilf(maxx);
	scissor->maxy = ceilf(maxy);

	ctx->dirty |= FD_DIRTY_VIEWPORT;
}

static void
fd_set_vertex_buffers(struct pipe_context *pctx,
		unsigned start_slot, unsigned count,
		const struct pipe_vertex_buffer *vb)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_vertexbuf_stateobj *so = &ctx->vtx.vertexbuf;
	int i;

	/* on a2xx, pitch is encoded in the vtx fetch instruction, so
	 * we need to mark VTXSTATE as dirty as well to trigger patching
	 * and re-emitting the vtx shader:
	 */
	if (ctx->screen->gpu_id < 300) {
		for (i = 0; i < count; i++) {
			bool new_enabled = vb && vb[i].buffer.resource;
			bool old_enabled = so->vb[i].buffer.resource != NULL;
			uint32_t new_stride = vb ? vb[i].stride : 0;
			uint32_t old_stride = so->vb[i].stride;
			if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
				ctx->dirty |= FD_DIRTY_VTXSTATE;
				break;
			}
		}
	}

	util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot, count);
	so->count = util_last_bit(so->enabled_mask);

	if (!vb)
		return;

	ctx->dirty |= FD_DIRTY_VTXBUF;

	for (unsigned i = 0; i < count; i++) {
		assert(!vb[i].is_user_buffer);
		fd_resource_set_usage(vb[i].buffer.resource, FD_DIRTY_VTXBUF);
	}
}

static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_blend_state *cso = hwcso;
	bool old_is_dual = ctx->blend ?
		ctx->blend->rt[0].blend_enable && util_blend_state_is_dual(ctx->blend, 0) :
		false;
	bool new_is_dual = cso ?
		cso->rt[0].blend_enable && util_blend_state_is_dual(cso, 0) :
		false;
	ctx->blend = hwcso;
	ctx->dirty |= FD_DIRTY_BLEND;
	if (old_is_dual != new_is_dual)
		ctx->dirty |= FD_DIRTY_BLEND_DUAL;
}

static void
fd_blend_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void
fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_scissor_state *old_scissor = fd_context_get_scissor(ctx);
	bool discard = ctx->rasterizer && ctx->rasterizer->rasterizer_discard;

	ctx->rasterizer = hwcso;
	ctx->dirty |= FD_DIRTY_RASTERIZER;

	if (ctx->rasterizer && ctx->rasterizer->scissor) {
		ctx->current_scissor = &ctx->scissor;
	} else {
		ctx->current_scissor = &ctx->disabled_scissor;
	}

	/* if the scissor enable bit changed, we need to mark the scissor
	 * state as dirty as well.
	 * NOTE: we can do a shallow compare, since we only care whether it
	 * changed to/from &ctx->disabled_scissor
	 */
	if (old_scissor != fd_context_get_scissor(ctx))
		ctx->dirty |= FD_DIRTY_SCISSOR;

	if (ctx->rasterizer && (discard != ctx->rasterizer->rasterizer_discard))
		ctx->dirty |= FD_DIRTY_RASTERIZER_DISCARD;
}

static void
fd_rasterizer_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void
fd_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->zsa = hwcso;
	ctx->dirty |= FD_DIRTY_ZSA;
}

static void
fd_zsa_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void *
fd_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
		const struct pipe_vertex_element *elements)
{
	struct fd_vertex_stateobj *so = CALLOC_STRUCT(fd_vertex_stateobj);

	if (!so)
		return NULL;

	memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
	so->num_elements = num_elements;

	return so;
}

static void
fd_vertex_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void
fd_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->vtx.vtx = hwcso;
	ctx->dirty |= FD_DIRTY_VTXSTATE;
}

static struct pipe_stream_output_target *
fd_create_stream_output_target(struct pipe_context *pctx,
		struct pipe_resource *prsc, unsigned buffer_offset,
		unsigned buffer_size)
{
	struct pipe_stream_output_target *target;
	struct fd_resource *rsc = fd_resource(prsc);

	target = CALLOC_STRUCT(pipe_stream_output_target);
	if (!target)
		return NULL;

	pipe_reference_init(&target->reference, 1);
	pipe_resource_reference(&target->buffer, prsc);

	target->context = pctx;
	target->buffer_offset = buffer_offset;
	target->buffer_size = buffer_size;

	assert(rsc->base.target == PIPE_BUFFER);
	util_range_add(&rsc->base, &rsc->valid_buffer_range,
		buffer_offset, buffer_offset + buffer_size);

	return target;
}

static void
fd_stream_output_target_destroy(struct pipe_context *pctx,
		struct pipe_stream_output_target *target)
{
	pipe_resource_reference(&target->buffer, NULL);
	FREE(target);
}

static void
fd_set_stream_output_targets(struct pipe_context *pctx,
		unsigned num_targets, struct pipe_stream_output_target **targets,
		const unsigned *offsets)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_streamout_stateobj *so = &ctx->streamout;
	unsigned i;

	debug_assert(num_targets <= ARRAY_SIZE(so->targets));

	for (i = 0; i < num_targets; i++) {
		boolean changed = targets[i] != so->targets[i];
		boolean reset = (offsets[i] != (unsigned)-1);

		so->reset |= (reset << i);

		if (!changed && !reset)
			continue;

		if (reset)
			so->offsets[i] = offsets[i];

		pipe_so_target_reference(&so->targets[i], targets[i]);
	}

	for (; i < so->num_targets; i++) {
		pipe_so_target_reference(&so->targets[i], NULL);
	}

	so->num_targets = num_targets;

	ctx->dirty |= FD_DIRTY_STREAMOUT;
}

static void
fd_bind_compute_state(struct pipe_context *pctx, void *state)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->compute = state;
	ctx->dirty_shader[PIPE_SHADER_COMPUTE] |= FD_DIRTY_SHADER_PROG;
}

static void
fd_set_compute_resources(struct pipe_context *pctx,
		unsigned start, unsigned count, struct pipe_surface **prscs)
{
	// TODO
}

/* used by clover to bind global objects, returning the bo address
 * via handles[n]
 */
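/* (per the gallium contract, each handles[i] arrives holding an offset
 * into the buffer; the buffer's base GPU address is added to it to form
 * the final address, hence the "+=" below)
 */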
static void
fd_set_global_binding(struct pipe_context *pctx,
		unsigned first, unsigned count, struct pipe_resource **prscs,
		uint32_t **handles)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_global_bindings_stateobj *so = &ctx->global_bindings;
	unsigned mask = 0;

	if (prscs) {
		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + first;

			mask |= BIT(n);

			pipe_resource_reference(&so->buf[n], prscs[i]);

			if (so->buf[n]) {
				struct fd_resource *rsc = fd_resource(so->buf[n]);
				uint64_t iova = fd_bo_get_iova(rsc->bo);
				// TODO need to scream if iova > 32b or fix gallium API..
				*handles[i] += iova;
			}

			if (prscs[i])
				so->enabled_mask |= BIT(n);
			else
				so->enabled_mask &= ~BIT(n);
		}
	} else {
		mask = (BIT(count) - 1) << first;

		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + first;
			pipe_resource_reference(&so->buf[n], NULL);
		}

		so->enabled_mask &= ~mask;
	}
}

void
fd_state_init(struct pipe_context *pctx)
{
	pctx->set_blend_color = fd_set_blend_color;
	pctx->set_stencil_ref = fd_set_stencil_ref;
	pctx->set_clip_state = fd_set_clip_state;
	pctx->set_sample_mask = fd_set_sample_mask;
	pctx->set_min_samples = fd_set_min_samples;
	pctx->set_constant_buffer = fd_set_constant_buffer;
	pctx->set_shader_buffers = fd_set_shader_buffers;
	pctx->set_shader_images = fd_set_shader_images;
	pctx->set_framebuffer_state = fd_set_framebuffer_state;
	pctx->set_polygon_stipple = fd_set_polygon_stipple;
	pctx->set_scissor_states = fd_set_scissor_states;
	pctx->set_viewport_states = fd_set_viewport_states;

	pctx->set_vertex_buffers = fd_set_vertex_buffers;

	pctx->bind_blend_state = fd_blend_state_bind;
	pctx->delete_blend_state = fd_blend_state_delete;

	pctx->bind_rasterizer_state = fd_rasterizer_state_bind;
	pctx->delete_rasterizer_state = fd_rasterizer_state_delete;

	pctx->bind_depth_stencil_alpha_state = fd_zsa_state_bind;
	pctx->delete_depth_stencil_alpha_state = fd_zsa_state_delete;

	if (!pctx->create_vertex_elements_state)
		pctx->create_vertex_elements_state = fd_vertex_state_create;
	pctx->delete_vertex_elements_state = fd_vertex_state_delete;
	pctx->bind_vertex_elements_state = fd_vertex_state_bind;

	pctx->create_stream_output_target = fd_create_stream_output_target;
	pctx->stream_output_target_destroy = fd_stream_output_target_destroy;
	pctx->set_stream_output_targets = fd_set_stream_output_targets;

	if (has_compute(fd_screen(pctx->screen))) {
		pctx->bind_compute_state = fd_bind_compute_state;
		pctx->set_compute_resources = fd_set_compute_resources;
		pctx->set_global_binding = fd_set_global_binding;
	}
}