Searched refs:nir_imm_int (Results 1 – 25 of 53) sorted by relevance
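
All of the hits below share one nir_builder pattern: nir_imm_int(b, v) emits a load_const instruction through the builder and hands the 32-bit constant back as an SSA value, which the generated ALU helpers (nir_iadd, nir_imul, nir_ishl, nir_iand, ...) can then consume. A minimal sketch of that shape, assuming a pass that already has a nir_builder set up; the helper name and the exact include paths are illustrative, not taken from the results:

#include "nir.h"
#include "nir_builder.h"   /* include path depends on where the pass lives in the tree */

/* Illustrative helper (not in Mesa): build (base + vec4_offset) * 16 as a
 * byte offset, the same shape as the nir_lower_uniforms_to_ubo.c hit below. */
static nir_ssa_def *
example_ubo_byte_offset(nir_builder *b, nir_ssa_def *vec4_offset, unsigned base)
{
   return nir_imul(b, nir_imm_int(b, 16),
                   nir_iadd(b, nir_imm_int(b, base), vec4_offset));
}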

/external/mesa3d/src/gallium/drivers/vc4/
vc4_nir_lower_txf_ms.c:86 nir_ssa_def *x_tile = nir_ushr(b, x, nir_imm_int(b, tile_w_shift)); in vc4_nir_lower_txf_ms_instr()
87 nir_ssa_def *y_tile = nir_ushr(b, y, nir_imm_int(b, tile_h_shift)); in vc4_nir_lower_txf_ms_instr()
90 nir_imm_int(b, tile_size)), in vc4_nir_lower_txf_ms_instr()
92 nir_imm_int(b, (w_tiles * in vc4_nir_lower_txf_ms_instr()
95 nir_imm_int(b, (tile_w - 1) & ~1)); in vc4_nir_lower_txf_ms_instr()
97 nir_imm_int(b, (tile_h - 1) & ~1)); in vc4_nir_lower_txf_ms_instr()
100 nir_imm_int(b, 2 * VC4_MAX_SAMPLES * sizeof(uint32_t))), in vc4_nir_lower_txf_ms_instr()
102 nir_imm_int(b, in vc4_nir_lower_txf_ms_instr()
110 nir_imm_int(b, 2)), in vc4_nir_lower_txf_ms_instr()
111 nir_imm_int(b, (1 << 2))), in vc4_nir_lower_txf_ms_instr()
[all …]
vc4_nir_lower_blend.c:64 load->src[0] = nir_src_for_ssa(nir_imm_int(b, 0)); in vc4_nir_get_dst_color()
173 nir_iand(b, src0, nir_imm_int(b, ~chan_mask)), in vc4_nir_set_packed_chan()
174 nir_iand(b, src1, nir_imm_int(b, chan_mask))); in vc4_nir_set_packed_chan()
188 return nir_imm_int(b, ~0); in vc4_blend_channel_i()
202 nir_imm_int(b, ~0), in vc4_blend_channel_i()
209 return nir_imm_int(b, 0); in vc4_blend_channel_i()
232 return nir_imm_int(b, ~0); in vc4_blend_channel_i()
326 nir_ssa_def *or1 = nir_ior(b, src, nir_ishl(b, src, nir_imm_int(b, 8))); in vc4_nir_splat()
327 return nir_ior(b, or1, nir_ishl(b, or1, nir_imm_int(b, 16))); in vc4_nir_splat()
342 nir_ssa_def *imm_0xff = nir_imm_int(b, 0xff); in vc4_do_blending_i()
[all …]
vc4_nir_lower_io.c:60 nir_imm_int(b, 8 * chan), in vc4_nir_unpack_8i()
61 nir_imm_int(b, 8)); in vc4_nir_unpack_8i()
70 nir_imm_int(b, 16 * chan), in vc4_nir_unpack_16i()
71 nir_imm_int(b, 16)); in vc4_nir_unpack_16i()
79 return nir_iand(b, src, nir_imm_int(b, 0xffff)); in vc4_nir_unpack_16u()
81 return nir_ushr(b, src, nir_imm_int(b, 16)); in vc4_nir_unpack_16u()
120 temp = nir_ixor(b, vpm, nir_imm_int(b, 0x80808080)); in vc4_nir_get_vattr_channel_vpm()
198 intr_comp->src[0] = nir_src_for_ssa(nir_imm_int(b, 0)); in vc4_nir_lower_vertex_attr()
345 nir_imm_int(b, 4))); in vc4_nir_lower_uniform()
/external/mesa3d/src/compiler/nir/
nir_lower_double_ops.c:50 nir_ssa_def *new_hi = nir_bfi(b, nir_imm_int(b, 0x7ff00000), exp, hi); in set_exponent()
62 return nir_ubitfield_extract(b, hi, nir_imm_int(b, 20), nir_imm_int(b, 11)); in get_exponent()
78 nir_ssa_def *inf_hi = nir_ior(b, nir_imm_int(b, 0x7ff00000), zero_hi); in get_signed_inf()
79 return nir_pack_64_2x32_split(b, nir_imm_int(b, 0), inf_hi); in get_signed_inf()
97 res = nir_bcsel(b, nir_ior(b, nir_ige(b, nir_imm_int(b, 0), exp), in fix_inv_result()
114 nir_ssa_def *src_norm = set_exponent(b, src, nir_imm_int(b, 1023)); in lower_rcp()
126 nir_imm_int(b, 1023))); in lower_rcp()
175 nir_imm_int(b, 1023)); in lower_sqrt_rsq()
176 nir_ssa_def *even = nir_iand(b, unbiased_exp, nir_imm_int(b, 1)); in lower_sqrt_rsq()
177 nir_ssa_def *half = nir_ishr(b, unbiased_exp, nir_imm_int(b, 1)); in lower_sqrt_rsq()
[all …]
nir_lower_int64.c:50 nir_ssa_def *res_hi = nir_ishr(b, x_hi, nir_imm_int(b, 31)); in lower_isign64()
82 nir_iand(b, nir_ieq(b, d_hi, nir_imm_int(b, 0)), nir_uge(b, n_hi, d_lo)); in lower_udiv64_mod64()
89 need_high_div = nir_imm_int(b, NIR_TRUE); in lower_udiv64_mod64()
99 nir_ssa_def *d_shift = nir_ishl(b, d_lo, nir_imm_int(b, i)); in lower_udiv64_mod64()
101 nir_ssa_def *new_q_hi = nir_ior(b, q_hi, nir_imm_int(b, 1u << i)); in lower_udiv64_mod64()
109 nir_ige(b, nir_imm_int(b, 31 - i), log2_d_lo)); in lower_udiv64_mod64()
129 nir_ssa_def *d_shift = nir_ishl(b, d, nir_imm_int(b, i)); in lower_udiv64_mod64()
131 nir_ssa_def *new_q_lo = nir_ior(b, q_lo, nir_imm_int(b, 1u << i)); in lower_udiv64_mod64()
138 nir_ige(b, nir_imm_int(b, 31 - i), log2_denom)); in lower_udiv64_mod64()
162 nir_ssa_def *negate = nir_ine(b, nir_ilt(b, n_hi, nir_imm_int(b, 0)), in lower_idiv64()
[all …]
nir_lower_uniforms_to_ubo.c:42 nir_ssa_def *new_idx = nir_iadd(b, old_idx, nir_imm_int(b, 1)); in lower_instr()
49 nir_ssa_def *ubo_idx = nir_imm_int(b, 0); in lower_instr()
51 nir_imul(b, nir_imm_int(b, 16), in lower_instr()
52 nir_iadd(b, nir_imm_int(b, nir_intrinsic_base(instr)), in lower_instr()
nir_lower_alu_to_scalar.c:146 nir_extract_u16(b, instr->src[0].src.ssa, nir_imm_int(b, 0)); in lower_alu_instr_scalar()
148 nir_ior(b, nir_ishl(b, nir_channel(b, word, 1), nir_imm_int(b, 16)), in lower_alu_instr_scalar()
161 nir_extract_u8(b, instr->src[0].src.ssa, nir_imm_int(b, 0)); in lower_alu_instr_scalar()
163 nir_ior(b, nir_ior(b, nir_ishl(b, nir_channel(b, byte, 3), nir_imm_int(b, 24)), in lower_alu_instr_scalar()
164 nir_ishl(b, nir_channel(b, byte, 2), nir_imm_int(b, 16))), in lower_alu_instr_scalar()
165 nir_ior(b, nir_ishl(b, nir_channel(b, byte, 1), nir_imm_int(b, 8)), in lower_alu_instr_scalar()
nir_lower_atomics_to_ssbo.c:67 nir_ssa_def *new_idx = nir_iadd(b, old_idx, nir_imm_int(b, ssbo_offset)); in lower_instr()
106 nir_ssa_def *buffer = nir_imm_int(b, nir_intrinsic_base(instr)); in lower_instr()
117 temp = nir_imm_int(b, +1); in lower_instr()
125 temp = nir_imm_int(b, -1); in lower_instr()
nir_lower_gs_intrinsics.c:80 nir_imm_int(b, b->shader->info.gs.vertices_out); in rewrite_emit_vertex()
98 nir_iadd(b, count, nir_imm_int(b, 1)), in rewrite_emit_vertex()
201 nir_store_var(&b, state.vertex_count_var, nir_imm_int(&b, 0), 0x1); in nir_lower_gs_intrinsics()
nir_lower_io.c:102 nir_ssa_def *vtx = nir_imm_int(b, deref_array->base_offset); in get_io_offset()
119 return nir_imm_int(b, type_size(glsl_vec4_type()) * slot_offset); in get_io_offset()
123 nir_ssa_def *offset = nir_imm_int(b, 0); in get_io_offset()
134 nir_imm_int(b, size * deref_array->base_offset)); in get_io_offset()
138 nir_imul(b, nir_imm_int(b, size), in get_io_offset()
150 offset = nir_iadd(b, offset, nir_imm_int(b, field_offset)); in get_io_offset()
nir_lower_subgroups.c:39 nir_ssa_def *zero = nir_imm_int(b, 0); in uint_to_ballot_type()
111 return nir_imm_int(b, NIR_TRUE); in lower_subgroups_intrin()
116 return nir_imm_int(b, options->subgroup_size); in lower_subgroups_intrin()
nir_lower_idiv.c:74 bf = nir_isub(bld, bf, nir_imm_int(bld, 2)); /* yes, really */ in convert_instr()
104 r = nir_ishr(bld, r, nir_imm_int(bld, 31)); in convert_instr()
nir_lower_samplers.c:58 nir_imul(b, nir_imm_int(b, *array_elements), in calc_sampler_offsets()
110 indirect = nir_umin(b, indirect, nir_imm_int(b, array_elements - 1)); in lower_sampler()
nir_lower_system_values.c:92 nir_imm_int(b, b->shader->info.cs.local_size[0]); in convert_block()
94 nir_imm_int(b, b->shader->info.cs.local_size[1]); in convert_block()
/external/mesa3d/src/intel/vulkan/
anv_nir_lower_multiview.c:60 nir_imm_int(b, _mesa_bitcount(state->view_mask))); in build_instance_id()
76 state->view_index = nir_imm_int(b, ffs(state->view_mask) - 1); in build_view_index()
87 nir_imm_int(b, _mesa_bitcount(state->view_mask))); in build_view_index()
104 nir_ssa_def *shift = nir_imul(b, compacted, nir_imm_int(b, 4)); in build_view_index()
111 shifted = nir_ushr(b, nir_imm_int(b, remap), shift); in build_view_index()
114 nir_ushr(b, nir_imm_int(b, remap), shift); in build_view_index()
116 nir_ushr(b, nir_imm_int(b, remap >> 32), in build_view_index()
117 nir_isub(b, shift, nir_imm_int(b, 32))); in build_view_index()
118 shifted = nir_bcsel(b, nir_ilt(b, shift, nir_imm_int(b, 32)), in build_view_index()
121 state->view_index = nir_iand(b, shifted, nir_imm_int(b, 0xf)); in build_view_index()
/external/mesa3d/src/intel/blorp/
blorp_nir_builder.h:83 nir_imm_int(b, 0x3)), in blorp_nir_mcs_is_clear_color()
84 nir_imm_int(b, 0x3)); in blorp_nir_mcs_is_clear_color()
87 return nir_ieq(b, nir_channel(b, mcs, 0), nir_imm_int(b, 0xff)); in blorp_nir_mcs_is_clear_color()
90 return nir_ieq(b, nir_channel(b, mcs, 0), nir_imm_int(b, ~0)); in blorp_nir_mcs_is_clear_color()
95 nir_imm_int(b, ~0)), in blorp_nir_mcs_is_clear_color()
97 nir_imm_int(b, ~0))); in blorp_nir_mcs_is_clear_color()
blorp_blit.c:219 tex->src[1].src = nir_src_for_ssa(nir_imm_int(b, 0)); in blorp_nir_tex()
235 tex->src[1].src = nir_src_for_ssa(nir_imm_int(b, 0)); in blorp_nir_txf()
254 tex->src[1].src = nir_src_for_ssa(nir_imm_int(b, 0)); in blorp_nir_txf_ms()
289 nir_ssa_def *masked = nir_iand(b, src, nir_imm_int(b, src_mask)); in nir_mask_shift_or()
293 shifted = nir_ishl(b, masked, nir_imm_int(b, src_left_shift)); in nir_mask_shift_or()
295 shifted = nir_ushr(b, masked, nir_imm_int(b, -src_left_shift)); in nir_mask_shift_or()
347 nir_ssa_def *x_W = nir_imm_int(b, 0); in blorp_nir_retile_y_to_w()
352 nir_ssa_def *y_W = nir_imm_int(b, 0); in blorp_nir_retile_y_to_w()
382 nir_ssa_def *x_Y = nir_imm_int(b, 0); in blorp_nir_retile_w_to_y()
388 nir_ssa_def *y_Y = nir_imm_int(b, 0); in blorp_nir_retile_w_to_y()
[all …]
/external/mesa3d/src/amd/vulkan/
radv_query.c:67 counter = nir_iadd(b, counter, nir_imm_int(b, 1)); in radv_break_on_count()
77 flags->src[0] = nir_src_for_ssa(nir_imm_int(b, offset)); in radv_load_push_int()
141 dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0)); in build_occlusion_query_shader()
149 src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0)); in build_occlusion_query_shader()
164 nir_ssa_def *input_stride = nir_imm_int(&b, db_count * 16); in build_occlusion_query_shader()
171 nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1); in build_occlusion_query_shader()
172 nir_store_var(&b, available, nir_imm_int(&b, 1), 0x1); in build_occlusion_query_shader()
179 radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count)); in build_occlusion_query_shader()
181 nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16)); in build_occlusion_query_shader()
212 nir_store_var(&b, available, nir_imm_int(&b, 0), 0x1); in build_occlusion_query_shader()
[all …]
radv_meta_buffer.c:33 nir_ssa_def *offset = nir_imul(&b, global_id, nir_imm_int(&b, 16)); in build_buffer_fill_shader()
38 dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0)); in build_buffer_fill_shader()
47 load->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0)); in build_buffer_fill_shader()
85 nir_ssa_def *offset = nir_imul(&b, global_id, nir_imm_int(&b, 16)); in build_buffer_copy_shader()
90 dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0)); in build_buffer_copy_shader()
98 src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0)); in build_buffer_copy_shader()
radv_meta_bufimage.c:77 offset->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0)); in build_nir_itob_compute_shader()
85 stride->src[0] = nir_src_for_ssa(nir_imm_int(&b, 12)); in build_nir_itob_compute_shader()
97 tex->src[1].src = nir_src_for_ssa(nir_imm_int(&b, 0)); in build_nir_itob_compute_shader()
301 offset->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0)); in build_nir_btoi_compute_shader()
309 stride->src[0] = nir_src_for_ssa(nir_imm_int(&b, 12)); in build_nir_btoi_compute_shader()
330 tex->src[1].src = nir_src_for_ssa(nir_imm_int(&b, 0)); in build_nir_btoi_compute_shader()
521 src_offset->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0)); in build_nir_itoi_compute_shader()
529 dst_offset->src[0] = nir_src_for_ssa(nir_imm_int(&b, 12)); in build_nir_itoi_compute_shader()
544 tex->src[1].src = nir_src_for_ssa(nir_imm_int(&b, 0)); in build_nir_itoi_compute_shader()
729 clear_val->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0)); in build_nir_cleari_compute_shader()
[all …]
/external/mesa3d/src/broadcom/compiler/
v3d_nir_lower_txf_ms.c:51 nir_ssa_def *one = nir_imm_int(b, 1); in vc4_nir_lower_txf_ms_instr()
52 coord = nir_ishl(b, coord, nir_imm_int(b, 1)); in vc4_nir_lower_txf_ms_instr()
/external/mesa3d/src/intel/compiler/
brw_nir_tcs_workarounds.c:82 load->src[0] = nir_src_for_ssa(nir_imm_int(b, 0)); in load_output()
119 store->src[1] = nir_src_for_ssa(nir_imm_int(b, 0)); in emit_quads_workaround()
brw_nir_lower_cs_intrinsics.c:62 subgroup_id = nir_imm_int(b, 0); in lower_cs_intrinsics_convert_block()
67 nir_imul(b, subgroup_id, nir_imm_int(b, state->dispatch_width)); in lower_cs_intrinsics_convert_block()
/external/mesa3d/src/compiler/spirv/
vtn_variables.c:124 return nir_imm_int(&b->nb, link.id * stride); in vtn_access_link_as_ssa()
134 return nir_imul(&b->nb, src0, nir_imm_int(&b->nb, stride)); in vtn_access_link_as_ssa()
144 desc_array_index = nir_imm_int(&b->nb, 0); in vtn_variable_resource_index()
204 desc_arr_idx = nir_imm_int(&b->nb, 0); in vtn_ssa_offset_pointer_dereference()
270 offset = nir_imm_int(&b->nb, base->var->shared_location); in vtn_ssa_offset_pointer_dereference()
276 offset = nir_imm_int(&b->nb, 0); in vtn_ssa_offset_pointer_dereference()
317 nir_ssa_def *mem_offset = nir_imm_int(&b->nb, type->offsets[member]); in vtn_ssa_offset_pointer_dereference()
617 nir_ssa_def *offset = nir_imm_int(&b->nb, 0); in vtn_pointer_to_offset()
645 nir_imm_int(&b->nb, type->offsets[member])); in vtn_pointer_to_offset()
773 nir_imm_int(&b->nb, access_offset))); in _vtn_load_store_tail()
[all …]
/external/mesa3d/src/gallium/drivers/freedreno/ir3/
ir3_nir_lower_tg4_to_tex.c:76 nir_vec2(b, nir_imm_int(b, offsets[i][0]), in lower_tg4()
77 nir_imm_int(b, offsets[i][1])); in lower_tg4()
