1 /*
2  * Copyright © 2018 Red Hat Inc.
3  * Copyright © 2015 Intel Corporation
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  */
24 
25 #include <math.h>
26 
27 #include "nir.h"
28 #include "nir_builtin_builder.h"
29 
30 nir_ssa_def*
nir_cross3(nir_builder * b,nir_ssa_def * x,nir_ssa_def * y)31 nir_cross3(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
32 {
33    unsigned yzx[3] = { 1, 2, 0 };
34    unsigned zxy[3] = { 2, 0, 1 };
35 
36    return nir_fsub(b, nir_fmul(b, nir_swizzle(b, x, yzx, 3),
37                                   nir_swizzle(b, y, zxy, 3)),
38                       nir_fmul(b, nir_swizzle(b, x, zxy, 3),
39                                   nir_swizzle(b, y, yzx, 3)));
40 }
41 
42 nir_ssa_def*
nir_cross4(nir_builder * b,nir_ssa_def * x,nir_ssa_def * y)43 nir_cross4(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
44 {
45    nir_ssa_def *cross = nir_cross3(b, x, y);
46 
47    return nir_vec4(b,
48       nir_channel(b, cross, 0),
49       nir_channel(b, cross, 1),
50       nir_channel(b, cross, 2),
51       nir_imm_intN_t(b, 0, cross->bit_size));
52 }
53 
54 nir_ssa_def*
nir_fast_length(nir_builder * b,nir_ssa_def * vec)55 nir_fast_length(nir_builder *b, nir_ssa_def *vec)
56 {
57    switch (vec->num_components) {
58    case 1: return nir_fsqrt(b, nir_fmul(b, vec, vec));
59    case 2: return nir_fsqrt(b, nir_fdot2(b, vec, vec));
60    case 3: return nir_fsqrt(b, nir_fdot3(b, vec, vec));
61    case 4: return nir_fsqrt(b, nir_fdot4(b, vec, vec));
62    case 8: return nir_fsqrt(b, nir_fdot8(b, vec, vec));
63    case 16: return nir_fsqrt(b, nir_fdot16(b, vec, vec));
64    default:
65       unreachable("Invalid number of components");
66    }
67 }
68 
69 nir_ssa_def*
nir_nextafter(nir_builder * b,nir_ssa_def * x,nir_ssa_def * y)70 nir_nextafter(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
71 {
72    nir_ssa_def *zero = nir_imm_intN_t(b, 0, x->bit_size);
73    nir_ssa_def *one = nir_imm_intN_t(b, 1, x->bit_size);
74 
75    nir_ssa_def *condeq = nir_feq(b, x, y);
76    nir_ssa_def *conddir = nir_flt(b, x, y);
77    nir_ssa_def *condzero = nir_feq(b, x, zero);
78 
79    /* beware of: +/-0.0 - 1 == NaN */
80    nir_ssa_def *xn =
81       nir_bcsel(b,
82                 condzero,
83                 nir_imm_intN_t(b, (1 << (x->bit_size - 1)) + 1, x->bit_size),
84                 nir_isub(b, x, one));
85 
86    /* beware of -0.0 + 1 == -0x1p-149 */
87    nir_ssa_def *xp = nir_bcsel(b, condzero, one, nir_iadd(b, x, one));
88 
89    /* nextafter can be implemented by just +/- 1 on the int value */
90    nir_ssa_def *res =
91       nir_bcsel(b, nir_ixor(b, conddir, nir_flt(b, x, zero)), xp, xn);
92 
93    return nir_nan_check2(b, x, y, nir_bcsel(b, condeq, x, res));
94 }
95 
nir_ssa_def*
nir_normalize(nir_builder *b, nir_ssa_def *vec)
{
   /* Scalar "normalization" degenerates to the sign of the value. */
   if (vec->num_components == 1)
      return nir_fsign(b, vec);

   nir_ssa_def *f0 = nir_imm_floatN_t(b, 0.0, vec->bit_size);
   nir_ssa_def *f1 = nir_imm_floatN_t(b, 1.0, vec->bit_size);
   nir_ssa_def *finf = nir_imm_floatN_t(b, INFINITY, vec->bit_size);

   /* scale the input to increase precision */
   nir_ssa_def *maxc = nir_fmax_abs_vec_comp(b, vec);
   nir_ssa_def *svec = nir_fdiv(b, vec, maxc);
   /* for inf: map infinite components to +/-1 and the rest to +/-0, so the
    * dot product below stays finite (presumably sign comes from nir_copysign
    * against the original input — NOTE(review): confirm nir_copysign operand
    * order against its definition).
    */
   nir_ssa_def *finfvec = nir_copysign(b, nir_bcsel(b, nir_feq(b, vec, finf), f1, f0), f1);

   /* If the largest magnitude is infinite, normalize the inf-mask vector
    * instead of the div-by-inf result (which would be NaN/0).
    */
   nir_ssa_def *temp = nir_bcsel(b, nir_feq(b, maxc, finf), finfvec, svec);
   nir_ssa_def *res = nir_fmul(b, temp, nir_frsq(b, nir_fdot(b, temp, temp)));

   /* A zero vector normalizes to itself (avoids 0/0 above). */
   return nir_bcsel(b, nir_feq(b, maxc, f0), vec, res);
}
117 
118 nir_ssa_def*
nir_smoothstep(nir_builder * b,nir_ssa_def * edge0,nir_ssa_def * edge1,nir_ssa_def * x)119 nir_smoothstep(nir_builder *b, nir_ssa_def *edge0, nir_ssa_def *edge1, nir_ssa_def *x)
120 {
121    nir_ssa_def *f2 = nir_imm_floatN_t(b, 2.0, x->bit_size);
122    nir_ssa_def *f3 = nir_imm_floatN_t(b, 3.0, x->bit_size);
123 
124    /* t = clamp((x - edge0) / (edge1 - edge0), 0, 1) */
125    nir_ssa_def *t =
126       nir_fsat(b, nir_fdiv(b, nir_fsub(b, x, edge0),
127                               nir_fsub(b, edge1, edge0)));
128 
129    /* result = t * t * (3 - 2 * t) */
130    return nir_fmul(b, t, nir_fmul(b, t, nir_fsub(b, f3, nir_fmul(b, f2, t))));
131 }
132 
133 nir_ssa_def*
nir_upsample(nir_builder * b,nir_ssa_def * hi,nir_ssa_def * lo)134 nir_upsample(nir_builder *b, nir_ssa_def *hi, nir_ssa_def *lo)
135 {
136    assert(lo->num_components == hi->num_components);
137    assert(lo->bit_size == hi->bit_size);
138 
139    nir_ssa_def *res[NIR_MAX_VEC_COMPONENTS];
140    for (unsigned i = 0; i < lo->num_components; ++i) {
141       nir_ssa_def *vec = nir_vec2(b, nir_channel(b, lo, i), nir_channel(b, hi, i));
142       res[i] = nir_pack_bits(b, vec, vec->bit_size * 2);
143    }
144 
145    return nir_vec(b, res, lo->num_components);
146 }
147 
148 /**
149  * Compute xs[0] + xs[1] + xs[2] + ... using fadd.
150  */
151 static nir_ssa_def *
build_fsum(nir_builder * b,nir_ssa_def ** xs,int terms)152 build_fsum(nir_builder *b, nir_ssa_def **xs, int terms)
153 {
154    nir_ssa_def *accum = xs[0];
155 
156    for (int i = 1; i < terms; i++)
157       accum = nir_fadd(b, accum, xs[i]);
158 
159    return accum;
160 }
161 
162 nir_ssa_def *
nir_atan(nir_builder * b,nir_ssa_def * y_over_x)163 nir_atan(nir_builder *b, nir_ssa_def *y_over_x)
164 {
165    const uint32_t bit_size = y_over_x->bit_size;
166 
167    nir_ssa_def *abs_y_over_x = nir_fabs(b, y_over_x);
168    nir_ssa_def *one = nir_imm_floatN_t(b, 1.0f, bit_size);
169 
170    /*
171     * range-reduction, first step:
172     *
173     *      / y_over_x         if |y_over_x| <= 1.0;
174     * x = <
175     *      \ 1.0 / y_over_x   otherwise
176     */
177    nir_ssa_def *x = nir_fdiv(b, nir_fmin(b, abs_y_over_x, one),
178                                 nir_fmax(b, abs_y_over_x, one));
179 
180    /*
181     * approximate atan by evaluating polynomial:
182     *
183     * x   * 0.9999793128310355 - x^3  * 0.3326756418091246 +
184     * x^5 * 0.1938924977115610 - x^7  * 0.1173503194786851 +
185     * x^9 * 0.0536813784310406 - x^11 * 0.0121323213173444
186     */
187    nir_ssa_def *x_2  = nir_fmul(b, x,   x);
188    nir_ssa_def *x_3  = nir_fmul(b, x_2, x);
189    nir_ssa_def *x_5  = nir_fmul(b, x_3, x_2);
190    nir_ssa_def *x_7  = nir_fmul(b, x_5, x_2);
191    nir_ssa_def *x_9  = nir_fmul(b, x_7, x_2);
192    nir_ssa_def *x_11 = nir_fmul(b, x_9, x_2);
193 
194    nir_ssa_def *polynomial_terms[] = {
195       nir_fmul_imm(b, x,     0.9999793128310355f),
196       nir_fmul_imm(b, x_3,  -0.3326756418091246f),
197       nir_fmul_imm(b, x_5,   0.1938924977115610f),
198       nir_fmul_imm(b, x_7,  -0.1173503194786851f),
199       nir_fmul_imm(b, x_9,   0.0536813784310406f),
200       nir_fmul_imm(b, x_11, -0.0121323213173444f),
201    };
202 
203    nir_ssa_def *tmp =
204       build_fsum(b, polynomial_terms, ARRAY_SIZE(polynomial_terms));
205 
206    /* range-reduction fixup */
207    tmp = nir_fadd(b, tmp,
208                   nir_fmul(b, nir_b2f(b, nir_flt(b, one, abs_y_over_x), bit_size),
209                            nir_fadd_imm(b, nir_fmul_imm(b, tmp, -2.0f), M_PI_2)));
210 
211    /* sign fixup */
212    return nir_fmul(b, tmp, nir_fsign(b, y_over_x));
213 }
214 
/**
 * Build atan2(y, x): the angle of the point (x, y), in (-π, π].
 *
 * The implementation rotates left-half-plane inputs, rescales huge
 * denominators to keep the reciprocal finite, and fixes up the IEEE special
 * cases for infinities; see the inline comments for the details of each step.
 */
nir_ssa_def *
nir_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x)
{
   assert(y->bit_size == x->bit_size);
   const uint32_t bit_size = x->bit_size;

   nir_ssa_def *zero = nir_imm_floatN_t(b, 0, bit_size);
   nir_ssa_def *one = nir_imm_floatN_t(b, 1, bit_size);

   /* If we're on the left half-plane rotate the coordinates π/2 clock-wise
    * for the y=0 discontinuity to end up aligned with the vertical
    * discontinuity of atan(s/t) along t=0.  This also makes sure that we
    * don't attempt to divide by zero along the vertical line, which may give
    * unspecified results on non-GLSL 4.1-capable hardware.
    */
   nir_ssa_def *flip = nir_fge(b, zero, x);
   nir_ssa_def *s = nir_bcsel(b, flip, nir_fabs(b, x), y);
   nir_ssa_def *t = nir_bcsel(b, flip, y, nir_fabs(b, x));

   /* If the magnitude of the denominator exceeds some huge value, scale down
    * the arguments in order to prevent the reciprocal operation from flushing
    * its result to zero, which would cause precision problems, and for s
    * infinite would cause us to return a NaN instead of the correct finite
    * value.
    *
    * If fmin and fmax are respectively the smallest and largest positive
    * normalized floating point values representable by the implementation,
    * the constants below should be in agreement with:
    *
    *    huge <= 1 / fmin
    *    scale <= 1 / fmin / fmax (for |t| >= huge)
    *
    * In addition scale should be a negative power of two in order to avoid
    * loss of precision.  The values chosen below should work for most usual
    * floating point representations with at least the dynamic range of ATI's
    * 24-bit representation.
    */
   const double huge_val = bit_size >= 32 ? 1e18 : 16384;
   nir_ssa_def *huge = nir_imm_floatN_t(b,  huge_val, bit_size);
   nir_ssa_def *scale = nir_bcsel(b, nir_fge(b, nir_fabs(b, t), huge),
                                  nir_imm_floatN_t(b, 0.25, bit_size), one);
   nir_ssa_def *rcp_scaled_t = nir_frcp(b, nir_fmul(b, t, scale));
   nir_ssa_def *s_over_t = nir_fmul(b, nir_fmul(b, s, scale), rcp_scaled_t);

   /* For |x| = |y| assume tan = 1 even if infinite (i.e. pretend momentarily
    * that ∞/∞ = 1) in order to comply with the rather artificial rules
    * inherited from IEEE 754-2008, namely:
    *
    *  "atan2(±∞, −∞) is ±3π/4
    *   atan2(±∞, +∞) is ±π/4"
    *
    * Note that this is inconsistent with the rules for the neighborhood of
    * zero that are based on iterated limits:
    *
    *  "atan2(±0, −0) is ±π
    *   atan2(±0, +0) is ±0"
    *
    * but GLSL specifically allows implementations to deviate from IEEE rules
    * at (0,0), so we take that license (i.e. pretend that 0/0 = 1 here as
    * well).
    */
   nir_ssa_def *tan = nir_bcsel(b, nir_feq(b, nir_fabs(b, x), nir_fabs(b, y)),
                                one, nir_fabs(b, s_over_t));

   /* Calculate the arctangent and fix up the result if we had flipped the
    * coordinate system.
    */
   nir_ssa_def *arc =
      nir_fadd(b, nir_fmul_imm(b, nir_b2f(b, flip, bit_size), M_PI_2),
                  nir_atan(b, tan));

   /* Rather convoluted calculation of the sign of the result.  When x < 0 we
    * cannot use fsign because we need to be able to distinguish between
    * negative and positive zero.  We don't use bitwise arithmetic tricks for
    * consistency with the GLSL front-end.  When x >= 0 rcp_scaled_t will
    * always be non-negative so this won't be able to distinguish between
    * negative and positive zero, but we don't care because atan2 is
    * continuous along the whole positive y = 0 half-line, so it won't affect
    * the result significantly.
    */
   return nir_bcsel(b, nir_flt(b, nir_fmin(b, y, rcp_scaled_t), zero),
                    nir_fneg(b, arc), arc);
}
298 
299 nir_ssa_def *
nir_get_texture_size(nir_builder * b,nir_tex_instr * tex)300 nir_get_texture_size(nir_builder *b, nir_tex_instr *tex)
301 {
302    b->cursor = nir_before_instr(&tex->instr);
303 
304    nir_tex_instr *txs;
305 
306    unsigned num_srcs = 1; /* One for the LOD */
307    for (unsigned i = 0; i < tex->num_srcs; i++) {
308       if (tex->src[i].src_type == nir_tex_src_texture_deref ||
309           tex->src[i].src_type == nir_tex_src_sampler_deref ||
310           tex->src[i].src_type == nir_tex_src_texture_offset ||
311           tex->src[i].src_type == nir_tex_src_sampler_offset ||
312           tex->src[i].src_type == nir_tex_src_texture_handle ||
313           tex->src[i].src_type == nir_tex_src_sampler_handle)
314          num_srcs++;
315    }
316 
317    txs = nir_tex_instr_create(b->shader, num_srcs);
318    txs->op = nir_texop_txs;
319    txs->sampler_dim = tex->sampler_dim;
320    txs->is_array = tex->is_array;
321    txs->is_shadow = tex->is_shadow;
322    txs->is_new_style_shadow = tex->is_new_style_shadow;
323    txs->texture_index = tex->texture_index;
324    txs->sampler_index = tex->sampler_index;
325    txs->dest_type = nir_type_int;
326 
327    unsigned idx = 0;
328    for (unsigned i = 0; i < tex->num_srcs; i++) {
329       if (tex->src[i].src_type == nir_tex_src_texture_deref ||
330           tex->src[i].src_type == nir_tex_src_sampler_deref ||
331           tex->src[i].src_type == nir_tex_src_texture_offset ||
332           tex->src[i].src_type == nir_tex_src_sampler_offset ||
333           tex->src[i].src_type == nir_tex_src_texture_handle ||
334           tex->src[i].src_type == nir_tex_src_sampler_handle) {
335          nir_src_copy(&txs->src[idx].src, &tex->src[i].src, txs);
336          txs->src[idx].src_type = tex->src[i].src_type;
337          idx++;
338       }
339    }
340    /* Add in an LOD because some back-ends require it */
341    txs->src[idx].src = nir_src_for_ssa(nir_imm_int(b, 0));
342    txs->src[idx].src_type = nir_tex_src_lod;
343 
344    nir_ssa_dest_init(&txs->instr, &txs->dest,
345                      nir_tex_instr_dest_size(txs), 32, NULL);
346    nir_builder_instr_insert(b, &txs->instr);
347 
348    return &txs->dest.ssa;
349 }
350 
351 nir_ssa_def *
nir_get_texture_lod(nir_builder * b,nir_tex_instr * tex)352 nir_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
353 {
354    b->cursor = nir_before_instr(&tex->instr);
355 
356    nir_tex_instr *tql;
357 
358    unsigned num_srcs = 0;
359    for (unsigned i = 0; i < tex->num_srcs; i++) {
360       if (tex->src[i].src_type == nir_tex_src_coord ||
361           tex->src[i].src_type == nir_tex_src_texture_deref ||
362           tex->src[i].src_type == nir_tex_src_sampler_deref ||
363           tex->src[i].src_type == nir_tex_src_texture_offset ||
364           tex->src[i].src_type == nir_tex_src_sampler_offset ||
365           tex->src[i].src_type == nir_tex_src_texture_handle ||
366           tex->src[i].src_type == nir_tex_src_sampler_handle)
367          num_srcs++;
368    }
369 
370    tql = nir_tex_instr_create(b->shader, num_srcs);
371    tql->op = nir_texop_lod;
372    tql->coord_components = tex->coord_components;
373    tql->sampler_dim = tex->sampler_dim;
374    tql->is_array = tex->is_array;
375    tql->is_shadow = tex->is_shadow;
376    tql->is_new_style_shadow = tex->is_new_style_shadow;
377    tql->texture_index = tex->texture_index;
378    tql->sampler_index = tex->sampler_index;
379    tql->dest_type = nir_type_float;
380 
381    unsigned idx = 0;
382    for (unsigned i = 0; i < tex->num_srcs; i++) {
383       if (tex->src[i].src_type == nir_tex_src_coord ||
384           tex->src[i].src_type == nir_tex_src_texture_deref ||
385           tex->src[i].src_type == nir_tex_src_sampler_deref ||
386           tex->src[i].src_type == nir_tex_src_texture_offset ||
387           tex->src[i].src_type == nir_tex_src_sampler_offset ||
388           tex->src[i].src_type == nir_tex_src_texture_handle ||
389           tex->src[i].src_type == nir_tex_src_sampler_handle) {
390          nir_src_copy(&tql->src[idx].src, &tex->src[i].src, tql);
391          tql->src[idx].src_type = tex->src[i].src_type;
392          idx++;
393       }
394    }
395 
396    nir_ssa_dest_init(&tql->instr, &tql->dest, 2, 32, NULL);
397    nir_builder_instr_insert(b, &tql->instr);
398 
399    /* The LOD is the y component of the result */
400    return nir_channel(b, &tql->dest.ssa, 1);
401 }
402