xref: /aosp_15_r20/external/mesa3d/src/compiler/spirv/vtn_glsl450.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <math.h>
25 
26 #include "nir/nir_builtin_builder.h"
27 
28 #include "vtn_private.h"
29 #include "GLSL.std.450.h"
30 
31 #ifndef M_PIf
32 #define M_PIf   ((float) M_PI)
33 #endif
34 #ifndef M_PI_2f
35 #define M_PI_2f ((float) M_PI_2)
36 #endif
37 #ifndef M_PI_4f
38 #define M_PI_4f ((float) M_PI_4)
39 #endif
40 
41 /**
42  * Some fp16 instructions (i.e., asin and acos) are lowered as fp32. In these cases the
43  * generated fp32 instructions need the same fp_fast_math settings as fp16.
44  */
45 static void
propagate_fp16_fast_math_to_fp32(struct nir_builder * b)46 propagate_fp16_fast_math_to_fp32(struct nir_builder *b)
47 {
48    static_assert(FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32 ==
49                  (FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16 << 1),
50                  "FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32 is not "
51                  "FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16 << 1.");
52 
53    b->fp_fast_math |= (b->fp_fast_math & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16) << 1;
54 }
55 
56 static nir_def *build_det(nir_builder *b, nir_def **col, unsigned cols);
57 
58 /* Computes the determinate of the submatrix given by taking src and
59  * removing the specified row and column.
60  */
61 static nir_def *
build_mat_subdet(struct nir_builder * b,struct nir_def ** src,unsigned size,unsigned row,unsigned col)62 build_mat_subdet(struct nir_builder *b, struct nir_def **src,
63                  unsigned size, unsigned row, unsigned col)
64 {
65    assert(row < size && col < size);
66    if (size == 2) {
67       return nir_channel(b, src[1 - col], 1 - row);
68    } else {
69       /* Swizzle to get all but the specified row */
70       unsigned swiz[NIR_MAX_VEC_COMPONENTS] = {0};
71       for (unsigned j = 0; j < 3; j++)
72          swiz[j] = j + (j >= row);
73 
74       /* Grab all but the specified column */
75       nir_def *subcol[3];
76       for (unsigned j = 0; j < size; j++) {
77          if (j != col) {
78             subcol[j - (j > col)] = nir_swizzle(b, src[j], swiz, size - 1);
79          }
80       }
81 
82       return build_det(b, subcol, size - 1);
83    }
84 }
85 
86 static nir_def *
build_det(nir_builder * b,nir_def ** col,unsigned size)87 build_det(nir_builder *b, nir_def **col, unsigned size)
88 {
89    assert(size <= 4);
90    nir_def *subdet[4];
91    for (unsigned i = 0; i < size; i++)
92       subdet[i] = build_mat_subdet(b, col, size, i, 0);
93 
94    nir_def *prod = nir_fmul(b, col[0], nir_vec(b, subdet, size));
95 
96    nir_def *result = NULL;
97    for (unsigned i = 0; i < size; i += 2) {
98       nir_def *term;
99       if (i + 1 < size) {
100          term = nir_fsub(b, nir_channel(b, prod, i),
101                             nir_channel(b, prod, i + 1));
102       } else {
103          term = nir_channel(b, prod, i);
104       }
105 
106       result = result ? nir_fadd(b, result, term) : term;
107    }
108 
109    return result;
110 }
111 
112 static nir_def *
build_mat_det(struct vtn_builder * b,struct vtn_ssa_value * src)113 build_mat_det(struct vtn_builder *b, struct vtn_ssa_value *src)
114 {
115    unsigned size = glsl_get_vector_elements(src->type);
116 
117    nir_def *cols[4];
118    for (unsigned i = 0; i < size; i++)
119       cols[i] = src->elems[i]->def;
120 
121    return build_det(&b->nb, cols, size);
122 }
123 
124 static struct vtn_ssa_value *
matrix_inverse(struct vtn_builder * b,struct vtn_ssa_value * src)125 matrix_inverse(struct vtn_builder *b, struct vtn_ssa_value *src)
126 {
127    nir_def *adj_col[4];
128    unsigned size = glsl_get_vector_elements(src->type);
129 
130    nir_def *cols[4];
131    for (unsigned i = 0; i < size; i++)
132       cols[i] = src->elems[i]->def;
133 
134    /* Build up an adjugate matrix */
135    for (unsigned c = 0; c < size; c++) {
136       nir_def *elem[4];
137       for (unsigned r = 0; r < size; r++) {
138          elem[r] = build_mat_subdet(&b->nb, cols, size, c, r);
139 
140          if ((r + c) % 2)
141             elem[r] = nir_fneg(&b->nb, elem[r]);
142       }
143 
144       adj_col[c] = nir_vec(&b->nb, elem, size);
145    }
146 
147    nir_def *det_inv = nir_frcp(&b->nb, build_det(&b->nb, cols, size));
148 
149    struct vtn_ssa_value *val = vtn_create_ssa_value(b, src->type);
150    for (unsigned i = 0; i < size; i++)
151       val->elems[i]->def = nir_fmul(&b->nb, adj_col[i], det_inv);
152 
153    return val;
154 }
155 
156 /**
157  * Approximate asin(x) by the piecewise formula:
158  * for |x| < 0.5, asin~(x) = x * (1 + x²(pS0 + x²(pS1 + x²*pS2)) / (1 + x²*qS1))
159  * for |x| ≥ 0.5, asin~(x) = sign(x) * (π/2 - sqrt(1 - |x|) * (π/2 + |x|(π/4 - 1 + |x|(p0 + |x|p1))))
160  *
161  * The latter is correct to first order at x=0 and x=±1 regardless of the p
162  * coefficients but can be made second-order correct at both ends by selecting
163  * the fit coefficients appropriately.  Different p coefficients can be used
164  * in the asin and acos implementation to minimize some relative error metric
165  * in each case.
166  */
167 static nir_def *
build_asin(nir_builder * b,nir_def * x,float p0,float p1,bool piecewise)168 build_asin(nir_builder *b, nir_def *x, float p0, float p1, bool piecewise)
169 {
170    if (x->bit_size == 16) {
171       /* The polynomial approximation isn't precise enough to meet half-float
172        * precision requirements. Alternatively, we could implement this using
173        * the formula:
174        *
175        * asin(x) = atan2(x, sqrt(1 - x*x))
176        *
177        * But that is very expensive, so instead we just do the polynomial
178        * approximation in 32-bit math and then we convert the result back to
179        * 16-bit.
180        */
181       const uint32_t save = b->fp_fast_math;
182       propagate_fp16_fast_math_to_fp32(b);
183 
184       nir_def *result =
185          nir_f2f16(b, build_asin(b, nir_f2f32(b, x), p0, p1, piecewise));
186 
187       b->fp_fast_math = save;
188       return result;
189    }
190    nir_def *one = nir_imm_floatN_t(b, 1.0f, x->bit_size);
191    nir_def *half = nir_imm_floatN_t(b, 0.5f, x->bit_size);
192    nir_def *abs_x = nir_fabs(b, x);
193 
194    nir_def *p0_plus_xp1 = nir_ffma_imm12(b, abs_x, p1, p0);
195 
196    nir_def *expr_tail =
197       nir_ffma_imm2(b, abs_x,
198                        nir_ffma_imm2(b, abs_x, p0_plus_xp1, M_PI_4f - 1.0f),
199                        M_PI_2f);
200 
201    nir_def *result0 = nir_fmul(b, nir_fsign(b, x),
202                       nir_a_minus_bc(b, nir_imm_floatN_t(b, M_PI_2f, x->bit_size),
203                                         nir_fsqrt(b, nir_fsub(b, one, abs_x)),
204                                         expr_tail));
205    if (piecewise) {
206       /* approximation for |x| < 0.5 */
207       const float pS0 =  1.6666586697e-01f;
208       const float pS1 = -4.2743422091e-02f;
209       const float pS2 = -8.6563630030e-03f;
210       const float qS1 = -7.0662963390e-01f;
211 
212       nir_def *x2 = nir_fmul(b, x, x);
213       nir_def *p = nir_fmul(b,
214                                 x2,
215                                 nir_ffma_imm2(b, x2,
216                                                  nir_ffma_imm12(b, x2, pS2, pS1),
217                                                  pS0));
218 
219       nir_def *q = nir_ffma_imm1(b, x2, qS1, one);
220       nir_def *result1 = nir_ffma(b, x, nir_fdiv(b, p, q), x);
221       return nir_bcsel(b, nir_flt(b, abs_x, half), result1, result0);
222    } else {
223       return result0;
224    }
225 }
226 
227 static nir_op
vtn_nir_alu_op_for_spirv_glsl_opcode(struct vtn_builder * b,enum GLSLstd450 opcode,unsigned execution_mode,bool * exact)228 vtn_nir_alu_op_for_spirv_glsl_opcode(struct vtn_builder *b,
229                                      enum GLSLstd450 opcode,
230                                      unsigned execution_mode,
231                                      bool *exact)
232 {
233    *exact = false;
234    switch (opcode) {
235    case GLSLstd450Round:         return nir_op_fround_even;
236    case GLSLstd450RoundEven:     return nir_op_fround_even;
237    case GLSLstd450Trunc:         return nir_op_ftrunc;
238    case GLSLstd450FAbs:          return nir_op_fabs;
239    case GLSLstd450SAbs:          return nir_op_iabs;
240    case GLSLstd450FSign:         return nir_op_fsign;
241    case GLSLstd450SSign:         return nir_op_isign;
242    case GLSLstd450Floor:         return nir_op_ffloor;
243    case GLSLstd450Ceil:          return nir_op_fceil;
244    case GLSLstd450Fract:         return nir_op_ffract;
245    case GLSLstd450Sin:           return nir_op_fsin;
246    case GLSLstd450Cos:           return nir_op_fcos;
247    case GLSLstd450Pow:           return nir_op_fpow;
248    case GLSLstd450Exp2:          return nir_op_fexp2;
249    case GLSLstd450Log2:          return nir_op_flog2;
250    case GLSLstd450Sqrt:          return nir_op_fsqrt;
251    case GLSLstd450InverseSqrt:   return nir_op_frsq;
252    case GLSLstd450NMin:          *exact = true; return nir_op_fmin;
253    case GLSLstd450FMin:          return nir_op_fmin;
254    case GLSLstd450UMin:          return nir_op_umin;
255    case GLSLstd450SMin:          return nir_op_imin;
256    case GLSLstd450NMax:          *exact = true; return nir_op_fmax;
257    case GLSLstd450FMax:          return nir_op_fmax;
258    case GLSLstd450UMax:          return nir_op_umax;
259    case GLSLstd450SMax:          return nir_op_imax;
260    case GLSLstd450FMix:          return nir_op_flrp;
261    case GLSLstd450Fma:           return nir_op_ffma;
262    case GLSLstd450Ldexp:         return nir_op_ldexp;
263    case GLSLstd450FindILsb:      return nir_op_find_lsb;
264    case GLSLstd450FindSMsb:      return nir_op_ifind_msb;
265    case GLSLstd450FindUMsb:      return nir_op_ufind_msb;
266 
267    /* Packing/Unpacking functions */
268    case GLSLstd450PackSnorm4x8:     return nir_op_pack_snorm_4x8;
269    case GLSLstd450PackUnorm4x8:     return nir_op_pack_unorm_4x8;
270    case GLSLstd450PackSnorm2x16:    return nir_op_pack_snorm_2x16;
271    case GLSLstd450PackUnorm2x16:    return nir_op_pack_unorm_2x16;
272    case GLSLstd450PackHalf2x16:     return nir_op_pack_half_2x16;
273    case GLSLstd450PackDouble2x32:   return nir_op_pack_64_2x32;
274    case GLSLstd450UnpackSnorm4x8:   return nir_op_unpack_snorm_4x8;
275    case GLSLstd450UnpackUnorm4x8:   return nir_op_unpack_unorm_4x8;
276    case GLSLstd450UnpackSnorm2x16:  return nir_op_unpack_snorm_2x16;
277    case GLSLstd450UnpackUnorm2x16:  return nir_op_unpack_unorm_2x16;
278    case GLSLstd450UnpackHalf2x16:   return nir_op_unpack_half_2x16;
279    case GLSLstd450UnpackDouble2x32: return nir_op_unpack_64_2x32;
280 
281    default:
282       vtn_fail("No NIR equivalent");
283    }
284 }
285 
286 #define NIR_IMM_FP(n, v) (nir_imm_floatN_t(n, v, src[0]->bit_size))
287 
/* Handles a GLSL.std.450 extended instruction that produces an ALU-style
 * result.  w[1] is the result type id and w[2] the result id; operands start
 * at w[5], so count - 5 is the number of operands.  Sources are collected
 * (optionally down-converted for RelaxedPrecision/mediump), the NIR
 * expression for the opcode is built, and the result pushed for w[2].
 */
static void
handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
                   const uint32_t *w, unsigned count)
{
   struct nir_builder *nb = &b->nb;
   const struct glsl_type *dest_type = vtn_get_type(b, w[1])->type;
   struct vtn_value *dest_val = vtn_untyped_value(b, w[2]);

   /* Decide whether this instruction participates in mediump (16-bit)
    * lowering of RelaxedPrecision values.
    */
   bool mediump_16bit;
   switch (entrypoint) {
   case GLSLstd450PackSnorm4x8:
   case GLSLstd450PackUnorm4x8:
   case GLSLstd450PackSnorm2x16:
   case GLSLstd450PackUnorm2x16:
   case GLSLstd450PackHalf2x16:
   case GLSLstd450PackDouble2x32:
   case GLSLstd450UnpackSnorm4x8:
   case GLSLstd450UnpackUnorm4x8:
   case GLSLstd450UnpackSnorm2x16:
   case GLSLstd450UnpackUnorm2x16:
   case GLSLstd450UnpackHalf2x16:
   case GLSLstd450UnpackDouble2x32:
      /* Asking for relaxed precision snorm 4x8 pack results (for example)
       * doesn't even make sense.  The NIR opcodes have a fixed output size, so
       * no trying to reduce precision.
       */
      mediump_16bit = false;
      break;

   case GLSLstd450Frexp:
   case GLSLstd450FrexpStruct:
   case GLSLstd450Modf:
   case GLSLstd450ModfStruct:
      /* Not sure how to detect the ->elems[i] destinations on these in vtn_upconvert_value(). */
      mediump_16bit = false;
      break;

   default:
      mediump_16bit = b->options->mediump_16bit_alu && vtn_value_is_relaxed_precision(b, dest_val);
      break;
   }

   /* Collect the various SSA sources */
   unsigned num_inputs = count - 5;
   nir_def *src[3] = { NULL, };
   for (unsigned i = 0; i < num_inputs; i++) {
      /* These are handled specially below */
      if (vtn_untyped_value(b, w[i + 5])->value_type == vtn_value_type_pointer)
         continue;

      src[i] = vtn_get_nir_ssa(b, w[i + 5]);
      if (mediump_16bit) {
         /* Narrow the source to 16 bits; the result is widened back at the
          * end via vtn_mediump_upconvert_value().
          */
         struct vtn_ssa_value *vtn_src = vtn_ssa_value(b, w[i + 5]);
         src[i] = vtn_mediump_downconvert(b, glsl_get_base_type(vtn_src->type), src[i]);
      }
   }

   struct vtn_ssa_value *dest = vtn_create_ssa_value(b, dest_type);

   vtn_handle_no_contraction(b, vtn_untyped_value(b, w[2]));
   switch (entrypoint) {
   case GLSLstd450Radians:
      dest->def = nir_radians(nb, src[0]);
      break;
   case GLSLstd450Degrees:
      dest->def = nir_degrees(nb, src[0]);
      break;
   case GLSLstd450Tan:
      dest->def = nir_ftan(nb, src[0]);
      break;

   case GLSLstd450Modf: {
      /* Fractional part goes in the result; the whole part is stored
       * through the pointer operand w[6].  Sign bits are handled manually
       * so that both parts carry the sign of the input.
       */
      nir_def *inf = nir_imm_floatN_t(&b->nb, INFINITY, src[0]->bit_size);
      nir_def *sign_bit =
         nir_imm_intN_t(&b->nb, (uint64_t)1 << (src[0]->bit_size - 1),
                        src[0]->bit_size);
      nir_def *signed_zero = nir_iand(nb, src[0], sign_bit);
      nir_def *abs = nir_fabs(nb, src[0]);

      /* NaN input should produce a NaN results, and ±Inf input should provide
       * ±0 result.  The fmul(sign(x), ffract(x)) calculation will already
       * produce the expected NaN.  To get ±0, directly compare for equality
       * with Inf instead of using fisfinite (which is false for NaN).
       */
      dest->def = nir_bcsel(nb,
                            nir_ieq(nb, abs, inf),
                            signed_zero,
                            nir_ior(nb, signed_zero, nir_ffract(nb, abs)));

      struct vtn_pointer *i_ptr = vtn_value(b, w[6], vtn_value_type_pointer)->pointer;
      struct vtn_ssa_value *whole = vtn_create_ssa_value(b, i_ptr->type->pointed->type);
      whole->def = nir_ior(nb, signed_zero, nir_ffloor(nb, abs));
      vtn_variable_store(b, whole, i_ptr, 0);
      break;
   }

   case GLSLstd450ModfStruct: {
      /* Same math as Modf, but both parts land in a struct result instead
       * of one going through a pointer.
       */
      nir_def *inf = nir_imm_floatN_t(&b->nb, INFINITY, src[0]->bit_size);
      nir_def *sign_bit =
         nir_imm_intN_t(&b->nb, (uint64_t)1 << (src[0]->bit_size - 1),
                        src[0]->bit_size);
      nir_def *signed_zero = nir_iand(nb, src[0], sign_bit);
      nir_def *abs = nir_fabs(nb, src[0]);
      vtn_assert(glsl_type_is_struct_or_ifc(dest_type));

      /* See GLSLstd450Modf for explanation of the Inf and NaN handling. */
      dest->elems[0]->def = nir_bcsel(nb,
                                      nir_ieq(nb, abs, inf),
                                      signed_zero,
                                      nir_ior(nb, signed_zero, nir_ffract(nb, abs)));
      dest->elems[1]->def = nir_ior(nb, signed_zero, nir_ffloor(nb, abs));
      break;
   }

   case GLSLstd450Step: {
      /* The SPIR-V Extended Instructions for GLSL spec says:
       *
       *    Result is 0.0 if x < edge; otherwise result is 1.0.
       *
       * Here src[1] is x, and src[0] is edge.  The direct implementation is
       *
       *    bcsel(src[1] < src[0], 0.0, 1.0)
       *
       * This is effectively b2f(!(src1 < src0)).  Previously this was
       * implemented using sge(src1, src0), but that produces incorrect
       * results for NaN.  Instead, we use the identity b2f(!x) = 1 - b2f(x).
       */
      const bool exact = nb->exact;
      nb->exact = true;

      nir_def *cmp = nir_slt(nb, src[1], src[0]);

      nb->exact = exact;
      dest->def = nir_fsub_imm(nb, 1.0f, cmp);
      break;
   }

   case GLSLstd450Length:
      dest->def = nir_fast_length(nb, src[0]);
      break;
   case GLSLstd450Distance:
      dest->def = nir_fast_distance(nb, src[0], src[1]);
      break;
   case GLSLstd450Normalize:
      dest->def = nir_fast_normalize(nb, src[0]);
      break;

   case GLSLstd450Exp:
      dest->def = nir_fexp(nb, src[0]);
      break;

   case GLSLstd450Log:
      dest->def = nir_flog(nb, src[0]);
      break;

   case GLSLstd450FClamp:
      dest->def = nir_fclamp(nb, src[0], src[1], src[2]);
      break;
   case GLSLstd450NClamp:
      /* The N variant must honor NaN semantics, so build it exact. */
      nb->exact = true;
      dest->def = nir_fclamp(nb, src[0], src[1], src[2]);
      nb->exact = false;
      break;
   case GLSLstd450UClamp:
      dest->def = nir_uclamp(nb, src[0], src[1], src[2]);
      break;
   case GLSLstd450SClamp:
      dest->def = nir_iclamp(nb, src[0], src[1], src[2]);
      break;

   case GLSLstd450Cross: {
      dest->def = nir_cross3(nb, src[0], src[1]);
      break;
   }

   case GLSLstd450SmoothStep: {
      dest->def = nir_smoothstep(nb, src[0], src[1], src[2]);
      break;
   }

   case GLSLstd450FaceForward:
      /* N if dot(Nref, I) < 0, otherwise -N (src[0]=N, src[1]=I, src[2]=Nref). */
      dest->def =
         nir_bcsel(nb, nir_flt(nb, nir_fdot(nb, src[2], src[1]),
                                   NIR_IMM_FP(nb, 0.0)),
                       src[0], nir_fneg(nb, src[0]));
      break;

   case GLSLstd450Reflect:
      /* I - 2 * dot(N, I) * N */
      dest->def =
         nir_a_minus_bc(nb, src[0],
                            src[1],
                            nir_fmul(nb, nir_fdot(nb, src[0], src[1]),
                                         NIR_IMM_FP(nb, 2.0)));
      break;

   case GLSLstd450Refract: {
      nir_def *I = src[0];
      nir_def *N = src[1];
      nir_def *eta = src[2];
      nir_def *n_dot_i = nir_fdot(nb, N, I);
      nir_def *one = NIR_IMM_FP(nb, 1.0);
      nir_def *zero = NIR_IMM_FP(nb, 0.0);
      /* According to the SPIR-V and GLSL specs, eta is always a float
       * regardless of the type of the other operands. However in practice it
       * seems that if you try to pass it a float then glslang will just
       * promote it to a double and generate invalid SPIR-V. In order to
       * support a hypothetical fixed version of glslang we’ll promote eta to
       * double if the other operands are double also.
       */
      if (I->bit_size != eta->bit_size) {
         eta = nir_type_convert(nb, eta, nir_type_float,
                                nir_type_float | I->bit_size,
                                nir_rounding_mode_undef);
      }
      /* k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I)) */
      nir_def *k =
         nir_a_minus_bc(nb, one, eta,
                            nir_fmul(nb, eta, nir_a_minus_bc(nb, one, n_dot_i, n_dot_i)));
      nir_def *result =
         nir_a_minus_bc(nb, nir_fmul(nb, eta, I),
                            nir_ffma(nb, eta, n_dot_i, nir_fsqrt(nb, k)),
                            N);
      /* XXX: bcsel, or if statement? */
      dest->def = nir_bcsel(nb, nir_flt(nb, k, zero), zero, result);
      break;
   }

   case GLSLstd450Sinh:
      /* 0.5 * (e^x - e^(-x)) */
      dest->def =
         nir_fmul_imm(nb, nir_fsub(nb, nir_fexp(nb, src[0]),
                                       nir_fexp(nb, nir_fneg(nb, src[0]))),
                          0.5f);
      break;

   case GLSLstd450Cosh:
      /* 0.5 * (e^x + e^(-x)) */
      dest->def =
         nir_fmul_imm(nb, nir_fadd(nb, nir_fexp(nb, src[0]),
                                       nir_fexp(nb, nir_fneg(nb, src[0]))),
                          0.5f);
      break;

   case GLSLstd450Tanh: {
      /* tanh(x) := (e^x - e^(-x)) / (e^x + e^(-x))
       *
       * We clamp x to [-10, +10] to avoid precision problems.  When x > 10,
       * e^x dominates the sum, e^(-x) is lost and tanh(x) is 1.0 for 32 bit
       * floating point.
       *
       * For 16-bit precision this we clamp x to [-4.2, +4.2].
       */
      const uint32_t bit_size = src[0]->bit_size;
      const double clamped_x = bit_size > 16 ? 10.0 : 4.2;
      nir_def *x = nir_fclamp(nb, src[0],
                                  nir_imm_floatN_t(nb, -clamped_x, bit_size),
                                  nir_imm_floatN_t(nb, clamped_x, bit_size));

      /* The clamping will filter out NaN values causing an incorrect result.
       * The comparison is carefully structured to get NaN result for NaN and
       * get -0 for -0.
       *
       *    result = abs(s) > 0.0 ? ... : s;
       */
      const bool exact = nb->exact;

      nb->exact = true;
      nir_def *is_regular = nir_flt(nb,
                                        nir_imm_floatN_t(nb, 0, bit_size),
                                        nir_fabs(nb, src[0]));

      /* The extra 1.0*s ensures that subnormal inputs are flushed to zero
       * when that is selected by the shader.
       */
      nir_def *flushed = nir_fmul(nb,
                                      src[0],
                                      nir_imm_floatN_t(nb, 1.0, bit_size));
      nb->exact = exact;

      dest->def = nir_bcsel(nb,
                            is_regular,
                            nir_fdiv(nb, nir_fsub(nb, nir_fexp(nb, x),
                                                  nir_fexp(nb, nir_fneg(nb, x))),
                                     nir_fadd(nb, nir_fexp(nb, x),
                                              nir_fexp(nb, nir_fneg(nb, x)))),
                            flushed);
      break;
   }

   case GLSLstd450Asinh:
      /* asinh(x) = sign(x) * ln(|x| + sqrt(x² + 1)) */
      dest->def = nir_fmul(nb, nir_fsign(nb, src[0]),
         nir_flog(nb, nir_fadd(nb, nir_fabs(nb, src[0]),
                      nir_fsqrt(nb, nir_ffma_imm2(nb, src[0], src[0], 1.0f)))));
      break;
   case GLSLstd450Acosh:
      /* acosh(x) = ln(x + sqrt(x² - 1)) */
      dest->def = nir_flog(nb, nir_fadd(nb, src[0],
         nir_fsqrt(nb, nir_ffma_imm2(nb, src[0], src[0], -1.0f))));
      break;
   case GLSLstd450Atanh: {
      /* atanh(x) = 0.5 * ln((1 + x) / (1 - x)) */
      dest->def =
         nir_fmul_imm(nb, nir_flog(nb, nir_fdiv(nb, nir_fadd_imm(nb, src[0], 1.0),
                                       nir_fsub_imm(nb, 1.0, src[0]))),
                          0.5f);
      break;
   }

   case GLSLstd450Asin:
      dest->def = build_asin(nb, src[0], 0.086566724, -0.03102955, true);
      break;

   case GLSLstd450Acos:
      /* acos(x) = π/2 - asin(x), with fit coefficients tuned for acos. */
      dest->def =
         nir_fsub_imm(nb, M_PI_2f,
                          build_asin(nb, src[0], 0.08132463, -0.02363318, false));
      break;

   case GLSLstd450Atan:
      dest->def = nir_atan(nb, src[0]);
      break;

   case GLSLstd450Atan2:
      dest->def = nir_atan2(nb, src[0], src[1]);
      break;

   case GLSLstd450Frexp: {
      /* Significand is the result; the exponent is stored through the
       * pointer operand w[6].
       */
      dest->def = nir_frexp_sig(nb, src[0]);

      struct vtn_pointer *i_ptr = vtn_value(b, w[6], vtn_value_type_pointer)->pointer;
      struct vtn_ssa_value *exp = vtn_create_ssa_value(b, i_ptr->type->pointed->type);
      exp->def = nir_frexp_exp(nb, src[0]);
      vtn_variable_store(b, exp, i_ptr, 0);
      break;
   }

   case GLSLstd450FrexpStruct: {
      vtn_assert(glsl_type_is_struct_or_ifc(dest_type));
      dest->elems[0]->def = nir_frexp_sig(nb, src[0]);
      dest->elems[1]->def = nir_frexp_exp(nb, src[0]);
      break;
   }

   default: {
      /* Everything else maps 1:1 to a NIR ALU opcode. */
      unsigned execution_mode =
         b->shader->info.float_controls_execution_mode;
      bool exact;
      nir_op op = vtn_nir_alu_op_for_spirv_glsl_opcode(b, entrypoint, execution_mode, &exact);
      /* don't override explicit decoration */
      b->nb.exact |= exact;
      dest->def = nir_build_alu(&b->nb, op, src[0], src[1], src[2], NULL);
      break;
   }
   }
   b->nb.exact = false;

   if (mediump_16bit)
      vtn_mediump_upconvert_value(b, dest);

   vtn_push_ssa_value(b, w[2], dest);
}
648 
/* Lowers GLSLstd450InterpolateAt{Centroid,Sample,Offset} to the matching
 * nir interp_deref_at_* intrinsic on the input pointed to by w[5].  For
 * AtSample/AtOffset the extra operand (sample index or offset) comes from
 * w[6].  The result is pushed as the SSA value for w[2].
 */
static void
handle_glsl450_interpolation(struct vtn_builder *b, enum GLSLstd450 opcode,
                             const uint32_t *w, unsigned count)
{
   nir_intrinsic_op op;
   switch (opcode) {
   case GLSLstd450InterpolateAtCentroid:
      op = nir_intrinsic_interp_deref_at_centroid;
      break;
   case GLSLstd450InterpolateAtSample:
      op = nir_intrinsic_interp_deref_at_sample;
      break;
   case GLSLstd450InterpolateAtOffset:
      op = nir_intrinsic_interp_deref_at_offset;
      break;
   default:
      vtn_fail("Invalid opcode");
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->nb.shader, op);

   struct vtn_pointer *ptr =
      vtn_value(b, w[5], vtn_value_type_pointer)->pointer;
   nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);

   /* If the value we are interpolating has an index into a vector then
    * interpolate the vector and index the result of that instead. This is
    * necessary because the index will get generated as a series of nir_bcsel
    * instructions so it would no longer be an input variable.
    */
   const bool vec_array_deref = deref->deref_type == nir_deref_type_array &&
      glsl_type_is_vector(nir_deref_instr_parent(deref)->type);

   nir_deref_instr *vec_deref = NULL;
   if (vec_array_deref) {
      /* Remember the component deref and interpolate the parent vector. */
      vec_deref = deref;
      deref = nir_deref_instr_parent(deref);
   }
   intrin->src[0] = nir_src_for_ssa(&deref->def);

   /* AtSample and AtOffset carry a second operand in src[1]. */
   switch (opcode) {
   case GLSLstd450InterpolateAtCentroid:
      break;
   case GLSLstd450InterpolateAtSample:
   case GLSLstd450InterpolateAtOffset:
      intrin->src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[6]));
      break;
   default:
      vtn_fail("Invalid opcode");
   }

   intrin->num_components = glsl_get_vector_elements(deref->type);
   nir_def_init(&intrin->instr, &intrin->def,
                glsl_get_vector_elements(deref->type),
                glsl_get_bit_size(deref->type));

   nir_builder_instr_insert(&b->nb, &intrin->instr);

   nir_def *def = &intrin->def;
   if (vec_array_deref)
      def = nir_vector_extract(&b->nb, def, vec_deref->arr.index.ssa);

   vtn_push_nir_ssa(b, w[2], def);
}
713 
714 bool
vtn_handle_glsl450_instruction(struct vtn_builder * b,SpvOp ext_opcode,const uint32_t * w,unsigned count)715 vtn_handle_glsl450_instruction(struct vtn_builder *b, SpvOp ext_opcode,
716                                const uint32_t *w, unsigned count)
717 {
718    vtn_handle_fp_fast_math(b, vtn_untyped_value(b, w[2]));
719    switch ((enum GLSLstd450)ext_opcode) {
720    case GLSLstd450Determinant: {
721       vtn_push_nir_ssa(b, w[2], build_mat_det(b, vtn_ssa_value(b, w[5])));
722       break;
723    }
724 
725    case GLSLstd450MatrixInverse: {
726       vtn_push_ssa_value(b, w[2], matrix_inverse(b, vtn_ssa_value(b, w[5])));
727       break;
728    }
729 
730    case GLSLstd450InterpolateAtCentroid:
731    case GLSLstd450InterpolateAtSample:
732    case GLSLstd450InterpolateAtOffset:
733       handle_glsl450_interpolation(b, (enum GLSLstd450)ext_opcode, w, count);
734       break;
735 
736    default:
737       handle_glsl450_alu(b, (enum GLSLstd450)ext_opcode, w, count);
738    }
739 
740    return true;
741 }
742