/*
 * Copyright © 2023 Collabora, Ltd.
 * SPDX-License-Identifier: MIT
 */

#include "nak_private.h"
#include "nir_builder.h"
#include "nir_format_convert.h"

#include "util/u_math.h"

static enum glsl_sampler_dim
remap_sampler_dim(enum glsl_sampler_dim dim)
{
   switch (dim) {
   case GLSL_SAMPLER_DIM_SUBPASS: return GLSL_SAMPLER_DIM_2D;
   case GLSL_SAMPLER_DIM_SUBPASS_MS: return GLSL_SAMPLER_DIM_MS;
   default: return dim;
   }
}

static bool
lower_tex(nir_builder *b, nir_tex_instr *tex, const struct nak_compiler *nak)
{
   b->cursor = nir_before_instr(&tex->instr);

   nir_def *tex_h = NULL, *samp_h = NULL, *coord = NULL, *ms_idx = NULL;
   nir_def *offset = NULL, *lod = NULL, *bias = NULL, *min_lod = NULL;
   nir_def *ddx = NULL, *ddy = NULL, *z_cmpr = NULL;
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      switch (tex->src[i].src_type) {
      case nir_tex_src_texture_handle: tex_h =     tex->src[i].src.ssa; break;
      case nir_tex_src_sampler_handle: samp_h =    tex->src[i].src.ssa; break;
      case nir_tex_src_coord:          coord =     tex->src[i].src.ssa; break;
      case nir_tex_src_ms_index:       ms_idx =    tex->src[i].src.ssa; break;
      case nir_tex_src_comparator:     z_cmpr =    tex->src[i].src.ssa; break;
      case nir_tex_src_offset:         offset =    tex->src[i].src.ssa; break;
      case nir_tex_src_lod:            lod =       tex->src[i].src.ssa; break;
      case nir_tex_src_bias:           bias =      tex->src[i].src.ssa; break;
      case nir_tex_src_min_lod:        min_lod =   tex->src[i].src.ssa; break;
      case nir_tex_src_ddx:            ddx =       tex->src[i].src.ssa; break;
      case nir_tex_src_ddy:            ddy =       tex->src[i].src.ssa; break;
      default:
         unreachable("Unsupported texture source");
      }
   }

   /* Combine sampler and texture into one if needed */
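   /* Bindless handles are assumed here to carry a 20-bit texture index in
    * bits [19:0] and a 12-bit sampler index in bits [31:20], hence the two
    * masks below.
    */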
   if (samp_h != NULL && samp_h != tex_h) {
      tex_h = nir_ior(b, nir_iand_imm(b, tex_h,  0x000fffff),
                         nir_iand_imm(b, samp_h, 0xfff00000));
   }
   tex_h = nir_u2u32(b, tex_h);

   /* Array index is treated separately, so pull it off if we have one. */
   nir_def *arr_idx = NULL;
   unsigned coord_components = tex->coord_components;
   if (coord && tex->is_array) {
      if (tex->op == nir_texop_lod) {
         /* The HW wants an array index. Use zero. */
         arr_idx = nir_imm_int(b, 0);
      } else {
         arr_idx = nir_channel(b, coord, --coord_components);

         /* Everything but texelFetch takes a float index
          *
          * TODO: Use F2I.U32.RNE
          */
         if (tex->op != nir_texop_txf && tex->op != nir_texop_txf_ms) {
            arr_idx = nir_fadd_imm(b, arr_idx, 0.5);

            // TODO: Hardware seems to clamp negative values to zero for us
            // in f2u, but we still need this fmax for constant folding.
            arr_idx = nir_fmax(b, arr_idx, nir_imm_float(b, 0.0));

            arr_idx = nir_f2u32(b, arr_idx);
         }

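         /* The array index shares a 32-bit source word with the packed
          * offsets or min_lod (see the PRMT packing below), so clamp it
          * to 16 bits.
          */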
         arr_idx = nir_umin(b, arr_idx, nir_imm_int(b, UINT16_MAX));
      }
   }

   enum nak_nir_lod_mode lod_mode = NAK_NIR_LOD_MODE_AUTO;
   if (tex->op == nir_texop_txf_ms) {
      /* Multisampled textures do not have miplevels */
      lod_mode = NAK_NIR_LOD_MODE_ZERO;
      lod = NULL; /* We don't need this */
   } else if (lod != NULL) {
      nir_scalar lod_s = { .def = lod, .comp = 0 };
      if (nir_scalar_is_const(lod_s) &&
          nir_scalar_as_uint(lod_s) == 0) {
         lod_mode = NAK_NIR_LOD_MODE_ZERO;
         lod = NULL; /* We don't need this */
      } else {
         lod_mode = NAK_NIR_LOD_MODE_LOD;
      }
   } else if (bias != NULL) {
      lod_mode = NAK_NIR_LOD_MODE_BIAS;
      lod = bias;
   }

   if (min_lod != NULL) {
      switch (lod_mode) {
      case NAK_NIR_LOD_MODE_AUTO:
         lod_mode = NAK_NIR_LOD_MODE_CLAMP;
         break;
      case NAK_NIR_LOD_MODE_BIAS:
         lod_mode = NAK_NIR_LOD_MODE_BIAS_CLAMP;
         break;
      default:
         unreachable("Invalid min_lod");
      }
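      /* Convert min_lod to unsigned fixed point with 8 fractional bits,
       * clamped below to 16 (i.e. 16/256 of a LOD), which appears to be the
       * hardware minimum.  For example, min_lod = 2.5 becomes 640.
       */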
      min_lod = nir_f2u32(b, nir_fmax(b, nir_fmul_imm(b, min_lod, 256),
                                         nir_imm_float(b, 16)));
   }

   enum nak_nir_offset_mode offset_mode = NAK_NIR_OFFSET_MODE_NONE;
   if (offset != NULL) {
      /* For TG4, offsets are packed into a single 32-bit value with 8 bits
       * per component.  For all other texture instructions, offsets are
       * packed into a single at most 16-bit value with 4 bits per component.
       */
      static const unsigned bits4[] = { 4, 4, 4, 4 };
      static const unsigned bits8[] = { 8, 8, 8, 8 };
      const unsigned *bits = tex->op == nir_texop_tg4 ? bits8 : bits4;

      offset = nir_pad_vector_imm_int(b, offset, 0, 4);
      offset = nir_format_clamp_sint(b, offset, bits);
      offset = nir_format_pack_uint(b, offset, bits, 4);
      offset_mode = NAK_NIR_OFFSET_MODE_AOFFI;
   } else if (nir_tex_instr_has_explicit_tg4_offsets(tex)) {
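      /* textureGatherOffsets: pack the four (x, y) offset pairs as signed
       * bytes into one 64-bit value, passed to the hardware as two 32-bit
       * components.
       */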
      uint64_t off_u64 = 0;
      for (uint8_t i = 0; i < 8; ++i) {
         uint64_t off = (uint8_t)tex->tg4_offsets[i / 2][i % 2];
         off_u64 |= off << (i * 8);
      }
      offset = nir_imm_ivec2(b, off_u64, off_u64 >> 32);
      offset_mode = NAK_NIR_OFFSET_MODE_PER_PX;
   }

   nir_def *src0[4] = { NULL, };
   nir_def *src1[4] = { NULL, };
   unsigned src0_comps = 0, src1_comps = 0;

#define PUSH(a, x) do { \
   nir_def *val = (x); \
   assert(a##_comps < ARRAY_SIZE(a)); \
   a[a##_comps++] = val; \
} while(0)

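   /* src0 and src1 accumulate the components of the two packed source
    * vectors that get attached below as nir_tex_src_backend1 and
    * nir_tex_src_backend2 for the NAK backend to consume directly.
    */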
   if (nak->sm >= 50) {
      if (tex->op == nir_texop_txd) {
         PUSH(src0, tex_h);

         for (uint32_t i = 0; i < coord_components; i++)
            PUSH(src0, nir_channel(b, coord, i));

         if (offset != NULL) {
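            /* PRMT with selector 0x1054 merges the two values: the low
             * 16 bits of the array index land in the bottom half of the
             * word and the low 16 bits of the packed offsets in the top
             * half.
             */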
            nir_def *arr_idx_or_zero = arr_idx ? arr_idx : nir_imm_int(b, 0);
            nir_def *arr_off = nir_prmt_nv(b, nir_imm_int(b, 0x1054),
                                           offset, arr_idx_or_zero);
            PUSH(src0, arr_off);
         } else if (arr_idx != NULL) {
            PUSH(src0, arr_idx);
         }

         assert(ddx->num_components == coord_components);
         for (uint32_t i = 0; i < coord_components; i++) {
            PUSH(src1, nir_channel(b, ddx, i));
            PUSH(src1, nir_channel(b, ddy, i));
         }
      } else {
         if (min_lod != NULL) {
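            /* Same PRMT packing as the txd case above, with min_lod in the
             * top 16 bits and the array index in the bottom 16 bits.
             */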
            nir_def *arr_idx_or_zero = arr_idx ? arr_idx : nir_imm_int(b, 0);
            nir_def *arr_ml = nir_prmt_nv(b, nir_imm_int(b, 0x1054),
                                          min_lod, arr_idx_or_zero);
            PUSH(src0, arr_ml);
         } else if (arr_idx != NULL) {
            PUSH(src0, arr_idx);
         }

         for (uint32_t i = 0; i < coord_components; i++)
            PUSH(src0, nir_channel(b, coord, i));

         PUSH(src1, tex_h);
         if (ms_idx != NULL)
            PUSH(src1, ms_idx);
         if (lod != NULL)
            PUSH(src1, lod);
         if (offset_mode == NAK_NIR_OFFSET_MODE_AOFFI) {
            PUSH(src1, offset);
         } else if (offset_mode == NAK_NIR_OFFSET_MODE_PER_PX) {
            PUSH(src1, nir_channel(b, offset, 0));
            PUSH(src1, nir_channel(b, offset, 1));
         }
         if (z_cmpr != NULL)
            PUSH(src1, z_cmpr);
      }
   } else {
      unreachable("Unsupported shader model");
   }

   nir_def *vec_srcs[2] = {
      nir_vec(b, src0, src0_comps),
      nir_vec(b, src1, src1_comps),
   };

   tex->src[0].src_type = nir_tex_src_backend1;
   nir_src_rewrite(&tex->src[0].src, vec_srcs[0]);

   tex->src[1].src_type = nir_tex_src_backend2;
   nir_src_rewrite(&tex->src[1].src, vec_srcs[1]);

   /* Remove any extras */
   while (tex->num_srcs > 2)
      nir_tex_instr_remove_src(tex, tex->num_srcs - 1);

   tex->sampler_dim = remap_sampler_dim(tex->sampler_dim);

   struct nak_nir_tex_flags flags = {
      .lod_mode = lod_mode,
      .offset_mode = offset_mode,
      .has_z_cmpr = tex->is_shadow,
      .is_sparse = tex->is_sparse,
   };
   STATIC_ASSERT(sizeof(flags) == sizeof(tex->backend_flags));
   memcpy(&tex->backend_flags, &flags, sizeof(flags));

   if (tex->op == nir_texop_lod) {
      b->cursor = nir_after_instr(&tex->instr);

      /* The outputs are flipped compared to what NIR expects */
      nir_def *abs = nir_channel(b, &tex->def, 1);
      nir_def *rel = nir_channel(b, &tex->def, 0);

      /* The returned values are not quite what we want:
       * (a) convert from s16/u16 to f32
       * (b) multiply by 1/256
       *
       * TODO: We can make this cheaper once we have 16-bit in NAK
       */
      abs = nir_u2f32(b, nir_iand_imm(b, abs, 0xffff));
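      /* The ishl/ishr pair by 16 sign-extends the low 16 bits of rel before
       * the float conversion.
       */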
      nir_def *shift = nir_imm_int(b, 16);
      rel = nir_i2f32(b, nir_ishr(b, nir_ishl(b, rel, shift), shift));

      abs = nir_fmul_imm(b, abs, 1.0 / 256.0);
      rel = nir_fmul_imm(b, rel, 1.0 / 256.0);

      nir_def *res = nir_vec2(b, abs, rel);
      nir_def_rewrite_uses_after(&tex->def, res, res->parent_instr);
   }

   return true;
}

static bool
lower_txq(nir_builder *b, nir_tex_instr *tex, const struct nak_compiler *nak)
{
   b->cursor = nir_before_instr(&tex->instr);

   assert(!tex->is_sparse);

   nir_def *tex_h = NULL, *lod = NULL;
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      switch (tex->src[i].src_type) {
      case nir_tex_src_texture_handle: tex_h = tex->src[i].src.ssa; break;
      case nir_tex_src_sampler_handle: break; /* Ignored */
      case nir_tex_src_lod:            lod = tex->src[i].src.ssa; break;
      default:
         unreachable("Unsupported texture source");
      }
   }

   /* TODO: We should only support 32-bit handles */
   tex_h = nir_u2u32(b, tex_h);

   nir_def *txq_src;
   nir_component_mask_t mask;
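   /* Per the masks chosen below, hdr_dim_nv returns the texture dimensions
    * in components 0-2 with the mip level count in component 3, while
    * tex_type_nv returns the sample count in component 2.
    */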
   switch (tex->op) {
   case nir_texop_txs:
      tex->op = nir_texop_hdr_dim_nv;
      if (lod == NULL)
         lod = nir_imm_int(b, 0);
      txq_src = nir_vec2(b, tex_h, lod);
      mask = BITSET_MASK(tex->def.num_components);
      break;
   case nir_texop_query_levels:
      tex->op = nir_texop_hdr_dim_nv;
      txq_src = nir_vec2(b, tex_h, nir_imm_int(b, 0));
      mask = BITSET_BIT(3);
      break;
   case nir_texop_texture_samples:
      tex->op = nir_texop_tex_type_nv;
      txq_src = tex_h;
      mask = BITSET_BIT(2);
      break;
   default:
      unreachable("Invalid texture query op");
   }

   tex->src[0].src_type = nir_tex_src_backend1;
   nir_src_rewrite(&tex->src[0].src, txq_src);

   /* Remove any extras */
   while (tex->num_srcs > 1)
      nir_tex_instr_remove_src(tex, tex->num_srcs - 1);

   tex->sampler_dim = remap_sampler_dim(tex->sampler_dim);

   b->cursor = nir_after_instr(&tex->instr);

   /* Only pick off selected components */
   tex->def.num_components = 4;
   nir_def *res = nir_channels(b, &tex->def, mask);
   nir_def_rewrite_uses_after(&tex->def, res, res->parent_instr);

   return true;
}

static bool
shrink_image_load(nir_builder *b, nir_intrinsic_instr *intrin,
                  const struct nak_compiler *nak)
{
   enum pipe_format format = nir_intrinsic_format(intrin);
   nir_component_mask_t color_comps_read =
      nir_def_components_read(&intrin->def);

   assert(intrin->intrinsic == nir_intrinsic_bindless_image_load ||
          intrin->intrinsic == nir_intrinsic_bindless_image_sparse_load);

   /* Pick off the sparse resident component (if any) before we do anything
    * else.  This makes later logic easier.
    */
   bool is_sparse = false;
   if (intrin->intrinsic == nir_intrinsic_bindless_image_sparse_load) {
      unsigned resident_comp = intrin->def.num_components - 1;
      if (color_comps_read & BITFIELD_BIT(resident_comp)) {
         is_sparse = true;
         color_comps_read &= ~BITFIELD_BIT(resident_comp);
      } else {
         /* If the sparse bit is never used, get rid of it */
         intrin->intrinsic = nir_intrinsic_bindless_image_load;
         intrin->num_components--;
         intrin->def.num_components--;
      }
   }

   if (intrin->def.bit_size == 64) {
      assert(format == PIPE_FORMAT_NONE ||
             format == PIPE_FORMAT_R64_UINT ||
             format == PIPE_FORMAT_R64_SINT);

      b->cursor = nir_after_instr(&intrin->instr);

      nir_def *data_xy, *data_w, *resident = NULL;
      if (color_comps_read & BITFIELD_BIT(3)) {
         /* Thanks to descriptor indexing, we need to ensure that null
          * descriptor behavior works properly.  In particular, normal zero
          * reads will return (0, 0, 0, 1) whereas null descriptor reads need
          * to return (0, 0, 0, 0).  This means we can't blindly extend with
          * an alpha component of 1.  Instead, we need to trust the hardware
          * to extend the original RG32 with z = 0 and w = 1 and copy that w
          * value all the way out to the 64-bit w value.
          */
         assert(intrin->num_components == 4 + is_sparse);
         assert(intrin->def.num_components == 4 + is_sparse);
         intrin->def.bit_size = 32;

         data_xy = nir_channels(b, &intrin->def, 0x3);
         data_w = nir_channels(b, &intrin->def, 0x8);
         if (is_sparse)
            resident = nir_channel(b, &intrin->def, 4);
      } else {
         intrin->num_components = 2 + is_sparse;
         intrin->def.num_components = 2 + is_sparse;
         intrin->def.bit_size = 32;

         data_xy = nir_channels(b, &intrin->def, 0x3);
         data_w = nir_imm_int(b, 0);
         if (is_sparse)
            resident = nir_channel(b, &intrin->def, 2);
      }

      nir_def *data;
      if (is_sparse) {
         data = nir_vec5(b, nir_pack_64_2x32(b, data_xy),
                         nir_imm_zero(b, 1, 64),
                         nir_imm_zero(b, 1, 64),
                         nir_u2u64(b, data_w),
                         nir_u2u64(b, resident));
      } else {
         data = nir_vec4(b, nir_pack_64_2x32(b, data_xy),
                         nir_imm_zero(b, 1, 64),
                         nir_imm_zero(b, 1, 64),
                         nir_u2u64(b, data_w));
      }

      nir_def_rewrite_uses_after(&intrin->def, data, data->parent_instr);
      return true;
   }

   if (format == PIPE_FORMAT_NONE)
      return false;

   /* In order for null descriptors to work properly, we don't want to shrink
    * loads when the alpha channel is read even if we know the format has
    * fewer channels.
    */
   if (color_comps_read & BITFIELD_BIT(3))
      return false;

   const unsigned old_comps = intrin->def.num_components;

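   /* Round the format's component count up to a power of two, presumably
    * because image loads operate on 1, 2, or 4 components, then shrink
    * further when only .x or .xy are actually read.  For example, if only
    * .xy of an rgba8 load is read, the load shrinks from 4 components to 2.
    */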
   unsigned new_comps = util_format_get_nr_components(format);
   new_comps = util_next_power_of_two(new_comps);
   if (color_comps_read <= BITFIELD_MASK(2))
      new_comps = 2;
   if (color_comps_read <= BITFIELD_MASK(1))
      new_comps = 1;

   if (new_comps + is_sparse >= intrin->num_components)
      return false;

   b->cursor = nir_after_instr(&intrin->instr);

   intrin->num_components = new_comps + is_sparse;
   intrin->def.num_components = new_comps + is_sparse;
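   /* Rebuild a vector of the original width: components the shrunken load
    * no longer supplies are filled with 0, and alpha with 1, matching the
    * (0, 0, 0, 1) default texel expansion described above.
    */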
   assert(new_comps <= 4);
   nir_def *comps[5];
   for (unsigned c = 0; c < new_comps; c++)
      comps[c] = nir_channel(b, &intrin->def, c);
   for (unsigned c = new_comps; c < 3; c++)
      comps[c] = nir_imm_intN_t(b, 0, intrin->def.bit_size);
   if (new_comps < 4)
      comps[3] = nir_imm_intN_t(b, 1, intrin->def.bit_size);

   /* The resident bit always goes in the last channel */
   if (is_sparse)
      comps[old_comps - 1] = nir_channel(b, &intrin->def, new_comps);

   nir_def *data = nir_vec(b, comps, old_comps);
   nir_def_rewrite_uses_after(&intrin->def, data, data->parent_instr);
   return true;
}

static bool
shrink_image_store(nir_builder *b, nir_intrinsic_instr *intrin,
                   const struct nak_compiler *nak)
{
   enum pipe_format format = nir_intrinsic_format(intrin);
   nir_def *data = intrin->src[3].ssa;

   if (data->bit_size == 64) {
      assert(format == PIPE_FORMAT_NONE ||
             format == PIPE_FORMAT_R64_UINT ||
             format == PIPE_FORMAT_R64_SINT);

      b->cursor = nir_before_instr(&intrin->instr);

      /* For 64-bit image ops, we actually want a vec2 */
      nir_def *data_vec2 = nir_unpack_64_2x32(b, nir_channel(b, data, 0));
      nir_src_rewrite(&intrin->src[3], data_vec2);
      intrin->num_components = 2;
      return true;
   }

   if (format == PIPE_FORMAT_NONE)
      return false;

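   /* As with loads, round the store up to a power-of-two component count,
    * presumably because image stores operate on 1, 2, or 4 components.
    */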
   unsigned new_comps = util_format_get_nr_components(format);
   new_comps = util_next_power_of_two(new_comps);
   if (new_comps >= intrin->num_components)
      return false;

   b->cursor = nir_before_instr(&intrin->instr);

   nir_def *trimmed = nir_trim_vector(b, data, new_comps);
   nir_src_rewrite(&intrin->src[3], trimmed);
   intrin->num_components = new_comps;
   return true;
}

static bool
lower_image_txq(nir_builder *b, nir_intrinsic_instr *intrin,
                const struct nak_compiler *nak)
{
   b->cursor = nir_instr_remove(&intrin->instr);

   /* TODO: We should only support 32-bit handles */
   nir_def *img_h = nir_u2u32(b, intrin->src[0].ssa);

   nir_tex_instr *txq = nir_tex_instr_create(b->shader, 1);
   txq->sampler_dim = remap_sampler_dim(nir_intrinsic_image_dim(intrin));
   txq->is_array = nir_intrinsic_image_array(intrin);
   txq->dest_type = nir_type_int32;

   nir_component_mask_t mask;
   switch (intrin->intrinsic) {
   case nir_intrinsic_bindless_image_size: {
      nir_def *lod = intrin->src[1].ssa;

      txq->op = nir_texop_hdr_dim_nv;
      txq->src[0] = (nir_tex_src) {
         .src_type = nir_tex_src_backend1,
         .src = nir_src_for_ssa(nir_vec2(b, img_h, lod)),
      };
      mask = BITSET_MASK(intrin->def.num_components);
      break;
   }

   case nir_intrinsic_bindless_image_samples:
      txq->op = nir_texop_tex_type_nv;
      txq->src[0] = (nir_tex_src) {
         .src_type = nir_tex_src_backend1,
         .src = nir_src_for_ssa(img_h),
      };
      mask = BITSET_BIT(2);
      break;

   default:
      unreachable("Invalid image query op");
   }

   nir_def_init(&txq->instr, &txq->def, 4, 32);
   nir_builder_instr_insert(b, &txq->instr);

   /* Only pick off selected components */
   nir_def *res = nir_channels(b, &txq->def, mask);

   nir_def_rewrite_uses(&intrin->def, res);

   return true;
}

static bool
lower_tex_instr(nir_builder *b, nir_instr *instr, void *_data)
{
   const struct nak_compiler *nak = _data;

   switch (instr->type) {
   case nir_instr_type_tex: {
      nir_tex_instr *tex = nir_instr_as_tex(instr);
      switch (tex->op) {
      case nir_texop_tex:
      case nir_texop_txb:
      case nir_texop_txl:
      case nir_texop_txd:
      case nir_texop_txf:
      case nir_texop_txf_ms:
      case nir_texop_tg4:
      case nir_texop_lod:
         return lower_tex(b, tex, nak);
      case nir_texop_txs:
      case nir_texop_query_levels:
      case nir_texop_texture_samples:
         return lower_txq(b, tex, nak);
      default:
         unreachable("Unsupported texture instruction");
      }
   }
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_bindless_image_load:
      case nir_intrinsic_bindless_image_sparse_load:
         return shrink_image_load(b, intrin, nak);
      case nir_intrinsic_bindless_image_store:
         return shrink_image_store(b, intrin, nak);
      case nir_intrinsic_bindless_image_size:
      case nir_intrinsic_bindless_image_samples:
         return lower_image_txq(b, intrin, nak);
      default:
         return false;
      }
   }
   default:
      return false;
   }
}

bool
nak_nir_lower_tex(nir_shader *nir, const struct nak_compiler *nak)
{
   return nir_shader_instructions_pass(nir, lower_tex_instr,
                                       nir_metadata_control_flow,
                                       (void *)nak);
}
}
589