/*
 * Copyright © 2021 Google, Inc.
 * SPDX-License-Identifier: MIT
 */

#include "ir3_nir.h"

/*
 * Lowering for 64b intrinsics generated with OpenCL or with
 * VK_KHR_buffer_device_address. All our intrinsics from a hw
 * standpoint are 32b, so we just need to combine in zero for
 * the upper 32bits and let the other nir passes clean up the mess.
 */
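
/*
 * For example (an illustrative sketch, not exact NIR print syntax), a
 * single-component 64b SSBO load:
 *
 *    vec1 64 ssa_2 = intrinsic load_ssbo (ssa_0, ssa_1)
 *
 * is rewritten as a two-component 32b load whose halves get packed back
 * into a 64b value:
 *
 *    vec2 32 ssa_3 = intrinsic load_ssbo (ssa_0, ssa_1)
 *    vec1 64 ssa_4 = pack_64_2x32 ssa_3
 *
 * and 64b stores are split the same way, one 32b vec2 store per written
 * 64b component.
 */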

static bool
lower_64b_intrinsics_filter(const nir_instr *instr, const void *unused)
{
   (void)unused;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   if (intr->intrinsic == nir_intrinsic_load_deref ||
       intr->intrinsic == nir_intrinsic_store_deref)
      return false;

   if (is_intrinsic_store(intr->intrinsic))
      return nir_src_bit_size(intr->src[0]) == 64;

   if (nir_intrinsic_dest_components(intr) == 0)
      return false;

   return intr->def.bit_size == 64;
}

static nir_def *
lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
{
   (void)unused;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   /* We could be *slightly* more clever and, for ex, turn a 64b vec4
    * load into two 32b vec4 loads, rather than 4 32b vec2 loads.
    */

   if (is_intrinsic_store(intr->intrinsic)) {
      unsigned offset_src_idx;
      switch (intr->intrinsic) {
      case nir_intrinsic_store_ssbo:
      case nir_intrinsic_store_global_ir3:
         offset_src_idx = 2;
         break;
      default:
         offset_src_idx = 1;
      }

      unsigned num_comp = nir_intrinsic_src_components(intr, 0);
      unsigned wrmask = nir_intrinsic_has_write_mask(intr) ?
         nir_intrinsic_write_mask(intr) : BITSET_MASK(num_comp);
      nir_def *val = intr->src[0].ssa;
      nir_def *off = intr->src[offset_src_idx].ssa;

      for (unsigned i = 0; i < num_comp; i++) {
         if (!(wrmask & BITFIELD_BIT(i)))
            continue;

         nir_def *c64 = nir_channel(b, val, i);
         nir_def *c32 = nir_unpack_64_2x32(b, c64);

         nir_intrinsic_instr *store =
            nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intr->instr));
         store->num_components = 2;
         store->src[0] = nir_src_for_ssa(c32);
         store->src[offset_src_idx] = nir_src_for_ssa(off);

         if (nir_intrinsic_has_write_mask(intr))
            nir_intrinsic_set_write_mask(store, 0x3);
         nir_builder_instr_insert(b, &store->instr);

         off = nir_iadd_imm(b, off, 8);
      }

      return NIR_LOWER_INSTR_PROGRESS_REPLACE;
   }

   unsigned num_comp = nir_intrinsic_dest_components(intr);

   nir_def *def = &intr->def;
   def->bit_size = 32;

   /* load_kernel_input is handled specially, lowering to two 32b inputs:
    */
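   /* For example (illustrative): a 64b @load_kernel_input at byte offset N
    * keeps the original load (now narrowed to 32b) as the low dword, adds a
    * second 32b @load_kernel_input at N + 4 for the high dword, and packs
    * the two back together with pack_64_2x32_split.
    */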
   if (intr->intrinsic == nir_intrinsic_load_kernel_input) {
      assert(num_comp == 1);

      nir_def *offset = nir_iadd_imm(b,
            intr->src[0].ssa, 4);

      nir_def *upper = nir_load_kernel_input(b, 1, 32, offset);

      return nir_pack_64_2x32_split(b, def, upper);
   }

   nir_def *components[num_comp];

   if (is_intrinsic_load(intr->intrinsic)) {
      unsigned offset_src_idx;
      switch (intr->intrinsic) {
      case nir_intrinsic_load_ssbo:
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_global_ir3:
         offset_src_idx = 1;
         break;
      default:
         offset_src_idx = 0;
      }

      nir_def *off = intr->src[offset_src_idx].ssa;

      for (unsigned i = 0; i < num_comp; i++) {
         nir_intrinsic_instr *load =
            nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intr->instr));
         load->num_components = 2;
         load->src[offset_src_idx] = nir_src_for_ssa(off);

         nir_def_init(&load->instr, &load->def, 2, 32);
         nir_builder_instr_insert(b, &load->instr);

         components[i] = nir_pack_64_2x32(b, &load->def);

         off = nir_iadd_imm(b, off, 8);
      }
   } else {
      /* The remaining (non load/store) intrinsics just get zero-
       * extended from 32b to 64b:
       */
      for (unsigned i = 0; i < num_comp; i++) {
         nir_def *c = nir_channel(b, def, i);
         components[i] = nir_pack_64_2x32_split(b, c, nir_imm_zero(b, 1, 32));
      }
   }

   return nir_build_alu_src_arr(b, nir_op_vec(num_comp), components);
}

bool
ir3_nir_lower_64b_intrinsics(nir_shader *shader)
{
   return nir_shader_lower_instructions(
         shader, lower_64b_intrinsics_filter,
         lower_64b_intrinsics, NULL);
}

/*
 * Lowering for 64b undef instructions, splitting into two 32b undefs
 */
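
/*
 * For example (an illustrative sketch, not exact NIR print syntax):
 *
 *    vec1 64 ssa_0 = undef
 *
 * becomes roughly
 *
 *    vec2 32 ssa_1 = undef
 *    vec1 64 ssa_2 = pack_64_2x32_split ssa_1.x, ssa_1.y
 */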

static nir_def *
lower_64b_undef(nir_builder *b, nir_instr *instr, void *unused)
{
   (void)unused;

   nir_undef_instr *undef = nir_instr_as_undef(instr);
   unsigned num_comp = undef->def.num_components;
   nir_def *components[num_comp];

   for (unsigned i = 0; i < num_comp; i++) {
      nir_def *lowered = nir_undef(b, 2, 32);

      components[i] = nir_pack_64_2x32_split(b,
                                             nir_channel(b, lowered, 0),
                                             nir_channel(b, lowered, 1));
   }

   return nir_build_alu_src_arr(b, nir_op_vec(num_comp), components);
}

static bool
lower_64b_undef_filter(const nir_instr *instr, const void *unused)
{
   (void)unused;

   return instr->type == nir_instr_type_undef &&
      nir_instr_as_undef(instr)->def.bit_size == 64;
}

bool
ir3_nir_lower_64b_undef(nir_shader *shader)
{
   return nir_shader_lower_instructions(
         shader, lower_64b_undef_filter,
         lower_64b_undef, NULL);
}

/*
 * Lowering for load_global/store_global with 64b addresses to ir3
 * variants, which instead take a uvec2_32
 */
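
/*
 * For example (an illustrative sketch), the 64b address of a load_global
 * is unpacked into a uvec2 of 32b halves, and the load becomes a
 * load_global_ir3 that takes the uvec2 address plus a separate 32b offset:
 *
 *    vec2 32 ssa_addr = unpack_64_2x32 ssa_addr64
 *    ...              = intrinsic load_global_ir3 (ssa_addr, 0)
 */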

static bool
lower_64b_global_filter(const nir_instr *instr, const void *unused)
{
   (void)unused;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   switch (intr->intrinsic) {
   case nir_intrinsic_load_global:
   case nir_intrinsic_load_global_constant:
   case nir_intrinsic_store_global:
   case nir_intrinsic_global_atomic:
   case nir_intrinsic_global_atomic_swap:
      return true;
   default:
      return false;
   }
}

static nir_def *
lower_64b_global(nir_builder *b, nir_instr *instr, void *unused)
{
   (void)unused;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   bool load = intr->intrinsic != nir_intrinsic_store_global;

   nir_def *addr64 = intr->src[load ? 0 : 1].ssa;
   nir_def *addr = nir_unpack_64_2x32(b, addr64);

   /*
    * Note that we can get vec8/vec16 with OpenCL, so we need to split
    * those up into at most 4 components per load/store.
    */
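   /* For example (illustrative): a vec8 load is emitted as two vec4
    * load_global_ir3 at offsets 0 and 4, and the resulting channels are
    * recombined into a single vec8 value; stores are split the same way.
    */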

   if (intr->intrinsic == nir_intrinsic_global_atomic) {
      return nir_global_atomic_ir3(
            b, intr->def.bit_size, addr,
            intr->src[1].ssa,
         .atomic_op = nir_intrinsic_atomic_op(intr));
   } else if (intr->intrinsic == nir_intrinsic_global_atomic_swap) {
      return nir_global_atomic_swap_ir3(
         b, intr->def.bit_size, addr,
         intr->src[1].ssa, intr->src[2].ssa,
         .atomic_op = nir_intrinsic_atomic_op(intr));
   }

   if (load) {
      unsigned num_comp = nir_intrinsic_dest_components(intr);
      nir_def *components[num_comp];
      for (unsigned off = 0; off < num_comp;) {
         unsigned c = MIN2(num_comp - off, 4);
         nir_def *val = nir_load_global_ir3(
               b, c, intr->def.bit_size,
               addr, nir_imm_int(b, off));
         for (unsigned i = 0; i < c; i++) {
            components[off++] = nir_channel(b, val, i);
         }
      }
      return nir_build_alu_src_arr(b, nir_op_vec(num_comp), components);
   } else {
      unsigned num_comp = nir_intrinsic_src_components(intr, 0);
      nir_def *value = intr->src[0].ssa;
      for (unsigned off = 0; off < num_comp; off += 4) {
         unsigned c = MIN2(num_comp - off, 4);
         nir_def *v = nir_channels(b, value, BITFIELD_MASK(c) << off);
         nir_store_global_ir3(b, v, addr, nir_imm_int(b, off));
      }
      return NIR_LOWER_INSTR_PROGRESS_REPLACE;
   }
}

bool
ir3_nir_lower_64b_global(nir_shader *shader)
{
   return nir_shader_lower_instructions(
         shader, lower_64b_global_filter,
         lower_64b_global, NULL);
}

/*
 * Lowering for 64b registers:
 * - @decl_reg -> split in two 32b ones
 * - @store_reg -> unpack_64_2x32_split_x/y and two separate stores
 * - @load_reg -> two separate loads and pack_64_2x32_split
 */
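
/*
 * For example (an illustrative sketch, not exact NIR print syntax):
 *
 *    decl_reg vec1 64 r0
 *    @store_reg (%v64, r0)
 *    %x64 = @load_reg (r0)
 *
 * becomes roughly
 *
 *    decl_reg vec1 32 r0_lo
 *    decl_reg vec1 32 r0_hi
 *    @store_reg (unpack_64_2x32_split_x %v64, r0_lo)
 *    @store_reg (unpack_64_2x32_split_y %v64, r0_hi)
 *    %x64 = pack_64_2x32_split (@load_reg r0_lo, @load_reg r0_hi)
 */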

static void
lower_64b_reg(nir_builder *b, nir_intrinsic_instr *reg)
{
   unsigned num_components = nir_intrinsic_num_components(reg);
   unsigned num_array_elems = nir_intrinsic_num_array_elems(reg);

   nir_def *reg_hi = nir_decl_reg(b, num_components, 32, num_array_elems);
   nir_def *reg_lo = nir_decl_reg(b, num_components, 32, num_array_elems);

   nir_foreach_reg_store_safe (store_reg_src, reg) {
      nir_intrinsic_instr *store =
         nir_instr_as_intrinsic(nir_src_parent_instr(store_reg_src));
      b->cursor = nir_before_instr(&store->instr);

      nir_def *packed = store->src[0].ssa;
      nir_def *unpacked_lo = nir_unpack_64_2x32_split_x(b, packed);
      nir_def *unpacked_hi = nir_unpack_64_2x32_split_y(b, packed);
      int base = nir_intrinsic_base(store);

      if (store->intrinsic == nir_intrinsic_store_reg) {
         nir_build_store_reg(b, unpacked_lo, reg_lo, .base = base);
         nir_build_store_reg(b, unpacked_hi, reg_hi, .base = base);
      } else {
         assert(store->intrinsic == nir_intrinsic_store_reg_indirect);

         nir_def *offset = store->src[2].ssa;
         nir_store_reg_indirect(b, unpacked_lo, reg_lo, offset, .base = base);
         nir_store_reg_indirect(b, unpacked_hi, reg_hi, offset, .base = base);
      }

      nir_instr_remove(&store->instr);
   }

   nir_foreach_reg_load_safe (load_reg_src, reg) {
      nir_intrinsic_instr *load =
         nir_instr_as_intrinsic(nir_src_parent_instr(load_reg_src));
      b->cursor = nir_before_instr(&load->instr);

      int base = nir_intrinsic_base(load);
      nir_def *load_lo, *load_hi;

      if (load->intrinsic == nir_intrinsic_load_reg) {
         load_lo =
            nir_build_load_reg(b, num_components, 32, reg_lo, .base = base);
         load_hi =
            nir_build_load_reg(b, num_components, 32, reg_hi, .base = base);
      } else {
         assert(load->intrinsic == nir_intrinsic_load_reg_indirect);

         nir_def *offset = load->src[1].ssa;
         load_lo = nir_load_reg_indirect(b, num_components, 32, reg_lo, offset,
                                         .base = base);
         load_hi = nir_load_reg_indirect(b, num_components, 32, reg_hi, offset,
                                         .base = base);
      }

      nir_def *packed = nir_pack_64_2x32_split(b, load_lo, load_hi);
      nir_def_rewrite_uses(&load->def, packed);
      nir_instr_remove(&load->instr);
   }

   nir_instr_remove(&reg->instr);
}

bool
ir3_nir_lower_64b_regs(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function_impl (impl, shader) {
      bool impl_progress = false;
      nir_builder b = nir_builder_create(impl);

      nir_foreach_reg_decl_safe (reg, impl) {
         if (nir_intrinsic_bit_size(reg) == 64) {
            lower_64b_reg(&b, reg);
            impl_progress = true;
         }
      }

      if (impl_progress) {
         nir_metadata_preserve(
            impl, nir_metadata_control_flow);
         progress = true;
      }
   }

   return progress;
}
381