/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file
 *
 * This file supports generating the FS LIR from the GLSL IR.  The LIR
 * makes it easier to do backend-specific optimizations than doing so
 * in the GLSL IR or in the native code.
 */
#include "brw_eu.h"
#include "brw_fs.h"
#include "brw_fs_builder.h"
#include "brw_nir.h"
#include "compiler/glsl_types.h"
#include "dev/intel_device_info.h"

using namespace brw;

void
fs_visitor::emit_urb_writes(const brw_reg &gs_vertex_count)
{
   int slot, urb_offset, length;
   int starting_urb_offset = 0;
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(this->prog_data);
   const GLbitfield64 psiz_mask =
      VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT | VARYING_BIT_PSIZ | VARYING_BIT_PRIMITIVE_SHADING_RATE;
   const struct intel_vue_map *vue_map = &vue_prog_data->vue_map;
   bool flush;
   brw_reg sources[8];
   brw_reg urb_handle;

   switch (stage) {
   case MESA_SHADER_VERTEX:
      urb_handle = vs_payload().urb_handles;
      break;
   case MESA_SHADER_TESS_EVAL:
      urb_handle = tes_payload().urb_output;
      break;
   case MESA_SHADER_GEOMETRY:
      urb_handle = gs_payload().urb_handles;
      break;
   default:
      unreachable("invalid stage");
   }

   const fs_builder bld = fs_builder(this).at_end();

   brw_reg per_slot_offsets;

   if (stage == MESA_SHADER_GEOMETRY) {
      const struct brw_gs_prog_data *gs_prog_data =
         brw_gs_prog_data(this->prog_data);

      /* We need to increment the Global Offset to skip over the control data
       * header and the extra "Vertex Count" field (1 HWord) at the beginning
       * of the VUE.  We're counting in OWords, so the units are doubled.
       */
      starting_urb_offset = 2 * gs_prog_data->control_data_header_size_hwords;
      if (gs_prog_data->static_vertex_count == -1)
         starting_urb_offset += 2;

      /* The URB offset is in 128-bit units, so we need to multiply by 2 */
      const int output_vertex_size_owords =
         gs_prog_data->output_vertex_size_hwords * 2;

      /* On Xe2+ platforms, the LSC can operate on Dword data elements with
       * byte offset granularity, so convert the per-slot offset from OWords
       * (16 bytes each) to bytes; on earlier platforms, keep the per-slot
       * offset in OWord units.
       */
      const int output_vertex_size = devinfo->ver >= 20 ?
                                     output_vertex_size_owords * 16 :
                                     output_vertex_size_owords;
      if (gs_vertex_count.file == IMM) {
         per_slot_offsets = brw_imm_ud(output_vertex_size *
                                       gs_vertex_count.ud);
      } else {
         per_slot_offsets = bld.vgrf(BRW_TYPE_UD);
         bld.MUL(per_slot_offsets, gs_vertex_count,
                 brw_imm_ud(output_vertex_size));
      }
   }
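
   /* An illustrative example with hypothetical values: if
    * output_vertex_size_hwords were 2, each vertex would occupy 4 OWords,
    * so on pre-Xe2 platforms vertex N would start at a per-slot offset of
    * 4*N OWords, while on Xe2+ the same position would be 64*N bytes.
    */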

   length = 0;
   urb_offset = starting_urb_offset;
   flush = false;

   /* SSO shaders can have VUE slots allocated which are never actually
    * written to, so ignore them when looking for the last (written) slot.
    */
   int last_slot = vue_map->num_slots - 1;
   while (last_slot > 0 &&
          (vue_map->slot_to_varying[last_slot] == BRW_VARYING_SLOT_PAD ||
           outputs[vue_map->slot_to_varying[last_slot]].file == BAD_FILE)) {
      last_slot--;
   }

   bool urb_written = false;
   for (slot = 0; slot < vue_map->num_slots; slot++) {
      int varying = vue_map->slot_to_varying[slot];
      switch (varying) {
      case VARYING_SLOT_PSIZ: {
         /* The point size varying slot is in the vue header and is always in
          * the vue map.  But often none of the special varyings that live
          * there are written and in that case we can skip writing to the vue
          * header, provided the corresponding state properly clamps the
          * values further down the pipeline. */
         if ((vue_map->slots_valid & psiz_mask) == 0) {
            assert(length == 0);
            urb_offset++;
            break;
         }

         brw_reg zero = brw_vgrf(alloc.allocate(dispatch_width / 8),
                                 BRW_TYPE_UD);
         bld.MOV(zero, brw_imm_ud(0u));

         if (vue_map->slots_valid & VARYING_BIT_PRIMITIVE_SHADING_RATE &&
             this->outputs[VARYING_SLOT_PRIMITIVE_SHADING_RATE].file != BAD_FILE) {
            sources[length++] = this->outputs[VARYING_SLOT_PRIMITIVE_SHADING_RATE];
         } else if (devinfo->has_coarse_pixel_primitive_and_cb) {
            uint32_t one_fp16 = 0x3C00;
            brw_reg one_by_one_fp16 = brw_vgrf(alloc.allocate(dispatch_width / 8),
                                               BRW_TYPE_UD);
            bld.MOV(one_by_one_fp16, brw_imm_ud((one_fp16 << 16) | one_fp16));
            sources[length++] = one_by_one_fp16;
         } else {
            sources[length++] = zero;
         }

         if (vue_map->slots_valid & VARYING_BIT_LAYER)
            sources[length++] = this->outputs[VARYING_SLOT_LAYER];
         else
            sources[length++] = zero;

         if (vue_map->slots_valid & VARYING_BIT_VIEWPORT)
            sources[length++] = this->outputs[VARYING_SLOT_VIEWPORT];
         else
            sources[length++] = zero;

         if (vue_map->slots_valid & VARYING_BIT_PSIZ)
            sources[length++] = this->outputs[VARYING_SLOT_PSIZ];
         else
            sources[length++] = zero;
         break;
      }
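
      /* A recap of the header layout assembled above (not an additional
       * write): the four components queued for the VUE header slot are
       * { primitive shading rate (or a 1x1 fp16 pair, or zero), layer,
       * viewport, point size }, with zero substituted for any special
       * varying the shader did not write.
       */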
      case VARYING_SLOT_EDGE:
         unreachable("unexpected scalar vs output");
         break;

      default:
         /* gl_Position is always in the vue map, but isn't always written by
          * the shader.  Other varyings (clip distances) get added to the vue
          * map but don't always get written.  In those cases, the
          * corresponding this->outputs[] slot will be invalid and we can
          * skip the urb write for the varying.  If we've already queued up a
          * vue slot for writing we flush a mlen 5 urb write, otherwise we
          * just advance the urb_offset.
          */
         if (varying == BRW_VARYING_SLOT_PAD ||
             this->outputs[varying].file == BAD_FILE) {
            if (length > 0)
               flush = true;
            else
               urb_offset++;
            break;
         }

         int slot_offset = 0;

         /* When using Primitive Replication, there may be multiple slots
          * assigned to POS.
          */
         if (varying == VARYING_SLOT_POS)
            slot_offset = slot - vue_map->varying_to_slot[VARYING_SLOT_POS];

         for (unsigned i = 0; i < 4; i++) {
            sources[length++] = offset(this->outputs[varying], bld,
                                       i + (slot_offset * 4));
         }
         break;
      }

      const fs_builder abld = bld.annotate("URB write");

      /* If we've queued up 8 registers of payload (2 VUE slots), if this is
       * the last slot, or if we need to flush (see the BAD_FILE varying case
       * above), emit a URB write send now to flush out the data.
       */
      if (length == 8 || (length > 0 && slot == last_slot))
         flush = true;
      if (flush) {
         brw_reg srcs[URB_LOGICAL_NUM_SRCS];

         srcs[URB_LOGICAL_SRC_HANDLE] = urb_handle;
         srcs[URB_LOGICAL_SRC_PER_SLOT_OFFSETS] = per_slot_offsets;
         srcs[URB_LOGICAL_SRC_DATA] = brw_vgrf(alloc.allocate((dispatch_width / 8) * length),
                                               BRW_TYPE_F);
         srcs[URB_LOGICAL_SRC_COMPONENTS] = brw_imm_ud(length);
         abld.LOAD_PAYLOAD(srcs[URB_LOGICAL_SRC_DATA], sources, length, 0);

         fs_inst *inst = abld.emit(SHADER_OPCODE_URB_WRITE_LOGICAL, reg_undef,
                                   srcs, ARRAY_SIZE(srcs));

         /* For Wa_1805992985 an additional write is needed at the end. */
         if (intel_needs_workaround(devinfo, 1805992985) && stage == MESA_SHADER_TESS_EVAL)
            inst->eot = false;
         else
            inst->eot = slot == last_slot && stage != MESA_SHADER_GEOMETRY;

         inst->offset = urb_offset;
         urb_offset = starting_urb_offset + slot + 1;
         length = 0;
         flush = false;
         urb_written = true;
      }
   }
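
   /* Illustrative flush cadence (hypothetical VUE map): each written slot
    * queues 4 components, and a send is emitted once 8 components (2 VUE
    * slots) are queued, an unwritten slot follows queued data, or the last
    * written slot is reached.  A map with a header slot plus three generic
    * slots would thus produce two URB writes.
    */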

   /* If we don't have any valid slots to write, just do a minimal urb write
    * send to terminate the shader.  This includes 1 slot of undefined data,
    * because it's invalid to write 0 data:
    *
    * From the Broadwell PRM, Volume 7: 3D Media GPGPU, Shared Functions -
    * Unified Return Buffer (URB) > URB_SIMD8_Write and URB_SIMD8_Read >
    * Write Data Payload:
    *
    *    "The write data payload can be between 1 and 8 message phases long."
    */
   if (!urb_written) {
      /* For GS, just turn EmitVertex() into a no-op.  We don't want it to
       * end the thread, and emit_gs_thread_end() already emits a SEND with
       * EOT at the end of the program for us.
       */
      if (stage == MESA_SHADER_GEOMETRY)
         return;

      brw_reg uniform_urb_handle = brw_vgrf(alloc.allocate(dispatch_width / 8),
                                            BRW_TYPE_UD);
      brw_reg payload = brw_vgrf(alloc.allocate(dispatch_width / 8),
                                 BRW_TYPE_UD);

      bld.exec_all().MOV(uniform_urb_handle, urb_handle);

      brw_reg srcs[URB_LOGICAL_NUM_SRCS];
      srcs[URB_LOGICAL_SRC_HANDLE] = uniform_urb_handle;
      srcs[URB_LOGICAL_SRC_DATA] = payload;
      srcs[URB_LOGICAL_SRC_COMPONENTS] = brw_imm_ud(1);

      fs_inst *inst = bld.emit(SHADER_OPCODE_URB_WRITE_LOGICAL, reg_undef,
                               srcs, ARRAY_SIZE(srcs));
      inst->eot = true;
      inst->offset = 1;
      return;
   }

   /* Wa_1805992985:
    *
    * The GPU hangs on one of the tessellation VK-CTS tests when the DS is
    * not done.  The send cycle, which is a urb write with an eot, must be
    * 4 phases long and all 8 lanes must be valid.
    */
   if (intel_needs_workaround(devinfo, 1805992985) && stage == MESA_SHADER_TESS_EVAL) {
      assert(dispatch_width == 8);
      brw_reg uniform_urb_handle = brw_vgrf(alloc.allocate(1), BRW_TYPE_UD);
      brw_reg uniform_mask = brw_vgrf(alloc.allocate(1), BRW_TYPE_UD);
      brw_reg payload = brw_vgrf(alloc.allocate(4), BRW_TYPE_UD);

      /* Workaround requires all 8 channels (lanes) to be valid. This is
       * understood to mean they all need to be alive. First trick is to find
       * a live channel and copy its urb handle for all the other channels to
       * make sure all handles are valid.
       */
      bld.exec_all().MOV(uniform_urb_handle, bld.emit_uniformize(urb_handle));

      /* Second trick is to use a masked URB write where one can tell the HW
       * to actually write data only for selected channels even though all
       * are active.
       * Third trick is to take advantage of the must-be-zero (MBZ) area in
       * the very beginning of the URB.
       *
       * One masks data to be written only for the first channel and uses
       * offset zero explicitly to land data in the MBZ area, avoiding
       * trashing any other part of the URB.
       *
       * Since the WA says that the write needs to be 4 phases long, one
       * uses 4 slots of data.  All are explicitly zeros in order to keep
       * the MBZ area written as zeros.
       */
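      /* The channel mask is presumably taken from the high 16 bits of this
       * value, so 0x10000 enables only channel 0, matching the "first
       * channel" trick above.
       */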
      bld.exec_all().MOV(uniform_mask, brw_imm_ud(0x10000u));
      bld.exec_all().MOV(offset(payload, bld, 0), brw_imm_ud(0u));
      bld.exec_all().MOV(offset(payload, bld, 1), brw_imm_ud(0u));
      bld.exec_all().MOV(offset(payload, bld, 2), brw_imm_ud(0u));
      bld.exec_all().MOV(offset(payload, bld, 3), brw_imm_ud(0u));

      brw_reg srcs[URB_LOGICAL_NUM_SRCS];
      srcs[URB_LOGICAL_SRC_HANDLE] = uniform_urb_handle;
      srcs[URB_LOGICAL_SRC_CHANNEL_MASK] = uniform_mask;
      srcs[URB_LOGICAL_SRC_DATA] = payload;
      srcs[URB_LOGICAL_SRC_COMPONENTS] = brw_imm_ud(4);

      fs_inst *inst = bld.exec_all().emit(SHADER_OPCODE_URB_WRITE_LOGICAL,
                                          reg_undef, srcs, ARRAY_SIZE(srcs));
      inst->eot = true;
      inst->offset = 0;
   }
}

void
fs_visitor::emit_cs_terminate()
{
   const fs_builder ubld = fs_builder(this).at_end().exec_all();

   /* We can't directly send from g0, since sends with EOT have to use
    * g112-127.  So, copy it to a virtual register; the register allocator
    * will make sure it uses the appropriate register range.
    */
   struct brw_reg g0 = retype(brw_vec8_grf(0, 0), BRW_TYPE_UD);
   brw_reg payload = brw_vgrf(alloc.allocate(reg_unit(devinfo)),
                              BRW_TYPE_UD);
   ubld.group(8 * reg_unit(devinfo), 0).MOV(payload, g0);

   /* Set the descriptor to "Dereference Resource" and "Root Thread" */
   unsigned desc = 0;

   /* Set Resource Select to "Do not dereference URB" on Gfx < 11.
    *
    * Note that even though the thread has a URB resource associated with it,
    * we set the "do not dereference URB" bit, because the URB resource is
    * managed by the fixed-function unit, so it will free it automatically.
    */
   if (devinfo->ver < 11)
      desc |= (1 << 4); /* Do not dereference URB */

   brw_reg srcs[4] = {
      brw_imm_ud(desc), /* desc */
      brw_imm_ud(0),    /* ex_desc */
      payload,          /* payload */
      brw_reg(),        /* payload2 */
   };

   fs_inst *send = ubld.emit(SHADER_OPCODE_SEND, reg_undef, srcs, 4);

   /* On Alchemist and later, send an EOT message to the message gateway to
    * terminate a compute shader.  For older GPUs, send to the thread spawner.
    */
   send->sfid = devinfo->verx10 >= 125 ? BRW_SFID_MESSAGE_GATEWAY
                                       : BRW_SFID_THREAD_SPAWNER;
   send->mlen = reg_unit(devinfo);
   send->eot = true;
}

fs_visitor::fs_visitor(const struct brw_compiler *compiler,
                       const struct brw_compile_params *params,
                       const brw_base_prog_key *key,
                       struct brw_stage_prog_data *prog_data,
                       const nir_shader *shader,
                       unsigned dispatch_width,
                       bool needs_register_pressure,
                       bool debug_enabled)
   : compiler(compiler), log_data(params->log_data),
     devinfo(compiler->devinfo), nir(shader),
     mem_ctx(params->mem_ctx),
     cfg(NULL), stage(shader->info.stage),
     debug_enabled(debug_enabled),
     key(key), gs_compile(NULL), prog_data(prog_data),
     live_analysis(this), regpressure_analysis(this),
     performance_analysis(this), idom_analysis(this), def_analysis(this),
     needs_register_pressure(needs_register_pressure),
     dispatch_width(dispatch_width),
     max_polygons(0),
     api_subgroup_size(brw_nir_api_subgroup_size(shader, dispatch_width))
{
   init();
}

fs_visitor::fs_visitor(const struct brw_compiler *compiler,
                       const struct brw_compile_params *params,
                       const brw_wm_prog_key *key,
                       struct brw_wm_prog_data *prog_data,
                       const nir_shader *shader,
                       unsigned dispatch_width, unsigned max_polygons,
                       bool needs_register_pressure,
                       bool debug_enabled)
   : compiler(compiler), log_data(params->log_data),
     devinfo(compiler->devinfo), nir(shader),
     mem_ctx(params->mem_ctx),
     cfg(NULL), stage(shader->info.stage),
     debug_enabled(debug_enabled),
     key(&key->base), gs_compile(NULL), prog_data(&prog_data->base),
     live_analysis(this), regpressure_analysis(this),
     performance_analysis(this), idom_analysis(this), def_analysis(this),
     needs_register_pressure(needs_register_pressure),
     dispatch_width(dispatch_width),
     max_polygons(max_polygons),
     api_subgroup_size(brw_nir_api_subgroup_size(shader, dispatch_width))
{
   init();
   assert(api_subgroup_size == 0 ||
          api_subgroup_size == 8 ||
          api_subgroup_size == 16 ||
          api_subgroup_size == 32);
}

fs_visitor::fs_visitor(const struct brw_compiler *compiler,
                       const struct brw_compile_params *params,
                       struct brw_gs_compile *c,
                       struct brw_gs_prog_data *prog_data,
                       const nir_shader *shader,
                       bool needs_register_pressure,
                       bool debug_enabled)
   : compiler(compiler), log_data(params->log_data),
     devinfo(compiler->devinfo), nir(shader),
     mem_ctx(params->mem_ctx),
     cfg(NULL), stage(shader->info.stage),
     debug_enabled(debug_enabled),
     key(&c->key.base), gs_compile(c),
     prog_data(&prog_data->base.base),
     live_analysis(this), regpressure_analysis(this),
     performance_analysis(this), idom_analysis(this), def_analysis(this),
     needs_register_pressure(needs_register_pressure),
     dispatch_width(compiler->devinfo->ver >= 20 ? 16 : 8),
     max_polygons(0),
     api_subgroup_size(brw_nir_api_subgroup_size(shader, dispatch_width))
{
   init();
   assert(api_subgroup_size == 0 ||
          api_subgroup_size == 8 ||
          api_subgroup_size == 16 ||
          api_subgroup_size == 32);
}

void
fs_visitor::init()
{
   this->max_dispatch_width = 32;

   this->failed = false;
   this->fail_msg = NULL;

   this->payload_ = NULL;
   this->source_depth_to_render_target = false;
   this->first_non_payload_grf = 0;

   this->uniforms = 0;
   this->last_scratch = 0;
   this->push_constant_loc = NULL;

   memset(&this->shader_stats, 0, sizeof(this->shader_stats));

   this->grf_used = 0;
   this->spilled_any_registers = false;
}

fs_visitor::~fs_visitor()
{
   delete this->payload_;
}
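
// A minimal usage sketch (hypothetical, for illustration only; the real
// entry points live in the brw_compile_* functions, outside this file):
// constructing a visitor for a SIMD16 fragment shader might look like
//
//    fs_visitor v(compiler, &params, &wm_key, &wm_prog_data, nir,
//                 16 /* dispatch_width */, 1 /* max_polygons */,
//                 false /* needs_register_pressure */, debug_enabled);
//
// after which the caller runs the NIR-to-LIR emission and optimization
// passes before generating native code.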