/* -*- c++ -*- */
/*
 * Copyright © 2010-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef BRW_IR_FS_H
#define BRW_IR_FS_H

#include "brw_ir.h"
#include "brw_ir_allocator.h"

struct fs_inst : public exec_node {
private:
   fs_inst &operator=(const fs_inst &);

   void init(enum opcode opcode, uint8_t exec_width, const brw_reg &dst,
             const brw_reg *src, unsigned sources);

public:
   DECLARE_RALLOC_CXX_OPERATORS(fs_inst)

   fs_inst();
   fs_inst(enum opcode opcode, uint8_t exec_size);
   fs_inst(enum opcode opcode, uint8_t exec_size, const brw_reg &dst);
   fs_inst(enum opcode opcode, uint8_t exec_size, const brw_reg &dst,
           const brw_reg &src0);
   fs_inst(enum opcode opcode, uint8_t exec_size, const brw_reg &dst,
           const brw_reg &src0, const brw_reg &src1);
   fs_inst(enum opcode opcode, uint8_t exec_size, const brw_reg &dst,
           const brw_reg &src0, const brw_reg &src1, const brw_reg &src2);
   fs_inst(enum opcode opcode, uint8_t exec_size, const brw_reg &dst,
           const brw_reg src[], unsigned sources);
   fs_inst(const fs_inst &that);
   ~fs_inst();

   void resize_sources(uint8_t num_sources);

   bool is_send_from_grf() const;
   bool is_payload(unsigned arg) const;
   bool is_partial_write() const;
   unsigned components_read(unsigned i) const;
   unsigned size_read(int arg) const;
   bool can_do_source_mods(const struct intel_device_info *devinfo) const;
   bool can_do_cmod() const;
   bool can_change_types() const;
   bool has_source_and_destination_hazard() const;

   bool is_3src(const struct brw_compiler *compiler) const;
   bool is_math() const;
   bool is_control_flow_begin() const;
   bool is_control_flow_end() const;
   bool is_control_flow() const;
   bool is_commutative() const;
   bool is_raw_move() const;
   bool can_do_saturate() const;
   bool reads_accumulator_implicitly() const;
   bool writes_accumulator_implicitly(const struct intel_device_info *devinfo) const;

   /**
    * Instructions that use indirect addressing have additional register
    * regioning restrictions.
    */
   bool uses_indirect_addressing() const;

   void remove(bblock_t *block, bool defer_later_block_ip_updates = false);
   void insert_after(bblock_t *block, fs_inst *inst);
   void insert_before(bblock_t *block, fs_inst *inst);

   /**
    * True if the instruction has side effects other than writing to
    * its destination registers.  You are expected not to reorder or
    * optimize these out unless you know what you are doing.
    */
   bool has_side_effects() const;

   /**
    * True if the instruction might be affected by side effects of other
    * instructions.
    */
   bool is_volatile() const;

   /**
    * Return whether \p arg is a control source of a virtual instruction which
    * shouldn't contribute to the execution type and usual regioning
    * restriction calculations of arithmetic instructions.
    */
   bool is_control_source(unsigned arg) const;

   /**
    * Return the subset of flag registers read by the instruction as a bitset
    * with byte granularity.
    */
   unsigned flags_read(const intel_device_info *devinfo) const;

   /**
    * Return the subset of flag registers updated by the instruction (either
    * partially or fully) as a bitset with byte granularity.
    */
   unsigned flags_written(const intel_device_info *devinfo) const;

   /**
    * Return true if this instruction is a sampler message gathering residency
    * data.
    */
   bool has_sampler_residency() const;

   uint8_t sources; /**< Number of brw_reg sources. */

   /**
    * Execution size of the instruction.  This is used by the generator to
    * generate the correct binary for the given instruction.  Current valid
    * values are 1, 4, 8, 16, 32.
    */
   uint8_t exec_size;

   /**
    * Channel group from the hardware execution and predication mask that
    * should be applied to the instruction.  The subset of channel enable
    * signals (calculated from the EU control flow and predication state)
    * given by [group, group + exec_size) will be used to mask GRF writes and
    * any other side effects of the instruction.
    */
   uint8_t group;
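
   /* For instance (an illustrative common case, not an exhaustive rule):
    * when a SIMD32 instruction is lowered into two SIMD16 halves, the first
    * half executes with group == 0 and the second with group == 16.
    */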

   uint8_t mlen; /**< SEND message length */
   uint8_t ex_mlen; /**< SENDS extended message length */
   uint8_t sfid; /**< SFID for SEND instructions */
   /** The number of hardware registers used for a message header. */
   uint8_t header_size;
   uint8_t target; /**< MRT target. */
   uint32_t desc; /**< SEND[S] message descriptor immediate */
   uint32_t ex_desc; /**< SEND[S] extended message descriptor immediate */

   uint32_t offset; /**< spill/unspill offset or texture offset bitfield */
   unsigned size_written; /**< Data written to the destination register in bytes. */

   enum opcode opcode; /* BRW_OPCODE_* or FS_OPCODE_* */
   enum brw_conditional_mod conditional_mod; /**< BRW_CONDITIONAL_* */
   enum brw_predicate predicate;

   tgl_swsb sched; /**< Scheduling info. */

   union {
      struct {
         /* Chooses which flag subregister (f0.0 to f3.1) is used for
          * conditional mod and predication.
          */
         unsigned flag_subreg:3;

         /**
          * Systolic depth used by the DPAS instruction.
          */
         unsigned sdepth:4;

         /**
          * Repeat count used by the DPAS instruction.
          */
         unsigned rcount:4;

         unsigned pad:2;

         bool predicate_inverse:1;
         bool writes_accumulator:1; /**< instruction implicitly writes accumulator */
         bool force_writemask_all:1;
         bool no_dd_clear:1;
         bool no_dd_check:1;
         bool saturate:1;
         bool shadow_compare:1;
         bool check_tdr:1; /**< Only valid for SEND; turns it into a SENDC */
         bool send_has_side_effects:1; /**< Only valid for SHADER_OPCODE_SEND */
         bool send_is_volatile:1; /**< Only valid for SHADER_OPCODE_SEND */
         bool send_ex_desc_scratch:1; /**< Only valid for SHADER_OPCODE_SEND, use
                                       *   the scratch surface offset to build
                                       *   the extended descriptor
                                       */
         bool send_ex_bso:1; /**< Only for SHADER_OPCODE_SEND, use extended
                              *   bindless surface offset (26 bits instead of
                              *   20 bits)
                              */
         /**
          * The predication mask applied to this instruction is guaranteed to
          * be uniform and a superset of the execution mask of the present block.
          * No currently enabled channel will be disabled by the predicate.
          */
         bool predicate_trivial:1;
         bool eot:1;
         bool last_rt:1;
         bool pi_noperspective:1;   /**< Pixel interpolator noperspective flag */
         bool keep_payload_trailing_zeros:1;
         /**
          * Whether the parameters of the SEND instruction are built with
          * NoMask (for A32 messages this covers only the surface handle, for
          * A64 messages this covers the load address).
          */
         bool has_no_mask_send_params:1;
      };
      /** Raw access to all of the flag bits above as a single dword. */
      uint32_t bits;
   };

   brw_reg dst;
   brw_reg *src;
   brw_reg builtin_src[4];

#ifndef NDEBUG
   /** @{
    * Annotation for the generated IR.
    */
   const char *annotation;
   /** @} */
#endif
};
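
/* Construction sketch (illustrative only): thanks to
 * DECLARE_RALLOC_CXX_OPERATORS, fs_inst objects are allocated with placement
 * new on a ralloc memory context.  `mem_ctx', `dst', `src0' and `src1' below
 * are assumed to be a valid ralloc context and suitably built brw_regs:
 *
 *    fs_inst *add = new (mem_ctx) fs_inst(BRW_OPCODE_ADD, 16, dst,
 *                                         src0, src1);
 */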

/**
 * Make the execution of \p inst dependent on the evaluation of a possibly
 * inverted predicate.
 */
static inline fs_inst *
set_predicate_inv(enum brw_predicate pred, bool inverse,
                  fs_inst *inst)
{
   inst->predicate = pred;
   inst->predicate_inverse = inverse;
   return inst;
}

/**
 * Make the execution of \p inst dependent on the evaluation of a predicate.
 */
static inline fs_inst *
set_predicate(enum brw_predicate pred, fs_inst *inst)
{
   return set_predicate_inv(pred, false, inst);
}

/**
 * Write the result of evaluating the condition given by \p mod to a flag
 * register.
 */
static inline fs_inst *
set_condmod(enum brw_conditional_mod mod, fs_inst *inst)
{
   inst->conditional_mod = mod;
   return inst;
}

/**
 * Clamp the result of \p inst to the saturation range of its destination
 * datatype.
 */
static inline fs_inst *
set_saturate(bool saturate, fs_inst *inst)
{
   inst->saturate = saturate;
   return inst;
}
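
/* Usage sketch for the helpers above (illustrative only; `cmp' and `sel' are
 * assumed to be previously constructed instructions):
 *
 *    set_condmod(BRW_CONDITIONAL_GE, cmp);      // compare writes a flag
 *    set_predicate(BRW_PREDICATE_NORMAL, sel);  // select reads it back
 */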

/**
 * Return the number of dataflow registers written by the instruction (either
 * fully or partially) counted from 'floor(reg_offset(inst->dst) /
 * register_size)'.  The somewhat arbitrary register size unit is 4B for the
 * UNIFORM and IMM files and 32B for all other files.
 */
inline unsigned
regs_written(const fs_inst *inst)
{
   assert(inst->dst.file != UNIFORM && inst->dst.file != IMM);
   return DIV_ROUND_UP(reg_offset(inst->dst) % REG_SIZE +
                       inst->size_written -
                       MIN2(inst->size_written, reg_padding(inst->dst)),
                       REG_SIZE);
}
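
/* Worked example: a SIMD16 instruction writing one dword per channel to a
 * register-aligned VGRF has size_written == 64, so with REG_SIZE == 32 this
 * evaluates to DIV_ROUND_UP(0 + 64 - 0, 32) == 2 registers.
 */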

/**
 * Return the number of dataflow registers read by the instruction (either
 * fully or partially) counted from 'floor(reg_offset(inst->src[i]) /
 * register_size)'.  The somewhat arbitrary register size unit is 4B for the
 * UNIFORM file and 32B for all other files.
 */
inline unsigned
regs_read(const fs_inst *inst, unsigned i)
{
   if (inst->src[i].file == IMM)
      return 1;

   const unsigned reg_size = inst->src[i].file == UNIFORM ? 4 : REG_SIZE;
   return DIV_ROUND_UP(reg_offset(inst->src[i]) % reg_size +
                       inst->size_read(i) -
                       MIN2(inst->size_read(i), reg_padding(inst->src[i])),
                       reg_size);
}
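
/* Worked example: a contiguous SIMD8 dword source starting 16 bytes into a
 * VGRF has size_read(i) == 32, so it spans DIV_ROUND_UP(16 + 32 - 0, 32) == 2
 * registers even though it only covers one register's worth of data.
 */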

static inline enum brw_reg_type
get_exec_type(const fs_inst *inst)
{
   brw_reg_type exec_type = BRW_TYPE_B;

   for (int i = 0; i < inst->sources; i++) {
      if (inst->src[i].file != BAD_FILE &&
          !inst->is_control_source(i)) {
         const brw_reg_type t = get_exec_type(inst->src[i].type);
         if (brw_type_size_bytes(t) > brw_type_size_bytes(exec_type))
            exec_type = t;
         else if (brw_type_size_bytes(t) == brw_type_size_bytes(exec_type) &&
                  brw_type_is_float(t))
            exec_type = t;
      }
   }

   if (exec_type == BRW_TYPE_B)
      exec_type = inst->dst.type;

   assert(exec_type != BRW_TYPE_B);

   /* Promotion of the execution type to 32-bit for conversions from or to
    * half-float seems to be consistent with the following text from the
    * Cherryview PRM Vol. 7, "Execution Data Type":
    *
    * "When single precision and half precision floats are mixed between
    *  source operands or between source and destination operand [..] single
    *  precision float is the execution datatype."
    *
    * and from "Register Region Restrictions":
    *
    * "Conversion between Integer and HF (Half Float) must be DWord aligned
    *  and strided by a DWord on the destination."
    */
   if (brw_type_size_bytes(exec_type) == 2 &&
       inst->dst.type != exec_type) {
      if (exec_type == BRW_TYPE_HF)
         exec_type = BRW_TYPE_F;
      else if (inst->dst.type == BRW_TYPE_HF)
         exec_type = BRW_TYPE_D;
   }

   return exec_type;
}
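
/* Examples of the promotion above (illustrative): a MOV from HF to F or from
 * HF to W executes as F, while a MOV from W to HF executes as D.
 */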

static inline unsigned
get_exec_type_size(const fs_inst *inst)
{
   return brw_type_size_bytes(get_exec_type(inst));
}

static inline bool
is_send(const fs_inst *inst)
{
   return inst->mlen || inst->is_send_from_grf();
}

/**
 * Return whether the instruction isn't an ALU instruction and cannot be
 * assumed to complete in-order.
 */
static inline bool
is_unordered(const intel_device_info *devinfo, const fs_inst *inst)
{
   return is_send(inst) || (devinfo->ver < 20 && inst->is_math()) ||
          inst->opcode == BRW_OPCODE_DPAS ||
          (devinfo->has_64bit_float_via_math_pipe &&
           (get_exec_type(inst) == BRW_TYPE_DF ||
            inst->dst.type == BRW_TYPE_DF));
}

/**
 * Return whether the following regioning restriction applies to the specified
 * instruction.  From the Cherryview PRM Vol 7. "Register Region
 * Restrictions":
 *
 * "When source or destination datatype is 64b or operation is integer DWord
 *  multiply, regioning in Align1 must follow these rules:
 *
 *  1. Source and Destination horizontal stride must be aligned to the same qword.
 *  2. Regioning must ensure Src.Vstride = Src.Width * Src.Hstride.
 *  3. Source and Destination offset must be the same, except the case of
 *     scalar source."
 */
static inline bool
has_dst_aligned_region_restriction(const intel_device_info *devinfo,
                                   const fs_inst *inst,
                                   brw_reg_type dst_type)
{
   const brw_reg_type exec_type = get_exec_type(inst);
   /* Even though the hardware spec claims that "integer DWord multiply"
    * operations are restricted, empirical evidence and the behavior of the
    * simulator suggest that only 32x32-bit integer multiplication is
    * restricted.
    */
   const bool is_dword_multiply = !brw_type_is_float(exec_type) &&
      ((inst->opcode == BRW_OPCODE_MUL &&
        MIN2(brw_type_size_bytes(inst->src[0].type), brw_type_size_bytes(inst->src[1].type)) >= 4) ||
       (inst->opcode == BRW_OPCODE_MAD &&
        MIN2(brw_type_size_bytes(inst->src[1].type), brw_type_size_bytes(inst->src[2].type)) >= 4));

   if (brw_type_size_bytes(dst_type) > 4 || brw_type_size_bytes(exec_type) > 4 ||
       (brw_type_size_bytes(exec_type) == 4 && is_dword_multiply))
      return intel_device_info_is_9lp(devinfo) || devinfo->verx10 >= 125;

   else if (brw_type_is_float(dst_type))
      return devinfo->verx10 >= 125;

   else
      return false;
}

static inline bool
has_dst_aligned_region_restriction(const intel_device_info *devinfo,
                                   const fs_inst *inst)
{
   return has_dst_aligned_region_restriction(devinfo, inst, inst->dst.type);
}
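
/* Illustration (derived from the checks above, not from the PRM text): a
 * QWord operation such as a DF-typed MOV has a larger-than-dword execution
 * type, so the first case applies on Gfx9LP parts and on Gfx12.5+.
 */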

/**
 * Return true if the instruction can be potentially affected by the Xe2+
 * regioning restrictions that apply to integer types smaller than a dword.
 * The restriction isn't quoted here due to its length, see BSpec #56640 for
 * details.
 */
static inline bool
has_subdword_integer_region_restriction(const intel_device_info *devinfo,
                                        const fs_inst *inst,
                                        const brw_reg *srcs, unsigned num_srcs)
{
   if (devinfo->ver >= 20 &&
       brw_type_is_int(inst->dst.type) &&
       MAX2(byte_stride(inst->dst),
            brw_type_size_bytes(inst->dst.type)) < 4) {
      for (unsigned i = 0; i < num_srcs; i++) {
         if (brw_type_is_int(srcs[i].type) &&
             brw_type_size_bytes(srcs[i].type) < 4 &&
             byte_stride(srcs[i]) >= 4)
            return true;
      }
   }

   return false;
}

static inline bool
has_subdword_integer_region_restriction(const intel_device_info *devinfo,
                                        const fs_inst *inst)
{
   return has_subdword_integer_region_restriction(devinfo, inst,
                                                  inst->src, inst->sources);
}
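
/* Illustrative example: on Xe2 a packing move like mov(16) dst<1>:uw
 * src<2>:uw matches the checks above, since the destination region is
 * sub-dword (byte stride 2) while the word source is dword-strided
 * (byte stride 4).
 */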

/**
 * Return whether the LOAD_PAYLOAD instruction is a plain copy of bits from
 * the specified register file into a VGRF.
 *
 * This implies identity register regions without any source-destination
 * overlap, but otherwise has no implications on the location of sources and
 * destination in the register file: Gathering any number of portions from
 * multiple virtual registers in any order is allowed.
 */
inline bool
is_copy_payload(brw_reg_file file, const fs_inst *inst)
{
   if (inst->opcode != SHADER_OPCODE_LOAD_PAYLOAD ||
       inst->is_partial_write() || inst->saturate ||
       inst->dst.file != VGRF)
      return false;

   for (unsigned i = 0; i < inst->sources; i++) {
      if (inst->src[i].file != file ||
          inst->src[i].abs || inst->src[i].negate)
         return false;

      if (!inst->src[i].is_contiguous())
         return false;

      if (regions_overlap(inst->dst, inst->size_written,
                          inst->src[i], inst->size_read(i)))
         return false;
   }

   return true;
}

/**
 * Like is_copy_payload(), but the instruction is required to copy a single
 * contiguous block of registers from the given register file into the
 * destination without any reordering.
 */
inline bool
is_identity_payload(brw_reg_file file, const fs_inst *inst)
{
   if (is_copy_payload(file, inst)) {
      brw_reg reg = inst->src[0];

      for (unsigned i = 0; i < inst->sources; i++) {
         reg.type = inst->src[i].type;
         if (!inst->src[i].equals(reg))
            return false;

         reg = byte_offset(reg, inst->size_read(i));
      }

      return true;
   } else {
      return false;
   }
}

/**
 * Like is_copy_payload(), but the instruction is required to source data from
 * at least two disjoint VGRFs.
 *
 * This doesn't necessarily rule out the elimination of this instruction
 * through register coalescing, but due to limitations of the register
 * coalesce pass it might be impossible to do so directly until a later stage,
 * when the LOAD_PAYLOAD instruction is unrolled into a sequence of MOV
 * instructions.
 */
inline bool
is_multi_copy_payload(const fs_inst *inst)
{
   if (is_copy_payload(VGRF, inst)) {
      for (unsigned i = 0; i < inst->sources; i++) {
         if (inst->src[i].nr != inst->src[0].nr)
            return true;
      }
   }

   return false;
}

/**
 * Like is_identity_payload(), but the instruction is required to copy the
 * whole contents of a single VGRF into the destination.
 *
 * This means that there is a good chance that the instruction will be
 * eliminated through register coalescing, but it's neither a necessary nor a
 * sufficient condition for that to happen -- e.g. consider the case where
 * source and destination registers diverge due to other instructions in the
 * program overwriting part of their contents, which isn't something we can
 * predict up front based on a cheap strictly local test of the copy
 * instruction.
 */
inline bool
is_coalescing_payload(const brw::simple_allocator &alloc, const fs_inst *inst)
{
   return is_identity_payload(VGRF, inst) &&
          inst->src[0].offset == 0 &&
          alloc.sizes[inst->src[0].nr] * REG_SIZE == inst->size_written;
}

bool
has_bank_conflict(const struct brw_isa_info *isa, const fs_inst *inst);

/* Return the subset of flag registers that an instruction could
 * potentially read or write based on the execution controls and flag
 * subregister number of the instruction.
 */
static inline unsigned
brw_fs_flag_mask(const fs_inst *inst, unsigned width)
{
   assert(util_is_power_of_two_nonzero(width));
   const unsigned start = (inst->flag_subreg * 16 + inst->group) &
                          ~(width - 1);
   const unsigned end = start + ALIGN(inst->exec_size, width);
   return ((1 << DIV_ROUND_UP(end, 8)) - 1) & ~((1 << (start / 8)) - 1);
}
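
/* Worked example: for a SIMD16 instruction with flag_subreg == 1 and
 * group == 0, start == (1 * 16 + 0) & ~15 == 16 and end == 32, so the mask
 * covers flag bytes 2 and 3: ((1 << 4) - 1) & ~((1 << 2) - 1) == 0xc.
 */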

/* Return a mask with the \p n least significant bits set, saturating to ~0u
 * for shift counts that would otherwise be undefined behavior.
 */
static inline unsigned
brw_fs_bit_mask(unsigned n)
{
   return (n >= CHAR_BIT * sizeof(brw_fs_bit_mask(n)) ? ~0u : (1u << n) - 1);
}

static inline unsigned
brw_fs_flag_mask(const brw_reg &r, unsigned sz)
{
   if (r.file == ARF) {
      const unsigned start = (r.nr - BRW_ARF_FLAG) * 4 + r.subnr;
      const unsigned end = start + sz;
      return brw_fs_bit_mask(end) & ~brw_fs_bit_mask(start);
   } else {
      return 0;
   }
}

#endif