xref: /aosp_15_r20/external/mesa3d/src/panfrost/midgard/midgard.h (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /* Author(s):
2  *   Connor Abbott
3  *   Alyssa Rosenzweig
4  *
5  * Copyright (c) 2013 Connor Abbott ([email protected])
6  * Copyright (c) 2018 Alyssa Rosenzweig ([email protected])
7  * Copyright (C) 2019-2020 Collabora, Ltd.
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a copy
10  * of this software and associated documentation files (the "Software"), to deal
11  * in the Software without restriction, including without limitation the rights
12  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13  * copies of the Software, and to permit persons to whom the Software is
14  * furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice shall be included in
17  * all copies or substantial portions of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25  * THE SOFTWARE.
26  */
27 
28 #ifndef __midgard_h__
29 #define __midgard_h__
30 
31 #include <stdbool.h>
32 #include <stdint.h>
33 
/* Bitmask flags for the midgard_debug variable (driver debug options).
 * NOTE(review): bit 0x0001 is unused here — presumably claimed by a flag
 * defined elsewhere; confirm before reusing it. */
#define MIDGARD_DBG_SHADERS  0x0002
#define MIDGARD_DBG_SHADERDB 0x0004
#define MIDGARD_DBG_INORDER  0x0008
#define MIDGARD_DBG_VERBOSE  0x0010
#define MIDGARD_DBG_INTERNAL 0x0020

/* Global combination of MIDGARD_DBG_* bits; defined in the compiler proper. */
extern int midgard_debug;
41 
/* The three categories of instruction words appearing in a Midgard shader
 * binary (see also the TAG_* values below, which encode word type + size). */
typedef enum {
   midgard_word_type_alu,
   midgard_word_type_load_store,
   midgard_word_type_texture
} midgard_word_type;
47 
/* Selector for the ALU execution units within a bundle (vmul, sadd, smul,
 * vadd, lut), listed here in their encoding order. */
typedef enum {
   midgard_alu_vmul,
   midgard_alu_sadd,
   midgard_alu_smul,
   midgard_alu_vadd,
   midgard_alu_lut
} midgard_alu;
55 
/* 4-bit instruction word tags. ALU tags come in 4/8/12/16 (quadword) sizes,
 * with _WRITEOUT variants for bundles containing a writeout branch. */
enum {
   TAG_INVALID = 0x0,
   TAG_BREAK = 0x1,
   TAG_TEXTURE_4_VTX = 0x2,
   TAG_TEXTURE_4 = 0x3,
   TAG_TEXTURE_4_BARRIER = 0x4,
   TAG_LOAD_STORE_4 = 0x5,
   TAG_UNKNOWN_1 = 0x6,
   TAG_UNKNOWN_2 = 0x7,
   TAG_ALU_4 = 0x8,
   TAG_ALU_8 = 0x9,
   TAG_ALU_12 = 0xA,
   TAG_ALU_16 = 0xB,
   TAG_ALU_4_WRITEOUT = 0xC,
   TAG_ALU_8_WRITEOUT = 0xD,
   TAG_ALU_12_WRITEOUT = 0xE,
   TAG_ALU_16_WRITEOUT = 0xF
} ;
74 
75 /*
76  * ALU words
77  */
78 
/* ALU opcodes. The numeric values are fixed hardware encodings; gaps are
 * unknown/unused encodings and must not be renumbered. Rounding-mode
 * suffixes: _rte (to nearest even), _rtz (toward zero), _rtn (toward
 * negative infinity), _rtp (toward positive infinity). */
typedef enum {
   midgard_alu_op_fadd = 0x10, /* round to even */
   midgard_alu_op_fadd_rtz = 0x11,
   midgard_alu_op_fadd_rtn = 0x12,
   midgard_alu_op_fadd_rtp = 0x13,
   midgard_alu_op_fmul = 0x14, /* round to even */
   midgard_alu_op_fmul_rtz = 0x15,
   midgard_alu_op_fmul_rtn = 0x16,
   midgard_alu_op_fmul_rtp = 0x17,

   midgard_alu_op_fmin = 0x28, /* if an operand is NaN, propagate the other */
   midgard_alu_op_fmin_nan = 0x29,    /* if an operand is NaN, propagate it */
   midgard_alu_op_fabsmin = 0x2A,     /* min(abs(a,b)) */
   midgard_alu_op_fabsmin_nan = 0x2B, /* min_nan(abs(a,b)) */
   midgard_alu_op_fmax = 0x2C, /* if an operand is NaN, propagate the other */
   midgard_alu_op_fmax_nan = 0x2D,    /* if an operand is NaN, propagate it */
   midgard_alu_op_fabsmax = 0x2E,     /* max(abs(a,b)) */
   midgard_alu_op_fabsmax_nan = 0x2F, /* max_nan(abs(a,b)) */

   midgard_alu_op_fmov = 0x30, /* fmov_rte */
   midgard_alu_op_fmov_rtz = 0x31,
   midgard_alu_op_fmov_rtn = 0x32,
   midgard_alu_op_fmov_rtp = 0x33,
   midgard_alu_op_froundeven = 0x34,
   midgard_alu_op_ftrunc = 0x35,
   midgard_alu_op_ffloor = 0x36,
   midgard_alu_op_fceil = 0x37,
   midgard_alu_op_ffma = 0x38, /* rte */
   midgard_alu_op_ffma_rtz = 0x39,
   midgard_alu_op_ffma_rtn = 0x3A,
   midgard_alu_op_ffma_rtp = 0x3B,
   midgard_alu_op_fdot3 = 0x3C,
   midgard_alu_op_fdot3r = 0x3D,
   midgard_alu_op_fdot4 = 0x3E,
   midgard_alu_op_freduce = 0x3F,

   midgard_alu_op_iadd = 0x40,
   midgard_alu_op_ishladd = 0x41, /* (a<<1) + b */
   midgard_alu_op_isub = 0x46,
   midgard_alu_op_ishlsub = 0x47, /* (a<<1) - b */
   midgard_alu_op_iaddsat = 0x48,
   midgard_alu_op_uaddsat = 0x49,
   midgard_alu_op_isubsat = 0x4E,
   midgard_alu_op_usubsat = 0x4F,

   midgard_alu_op_imul = 0x58,
   /* Multiplies two ints and stores the result in the next larger datasize. */
   midgard_alu_op_iwmul = 0x59,  /* sint * sint = sint */
   midgard_alu_op_uwmul = 0x5A,  /* uint * uint = uint */
   midgard_alu_op_iuwmul = 0x5B, /* sint * uint = sint */

   midgard_alu_op_imin = 0x60,
   midgard_alu_op_umin = 0x61,
   midgard_alu_op_imax = 0x62,
   midgard_alu_op_umax = 0x63,
   midgard_alu_op_iavg = 0x64,
   midgard_alu_op_uavg = 0x65,
   midgard_alu_op_iravg = 0x66,
   midgard_alu_op_uravg = 0x67,
   midgard_alu_op_iasr = 0x68,
   midgard_alu_op_ilsr = 0x69,
   midgard_alu_op_ishlsat = 0x6C,
   midgard_alu_op_ushlsat = 0x6D,
   midgard_alu_op_ishl = 0x6E,

   midgard_alu_op_iand = 0x70,
   midgard_alu_op_ior = 0x71,
   midgard_alu_op_inand = 0x72,   /* ~(a & b), for inot let a = b */
   midgard_alu_op_inor = 0x73,    /* ~(a | b) */
   midgard_alu_op_iandnot = 0x74, /* (a & ~b), used for not/b2f */
   midgard_alu_op_iornot = 0x75,  /* (a | ~b) */
   midgard_alu_op_ixor = 0x76,
   midgard_alu_op_inxor = 0x77,   /* ~(a ^ b) */
   midgard_alu_op_iclz = 0x78,    /* Number of zeroes on left */
   midgard_alu_op_ipopcnt = 0x7A, /* Population count */
   midgard_alu_op_imov = 0x7B,
   midgard_alu_op_iabsdiff = 0x7C,
   midgard_alu_op_uabsdiff = 0x7D,
   midgard_alu_op_ichoose =
      0x7E, /* vector, component number - dupe for shuffle() */

   /* Comparisons write a boolean mask result. */
   midgard_alu_op_feq = 0x80,
   midgard_alu_op_fne = 0x81,
   midgard_alu_op_flt = 0x82,
   midgard_alu_op_fle = 0x83,
   midgard_alu_op_fball_eq = 0x88,
   midgard_alu_op_fball_neq = 0x89,
   midgard_alu_op_fball_lt = 0x8A,  /* all(lessThan(.., ..)) */
   midgard_alu_op_fball_lte = 0x8B, /* all(lessThanEqual(.., ..)) */

   midgard_alu_op_fbany_eq = 0x90,
   midgard_alu_op_fbany_neq = 0x91,
   midgard_alu_op_fbany_lt = 0x92,  /* any(lessThan(.., ..)) */
   midgard_alu_op_fbany_lte = 0x93, /* any(lessThanEqual(.., ..)) */

   /* Float -> integer conversions, one per rounding mode. */
   midgard_alu_op_f2i_rte = 0x98,
   midgard_alu_op_f2i_rtz = 0x99,
   midgard_alu_op_f2i_rtn = 0x9A,
   midgard_alu_op_f2i_rtp = 0x9B,
   midgard_alu_op_f2u_rte = 0x9C,
   midgard_alu_op_f2u_rtz = 0x9D,
   midgard_alu_op_f2u_rtn = 0x9E,
   midgard_alu_op_f2u_rtp = 0x9F,

   midgard_alu_op_ieq = 0xA0,
   midgard_alu_op_ine = 0xA1,
   midgard_alu_op_ult = 0xA2,
   midgard_alu_op_ule = 0xA3,
   midgard_alu_op_ilt = 0xA4,
   midgard_alu_op_ile = 0xA5,
   midgard_alu_op_iball_eq = 0xA8,
   midgard_alu_op_iball_neq = 0xA9,
   midgard_alu_op_uball_lt = 0xAA,
   midgard_alu_op_uball_lte = 0xAB,
   midgard_alu_op_iball_lt = 0xAC,
   midgard_alu_op_iball_lte = 0xAD,

   midgard_alu_op_ibany_eq = 0xB0,
   midgard_alu_op_ibany_neq = 0xB1,
   midgard_alu_op_ubany_lt = 0xB2,
   midgard_alu_op_ubany_lte = 0xB3,
   midgard_alu_op_ibany_lt = 0xB4,  /* any(lessThan(.., ..)) */
   midgard_alu_op_ibany_lte = 0xB5, /* any(lessThanEqual(.., ..)) */
   midgard_alu_op_i2f_rte = 0xB8,
   midgard_alu_op_i2f_rtz = 0xB9,
   midgard_alu_op_i2f_rtn = 0xBA,
   midgard_alu_op_i2f_rtp = 0xBB,
   midgard_alu_op_u2f_rte = 0xBC,
   midgard_alu_op_u2f_rtz = 0xBD,
   midgard_alu_op_u2f_rtn = 0xBE,
   midgard_alu_op_u2f_rtp = 0xBF,

   /* All csel* instructions use as a condition the output of the previous
    * vector or scalar unit, thus it must run on the second pipeline stage
    * and be scheduled to the same bundle as the opcode that it uses as a
    * condition. */
   midgard_alu_op_icsel_v = 0xC0,
   midgard_alu_op_icsel = 0xC1,
   midgard_alu_op_fcsel_v = 0xC4,
   midgard_alu_op_fcsel = 0xC5,
   midgard_alu_op_froundaway = 0xC6, /* round to nearest away */

   /* Transcendentals; the _pt1/_pt2 suffixes are multi-part sequences. */
   midgard_alu_op_fatan2_pt2 = 0xE8,
   midgard_alu_op_fpow_pt1 = 0xEC,
   midgard_alu_op_fpown_pt1 = 0xED,
   midgard_alu_op_fpowr_pt1 = 0xEE,

   midgard_alu_op_frcp = 0xF0,
   midgard_alu_op_frsqrt = 0xF2,
   midgard_alu_op_fsqrt = 0xF3,
   midgard_alu_op_fexp2 = 0xF4,
   midgard_alu_op_flog2 = 0xF5,
   midgard_alu_op_fsinpi = 0xF6, /* sin(pi * x) */
   midgard_alu_op_fcospi = 0xF7, /* cos(pi * x) */
   midgard_alu_op_fatan2_pt1 = 0xF9,
} midgard_alu_op;
235 
/* Float output modifiers, applied to a result before the destination write. */
typedef enum {
   midgard_outmod_none = 0,
   midgard_outmod_clamp_0_inf = 1, /* max(x, 0.0), NaNs become +0.0 */
   midgard_outmod_clamp_m1_1 = 2,  /* clamp(x, -1.0, 1.0), NaNs become -1.0 */
   midgard_outmod_clamp_0_1 = 3    /* clamp(x, 0.0, 1.0), NaNs become +0.0 */
} midgard_outmod_float;
242 
/* Integer output modifiers, applied to the resulting value that's going to be
 * stored in the dest reg. This should be set to midgard_outmod_keeplo when
 * shrink_mode is midgard_shrink_mode_none. */
typedef enum {
   midgard_outmod_ssat = 0,   /* signed saturate */
   midgard_outmod_usat = 1,   /* unsigned saturate */
   midgard_outmod_keeplo = 2, /* Keep low half */
   midgard_outmod_keephi = 3, /* Keep high half */
} midgard_outmod_int;
252 
/* Lane width for an ALU word: registers are treated as vectors of elements
 * of this size. */
typedef enum {
   midgard_reg_mode_8 = 0,
   midgard_reg_mode_16 = 1,
   midgard_reg_mode_32 = 2,
   midgard_reg_mode_64 = 3
} midgard_reg_mode;
259 
/* Where a narrowed (shrunk) result is placed in the destination register;
 * _none disables shrinking (see the midgard_outmod_int note above). */
typedef enum {
   midgard_shrink_mode_lower = 0,
   midgard_shrink_mode_upper = 1,
   midgard_shrink_mode_none = 2
} midgard_shrink_mode;
265 
/* Integer source modifiers. Only used if midgard_src_expand_mode is set to
 * one of midgard_src_expand_*. */
typedef enum {
   midgard_int_sign_extend = 0,
   midgard_int_zero_extend = 1,
   midgard_int_replicate = 2,
   midgard_int_left_shift = 3
} midgard_int_mod;
273 
/* Unlike midgard_int_mod, float modifiers are applied after the expansion
 * happens, so they don't depend on midgard_src_expand_mode. */
#define MIDGARD_FLOAT_MOD_ABS (1 << 0)
#define MIDGARD_FLOAT_MOD_NEG (1 << 1)
278 
/* The expand options depend on both midgard_int_mod and midgard_reg_mode.  For
 * example, a vec4 with midgard_int_sign_extend and midgard_src_expand_low is
 * treated as a vec8 and each 16-bit element from the low 64-bits is then sign
 * extended, resulting in a vec4 where each 32-bit element corresponds to a
 * 16-bit element from the low 64-bits of the input vector. */
typedef enum {
   midgard_src_passthrough = 0,
   midgard_src_rep_low = 1,     /* replicate lower 64 bits to higher 64 bits */
   midgard_src_rep_high = 2,    /* replicate higher 64 bits to lower 64 bits */
   midgard_src_swap = 3,        /* swap lower 64 bits with higher 64 bits */
   midgard_src_expand_low = 4,  /* expand low 64 bits */
   midgard_src_expand_high = 5, /* expand high 64 bits */
   midgard_src_expand_low_swap = 6,  /* expand low 64 bits, then swap */
   midgard_src_expand_high_swap = 7, /* expand high 64 bits, then swap */
} midgard_src_expand_mode;
294 
/* Does this expand mode expand the source (as opposed to only shuffling its
 * 64-bit halves)? Relies on the contiguous encoding 4..7 above.
 * Arguments and expansions are fully parenthesized: without them an argument
 * like (x & 7) would misparse, since `&` binds looser than `>=`. */
#define INPUT_EXPANDS(a)                                                       \
   ((a) >= midgard_src_expand_low && (a) <= midgard_src_expand_high_swap)

/* Does this expand mode swap the 64-bit halves? Covers midgard_src_swap plus
 * the *_swap expand variants (encodings 6 and 7). */
#define INPUT_SWAPS(a)                                                         \
   ((a) == midgard_src_swap || (a) >= midgard_src_expand_low_swap)
300 
/* 13-bit packed source descriptor for a vector ALU word. */
typedef struct __attribute__((__packed__)) {
   /* Either a midgard_int_mod value or MIDGARD_FLOAT_MOD_* bits, depending
    * on the type of op */
   unsigned mod                        : 2;
   midgard_src_expand_mode expand_mode : 3;
   /* 2 bits per component: selects the source component for each lane */
   unsigned swizzle                    : 8;
} midgard_vector_alu_src;
308 
/* 48-bit vector ALU word. src1/src2 are packed midgard_vector_alu_src
 * descriptors (13 bits each). outmod is midgard_outmod_float or
 * midgard_outmod_int depending on the op. */
typedef struct __attribute__((__packed__)) {
   midgard_alu_op op               : 8;
   midgard_reg_mode reg_mode       : 2;
   unsigned src1                   : 13;
   unsigned src2                   : 13;
   midgard_shrink_mode shrink_mode : 2;
   unsigned outmod                 : 2;
   unsigned mask                   : 8; /* per-byte writemask */
} midgard_vector_alu;
318 
/* 6-bit packed source descriptor for a scalar ALU word. */
typedef struct __attribute__((__packed__)) {
   /* midgard_int_mod or MIDGARD_FLOAT_MOD_* bits, as for the vector form */
   unsigned mod       : 2;
   bool full          : 1; /* 0 = 16-bit, 1 = 32-bit */
   unsigned component : 3; /* which component of the source register */
} midgard_scalar_alu_src;
324 
/* 32-bit scalar ALU word. src1/src2 hold packed midgard_scalar_alu_src
 * descriptors. */
typedef struct __attribute__((__packed__)) {
   midgard_alu_op op : 8;
   unsigned src1     : 6;
   /* last 5 bits are used when src2 is an immediate */
   unsigned src2             : 11;
   unsigned reserved         : 1;
   unsigned outmod           : 2;
   bool output_full          : 1; /* 0 = 16-bit dest, 1 = 32-bit dest */
   unsigned output_component : 3;
} midgard_scalar_alu;
335 
/* 16-bit register-selection word paired with each ALU instruction. */
typedef struct __attribute__((__packed__)) {
   unsigned src1_reg : 5;
   unsigned src2_reg : 5;
   unsigned out_reg  : 5;
   bool src2_imm     : 1; /* src2 field holds an inline immediate */
} midgard_reg_info;
342 
/* In addition to conditional branches and jumps (unconditional branches),
 * Midgard implements a bit of fixed function functionality used in fragment
 * shaders via specially crafted branches. These have special branch opcodes,
 * which perform a fixed-function operation and/or use the results of a
 * fixed-function operation as the branch condition.  */

typedef enum {
   /* Regular branches */
   midgard_jmp_writeout_op_branch_uncond = 1,
   midgard_jmp_writeout_op_branch_cond = 2,

   /* In a fragment shader, execute a discard_if instruction, with the
    * corresponding condition code. Terminates the shader, so generally
    * set the branch target to out of the shader */
   midgard_jmp_writeout_op_discard = 4,

   /* Branch if the tilebuffer is not yet ready. At the beginning of a
    * fragment shader that reads from the tile buffer, for instance via
    * ARM_shader_framebuffer_fetch or EXT_pixel_local_storage, this branch
    * operation should be used as a loop. An instruction like
    * "br.tilebuffer.always -1" does the trick, corresponding to
    * "while(!is_tilebuffer_ready);" */
   midgard_jmp_writeout_op_tilebuffer_pending = 6,

   /* In a fragment shader, try to write out the value pushed to r0 to the
    * tilebuffer, subject to state in r1.z and r1.w. If this
    * succeeds, the shader terminates. If it fails, it branches to the
    * specified branch target. Generally, this should be used in a loop to
    * itself, acting as "do { write(r0); } while(!write_successful);" */
   midgard_jmp_writeout_op_writeout = 7,
} midgard_jmp_writeout_op;
374 
/* 2-bit condition codes used by conditional branch encodings. */
typedef enum {
   midgard_condition_write0 = 0,

   /* These condition codes denote a conditional branch on FALSE and on
    * TRUE respectively */
   midgard_condition_false = 1,
   midgard_condition_true = 2,

   /* This condition code always branches. For a pure branch, the
    * unconditional branch coding should be used instead, but for
    * fixed-function branch opcodes, this is still useful */
   midgard_condition_always = 3,
} midgard_condition;
388 
/* Call behaviour attached to a branch: a plain jump (default), a call, or a
 * return. Encoding 0 is unused here — presumably reserved; confirm against
 * the disassembler before relying on it. */
enum midgard_call_mode {
   midgard_call_mode_default = 1,
   midgard_call_mode_call = 2,
   midgard_call_mode_return = 3
};
394 
/* 16-bit compact unconditional branch (op == branch_uncond). The offset is a
 * signed 7-bit displacement. */
typedef struct __attribute__((__packed__)) {
   midgard_jmp_writeout_op op       : 3; /* == branch_uncond */
   unsigned dest_tag                : 4; /* tag of branch destination */
   enum midgard_call_mode call_mode : 2;
   int offset                       : 7;
} midgard_branch_uncond;
401 
/* 16-bit compact conditional branch (op == branch_cond); cond is a
 * midgard_condition code. */
typedef struct __attribute__((__packed__)) {
   midgard_jmp_writeout_op op : 3; /* == branch_cond */
   unsigned dest_tag          : 4; /* tag of branch destination */
   int offset                 : 7; /* signed displacement */
   midgard_condition cond     : 2;
} midgard_branch_cond;
408 
/* 48-bit extended branch: wider signed offset plus a 16-bit condition LUT. */
typedef struct __attribute__((__packed__)) {
   midgard_jmp_writeout_op op       : 3; /* == branch_cond */
   unsigned dest_tag                : 4; /* tag of branch destination */
   enum midgard_call_mode call_mode : 2;
   signed offset                    : 23;

   /* Extended branches permit inputting up to 4 conditions loaded into
    * r31 (two in r31.w and two in r31.x). In the most general case, we
    * specify a function f(A, B, C, D) mapping 4 1-bit conditions to a
    * single 1-bit branch criteria. Note that the domain of f has 2^(2^4)
    * elements, each mapping to 1-bit of output, so we can trivially
    * construct a Godel numbering of f as a (2^4)=16-bit integer. This
    * 16-bit integer serves as a lookup table to compute f, subject to
    * some swaps for ordering.
    *
    * Interestingly, the standard 2-bit condition codes are also a LUT with
    * the same format (2^1-bit), but it's usually easier to use enums. */

   unsigned cond : 16;
} midgard_branch_extended;
429 
/* 16-bit writeout branch word (op == writeout); remaining bits undecoded. */
typedef struct __attribute__((__packed__)) {
   midgard_jmp_writeout_op op : 3; /* == writeout */
   unsigned unknown           : 13;
} midgard_writeout;
434 
435 /*
436  * Load/store words
437  */
438 
/* Load/store unit opcodes. As with the ALU opcodes, the numeric values are
 * fixed hardware encodings and gaps must not be renumbered. "_be" variants
 * are big-endian; "_bswapN" variants byte-swap in N-byte groups. */
typedef enum {
   midgard_op_ld_st_noop = 0x03,

   /* Unpacks a colour from a native format to <format> */
   midgard_op_unpack_colour_f32 = 0x04,
   midgard_op_unpack_colour_f16 = 0x05,
   midgard_op_unpack_colour_u32 = 0x06,
   midgard_op_unpack_colour_s32 = 0x07,

   /* Packs a colour from <format> to a native format */
   midgard_op_pack_colour_f32 = 0x08,
   midgard_op_pack_colour_f16 = 0x09,
   midgard_op_pack_colour_u32 = 0x0A,
   midgard_op_pack_colour_s32 = 0x0B,

   /* Computes the effective address of a mem address expression */
   midgard_op_lea = 0x0C,

   /* Converts image coordinates into mem address */
   midgard_op_lea_image = 0x0D,

   /* Unclear why this is on the L/S unit, but moves fp32 cube map
    * coordinates in r27 to its cube map texture coordinate destination
    * (e.g r29). */

   midgard_op_ld_cubemap_coords = 0x0E,

   /* A mov between registers that the ldst pipeline can access */
   midgard_op_ldst_mov = 0x10,

   /* The L/S unit can do perspective division a clock faster than the ALU
    * if you're lucky. Put the vec4 in r27, and call with 0x24 as the
    * unknown state; the output will be <x/w, y/w, z/w, 1>. Replace w with
    * z for the z version */
   midgard_op_ldst_perspective_div_y = 0x11,
   midgard_op_ldst_perspective_div_z = 0x12,
   midgard_op_ldst_perspective_div_w = 0x13,

   /* val in r27.y, address embedded, outputs result to argument. Invert val for
      sub. Let val = +-1 for inc/dec. */
   midgard_op_atomic_add = 0x40,
   midgard_op_atomic_add64 = 0x41,
   midgard_op_atomic_add_be = 0x42,
   midgard_op_atomic_add64_be = 0x43,

   midgard_op_atomic_and = 0x44,
   midgard_op_atomic_and64 = 0x45,
   midgard_op_atomic_and_be = 0x46,
   midgard_op_atomic_and64_be = 0x47,
   midgard_op_atomic_or = 0x48,
   midgard_op_atomic_or64 = 0x49,
   midgard_op_atomic_or_be = 0x4A,
   midgard_op_atomic_or64_be = 0x4B,
   midgard_op_atomic_xor = 0x4C,
   midgard_op_atomic_xor64 = 0x4D,
   midgard_op_atomic_xor_be = 0x4E,
   midgard_op_atomic_xor64_be = 0x4F,

   midgard_op_atomic_imin = 0x50,
   midgard_op_atomic_imin64 = 0x51,
   midgard_op_atomic_imin_be = 0x52,
   midgard_op_atomic_imin64_be = 0x53,
   midgard_op_atomic_umin = 0x54,
   midgard_op_atomic_umin64 = 0x55,
   midgard_op_atomic_umin_be = 0x56,
   midgard_op_atomic_umin64_be = 0x57,
   midgard_op_atomic_imax = 0x58,
   midgard_op_atomic_imax64 = 0x59,
   midgard_op_atomic_imax_be = 0x5A,
   midgard_op_atomic_imax64_be = 0x5B,
   midgard_op_atomic_umax = 0x5C,
   midgard_op_atomic_umax64 = 0x5D,
   midgard_op_atomic_umax_be = 0x5E,
   midgard_op_atomic_umax64_be = 0x5F,

   midgard_op_atomic_xchg = 0x60,
   midgard_op_atomic_xchg64 = 0x61,
   midgard_op_atomic_xchg_be = 0x62,
   midgard_op_atomic_xchg64_be = 0x63,

   midgard_op_atomic_cmpxchg = 0x64,
   midgard_op_atomic_cmpxchg64 = 0x65,
   midgard_op_atomic_cmpxchg_be = 0x66,
   midgard_op_atomic_cmpxchg64_be = 0x67,

   /* Used for compute shader's __global arguments, __local
    * variables (or for register spilling) */

   midgard_op_ld_u8 = 0x80,         /* zero extends */
   midgard_op_ld_i8 = 0x81,         /* sign extends */
   midgard_op_ld_u16 = 0x84,        /* zero extends */
   midgard_op_ld_i16 = 0x85,        /* sign extends */
   midgard_op_ld_u16_be = 0x86,     /* zero extends, big endian */
   midgard_op_ld_i16_be = 0x87,     /* sign extends, big endian */
   midgard_op_ld_32 = 0x88,         /* short2, int, float */
   midgard_op_ld_32_bswap2 = 0x89,  /* 16-bit big endian vector */
   midgard_op_ld_32_bswap4 = 0x8A,  /* 32-bit big endian scalar */
   midgard_op_ld_64 = 0x8C,         /* int2, float2, long */
   midgard_op_ld_64_bswap2 = 0x8D,  /* 16-bit big endian vector */
   midgard_op_ld_64_bswap4 = 0x8E,  /* 32-bit big endian vector */
   midgard_op_ld_64_bswap8 = 0x8F,  /* 64-bit big endian scalar */
   midgard_op_ld_128 = 0x90,        /* float4, long2 */
   midgard_op_ld_128_bswap2 = 0x91, /* 16-bit big endian vector */
   midgard_op_ld_128_bswap4 = 0x92, /* 32-bit big endian vector */
   midgard_op_ld_128_bswap8 = 0x93, /* 64-bit big endian vector */

   /* Attribute and varying loads, by destination type. */
   midgard_op_ld_attr_32 = 0x94,
   midgard_op_ld_attr_16 = 0x95,
   midgard_op_ld_attr_32u = 0x96,
   midgard_op_ld_attr_32i = 0x97,
   midgard_op_ld_vary_32 = 0x98,
   midgard_op_ld_vary_16 = 0x99,
   midgard_op_ld_vary_32u = 0x9A,
   midgard_op_ld_vary_32i = 0x9B,

   /* This instruction behaves differently depending if the gpu is a v4
    * or a newer gpu. The main difference hinges on which values of the
    * second argument are valid for each gpu.
    * TODO: properly document and decode each possible value for the
    * second argument. */
   midgard_op_ld_special_32f = 0x9C,
   midgard_op_ld_special_16f = 0x9D,
   midgard_op_ld_special_32u = 0x9E,
   midgard_op_ld_special_32i = 0x9F,

   /* The distinction between these ops is the alignment
    * requirement / accompanying shift. Thus, the offset to
    * ld_ubo_128 is in 16-byte units and can load 128-bit. The
    * offset to ld_ubo_64 is in 8-byte units; ld_ubo_32 in 4-byte
    * units. */
   midgard_op_ld_ubo_u8 = 0xA0,     /* theoretical */
   midgard_op_ld_ubo_i8 = 0xA1,     /* theoretical */
   midgard_op_ld_ubo_u16 = 0xA4,    /* theoretical */
   midgard_op_ld_ubo_i16 = 0xA5,    /* theoretical */
   midgard_op_ld_ubo_u16_be = 0xA6, /* theoretical */
   midgard_op_ld_ubo_i16_be = 0xA7, /* theoretical */
   midgard_op_ld_ubo_32 = 0xA8,
   midgard_op_ld_ubo_32_bswap2 = 0xA9,
   midgard_op_ld_ubo_32_bswap4 = 0xAA,
   midgard_op_ld_ubo_64 = 0xAC,
   midgard_op_ld_ubo_64_bswap2 = 0xAD,
   midgard_op_ld_ubo_64_bswap4 = 0xAE,
   midgard_op_ld_ubo_64_bswap8 = 0xAF,
   midgard_op_ld_ubo_128 = 0xB0,
   midgard_op_ld_ubo_128_bswap2 = 0xB1,
   midgard_op_ld_ubo_128_bswap4 = 0xB2,
   midgard_op_ld_ubo_128_bswap8 = 0xB3,

   midgard_op_ld_image_32f = 0xB4,
   midgard_op_ld_image_16f = 0xB5,
   midgard_op_ld_image_32u = 0xB6,
   midgard_op_ld_image_32i = 0xB7,

   /* Only works on v5 or newer.
    * Older cards must use ld_special with tilebuffer selectors. */
   midgard_op_ld_tilebuffer_32f = 0xB8,
   midgard_op_ld_tilebuffer_16f = 0xB9,
   midgard_op_ld_tilebuffer_raw = 0xBA,

   midgard_op_st_u8 = 0xC0,         /* zero extends */
   midgard_op_st_i8 = 0xC1,         /* sign extends */
   midgard_op_st_u16 = 0xC4,        /* zero extends */
   midgard_op_st_i16 = 0xC5,        /* sign extends */
   midgard_op_st_u16_be = 0xC6,     /* zero extends, big endian */
   midgard_op_st_i16_be = 0xC7,     /* sign extends, big endian */
   midgard_op_st_32 = 0xC8,         /* short2, int, float */
   midgard_op_st_32_bswap2 = 0xC9,  /* 16-bit big endian vector */
   midgard_op_st_32_bswap4 = 0xCA,  /* 32-bit big endian scalar */
   midgard_op_st_64 = 0xCC,         /* int2, float2, long */
   midgard_op_st_64_bswap2 = 0xCD,  /* 16-bit big endian vector */
   midgard_op_st_64_bswap4 = 0xCE,  /* 32-bit big endian vector */
   midgard_op_st_64_bswap8 = 0xCF,  /* 64-bit big endian scalar */
   midgard_op_st_128 = 0xD0,        /* float4, long2 */
   midgard_op_st_128_bswap2 = 0xD1, /* 16-bit big endian vector */
   midgard_op_st_128_bswap4 = 0xD2, /* 32-bit big endian vector */
   midgard_op_st_128_bswap8 = 0xD3, /* 64-bit big endian vector */

   midgard_op_st_vary_32 = 0xD4,
   midgard_op_st_vary_16 = 0xD5,
   midgard_op_st_vary_32u = 0xD6,
   midgard_op_st_vary_32i = 0xD7,

   /* Value to st in r27, location r26.w as short2 */
   midgard_op_st_image_32f = 0xD8,
   midgard_op_st_image_16f = 0xD9,
   midgard_op_st_image_32u = 0xDA,
   midgard_op_st_image_32i = 0xDB,

   midgard_op_st_special_32f = 0xDC,
   midgard_op_st_special_16f = 0xDD,
   midgard_op_st_special_32u = 0xDE,
   midgard_op_st_special_32i = 0xDF,

   /* Only works on v5 or newer.
    * Older cards must use ld_special with tilebuffer selectors. */
   midgard_op_st_tilebuffer_32f = 0xE8,
   midgard_op_st_tilebuffer_16f = 0xE9,
   midgard_op_st_tilebuffer_raw = 0xEA,
   midgard_op_trap = 0xFC,
} midgard_load_store_op;
639 
/* Varying interpolation qualifiers (per-sample, centroid, or default). */
typedef enum {
   midgard_interp_sample = 0,
   midgard_interp_centroid = 1,
   midgard_interp_default = 2
} midgard_interpolation;
645 
/* Modifier applied to a varying load result. */
typedef enum {
   midgard_varying_mod_none = 0,

   /* Take the would-be result and divide all components by its y/z/w
    * (perspective division baked in with the load)  */
   midgard_varying_mod_perspective_y = 1,
   midgard_varying_mod_perspective_z = 2,
   midgard_varying_mod_perspective_w = 3,

   /* The result is a 64-bit cubemap descriptor to use with
    * midgard_tex_op_normal or midgard_tex_op_gradient */
   midgard_varying_mod_cubemap = 4,
} midgard_varying_modifier;
659 
/* Packed parameter word controlling how a varying is interpolated/loaded. */
typedef struct __attribute__((__packed__)) {
   midgard_varying_modifier modifier : 3;

   bool flat_shading : 1;

   /* These are ignored if flat_shading is enabled. */
   bool perspective_correction : 1;
   bool centroid_mapping       : 1;

   /* This is ignored if the shader only runs once per pixel. */
   bool interpolate_sample : 1;

   bool zero0 : 1; /* Always zero */

   /* Explicit sample position, in 4-bit fixed point per axis */
   unsigned direct_sample_pos_x : 4;
   unsigned direct_sample_pos_y : 4;
} midgard_varying_params;
677 
/* 8-bit register/etc selector for load/store ops */
typedef struct __attribute__((__packed__)) {
   /* Indexes into the register */
   unsigned component : 2;

   /* Register select between r26/r27 */
   unsigned select : 1;

   unsigned unknown : 2;

   /* Like any good Arm instruction set, load/store arguments can be
    * implicitly left-shifted... but only the second argument. Zero for no
    * shifting, up to <<7 possible though. This is useful for indexing.
    *
    * For the first argument, it's unknown what these bits mean */
   unsigned shift : 3;
} midgard_ldst_register_select;
695 
/* Interpretation of the index register used in load/store addressing. */
typedef enum {
   /* 0 is reserved */
   midgard_index_address_u64 = 1,
   midgard_index_address_u32 = 2,
   midgard_index_address_s32 = 3,
} midgard_index_address_format;
702 
/* Decoded 60-bit load/store instruction word (one of the two slots packed
 * into midgard_load_store below). Field meanings vary by opcode; see the
 * per-field notes. */
typedef struct __attribute__((__packed__)) {
   midgard_load_store_op op : 8;

   /* Source/dest reg */
   unsigned reg : 5;

   /* Generally is a writemask.
    * For ST_ATTR and ST_TEX, unused.
    * For other stores, each bit masks 1/4th of the output. */
   unsigned mask : 4;

   /* Swizzle for stores, but for atomics it encodes also the source
    * register. This fits because atomics dont need a swizzle since they
    * are not vectorized instructions. */
   unsigned swizzle : 8;

   /* Arg reg, meaning changes according to each opcode */
   unsigned arg_comp : 2;
   unsigned arg_reg  : 3;

   /* 64-bit address enable
    * 32-bit data type enable for CUBEMAP and perspective div.
    * Explicit indexing enable for LD_ATTR.
    * 64-bit coordinate enable for LD_IMAGE. */
   bool bitsize_toggle : 1;

   /* These are mainly used for opcodes that have addresses.
    * For cmpxchg, index_reg is used for the comparison value.
    * For ops that access the attrib table, bit 1 encodes which table.
    * For LD_VAR and LD/ST_ATTR, bit 0 enables dest/src type inferral. */
   midgard_index_address_format index_format : 2;
   unsigned index_comp                       : 2;
   unsigned index_reg                        : 3;
   unsigned index_shift                      : 4;

   /* Generally is a signed offset, but has different bitsize and starts at
    * different bits depending on the opcode, LDST_*_DISPLACEMENT helpers
    * are recommended when packing/unpacking this attribute.
    * For LD_UBO, bit 0 enables ubo index immediate.
    * For LD_TILEBUFFER_RAW, bit 0 disables sample index immediate. */
   int signed_offset : 18;
} midgard_load_store_word;
745 
/* 128-bit load/store bundle: tag nibbles plus two 60-bit instruction words
 * (each a packed midgard_load_store_word). */
typedef struct __attribute__((__packed__)) {
   unsigned type      : 4; /* TAG_LOAD_STORE_4 */
   unsigned next_type : 4; /* tag of the following bundle */
   uint64_t word1     : 60;
   uint64_t word2     : 60;
} midgard_load_store;
752 
/* 8-bit register selector used in texture ops to select a bias/LOD/gradient
 * register, shoved into the `bias` field */

typedef struct __attribute__((__packed__)) {
   /* 32-bit register, clear for half-register */
   unsigned full : 1;

   /* Register select between r28/r29 */
   unsigned select : 1;

   /* For a half-register, selects the upper half */
   unsigned upper : 1;

   /* Indexes into the register */
   unsigned component : 2;

   /* Padding to make this 8-bit */
   unsigned zero : 3;
} midgard_tex_register_select;

/* Texture pipeline results are in r28-r29 */
#define REG_TEX_BASE 28
775 
/* Texture pipeline opcodes. */
enum mali_texture_op {
   /* [texture + LOD bias]
    * If the texture is mipmapped, barriers must be enabled in the
    * instruction word in order for this opcode to compute the output
    * correctly. */
   midgard_tex_op_normal = 1,

   /* [texture + gradient for LOD and anisotropy]
    * Unlike midgard_tex_op_normal, this opcode does not require barriers
    * to compute the output correctly. */
   midgard_tex_op_gradient = 2,

   /* [unfiltered texturing]
    * Unlike midgard_tex_op_normal, this opcode does not require barriers
    * to compute the output correctly. */
   midgard_tex_op_fetch = 4,

   /* [gradient from derivative] */
   midgard_tex_op_grad_from_derivative = 9,

   /* [mov] */
   midgard_tex_op_mov = 10,

   /* [noop]
    * Mostly used for barriers. */
   midgard_tex_op_barrier = 11,

   /* [gradient from coords] */
   midgard_tex_op_grad_from_coords = 12,

   /* [derivative]
    * Computes derivatives in 2x2 fragment blocks. */
   midgard_tex_op_derivative = 13
};
810 
/* Sampler return type, written to midgard_texture_word.sampler_type (2 bits).
 * Names in parentheses are the corresponding GLSL sampler prefixes. */
enum mali_sampler_type {
   /* 0 is reserved */
   MALI_SAMPLER_FLOAT = 0x1,    /* sampler */
   MALI_SAMPLER_UNSIGNED = 0x2, /* usampler */
   MALI_SAMPLER_SIGNED = 0x3,   /* isampler */
};
817 
818 /* Texture modes */
/* Values for midgard_texture_word.mode (4 bits), selecting between plain
 * sampling, shadow comparison, and per-component textureGather variants */
enum mali_texture_mode {
   TEXTURE_NORMAL = 1,
   TEXTURE_SHADOW = 5,
   TEXTURE_GATHER_SHADOW = 6,
   TEXTURE_GATHER_X = 8,
   TEXTURE_GATHER_Y = 9,
   TEXTURE_GATHER_Z = 10,
   TEXTURE_GATHER_W = 11,
};
828 
/* Selects the derivative axis — presumably consumed by
 * midgard_tex_op_derivative (TODO confirm against the compiler backend) */
enum mali_derivative_mode {
   TEXTURE_DFDX = 0,
   TEXTURE_DFDY = 1,
};
833 
/* Execution control written to midgard_texture_word.exec (2 bits).
 * NOTE(review): value 0 is unused here; the names suggest per-lane
 * skip/kill semantics for helper invocations — confirm against callers. */
enum midgard_partial_execution {
   MIDGARD_PARTIAL_EXECUTION_SKIP = 1,
   MIDGARD_PARTIAL_EXECUTION_KILL = 2,
   MIDGARD_PARTIAL_EXECUTION_NONE = 3
};
839 
/* A texture pipeline instruction word. Like the ALU and load/store words, it
 * begins with the type/next_type tag nibbles; the rest encodes the texture
 * opcode, execution control, input/output register selection, offsets, and
 * LOD/bias controls. Bit-fields total 128 bits. */
typedef struct __attribute__((__packed__)) {
   unsigned type      : 4;
   unsigned next_type : 4;

   enum mali_texture_op op             : 4;
   unsigned mode                       : 4;
   enum midgard_partial_execution exec : 2;

   unsigned format : 2;

   /* Are sampler_handle/texture_handle respectively set by registers? If
    * true, the lower 8-bits of the respective field is a register word.
    * If false, they are an immediate */

   unsigned sampler_register : 1;
   unsigned texture_register : 1;

   /* Is a register used to specify the
    * LOD/bias/offset? If set, use the `bias` field as
    * a register index. If clear, use the `bias` field
    * as an immediate. */
   unsigned lod_register : 1;

   /* Is a register used to specify an offset? If set, use the
    * offset_reg_* fields to encode this, duplicated for each of the
    * components. If clear, there is implicitly always an immediate offset
    * specified in offset_imm_* */
   unsigned offset_register : 1;

   /* Input (coordinate) register selection; field meanings mirror
    * midgard_tex_register_select (full/select/upper) plus a swizzle */
   unsigned in_reg_full    : 1;
   unsigned in_reg_select  : 1;
   unsigned in_reg_upper   : 1;
   unsigned in_reg_swizzle : 8;

   unsigned unknown8 : 2;

   /* 32-bit output register if set, half-register if clear */
   unsigned out_full : 1;

   enum mali_sampler_type sampler_type : 2;

   unsigned out_reg_select : 1;
   unsigned out_upper      : 1;

   /* Destination component write mask */
   unsigned mask : 4;

   /* Intriguingly, textures can take an outmod just like alu ops. Int
    * outmods are not supported as far as I can tell, so this is only
    * meaningful for float samplers */
   midgard_outmod_float outmod : 2;

   unsigned swizzle : 8;

   /* These indicate how many bundles after this texture op may be
    * executed in parallel with this op. We may execute only ALU and
    * ld/st in parallel (not other textures), and obviously there cannot
    * be any dependency (the blob appears to forbid even accessing other
    * channels of a given texture register). */

   unsigned out_of_order : 4;
   unsigned unknown4     : 8;

   /* In immediate mode, each offset field is an immediate range [0, 7].
    *
    * In register mode, offset_x becomes a register (full, select, upper)
    * triplet, and a vec3 swizzle is splattered across
    * offset_y/offset_z in a genuinely bizarre way.
    *
    * For texel fetches in immediate mode, the range is the full [-8, 7],
    * but for normal texturing the top bit must be zero and a register
    * used instead. It's not clear where this limitation is from.
    *
    * union {
    *      struct {
    *              signed offset_x  : 4;
    *              signed offset_y  : 4;
    *              signed offset_z  : 4;
    *      } immediate;
    *      struct {
    *              bool full        : 1;
    *              bool select      : 1;
    *              bool upper       : 1;
    *              unsigned swizzle : 8;
    *              unsigned zero    : 1;
    *      } register;
    * }
    */

   unsigned offset : 12;

   /* In immediate bias mode, for a normal texture op, this is
    * texture bias, computed as int(2^8 * frac(biasf)), with
    * bias_int = floor(bias). For a textureLod, it's that, but
    * s/bias/lod. For a texel fetch, this is the LOD as-is.
    *
    * In register mode, this is a midgard_tex_register_select
    * structure and bias_int is zero */

   unsigned bias   : 8;
   signed bias_int : 8;

   /* If sampler/texture_register is set, the bottom 8-bits are
    * midgard_tex_register_select and the top 8-bits are zero. If they are
    * clear, they are immediate texture indices */

   unsigned sampler_handle : 16;
   unsigned texture_handle : 16;
} midgard_texture_word;
947 
948 /* Technically barriers are texture instructions but it's less work to add them
949  * as an explicitly zeroed special case, since most fields are forced to go to
950  * zero */
951 
/* Explicitly zeroed overlay of midgard_texture_word for barrier ops.
 * Bit-fields total 128 bits, matching midgard_texture_word exactly. */
typedef struct __attribute__((__packed__)) {
   unsigned type      : 4;
   unsigned next_type : 4;

   /* op = TEXTURE_OP_BARRIER */
   unsigned op    : 6;
   unsigned zero1 : 2;

   /* Since helper invocations don't make any sense, these are forced to one */
   unsigned cont  : 1;
   unsigned last  : 1;
   unsigned zero2 : 14;

   unsigned zero3        : 24;
   unsigned out_of_order : 4;
   unsigned zero4        : 4;

   /* Remaining 64 bits of the word are forced to zero */
   uint64_t zero5;
} midgard_texture_barrier_word;
971 
/* 128 bits of embedded constants, reinterpretable at every supported lane
 * width. Note f16 is declared uint16_t — presumably it holds raw
 * half-precision bit patterns, since C has no portable 16-bit float type
 * (TODO confirm against the packing code). */
typedef union midgard_constants {
   double f64[2];
   uint64_t u64[2];
   int64_t i64[2];
   float f32[4];
   uint32_t u32[4];
   int32_t i32[4];
   uint16_t f16[8];
   uint16_t u16[8];
   int16_t i16[8];
   uint8_t u8[16];
   int8_t i8[16];
} midgard_constants;
985 
/* Floating-point rounding modes (2-bit encoding) */
enum midgard_roundmode {
   MIDGARD_RTE = 0x0, /* round to even */
   MIDGARD_RTZ = 0x1, /* round to zero */
   MIDGARD_RTN = 0x2, /* round to negative */
   MIDGARD_RTP = 0x3, /* round to positive */
};
992 
993 #endif
994