1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <[email protected]>
30 */
31
32 #ifndef BRW_EU_DEFINES_H
33 #define BRW_EU_DEFINES_H
34
35 #include <stdint.h>
36 #include <stdlib.h>
37 #include "util/macros.h"
38 #include "dev/intel_device_info.h"
39
/* The following hunk, up to "Execution Unit", is used by both the
 * intel/compiler and i965 codebase. */
42
/* Bitmask covering bits [high:low] of a 32-bit word, inclusive.
 * Note that a full 32-bit span (high - low + 1 == 32) would shift 1u by
 * 32, which is undefined behavior, so callers must keep the span < 32 bits.
 */
#define INTEL_MASK(high, low) (((1u<<((high)-(low)+1))-1)<<(low))
/* Using the GNU statement expression extension */
#define SET_FIELD(value, field)                                         \
   ({                                                                   \
      uint32_t fieldval = (uint32_t)(value) << field ## _SHIFT;         \
      assert((fieldval & ~ field ## _MASK) == 0);                       \
      fieldval & field ## _MASK;                                        \
   })

#define SET_BITS(value, high, low)                                     \
   ({                                                                  \
      const uint32_t fieldval = (uint32_t)(value) << (low);            \
      assert((fieldval & ~INTEL_MASK(high, low)) == 0);                \
      fieldval & INTEL_MASK(high, low);                                \
   })

/* Extract bits [high:low] of \p data, shifted down to bit 0.  \p data is
 * parenthesized so that compound arguments (e.g. "a | b") parse correctly
 * against the higher-precedence '&' in the expansion.
 */
#define GET_BITS(data, high, low) (((data) & INTEL_MASK((high), (low))) >> (low))
#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
61
/* Formats for the control-data portion of Gfx7 geometry-shader output:
 * cut bits or stream IDs.
 */
# define GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT 0
# define GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_SID 1

/* Execution Unit (EU) defines
 */

/* Instruction access mode: Align1 or Align16 regioning. */
#define BRW_ALIGN_1 0
#define BRW_ALIGN_16 1

/* Register addressing mode: direct, or indirect through the address
 * register.
 */
#define BRW_ADDRESS_DIRECT 0
#define BRW_ADDRESS_REGISTER_INDIRECT_REGISTER 1

/* Align16 channel-select indices. */
#define BRW_CHANNEL_X 0
#define BRW_CHANNEL_Y 1
#define BRW_CHANNEL_Z 2
#define BRW_CHANNEL_W 3

/* Instruction compression control (pre-Gfx6 encoding). */
enum brw_compression {
   BRW_COMPRESSION_NONE = 0,
   BRW_COMPRESSION_2NDHALF = 1,
   BRW_COMPRESSION_COMPRESSED = 2,
};

/* Gfx6+ quarter/half control values (which slice of the dispatched
 * channels an instruction operates on — presumably quarters 1-4 and
 * halves 1-2 of the execution mask; confirm against the PRM).
 */
#define GFX6_COMPRESSION_1Q 0
#define GFX6_COMPRESSION_2Q 1
#define GFX6_COMPRESSION_3Q 2
#define GFX6_COMPRESSION_4Q 3
#define GFX6_COMPRESSION_1H 0
#define GFX6_COMPRESSION_2H 2
91
/* Conditional modifiers: comparison functions used for predication and
 * flag generation.  Note EQ/NEQ are aliases for Z/NZ.
 */
enum ENUM_PACKED brw_conditional_mod {
   BRW_CONDITIONAL_NONE = 0,
   BRW_CONDITIONAL_Z = 1,
   BRW_CONDITIONAL_NZ = 2,
   BRW_CONDITIONAL_EQ = 1, /* Z */
   BRW_CONDITIONAL_NEQ = 2, /* NZ */
   BRW_CONDITIONAL_G = 3,
   BRW_CONDITIONAL_GE = 4,
   BRW_CONDITIONAL_L = 5,
   BRW_CONDITIONAL_LE = 6,
   BRW_CONDITIONAL_R = 7, /* Gen <= 5 */
   BRW_CONDITIONAL_O = 8,
   BRW_CONDITIONAL_U = 9,
};

#define BRW_DEBUG_NONE 0
#define BRW_DEBUG_BREAKPOINT 1

/* Execution size: the encoded value is log2 of the channel count
 * (e.g. BRW_EXECUTE_16 == 4 because 2^4 == 16).
 */
enum ENUM_PACKED brw_execution_size {
   BRW_EXECUTE_1 = 0,
   BRW_EXECUTE_2 = 1,
   BRW_EXECUTE_4 = 2,
   BRW_EXECUTE_8 = 3,
   BRW_EXECUTE_16 = 4,
   BRW_EXECUTE_32 = 5,
};

/* Source-region horizontal stride.  The encoding is not the literal
 * stride: value 3 encodes a stride of 4.
 */
enum ENUM_PACKED brw_horizontal_stride {
   BRW_HORIZONTAL_STRIDE_0 = 0,
   BRW_HORIZONTAL_STRIDE_1 = 1,
   BRW_HORIZONTAL_STRIDE_2 = 2,
   BRW_HORIZONTAL_STRIDE_4 = 3,
};
125
/* Source horizontal stride for Align1 3-source instructions (Gfx10+).
 * Value 3 encodes a stride of 4, like brw_horizontal_stride.
 */
enum ENUM_PACKED gfx10_align1_3src_src_horizontal_stride {
   BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0 = 0,
   BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1 = 1,
   BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2 = 2,
   BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4 = 3,
};

/* Destination horizontal stride for Align1 3-source instructions (Gfx10+). */
enum ENUM_PACKED gfx10_align1_3src_dst_horizontal_stride {
   BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1 = 0,
   BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_2 = 1,
};

#define BRW_INSTRUCTION_NORMAL 0
#define BRW_INSTRUCTION_SATURATE 1

#define BRW_MASK_ENABLE 0
#define BRW_MASK_DISABLE 1

/** @{
 *
 * Gfx6 has replaced "mask enable/disable" with WECtrl, which is
 * effectively the same but much simpler to think about. Now, there
 * are two contributors ANDed together to whether channels are
 * executed: The predication on the instruction, and the channel write
 * enable.
 */
/**
 * This is the default value. It means that a channel's write enable is set
 * if the per-channel IP is pointing at this instruction.
 */
#define BRW_WE_NORMAL 0
/**
 * This is used like BRW_MASK_DISABLE, and causes all channels to have
 * their write enable set. Note that predication still contributes to
 * whether the channel actually gets written.
 */
#define BRW_WE_ALL 1
/** @} */
164
/* All opcodes known to the compiler IR.  Values below NUM_BRW_OPCODES are
 * actual hardware instructions; everything at or above NUM_BRW_OPCODES is
 * a virtual (compiler-only) opcode.  Do not reorder entries: relative
 * order and the NUM_BRW_OPCODES boundary are relied upon elsewhere.
 */
enum opcode {
   /* These are the actual hardware instructions. */
   BRW_OPCODE_ILLEGAL,
   BRW_OPCODE_SYNC,
   BRW_OPCODE_MOV,
   BRW_OPCODE_SEL,
   BRW_OPCODE_MOVI,
   BRW_OPCODE_NOT,
   BRW_OPCODE_AND,
   BRW_OPCODE_OR,
   BRW_OPCODE_XOR,
   BRW_OPCODE_SHR,
   BRW_OPCODE_SHL,
   BRW_OPCODE_SMOV,
   BRW_OPCODE_ASR,
   BRW_OPCODE_ROR, /**< Gfx11+ */
   BRW_OPCODE_ROL, /**< Gfx11+ */
   BRW_OPCODE_CMP,
   BRW_OPCODE_CMPN,
   BRW_OPCODE_CSEL,
   BRW_OPCODE_BFREV,
   BRW_OPCODE_BFE,
   BRW_OPCODE_BFI1,
   BRW_OPCODE_BFI2,
   BRW_OPCODE_JMPI,
   BRW_OPCODE_BRD,
   BRW_OPCODE_IF,
   BRW_OPCODE_BRC,
   BRW_OPCODE_ELSE,
   BRW_OPCODE_ENDIF,
   BRW_OPCODE_DO, /**< Used as pseudo opcode, will be moved later. */
   BRW_OPCODE_WHILE,
   BRW_OPCODE_BREAK,
   BRW_OPCODE_CONTINUE,
   BRW_OPCODE_HALT,
   BRW_OPCODE_CALLA,
   BRW_OPCODE_CALL,
   BRW_OPCODE_RET,
   BRW_OPCODE_GOTO,
   BRW_OPCODE_WAIT,
   BRW_OPCODE_SEND,
   BRW_OPCODE_SENDC,
   BRW_OPCODE_SENDS,
   BRW_OPCODE_SENDSC,
   BRW_OPCODE_MATH,
   BRW_OPCODE_ADD,
   BRW_OPCODE_MUL,
   BRW_OPCODE_AVG,
   BRW_OPCODE_FRC,
   BRW_OPCODE_RNDU,
   BRW_OPCODE_RNDD,
   BRW_OPCODE_RNDE,
   BRW_OPCODE_RNDZ,
   BRW_OPCODE_MAC,
   BRW_OPCODE_MACH,
   BRW_OPCODE_LZD,
   BRW_OPCODE_FBH,
   BRW_OPCODE_FBL,
   BRW_OPCODE_CBIT,
   BRW_OPCODE_ADDC,
   BRW_OPCODE_SUBB,
   BRW_OPCODE_ADD3, /* Gen12+ only */
   BRW_OPCODE_DP4,
   BRW_OPCODE_DPH,
   BRW_OPCODE_DP3,
   BRW_OPCODE_DP2,
   BRW_OPCODE_DP4A, /**< Gfx12+ */
   BRW_OPCODE_LINE,
   BRW_OPCODE_DPAS, /**< Gfx12.5+ */
   BRW_OPCODE_PLN, /**< Up until Gfx9 */
   BRW_OPCODE_MAD,
   BRW_OPCODE_LRP,
   BRW_OPCODE_MADM,
   BRW_OPCODE_NOP,

   /* Number of real hardware opcodes; virtual opcodes start here. */
   NUM_BRW_OPCODES,

   /**
    * The position/ordering of the arguments are defined
    * by the enum fb_write_logical_srcs.
    */
   FS_OPCODE_FB_WRITE_LOGICAL = NUM_BRW_OPCODES,

   FS_OPCODE_FB_READ_LOGICAL,

   /* Math-unit virtual opcodes. */
   SHADER_OPCODE_RCP,
   SHADER_OPCODE_RSQ,
   SHADER_OPCODE_SQRT,
   SHADER_OPCODE_EXP2,
   SHADER_OPCODE_LOG2,
   SHADER_OPCODE_POW,
   SHADER_OPCODE_INT_QUOTIENT,
   SHADER_OPCODE_INT_REMAINDER,
   SHADER_OPCODE_SIN,
   SHADER_OPCODE_COS,

   /**
    * A generic "send" opcode. The first two sources are the message
    * descriptor and extended message descriptor respectively. The third
    * and optional fourth sources are the message payload
    */
   SHADER_OPCODE_SEND,

   /**
    * An "undefined" write which does nothing but indicates to liveness that
    * we don't care about any values in the register which predate this
    * instruction. Used to prevent partial writes from causing issues with
    * live ranges.
    */
   SHADER_OPCODE_UNDEF,

   /**
    * Texture sampling opcodes.
    *
    * LOGICAL opcodes are eventually translated to SHADER_OPCODE_SEND but
    * take parameters as individual sources. See enum tex_logical_srcs.
    */
   SHADER_OPCODE_TEX_LOGICAL,
   SHADER_OPCODE_TXD_LOGICAL,
   SHADER_OPCODE_TXF_LOGICAL,
   SHADER_OPCODE_TXL_LOGICAL,
   SHADER_OPCODE_TXS_LOGICAL,
   FS_OPCODE_TXB_LOGICAL,
   SHADER_OPCODE_TXF_CMS_W_LOGICAL,
   SHADER_OPCODE_TXF_CMS_W_GFX12_LOGICAL,
   SHADER_OPCODE_TXF_MCS_LOGICAL,
   SHADER_OPCODE_LOD_LOGICAL,
   SHADER_OPCODE_TG4_LOGICAL,
   SHADER_OPCODE_TG4_IMPLICIT_LOD_LOGICAL,
   SHADER_OPCODE_TG4_EXPLICIT_LOD_LOGICAL,
   SHADER_OPCODE_TG4_BIAS_LOGICAL,
   SHADER_OPCODE_TG4_OFFSET_LOGICAL,
   SHADER_OPCODE_TG4_OFFSET_LOD_LOGICAL,
   SHADER_OPCODE_TG4_OFFSET_BIAS_LOGICAL,
   SHADER_OPCODE_SAMPLEINFO_LOGICAL,

   SHADER_OPCODE_IMAGE_SIZE_LOGICAL,

   /**
    * Combines multiple sources of size 1 into a larger virtual GRF.
    * For example, parameters for a send-from-GRF message. Or, updating
    * channels of a size 4 VGRF used to store vec4s such as texturing results.
    *
    * This will be lowered into MOVs from each source to consecutive offsets
    * of the destination VGRF.
    *
    * src[0] may be BAD_FILE. If so, the lowering pass skips emitting the MOV,
    * but still reserves the first channel of the destination VGRF. This can be
    * used to reserve space for, say, a message header set up by the generators.
    */
   SHADER_OPCODE_LOAD_PAYLOAD,

   /**
    * Packs a number of sources into a single value. Unlike LOAD_PAYLOAD, this
    * acts intra-channel, obtaining the final value for each channel by
    * combining the sources values for the same channel, the first source
    * occupying the lowest bits and the last source occupying the highest
    * bits.
    */
   FS_OPCODE_PACK,

   SHADER_OPCODE_RND_MODE,
   SHADER_OPCODE_FLOAT_CONTROL_MODE,

   /**
    * Memory fence messages.
    *
    * Source 0: Must be register g0, used as header.
    * Source 1: Immediate bool to indicate whether control is returned to the
    *           thread only after the fence has been honored.
    * Source 2: Immediate byte indicating which memory to fence.  Zero means
    *           global memory; GFX7_BTI_SLM means SLM (for Gfx11+ only).
    *
    * Vec4 backend only uses Source 0.
    */
   SHADER_OPCODE_MEMORY_FENCE,

   /**
    * Scheduling-only fence.
    *
    * Sources can be used to force a stall until the registers in those are
    * available.  This might generate MOVs or SYNC_NOPs (Gfx12+).
    */
   FS_OPCODE_SCHEDULING_FENCE,

   SHADER_OPCODE_SCRATCH_HEADER,

   /**
    * Gfx8+ SIMD8 URB messages.
    */
   SHADER_OPCODE_URB_READ_LOGICAL,
   SHADER_OPCODE_URB_WRITE_LOGICAL,

   /**
    * Return the index of the first enabled live channel and assign it to
    * to the first component of the destination.  Frequently used as input
    * for the BROADCAST pseudo-opcode.
    */
   SHADER_OPCODE_FIND_LIVE_CHANNEL,

   /**
    * Return the index of the last enabled live channel and assign it to
    * the first component of the destination.
    */
   SHADER_OPCODE_FIND_LAST_LIVE_CHANNEL,

   /**
    * Return the current execution mask and assign it to the first component
    * of the destination.
    *
    * \sa opcode::FS_OPCODE_LOAD_LIVE_CHANNELS
    */
   SHADER_OPCODE_LOAD_LIVE_CHANNELS,

   /**
    * Return the current execution mask in the specified flag subregister.
    * Can be CSE'ed more easily than a plain MOV from the ce0 ARF register.
    */
   FS_OPCODE_LOAD_LIVE_CHANNELS,

   /**
    * Pick the channel from its first source register given by the index
    * specified as second source.  Useful for variable indexing of surfaces.
    *
    * Note that because the result of this instruction is by definition
    * uniform and it can always be splatted to multiple channels using a
    * scalar regioning mode, only the first channel of the destination region
    * is guaranteed to be updated, which implies that BROADCAST instructions
    * should usually be marked force_writemask_all.
    */
   SHADER_OPCODE_BROADCAST,

   /* Pick the channel from its first source register given by the index
    * specified as second source.
    *
    * This is similar to the BROADCAST instruction except that it takes a
    * dynamic index and potentially puts a different value in each output
    * channel.
    */
   SHADER_OPCODE_SHUFFLE,

   /* Select between src0 and src1 based on channel enables.
    *
    * This instruction copies src0 into the enabled channels of the
    * destination and copies src1 into the disabled channels.
    */
   SHADER_OPCODE_SEL_EXEC,

   /* This turns into an align16 mov from src0 to dst with a swizzle
    * provided as an immediate in src1.
    */
   SHADER_OPCODE_QUAD_SWIZZLE,

   /* Take every Nth element in src0 and broadcast it to the group of N
    * channels in which it lives in the destination.  The offset within the
    * cluster is given by src1 and the cluster size is given by src2.
    */
   SHADER_OPCODE_CLUSTER_BROADCAST,

   SHADER_OPCODE_GET_BUFFER_SIZE,

   SHADER_OPCODE_INTERLOCK,

   /** Target for a HALT
    *
    * All HALT instructions in a shader must target the same jump point and
    * that point is denoted by a HALT_TARGET instruction.
    */
   SHADER_OPCODE_HALT_TARGET,

   /* Derivative opcodes: dFdx()/dFdxCoarse()/dFdxFine() and the dFdy()
    * equivalents below.
    */
   FS_OPCODE_DDX_COARSE,
   FS_OPCODE_DDX_FINE,
   /**
    * Compute dFdy(), dFdyCoarse(), or dFdyFine().
    */
   FS_OPCODE_DDY_COARSE,
   FS_OPCODE_DDY_FINE,
   FS_OPCODE_PIXEL_X,
   FS_OPCODE_PIXEL_Y,
   FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
   FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL,
   FS_OPCODE_PACK_HALF_2x16_SPLIT,
   FS_OPCODE_INTERPOLATE_AT_SAMPLE,
   FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
   FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET,

   /**
    * GLSL barrier()
    */
   SHADER_OPCODE_BARRIER,

   /**
    * Calculate the high 32-bits of a 32x32 multiply.
    */
   SHADER_OPCODE_MULH,

   /** Signed subtraction with saturation. */
   SHADER_OPCODE_ISUB_SAT,

   /** Unsigned subtraction with saturation. */
   SHADER_OPCODE_USUB_SAT,

   /**
    * A MOV that uses VxH indirect addressing.
    *
    * Source 0: A register to start from (HW_REG).
    * Source 1: An indirect offset (in bytes, UD GRF).
    * Source 2: The length of the region that could be accessed (in bytes,
    *           UD immediate).
    */
   SHADER_OPCODE_MOV_INDIRECT,

   /** Fills out a relocatable immediate */
   SHADER_OPCODE_MOV_RELOC_IMM,

   SHADER_OPCODE_BTD_SPAWN_LOGICAL,
   SHADER_OPCODE_BTD_RETIRE_LOGICAL,

   SHADER_OPCODE_READ_ARCH_REG,

   SHADER_OPCODE_LOAD_SUBGROUP_INVOCATION,

   RT_OPCODE_TRACE_RAY_LOGICAL,

   /* Generic memory access opcodes; sources are described by
    * enum memory_logical_srcs.
    */
   SHADER_OPCODE_MEMORY_LOAD_LOGICAL,
   SHADER_OPCODE_MEMORY_STORE_LOGICAL,
   SHADER_OPCODE_MEMORY_ATOMIC_LOGICAL,
};
493
/* Source ordering for FS_OPCODE_FB_WRITE_LOGICAL. */
enum fb_write_logical_srcs {
   FB_WRITE_LOGICAL_SRC_COLOR0,      /* REQUIRED */
   FB_WRITE_LOGICAL_SRC_COLOR1,      /* for dual source blend messages */
   FB_WRITE_LOGICAL_SRC_SRC0_ALPHA,
   FB_WRITE_LOGICAL_SRC_SRC_DEPTH,   /* gl_FragDepth */
   FB_WRITE_LOGICAL_SRC_DST_DEPTH,   /* GFX4-5: passthrough from thread */
   FB_WRITE_LOGICAL_SRC_SRC_STENCIL, /* gl_FragStencilRefARB */
   FB_WRITE_LOGICAL_SRC_OMASK,       /* Sample Mask (gl_SampleMask) */
   FB_WRITE_LOGICAL_SRC_COMPONENTS,  /* REQUIRED */
   FB_WRITE_LOGICAL_NUM_SRCS
};

/* Source ordering for the *_LOGICAL texture sampling opcodes. */
enum tex_logical_srcs {
   /** Texture coordinates */
   TEX_LOGICAL_SRC_COORDINATE,
   /** Shadow comparator */
   TEX_LOGICAL_SRC_SHADOW_C,
   /** dPdx if the operation takes explicit derivatives, otherwise LOD value */
   TEX_LOGICAL_SRC_LOD,
   /** dPdy if the operation takes explicit derivatives */
   TEX_LOGICAL_SRC_LOD2,
   /** Min LOD */
   TEX_LOGICAL_SRC_MIN_LOD,
   /** Sample index */
   TEX_LOGICAL_SRC_SAMPLE_INDEX,
   /** MCS data */
   TEX_LOGICAL_SRC_MCS,
   /** REQUIRED: Texture surface index */
   TEX_LOGICAL_SRC_SURFACE,
   /** Texture sampler index */
   TEX_LOGICAL_SRC_SAMPLER,
   /** Texture surface bindless handle */
   TEX_LOGICAL_SRC_SURFACE_HANDLE,
   /** Texture sampler bindless handle */
   TEX_LOGICAL_SRC_SAMPLER_HANDLE,
   /** Texel offset for gathers */
   TEX_LOGICAL_SRC_TG4_OFFSET,
   /** REQUIRED: Number of coordinate components (as UD immediate) */
   TEX_LOGICAL_SRC_COORD_COMPONENTS,
   /** REQUIRED: Number of derivative components (as UD immediate) */
   TEX_LOGICAL_SRC_GRAD_COMPONENTS,
   /** REQUIRED: request residency (as UD immediate) */
   TEX_LOGICAL_SRC_RESIDENCY,

   TEX_LOGICAL_NUM_SRCS,
};
540
/* Source ordering for FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD. */
enum pull_uniform_constant_srcs {
   /** Surface binding table index */
   PULL_UNIFORM_CONSTANT_SRC_SURFACE,
   /** Surface bindless handle */
   PULL_UNIFORM_CONSTANT_SRC_SURFACE_HANDLE,
   /** Surface offset */
   PULL_UNIFORM_CONSTANT_SRC_OFFSET,
   /** Pull size */
   PULL_UNIFORM_CONSTANT_SRC_SIZE,

   PULL_UNIFORM_CONSTANT_SRCS,
};

/* Source ordering for FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL. */
enum pull_varying_constant_srcs {
   /** Surface binding table index */
   PULL_VARYING_CONSTANT_SRC_SURFACE,
   /** Surface bindless handle */
   PULL_VARYING_CONSTANT_SRC_SURFACE_HANDLE,
   /** Surface offset */
   PULL_VARYING_CONSTANT_SRC_OFFSET,
   /** Pull alignment */
   PULL_VARYING_CONSTANT_SRC_ALIGNMENT,

   PULL_VARYING_CONSTANT_SRCS,
};

/* Source ordering for SHADER_OPCODE_GET_BUFFER_SIZE. */
enum get_buffer_size_srcs {
   /** Surface binding table index */
   GET_BUFFER_SIZE_SRC_SURFACE,
   /** Surface bindless handle */
   GET_BUFFER_SIZE_SRC_SURFACE_HANDLE,
   /** LOD */
   GET_BUFFER_SIZE_SRC_LOD,

   GET_BUFFER_SIZE_SRCS
};
577
/* Kind of memory targeted by a SHADER_OPCODE_MEMORY_*_LOGICAL operation. */
enum memory_logical_mode {
   MEMORY_MODE_TYPED,
   MEMORY_MODE_UNTYPED,
   MEMORY_MODE_SHARED_LOCAL,
   MEMORY_MODE_SCRATCH,
};

/* Source ordering for the SHADER_OPCODE_MEMORY_*_LOGICAL opcodes. */
enum memory_logical_srcs {
   /** enum lsc_opcode (as UD immediate) */
   MEMORY_LOGICAL_OPCODE,

   /** enum memory_logical_mode (as UD immediate) */
   MEMORY_LOGICAL_MODE,

   /** enum lsc_addr_surface_type (as UD immediate) */
   MEMORY_LOGICAL_BINDING_TYPE,

   /**
    * Where to find the surface state.  Depends on BINDING_TYPE above:
    *
    * - SS: pointer to surface state (relative to surface base address)
    * - BSS: pointer to surface state (relative to bindless surface base)
    * - BTI: binding table index
    * - FLAT: This should be BAD_FILE
    */
   MEMORY_LOGICAL_BINDING,

   /** Coordinate/address/offset for where to access memory */
   MEMORY_LOGICAL_ADDRESS,

   /** Dimensionality of the "address" source (as UD immediate) */
   MEMORY_LOGICAL_COORD_COMPONENTS,

   /** Required alignment of address in bytes; 0 for natural alignment */
   MEMORY_LOGICAL_ALIGNMENT,

   /** Bit-size in the form of enum lsc_data_size (as UD immediate) */
   MEMORY_LOGICAL_DATA_SIZE,

   /** Number of vector components (as UD immediate) */
   MEMORY_LOGICAL_COMPONENTS,

   /** memory_flags bitfield (as UD immediate) */
   MEMORY_LOGICAL_FLAGS,

   /** Data to write for stores or the first operand for atomics */
   MEMORY_LOGICAL_DATA0,

   /** Second operand for two-source atomics */
   MEMORY_LOGICAL_DATA1,

   MEMORY_LOGICAL_NUM_SRCS
};

/* Bitfield values for the MEMORY_LOGICAL_FLAGS source. */
enum memory_flags {
   /** Whether this is a transposed (i.e. block) memory access */
   MEMORY_FLAG_TRANSPOSE = 1 << 0,
   /** Whether this operation should fire for helper invocations */
   MEMORY_FLAG_INCLUDE_HELPERS = 1 << 1,
};
638
/* Source ordering for RT_OPCODE_TRACE_RAY_LOGICAL. */
enum rt_logical_srcs {
   /** Address of the globals */
   RT_LOGICAL_SRC_GLOBALS,
   /** Level at which the tracing should start */
   RT_LOGICAL_SRC_BVH_LEVEL,
   /** Type of tracing operation */
   RT_LOGICAL_SRC_TRACE_RAY_CONTROL,
   /** Synchronous tracing (ray query) */
   RT_LOGICAL_SRC_SYNCHRONOUS,

   RT_LOGICAL_NUM_SRCS
};

/* Source ordering for SHADER_OPCODE_URB_{READ,WRITE}_LOGICAL. */
enum urb_logical_srcs {
   URB_LOGICAL_SRC_HANDLE,
   /** Offset in bytes on Xe2+ or OWords on older platforms */
   URB_LOGICAL_SRC_PER_SLOT_OFFSETS,
   URB_LOGICAL_SRC_CHANNEL_MASK,
   /** Data to be written.  BAD_FILE for reads. */
   URB_LOGICAL_SRC_DATA,
   URB_LOGICAL_SRC_COMPONENTS,
   URB_LOGICAL_NUM_SRCS
};

/* Source ordering for the FS_OPCODE_INTERPOLATE_AT_* opcodes. */
enum interpolator_logical_srcs {
   /** Interpolation offset */
   INTERP_SRC_OFFSET,
   /** Message data */
   INTERP_SRC_MSG_DESC,
   /** Flag register for dynamic mode */
   INTERP_SRC_DYNAMIC_MODE,

   INTERP_NUM_SRCS
};
673
/* Predication controls.  Note the Align16 and Xe2 values alias the low
 * Align1 ANYV/ALLV/ANY2H/... encodings — which interpretation applies
 * depends on the instruction's access mode and platform.
 */
enum ENUM_PACKED brw_predicate {
   BRW_PREDICATE_NONE = 0,
   BRW_PREDICATE_NORMAL = 1,
   BRW_PREDICATE_ALIGN1_ANYV = 2,
   BRW_PREDICATE_ALIGN1_ALLV = 3,
   BRW_PREDICATE_ALIGN1_ANY2H = 4,
   BRW_PREDICATE_ALIGN1_ALL2H = 5,
   BRW_PREDICATE_ALIGN1_ANY4H = 6,
   BRW_PREDICATE_ALIGN1_ALL4H = 7,
   BRW_PREDICATE_ALIGN1_ANY8H = 8,
   BRW_PREDICATE_ALIGN1_ALL8H = 9,
   BRW_PREDICATE_ALIGN1_ANY16H = 10,
   BRW_PREDICATE_ALIGN1_ALL16H = 11,
   BRW_PREDICATE_ALIGN1_ANY32H = 12,
   BRW_PREDICATE_ALIGN1_ALL32H = 13,
   BRW_PREDICATE_ALIGN16_REPLICATE_X = 2,
   BRW_PREDICATE_ALIGN16_REPLICATE_Y = 3,
   BRW_PREDICATE_ALIGN16_REPLICATE_Z = 4,
   BRW_PREDICATE_ALIGN16_REPLICATE_W = 5,
   BRW_PREDICATE_ALIGN16_ANY4H = 6,
   BRW_PREDICATE_ALIGN16_ALL4H = 7,
   XE2_PREDICATE_ANY = 2,
   XE2_PREDICATE_ALL = 3
};

/* Register files used by the compiler IR. */
enum ENUM_PACKED brw_reg_file {
   BAD_FILE = 0,

   ARF,
   FIXED_GRF,
   IMM,

   VGRF,
   ATTR,
   UNIFORM, /* prog_data->params[reg] */
};

/* CNL adds Align1 support for 3-src instructions.  Bit 35 of the instruction
 * word is "Execution Datatype" which controls whether the instruction operates
 * on float or integer types.  The register arguments have fields that offer
 * more fine control their respective types.
 */
enum ENUM_PACKED gfx10_align1_3src_exec_type {
   BRW_ALIGN1_3SRC_EXEC_TYPE_INT = 0,
   BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT = 1,
};
720
/* Architecture Register File (ARF) register numbers. */
#define BRW_ARF_NULL 0x00
#define BRW_ARF_ADDRESS 0x10
#define BRW_ARF_ACCUMULATOR 0x20
#define BRW_ARF_FLAG 0x30
#define BRW_ARF_MASK 0x40
#define BRW_ARF_STATE 0x70
#define BRW_ARF_CONTROL 0x80
#define BRW_ARF_NOTIFICATION_COUNT 0x90
#define BRW_ARF_IP 0xA0
#define BRW_ARF_TDR 0xB0
#define BRW_ARF_TIMESTAMP 0xC0

/* Thread control values. */
#define BRW_THREAD_NORMAL 0
#define BRW_THREAD_ATOMIC 1
#define BRW_THREAD_SWITCH 2

/* Source-region vertical stride.  The encoding is not the literal stride
 * (e.g. value 3 encodes stride 4).
 */
enum ENUM_PACKED brw_vertical_stride {
   BRW_VERTICAL_STRIDE_0 = 0,
   BRW_VERTICAL_STRIDE_1 = 1,
   BRW_VERTICAL_STRIDE_2 = 2,
   BRW_VERTICAL_STRIDE_4 = 3,
   BRW_VERTICAL_STRIDE_8 = 4,
   BRW_VERTICAL_STRIDE_16 = 5,
   BRW_VERTICAL_STRIDE_32 = 6,
   BRW_VERTICAL_STRIDE_ONE_DIMENSIONAL = 0xF,
};

/* Vertical stride for Align1 3-source instructions (Gfx10+).
 * NOTE(review): _1 and _2 both encode as 1 here — presumably intentional
 * per the hardware encoding table; confirm against the PRM.
 */
enum ENUM_PACKED gfx10_align1_3src_vertical_stride {
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0 = 0,
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_1 = 1,
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2 = 1,
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4 = 2,
   BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8 = 3,
};

/* Source-region width: log2 of the number of elements per row. */
enum ENUM_PACKED brw_width {
   BRW_WIDTH_1 = 0,
   BRW_WIDTH_2 = 1,
   BRW_WIDTH_4 = 2,
   BRW_WIDTH_8 = 3,
   BRW_WIDTH_16 = 4,
};
763
764 /**
765 * Gfx12+ SWSB SBID synchronization mode.
766 *
767 * This is represented as a bitmask including any required SBID token
768 * synchronization modes, used to synchronize out-of-order instructions. Only
769 * the strongest mode of the mask will be provided to the hardware in the SWSB
770 * field of an actual hardware instruction, but virtual instructions may be
771 * able to take into account multiple of them.
772 */
enum tgl_sbid_mode {
   TGL_SBID_NULL = 0,
   TGL_SBID_SRC = 1,  /* Wait until token's in-flight read completes. */
   TGL_SBID_DST = 2,  /* Wait until token's in-flight write completes. */
   TGL_SBID_SET = 4   /* Allocate the token for this instruction. */
};
779
780
/* Sub-byte operand precisions for DPAS (Gfx12+). */
enum gfx12_sub_byte_precision {
   BRW_SUB_BYTE_PRECISION_NONE = 0,

   /** 4 bits. Signedness determined by base type */
   BRW_SUB_BYTE_PRECISION_4BIT = 1,

   /** 2 bits. Signedness determined by base type */
   BRW_SUB_BYTE_PRECISION_2BIT = 2,
};

/* Systolic depth encodings for DPAS (Gfx12+).  Note the encoded value is
 * not the depth itself: 0 encodes depth 16, 1 depth 2, etc.
 */
enum gfx12_systolic_depth {
   BRW_SYSTOLIC_DEPTH_16 = 0,
   BRW_SYSTOLIC_DEPTH_2 = 1,
   BRW_SYSTOLIC_DEPTH_4 = 2,
   BRW_SYSTOLIC_DEPTH_8 = 3,
};
797
#ifdef __cplusplus
/**
 * Allow bitwise arithmetic of tgl_sbid_mode enums.
 *
 * C++ forbids implicit integer<->enum conversions, so without these
 * overloads expressions like "TGL_SBID_SRC | TGL_SBID_DST" would not
 * compile as tgl_sbid_mode.  (Plain C needs no equivalent.)
 */
inline tgl_sbid_mode
operator|(tgl_sbid_mode x, tgl_sbid_mode y)
{
   return tgl_sbid_mode(unsigned(x) | unsigned(y));
}

inline tgl_sbid_mode
operator&(tgl_sbid_mode x, tgl_sbid_mode y)
{
   return tgl_sbid_mode(unsigned(x) & unsigned(y));
}

inline tgl_sbid_mode &
operator|=(tgl_sbid_mode &x, tgl_sbid_mode y)
{
   return x = x | y;
}

#endif
821
822 /**
823 * TGL+ SWSB RegDist synchronization pipeline.
824 *
825 * On TGL all instructions that use the RegDist synchronization mechanism are
826 * considered to be executed as a single in-order pipeline, therefore only the
827 * TGL_PIPE_FLOAT pipeline is applicable. On XeHP+ platforms there are two
828 * additional asynchronous ALU pipelines (which still execute instructions
829 * in-order and use the RegDist synchronization mechanism). TGL_PIPE_NONE
830 * doesn't provide any RegDist pipeline synchronization information and allows
831 * the hardware to infer the pipeline based on the source types of the
832 * instruction. TGL_PIPE_ALL can be used when synchronization with all ALU
833 * pipelines is intended.
834 */
enum tgl_pipe {
   TGL_PIPE_NONE = 0,
   TGL_PIPE_FLOAT,
   TGL_PIPE_INT,
   TGL_PIPE_LONG,
   TGL_PIPE_MATH,
   TGL_PIPE_ALL
};

/**
 * Logical representation of the SWSB scheduling information of a hardware
 * instruction.  The binary representation is slightly more compact.
 */
struct tgl_swsb {
   unsigned regdist : 3;      /* In-order dependency distance, 0 = none. */
   enum tgl_pipe pipe : 3;    /* Pipeline the RegDist counts against. */
   unsigned sbid : 5;         /* SBID token index. */
   enum tgl_sbid_mode mode : 3; /* SBID synchronization mode bitmask. */
};
854
855 /**
856 * Construct a scheduling annotation with a single RegDist dependency. This
857 * synchronizes with the completion of the d-th previous in-order instruction.
858 * The index is one-based, zero causes a no-op tgl_swsb to be constructed.
859 */
860 static inline struct tgl_swsb
tgl_swsb_regdist(unsigned d)861 tgl_swsb_regdist(unsigned d)
862 {
863 const struct tgl_swsb swsb = { d, d ? TGL_PIPE_ALL : TGL_PIPE_NONE };
864 assert(swsb.regdist == d);
865 return swsb;
866 }
867
868 /**
869 * Construct a scheduling annotation that synchronizes with the specified SBID
870 * token.
871 */
872 static inline struct tgl_swsb
tgl_swsb_sbid(enum tgl_sbid_mode mode,unsigned sbid)873 tgl_swsb_sbid(enum tgl_sbid_mode mode, unsigned sbid)
874 {
875 const struct tgl_swsb swsb = { 0, TGL_PIPE_NONE, sbid, mode };
876 assert(swsb.sbid == sbid);
877 return swsb;
878 }
879
880 /**
881 * Construct a no-op scheduling annotation.
882 */
883 static inline struct tgl_swsb
tgl_swsb_null(void)884 tgl_swsb_null(void)
885 {
886 return tgl_swsb_regdist(0);
887 }
888
889 /**
890 * Return a scheduling annotation that allocates the same SBID synchronization
891 * token as \p swsb. In addition it will synchronize against a previous
892 * in-order instruction if \p regdist is non-zero.
893 */
894 static inline struct tgl_swsb
tgl_swsb_dst_dep(struct tgl_swsb swsb,unsigned regdist)895 tgl_swsb_dst_dep(struct tgl_swsb swsb, unsigned regdist)
896 {
897 swsb.regdist = regdist;
898 swsb.mode = swsb.mode & TGL_SBID_SET;
899 swsb.pipe = (regdist ? TGL_PIPE_ALL : TGL_PIPE_NONE);
900 return swsb;
901 }
902
903 /**
904 * Return a scheduling annotation that synchronizes against the same SBID and
905 * RegDist dependencies as \p swsb, but doesn't allocate any SBID token.
906 */
static inline struct tgl_swsb
tgl_swsb_src_dep(struct tgl_swsb swsb)
{
   /* Drop the SET bit: the SRC/DST dependencies are preserved but no new
    * SBID token is allocated by the resulting annotation.
    */
   swsb.mode = swsb.mode & (TGL_SBID_SRC | TGL_SBID_DST);
   return swsb;
}
913
914 /**
915 * Convert the provided tgl_swsb to the hardware's binary representation of an
916 * SWSB annotation.
917 */
static inline uint32_t
tgl_swsb_encode(const struct intel_device_info *devinfo,
                struct tgl_swsb swsb, enum opcode opcode)
{
   if (!swsb.mode) {
      /* Pure RegDist annotation.  On XeHP+ (verx10 >= 125) the in-order
       * pipe selector is encoded in the high bits; earlier platforms have
       * a single in-order pipe so the selector is 0.
       */
      const unsigned pipe = devinfo->verx10 < 125 ? 0 :
                            swsb.pipe == TGL_PIPE_FLOAT ? 0x10 :
                            swsb.pipe == TGL_PIPE_INT ? 0x18 :
                            swsb.pipe == TGL_PIPE_LONG ? 0x20 :
                            swsb.pipe == TGL_PIPE_MATH ? 0x28 :
                            swsb.pipe == TGL_PIPE_ALL ? 0x8 : 0;
      return pipe | swsb.regdist;

   } else if (swsb.regdist) {
      /* Combined RegDist + SBID annotation. */
      if (devinfo->ver >= 20) {
         /* Xe2+: a 2-bit mode selector disambiguates the dual dependency;
          * its meaning depends on the opcode.
          */
         unsigned mode = 0;
         if (opcode == BRW_OPCODE_DPAS) {
            mode = (swsb.mode & TGL_SBID_SET) ? 0b01 :
                   (swsb.mode & TGL_SBID_SRC) ? 0b10 :
                   /* swsb.mode & TGL_SBID_DST */ 0b11;
         } else if (swsb.mode & TGL_SBID_SET) {
            assert(opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC);
            assert(swsb.pipe == TGL_PIPE_ALL ||
                   swsb.pipe == TGL_PIPE_INT ||
                   swsb.pipe == TGL_PIPE_FLOAT);

            mode = swsb.pipe == TGL_PIPE_INT ? 0b11 :
                   swsb.pipe == TGL_PIPE_FLOAT ? 0b10 :
                   /* swsb.pipe == TGL_PIPE_ALL */ 0b01;
         } else {
            assert(!(swsb.mode & ~(TGL_SBID_DST | TGL_SBID_SRC)));
            mode = swsb.pipe == TGL_PIPE_ALL ? 0b11 :
                   swsb.mode == TGL_SBID_SRC ? 0b10 :
                   /* swsb.mode == TGL_SBID_DST */ 0b01;
         }
         return mode << 8 | swsb.regdist << 5 | swsb.sbid;
      } else {
         /* Pre-Xe2: SBID tokens are only 4 bits wide. */
         assert(!(swsb.sbid & ~0xfu));
         return 0x80 | swsb.regdist << 4 | swsb.sbid;
      }

   } else {
      /* Pure SBID annotation: the mode selects the high-bit pattern. */
      if (devinfo->ver >= 20) {
         return swsb.sbid | (swsb.mode & TGL_SBID_SET ? 0xc0 :
                             swsb.mode & TGL_SBID_DST ? 0x80 : 0xa0);
      } else {
         assert(!(swsb.sbid & ~0xfu));
         return swsb.sbid | (swsb.mode & TGL_SBID_SET ? 0x40 :
                             swsb.mode & TGL_SBID_DST ? 0x20 : 0x30);
      }
   }
}
970
971 /**
972 * Convert the provided binary representation of an SWSB annotation to a
973 * tgl_swsb.
974 */
static inline struct tgl_swsb
tgl_swsb_decode(const struct intel_device_info *devinfo,
                const bool is_unordered, const uint32_t x, enum opcode opcode)
{
   /* Inverse of tgl_swsb_encode() for the same platform generation. */
   if (devinfo->ver >= 20) {
      if (x & 0x300) {
         /* Mode isn't SingleInfo, there's a tuple */
         if (opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC) {
            /* For SEND/SENDC the 2-bit mode field encodes the pipe of the
             * RegDist part; the SBID part is always a SET.
             */
            const struct tgl_swsb swsb = {
               (x & 0xe0u) >> 5,
               ((x & 0x300) == 0x300 ? TGL_PIPE_INT :
                (x & 0x300) == 0x200 ? TGL_PIPE_FLOAT :
                TGL_PIPE_ALL),
               x & 0x1fu,
               TGL_SBID_SET
            };
            return swsb;
         } else if (opcode == BRW_OPCODE_DPAS) {
            /* For DPAS the mode field encodes the SBID mode instead. */
            const struct tgl_swsb swsb = {
               .regdist = (x & 0xe0u) >> 5,
               .pipe = TGL_PIPE_NONE,
               .sbid = x & 0x1fu,
               .mode = (x & 0x300) == 0x300 ? TGL_SBID_DST :
                       (x & 0x300) == 0x200 ? TGL_SBID_SRC :
                       TGL_SBID_SET,
            };
            return swsb;
         } else {
            const struct tgl_swsb swsb = {
               (x & 0xe0u) >> 5,
               ((x & 0x300) == 0x300 ? TGL_PIPE_ALL : TGL_PIPE_NONE),
               x & 0x1fu,
               ((x & 0x300) == 0x200 ? TGL_SBID_SRC : TGL_SBID_DST)
            };
            return swsb;
         }

      } else if ((x & 0xe0) == 0x80) {
         /* Pure SBID annotations: the 0xe0 bits select the mode. */
         return tgl_swsb_sbid(TGL_SBID_DST, x & 0x1f);
      } else if ((x & 0xe0) == 0xa0) {
         return tgl_swsb_sbid(TGL_SBID_SRC, x & 0x1fu);
      } else if ((x & 0xe0) == 0xc0) {
         return tgl_swsb_sbid(TGL_SBID_SET, x & 0x1fu);
      } else {
         /* Pure RegDist annotation with an optional pipe selector. */
         const struct tgl_swsb swsb = { x & 0x7u,
                                        ((x & 0x38) == 0x10 ? TGL_PIPE_FLOAT :
                                         (x & 0x38) == 0x18 ? TGL_PIPE_INT :
                                         (x & 0x38) == 0x20 ? TGL_PIPE_LONG :
                                         (x & 0x38) == 0x28 ? TGL_PIPE_MATH :
                                         (x & 0x38) == 0x8 ? TGL_PIPE_ALL :
                                         TGL_PIPE_NONE) };
         return swsb;
      }

   } else {
      if (x & 0x80) {
         /* Combined RegDist + SBID tuple.  The SBID mode isn't encoded;
          * it's implied by whether the instruction is unordered (SEND &co).
          */
         const struct tgl_swsb swsb = { (x & 0x70u) >> 4, TGL_PIPE_NONE,
                                        x & 0xfu,
                                        is_unordered ?
                                        TGL_SBID_SET : TGL_SBID_DST };
         return swsb;
      } else if ((x & 0x70) == 0x20) {
         return tgl_swsb_sbid(TGL_SBID_DST, x & 0xfu);
      } else if ((x & 0x70) == 0x30) {
         return tgl_swsb_sbid(TGL_SBID_SRC, x & 0xfu);
      } else if ((x & 0x70) == 0x40) {
         return tgl_swsb_sbid(TGL_SBID_SET, x & 0xfu);
      } else {
         const struct tgl_swsb swsb = { x & 0x7u,
                                        ((x & 0x78) == 0x10 ? TGL_PIPE_FLOAT :
                                         (x & 0x78) == 0x18 ? TGL_PIPE_INT :
                                         (x & 0x78) == 0x50 ? TGL_PIPE_LONG :
                                         (x & 0x78) == 0x8 ? TGL_PIPE_ALL :
                                         TGL_PIPE_NONE) };
         /* Per-pipe selectors only exist on XeHP+. */
         assert(devinfo->verx10 >= 125 || swsb.pipe == TGL_PIPE_NONE);
         return swsb;
      }
   }
}
1054
/* Sub-function encodings used by the SYNC instruction. */
enum tgl_sync_function {
   TGL_SYNC_NOP = 0x0,
   TGL_SYNC_ALLRD = 0x2,
   TGL_SYNC_ALLWR = 0x3,
   TGL_SYNC_FENCE = 0xd,
   TGL_SYNC_BAR = 0xe,
   TGL_SYNC_HOST = 0xf
};
1063
1064 /**
1065 * Message target: Shared Function ID for where to SEND a message.
1066 *
1067 * These are enumerated in the ISA reference under "send - Send Message".
1068 * In particular, see the following tables:
1069 * - G45 PRM, Volume 4, Table 14-15 "Message Descriptor Definition"
1070 * - Sandybridge PRM, Volume 4 Part 2, Table 8-16 "Extended Message Descriptor"
1071 * - Ivybridge PRM, Volume 1 Part 1, section 3.2.7 "GPE Function IDs"
1072 */
1073 enum brw_message_target {
1074 BRW_SFID_NULL = 0,
1075 BRW_SFID_SAMPLER = 2,
1076 BRW_SFID_MESSAGE_GATEWAY = 3,
1077 BRW_SFID_URB = 6,
1078 BRW_SFID_THREAD_SPAWNER = 7,
1079 BRW_SFID_VME = 8,
1080
1081 GFX6_SFID_DATAPORT_SAMPLER_CACHE = 4,
1082 GFX6_SFID_DATAPORT_RENDER_CACHE = 5,
1083 GFX6_SFID_DATAPORT_CONSTANT_CACHE = 9,
1084
1085 GFX7_SFID_DATAPORT_DATA_CACHE = 10,
1086 GFX7_SFID_PIXEL_INTERPOLATOR = 11,
1087 HSW_SFID_DATAPORT_DATA_CACHE_1 = 12,
1088 HSW_SFID_CRE = 13,
1089
1090 GFX12_SFID_TGM = 13, /* Typed Global Memory */
1091 GFX12_SFID_SLM = 14, /* Shared Local Memory */
1092 GFX12_SFID_UGM = 15, /* Untyped Global Memory */
1093
1094 GEN_RT_SFID_BINDLESS_THREAD_DISPATCH = 7,
1095 GEN_RT_SFID_RAY_TRACE_ACCELERATOR = 8,
1096 };
1097
#define GFX7_MESSAGE_TARGET_DP_DATA_CACHE 10

/* Sampler message return data formats. */
#define BRW_SAMPLER_RETURN_FORMAT_FLOAT32 0
#define BRW_SAMPLER_RETURN_FORMAT_UINT32 2
#define BRW_SAMPLER_RETURN_FORMAT_SINT32 3

#define GFX8_SAMPLER_RETURN_FORMAT_32BITS 0
#define GFX8_SAMPLER_RETURN_FORMAT_16BITS 1

/* Sampler engine message types. */
#define GFX5_SAMPLER_MESSAGE_SAMPLE 0
#define GFX5_SAMPLER_MESSAGE_SAMPLE_BIAS 1
#define GFX5_SAMPLER_MESSAGE_SAMPLE_LOD 2
#define GFX5_SAMPLER_MESSAGE_SAMPLE_COMPARE 3
#define GFX5_SAMPLER_MESSAGE_SAMPLE_DERIVS 4
#define GFX5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE 5
#define GFX5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE 6
#define GFX5_SAMPLER_MESSAGE_SAMPLE_LD 7
#define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4 8
#define GFX5_SAMPLER_MESSAGE_LOD 9
#define GFX5_SAMPLER_MESSAGE_SAMPLE_RESINFO 10
#define GFX6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO 11
#define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_L 13
#define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_B 14
#define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_I 15
#define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C 16
#define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO 17
#define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C 18
#define XE2_SAMPLER_MESSAGE_SAMPLE_MLOD 18
#define XE2_SAMPLER_MESSAGE_SAMPLE_COMPARE_MLOD 19
#define HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE 20
#define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_I_C 21
#define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_L_C 23
#define GFX9_SAMPLER_MESSAGE_SAMPLE_LZ 24
#define GFX9_SAMPLER_MESSAGE_SAMPLE_C_LZ 25
#define GFX9_SAMPLER_MESSAGE_SAMPLE_LD_LZ 26
#define GFX9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W 28
#define GFX7_SAMPLER_MESSAGE_SAMPLE_LD_MCS 29
#define GFX7_SAMPLER_MESSAGE_SAMPLE_LD2DMS 30
#define GFX7_SAMPLER_MESSAGE_SAMPLE_LD2DSS 31
#define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_L 45
#define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_B 46
#define XE2_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_L_C 55

/* Sampler message SIMD modes. */
/* for GFX5 only */
#define BRW_SAMPLER_SIMD_MODE_SIMD4X2 0
#define BRW_SAMPLER_SIMD_MODE_SIMD8 1
#define BRW_SAMPLER_SIMD_MODE_SIMD16 2
#define BRW_SAMPLER_SIMD_MODE_SIMD32_64 3

#define GFX10_SAMPLER_SIMD_MODE_SIMD8H 5
#define GFX10_SAMPLER_SIMD_MODE_SIMD16H 6

/* Xe2 re-numbers the SIMD modes (note the values differ from the pre-Xe2
 * encodings above).
 */
#define XE2_SAMPLER_SIMD_MODE_SIMD16 1
#define XE2_SAMPLER_SIMD_MODE_SIMD32 2
#define XE2_SAMPLER_SIMD_MODE_SIMD16H 5
#define XE2_SAMPLER_SIMD_MODE_SIMD32H 6

/* GFX9 changes SIMD mode 0 to mean SIMD8D, but lets us get the SIMD4x2
 * behavior by setting bit 22 of dword 2 in the message header. */
#define GFX9_SAMPLER_SIMD_MODE_SIMD8D 0
#define GFX9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2 (1 << 22)
1159
/* Dataport OWord block-size encodings. */
#define BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW 0
#define BRW_DATAPORT_OWORD_BLOCK_1_OWORDHIGH 1
#define BRW_DATAPORT_OWORD_BLOCK_2_OWORDS 2
#define BRW_DATAPORT_OWORD_BLOCK_4_OWORDS 3
#define BRW_DATAPORT_OWORD_BLOCK_8_OWORDS 4
#define GFX12_DATAPORT_OWORD_BLOCK_16_OWORDS 5
/* Map an OWord count to the block-size encoding above; any other count
 * abort()s at runtime.
 */
#define BRW_DATAPORT_OWORD_BLOCK_OWORDS(n) \
   ((n) == 1 ? BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW : \
    (n) == 2 ? BRW_DATAPORT_OWORD_BLOCK_2_OWORDS : \
    (n) == 4 ? BRW_DATAPORT_OWORD_BLOCK_4_OWORDS : \
    (n) == 8 ? BRW_DATAPORT_OWORD_BLOCK_8_OWORDS : \
    (n) == 16 ? GFX12_DATAPORT_OWORD_BLOCK_16_OWORDS : \
    (abort(), ~0))
/* Same mapping, with the size given in DWords (4 DWords per OWord). */
#define BRW_DATAPORT_OWORD_BLOCK_DWORDS(n) \
   ((n) == 4 ? BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW : \
    (n) == 8 ? BRW_DATAPORT_OWORD_BLOCK_2_OWORDS : \
    (n) == 16 ? BRW_DATAPORT_OWORD_BLOCK_4_OWORDS : \
    (n) == 32 ? BRW_DATAPORT_OWORD_BLOCK_8_OWORDS : \
    (abort(), ~0))

#define BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD 0
#define BRW_DATAPORT_OWORD_DUAL_BLOCK_4OWORDS 2

#define BRW_DATAPORT_DWORD_SCATTERED_BLOCK_8DWORDS 2
#define BRW_DATAPORT_DWORD_SCATTERED_BLOCK_16DWORDS 3
1185
/* This one stays the same across generations. */
#define BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ 0
/* GFX6 */
#define GFX6_DATAPORT_READ_MESSAGE_RENDER_UNORM_READ 1
#define GFX6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ 2
#define GFX6_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ 4
#define GFX6_DATAPORT_READ_MESSAGE_OWORD_UNALIGN_BLOCK_READ 5
#define GFX6_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ 6

/* Render-target-write message subtypes (SIMD mode / source layout). */
#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE 0
#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED 1
#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01 2
#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23 3
#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01 4

#define XE2_DATAPORT_RENDER_TARGET_WRITE_SIMD32_SINGLE_SOURCE 1
#define XE2_DATAPORT_RENDER_TARGET_WRITE_SIMD16_DUAL_SOURCE 2

/* GFX6 */
#define GFX6_DATAPORT_WRITE_MESSAGE_DWORD_ATOMIC_WRITE 7
#define GFX6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE 8
#define GFX6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE 9
#define GFX6_DATAPORT_WRITE_MESSAGE_MEDIA_BLOCK_WRITE 10
#define GFX6_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE 11
#define GFX6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE 12
#define GFX6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE 13
#define GFX6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_UNORM_WRITE 14

/* GFX7.  RC = render cache data port, DC = data cache data port (see the
 * corresponding *_SFID_DATAPORT_* entries in enum brw_message_target).
 */
#define GFX7_DATAPORT_RC_MEDIA_BLOCK_READ 4
#define GFX7_DATAPORT_RC_TYPED_SURFACE_READ 5
#define GFX7_DATAPORT_RC_TYPED_ATOMIC_OP 6
#define GFX7_DATAPORT_RC_MEMORY_FENCE 7
#define GFX7_DATAPORT_RC_MEDIA_BLOCK_WRITE 10
#define GFX7_DATAPORT_RC_RENDER_TARGET_WRITE 12
#define GFX7_DATAPORT_RC_TYPED_SURFACE_WRITE 13
#define GFX7_DATAPORT_DC_OWORD_BLOCK_READ 0
#define GFX7_DATAPORT_DC_UNALIGNED_OWORD_BLOCK_READ 1
#define GFX7_DATAPORT_DC_OWORD_DUAL_BLOCK_READ 2
#define GFX7_DATAPORT_DC_DWORD_SCATTERED_READ 3
#define GFX7_DATAPORT_DC_BYTE_SCATTERED_READ 4
#define GFX7_DATAPORT_DC_UNTYPED_SURFACE_READ 5
#define GFX7_DATAPORT_DC_UNTYPED_ATOMIC_OP 6
#define GFX7_DATAPORT_DC_MEMORY_FENCE 7
#define GFX7_DATAPORT_DC_OWORD_BLOCK_WRITE 8
#define GFX7_DATAPORT_DC_OWORD_DUAL_BLOCK_WRITE 10
#define GFX7_DATAPORT_DC_DWORD_SCATTERED_WRITE 11
#define GFX7_DATAPORT_DC_BYTE_SCATTERED_WRITE 12
#define GFX7_DATAPORT_DC_UNTYPED_SURFACE_WRITE 13

/* Scratch read/write message descriptor bits (bit 18 = scratch, bit 17 =
 * write).
 */
#define GFX7_DATAPORT_SCRATCH_READ ((1 << 18) | \
                                    (0 << 17))
#define GFX7_DATAPORT_SCRATCH_WRITE ((1 << 18) | \
                                     (1 << 17))
#define GFX7_DATAPORT_SCRATCH_NUM_REGS_SHIFT 12

/* Pixel interpolator message: interpolation location selection. */
#define GFX7_PIXEL_INTERPOLATOR_LOC_SHARED_OFFSET 0
#define GFX7_PIXEL_INTERPOLATOR_LOC_SAMPLE 1
#define GFX7_PIXEL_INTERPOLATOR_LOC_CENTROID 2
#define GFX7_PIXEL_INTERPOLATOR_LOC_PER_SLOT_OFFSET 3
1246
/* HSW */
#define HSW_DATAPORT_DC_PORT0_OWORD_BLOCK_READ 0
#define HSW_DATAPORT_DC_PORT0_UNALIGNED_OWORD_BLOCK_READ 1
#define HSW_DATAPORT_DC_PORT0_OWORD_DUAL_BLOCK_READ 2
#define HSW_DATAPORT_DC_PORT0_DWORD_SCATTERED_READ 3
#define HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ 4
#define HSW_DATAPORT_DC_PORT0_MEMORY_FENCE 7
#define HSW_DATAPORT_DC_PORT0_OWORD_BLOCK_WRITE 8
#define HSW_DATAPORT_DC_PORT0_OWORD_DUAL_BLOCK_WRITE 10
#define HSW_DATAPORT_DC_PORT0_DWORD_SCATTERED_WRITE 11
#define HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE 12

/* Data cache data port 1 messages (HSW_SFID_DATAPORT_DATA_CACHE_1). */
#define HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ 1
#define HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP 2
#define HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2 3
#define HSW_DATAPORT_DC_PORT1_MEDIA_BLOCK_READ 4
#define HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ 5
#define HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP 6
#define HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2 7
#define HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE 9
#define HSW_DATAPORT_DC_PORT1_MEDIA_BLOCK_WRITE 10
#define HSW_DATAPORT_DC_PORT1_ATOMIC_COUNTER_OP 11
#define HSW_DATAPORT_DC_PORT1_ATOMIC_COUNTER_OP_SIMD4X2 12
#define HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE 13
/* A64 variants use 64-bit stateless addresses (see the "A32 Stateless"
 * discussion below).
 */
#define GFX9_DATAPORT_DC_PORT1_A64_SCATTERED_READ 0x10
#define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_READ 0x11
#define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_OP 0x12
#define GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_INT_OP 0x13
#define GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_READ 0x14
#define GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_WRITE 0x15
#define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_WRITE 0x19
#define GFX8_DATAPORT_DC_PORT1_A64_SCATTERED_WRITE 0x1a
#define GFX9_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_FLOAT_OP 0x1b
#define GFX9_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_FLOAT_OP 0x1d
#define GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_FLOAT_OP 0x1e

/* GFX9 */
#define GFX9_DATAPORT_RC_RENDER_TARGET_WRITE 12
#define GFX9_DATAPORT_RC_RENDER_TARGET_READ 13

/* A64 scattered message subtype */
#define GFX8_A64_SCATTERED_SUBTYPE_BYTE 0
#define GFX8_A64_SCATTERED_SUBTYPE_DWORD 1
#define GFX8_A64_SCATTERED_SUBTYPE_QWORD 2
#define GFX8_A64_SCATTERED_SUBTYPE_HWORD 3

/* Dataport special binding table indices: */
#define BRW_BTI_STATELESS 255
#define GFX7_BTI_SLM 254

/* The hardware docs are a bit contradictory here. On Haswell, where they
 * first added cache ability control, there were 5 different cache modes (see
 * HSW_BTI_STATELESS_* above). On Broadwell, they reduced to two:
 *
 * - IA-Coherent (BTI=255): Coherent within Gen and coherent within the
 * entire IA cache memory hierarchy.
 *
 * - Non-Coherent (BTI=253): Coherent within Gen, same cache type.
 *
 * Information about stateless cache coherency can be found in the "A32
 * Stateless" section of the "3D Media GPGPU" volume of the PRM for each
 * hardware generation.
 *
 * Unfortunately, the docs for MDC_STATELESS appear to have been copied and
 * pasted from Haswell and give the Haswell definitions for the BTI values of
 * 255 and 253 including a warning about accessing 253 surfaces from multiple
 * threads. This seems to be a copy+paste error and the definitions from the
 * "A32 Stateless" section should be trusted instead.
 *
 * Note that because the DRM sets bit 4 of HDC_CHICKEN0 on BDW, CHV and at
 * least some pre-production steppings of SKL due to WaForceEnableNonCoherent,
 * HDC memory access may have been overridden by the kernel to be non-coherent
 * (matching the behavior of the same BTI on pre-Gfx8 hardware) and BTI 255
 * may actually be an alias for BTI 253.
 */
#define GFX8_BTI_STATELESS_IA_COHERENT 255
#define GFX8_BTI_STATELESS_NON_COHERENT 253
#define GFX9_BTI_BINDLESS 252
1325
/* Dataport atomic operations for Untyped Atomic Integer Operation message
 * (and others).
 */
#define BRW_AOP_AND 1
#define BRW_AOP_OR 2
#define BRW_AOP_XOR 3
#define BRW_AOP_MOV 4
#define BRW_AOP_INC 5
#define BRW_AOP_DEC 6
#define BRW_AOP_ADD 7
#define BRW_AOP_SUB 8
#define BRW_AOP_REVSUB 9
#define BRW_AOP_IMAX 10
#define BRW_AOP_IMIN 11
#define BRW_AOP_UMAX 12
#define BRW_AOP_UMIN 13
#define BRW_AOP_CMPWR 14
#define BRW_AOP_PREDEC 15

/* Dataport atomic operations for Untyped Atomic Float Operation message.
 * These values intentionally overlap the integer BRW_AOP_* codes above;
 * the message type determines which encoding applies.
 */
#define BRW_AOP_FMAX 1
#define BRW_AOP_FMIN 2
#define BRW_AOP_FCMPWR 3
#define BRW_AOP_FADD 4

/* MATH instruction function control values. */
#define BRW_MATH_FUNCTION_INV 1
#define BRW_MATH_FUNCTION_LOG 2
#define BRW_MATH_FUNCTION_EXP 3
#define BRW_MATH_FUNCTION_SQRT 4
#define BRW_MATH_FUNCTION_RSQ 5
#define BRW_MATH_FUNCTION_SIN 6
#define BRW_MATH_FUNCTION_COS 7
#define BRW_MATH_FUNCTION_FDIV 9 /* gfx6+ */
#define BRW_MATH_FUNCTION_POW 10
#define BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER 11
#define BRW_MATH_FUNCTION_INT_DIV_QUOTIENT 12
#define BRW_MATH_FUNCTION_INT_DIV_REMAINDER 13
#define GFX8_MATH_FUNCTION_INVM 14
#define GFX8_MATH_FUNCTION_RSQRTM 15
1365
/* URB message opcodes. */
#define GFX7_URB_OPCODE_ATOMIC_MOV 4
#define GFX7_URB_OPCODE_ATOMIC_INC 5
#define GFX8_URB_OPCODE_ATOMIC_ADD 6
#define GFX8_URB_OPCODE_SIMD8_WRITE 7
#define GFX8_URB_OPCODE_SIMD8_READ 8
#define GFX125_URB_OPCODE_FENCE 9

#define BRW_URB_SWIZZLE_NONE 0
#define BRW_URB_SWIZZLE_INTERLEAVE 1
#define BRW_URB_SWIZZLE_TRANSPOSE 2

/* Message Gateway shared-function sub-opcodes. */
#define BRW_MESSAGE_GATEWAY_SFID_OPEN_GATEWAY 0
#define BRW_MESSAGE_GATEWAY_SFID_CLOSE_GATEWAY 1
#define BRW_MESSAGE_GATEWAY_SFID_FORWARD_MSG 2
#define BRW_MESSAGE_GATEWAY_SFID_GET_TIMESTAMP 3
#define BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG 4
#define BRW_MESSAGE_GATEWAY_SFID_UPDATE_GATEWAY_STATE 5
#define BRW_MESSAGE_GATEWAY_SFID_MMIO_READ_WRITE 6


/* Gfx7 "GS URB Entry Allocation Size" is a U9-1 field, so the maximum gs_size
 * is 2^9, or 512. It's counted in multiples of 64 bytes.
 *
 * Identical for VS, DS, and HS.
 */
#define GFX7_MAX_GS_URB_ENTRY_SIZE_BYTES (512*64)
#define GFX7_MAX_DS_URB_ENTRY_SIZE_BYTES (512*64)
#define GFX7_MAX_HS_URB_ENTRY_SIZE_BYTES (512*64)
#define GFX7_MAX_VS_URB_ENTRY_SIZE_BYTES (512*64)

/* GS Thread Payload
 */

/* 3DSTATE_GS "Output Vertex Size" has an effective maximum of 62. It's
 * counted in multiples of 16 bytes.
 */
#define GFX7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES (62*16)
1403
1404
/* CR0.0[5:4] Floating-Point Rounding Modes
 * Skylake PRM, Volume 7 Part 1, "Control Register", page 756
 */

#define BRW_CR0_RND_MODE_MASK 0x30
#define BRW_CR0_RND_MODE_SHIFT 4

enum ENUM_PACKED brw_rnd_mode {
   BRW_RND_MODE_RTNE = 0, /* Round to Nearest or Even */
   BRW_RND_MODE_RU = 1, /* Round Up, toward +inf */
   BRW_RND_MODE_RD = 2, /* Round Down, toward -inf */
   BRW_RND_MODE_RTZ = 3, /* Round Toward Zero */
   BRW_RND_MODE_UNSPECIFIED, /* Unspecified rounding mode */
};

/* CR0 denorm-preserve control bits. */
#define BRW_CR0_FP64_DENORM_PRESERVE (1 << 6)
#define BRW_CR0_FP32_DENORM_PRESERVE (1 << 7)
#define BRW_CR0_FP16_DENORM_PRESERVE (1 << 10)

/* Union of all CR0 floating-point mode bits above: denorm handling plus
 * rounding mode.
 */
#define BRW_CR0_FP_MODE_MASK (BRW_CR0_FP64_DENORM_PRESERVE | \
                              BRW_CR0_FP32_DENORM_PRESERVE | \
                              BRW_CR0_FP16_DENORM_PRESERVE | \
                              BRW_CR0_RND_MODE_MASK)
1428
/* MDC_DS - Data Size Message Descriptor Control Field
 * Skylake PRM, Volume 2d, page 129
 *
 * Specifies the number of Bytes to be read or written per Dword used at
 * byte_scattered read/write and byte_scaled read/write messages.
 */
#define GFX7_BYTE_SCATTERED_DATA_ELEMENT_BYTE 0
#define GFX7_BYTE_SCATTERED_DATA_ELEMENT_WORD 1
#define GFX7_BYTE_SCATTERED_DATA_ELEMENT_DWORD 2

/* Ray-tracing bindless thread dispatch (BTD) message type. */
#define GEN_RT_BTD_MESSAGE_SPAWN 1

/* Trace-ray message types.  Note: "INITAL" is a long-standing misspelling of
 * "INITIAL", kept as-is because the macro name is referenced by callers.
 */
#define GEN_RT_TRACE_RAY_INITAL 0
#define GEN_RT_TRACE_RAY_INSTANCE 1
#define GEN_RT_TRACE_RAY_COMMIT 2
#define GEN_RT_TRACE_RAY_CONTINUE 3

/* Shader types for BTD spawn messages. */
#define GEN_RT_BTD_SHADER_TYPE_ANY_HIT 0
#define GEN_RT_BTD_SHADER_TYPE_CLOSEST_HIT 1
#define GEN_RT_BTD_SHADER_TYPE_MISS 2
#define GEN_RT_BTD_SHADER_TYPE_INTERSECTION 3
1450
/* Starting with Xe-HPG, the old dataport was massively reworked.  The new
 * thing, called Load/Store Cache or LSC, has a significantly improved
 * interface.  Instead of bespoke messages for every case, there's basically
 * one or two messages with different bits to control things like address
 * size, how much data is read/written, etc.  It's way nicer but also means we
 * get to rewrite all our dataport encoding/decoding code.  The enums below
 * describe the new interface.
 */
/* Load/Store Cache (LSC) message opcodes. */
enum lsc_opcode {
   LSC_OP_LOAD = 0,
   LSC_OP_LOAD_CMASK = 2,
   LSC_OP_STORE = 4,
   LSC_OP_STORE_CMASK = 6,
   LSC_OP_ATOMIC_INC = 8,
   LSC_OP_ATOMIC_DEC = 9,
   LSC_OP_ATOMIC_LOAD = 10,
   LSC_OP_ATOMIC_STORE = 11,
   LSC_OP_ATOMIC_ADD = 12,
   LSC_OP_ATOMIC_SUB = 13,
   LSC_OP_ATOMIC_MIN = 14,
   LSC_OP_ATOMIC_MAX = 15,
   LSC_OP_ATOMIC_UMIN = 16,
   LSC_OP_ATOMIC_UMAX = 17,
   LSC_OP_ATOMIC_CMPXCHG = 18,
   LSC_OP_ATOMIC_FADD = 19,
   LSC_OP_ATOMIC_FSUB = 20,
   LSC_OP_ATOMIC_FMIN = 21,
   LSC_OP_ATOMIC_FMAX = 22,
   LSC_OP_ATOMIC_FCMPXCHG = 23,
   LSC_OP_ATOMIC_AND = 24,
   LSC_OP_ATOMIC_OR = 25,
   LSC_OP_ATOMIC_XOR = 26,
   LSC_OP_FENCE = 31
};
1485
1486 /*
1487 * Specifies the size of the dataport address payload in registers.
1488 */
1489 enum ENUM_PACKED lsc_addr_reg_size {
1490 LSC_ADDR_REG_SIZE_1 = 1,
1491 LSC_ADDR_REG_SIZE_2 = 2,
1492 LSC_ADDR_REG_SIZE_3 = 3,
1493 LSC_ADDR_REG_SIZE_4 = 4,
1494 LSC_ADDR_REG_SIZE_6 = 6,
1495 LSC_ADDR_REG_SIZE_8 = 8,
1496 };
1497
1498 /*
1499 * Specifies the size of the address payload item in a dataport message.
1500 */
1501 enum ENUM_PACKED lsc_addr_size {
1502 LSC_ADDR_SIZE_A16 = 1, /* 16-bit address offset */
1503 LSC_ADDR_SIZE_A32 = 2, /* 32-bit address offset */
1504 LSC_ADDR_SIZE_A64 = 3, /* 64-bit address offset */
1505 };
1506
1507 /*
1508 * Specifies the type of the address payload item in a dataport message. The
1509 * address type specifies how the dataport message decodes the Extended
1510 * Descriptor for the surface attributes and address calculation.
1511 */
1512 enum ENUM_PACKED lsc_addr_surface_type {
1513 LSC_ADDR_SURFTYPE_FLAT = 0, /* Flat */
1514 LSC_ADDR_SURFTYPE_BSS = 1, /* Bindless surface state */
1515 LSC_ADDR_SURFTYPE_SS = 2, /* Surface state */
1516 LSC_ADDR_SURFTYPE_BTI = 3, /* Binding table index */
1517 };
1518
1519 /*
1520 * Specifies the dataport message override to the default L1 and L3 memory
1521 * cache policies. Dataport L1 cache policies are uncached (UC), cached (C),
1522 * cache streaming (S) and invalidate-after-read (IAR). Dataport L3 cache
1523 * policies are uncached (UC) and cached (C).
1524 */
1525 enum lsc_cache_load {
1526 /* No override. Use the non-pipelined state or surface state cache settings
1527 * for L1 and L3.
1528 */
1529 LSC_CACHE_LOAD_L1STATE_L3MOCS = 0,
1530 /* Override to L1 uncached and L3 uncached */
1531 LSC_CACHE_LOAD_L1UC_L3UC = 1,
1532 /* Override to L1 uncached and L3 cached */
1533 LSC_CACHE_LOAD_L1UC_L3C = 2,
1534 /* Override to L1 cached and L3 uncached */
1535 LSC_CACHE_LOAD_L1C_L3UC = 3,
1536 /* Override to cache at both L1 and L3 */
1537 LSC_CACHE_LOAD_L1C_L3C = 4,
1538 /* Override to L1 streaming load and L3 uncached */
1539 LSC_CACHE_LOAD_L1S_L3UC = 5,
1540 /* Override to L1 streaming load and L3 cached */
1541 LSC_CACHE_LOAD_L1S_L3C = 6,
1542 /* For load messages, override to L1 invalidate-after-read, and L3 cached. */
1543 LSC_CACHE_LOAD_L1IAR_L3C = 7,
1544 };
1545
1546 /*
1547 * Specifies the dataport message override to the default L1 and L3 memory
1548 * cache policies. Dataport L1 cache policies are uncached (UC), cached (C),
1549 * streaming (S) and invalidate-after-read (IAR). Dataport L3 cache policies
1550 * are uncached (UC), cached (C), cached-as-a-constand (CC) and
1551 * invalidate-after-read (IAR).
1552 */
1553 enum PACKED xe2_lsc_cache_load {
1554 /* No override. Use the non-pipelined or surface state cache settings for L1
1555 * and L3.
1556 */
1557 XE2_LSC_CACHE_LOAD_L1STATE_L3MOCS = 0,
1558 /* Override to L1 uncached and L3 uncached */
1559 XE2_LSC_CACHE_LOAD_L1UC_L3UC = 2,
1560 /* Override to L1 uncached and L3 cached */
1561 XE2_LSC_CACHE_LOAD_L1UC_L3C = 4,
1562 /* Override to L1 uncached and L3 cached as a constant */
1563 XE2_LSC_CACHE_LOAD_L1UC_L3CC = 5,
1564 /* Override to L1 cached and L3 uncached */
1565 XE2_LSC_CACHE_LOAD_L1C_L3UC = 6,
1566 /* Override to L1 cached and L3 cached */
1567 XE2_LSC_CACHE_LOAD_L1C_L3C = 8,
1568 /* Override to L1 cached and L3 cached as a constant */
1569 XE2_LSC_CACHE_LOAD_L1C_L3CC = 9,
1570 /* Override to L1 cached as streaming load and L3 uncached */
1571 XE2_LSC_CACHE_LOAD_L1S_L3UC = 10,
1572 /* Override to L1 cached as streaming load and L3 cached */
1573 XE2_LSC_CACHE_LOAD_L1S_L3C = 12,
1574 /* Override to L1 and L3 invalidate after read */
1575 XE2_LSC_CACHE_LOAD_L1IAR_L3IAR = 14,
1576
1577 };
1578
1579 /*
1580 * Specifies the dataport message override to the default L1 and L3 memory
1581 * cache policies. Dataport L1 cache policies are uncached (UC), write-through
1582 * (WT), write-back (WB) and streaming (S). Dataport L3 cache policies are
1583 * uncached (UC) and cached (WB).
1584 */
1585 enum ENUM_PACKED lsc_cache_store {
1586 /* No override. Use the non-pipelined or surface state cache settings for L1
1587 * and L3.
1588 */
1589 LSC_CACHE_STORE_L1STATE_L3MOCS = 0,
1590 /* Override to L1 uncached and L3 uncached */
1591 LSC_CACHE_STORE_L1UC_L3UC = 1,
1592 /* Override to L1 uncached and L3 cached */
1593 LSC_CACHE_STORE_L1UC_L3WB = 2,
1594 /* Override to L1 write-through and L3 uncached */
1595 LSC_CACHE_STORE_L1WT_L3UC = 3,
1596 /* Override to L1 write-through and L3 cached */
1597 LSC_CACHE_STORE_L1WT_L3WB = 4,
1598 /* Override to L1 streaming and L3 uncached */
1599 LSC_CACHE_STORE_L1S_L3UC = 5,
1600 /* Override to L1 streaming and L3 cached */
1601 LSC_CACHE_STORE_L1S_L3WB = 6,
1602 /* Override to L1 write-back, and L3 cached */
1603 LSC_CACHE_STORE_L1WB_L3WB = 7,
1604
1605 };
1606
1607 /*
1608 * Specifies the dataport message override to the default L1 and L3 memory
1609 * cache policies. Dataport L1 cache policies are uncached (UC), write-through
1610 * (WT), write-back (WB) and streaming (S). Dataport L3 cache policies are
1611 * uncached (UC) and cached (WB).
1612 */
1613 enum PACKED xe2_lsc_cache_store {
1614 /* No override. Use the non-pipelined or surface state cache settings for L1
1615 * and L3.
1616 */
1617 XE2_LSC_CACHE_STORE_L1STATE_L3MOCS = 0,
1618 /* Override to L1 uncached and L3 uncached */
1619 XE2_LSC_CACHE_STORE_L1UC_L3UC = 2,
1620 /* Override to L1 uncached and L3 cached */
1621 XE2_LSC_CACHE_STORE_L1UC_L3WB = 4,
1622 /* Override to L1 write-through and L3 uncached */
1623 XE2_LSC_CACHE_STORE_L1WT_L3UC = 6,
1624 /* Override to L1 write-through and L3 cached */
1625 XE2_LSC_CACHE_STORE_L1WT_L3WB = 8,
1626 /* Override to L1 streaming and L3 uncached */
1627 XE2_LSC_CACHE_STORE_L1S_L3UC = 10,
1628 /* Override to L1 streaming and L3 cached */
1629 XE2_LSC_CACHE_STORE_L1S_L3WB = 12,
1630 /* Override to L1 write-back and L3 cached */
1631 XE2_LSC_CACHE_STORE_L1WB_L3WB = 14,
1632
1633 };
1634
/* Select the cache-policy enumerant appropriate for the device: pre-Xe2
 * (ver < 20) parts use the LSC_CACHE_* encodings, Xe2+ parts use the
 * XE2_LSC_CACHE_* ones.  l_or_s is LOAD or STORE; cc is the policy suffix
 * (e.g. L1C_L3C).
 */
#define LSC_CACHE(devinfo, l_or_s, cc)                              \
   ((devinfo)->ver < 20 ? (unsigned)LSC_CACHE_ ## l_or_s ## _ ## cc : \
    (unsigned)XE2_LSC_CACHE_ ## l_or_s ## _ ## cc)
1638
1639 /*
1640 * Specifies which components of the data payload 4-element vector (X,Y,Z,W) is
1641 * packed into the register payload.
1642 */
1643 enum ENUM_PACKED lsc_cmask {
1644 LSC_CMASK_X = 0x1,
1645 LSC_CMASK_Y = 0x2,
1646 LSC_CMASK_XY = 0x3,
1647 LSC_CMASK_Z = 0x4,
1648 LSC_CMASK_XZ = 0x5,
1649 LSC_CMASK_YZ = 0x6,
1650 LSC_CMASK_XYZ = 0x7,
1651 LSC_CMASK_W = 0x8,
1652 LSC_CMASK_XW = 0x9,
1653 LSC_CMASK_YW = 0xa,
1654 LSC_CMASK_XYW = 0xb,
1655 LSC_CMASK_ZW = 0xc,
1656 LSC_CMASK_XZW = 0xd,
1657 LSC_CMASK_YZW = 0xe,
1658 LSC_CMASK_XYZW = 0xf,
1659 };
1660
1661 /*
1662 * Specifies the size of the data payload item in a dataport message.
1663 */
1664 enum ENUM_PACKED lsc_data_size {
1665 /* 8-bit scalar data value in memory, packed into a 8-bit data value in
1666 * register.
1667 */
1668 LSC_DATA_SIZE_D8 = 0,
1669 /* 16-bit scalar data value in memory, packed into a 16-bit data value in
1670 * register.
1671 */
1672 LSC_DATA_SIZE_D16 = 1,
1673 /* 32-bit scalar data value in memory, packed into 32-bit data value in
1674 * register.
1675 */
1676 LSC_DATA_SIZE_D32 = 2,
1677 /* 64-bit scalar data value in memory, packed into 64-bit data value in
1678 * register.
1679 */
1680 LSC_DATA_SIZE_D64 = 3,
1681 /* 8-bit scalar data value in memory, packed into 32-bit unsigned data value
1682 * in register.
1683 */
1684 LSC_DATA_SIZE_D8U32 = 4,
1685 /* 16-bit scalar data value in memory, packed into 32-bit unsigned data
1686 * value in register.
1687 */
1688 LSC_DATA_SIZE_D16U32 = 5,
1689 /* 16-bit scalar BigFloat data value in memory, packed into 32-bit float
1690 * value in register.
1691 */
1692 LSC_DATA_SIZE_D16BF32 = 6,
1693 };
1694
1695 /*
1696 * Enum specifies the scope of the fence.
1697 */
1698 enum ENUM_PACKED lsc_fence_scope {
1699 /* Wait until all previous memory transactions from this thread are observed
1700 * within the local thread-group.
1701 */
1702 LSC_FENCE_THREADGROUP = 0,
1703 /* Wait until all previous memory transactions from this thread are observed
1704 * within the local sub-slice.
1705 */
1706 LSC_FENCE_LOCAL = 1,
1707 /* Wait until all previous memory transactions from this thread are observed
1708 * in the local tile.
1709 */
1710 LSC_FENCE_TILE = 2,
1711 /* Wait until all previous memory transactions from this thread are observed
1712 * in the local GPU.
1713 */
1714 LSC_FENCE_GPU = 3,
1715 /* Wait until all previous memory transactions from this thread are observed
1716 * across all GPUs in the system.
1717 */
1718 LSC_FENCE_ALL_GPU = 4,
1719 /* Wait until all previous memory transactions from this thread are observed
1720 * at the "system" level.
1721 */
1722 LSC_FENCE_SYSTEM_RELEASE = 5,
1723 /* For GPUs that do not follow PCIe Write ordering for downstream writes
1724 * targeting device memory, a fence message with scope=System_Acquire will
1725 * commit to device memory all downstream and peer writes that have reached
1726 * the device.
1727 */
1728 LSC_FENCE_SYSTEM_ACQUIRE = 6,
1729 };
1730
1731 /*
1732 * Specifies the type of cache flush operation to perform after a fence is
1733 * complete.
1734 */
1735 enum ENUM_PACKED lsc_flush_type {
1736 LSC_FLUSH_TYPE_NONE = 0,
1737 /*
1738 * For a R/W cache, evict dirty lines (M to I state) and invalidate clean
1739 * lines. For a RO cache, invalidate clean lines.
1740 */
1741 LSC_FLUSH_TYPE_EVICT = 1,
1742 /*
1743 * For both R/W and RO cache, invalidate clean lines in the cache.
1744 */
1745 LSC_FLUSH_TYPE_INVALIDATE = 2,
1746 /*
1747 * For a R/W cache, invalidate dirty lines (M to I state), without
1748 * write-back to next level. This opcode does nothing for a RO cache.
1749 */
1750 LSC_FLUSH_TYPE_DISCARD = 3,
1751 /*
1752 * For a R/W cache, write-back dirty lines to the next level, but kept in
1753 * the cache as "clean" (M to V state). This opcode does nothing for a RO
1754 * cache.
1755 */
1756 LSC_FLUSH_TYPE_CLEAN = 4,
1757 /*
1758 * Flush "RW" section of the L3 cache, but leave L1 and L2 caches untouched.
1759 */
1760 LSC_FLUSH_TYPE_L3ONLY = 5,
1761 /*
1762 * HW maps this flush type internally to NONE.
1763 */
1764 LSC_FLUSH_TYPE_NONE_6 = 6,
1765
1766 };
1767
/* Selects how a UGM (untyped global memory) fence message is routed. */
enum ENUM_PACKED lsc_backup_fence_routing {
   /* Normal routing: UGM fence is routed to UGM pipeline. */
   LSC_NORMAL_ROUTING,
   /* Route UGM fence to LSC unit. */
   LSC_ROUTE_TO_LSC,
};
1774
1775 /*
1776 * Specifies the size of the vector in a dataport message.
1777 */
1778 enum ENUM_PACKED lsc_vect_size {
1779 LSC_VECT_SIZE_V1 = 0, /* vector length 1 */
1780 LSC_VECT_SIZE_V2 = 1, /* vector length 2 */
1781 LSC_VECT_SIZE_V3 = 2, /* Vector length 3 */
1782 LSC_VECT_SIZE_V4 = 3, /* Vector length 4 */
1783 LSC_VECT_SIZE_V8 = 4, /* Vector length 8 */
1784 LSC_VECT_SIZE_V16 = 5, /* Vector length 16 */
1785 LSC_VECT_SIZE_V32 = 6, /* Vector length 32 */
1786 LSC_VECT_SIZE_V64 = 7, /* Vector length 64 */
1787 };
1788
/* An LSC address payload that occupies a single register. */
#define LSC_ONE_ADDR_REG 1
1790
1791 #endif /* BRW_EU_DEFINES_H */
1792