/*
 * Copyright © 2018 Valve Corporation
 *
 * SPDX-License-Identifier: MIT
 */

#include "aco_builder.h"
#include "aco_ir.h"

#include "common/sid.h"

#include "util/memstream.h"

#include "ac_shader_util.h"
#include <algorithm>
#include <map>
#include <vector>

namespace aco {

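/* Tracks the code offsets needed to patch a lowered p_constaddr/p_resumeaddr
 * pair: the dword right after the s_getpc_b64 and the literal of the
 * following s_add_u32 (see fix_constaddrs()).
 */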
struct constaddr_info {
   unsigned getpc_end;
   unsigned add_literal;
};

struct asm_context {
   Program* program;
   enum amd_gfx_level gfx_level;
   std::vector<std::pair<int, SALU_instruction*>> branches;
   std::map<unsigned, constaddr_info> constaddrs;
   std::map<unsigned, constaddr_info> resumeaddrs;
   std::vector<struct aco_symbol>* symbols;
   Block* loop_header = NULL;
   const int16_t* opcode;
   // TODO: keep track of branch instructions referring to blocks
   // and, when emitting the block, correct the offset in instr
   asm_context(Program* program_, std::vector<struct aco_symbol>* symbols_)
       : program(program_), gfx_level(program->gfx_level), symbols(symbols_)
   {
      if (gfx_level <= GFX7)
         opcode = &instr_info.opcode_gfx7[0];
      else if (gfx_level <= GFX9)
         opcode = &instr_info.opcode_gfx9[0];
      else if (gfx_level <= GFX10_3)
         opcode = &instr_info.opcode_gfx10[0];
      else if (gfx_level <= GFX11_5)
         opcode = &instr_info.opcode_gfx11[0];
      else
         opcode = &instr_info.opcode_gfx12[0];
   }

   int subvector_begin_pos = -1;
};

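/* Returns the number of extra NSA (non-sequential address) dwords a MIMG
 * instruction needs. If every address operand directly follows the previous
 * one in the register file, the sequential encoding suffices and this
 * returns 0.
 */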
unsigned
get_mimg_nsa_dwords(const Instruction* instr)
{
   unsigned addr_dwords = instr->operands.size() - 3;
   for (unsigned i = 1; i < addr_dwords; i++) {
      if (instr->operands[3 + i].physReg() !=
          instr->operands[3 + (i - 1)].physReg().advance(instr->operands[3 + (i - 1)].bytes()))
         return DIV_ROUND_UP(addr_dwords - 1, 4);
   }
   return 0;
}

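/* Returns the operand index at which the OPY half of a VOPD instruction
 * starts, i.e. how many operands the OPX half consumes.
 */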
unsigned
get_vopd_opy_start(const Instruction* instr)
{
   switch (instr->opcode) {
   case aco_opcode::v_dual_fmac_f32:
   case aco_opcode::v_dual_fmaak_f32:
   case aco_opcode::v_dual_fmamk_f32:
   case aco_opcode::v_dual_cndmask_b32:
   case aco_opcode::v_dual_dot2acc_f32_f16:
   case aco_opcode::v_dual_dot2acc_f32_bf16: return 3;
   case aco_opcode::v_dual_mov_b32: return 1;
   default: return 2;
   }
}

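/* On GFX11+, the hardware encodings of m0 and sgpr_null are swapped relative
 * to the register numbers ACO uses internally, so translate them here.
 */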
uint32_t
reg(asm_context& ctx, PhysReg reg)
{
   if (ctx.gfx_level >= GFX11) {
      if (reg == m0)
         return sgpr_null.reg();
      else if (reg == sgpr_null)
         return m0.reg();
   }
   return reg.reg();
}

ALWAYS_INLINE uint32_t
reg(asm_context& ctx, Operand op, unsigned width = 32)
{
   return reg(ctx, op.physReg()) & BITFIELD_MASK(width);
}

ALWAYS_INLINE uint32_t
reg(asm_context& ctx, Definition def, unsigned width = 32)
{
   return reg(ctx, def.physReg()) & BITFIELD_MASK(width);
}

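/* True16 instructions on GFX11 can only address the lower 128 VGPRs in their
 * 16-bit operands and definition, so instructions using higher VGPRs have to
 * be promoted to the VOP3 encoding.
 */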
bool
needs_vop3_gfx11(asm_context& ctx, Instruction* instr)
{
   if (ctx.gfx_level <= GFX10_3)
      return false;

   uint8_t mask = get_gfx11_true16_mask(instr->opcode);
   if (!mask)
      return false;

   u_foreach_bit (i, mask & 0x3) {
      if (instr->operands[i].physReg().reg() >= (256 + 128))
         return true;
   }
   if ((mask & 0x8) && instr->definitions[0].physReg().reg() >= (256 + 128))
      return true;
   return false;
}

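/* Packs the GFX12 cache policy (CPOL) field: scope in the two LSBs and the
 * temporal hint in the bits above it.
 */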
template <typename T>
uint32_t
get_gfx12_cpol(const T& instr)
{
   uint32_t scope = instr.cache.gfx12.scope;
   uint32_t th = instr.cache.gfx12.temporal_hint;
   return scope | (th << 2);
}

void
emit_sop2_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];

   uint32_t encoding = (0b10 << 30);
   encoding |= opcode << 23;
   encoding |= !instr->definitions.empty() ? reg(ctx, instr->definitions[0]) << 16 : 0;
   encoding |= instr->operands.size() >= 2 ? reg(ctx, instr->operands[1]) << 8 : 0;
   encoding |= !instr->operands.empty() ? reg(ctx, instr->operands[0]) : 0;
   out.push_back(encoding);
}

void
emit_sopk_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   SALU_instruction& sopk = instr->salu();
   assert(sopk.imm <= UINT16_MAX);

   if (instr->opcode == aco_opcode::s_subvector_loop_begin) {
      assert(ctx.gfx_level >= GFX10);
      assert(ctx.subvector_begin_pos == -1);
      ctx.subvector_begin_pos = out.size();
   } else if (instr->opcode == aco_opcode::s_subvector_loop_end) {
      assert(ctx.gfx_level >= GFX10);
      assert(ctx.subvector_begin_pos != -1);
      /* Adjust s_subvector_loop_begin instruction to the address after the end */
      out[ctx.subvector_begin_pos] |= (out.size() - ctx.subvector_begin_pos);
      /* Adjust s_subvector_loop_end instruction to the address after the beginning */
      sopk.imm = (uint16_t)(ctx.subvector_begin_pos - (int)out.size());
      ctx.subvector_begin_pos = -1;
   }

   uint32_t encoding = (0b1011 << 28);
   encoding |= opcode << 23;
   encoding |= !instr->definitions.empty() && !(instr->definitions[0].physReg() == scc)
                  ? reg(ctx, instr->definitions[0]) << 16
               : !instr->operands.empty() && instr->operands[0].physReg() <= 127
                  ? reg(ctx, instr->operands[0]) << 16
                  : 0;
   encoding |= sopk.imm;
   out.push_back(encoding);
}

void
emit_sop1_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];

   uint32_t encoding = (0b101111101 << 23);
   encoding |= !instr->definitions.empty() ? reg(ctx, instr->definitions[0]) << 16 : 0;
   encoding |= opcode << 8;
   encoding |= !instr->operands.empty() ? reg(ctx, instr->operands[0]) : 0;
   out.push_back(encoding);
}

void
emit_sopc_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];

   uint32_t encoding = (0b101111110 << 23);
   encoding |= opcode << 16;
   encoding |= instr->operands.size() == 2 ? reg(ctx, instr->operands[1]) << 8 : 0;
   encoding |= !instr->operands.empty() ? reg(ctx, instr->operands[0]) : 0;
   out.push_back(encoding);
}

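/* Branch targets are not known yet when the SOPP is emitted, so branches are
 * recorded in ctx.branches and their 16-bit immediate is patched later by
 * fix_branches(). force_imm bypasses this for already-computed immediates.
 */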
void
emit_sopp_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr,
                      bool force_imm = false)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   SALU_instruction& sopp = instr->salu();

   uint32_t encoding = (0b101111111 << 23);
   encoding |= opcode << 16;

   if (!force_imm && instr_info.classes[(int)instr->opcode] == instr_class::branch) {
      sopp.pass_flags = 0;
      ctx.branches.emplace_back(out.size(), &sopp);
   } else {
      assert(sopp.imm <= UINT16_MAX);
      encoding |= (uint16_t)sopp.imm;
   }
   out.push_back(encoding);
}

void
emit_smem_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   SMEM_instruction& smem = instr->smem();
   bool glc = smem.cache.value & ac_glc;
   bool dlc = smem.cache.value & ac_dlc;

   bool soe = instr->operands.size() >= (!instr->definitions.empty() ? 3 : 4);
   bool is_load = !instr->definitions.empty();
   uint32_t encoding = 0;

   if (ctx.gfx_level <= GFX7) {
      encoding = (0b11000 << 27);
      encoding |= opcode << 22;
      encoding |= instr->definitions.size() ? reg(ctx, instr->definitions[0]) << 15 : 0;
      encoding |= instr->operands.size() ? (reg(ctx, instr->operands[0]) >> 1) << 9 : 0;
      if (instr->operands.size() >= 2) {
         if (!instr->operands[1].isConstant()) {
            encoding |= reg(ctx, instr->operands[1]);
         } else if (instr->operands[1].constantValue() >= 1024) {
            encoding |= 255; /* SQ_SRC_LITERAL */
         } else {
            encoding |= instr->operands[1].constantValue() >> 2;
            encoding |= 1 << 8;
         }
      }
      out.push_back(encoding);
      /* SMRD instructions can take a literal on GFX7 */
      if (instr->operands.size() >= 2 && instr->operands[1].isConstant() &&
          instr->operands[1].constantValue() >= 1024)
         out.push_back(instr->operands[1].constantValue() >> 2);
      return;
   }

   if (ctx.gfx_level <= GFX9) {
      encoding = (0b110000 << 26);
      assert(!dlc); /* Device-level coherent is not supported on GFX9 and lower */
      /* We don't use the NV bit. */
   } else {
      encoding = (0b111101 << 26);
      if (ctx.gfx_level <= GFX11_5)
         encoding |= dlc ? 1 << (ctx.gfx_level >= GFX11 ? 13 : 14) : 0;
   }

   if (ctx.gfx_level <= GFX11_5) {
      encoding |= opcode << 18;
      encoding |= glc ? 1 << (ctx.gfx_level >= GFX11 ? 14 : 16) : 0;
   } else {
      encoding |= opcode << 13;
      encoding |= get_gfx12_cpol(smem) << 21;
   }

   if (ctx.gfx_level <= GFX9) {
      if (instr->operands.size() >= 2)
         encoding |= instr->operands[1].isConstant() ? 1 << 17 : 0; /* IMM - immediate enable */
   }
   if (ctx.gfx_level == GFX9) {
      encoding |= soe ? 1 << 14 : 0;
   }

   if (is_load || instr->operands.size() >= 3) { /* SDATA */
      encoding |= (is_load ? reg(ctx, instr->definitions[0]) : reg(ctx, instr->operands[2])) << 6;
   }
   if (instr->operands.size() >= 1) { /* SBASE */
      encoding |= reg(ctx, instr->operands[0]) >> 1;
   }

   out.push_back(encoding);
   encoding = 0;

   int32_t offset = 0;
   uint32_t soffset =
      ctx.gfx_level >= GFX10
         ? reg(ctx, sgpr_null) /* On GFX10 this is disabled by specifying SGPR_NULL */
         : 0;                  /* On GFX9, it is disabled by the SOE bit (and it's not present on
                                  GFX8 and below) */
   if (instr->operands.size() >= 2) {
      const Operand& op_off1 = instr->operands[1];
      if (ctx.gfx_level <= GFX9) {
         offset = op_off1.isConstant() ? op_off1.constantValue() : reg(ctx, op_off1);
      } else {
         /* GFX10 only supports constants in OFFSET, so put the operand in SOFFSET if it's an
          * SGPR */
         if (op_off1.isConstant()) {
            offset = op_off1.constantValue();
         } else {
            soffset = reg(ctx, op_off1);
            assert(!soe); /* There is no place to put the other SGPR offset, if any */
         }
      }

      if (soe) {
         const Operand& op_off2 = instr->operands.back();
         assert(ctx.gfx_level >= GFX9); /* GFX8 and below don't support specifying a constant
                                           and an SGPR at the same time */
         assert(!op_off2.isConstant());
         soffset = reg(ctx, op_off2);
      }
   }
   encoding |= offset;
   encoding |= soffset << 25;

   out.push_back(encoding);
}

void
emit_vop2_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   VALU_instruction& valu = instr->valu();

   uint32_t encoding = 0;
   encoding |= opcode << 25;
   encoding |= reg(ctx, instr->definitions[0], 8) << 17;
   encoding |= (valu.opsel[3] ? 128 : 0) << 17;
   encoding |= reg(ctx, instr->operands[1], 8) << 9;
   encoding |= (valu.opsel[1] ? 128 : 0) << 9;
   encoding |= reg(ctx, instr->operands[0]);
   encoding |= valu.opsel[0] ? 128 : 0;
   out.push_back(encoding);
}

void
emit_vop1_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   VALU_instruction& valu = instr->valu();

   uint32_t encoding = (0b0111111 << 25);
   if (!instr->definitions.empty()) {
      encoding |= reg(ctx, instr->definitions[0], 8) << 17;
      encoding |= (valu.opsel[3] ? 128 : 0) << 17;
   }
   encoding |= opcode << 9;
   if (!instr->operands.empty()) {
      encoding |= reg(ctx, instr->operands[0]);
      encoding |= valu.opsel[0] ? 128 : 0;
   }
   out.push_back(encoding);
}

void
emit_vopc_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   VALU_instruction& valu = instr->valu();

   uint32_t encoding = (0b0111110 << 25);
   encoding |= opcode << 17;
   encoding |= reg(ctx, instr->operands[1], 8) << 9;
   encoding |= (valu.opsel[1] ? 128 : 0) << 9;
   encoding |= reg(ctx, instr->operands[0]);
   encoding |= valu.opsel[0] ? 128 : 0;
   out.push_back(encoding);
}

void
emit_vintrp_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   VINTRP_instruction& interp = instr->vintrp();

   uint32_t encoding = 0;
   if (instr->opcode == aco_opcode::v_interp_p1ll_f16 ||
       instr->opcode == aco_opcode::v_interp_p1lv_f16 ||
       instr->opcode == aco_opcode::v_interp_p2_legacy_f16 ||
       instr->opcode == aco_opcode::v_interp_p2_f16 ||
       instr->opcode == aco_opcode::v_interp_p2_hi_f16) {
      if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
         encoding = (0b110100 << 26);
      } else if (ctx.gfx_level >= GFX10) {
         encoding = (0b110101 << 26);
      } else {
         unreachable("Unknown gfx_level.");
      }

      unsigned opsel = instr->opcode == aco_opcode::v_interp_p2_hi_f16 ? 0x8 : 0;

      encoding |= opcode << 16;
      encoding |= opsel << 11;
      encoding |= reg(ctx, instr->definitions[0], 8);
      out.push_back(encoding);

      encoding = 0;
      encoding |= interp.attribute;
      encoding |= interp.component << 6;
      encoding |= interp.high_16bits << 8;
      encoding |= reg(ctx, instr->operands[0]) << 9;
      if (instr->opcode == aco_opcode::v_interp_p2_f16 ||
          instr->opcode == aco_opcode::v_interp_p2_hi_f16 ||
          instr->opcode == aco_opcode::v_interp_p2_legacy_f16 ||
          instr->opcode == aco_opcode::v_interp_p1lv_f16) {
         encoding |= reg(ctx, instr->operands[2]) << 18;
      }
      out.push_back(encoding);
   } else {
      if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
         encoding = (0b110101 << 26); /* Vega ISA doc says 110010 but it's wrong */
      } else {
         encoding = (0b110010 << 26);
      }

      assert(encoding);
      encoding |= reg(ctx, instr->definitions[0], 8) << 18;
      encoding |= opcode << 16;
      encoding |= interp.attribute << 10;
      encoding |= interp.component << 8;
      if (instr->opcode == aco_opcode::v_interp_mov_f32)
         encoding |= (0x3 & instr->operands[0].constantValue());
      else
         encoding |= reg(ctx, instr->operands[0], 8);
      out.push_back(encoding);
   }
}

void
emit_vinterp_inreg_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   VINTERP_inreg_instruction& interp = instr->vinterp_inreg();

   uint32_t encoding = (0b11001101 << 24);
   encoding |= reg(ctx, instr->definitions[0], 8);
   encoding |= (uint32_t)interp.wait_exp << 8;
   encoding |= (uint32_t)interp.opsel << 11;
   encoding |= (uint32_t)interp.clamp << 15;
   encoding |= opcode << 16;
   out.push_back(encoding);

   encoding = 0;
   for (unsigned i = 0; i < instr->operands.size(); i++)
      encoding |= reg(ctx, instr->operands[i]) << (i * 9);
   for (unsigned i = 0; i < 3; i++)
      encoding |= interp.neg[i] << (29 + i);
   out.push_back(encoding);
}

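/* VOPD encodes two VALU instructions (OPX and OPY) that issue together; the
 * OPY half's operands start at get_vopd_opy_start().
 */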
void
emit_vopd_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   VOPD_instruction& vopd = instr->vopd();

   uint32_t encoding = (0b110010 << 26);
   encoding |= reg(ctx, instr->operands[0]);
   if (instr->opcode != aco_opcode::v_dual_mov_b32)
      encoding |= reg(ctx, instr->operands[1], 8) << 9;
   encoding |= (uint32_t)ctx.opcode[(int)vopd.opy] << 17;
   encoding |= opcode << 22;
   out.push_back(encoding);

   unsigned opy_start = get_vopd_opy_start(instr);

   encoding = reg(ctx, instr->operands[opy_start]);
   if (vopd.opy != aco_opcode::v_dual_mov_b32)
      encoding |= reg(ctx, instr->operands[opy_start + 1], 8) << 9;
   encoding |= (reg(ctx, instr->definitions[1], 8) >> 1) << 17;
   encoding |= reg(ctx, instr->definitions[0], 8) << 24;
   out.push_back(encoding);
}

void
emit_ds_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   DS_instruction& ds = instr->ds();

   uint32_t encoding = (0b110110 << 26);
   if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
      encoding |= opcode << 17;
      encoding |= (ds.gds ? 1 : 0) << 16;
   } else {
      encoding |= opcode << 18;
      encoding |= (ds.gds ? 1 : 0) << 17;
   }
   encoding |= ((0xFF & ds.offset1) << 8);
   encoding |= (0xFFFF & ds.offset0);
   out.push_back(encoding);
   encoding = 0;
   if (!instr->definitions.empty())
      encoding |= reg(ctx, instr->definitions[0], 8) << 24;
   for (unsigned i = 0; i < MIN2(instr->operands.size(), 3); i++) {
      Operand& op = instr->operands[i];
      if (op.physReg() != m0 && !op.isUndefined())
         encoding |= reg(ctx, op, 8) << (8 * i);
   }
   out.push_back(encoding);
}

void
emit_ldsdir_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   LDSDIR_instruction& dir = instr->ldsdir();

   uint32_t encoding = (0b11001110 << 24);
   encoding |= opcode << 20;
   encoding |= (uint32_t)dir.wait_vdst << 16;
   if (ctx.gfx_level >= GFX12)
      encoding |= (uint32_t)dir.wait_vsrc << 23;
   encoding |= (uint32_t)dir.attr << 10;
   encoding |= (uint32_t)dir.attr_chan << 8;
   encoding |= reg(ctx, instr->definitions[0], 8);
   out.push_back(encoding);
}

void
emit_mubuf_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   MUBUF_instruction& mubuf = instr->mubuf();
   bool glc = mubuf.cache.value & ac_glc;
   bool slc = mubuf.cache.value & ac_slc;
   bool dlc = mubuf.cache.value & ac_dlc;

   uint32_t encoding = (0b111000 << 26);
   if (ctx.gfx_level >= GFX11 && mubuf.lds) /* GFX11 has separate opcodes for LDS loads */
      opcode = opcode == 0 ? 0x32 : (opcode + 0x1d);
   else
      encoding |= (mubuf.lds ? 1 : 0) << 16;
   encoding |= opcode << 18;
   encoding |= (glc ? 1 : 0) << 14;
   if (ctx.gfx_level <= GFX10_3)
      encoding |= (mubuf.idxen ? 1 : 0) << 13;
   assert(!mubuf.addr64 || ctx.gfx_level <= GFX7);
   if (ctx.gfx_level == GFX6 || ctx.gfx_level == GFX7)
      encoding |= (mubuf.addr64 ? 1 : 0) << 15;
   if (ctx.gfx_level <= GFX10_3)
      encoding |= (mubuf.offen ? 1 : 0) << 12;
   if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
      assert(!dlc); /* Device-level coherent is not supported on GFX9 and lower */
      encoding |= (slc ? 1 : 0) << 17;
   } else if (ctx.gfx_level >= GFX11) {
      encoding |= (slc ? 1 : 0) << 12;
      encoding |= (dlc ? 1 : 0) << 13;
   } else if (ctx.gfx_level >= GFX10) {
      encoding |= (dlc ? 1 : 0) << 15;
   }
   encoding |= 0x0FFF & mubuf.offset;
   out.push_back(encoding);
   encoding = 0;
   if (ctx.gfx_level <= GFX7 || (ctx.gfx_level >= GFX10 && ctx.gfx_level <= GFX10_3)) {
      encoding |= (slc ? 1 : 0) << 22;
   }
   encoding |= reg(ctx, instr->operands[2]) << 24;
   if (ctx.gfx_level >= GFX11) {
      encoding |= (mubuf.tfe ? 1 : 0) << 21;
      encoding |= (mubuf.offen ? 1 : 0) << 22;
      encoding |= (mubuf.idxen ? 1 : 0) << 23;
   } else {
      encoding |= (mubuf.tfe ? 1 : 0) << 23;
   }
   encoding |= (reg(ctx, instr->operands[0]) >> 2) << 16;
   if (instr->operands.size() > 3 && !mubuf.lds)
      encoding |= reg(ctx, instr->operands[3], 8) << 8;
   else if (!mubuf.lds)
      encoding |= reg(ctx, instr->definitions[0], 8) << 8;
   encoding |= reg(ctx, instr->operands[1], 8);
   out.push_back(encoding);
}

void
emit_mubuf_instruction_gfx12(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   MUBUF_instruction& mubuf = instr->mubuf();
   assert(!mubuf.lds);

   uint32_t encoding = 0b110001 << 26;
   encoding |= opcode << 14;
   if (instr->operands[2].isConstant()) {
      assert(instr->operands[2].constantValue() == 0);
      encoding |= reg(ctx, sgpr_null);
   } else {
      encoding |= reg(ctx, instr->operands[2]);
   }
   encoding |= (mubuf.tfe ? 1 : 0) << 22;
   out.push_back(encoding);

   encoding = 0;
   if (instr->operands.size() > 3)
      encoding |= reg(ctx, instr->operands[3], 8);
   else
      encoding |= reg(ctx, instr->definitions[0], 8);
   encoding |= reg(ctx, instr->operands[0]) << 9;
   encoding |= (mubuf.offen ? 1 : 0) << 30;
   encoding |= (mubuf.idxen ? 1 : 0) << 31;
   encoding |= get_gfx12_cpol(mubuf) << 18;
   encoding |= 1 << 23;
   out.push_back(encoding);

   encoding = 0;
   if (!instr->operands[1].isUndefined())
      encoding |= reg(ctx, instr->operands[1], 8);
   encoding |= (mubuf.offset & 0x00ffffff) << 8;
   out.push_back(encoding);
}

void
emit_mtbuf_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   MTBUF_instruction& mtbuf = instr->mtbuf();
   bool glc = mtbuf.cache.value & ac_glc;
   bool slc = mtbuf.cache.value & ac_slc;
   bool dlc = mtbuf.cache.value & ac_dlc;
   uint32_t img_format = ac_get_tbuffer_format(ctx.gfx_level, mtbuf.dfmt, mtbuf.nfmt);
   assert(img_format <= 0x7F);
   assert(!dlc || ctx.gfx_level >= GFX10);

   uint32_t encoding = (0b111010 << 26);
   encoding |= (img_format << 19); /* Handles both the GFX10 FORMAT and the old NFMT+DFMT */
   if (ctx.gfx_level < GFX8) {
      encoding |= opcode << 16;
      /* ADDR64 is unused */
   } else if (ctx.gfx_level >= GFX10 && ctx.gfx_level < GFX11) {
      /* DLC bit replaces one bit of the OPCODE on GFX10 */
      encoding |= (opcode & 0x07) << 16; /* 3 LSBs of 4-bit OPCODE */
      encoding |= (dlc ? 1 : 0) << 15;
   } else {
      encoding |= opcode << 15;
   }
   encoding |= (glc ? 1 : 0) << 14;
   if (ctx.gfx_level >= GFX11) {
      encoding |= (dlc ? 1 : 0) << 13;
      encoding |= (slc ? 1 : 0) << 12;
   } else {
      encoding |= (mtbuf.idxen ? 1 : 0) << 13;
      encoding |= (mtbuf.offen ? 1 : 0) << 12;
   }
   encoding |= 0x0FFF & mtbuf.offset;
   out.push_back(encoding);

   encoding = 0;
   encoding |= reg(ctx, instr->operands[2]) << 24;
   if (ctx.gfx_level >= GFX11) {
      encoding |= (mtbuf.idxen ? 1 : 0) << 23;
      encoding |= (mtbuf.offen ? 1 : 0) << 22;
      encoding |= (mtbuf.tfe ? 1 : 0) << 21;
   } else {
      encoding |= (mtbuf.tfe ? 1 : 0) << 23;
      encoding |= (slc ? 1 : 0) << 22;
      if (ctx.gfx_level >= GFX10)
         encoding |= (((opcode & 0x08) >> 3) << 21); /* MSB of 4-bit OPCODE */
   }
   encoding |= (reg(ctx, instr->operands[0]) >> 2) << 16;
   if (instr->operands.size() > 3)
      encoding |= reg(ctx, instr->operands[3], 8) << 8;
   else
      encoding |= reg(ctx, instr->definitions[0], 8) << 8;
   encoding |= reg(ctx, instr->operands[1], 8);
   out.push_back(encoding);
}

void
emit_mtbuf_instruction_gfx12(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   MTBUF_instruction& mtbuf = instr->mtbuf();

   uint32_t img_format = ac_get_tbuffer_format(ctx.gfx_level, mtbuf.dfmt, mtbuf.nfmt);

   uint32_t encoding = 0b110001 << 26;
   encoding |= 0b1000 << 18;
   encoding |= opcode << 14;
   if (instr->operands[2].isConstant()) {
      assert(instr->operands[2].constantValue() == 0);
      encoding |= reg(ctx, sgpr_null);
   } else {
      encoding |= reg(ctx, instr->operands[2]);
   }
   encoding |= (mtbuf.tfe ? 1 : 0) << 22;
   out.push_back(encoding);

   encoding = 0;
   if (instr->operands.size() > 3)
      encoding |= reg(ctx, instr->operands[3], 8);
   else
      encoding |= reg(ctx, instr->definitions[0], 8);
   encoding |= reg(ctx, instr->operands[0]) << 9;
   encoding |= (mtbuf.offen ? 1 : 0) << 30;
   encoding |= (mtbuf.idxen ? 1 : 0) << 31;
   encoding |= get_gfx12_cpol(mtbuf) << 18;
   encoding |= img_format << 23;
   out.push_back(encoding);

   encoding = 0;
   encoding |= reg(ctx, instr->operands[1], 8);
   encoding |= (mtbuf.offset & 0x00ffffff) << 8;
   out.push_back(encoding);
}

void
emit_mimg_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   MIMG_instruction& mimg = instr->mimg();
   bool glc = mimg.cache.value & ac_glc;
   bool slc = mimg.cache.value & ac_slc;
   bool dlc = mimg.cache.value & ac_dlc;

   unsigned nsa_dwords = get_mimg_nsa_dwords(instr);
   assert(!nsa_dwords || ctx.gfx_level >= GFX10);

   uint32_t encoding = (0b111100 << 26);
   if (ctx.gfx_level >= GFX11) { /* GFX11: rearranges most fields */
      assert(nsa_dwords <= 1);
      encoding |= nsa_dwords;
      encoding |= mimg.dim << 2;
      encoding |= mimg.unrm ? 1 << 7 : 0;
      encoding |= (0xF & mimg.dmask) << 8;
      encoding |= slc ? 1 << 12 : 0;
      encoding |= dlc ? 1 << 13 : 0;
      encoding |= glc ? 1 << 14 : 0;
      encoding |= mimg.r128 ? 1 << 15 : 0;
      encoding |= mimg.a16 ? 1 << 16 : 0;
      encoding |= mimg.d16 ? 1 << 17 : 0;
      encoding |= (opcode & 0xFF) << 18;
   } else {
      encoding |= slc ? 1 << 25 : 0;
      encoding |= (opcode & 0x7f) << 18;
      encoding |= (opcode >> 7) & 1;
      encoding |= mimg.lwe ? 1 << 17 : 0;
      encoding |= mimg.tfe ? 1 << 16 : 0;
      encoding |= glc ? 1 << 13 : 0;
      encoding |= mimg.unrm ? 1 << 12 : 0;
      if (ctx.gfx_level <= GFX9) {
         assert(!dlc); /* Device-level coherent is not supported on GFX9 and lower */
         assert(!mimg.r128);
         encoding |= mimg.a16 ? 1 << 15 : 0;
         encoding |= mimg.da ? 1 << 14 : 0;
      } else {
         /* GFX10: A16 moved to 2nd word, R128 replaces it in 1st word */
         encoding |= mimg.r128 ? 1 << 15 : 0;
         encoding |= nsa_dwords << 1;
         encoding |= mimg.dim << 3; /* GFX10: dimensionality instead of declare array */
         encoding |= dlc ? 1 << 7 : 0;
      }
      encoding |= (0xF & mimg.dmask) << 8;
   }
   out.push_back(encoding);

   encoding = reg(ctx, instr->operands[3], 8); /* VADDR */
   if (!instr->definitions.empty()) {
      encoding |= reg(ctx, instr->definitions[0], 8) << 8; /* VDATA */
   } else if (!instr->operands[2].isUndefined()) {
      encoding |= reg(ctx, instr->operands[2], 8) << 8; /* VDATA */
   }
   encoding |= (0x1F & (reg(ctx, instr->operands[0]) >> 2)) << 16; /* T# (resource) */

   assert(!mimg.d16 || ctx.gfx_level >= GFX9);
   if (ctx.gfx_level >= GFX11) {
      if (!instr->operands[1].isUndefined())
         encoding |= (0x1F & (reg(ctx, instr->operands[1]) >> 2)) << 26; /* sampler */

      encoding |= mimg.tfe ? 1 << 21 : 0;
      encoding |= mimg.lwe ? 1 << 22 : 0;
   } else {
      if (!instr->operands[1].isUndefined())
         encoding |= (0x1F & (reg(ctx, instr->operands[1]) >> 2)) << 21; /* sampler */

      encoding |= mimg.d16 ? 1 << 31 : 0;
      if (ctx.gfx_level >= GFX10) {
         /* GFX10: A16 still exists, but is in a different place */
         encoding |= mimg.a16 ? 1 << 30 : 0;
      }
   }

   out.push_back(encoding);

   if (nsa_dwords) {
      out.resize(out.size() + nsa_dwords);
      std::vector<uint32_t>::iterator nsa = std::prev(out.end(), nsa_dwords);
      for (unsigned i = 0; i < instr->operands.size() - 4u; i++)
         nsa[i / 4] |= reg(ctx, instr->operands[4 + i], 8) << (i % 4 * 8);
   }
}

void
emit_mimg_instruction_gfx12(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   MIMG_instruction& mimg = instr->mimg();

   bool vsample = !instr->operands[1].isUndefined() || instr->opcode == aco_opcode::image_msaa_load;
   uint32_t encoding = opcode << 14;
   if (vsample) {
      encoding |= 0b111001 << 26;
      encoding |= mimg.tfe << 3;
      encoding |= mimg.unrm << 13;
   } else {
      encoding |= 0b110100 << 26;
   }
   encoding |= mimg.dim;
   encoding |= mimg.r128 << 4;
   encoding |= mimg.d16 << 5;
   encoding |= mimg.a16 << 6;
   encoding |= (mimg.dmask & 0xf) << 22;
   out.push_back(encoding);

   uint8_t vaddr[5] = {0, 0, 0, 0, 0};
   for (unsigned i = 3; i < instr->operands.size(); i++)
      vaddr[i - 3] = reg(ctx, instr->operands[i], 8);
   unsigned num_vaddr = instr->operands.size() - 3;
   for (unsigned i = 0; i < MIN2(instr->operands.back().size() - 1, 5 - num_vaddr); i++)
      vaddr[num_vaddr + i] = reg(ctx, instr->operands.back(), 8) + i + 1;

   encoding = 0;
   if (!instr->definitions.empty())
      encoding |= reg(ctx, instr->definitions[0], 8); /* VDATA */
   else if (!instr->operands[2].isUndefined())
      encoding |= reg(ctx, instr->operands[2], 8); /* VDATA */
   encoding |= reg(ctx, instr->operands[0]) << 9; /* T# (resource) */
   if (vsample) {
      encoding |= mimg.lwe << 8;
      if (instr->opcode != aco_opcode::image_msaa_load)
         encoding |= reg(ctx, instr->operands[1]) << 23; /* sampler */
   } else {
      encoding |= mimg.tfe << 23;
      encoding |= vaddr[4] << 24;
   }
   encoding |= get_gfx12_cpol(mimg) << 18;
   out.push_back(encoding);

   encoding = 0;
   for (unsigned i = 0; i < 4; i++)
      encoding |= vaddr[i] << (i * 8);
   out.push_back(encoding);
}

void
emit_flatlike_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   FLAT_instruction& flat = instr->flatlike();
   bool glc = flat.cache.value & ac_glc;
   bool slc = flat.cache.value & ac_slc;
   bool dlc = flat.cache.value & ac_dlc;

   uint32_t encoding = (0b110111 << 26);
   encoding |= opcode << 18;
   if (ctx.gfx_level == GFX9 || ctx.gfx_level >= GFX11) {
      if (instr->isFlat())
         assert(flat.offset <= 0xfff);
      else
         assert(flat.offset >= -4096 && flat.offset < 4096);
      encoding |= flat.offset & 0x1fff;
   } else if (ctx.gfx_level <= GFX8 || instr->isFlat()) {
      /* GFX10 has a 12-bit immediate OFFSET field, but a hardware bug
       * (FlatSegmentOffsetBug) makes it ignore the offset.
       */
      assert(flat.offset == 0);
   } else {
      assert(flat.offset >= -2048 && flat.offset <= 2047);
      encoding |= flat.offset & 0xfff;
   }
   if (instr->isScratch())
      encoding |= 1 << (ctx.gfx_level >= GFX11 ? 16 : 14);
   else if (instr->isGlobal())
      encoding |= 2 << (ctx.gfx_level >= GFX11 ? 16 : 14);
   encoding |= flat.lds ? 1 << 13 : 0;
   encoding |= glc ? 1 << (ctx.gfx_level >= GFX11 ? 14 : 16) : 0;
   encoding |= slc ? 1 << (ctx.gfx_level >= GFX11 ? 15 : 17) : 0;
   if (ctx.gfx_level >= GFX10) {
      assert(!flat.nv);
      encoding |= dlc ? 1 << (ctx.gfx_level >= GFX11 ? 13 : 12) : 0;
   } else {
      assert(!dlc);
   }
   out.push_back(encoding);
   encoding = reg(ctx, instr->operands[0], 8);
   if (!instr->definitions.empty())
      encoding |= reg(ctx, instr->definitions[0], 8) << 24;
   if (instr->operands.size() >= 3)
      encoding |= reg(ctx, instr->operands[2], 8) << 8;
   if (!instr->operands[1].isUndefined()) {
      assert(ctx.gfx_level >= GFX10 || instr->operands[1].physReg() != 0x7F);
      assert(instr->format != Format::FLAT);
      encoding |= reg(ctx, instr->operands[1], 8) << 16;
   } else if (instr->format != Format::FLAT ||
              ctx.gfx_level >= GFX10) { /* SADDR is actually used with FLAT on GFX10 */
      /* For GFX10.3 scratch, 0x7F disables both ADDR and SADDR, unlike sgpr_null, which only
       * disables SADDR. On GFX11, this was replaced with SVE.
       */
      if (ctx.gfx_level <= GFX9 ||
          (instr->isScratch() && instr->operands[0].isUndefined() && ctx.gfx_level < GFX11))
         encoding |= 0x7F << 16;
      else
         encoding |= reg(ctx, sgpr_null) << 16;
   }
   if (ctx.gfx_level >= GFX11 && instr->isScratch())
      encoding |= !instr->operands[0].isUndefined() ? 1 << 23 : 0;
   else
      encoding |= flat.nv ? 1 << 23 : 0;
   out.push_back(encoding);
}

void
emit_flatlike_instruction_gfx12(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   FLAT_instruction& flat = instr->flatlike();
   assert(!flat.lds);

   uint32_t encoding = opcode << 14;
   encoding |= 0b111011 << 26;
   if (!instr->operands[1].isUndefined()) {
      assert(!instr->isFlat());
      encoding |= reg(ctx, instr->operands[1]);
   } else {
      encoding |= reg(ctx, sgpr_null);
   }
   if (instr->isScratch())
      encoding |= 1 << 24;
   else if (instr->isGlobal())
      encoding |= 2 << 24;
   out.push_back(encoding);

   encoding = 0;
   if (!instr->definitions.empty())
      encoding |= reg(ctx, instr->definitions[0], 8);
   if (instr->isScratch())
      encoding |= !instr->operands[0].isUndefined() ? 1 << 17 : 0;
   encoding |= get_gfx12_cpol(flat) << 18;
   if (instr->operands.size() >= 3)
      encoding |= reg(ctx, instr->operands[2], 8) << 23;
   out.push_back(encoding);

   encoding = 0;
   if (!instr->operands[0].isUndefined())
      encoding |= reg(ctx, instr->operands[0], 8);
   encoding |= (flat.offset & 0x00ffffff) << 8;
   out.push_back(encoding);
}

void
emit_exp_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   Export_instruction& exp = instr->exp();
   uint32_t encoding;
   if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
      encoding = (0b110001 << 26);
   } else {
      encoding = (0b111110 << 26);
   }

   if (ctx.gfx_level >= GFX11) {
      encoding |= exp.row_en ? 0b1 << 13 : 0;
   } else {
      encoding |= exp.valid_mask ? 0b1 << 12 : 0;
      encoding |= exp.compressed ? 0b1 << 10 : 0;
   }
   encoding |= exp.done ? 0b1 << 11 : 0;
   encoding |= exp.dest << 4;
   encoding |= exp.enabled_mask;
   out.push_back(encoding);
   encoding = reg(ctx, exp.operands[0], 8);
   encoding |= reg(ctx, exp.operands[1], 8) << 8;
   encoding |= reg(ctx, exp.operands[2], 8) << 16;
   encoding |= reg(ctx, exp.operands[3], 8) << 24;
   out.push_back(encoding);
}

void emit_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr);

void
emit_dpp16_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   assert(ctx.gfx_level >= GFX8);
   DPP16_instruction& dpp = instr->dpp16();

   /* first emit the instruction without the DPP operand */
   Operand dpp_op = instr->operands[0];
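   /* src0 encoding 250 (0xFA) marks the instruction as DPP16; the real
    * operand moves into the extra DPP dword emitted below. */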
   instr->operands[0] = Operand(PhysReg{250}, v1);
   instr->format = (Format)((uint16_t)instr->format & ~(uint16_t)Format::DPP16);
   emit_instruction(ctx, out, instr);
   uint32_t encoding = (0xF & dpp.row_mask) << 28;
   encoding |= (0xF & dpp.bank_mask) << 24;
   encoding |= dpp.abs[1] << 23;
   encoding |= dpp.neg[1] << 22;
   encoding |= dpp.abs[0] << 21;
   encoding |= dpp.neg[0] << 20;
   encoding |= dpp.fetch_inactive << 18;
   encoding |= dpp.bound_ctrl << 19;
   encoding |= dpp.dpp_ctrl << 8;
   encoding |= reg(ctx, dpp_op, 8);
   encoding |= dpp.opsel[0] && !instr->isVOP3() ? 128 : 0;
   out.push_back(encoding);
}

void
emit_dpp8_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   assert(ctx.gfx_level >= GFX10);
   DPP8_instruction& dpp = instr->dpp8();

   /* first emit the instruction without the DPP operand */
   Operand dpp_op = instr->operands[0];
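   /* src0 encoding 233 (0xE9) marks DPP8; 234 (0xEA) is the variant that
    * fetches inactive lanes */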
   instr->operands[0] = Operand(PhysReg{233u + dpp.fetch_inactive}, v1);
   instr->format = (Format)((uint16_t)instr->format & ~(uint16_t)Format::DPP8);
   emit_instruction(ctx, out, instr);
   uint32_t encoding = reg(ctx, dpp_op, 8);
   encoding |= dpp.opsel[0] && !instr->isVOP3() ? 128 : 0;
   encoding |= dpp.lane_sel << 8;
   out.push_back(encoding);
}

void
emit_vop3_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   VALU_instruction& vop3 = instr->valu();

   if (instr->isVOP2()) {
      opcode = opcode + 0x100;
   } else if (instr->isVOP1()) {
      if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9)
         opcode = opcode + 0x140;
      else
         opcode = opcode + 0x180;
   } else if (instr->isVOPC()) {
      opcode = opcode + 0x0;
   } else if (instr->isVINTRP()) {
      opcode = opcode + 0x270;
   }

   uint32_t encoding;
   if (ctx.gfx_level <= GFX9) {
      encoding = (0b110100 << 26);
   } else if (ctx.gfx_level >= GFX10) {
      encoding = (0b110101 << 26);
   } else {
      unreachable("Unknown gfx_level.");
   }

   if (ctx.gfx_level <= GFX7) {
      encoding |= opcode << 17;
      encoding |= (vop3.clamp ? 1 : 0) << 11;
   } else {
      encoding |= opcode << 16;
      encoding |= (vop3.clamp ? 1 : 0) << 15;
   }
   encoding |= vop3.opsel << 11;
   for (unsigned i = 0; i < 3; i++)
      encoding |= vop3.abs[i] << (8 + i);
   /* On GFX9 and older, v_cmpx implicitly writes exec besides writing an SGPR pair.
    * On GFX10 and newer, v_cmpx always writes just exec.
    */
   if (instr->definitions.size() == 2 && instr->isVOPC())
      assert(ctx.gfx_level <= GFX9 && instr->definitions[1].physReg() == exec);
   else if (instr->definitions.size() == 2 && instr->opcode != aco_opcode::v_swap_b16)
      encoding |= reg(ctx, instr->definitions[1]) << 8;
   encoding |= reg(ctx, instr->definitions[0], 8);
   out.push_back(encoding);
   encoding = 0;

   unsigned num_ops = instr->operands.size();
   /* Encoding implicit sources works fine with hardware but breaks some disassemblers. */
   if (instr->opcode == aco_opcode::v_writelane_b32_e64)
      num_ops = 2;
   else if (instr->opcode == aco_opcode::v_swap_b16)
      num_ops = 1;

   for (unsigned i = 0; i < num_ops; i++)
      encoding |= reg(ctx, instr->operands[i]) << (i * 9);
   encoding |= vop3.omod << 27;
   for (unsigned i = 0; i < 3; i++)
      encoding |= vop3.neg[i] << (29 + i);
   out.push_back(encoding);
}

void
emit_vop3p_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   VALU_instruction& vop3 = instr->valu();

   uint32_t encoding;
   if (ctx.gfx_level == GFX9) {
      encoding = (0b110100111 << 23);
   } else if (ctx.gfx_level >= GFX10) {
      encoding = (0b110011 << 26);
   } else {
      unreachable("Unknown gfx_level.");
   }

   encoding |= opcode << 16;
   encoding |= (vop3.clamp ? 1 : 0) << 15;
   encoding |= vop3.opsel_lo << 11;
   encoding |= ((vop3.opsel_hi & 0x4) ? 1 : 0) << 14;
   for (unsigned i = 0; i < 3; i++)
      encoding |= vop3.neg_hi[i] << (8 + i);
   encoding |= reg(ctx, instr->definitions[0], 8);
   out.push_back(encoding);
   encoding = 0;
   for (unsigned i = 0; i < instr->operands.size(); i++)
      encoding |= reg(ctx, instr->operands[i]) << (i * 9);
   encoding |= (vop3.opsel_hi & 0x3) << 27;
   for (unsigned i = 0; i < 3; i++)
      encoding |= vop3.neg_lo[i] << (29 + i);
   out.push_back(encoding);
}

void
emit_sdwa_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   assert(ctx.gfx_level >= GFX8 && ctx.gfx_level < GFX11);
   SDWA_instruction& sdwa = instr->sdwa();

   /* first emit the instruction without the SDWA operand */
   Operand sdwa_op = instr->operands[0];
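   /* src0 encoding 249 (0xF9) marks the instruction as SDWA */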
   instr->operands[0] = Operand(PhysReg{249}, v1);
   instr->format = (Format)((uint16_t)instr->format & ~(uint16_t)Format::SDWA);
   emit_instruction(ctx, out, instr);

   uint32_t encoding = 0;

   if (instr->isVOPC()) {
      if (instr->definitions[0].physReg() !=
          (ctx.gfx_level >= GFX10 && is_cmpx(instr->opcode) ? exec : vcc)) {
         encoding |= reg(ctx, instr->definitions[0]) << 8;
         encoding |= 1 << 15;
      }
      encoding |= (sdwa.clamp ? 1 : 0) << 13;
   } else {
      encoding |= sdwa.dst_sel.to_sdwa_sel(instr->definitions[0].physReg().byte()) << 8;
      uint32_t dst_u = sdwa.dst_sel.sign_extend() ? 1 : 0;
      if (instr->definitions[0].bytes() < 4) /* dst_preserve */
         dst_u = 2;
      encoding |= dst_u << 11;
      encoding |= (sdwa.clamp ? 1 : 0) << 13;
      encoding |= sdwa.omod << 14;
   }

   encoding |= sdwa.sel[0].to_sdwa_sel(sdwa_op.physReg().byte()) << 16;
   encoding |= sdwa.sel[0].sign_extend() ? 1 << 19 : 0;
   encoding |= sdwa.abs[0] << 21;
   encoding |= sdwa.neg[0] << 20;

   if (instr->operands.size() >= 2) {
      encoding |= sdwa.sel[1].to_sdwa_sel(instr->operands[1].physReg().byte()) << 24;
      encoding |= sdwa.sel[1].sign_extend() ? 1 << 27 : 0;
      encoding |= sdwa.abs[1] << 29;
      encoding |= sdwa.neg[1] << 28;
   }

   encoding |= reg(ctx, sdwa_op, 8);
   encoding |= (sdwa_op.physReg() < 256) << 23;
   if (instr->operands.size() >= 2)
      encoding |= (instr->operands[1].physReg() < 256) << 31;
   out.push_back(encoding);
}

void
emit_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   /* lower remaining pseudo-instructions */
   if (instr->opcode == aco_opcode::p_constaddr_getpc) {
      ctx.constaddrs[instr->operands[0].constantValue()].getpc_end = out.size() + 1;

      instr->opcode = aco_opcode::s_getpc_b64;
      instr->operands.pop_back();
   } else if (instr->opcode == aco_opcode::p_constaddr_addlo) {
      ctx.constaddrs[instr->operands[2].constantValue()].add_literal = out.size() + 1;

      instr->opcode = aco_opcode::s_add_u32;
      instr->operands.pop_back();
      assert(instr->operands[1].isConstant());
      /* in case it's an inline constant, make it a literal */
      instr->operands[1] = Operand::literal32(instr->operands[1].constantValue());
   } else if (instr->opcode == aco_opcode::p_resumeaddr_getpc) {
      ctx.resumeaddrs[instr->operands[0].constantValue()].getpc_end = out.size() + 1;

      instr->opcode = aco_opcode::s_getpc_b64;
      instr->operands.pop_back();
   } else if (instr->opcode == aco_opcode::p_resumeaddr_addlo) {
      ctx.resumeaddrs[instr->operands[2].constantValue()].add_literal = out.size() + 1;

      instr->opcode = aco_opcode::s_add_u32;
      instr->operands.pop_back();
      assert(instr->operands[1].isConstant());
      /* in case it's an inline constant, make it a literal */
      instr->operands[1] = Operand::literal32(instr->operands[1].constantValue());
   } else if (instr->opcode == aco_opcode::p_load_symbol) {
      assert(instr->operands[0].isConstant());
      assert(ctx.symbols);

      struct aco_symbol info;
      info.id = (enum aco_symbol_id)instr->operands[0].constantValue();
      info.offset = out.size() + 1;
      ctx.symbols->push_back(info);

      instr->opcode = aco_opcode::s_mov_b32;
      /* in case it's an inline constant, make it a literal */
      instr->operands[0] = Operand::literal32(0);
   }

   /* Promote VOP12C to VOP3 if necessary. */
   if ((instr->isVOP1() || instr->isVOP2() || instr->isVOPC()) && !instr->isVOP3() &&
       needs_vop3_gfx11(ctx, instr)) {
      instr->format = asVOP3(instr->format);
      if (instr->opcode == aco_opcode::v_fmaak_f16) {
         instr->opcode = aco_opcode::v_fma_f16;
         instr->format = (Format)((uint32_t)instr->format & ~(uint32_t)Format::VOP2);
      } else if (instr->opcode == aco_opcode::v_fmamk_f16) {
         instr->valu().swapOperands(1, 2);
         instr->opcode = aco_opcode::v_fma_f16;
         instr->format = (Format)((uint32_t)instr->format & ~(uint32_t)Format::VOP2);
      }
   }

   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   if (opcode == (uint32_t)-1) {
      char* outmem;
      size_t outsize;
      struct u_memstream mem;
      u_memstream_open(&mem, &outmem, &outsize);
      FILE* const memf = u_memstream_get(&mem);

      fprintf(memf, "Unsupported opcode: ");
      aco_print_instr(ctx.gfx_level, instr, memf);
      u_memstream_close(&mem);

      aco_err(ctx.program, outmem);
      free(outmem);

      abort();
   }

   switch (instr->format) {
   case Format::SOP2: {
      emit_sop2_instruction(ctx, out, instr);
      break;
   }
   case Format::SOPK: {
      emit_sopk_instruction(ctx, out, instr);
      break;
   }
   case Format::SOP1: {
      emit_sop1_instruction(ctx, out, instr);
      break;
   }
   case Format::SOPC: {
      emit_sopc_instruction(ctx, out, instr);
      break;
   }
   case Format::SOPP: {
      emit_sopp_instruction(ctx, out, instr);
      break;
   }
   case Format::SMEM: {
      emit_smem_instruction(ctx, out, instr);
      return;
   }
   case Format::VOP2: {
      emit_vop2_instruction(ctx, out, instr);
      break;
   }
   case Format::VOP1: {
      emit_vop1_instruction(ctx, out, instr);
      break;
   }
   case Format::VOPC: {
      emit_vopc_instruction(ctx, out, instr);
      break;
   }
   case Format::VINTRP: {
      emit_vintrp_instruction(ctx, out, instr);
      break;
   }
   case Format::VINTERP_INREG: {
      emit_vinterp_inreg_instruction(ctx, out, instr);
      break;
   }
   case Format::VOPD: {
      emit_vopd_instruction(ctx, out, instr);
      break;
   }
   case Format::DS: {
      emit_ds_instruction(ctx, out, instr);
      break;
   }
   case Format::LDSDIR: {
      emit_ldsdir_instruction(ctx, out, instr);
      break;
   }
   case Format::MUBUF: {
      if (ctx.gfx_level >= GFX12)
         emit_mubuf_instruction_gfx12(ctx, out, instr);
      else
         emit_mubuf_instruction(ctx, out, instr);
      break;
   }
   case Format::MTBUF: {
      if (ctx.gfx_level >= GFX12)
         emit_mtbuf_instruction_gfx12(ctx, out, instr);
      else
         emit_mtbuf_instruction(ctx, out, instr);
      break;
   }
   case Format::MIMG: {
      if (ctx.gfx_level >= GFX12)
         emit_mimg_instruction_gfx12(ctx, out, instr);
      else
         emit_mimg_instruction(ctx, out, instr);
      break;
   }
   case Format::FLAT:
   case Format::SCRATCH:
   case Format::GLOBAL: {
      if (ctx.gfx_level >= GFX12)
         emit_flatlike_instruction_gfx12(ctx, out, instr);
      else
         emit_flatlike_instruction(ctx, out, instr);
      break;
   }
   case Format::EXP: {
      emit_exp_instruction(ctx, out, instr);
      break;
   }
   case Format::PSEUDO:
   case Format::PSEUDO_BARRIER:
      if (instr->opcode != aco_opcode::p_unit_test)
         unreachable("Pseudo instructions should be lowered before assembly.");
      break;
   default:
      if (instr->isDPP16()) {
         emit_dpp16_instruction(ctx, out, instr);
         return;
      } else if (instr->isDPP8()) {
         emit_dpp8_instruction(ctx, out, instr);
         return;
      } else if (instr->isVOP3()) {
         emit_vop3_instruction(ctx, out, instr);
      } else if (instr->isVOP3P()) {
         emit_vop3p_instruction(ctx, out, instr);
      } else if (instr->isSDWA()) {
         emit_sdwa_instruction(ctx, out, instr);
      } else {
         unreachable("unimplemented instruction format");
      }
      break;
   }

   /* append literal dword */
   for (const Operand& op : instr->operands) {
      if (op.isLiteral()) {
         out.push_back(op.constantValue());
         break;
      }
   }
}

void
emit_block(asm_context& ctx, std::vector<uint32_t>& out, Block& block)
{
   for (aco_ptr<Instruction>& instr : block.instructions) {
#if 0
      unsigned start_idx = out.size();
      std::cerr << "Encoding:\t" << std::endl;
      aco_print_instr(ctx.gfx_level, instr.get(), stderr);
      std::cerr << std::endl;
#endif
      emit_instruction(ctx, out, instr.get());
#if 0
      for (unsigned i = start_idx; i < out.size(); i++)
         std::cerr << "encoding: " << "0x" << std::setfill('0') << std::setw(8) << std::hex << out[i] << std::endl;
#endif
   }
}

void
fix_exports(asm_context& ctx, std::vector<uint32_t>& out, Program* program)
{
   bool exported = false;
   for (Block& block : program->blocks) {
      if (!(block.kind & block_kind_export_end))
         continue;
      std::vector<aco_ptr<Instruction>>::reverse_iterator it = block.instructions.rbegin();
      while (it != block.instructions.rend()) {
         if ((*it)->isEXP()) {
            Export_instruction& exp = (*it)->exp();
            if (program->stage.hw == AC_HW_VERTEX_SHADER ||
                program->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER) {
               if (exp.dest >= V_008DFC_SQ_EXP_POS && exp.dest <= (V_008DFC_SQ_EXP_POS + 3)) {
                  exp.done = true;
                  exported = true;
                  break;
               }
            } else {
               exp.done = true;
               exp.valid_mask = true;
               exported = true;
               break;
            }
         } else if ((*it)->definitions.size() && (*it)->definitions[0].physReg() == exec) {
            break;
         }
         ++it;
      }
   }

   /* GFX10+ FS may not export anything if no discard is used. */
   bool may_skip_export = program->stage.hw == AC_HW_PIXEL_SHADER && program->gfx_level >= GFX10;

   if (!exported && !may_skip_export) {
      /* Abort in order to avoid a GPU hang. */
      bool is_vertex_or_ngg = (program->stage.hw == AC_HW_VERTEX_SHADER ||
                               program->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER);
      aco_err(program,
              "Missing export in %s shader:", is_vertex_or_ngg ? "vertex or NGG" : "fragment");
      aco_print_program(program, stderr);
      abort();
   }
}

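/* Inserts code into the binary and shifts every recorded offset (block
 * starts, pending branches, constaddr/resumeaddr fixups, symbols) that points
 * at or past the insertion point.
 */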
static void
insert_code(asm_context& ctx, std::vector<uint32_t>& out, unsigned insert_before,
            unsigned insert_count, const uint32_t* insert_data)
{
   out.insert(out.begin() + insert_before, insert_data, insert_data + insert_count);

   /* Update the offset of each affected block */
   for (Block& block : ctx.program->blocks) {
      if (block.offset >= insert_before)
         block.offset += insert_count;
   }

   /* Find first branch after the inserted code */
   auto branch_it = std::find_if(ctx.branches.begin(), ctx.branches.end(),
                                 [insert_before](const auto& branch) -> bool
                                 { return (unsigned)branch.first >= insert_before; });

   /* Update the locations of branches */
   for (; branch_it != ctx.branches.end(); ++branch_it)
      branch_it->first += insert_count;

   /* Update the locations of p_constaddr instructions */
   for (auto& constaddr : ctx.constaddrs) {
      constaddr_info& info = constaddr.second;
      if (info.getpc_end >= insert_before)
         info.getpc_end += insert_count;
      if (info.add_literal >= insert_before)
         info.add_literal += insert_count;
   }
   for (auto& constaddr : ctx.resumeaddrs) {
      constaddr_info& info = constaddr.second;
      if (info.getpc_end >= insert_before)
         info.getpc_end += insert_count;
      if (info.add_literal >= insert_before)
         info.add_literal += insert_count;
   }

   if (ctx.symbols) {
      for (auto& symbol : *ctx.symbols) {
         if (symbol.offset >= insert_before)
            symbol.offset += insert_count;
      }
   }
}

static void
fix_branches_gfx10(asm_context& ctx, std::vector<uint32_t>& out)
{
   /* Branches with an offset of 0x3f are buggy on GFX10;
    * we work around this by inserting NOPs if needed.
    */
   bool gfx10_3f_bug = false;

   do {
      auto buggy_branch_it = std::find_if(
         ctx.branches.begin(), ctx.branches.end(),
         [&ctx](const auto& branch) -> bool {
            return ((int)ctx.program->blocks[branch.second->imm].offset - branch.first - 1) == 0x3f;
         });

      gfx10_3f_bug = buggy_branch_it != ctx.branches.end();

      if (gfx10_3f_bug) {
         /* Insert an s_nop after the branch */
         constexpr uint32_t s_nop_0 = 0xbf800000u;
         insert_code(ctx, out, buggy_branch_it->first + 1, 1, &s_nop_0);
      }
   } while (gfx10_3f_bug);
}

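/* Rewrites a branch whose target doesn't fit into the 16-bit branch
 * immediate as an indirect jump: s_getpc_b64, an add of the PC-relative
 * distance (its literal is patched later via pass_flags), then s_setpc_b64.
 * SCC is stashed in the LSB of the new PC so conditional branches survive.
 */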
1510 void
emit_long_jump(asm_context & ctx,SALU_instruction * branch,bool backwards,std::vector<uint32_t> & out)1511 emit_long_jump(asm_context& ctx, SALU_instruction* branch, bool backwards,
1512 std::vector<uint32_t>& out)
1513 {
1514 Builder bld(ctx.program);
1515
1516 Definition def;
1517 if (branch->definitions.empty()) {
1518 assert(ctx.program->blocks[branch->imm].kind & block_kind_discard_early_exit);
1519 def = Definition(PhysReg(0), s2); /* The discard early exit block doesn't use SGPRs. */
1520 } else {
1521 def = branch->definitions[0];
1522 }
1523
1524 Definition def_tmp_lo(def.physReg(), s1);
1525 Operand op_tmp_lo(def.physReg(), s1);
1526 Definition def_tmp_hi(def.physReg().advance(4), s1);
1527 Operand op_tmp_hi(def.physReg().advance(4), s1);
1528
1529 aco_ptr<Instruction> instr;
1530
1531 if (branch->opcode != aco_opcode::s_branch) {
1532 /* for conditional branches, skip the long jump if the condition is false */
1533 aco_opcode inv;
1534 switch (branch->opcode) {
1535 case aco_opcode::s_cbranch_scc0: inv = aco_opcode::s_cbranch_scc1; break;
1536 case aco_opcode::s_cbranch_scc1: inv = aco_opcode::s_cbranch_scc0; break;
1537 case aco_opcode::s_cbranch_vccz: inv = aco_opcode::s_cbranch_vccnz; break;
1538 case aco_opcode::s_cbranch_vccnz: inv = aco_opcode::s_cbranch_vccz; break;
1539 case aco_opcode::s_cbranch_execz: inv = aco_opcode::s_cbranch_execnz; break;
1540 case aco_opcode::s_cbranch_execnz: inv = aco_opcode::s_cbranch_execz; break;
1541 default: unreachable("Unhandled long jump.");
1542 }
1543 unsigned size = ctx.gfx_level >= GFX12 ? 7 : 6;
1544 instr.reset(bld.sopp(inv, size));
1545 emit_sopp_instruction(ctx, out, instr.get(), true);
1546 }
1547
1548 /* create the new PC and stash SCC in the LSB */
1549 instr.reset(bld.sop1(aco_opcode::s_getpc_b64, def).instr);
1550 emit_instruction(ctx, out, instr.get());
1551
   if (ctx.gfx_level >= GFX12) {
      instr.reset(bld.sop1(aco_opcode::s_sext_i32_i16, def_tmp_hi, op_tmp_hi).instr);
      emit_instruction(ctx, out, instr.get());
   }

   instr.reset(
      bld.sop2(aco_opcode::s_addc_u32, def_tmp_lo, op_tmp_lo, Operand::literal32(0)).instr);
   emit_instruction(ctx, out, instr.get());
   branch->pass_flags = out.size();
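   /* pass_flags records the dword position just past the s_addc_u32 literal
    * within this sequence. fix_branches() treats a non-zero value as "already
    * converted to a long jump" and uses it to locate and patch the literal
    * once the final block offsets are known.
    */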

   /* s_addc_u32 for high 32 bits not needed because the program is in a 32-bit VA range */

   /* restore SCC and clear the LSB of the new PC */
   instr.reset(bld.sopc(aco_opcode::s_bitcmp1_b32, def_tmp_lo, op_tmp_lo, Operand::zero()).instr);
   emit_instruction(ctx, out, instr.get());
   instr.reset(bld.sop1(aco_opcode::s_bitset0_b32, def_tmp_lo, Operand::zero()).instr);
   emit_instruction(ctx, out, instr.get());

   /* create the s_setpc_b64 to jump */
   instr.reset(bld.sop1(aco_opcode::s_setpc_b64, Operand(def.physReg(), s2)).instr);
   emit_instruction(ctx, out, instr.get());
}

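/* Fix up all recorded branches now that block offsets are known. Converting
 * an out-of-range branch into a long jump grows the code, which can push
 * other branches out of range, so iterate until a fixed point is reached.
 */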
void
fix_branches(asm_context& ctx, std::vector<uint32_t>& out)
{
   bool repeat = false;
   do {
      repeat = false;

      if (ctx.gfx_level == GFX10)
         fix_branches_gfx10(ctx, out);

      for (std::pair<int, SALU_instruction*>& branch : ctx.branches) {
         int offset = (int)ctx.program->blocks[branch.second->imm].offset - branch.first - 1;
         if ((offset < INT16_MIN || offset > INT16_MAX) && !branch.second->pass_flags) {
            std::vector<uint32_t> long_jump;
            bool backwards =
               ctx.program->blocks[branch.second->imm].offset < (unsigned)branch.first;
            emit_long_jump(ctx, branch.second, backwards, long_jump);

            out[branch.first] = long_jump[0];
            insert_code(ctx, out, branch.first + 1, long_jump.size() - 1, long_jump.data() + 1);

            repeat = true;
            break;
         }

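         /* A non-zero pass_flags marks a branch already converted to a long
          * jump: patch the s_addc_u32 literal with the target's byte offset
          * relative to the fetched PC. Otherwise write the signed 16-bit
          * dword offset into the branch's SIMM16 field.
          */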
         if (branch.second->pass_flags) {
            int after_getpc = branch.first + branch.second->pass_flags - 2;
            offset = (int)ctx.program->blocks[branch.second->imm].offset - after_getpc;
            out[branch.first + branch.second->pass_flags - 1] = offset * 4;
         } else {
            out[branch.first] &= 0xffff0000u;
            out[branch.first] |= (uint16_t)offset;
         }
      }
   } while (repeat);
}

void
fix_constaddrs(asm_context& ctx, std::vector<uint32_t>& out)
{
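   /* The constant data is appended right after the code, so add the byte
    * distance from the end of each p_constaddr's s_getpc_b64 to the current
    * end of the code; the literal already holds the offset within the
    * constant data.
    */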
   for (auto& constaddr : ctx.constaddrs) {
      constaddr_info& info = constaddr.second;
      out[info.add_literal] += (out.size() - info.getpc_end) * 4u;

      if (ctx.symbols) {
         struct aco_symbol sym;
         sym.id = aco_symbol_const_data_addr;
         sym.offset = info.add_literal;
         ctx.symbols->push_back(sym);
      }
   }
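   /* For resume shaders, the literal initially holds the index of the resume
    * block; replace it with the byte offset from the end of the s_getpc_b64
    * to the block's start.
    */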
   for (auto& addr : ctx.resumeaddrs) {
      constaddr_info& info = addr.second;
      const Block& block = ctx.program->blocks[out[info.add_literal]];
      assert(block.kind & block_kind_resume);
      out[info.add_literal] = (block.offset - info.getpc_end) * 4u;
   }
}

void
align_block(asm_context& ctx, std::vector<uint32_t>& code, Block& block)
{
   /* Blocks with block_kind_loop_exit might be eliminated after jump threading, so we instead find
    * loop exits using loop_nest_depth.
    */
   if (ctx.loop_header && !block.linear_preds.empty() &&
       block.loop_nest_depth < ctx.loop_header->loop_nest_depth) {
      Block* loop_header = ctx.loop_header;
      ctx.loop_header = NULL;
      std::vector<uint32_t> nops;

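      /* Cache lines are counted in 16-dword (64-byte) units here. */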
      const unsigned loop_num_cl = DIV_ROUND_UP(block.offset - loop_header->offset, 16);

      /* On GFX10.3+, change the prefetch mode if the loop fits into 2 or 3 cache lines.
       * Don't use the s_inst_prefetch instruction on GFX10 as it might cause hangs.
       */
      const bool change_prefetch = ctx.program->gfx_level >= GFX10_3 &&
                                   ctx.program->gfx_level <= GFX11 && loop_num_cl > 1 &&
                                   loop_num_cl <= 3;

      if (change_prefetch) {
         Builder bld(ctx.program);
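         /* Presumably the immediate selects how many cache lines the frontend
          * prefetches ahead (0x3 being the default), using a smaller window
          * for larger loop bodies. The exact meaning of the modes is assumed
          * here, not documented in this file.
          */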
         int16_t prefetch_mode = loop_num_cl == 3 ? 0x1 : 0x2;
         aco_ptr<Instruction> instr(bld.sopp(aco_opcode::s_inst_prefetch, prefetch_mode));
         emit_instruction(ctx, nops, instr.get());
         insert_code(ctx, code, loop_header->offset, nops.size(), nops.data());

         /* Change the prefetch mode back to the default (0x3). This is
          * appended at the current end of the code, i.e. just before this
          * block, the first one after the loop.
          */
         instr->salu().imm = 0x3;
         emit_instruction(ctx, code, instr.get());
      }

      const unsigned loop_start_cl = loop_header->offset >> 4;
      const unsigned loop_end_cl = (block.offset - 1) >> 4;

      /* Align the loop if it fits into the fetched cache lines or if we can
       * reduce the number of cache lines with fewer than 8 NOPs.
       */
      const bool align_loop = loop_end_cl - loop_start_cl >= loop_num_cl &&
                              (loop_num_cl == 1 || change_prefetch || loop_header->offset % 16 > 8);

      if (align_loop) {
         nops.clear();
         nops.resize(16 - (loop_header->offset % 16), 0xbf800000u);
         insert_code(ctx, code, loop_header->offset, nops.size(), nops.data());
      }
   }

   if (block.kind & block_kind_loop_header) {
      /* In case of nested loops, only handle the innermost loops so that
       * handling outer loops does not break the alignment of inner loops.
       * Also ignore loops without a back-edge.
       */
      ctx.loop_header = block.linear_preds.size() > 1 ? &block : NULL;
   }

   /* Align resume shaders to a cache line. */
   if (block.kind & block_kind_resume) {
      size_t cache_aligned = align(code.size(), 16);
      code.resize(cache_aligned, 0xbf800000u); /* s_nop 0 */
      block.offset = code.size();
   }
}

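/* Assemble the whole program: emit each block (with alignment), fix up
 * branches and constant-address literals, then append the constant data
 * after the code. Returns the size of the executable code in bytes,
 * excluding the end-of-code markers and the constant data.
 */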
unsigned
emit_program(Program* program, std::vector<uint32_t>& code, std::vector<struct aco_symbol>* symbols,
             bool append_endpgm)
{
   asm_context ctx(program, symbols);

   bool is_separately_compiled_ngg_vs_or_es =
      (program->stage.sw == SWStage::VS || program->stage.sw == SWStage::TES) &&
      program->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER &&
      program->info.merged_shader_compiled_separately;

   /* The prolog has no exports. */
   if (!program->is_prolog && !program->info.ps.has_epilog &&
       !is_separately_compiled_ngg_vs_or_es &&
       (program->stage.hw == AC_HW_VERTEX_SHADER || program->stage.hw == AC_HW_PIXEL_SHADER ||
        program->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER))
      fix_exports(ctx, code, program);

   for (Block& block : program->blocks) {
      block.offset = code.size();
      align_block(ctx, code, block);
      emit_block(ctx, code, block);
   }

   fix_branches(ctx, code);

   unsigned exec_size = code.size() * sizeof(uint32_t);

   /* Add end-of-code markers (s_code_end) for the UMR disassembler. */
   if (append_endpgm)
      code.resize(code.size() + 5, 0xbf9f0000u);

   fix_constaddrs(ctx, code);

   /* Pad the constant data to a dword boundary, then copy it after the code. */
   while (program->constant_data.size() % 4u)
      program->constant_data.push_back(0);
   code.insert(code.end(), (uint32_t*)program->constant_data.data(),
               (uint32_t*)(program->constant_data.data() + program->constant_data.size()));

   program->config->scratch_bytes_per_wave =
      align(program->config->scratch_bytes_per_wave, program->dev.scratch_alloc_granule);

   return exec_size;
}

} // namespace aco