/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define XXH_INLINE_ALL
#include "util/xxhash.h"

#include "brw_fs.h"
#include "brw_fs_builder.h"
#include "brw_cfg.h"

/** @file
 *
 * Support for SSA-based global Common Subexpression Elimination (CSE).
 */

using namespace brw;

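/**
 * True if the instruction computes a value that depends only on its sources,
 * making it a candidate for CSE.  Sends from the GRF qualify only when they
 * have no side effects and are not volatile.
 */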
static bool
is_expression(const fs_visitor *v, const fs_inst *const inst)
{
   switch (inst->opcode) {
   case BRW_OPCODE_MOV:
   case BRW_OPCODE_SEL:
   case BRW_OPCODE_NOT:
   case BRW_OPCODE_AND:
   case BRW_OPCODE_OR:
   case BRW_OPCODE_XOR:
   case BRW_OPCODE_SHR:
   case BRW_OPCODE_SHL:
   case BRW_OPCODE_ASR:
   case BRW_OPCODE_ROR:
   case BRW_OPCODE_ROL:
   case BRW_OPCODE_CMP:
   case BRW_OPCODE_CMPN:
   case BRW_OPCODE_CSEL:
   case BRW_OPCODE_BFREV:
   case BRW_OPCODE_BFE:
   case BRW_OPCODE_BFI1:
   case BRW_OPCODE_BFI2:
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_MUL:
   case SHADER_OPCODE_MULH:
   case BRW_OPCODE_AVG:
   case BRW_OPCODE_FRC:
   case BRW_OPCODE_LZD:
   case BRW_OPCODE_FBH:
   case BRW_OPCODE_FBL:
   case BRW_OPCODE_CBIT:
   case BRW_OPCODE_ADD3:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDZ:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_PLN:
   case BRW_OPCODE_MAD:
   case BRW_OPCODE_LRP:
   case FS_OPCODE_FB_READ_LOGICAL:
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL:
   case SHADER_OPCODE_FIND_LIVE_CHANNEL:
   case SHADER_OPCODE_FIND_LAST_LIVE_CHANNEL:
   case SHADER_OPCODE_LOAD_LIVE_CHANNELS:
   case FS_OPCODE_LOAD_LIVE_CHANNELS:
   case SHADER_OPCODE_BROADCAST:
   case SHADER_OPCODE_SHUFFLE:
   case SHADER_OPCODE_QUAD_SWIZZLE:
   case SHADER_OPCODE_CLUSTER_BROADCAST:
   case SHADER_OPCODE_MOV_INDIRECT:
   case SHADER_OPCODE_TEX_LOGICAL:
   case SHADER_OPCODE_TXD_LOGICAL:
   case SHADER_OPCODE_TXF_LOGICAL:
   case SHADER_OPCODE_TXL_LOGICAL:
   case SHADER_OPCODE_TXS_LOGICAL:
   case FS_OPCODE_TXB_LOGICAL:
   case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
   case SHADER_OPCODE_TXF_CMS_W_GFX12_LOGICAL:
   case SHADER_OPCODE_TXF_MCS_LOGICAL:
   case SHADER_OPCODE_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_LOGICAL:
   case SHADER_OPCODE_TG4_BIAS_LOGICAL:
   case SHADER_OPCODE_TG4_EXPLICIT_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_IMPLICIT_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
   case SHADER_OPCODE_TG4_OFFSET_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_OFFSET_BIAS_LOGICAL:
   case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
   case SHADER_OPCODE_IMAGE_SIZE_LOGICAL:
   case SHADER_OPCODE_GET_BUFFER_SIZE:
   case FS_OPCODE_PACK:
   case FS_OPCODE_PACK_HALF_2x16_SPLIT:
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
   case SHADER_OPCODE_LOAD_SUBGROUP_INVOCATION:
      return true;
   case SHADER_OPCODE_LOAD_PAYLOAD:
      return !is_coalescing_payload(v->alloc, inst);
   default:
      return inst->is_send_from_grf() && !inst->has_side_effects() &&
             !inst->is_volatile();
   }
}

/**
 * True if the instruction should only be CSE'd within its local block.
 */
bool
local_only(const fs_inst *inst)
{
   switch (inst->opcode) {
   case SHADER_OPCODE_FIND_LIVE_CHANNEL:
   case SHADER_OPCODE_FIND_LAST_LIVE_CHANNEL:
   case SHADER_OPCODE_LOAD_LIVE_CHANNELS:
   case FS_OPCODE_LOAD_LIVE_CHANNELS:
      /* These depend on the current channel enables, so the same opcode
       * in another block will likely return a different value.
       */
      return true;
   case BRW_OPCODE_MOV:
      /* Global CSE of MOVs is likely not worthwhile. It can increase
       * register pressure by extending the lifetime of simple constants.
       */
      return true;
   case SHADER_OPCODE_LOAD_PAYLOAD:
      /* This is basically a MOV */
      return inst->sources == 1;
   case BRW_OPCODE_CMP:
      /* Seems to increase spilling a lot without much benefit */
      return true;
   default:
      return false;
   }
}

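/**
 * Compare the sources of two instructions for equivalence.
 *
 * Commutative opcodes match with their sources in any order; MAD is handled
 * specially so that only its two multiplicands commute.  For float MUL,
 * negations on either source are canonicalized away before comparing, and
 * *negate reports whether the two products differ only in sign.  For
 * example, (-a * 2.0f) and (a * -2.0f) match with *negate = false, while
 * (a * 2.0f) and (a * -2.0f) match with *negate = true.
 */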
static bool
operands_match(const fs_inst *a, const fs_inst *b, bool *negate)
{
   brw_reg *xs = a->src;
   brw_reg *ys = b->src;

   if (a->opcode == BRW_OPCODE_MAD) {
      return xs[0].equals(ys[0]) &&
             ((xs[1].equals(ys[1]) && xs[2].equals(ys[2])) ||
              (xs[2].equals(ys[1]) && xs[1].equals(ys[2])));
   } else if (a->opcode == BRW_OPCODE_MUL && a->dst.type == BRW_TYPE_F) {
      bool xs0_negate = xs[0].negate;
      bool xs1_negate = xs[1].file == IMM ? xs[1].f < 0.0f
                                          : xs[1].negate;
      bool ys0_negate = ys[0].negate;
      bool ys1_negate = ys[1].file == IMM ? ys[1].f < 0.0f
                                          : ys[1].negate;
      float xs1_imm = xs[1].f;
      float ys1_imm = ys[1].f;

      xs[0].negate = false;
      xs[1].negate = false;
      ys[0].negate = false;
      ys[1].negate = false;
      xs[1].f = fabsf(xs[1].f);
      ys[1].f = fabsf(ys[1].f);

      bool ret = (xs[0].equals(ys[0]) && xs[1].equals(ys[1])) ||
                 (xs[1].equals(ys[0]) && xs[0].equals(ys[1]));

      xs[0].negate = xs0_negate;
      xs[1].negate = xs[1].file == IMM ? false : xs1_negate;
      ys[0].negate = ys0_negate;
      ys[1].negate = ys[1].file == IMM ? false : ys1_negate;
      xs[1].f = xs1_imm;
      ys[1].f = ys1_imm;

      *negate = (xs0_negate != xs1_negate) != (ys0_negate != ys1_negate);
      if (*negate && (a->saturate || b->saturate))
         return false;
      return ret;
   } else if (!a->is_commutative()) {
      bool match = true;
      for (int i = 0; i < a->sources; i++) {
         if (!xs[i].equals(ys[i])) {
            match = false;
            break;
         }
      }
      return match;
   } else if (a->sources == 3) {
      return (xs[0].equals(ys[0]) && xs[1].equals(ys[1]) && xs[2].equals(ys[2])) ||
             (xs[0].equals(ys[0]) && xs[1].equals(ys[2]) && xs[2].equals(ys[1])) ||
             (xs[0].equals(ys[1]) && xs[1].equals(ys[0]) && xs[2].equals(ys[2])) ||
             (xs[0].equals(ys[1]) && xs[1].equals(ys[2]) && xs[2].equals(ys[0])) ||
             (xs[0].equals(ys[2]) && xs[1].equals(ys[0]) && xs[2].equals(ys[1])) ||
             (xs[0].equals(ys[2]) && xs[1].equals(ys[1]) && xs[2].equals(ys[0]));
   } else {
      return (xs[0].equals(ys[0]) && xs[1].equals(ys[1])) ||
             (xs[1].equals(ys[0]) && xs[0].equals(ys[1]));
   }
}

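/**
 * True if two instructions are equivalent for CSE: everything that affects
 * the computed value must match.  The destination register itself is
 * deliberately excluded, since that is what CSE replaces.
 */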
static bool
instructions_match(fs_inst *a, fs_inst *b, bool *negate)
{
228 {
229 return a->opcode == b->opcode &&
230 a->exec_size == b->exec_size &&
231 a->group == b->group &&
232 a->predicate == b->predicate &&
233 a->conditional_mod == b->conditional_mod &&
234 a->dst.type == b->dst.type &&
235 a->offset == b->offset &&
236 a->mlen == b->mlen &&
237 a->ex_mlen == b->ex_mlen &&
238 a->sfid == b->sfid &&
239 a->desc == b->desc &&
240 a->ex_desc == b->ex_desc &&
241 a->size_written == b->size_written &&
242 a->check_tdr == b->check_tdr &&
243 a->header_size == b->header_size &&
244 a->target == b->target &&
245 a->sources == b->sources &&
246 a->bits == b->bits &&
247 operands_match(a, b, negate);
248 }
249
250 /* -------------------------------------------------------------------- */
251
252 #define HASH(hash, data) XXH32(&(data), sizeof(data), hash)
253
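/**
 * Mix a brw_reg into the hash.  The relevant register fields are packed
 * into a padding-free struct so the whole thing can be hashed in one call
 * without touching uninitialized bytes.
 */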
uint32_t
hash_reg(uint32_t hash, const brw_reg &r)
{
   struct {
      uint64_t u64;
      uint32_t u32;
      uint16_t u16a;
      uint16_t u16b;
   } data = {
      .u64 = r.u64, .u32 = r.bits, .u16a = r.offset, .u16b = r.stride
   };
   STATIC_ASSERT(sizeof(data) == 16); /* ensure there's no padding */
   hash = HASH(hash, data);
   return hash;
}

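/**
 * Hash an instruction consistently with instructions_match(): anything that
 * can match must hash identically.  For commutative opcodes the per-source
 * hashes are combined by multiplication, which is itself commutative, so
 * operand order cannot affect the result.
 */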
static uint32_t
hash_inst(const void *v)
{
272 {
273 const fs_inst *inst = static_cast<const fs_inst *>(v);
274 uint32_t hash = 0;
275
276 /* Skip dst - that would make nothing ever match */
277
278 /* Skip ir and annotation - we don't care for equivalency purposes. */
279
280 const uint8_t u8data[] = {
281 inst->sources,
282 inst->exec_size,
283 inst->group,
284 inst->mlen,
285 inst->ex_mlen,
286 inst->sfid,
287 inst->header_size,
288 inst->target,
289
290 inst->conditional_mod,
291 inst->predicate,
292 };
293 const uint32_t u32data[] = {
294 inst->desc,
295 inst->ex_desc,
296 inst->offset,
297 inst->size_written,
298 inst->opcode,
299 inst->bits,
300 };
301
302 hash = HASH(hash, u8data);
303 hash = HASH(hash, u32data);
304
   /* Skip hashing sched - we shouldn't be CSE'ing after SWSB is assigned. */

   if (inst->opcode == BRW_OPCODE_MAD) {
      /* Commutatively combine the hashes for the multiplicands */
      hash = hash_reg(hash, inst->src[0]);
      uint32_t hash1 = hash_reg(hash, inst->src[1]);
      uint32_t hash2 = hash_reg(hash, inst->src[2]);
      hash = hash1 * hash2;
   } else if (inst->opcode == BRW_OPCODE_MUL &&
              inst->dst.type == BRW_TYPE_F) {
      /* Canonicalize negations on either source (or both) and commutatively
       * combine the hashes for both sources.
       */
      brw_reg src[2] = { inst->src[0], inst->src[1] };
      uint32_t src_hash[2];

      for (int i = 0; i < 2; i++) {
         src[i].negate = false;
         if (src[i].file == IMM)
            src[i].f = fabsf(src[i].f);

         src_hash[i] = hash_reg(hash, src[i]);
      }

      hash = src_hash[0] * src_hash[1];
   } else if (inst->is_commutative()) {
      /* Commutatively combine the sources */
      uint32_t hash0 = hash_reg(hash, inst->src[0]);
      uint32_t hash1 = hash_reg(hash, inst->src[1]);
      uint32_t hash2 = inst->sources > 2 ? hash_reg(hash, inst->src[2]) : 1;
      hash = hash0 * hash1 * hash2;
   } else {
      /* Just hash all the sources */
      for (int i = 0; i < inst->sources; i++)
         hash = hash_reg(hash, inst->src[i]);
   }

   return hash;
}

/* -------------------------------------------------------------------- */

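/* Set comparison callback; the negate result is ignored at lookup time. */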
static bool
cmp_func(const void *data1, const void *data2)
{
   bool negate;
   return instructions_match((fs_inst *) data1, (fs_inst *) data2, &negate);
}

/* Bit 31 is set in a remap_table entry when the remapped value needs to be
 * negated.
 */
#define REMAP_NEGATE (0x80000000u)

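/**
 * Rewrite an instruction's sources to use the defs recorded in remap_table.
 * When REMAP_NEGATE is set on an entry, the negation is folded into the
 * source modifier if the instruction supports it; otherwise a float MOV of
 * the negated def is emitted right after the def itself, and the table entry
 * is updated to point at that resolved copy for future uses.
 */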
static void
remap_sources(fs_visitor &s, const brw::def_analysis &defs,
              fs_inst *inst, unsigned *remap_table)
{
   for (int i = 0; i < inst->sources; i++) {
      if (inst->src[i].file == VGRF &&
          inst->src[i].nr < defs.count() &&
          remap_table[inst->src[i].nr] != ~0u) {
         const unsigned old_nr = inst->src[i].nr;
         unsigned new_nr = remap_table[old_nr];
         const bool need_negate = new_nr & REMAP_NEGATE;
         new_nr &= ~REMAP_NEGATE;
         inst->src[i].nr = new_nr;

         if (need_negate) {
            if ((inst->src[i].type != BRW_TYPE_F &&
                 !inst->can_change_types()) ||
                !inst->can_do_source_mods(s.devinfo)) {
               /* We can't use the negate directly, so resolve it just after
                * the def and use that for any future uses.
                */
               fs_inst *def = defs.get(inst->src[i]);
               bblock_t *def_block = defs.get_block(inst->src[i]);
               const fs_builder dbld =
                  fs_builder(&s, def_block, def).at(def_block, def->next);

               /* Resolve any deferred block IP changes before inserting */
               if (def_block->end_ip_delta)
                  s.cfg->adjust_block_ips();

               brw_reg neg = brw_vgrf(new_nr, BRW_TYPE_F);
               brw_reg tmp = dbld.MOV(negate(neg));
               inst->src[i].nr = tmp.nr;
               remap_table[old_nr] = tmp.nr;
            } else {
               inst->src[i].negate = !inst->src[i].negate;
               inst->src[i].type = BRW_TYPE_F;
            }
         }
      }
   }
}

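/**
 * Global CSE over SSA defs.
 *
 * Walks the CFG keeping a hash set of candidate expressions.  When an
 * instruction matches an existing entry whose def dominates it (or lives in
 * the same block, for local_only() opcodes), the instruction is deleted and
 * its def number is recorded in remap_table so that later uses are rewritten
 * to the surviving def.  Expressions that read the flag register also hash
 * the last flag write, so they can only match under the same flag value.
 * Null-destination instructions that match the previous flag write in the
 * block are removed as redundant.
 */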
bool
brw_fs_opt_cse_defs(fs_visitor &s)
{
   const intel_device_info *devinfo = s.devinfo;
   const idom_tree &idom = s.idom_analysis.require();
   const brw::def_analysis &defs = s.def_analysis.require();
   bool progress = false;
   bool need_remaps = false;

   unsigned *remap_table = new unsigned[defs.count()];
   memset(remap_table, ~0u, defs.count() * sizeof(int));
   struct set *set = _mesa_set_create(NULL, NULL, cmp_func);

   foreach_block(block, s.cfg) {
      fs_inst *last_flag_write = NULL;
      fs_inst *last = NULL;

      foreach_inst_in_block_safe(fs_inst, inst, block) {
         if (need_remaps)
            remap_sources(s, defs, inst, remap_table);

         /* Updating last_flag_write should happen at the bottom of the loop,
          * but doing it this way lets us use "continue" more easily.
          */
         if (last && last->flags_written(devinfo))
            last_flag_write = last;
         last = inst;

         if (inst->dst.is_null()) {
            bool ignored;
            if (last_flag_write && !inst->writes_accumulator &&
                instructions_match(last_flag_write, inst, &ignored)) {
               /* This instruction has no destination but has a flag write
                * which is redundant with the previous flag write in our
                * basic block. So we can simply remove it.
                */
               inst->remove(block, true);
               last = NULL;
               progress = true;
            }
         } else if (is_expression(&s, inst) && defs.get(inst->dst)) {
            assert(!inst->writes_accumulator);
            assert(!inst->reads_accumulator_implicitly());

            uint32_t hash = hash_inst(inst);
            if (inst->flags_read(devinfo)) {
               hash = last_flag_write ? HASH(hash, last_flag_write)
                                      : HASH(hash, block);
            }

            struct set_entry *e =
               _mesa_set_search_or_add_pre_hashed(set, hash, inst, NULL);
            if (!e) goto out; /* out of memory error */
            fs_inst *match = (fs_inst *) e->key;

            /* If there was no match, move on */
            if (match == inst)
               continue;

            bblock_t *def_block = defs.get_block(match->dst);
            if (block != def_block && (local_only(inst) ||
                                       !idom.dominates(def_block, block))) {
               /* If `match` doesn't dominate `inst` then remove it from
                * the set and add `inst` instead so future lookups see that.
                */
               e->key = inst;
               continue;
            }

            /* We can replace inst with match or negate(match). */
            bool negate = false;
            if (inst->opcode == BRW_OPCODE_MUL &&
                inst->dst.type == BRW_TYPE_F) {
               /* Determine whether inst is actually negate(match) */
               bool ops_must_match = operands_match(inst, match, &negate);
               assert(ops_must_match);
            }

            progress = true;
            need_remaps = true;
            remap_table[inst->dst.nr] =
               match->dst.nr | (negate ? REMAP_NEGATE : 0);

            inst->remove(block, true);
         }
      }
   }

out:
   delete [] remap_table;
   _mesa_set_destroy(set, NULL);

   if (progress) {
      s.cfg->adjust_block_ips();
      s.invalidate_analysis(DEPENDENCY_INSTRUCTION_DATA_FLOW |
                            DEPENDENCY_INSTRUCTION_DETAIL);
   }

   return progress;
}

#undef HASH