/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <[email protected]>
 *
 */

#include "elk_eu.h"
#include "elk_fs.h"
#include "elk_fs_live_variables.h"
#include "elk_vec4.h"
#include "elk_cfg.h"
#include "elk_shader.h"
#include <new>

using namespace elk;
/** @file elk_schedule_instructions.cpp
 *
 * List scheduling of FS instructions.
 *
 * The basic model of the list scheduler is to take a basic block,
 * compute a DAG of the dependencies (RAW ordering with latency, WAW
 * ordering with latency, WAR ordering), and make a list of the DAG heads.
 * Heuristically pick a DAG head, then put all the children that are
 * now DAG heads into the list of things to schedule.
 *
 * The heuristic is the important part.  We're trying to be cheap,
 * since actually computing the optimal scheduling is NP-complete.
 * What we do is track a "current clock".  When we schedule a node, we
 * update the earliest-unblocked clock time of its children, and
 * increment the clock.  Then, when trying to schedule, we just pick
 * the earliest-unblocked instruction to schedule.
 *
 * Note that often there will be many things which could execute
 * immediately, and there are a range of heuristic options to choose
 * from in picking among those.
 */
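
/* For orientation, the core greedy loop the heuristics below feed into looks
 * roughly like this (an illustrative sketch, not code from this file; the
 * names `ready', `pick_candidate' and `emit' are hypothetical):
 *
 *    int time = 0;
 *    while (!ready.empty()) {
 *       node *n = pick_candidate(ready);        // heuristic choice
 *       time = MAX2(time, n->tmp.unblocked_time) + n->issue_time;
 *       emit(n);
 *       for (each child c of n)                 // unblock successors
 *          if (--c->tmp.parent_count == 0)
 *             ready.push(c);                    // c is now a DAG head
 *    }
 */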

static bool debug = false;

class elk_instruction_scheduler;
struct elk_schedule_node_child;

class elk_schedule_node : public exec_node
{
public:
   void set_latency_gfx4();
   void set_latency_gfx7(const struct elk_isa_info *isa);

   elk_backend_instruction *inst;
   elk_schedule_node_child *children;
   int children_count;
   int children_cap;
   int initial_parent_count;
   int initial_unblocked_time;
   int latency;

   /**
    * This is the sum of the instruction's latency plus the maximum delay of
    * its children, or just the issue_time if it's a leaf node.
    */
   int delay;

   /**
    * Preferred exit node among the (direct or indirect) successors of this
    * node.  Among the scheduler nodes blocked by this node, this will be the
    * one that may cause earliest program termination, or NULL if none of the
    * successors is an exit node.
    */
   elk_schedule_node *exit;

   /**
    * How many cycles this instruction takes to issue.
    *
    * Instructions in gen hardware are handled one simd4 vector at a time,
    * with 1 cycle per vector dispatched.  Thus SIMD8 pixel shaders take 2
    * cycles to dispatch and SIMD16 (compressed) instructions take 4.
    */
   int issue_time;

   /* Temporary data used during the scheduling process. */
   struct {
      int parent_count;
      int unblocked_time;

      /**
       * Which iteration of pushing groups of children onto the candidates list
       * this node was a part of.
       */
      unsigned cand_generation;
   } tmp;
};

struct elk_schedule_node_child {
   elk_schedule_node *n;
   int effective_latency;
};

static inline void
reset_node_tmp(elk_schedule_node *n)
{
   n->tmp.parent_count = n->initial_parent_count;
   n->tmp.unblocked_time = n->initial_unblocked_time;
   n->tmp.cand_generation = 0;
}

/**
 * Lower bound of the scheduling time after which one of the instructions
 * blocked by this node may lead to program termination.
 *
 * exit_unblocked_time() determines a strict partial ordering relation '«' on
 * the set of scheduler nodes as follows:
 *
 *   n « m <-> exit_unblocked_time(n) < exit_unblocked_time(m)
 *
 * which can be used to heuristically order nodes according to how early they
 * can unblock an exit node and lead to program termination.
 */
static inline int
exit_tmp_unblocked_time(const elk_schedule_node *n)
{
   return n->exit ? n->exit->tmp.unblocked_time : INT_MAX;
}

static inline int
exit_initial_unblocked_time(const elk_schedule_node *n)
{
   return n->exit ? n->exit->initial_unblocked_time : INT_MAX;
}
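
/* For example, under the `«' relation above, if node A blocks an exit that
 * could be unblocked at cycle 40 while node B blocks one at cycle 200 (or
 * blocks no exit at all, i.e. INT_MAX), then A « B, and a heuristic that
 * favors early program termination should prefer scheduling A first.  This
 * is only an illustration of how the helpers above are meant to be compared.
 */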
151 
152 void
set_latency_gfx4()153 elk_schedule_node::set_latency_gfx4()
154 {
155    int chans = 8;
156    int math_latency = 22;
157 
158    switch (inst->opcode) {
159    case ELK_SHADER_OPCODE_RCP:
160       this->latency = 1 * chans * math_latency;
161       break;
162    case ELK_SHADER_OPCODE_RSQ:
163       this->latency = 2 * chans * math_latency;
164       break;
165    case ELK_SHADER_OPCODE_INT_QUOTIENT:
166    case ELK_SHADER_OPCODE_SQRT:
167    case ELK_SHADER_OPCODE_LOG2:
168       /* full precision log.  partial is 2. */
169       this->latency = 3 * chans * math_latency;
170       break;
171    case ELK_SHADER_OPCODE_INT_REMAINDER:
172    case ELK_SHADER_OPCODE_EXP2:
173       /* full precision.  partial is 3, same throughput. */
174       this->latency = 4 * chans * math_latency;
175       break;
176    case ELK_SHADER_OPCODE_POW:
177       this->latency = 8 * chans * math_latency;
178       break;
179    case ELK_SHADER_OPCODE_SIN:
180    case ELK_SHADER_OPCODE_COS:
181       /* minimum latency, max is 12 rounds. */
182       this->latency = 5 * chans * math_latency;
183       break;
184    default:
185       this->latency = 2;
186       break;
187    }
188 }
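
/* As a worked example of the model above: with 8 channels and a math unit
 * round of 22 cycles, a SIN/COS node is assigned 5 * 8 * 22 = 880 cycles of
 * latency and a POW node 8 * 8 * 22 = 1408.  These are coarse estimates for
 * the unpipelined Gfx4 math unit rather than measured values.
 */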

void
elk_schedule_node::set_latency_gfx7(const struct elk_isa_info *isa)
{
   const bool is_haswell = isa->devinfo->verx10 == 75;

   switch (inst->opcode) {
   case ELK_OPCODE_MAD:
      /* 2 cycles
       *  (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       *  (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 18 cycles on IVB, 16 on HSW
       *  (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                     { align16 WE_normal 1Q };
       *
       * 20 cycles on IVB, 18 on HSW
       *  (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                     { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = is_haswell ? 16 : 18;
      break;

   case ELK_OPCODE_LRP:
      /* 2 cycles
       *  (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       *  (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 16 cycles on IVB, 14 on HSW
       *  (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                     { align16 WE_normal 1Q };
       *
       * 16 cycles
       *  (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                     { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = 14;
      break;

   case ELK_SHADER_OPCODE_RCP:
   case ELK_SHADER_OPCODE_RSQ:
   case ELK_SHADER_OPCODE_SQRT:
   case ELK_SHADER_OPCODE_LOG2:
   case ELK_SHADER_OPCODE_EXP2:
   case ELK_SHADER_OPCODE_SIN:
   case ELK_SHADER_OPCODE_COS:
      /* 2 cycles:
       * math inv(8) g4<1>F g2<0,1,0>F      null       { align1 WE_normal 1Q };
       *
       * 18 cycles:
       * math inv(8) g4<1>F g2<0,1,0>F      null       { align1 WE_normal 1Q };
       * mov(8)      null   g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * Same for exp2, log2, rsq, sqrt, sin, cos.
       */
      latency = is_haswell ? 14 : 16;
      break;

   case ELK_SHADER_OPCODE_POW:
      /* 2 cycles:
       * math pow(8) g4<1>F g2<0,1,0>F   g2.1<0,1,0>F  { align1 WE_normal 1Q };
       *
       * 26 cycles:
       * math pow(8) g4<1>F g2<0,1,0>F   g2.1<0,1,0>F  { align1 WE_normal 1Q };
       * mov(8)      null   g4<8,8,1>F                 { align1 WE_normal 1Q };
       */
      latency = is_haswell ? 22 : 24;
      break;

   case ELK_SHADER_OPCODE_TEX:
   case ELK_SHADER_OPCODE_TXD:
   case ELK_SHADER_OPCODE_TXF:
   case ELK_SHADER_OPCODE_TXF_LZ:
   case ELK_SHADER_OPCODE_TXL:
   case ELK_SHADER_OPCODE_TXL_LZ:
      /* 18 cycles:
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       *
       * 697 +/-49 cycles (min 610, n=26):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * So the latency on our first texture load of the batchbuffer takes
       * ~700 cycles, since the caches are cold at that point.
       *
       * 840 +/- 92 cycles (min 720, n=25):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * On the second load, it takes just an extra ~140 cycles, and after
       * accounting for the 14 cycles of the MOV's latency, that makes ~130.
       *
       * 683 +/- 49 cycles (min = 602, n=47):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * send(8) g50<1>UW   g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * The unit appears to be pipelined, since this matches up with the
       * cache-cold case, despite there being two loads here.  If you replace
       * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
       *
       * So, take some number between the cache-hot 140 cycles and the
       * cache-cold 700 cycles.  No particular tuning was done on this.
       *
       * I haven't done significant testing of the non-TEX opcodes.  TXL at
       * least looked about the same as TEX.
       */
      latency = 200;
      break;

   case ELK_SHADER_OPCODE_TXS:
      /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
       * cycles (n=15):
       * mov(8)   g114<1>UD  0D                        { align1 WE_normal 1Q };
       * send(8)  g6<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 1) mlen 1 rlen 4        { align1 WE_normal 1Q };
       * mov(16)  g6<1>F     g6<8,8,1>D                { align1 WE_normal 1Q };
       *
       * Two loads was 535 +/- 30 cycles (n=19):
       * mov(16)   g114<1>UD  0D                       { align1 WE_normal 1H };
       * send(16)  g6<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8        { align1 WE_normal 1H };
       * mov(16)   g114<1>UD  0D                       { align1 WE_normal 1H };
       * mov(16)   g6<1>F     g6<8,8,1>D               { align1 WE_normal 1H };
       * send(16)  g8<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8        { align1 WE_normal 1H };
       * mov(16)   g8<1>F     g8<8,8,1>D               { align1 WE_normal 1H };
       * add(16)   g6<1>F     g6<8,8,1>F   g8<8,8,1>F  { align1 WE_normal 1H };
       *
       * Since the only caches that should matter are just the
       * instruction/state cache containing the surface state, assume that we
       * always have hot caches.
       */
      latency = 100;
      break;

   case ELK_FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GFX4:
   case ELK_FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case ELK_VS_OPCODE_PULL_CONSTANT_LOAD:
      /* testing using varying-index pull constants:
       *
       * 16 cycles:
       * mov(8)  g4<1>D  g2.1<0,1,0>F                  { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       *
       * ~480 cycles:
       * mov(8)  g4<1>D  g2.1<0,1,0>F                  { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                    { align1 WE_normal 1Q };
       *
       * ~620 cycles:
       * mov(8)  g4<1>D  g2.1<0,1,0>F                  { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                    { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                    { align1 WE_normal 1Q };
       *
       * So, if it's cache-hot, it's about 140.  If it's cache cold, it's
       * about 460.  We expect to mostly be cache hot, so pick something more
       * in that direction.
       */
      latency = 200;
      break;

   case ELK_SHADER_OPCODE_GFX7_SCRATCH_READ:
      /* Testing a load from offset 0, that had been previously written:
       *
       * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
       * mov(8)  null      g114<8,8,1>F { align1 WE_normal 1Q };
       *
       * The cycles spent seemed to be grouped around 40-50 (as low as 38),
       * then around 140.  Presumably this is cache hit vs miss.
       */
      latency = 50;
      break;

   case ELK_VEC4_OPCODE_UNTYPED_ATOMIC:
      /* See GFX7_DATAPORT_DC_UNTYPED_ATOMIC_OP */
      latency = 14000;
      break;

   case ELK_VEC4_OPCODE_UNTYPED_SURFACE_READ:
   case ELK_VEC4_OPCODE_UNTYPED_SURFACE_WRITE:
      /* See also GFX7_DATAPORT_DC_UNTYPED_SURFACE_READ */
      latency = is_haswell ? 300 : 600;
      break;

   case ELK_SHADER_OPCODE_SEND:
      switch (inst->sfid) {
      case ELK_SFID_SAMPLER: {
         unsigned msg_type = (inst->desc >> 12) & 0x1f;
         switch (msg_type) {
         case GFX5_SAMPLER_MESSAGE_SAMPLE_RESINFO:
         case GFX6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO:
            /* See also ELK_SHADER_OPCODE_TXS */
            latency = 100;
            break;

         default:
            /* See also ELK_SHADER_OPCODE_TEX */
            latency = 200;
            break;
         }
         break;
      }

      case GFX6_SFID_DATAPORT_CONSTANT_CACHE:
         /* See ELK_FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD */
         latency = 200;
         break;

      case GFX6_SFID_DATAPORT_RENDER_CACHE:
         switch (elk_fb_desc_msg_type(isa->devinfo, inst->desc)) {
         case GFX7_DATAPORT_RC_TYPED_SURFACE_WRITE:
         case GFX7_DATAPORT_RC_TYPED_SURFACE_READ:
            /* See also ELK_SHADER_OPCODE_TYPED_SURFACE_READ */
            assert(!is_haswell);
            latency = 600;
            break;

         case GFX7_DATAPORT_RC_TYPED_ATOMIC_OP:
            /* See also ELK_SHADER_OPCODE_TYPED_ATOMIC */
            assert(!is_haswell);
            latency = 14000;
            break;

         case GFX6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE:
            /* completely fabricated number */
            latency = 600;
            break;

         default:
            unreachable("Unknown render cache message");
         }
         break;

      case GFX7_SFID_DATAPORT_DATA_CACHE:
         switch ((inst->desc >> 14) & 0x1f) {
         case ELK_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ:
         case GFX7_DATAPORT_DC_UNALIGNED_OWORD_BLOCK_READ:
         case GFX6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE:
            /* We have no data for this but assume it's a little faster than
             * untyped surface read/write.
             */
            latency = 200;
            break;

         case GFX7_DATAPORT_DC_DWORD_SCATTERED_READ:
         case GFX6_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE:
         case HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ:
         case HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE:
            /* We have no data for this but assume it's roughly the same as
             * untyped surface read/write.
             */
            latency = 300;
            break;

         case GFX7_DATAPORT_DC_UNTYPED_SURFACE_READ:
         case GFX7_DATAPORT_DC_UNTYPED_SURFACE_WRITE:
            /* Test code:
             *   mov(8)    g112<1>UD       0x00000000UD       { align1 WE_all 1Q };
             *   mov(1)    g112.7<1>UD     g1.7<0,1,0>UD      { align1 WE_all };
             *   mov(8)    g113<1>UD       0x00000000UD       { align1 WE_normal 1Q };
             *   send(8)   g4<1>UD         g112<8,8,1>UD
             *             data (38, 6, 5) mlen 2 rlen 1      { align1 WE_normal 1Q };
             *   .
             *   . [repeats 8 times]
             *   .
             *   mov(8)    g112<1>UD       0x00000000UD       { align1 WE_all 1Q };
             *   mov(1)    g112.7<1>UD     g1.7<0,1,0>UD      { align1 WE_all };
             *   mov(8)    g113<1>UD       0x00000000UD       { align1 WE_normal 1Q };
             *   send(8)   g4<1>UD         g112<8,8,1>UD
             *             data (38, 6, 5) mlen 2 rlen 1      { align1 WE_normal 1Q };
             *
             * Running it 100 times as fragment shader on a 128x128 quad
             * gives an average latency of 583 cycles per surface read,
             * standard deviation 0.9%.
             */
            assert(!is_haswell);
            latency = 600;
            break;

         case GFX7_DATAPORT_DC_UNTYPED_ATOMIC_OP:
            /* Test code:
             *   mov(8)    g112<1>ud       0x00000000ud       { align1 WE_all 1Q };
             *   mov(1)    g112.7<1>ud     g1.7<0,1,0>ud      { align1 WE_all };
             *   mov(8)    g113<1>ud       0x00000000ud       { align1 WE_normal 1Q };
             *   send(8)   g4<1>ud         g112<8,8,1>ud
             *             data (38, 5, 6) mlen 2 rlen 1      { align1 WE_normal 1Q };
             *
             * Running it 100 times as fragment shader on a 128x128 quad
             * gives an average latency of 13867 cycles per atomic op,
             * standard deviation 3%.  Note that this is a rather
             * pessimistic estimate, the actual latency in cases with few
             * collisions between threads and favorable pipelining has been
             * seen to be reduced by a factor of 100.
             */
            assert(!is_haswell);
            latency = 14000;
            break;

         default:
            unreachable("Unknown data cache message");
         }
         break;

      case HSW_SFID_DATAPORT_DATA_CACHE_1:
         switch (elk_dp_desc_msg_type(isa->devinfo, inst->desc)) {
         case HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ:
         case HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE:
         case HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ:
         case HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE:
         case GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_WRITE:
         case GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_READ:
         case GFX8_DATAPORT_DC_PORT1_A64_SCATTERED_WRITE:
         case GFX9_DATAPORT_DC_PORT1_A64_SCATTERED_READ:
         case GFX8_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_READ:
         case GFX8_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_WRITE:
            /* See also GFX7_DATAPORT_DC_UNTYPED_SURFACE_READ */
            latency = 300;
            break;

         case HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP:
         case HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2:
         case HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2:
         case HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP:
         case GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_OP:
            /* See also GFX7_DATAPORT_DC_UNTYPED_ATOMIC_OP */
            latency = 14000;
            break;

         default:
            unreachable("Unknown data cache message");
         }
         break;

      case GFX7_SFID_PIXEL_INTERPOLATOR:
         latency = 50; /* TODO */
         break;

      case ELK_SFID_URB:
         latency = 200;
         break;

      default:
         unreachable("Unknown SFID");
      }
      break;

   default:
      /* 2 cycles:
       * mul(8) g4<1>F g2<0,1,0>F      0.5F            { align1 WE_normal 1Q };
       *
       * 16 cycles:
       * mul(8) g4<1>F g2<0,1,0>F      0.5F            { align1 WE_normal 1Q };
       * mov(8) null   g4<8,8,1>F                      { align1 WE_normal 1Q };
       */
      latency = 14;
      break;
   }
}

class elk_instruction_scheduler {
public:
   elk_instruction_scheduler(void *mem_ctx, const elk_backend_shader *s, int grf_count,
                         int grf_write_scale, bool post_reg_alloc):
      bs(s)
   {
      this->mem_ctx = mem_ctx;
      this->lin_ctx = linear_context(this->mem_ctx);
      this->grf_count = grf_count;
      this->post_reg_alloc = post_reg_alloc;

      this->last_grf_write = linear_zalloc_array(lin_ctx, elk_schedule_node *, grf_count * grf_write_scale);

      this->nodes_len = s->cfg->last_block()->end_ip + 1;
      this->nodes = linear_zalloc_array(lin_ctx, elk_schedule_node, this->nodes_len);

      const struct intel_device_info *devinfo = bs->devinfo;
      const struct elk_isa_info *isa = &bs->compiler->isa;

      elk_schedule_node *n = nodes;
      foreach_block_and_inst(block, elk_backend_instruction, inst, s->cfg) {
         n->inst = inst;

         /* We can't measure Gfx6 timings directly but expect them to be much
          * closer to Gfx7 than Gfx4.
          */
         if (!post_reg_alloc)
            n->latency = 1;
         else if (devinfo->ver >= 6)
            n->set_latency_gfx7(isa);
         else
            n->set_latency_gfx4();

         n++;
      }
      assert(n == nodes + nodes_len);

      current.block = NULL;
      current.start = NULL;
      current.end = NULL;
      current.len = 0;
      current.time = 0;
      current.cand_generation = 0;
      current.available.make_empty();
   }

   void add_barrier_deps(elk_schedule_node *n);
   void add_cross_lane_deps(elk_schedule_node *n);
   void add_dep(elk_schedule_node *before, elk_schedule_node *after, int latency);
   void add_dep(elk_schedule_node *before, elk_schedule_node *after);

   void set_current_block(elk_bblock_t *block);
   void compute_delays();
   void compute_exits();

   void schedule(elk_schedule_node *chosen);
   void update_children(elk_schedule_node *chosen);

   void *mem_ctx;
   linear_ctx *lin_ctx;

   elk_schedule_node *nodes;
   int nodes_len;

   /* Current block being processed. */
   struct {
      elk_bblock_t *block;

      /* Range of nodes in the block.  End will point to first node
       * address after the block, i.e. the range is [start, end).
       */
      elk_schedule_node *start;
      elk_schedule_node *end;
      int len;

      int scheduled;

      unsigned cand_generation;
      int time;
      exec_list available;
   } current;

   bool post_reg_alloc;
   int grf_count;
   const elk_backend_shader *bs;

   /**
    * Last instruction to have written the grf (or a channel in the grf, for
    * the scalar backend)
    */
   elk_schedule_node **last_grf_write;
};

class elk_fs_instruction_scheduler : public elk_instruction_scheduler
{
public:
   elk_fs_instruction_scheduler(void *mem_ctx, const elk_fs_visitor *v, int grf_count, int hw_reg_count,
                            int block_count, bool post_reg_alloc);
   void calculate_deps();
   bool is_compressed(const elk_fs_inst *inst);
   elk_schedule_node *choose_instruction_to_schedule();
   int calculate_issue_time(elk_backend_instruction *inst);

   void count_reads_remaining(elk_backend_instruction *inst);
   void setup_liveness(elk_cfg_t *cfg);
   void update_register_pressure(elk_backend_instruction *inst);
   int get_register_pressure_benefit(elk_backend_instruction *inst);
   void clear_last_grf_write();

   void schedule_instructions();
   void run(instruction_scheduler_mode mode);

   const elk_fs_visitor *v;
   unsigned hw_reg_count;
   int reg_pressure;
   instruction_scheduler_mode mode;

   /*
    * The register pressure at the beginning of each basic block.
    */

   int *reg_pressure_in;

   /*
    * The virtual GRFs whose range overlaps the beginning of each basic block.
    */

   BITSET_WORD **livein;

   /*
    * The virtual GRFs whose range overlaps the end of each basic block.
    */

   BITSET_WORD **liveout;

   /*
    * The hardware GRFs whose range overlaps the end of each basic block.
    */

   BITSET_WORD **hw_liveout;

   /*
    * Whether we've scheduled a write for this virtual GRF yet.
    */

   bool *written;

   /*
    * How many reads we haven't scheduled for this virtual GRF yet.
    */

   int *reads_remaining;

   /*
    * How many reads we haven't scheduled for this hardware GRF yet.
    */

   int *hw_reads_remaining;
};

elk_fs_instruction_scheduler::elk_fs_instruction_scheduler(void *mem_ctx, const elk_fs_visitor *v,
                                                   int grf_count, int hw_reg_count,
                                                   int block_count, bool post_reg_alloc)
   : elk_instruction_scheduler(mem_ctx, v, grf_count, /* grf_write_scale */ 16,
                           post_reg_alloc),
     v(v)
{
   this->hw_reg_count = hw_reg_count;
   this->mode = SCHEDULE_NONE;
   this->reg_pressure = 0;

   if (!post_reg_alloc) {
      this->reg_pressure_in = linear_zalloc_array(lin_ctx, int, block_count);

      this->livein = linear_alloc_array(lin_ctx, BITSET_WORD *, block_count);
      for (int i = 0; i < block_count; i++)
         this->livein[i] = linear_zalloc_array(lin_ctx, BITSET_WORD,
                                         BITSET_WORDS(grf_count));

      this->liveout = linear_alloc_array(lin_ctx, BITSET_WORD *, block_count);
      for (int i = 0; i < block_count; i++)
         this->liveout[i] = linear_zalloc_array(lin_ctx, BITSET_WORD,
                                          BITSET_WORDS(grf_count));

      this->hw_liveout = linear_alloc_array(lin_ctx, BITSET_WORD *, block_count);
      for (int i = 0; i < block_count; i++)
         this->hw_liveout[i] = linear_zalloc_array(lin_ctx, BITSET_WORD,
                                             BITSET_WORDS(hw_reg_count));

      setup_liveness(v->cfg);

      this->written = linear_alloc_array(lin_ctx, bool, grf_count);

      this->reads_remaining = linear_alloc_array(lin_ctx, int, grf_count);

      this->hw_reads_remaining = linear_alloc_array(lin_ctx, int, hw_reg_count);
   } else {
      this->reg_pressure_in = NULL;
      this->livein = NULL;
      this->liveout = NULL;
      this->hw_liveout = NULL;
      this->written = NULL;
      this->reads_remaining = NULL;
      this->hw_reads_remaining = NULL;
   }

   foreach_block(block, v->cfg) {
      set_current_block(block);

      for (elk_schedule_node *n = current.start; n < current.end; n++)
         n->issue_time = calculate_issue_time(n->inst);

      calculate_deps();
      compute_delays();
      compute_exits();
   }
}

static bool
is_src_duplicate(elk_fs_inst *inst, int src)
{
   for (int i = 0; i < src; i++)
      if (inst->src[i].equals(inst->src[src]))
         return true;

   return false;
}

void
elk_fs_instruction_scheduler::count_reads_remaining(elk_backend_instruction *be)
{
   assert(reads_remaining);

   elk_fs_inst *inst = (elk_fs_inst *)be;

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF) {
         reads_remaining[inst->src[i].nr]++;
      } else if (inst->src[i].file == FIXED_GRF) {
         if (inst->src[i].nr >= hw_reg_count)
            continue;

         for (unsigned j = 0; j < regs_read(inst, i); j++)
            hw_reads_remaining[inst->src[i].nr + j]++;
      }
   }
}

void
elk_fs_instruction_scheduler::setup_liveness(elk_cfg_t *cfg)
{
   const fs_live_variables &live = v->live_analysis.require();

   /* First, compute liveness on a per-GRF level using the in/out sets from
    * liveness calculation.
    */
   for (int block = 0; block < cfg->num_blocks; block++) {
      for (int i = 0; i < live.num_vars; i++) {
         if (BITSET_TEST(live.block_data[block].livein, i)) {
            int vgrf = live.vgrf_from_var[i];
            if (!BITSET_TEST(livein[block], vgrf)) {
               reg_pressure_in[block] += v->alloc.sizes[vgrf];
               BITSET_SET(livein[block], vgrf);
            }
         }

         if (BITSET_TEST(live.block_data[block].liveout, i))
            BITSET_SET(liveout[block], live.vgrf_from_var[i]);
      }
   }

   /* Now, extend the live in/live out sets for when a range crosses a block
    * boundary, which matches what our register allocator/interference code
    * does to account for force_writemask_all and incompatible exec_mask's.
    */
   for (int block = 0; block < cfg->num_blocks - 1; block++) {
      for (int i = 0; i < grf_count; i++) {
         if (live.vgrf_start[i] <= cfg->blocks[block]->end_ip &&
             live.vgrf_end[i] >= cfg->blocks[block + 1]->start_ip) {
            if (!BITSET_TEST(livein[block + 1], i)) {
               reg_pressure_in[block + 1] += v->alloc.sizes[i];
               BITSET_SET(livein[block + 1], i);
            }

            BITSET_SET(liveout[block], i);
         }
      }
   }

   int payload_last_use_ip[hw_reg_count];
   v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);

   for (unsigned i = 0; i < hw_reg_count; i++) {
      if (payload_last_use_ip[i] == -1)
         continue;

      for (int block = 0; block < cfg->num_blocks; block++) {
         if (cfg->blocks[block]->start_ip <= payload_last_use_ip[i])
            reg_pressure_in[block]++;

         if (cfg->blocks[block]->end_ip <= payload_last_use_ip[i])
            BITSET_SET(hw_liveout[block], i);
      }
   }
}

void
elk_fs_instruction_scheduler::update_register_pressure(elk_backend_instruction *be)
{
   assert(reads_remaining);

   elk_fs_inst *inst = (elk_fs_inst *)be;

   if (inst->dst.file == VGRF) {
      written[inst->dst.nr] = true;
   }

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF) {
         reads_remaining[inst->src[i].nr]--;
      } else if (inst->src[i].file == FIXED_GRF &&
                 inst->src[i].nr < hw_reg_count) {
         for (unsigned off = 0; off < regs_read(inst, i); off++)
            hw_reads_remaining[inst->src[i].nr + off]--;
      }
   }
}

int
elk_fs_instruction_scheduler::get_register_pressure_benefit(elk_backend_instruction *be)
{
   elk_fs_inst *inst = (elk_fs_inst *)be;
   int benefit = 0;
   const int block_idx = current.block->num;

   if (inst->dst.file == VGRF) {
      if (!BITSET_TEST(livein[block_idx], inst->dst.nr) &&
          !written[inst->dst.nr])
         benefit -= v->alloc.sizes[inst->dst.nr];
   }

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF &&
          !BITSET_TEST(liveout[block_idx], inst->src[i].nr) &&
          reads_remaining[inst->src[i].nr] == 1)
         benefit += v->alloc.sizes[inst->src[i].nr];

      if (inst->src[i].file == FIXED_GRF &&
          inst->src[i].nr < hw_reg_count) {
         for (unsigned off = 0; off < regs_read(inst, i); off++) {
            int reg = inst->src[i].nr + off;
            if (!BITSET_TEST(hw_liveout[block_idx], reg) &&
                hw_reads_remaining[reg] == 1) {
               benefit++;
            }
         }
      }
   }

   return benefit;
}
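
/* A worked example of the accounting above (illustrative numbers): an
 * instruction whose destination is a 4-reg VGRF that is not live into the
 * block and has not been written yet contributes -4, while a source that is
 * the last outstanding read of a 2-reg VGRF not live out of the block
 * contributes +2, for a net benefit of -2, i.e. scheduling it now would
 * grow register pressure by two registers.
 */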

class elk_vec4_instruction_scheduler : public elk_instruction_scheduler
{
public:
   elk_vec4_instruction_scheduler(void *mem_ctx, const vec4_visitor *v, int grf_count);
   void calculate_deps();
   elk_schedule_node *choose_instruction_to_schedule();
   const vec4_visitor *v;

   void run();
};

elk_vec4_instruction_scheduler::elk_vec4_instruction_scheduler(void *mem_ctx, const vec4_visitor *v,
                                                       int grf_count)
   : elk_instruction_scheduler(mem_ctx, v, grf_count, /* grf_write_scale */ 1,
                           /* post_reg_alloc */ true),
     v(v)
{
}

void
elk_instruction_scheduler::set_current_block(elk_bblock_t *block)
{
   current.block = block;
   current.start = nodes + block->start_ip;
   current.len = block->end_ip - block->start_ip + 1;
   current.end = current.start + current.len;
   current.time = 0;
   current.scheduled = 0;
   current.cand_generation = 1;
}

/** Computation of the delay member of each node. */
void
elk_instruction_scheduler::compute_delays()
{
   for (elk_schedule_node *n = current.end - 1; n >= current.start; n--) {
      if (!n->children_count) {
         n->delay = n->issue_time;
      } else {
         for (int i = 0; i < n->children_count; i++) {
            assert(n->children[i].n->delay);
            n->delay = MAX2(n->delay, n->latency + n->children[i].n->delay);
         }
      }
   }
}
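
/* For example, for a dependency chain A -> B -> C where A and B each have
 * latency 14 and C is a leaf with issue_time 2, the backward walk above
 * assigns delay(C) = 2, delay(B) = 14 + 2 = 16 and delay(A) = 14 + 16 = 30,
 * so a node's delay is its critical-path length to the end of the block.
 * (Hypothetical numbers, chosen only to illustrate the recurrence.)
 */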

void
elk_instruction_scheduler::compute_exits()
{
   /* Calculate a lower bound of the scheduling time of each node in the
    * graph.  This is analogous to the node's critical path but calculated
    * from the top instead of from the bottom of the block.
    */
   for (elk_schedule_node *n = current.start; n < current.end; n++) {
      for (int i = 0; i < n->children_count; i++) {
         elk_schedule_node_child *child = &n->children[i];
         child->n->initial_unblocked_time =
            MAX2(child->n->initial_unblocked_time,
                 n->initial_unblocked_time + n->issue_time + child->effective_latency);
      }
   }

   /* Calculate the exit of each node by induction based on the exit nodes of
    * its children.  The preferred exit of a node is the one among the exit
    * nodes of its children which can be unblocked first according to the
    * optimistic unblocked time estimate calculated above.
    */
   for (elk_schedule_node *n = current.end - 1; n >= current.start; n--) {
      n->exit = (n->inst->opcode == ELK_OPCODE_HALT ? n : NULL);

      for (int i = 0; i < n->children_count; i++) {
         if (exit_initial_unblocked_time(n->children[i].n) < exit_initial_unblocked_time(n))
            n->exit = n->children[i].n->exit;
      }
   }
}

/**
 * Add a dependency between two instruction nodes.
 *
 * The @after node will be scheduled after @before.  We will try to
 * schedule it @latency cycles after @before, but no guarantees there.
 */
void
elk_instruction_scheduler::add_dep(elk_schedule_node *before, elk_schedule_node *after,
                               int latency)
{
   if (!before || !after)
      return;

   assert(before != after);

   for (int i = 0; i < before->children_count; i++) {
      elk_schedule_node_child *child = &before->children[i];
      if (child->n == after) {
         child->effective_latency = MAX2(child->effective_latency, latency);
         return;
      }
   }

   if (before->children_cap <= before->children_count) {
      if (before->children_cap < 16)
         before->children_cap = 16;
      else
         before->children_cap *= 2;

      before->children = reralloc(mem_ctx, before->children,
                                  elk_schedule_node_child,
                                  before->children_cap);
   }

   elk_schedule_node_child *child = &before->children[before->children_count];
   child->n = after;
   child->effective_latency = latency;
   before->children_count++;
   after->initial_parent_count++;
}

void
elk_instruction_scheduler::add_dep(elk_schedule_node *before, elk_schedule_node *after)
{
   if (!before)
      return;

   add_dep(before, after, before->latency);
}

static bool
is_scheduling_barrier(const elk_backend_instruction *inst)
{
   return inst->opcode == ELK_SHADER_OPCODE_HALT_TARGET ||
          inst->is_control_flow() ||
          inst->has_side_effects();
}

static bool
has_cross_lane_access(const elk_fs_inst *inst)
{
   /* FINISHME:
    *
    * This function is likely incomplete in terms of identifying cross-lane
    * accesses.
    */
   if (inst->opcode == ELK_SHADER_OPCODE_BROADCAST ||
       inst->opcode == ELK_SHADER_OPCODE_READ_SR_REG ||
       inst->opcode == ELK_SHADER_OPCODE_CLUSTER_BROADCAST ||
       inst->opcode == ELK_SHADER_OPCODE_SHUFFLE ||
       inst->opcode == ELK_FS_OPCODE_LOAD_LIVE_CHANNELS ||
       inst->opcode == ELK_SHADER_OPCODE_FIND_LAST_LIVE_CHANNEL ||
       inst->opcode == ELK_SHADER_OPCODE_FIND_LIVE_CHANNEL)
      return true;

   for (unsigned s = 0; s < inst->sources; s++) {
      if (inst->src[s].file == VGRF) {
         if (inst->src[s].stride == 0)
            return true;
      }
   }

   return false;
}
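
/* For instance, a stride-0 VGRF source such as `g10<0,1,0>F' makes every
 * channel read lane 0's value, so an instruction consuming it must not be
 * reordered above a HALT that could have disabled that lane.  (Illustrative
 * region syntax; the check above is intentionally conservative.)
 */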

/**
 * Sometimes we really want this node to execute after everything that
 * was before it and before everything that followed it.  This adds
 * the deps to do so.
 */
void
elk_instruction_scheduler::add_barrier_deps(elk_schedule_node *n)
{
   for (elk_schedule_node *prev = n - 1; prev >= current.start; prev--) {
      add_dep(prev, n, 0);
      if (is_scheduling_barrier(prev->inst))
         break;
   }

   for (elk_schedule_node *next = n + 1; next < current.end; next++) {
      add_dep(n, next, 0);
      if (is_scheduling_barrier(next->inst))
         break;
   }
}

/**
 * Because some instructions like HALT can disable lanes, scheduling such an
 * instruction prior to a cross-lane access should not be allowed; otherwise
 * we could end up with later instructions accessing uninitialized data.
 */
void
elk_instruction_scheduler::add_cross_lane_deps(elk_schedule_node *n)
{
   for (elk_schedule_node *prev = n - 1; prev >= current.start; prev--) {
      if (has_cross_lane_access((elk_fs_inst*)prev->inst))
         add_dep(prev, n, 0);
   }
}

/* instruction scheduling needs to be aware of when an MRF write
 * actually writes 2 MRFs.
 */
bool
elk_fs_instruction_scheduler::is_compressed(const elk_fs_inst *inst)
{
   return inst->exec_size == 16;
}

/* Clears last_grf_write to be ready to start calculating deps for a block
 * again.
 *
 * Since pre-ra grf_count scales with instructions, and instructions scale with
 * BBs, we don't want to memset all of last_grf_write per block, or we'd end up
 * O(n^2) in the number of blocks.  For shaders using softfp64, we get a *lot*
 * of blocks.
 *
 * We don't bother being careful for post-ra, since then grf_count doesn't scale
 * with instructions.
 */
void
elk_fs_instruction_scheduler::clear_last_grf_write()
{
   if (!post_reg_alloc) {
      for (elk_schedule_node *n = current.start; n < current.end; n++) {
         elk_fs_inst *inst = (elk_fs_inst *)n->inst;

         if (inst->dst.file == VGRF) {
            /* Don't bother being careful with regs_written(), quicker to just clear 2 cachelines. */
            memset(&last_grf_write[inst->dst.nr * 16], 0, sizeof(*last_grf_write) * 16);
         }
      }
   } else {
      memset(last_grf_write, 0, sizeof(*last_grf_write) * grf_count * 16);
   }
}
1198 
1199 void
calculate_deps()1200 elk_fs_instruction_scheduler::calculate_deps()
1201 {
1202    /* Pre-register-allocation, this tracks the last write per VGRF offset.
1203     * After register allocation, reg_offsets are gone and we track individual
1204     * GRF registers.
1205     */
1206    elk_schedule_node *last_mrf_write[ELK_MAX_MRF(v->devinfo->ver)];
1207    elk_schedule_node *last_conditional_mod[8] = {};
1208    elk_schedule_node *last_accumulator_write = NULL;
1209    /* Fixed HW registers are assumed to be separate from the virtual
1210     * GRFs, so they can be tracked separately.  We don't really write
1211     * to fixed GRFs much, so don't bother tracking them on a more
1212     * granular level.
1213     */
1214    elk_schedule_node *last_fixed_grf_write = NULL;
1215 
1216    memset(last_mrf_write, 0, sizeof(last_mrf_write));
1217 
1218    /* top-to-bottom dependencies: RAW and WAW. */
1219    for (elk_schedule_node *n = current.start; n < current.end; n++) {
1220       elk_fs_inst *inst = (elk_fs_inst *)n->inst;
1221 
1222       if (is_scheduling_barrier(inst))
1223          add_barrier_deps(n);
1224 
1225       if (inst->opcode == ELK_OPCODE_HALT ||
1226           inst->opcode == ELK_SHADER_OPCODE_HALT_TARGET)
1227           add_cross_lane_deps(n);
1228 
1229       /* read-after-write deps. */
1230       for (int i = 0; i < inst->sources; i++) {
1231          if (inst->src[i].file == VGRF) {
1232             if (post_reg_alloc) {
1233                for (unsigned r = 0; r < regs_read(inst, i); r++)
1234                   add_dep(last_grf_write[inst->src[i].nr + r], n);
1235             } else {
1236                for (unsigned r = 0; r < regs_read(inst, i); r++) {
1237                   add_dep(last_grf_write[inst->src[i].nr * 16 +
1238                                          inst->src[i].offset / REG_SIZE + r], n);
1239                }
1240             }
1241          } else if (inst->src[i].file == FIXED_GRF) {
1242             if (post_reg_alloc) {
1243                for (unsigned r = 0; r < regs_read(inst, i); r++)
1244                   add_dep(last_grf_write[inst->src[i].nr + r], n);
1245             } else {
1246                add_dep(last_fixed_grf_write, n);
1247             }
1248          } else if (inst->src[i].is_accumulator()) {
1249             add_dep(last_accumulator_write, n);
1250          } else if (inst->src[i].file == ARF && !inst->src[i].is_null()) {
1251             add_barrier_deps(n);
1252          }
1253       }
1254 
1255       if (inst->base_mrf != -1) {
1256          for (int i = 0; i < inst->mlen; i++) {
1257             /* It looks like the MRF regs are released in the send
1258              * instruction once it's sent, not when the result comes
1259              * back.
1260              */
1261             add_dep(last_mrf_write[inst->base_mrf + i], n);
1262          }
1263       }
1264 
1265       if (const unsigned mask = inst->flags_read(v->devinfo)) {
1266          assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));
1267 
1268          for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
1269             if (mask & (1 << i))
1270                add_dep(last_conditional_mod[i], n);
1271          }
1272       }
1273 
1274       if (inst->reads_accumulator_implicitly()) {
1275          add_dep(last_accumulator_write, n);
1276       }
1277 
1278       /* write-after-write deps. */
1279       if (inst->dst.file == VGRF) {
1280          if (post_reg_alloc) {
1281             for (unsigned r = 0; r < regs_written(inst); r++) {
1282                add_dep(last_grf_write[inst->dst.nr + r], n);
1283                last_grf_write[inst->dst.nr + r] = n;
1284             }
1285          } else {
1286             for (unsigned r = 0; r < regs_written(inst); r++) {
1287                add_dep(last_grf_write[inst->dst.nr * 16 +
1288                                       inst->dst.offset / REG_SIZE + r], n);
1289                last_grf_write[inst->dst.nr * 16 +
1290                               inst->dst.offset / REG_SIZE + r] = n;
1291             }
1292          }
1293       } else if (inst->dst.file == MRF) {
1294          int reg = inst->dst.nr & ~ELK_MRF_COMPR4;
1295 
1296          add_dep(last_mrf_write[reg], n);
1297          last_mrf_write[reg] = n;
1298          if (is_compressed(inst)) {
1299             if (inst->dst.nr & ELK_MRF_COMPR4)
1300                reg += 4;
1301             else
1302                reg++;
1303             add_dep(last_mrf_write[reg], n);
1304             last_mrf_write[reg] = n;
1305          }
1306       } else if (inst->dst.file == FIXED_GRF) {
1307          if (post_reg_alloc) {
1308             for (unsigned r = 0; r < regs_written(inst); r++) {
1309                add_dep(last_grf_write[inst->dst.nr + r], n);
1310                last_grf_write[inst->dst.nr + r] = n;
1311             }
1312          } else {
1313             add_dep(last_fixed_grf_write, n);
1314             last_fixed_grf_write = n;
1315          }
1316       } else if (inst->dst.is_accumulator()) {
1317          add_dep(last_accumulator_write, n);
1318          last_accumulator_write = n;
1319       } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1320          add_barrier_deps(n);
1321       }
1322 
1323       if (inst->mlen > 0 && inst->base_mrf != -1) {
1324          for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
1325             add_dep(last_mrf_write[inst->base_mrf + i], n);
1326             last_mrf_write[inst->base_mrf + i] = n;
1327          }
1328       }
1329 
1330       if (const unsigned mask = inst->flags_written(v->devinfo)) {
1331          assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));
1332 
1333          for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
1334             if (mask & (1 << i)) {
1335                add_dep(last_conditional_mod[i], n, 0);
1336                last_conditional_mod[i] = n;
            }
         }
      }

      if (inst->writes_accumulator_implicitly(v->devinfo) &&
          !inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      }
   }

   clear_last_grf_write();

   /* bottom-to-top dependencies: WAR */
   memset(last_mrf_write, 0, sizeof(last_mrf_write));
   memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
   last_accumulator_write = NULL;
   last_fixed_grf_write = NULL;

   for (elk_schedule_node *n = current.end - 1; n >= current.start; n--) {
      elk_fs_inst *inst = (elk_fs_inst *)n->inst;

      /* write-after-read deps. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF) {
            if (post_reg_alloc) {
               for (unsigned r = 0; r < regs_read(inst, i); r++)
                  add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
            } else {
               for (unsigned r = 0; r < regs_read(inst, i); r++) {
                  add_dep(n, last_grf_write[inst->src[i].nr * 16 +
                                            inst->src[i].offset / REG_SIZE + r], 0);
               }
            }
         } else if (inst->src[i].file == FIXED_GRF) {
            if (post_reg_alloc) {
               for (unsigned r = 0; r < regs_read(inst, i); r++)
                  add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
            } else {
               add_dep(n, last_fixed_grf_write, 0);
            }
         } else if (inst->src[i].is_accumulator()) {
            add_dep(n, last_accumulator_write, 0);
         } else if (inst->src[i].file == ARF && !inst->src[i].is_null()) {
            add_barrier_deps(n);
         }
      }

      if (inst->base_mrf != -1) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
         }
      }

      if (const unsigned mask = inst->flags_read(v->devinfo)) {
         assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));

         for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
            if (mask & (1 << i))
               add_dep(n, last_conditional_mod[i]);
         }
      }

      if (inst->reads_accumulator_implicitly()) {
         add_dep(n, last_accumulator_write);
      }

      /* Update the things this instruction wrote, so earlier reads
       * can mark this as a WAR dependency.
       */
      if (inst->dst.file == VGRF) {
         if (post_reg_alloc) {
            for (unsigned r = 0; r < regs_written(inst); r++)
               last_grf_write[inst->dst.nr + r] = n;
         } else {
            for (unsigned r = 0; r < regs_written(inst); r++) {
               last_grf_write[inst->dst.nr * 16 +
                              inst->dst.offset / REG_SIZE + r] = n;
            }
         }
      } else if (inst->dst.file == MRF) {
         int reg = inst->dst.nr & ~ELK_MRF_COMPR4;

         last_mrf_write[reg] = n;

         if (is_compressed(inst)) {
            if (inst->dst.nr & ELK_MRF_COMPR4)
               reg += 4;
            else
               reg++;

            last_mrf_write[reg] = n;
         }
      } else if (inst->dst.file == FIXED_GRF) {
         if (post_reg_alloc) {
            for (unsigned r = 0; r < regs_written(inst); r++)
               last_grf_write[inst->dst.nr + r] = n;
         } else {
            last_fixed_grf_write = n;
         }
      } else if (inst->dst.is_accumulator()) {
         last_accumulator_write = n;
      } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && inst->base_mrf != -1) {
         for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (const unsigned mask = inst->flags_written(v->devinfo)) {
         assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));

         for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
            if (mask & (1 << i))
               last_conditional_mod[i] = n;
         }
      }

      if (inst->writes_accumulator_implicitly(v->devinfo)) {
         last_accumulator_write = n;
      }
   }

   clear_last_grf_write();
}
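
/* Illustrative example (an assumed snippet, not taken from any real
 * shader): for the pseudo-assembly below, the two passes above would
 * record these edges:
 *
 *    add g1, g2, g3      (A)
 *    mul g4, g1, g2      (B)   RAW on g1:  A -> B   (forward pass)
 *    mov g2, g5          (C)   WAR on g2:  A -> C, B -> C   (backward pass)
 *    mov g1, g6          (D)   WAW on g1:  A -> D;  WAR on g1:  B -> D
 *
 * RAW edges carry the producer's latency, WAW edges keep conflicting
 * writes ordered, and WAR edges only need to keep a read ahead of the
 * overwrite, so they are added with a latency of 0 (2 for the MRF
 * special case noted above).
 */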

void
elk_vec4_instruction_scheduler::calculate_deps()
{
   elk_schedule_node *last_mrf_write[ELK_MAX_MRF(v->devinfo->ver)];
   elk_schedule_node *last_conditional_mod = NULL;
   elk_schedule_node *last_accumulator_write = NULL;
   /* Fixed HW registers are assumed to be separate from the virtual
    * GRFs, so they can be tracked separately.  We don't really write
    * to fixed GRFs much, so don't bother tracking them on a more
    * granular level.
    */
   elk_schedule_node *last_fixed_grf_write = NULL;

   memset(last_grf_write, 0, grf_count * sizeof(*last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));

   /* top-to-bottom dependencies: RAW and WAW. */
   for (elk_schedule_node *n = current.start; n < current.end; n++) {
      vec4_instruction *inst = (vec4_instruction *)n->inst;

      if (is_scheduling_barrier(inst))
         add_barrier_deps(n);

      /* read-after-write deps. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF) {
            for (unsigned j = 0; j < regs_read(inst, i); ++j)
               add_dep(last_grf_write[inst->src[i].nr + j], n);
         } else if (inst->src[i].file == FIXED_GRF) {
            add_dep(last_fixed_grf_write, n);
         } else if (inst->src[i].is_accumulator()) {
            assert(last_accumulator_write);
            add_dep(last_accumulator_write, n);
         } else if (inst->src[i].file == ARF && !inst->src[i].is_null()) {
            add_barrier_deps(n);
         }
      }

      if (inst->reads_g0_implicitly())
         add_dep(last_fixed_grf_write, n);

      if (!inst->is_send_from_grf()) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(last_mrf_write[inst->base_mrf + i], n);
         }
      }

      if (inst->reads_flag()) {
         assert(last_conditional_mod);
         add_dep(last_conditional_mod, n);
      }

      if (inst->reads_accumulator_implicitly()) {
         assert(last_accumulator_write);
         add_dep(last_accumulator_write, n);
      }

      /* write-after-write deps. */
      if (inst->dst.file == VGRF) {
         for (unsigned j = 0; j < regs_written(inst); ++j) {
            add_dep(last_grf_write[inst->dst.nr + j], n);
            last_grf_write[inst->dst.nr + j] = n;
         }
      } else if (inst->dst.file == MRF) {
         add_dep(last_mrf_write[inst->dst.nr], n);
         last_mrf_write[inst->dst.nr] = n;
      } else if (inst->dst.file == FIXED_GRF) {
         add_dep(last_fixed_grf_write, n);
         last_fixed_grf_write = n;
      } else if (inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && !inst->is_send_from_grf()) {
         for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
            add_dep(last_mrf_write[inst->base_mrf + i], n);
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->writes_flag(v->devinfo)) {
         add_dep(last_conditional_mod, n, 0);
         last_conditional_mod = n;
      }

      if (inst->writes_accumulator_implicitly(v->devinfo) &&
          !inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      }
   }

   /* bottom-to-top dependencies: WAR */
   memset(last_grf_write, 0, grf_count * sizeof(*last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));
   last_conditional_mod = NULL;
   last_accumulator_write = NULL;
   last_fixed_grf_write = NULL;

   for (elk_schedule_node *n = current.end - 1; n >= current.start; n--) {
      vec4_instruction *inst = (vec4_instruction *)n->inst;

      /* write-after-read deps. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF) {
            for (unsigned j = 0; j < regs_read(inst, i); ++j)
               add_dep(n, last_grf_write[inst->src[i].nr + j]);
         } else if (inst->src[i].file == FIXED_GRF) {
            add_dep(n, last_fixed_grf_write);
         } else if (inst->src[i].is_accumulator()) {
            add_dep(n, last_accumulator_write);
         } else if (inst->src[i].file == ARF && !inst->src[i].is_null()) {
            add_barrier_deps(n);
         }
      }

      if (!inst->is_send_from_grf()) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
         }
      }

      if (inst->reads_flag()) {
         add_dep(n, last_conditional_mod);
      }

      if (inst->reads_accumulator_implicitly()) {
         add_dep(n, last_accumulator_write);
      }

      /* Update the things this instruction wrote, so earlier reads
       * can mark this as a WAR dependency.
       */
      if (inst->dst.file == VGRF) {
         for (unsigned j = 0; j < regs_written(inst); ++j)
            last_grf_write[inst->dst.nr + j] = n;
      } else if (inst->dst.file == MRF) {
         last_mrf_write[inst->dst.nr] = n;
      } else if (inst->dst.file == FIXED_GRF) {
         last_fixed_grf_write = n;
      } else if (inst->dst.is_accumulator()) {
         last_accumulator_write = n;
      } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && !inst->is_send_from_grf()) {
         for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->writes_flag(v->devinfo)) {
         last_conditional_mod = n;
      }

      if (inst->writes_accumulator_implicitly(v->devinfo)) {
         last_accumulator_write = n;
      }
   }
}

elk_schedule_node *
elk_fs_instruction_scheduler::choose_instruction_to_schedule()
{
   elk_schedule_node *chosen = NULL;

   if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
      int chosen_time = 0;

      /* Of the instructions ready to execute or the closest to being ready,
       * choose the one most likely to unblock an early program exit, or
       * otherwise the oldest one.
       */
      foreach_in_list(elk_schedule_node, n, &current.available) {
         if (!chosen ||
             exit_tmp_unblocked_time(n) < exit_tmp_unblocked_time(chosen) ||
             (exit_tmp_unblocked_time(n) == exit_tmp_unblocked_time(chosen) &&
              n->tmp.unblocked_time < chosen_time)) {
            chosen = n;
            chosen_time = n->tmp.unblocked_time;
         }
      }
   } else {
      int chosen_register_pressure_benefit = 0;

      /* Before register allocation, we don't care about the latencies of
       * instructions.  All we care about is reducing live intervals of
       * variables so that we can avoid register spilling, or get SIMD16
       * shaders which naturally do a better job of hiding instruction
       * latency.
       */
      foreach_in_list(elk_schedule_node, n, &current.available) {
         elk_fs_inst *inst = (elk_fs_inst *)n->inst;

         if (!chosen) {
            chosen = n;
            chosen_register_pressure_benefit =
                  get_register_pressure_benefit(chosen->inst);
            continue;
         }

         /* Most important: If we can definitely reduce register pressure, do
          * so immediately.
          */
         int register_pressure_benefit = get_register_pressure_benefit(n->inst);

         if (register_pressure_benefit > 0 &&
             register_pressure_benefit > chosen_register_pressure_benefit) {
            chosen = n;
            chosen_register_pressure_benefit = register_pressure_benefit;
            continue;
         } else if (chosen_register_pressure_benefit > 0 &&
                    (register_pressure_benefit <
                     chosen_register_pressure_benefit)) {
            continue;
         }

         if (mode == SCHEDULE_PRE_LIFO) {
            /* Prefer instructions that recently became available for
             * scheduling.  These are the things that are most likely to
             * (eventually) make a variable dead and reduce register pressure.
             * Typical register pressure estimates don't work for us because
             * most of our pressure comes from texturing, where no single
             * instruction to schedule will make a vec4 value dead.
             */
            if (n->tmp.cand_generation > chosen->tmp.cand_generation) {
               chosen = n;
               chosen_register_pressure_benefit = register_pressure_benefit;
               continue;
            } else if (n->tmp.cand_generation < chosen->tmp.cand_generation) {
               continue;
            }

            /* On MRF-using chips, prefer non-SEND instructions.  If we don't
             * do this, then because we prefer instructions that just became
             * candidates, we'll end up in a pattern of scheduling a SEND,
             * then the MRFs for the next SEND, then the next SEND, then the
             * MRFs, etc., without ever consuming the results of a send.
             */
            if (v->devinfo->ver < 7) {
               elk_fs_inst *chosen_inst = (elk_fs_inst *)chosen->inst;

               /* We use size_written > 4 * exec_size as our test for the kind
                * of send instruction to avoid -- only sends generate many
                * regs, and a single-result send is probably actually reducing
                * register pressure.
                */
               if (inst->size_written <= 4 * inst->exec_size &&
                   chosen_inst->size_written > 4 * chosen_inst->exec_size) {
                  chosen = n;
                  chosen_register_pressure_benefit = register_pressure_benefit;
                  continue;
               } else if (inst->size_written > chosen_inst->size_written) {
                  continue;
               }
            }
         }

         /* For instructions pushed on the cands list at the same time, prefer
          * the one with the highest delay to the end of the program.  This is
          * most likely to have its values able to be consumed first (such as
          * for a large tree of lowered ubo loads, which appear reversed in
          * the instruction stream with respect to when they can be consumed).
          */
         if (n->delay > chosen->delay) {
            chosen = n;
            chosen_register_pressure_benefit = register_pressure_benefit;
            continue;
         } else if (n->delay < chosen->delay) {
            continue;
         }

         /* Prefer the node most likely to unblock an early program exit.
          */
         if (exit_tmp_unblocked_time(n) < exit_tmp_unblocked_time(chosen)) {
            chosen = n;
            chosen_register_pressure_benefit = register_pressure_benefit;
            continue;
         } else if (exit_tmp_unblocked_time(n) > exit_tmp_unblocked_time(chosen)) {
            continue;
         }

         /* If all other metrics are equal, we prefer the first instruction in
          * the list (program execution order).
          */
      }
   }

   return chosen;
}
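
/* Summary of the pre-register-allocation cascade above (a reading aid,
 * not additional logic), from highest to lowest priority:
 *
 *   1. strictly positive register-pressure benefit, largest first
 *   2. in SCHEDULE_PRE_LIFO mode: newest cand_generation, then non-SEND
 *      instructions on MRF-using hardware (devinfo->ver < 7)
 *   3. largest delay to the end of the program
 *   4. earliest exit-unblocking time
 *   5. original program order (first match in the list wins)
 */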

elk_schedule_node *
elk_vec4_instruction_scheduler::choose_instruction_to_schedule()
{
   elk_schedule_node *chosen = NULL;
   int chosen_time = 0;

   /* Of the instructions ready to execute or the closest to being ready,
    * choose the oldest one.
    */
   foreach_in_list(elk_schedule_node, n, &current.available) {
      if (!chosen || n->tmp.unblocked_time < chosen_time) {
         chosen = n;
         chosen_time = n->tmp.unblocked_time;
      }
   }

   return chosen;
}

int
elk_fs_instruction_scheduler::calculate_issue_time(elk_backend_instruction *inst0)
{
   const struct elk_isa_info *isa = &v->compiler->isa;
   const elk_fs_inst *inst = static_cast<elk_fs_inst *>(inst0);
   const unsigned overhead = v->grf_used && elk_has_bank_conflict(isa, inst) ?
      DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE) : 0;
   if (is_compressed(inst))
      return 4 + overhead;
   else
      return 2 + overhead;
}
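
/* Worked example with assumed numbers: a compressed (SIMD16) instruction
 * takes 4 cycles of issue time, an uncompressed one 2.  For a SIMD16
 * float destination, component_size(16) = 16 * 4 = 64 bytes = 2 GRFs, so
 * a post-RA bank conflict would add DIV_ROUND_UP(64, REG_SIZE) = 2
 * cycles of overhead, for a total of 4 + 2 = 6.
 */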

void
elk_instruction_scheduler::schedule(elk_schedule_node *chosen)
{
   assert(current.scheduled < current.len);
   current.scheduled++;

   assert(chosen);
   chosen->remove();
   current.block->instructions.push_tail(chosen->inst);

   /* If we expected a delay for scheduling, then bump the clock to reflect
    * that.  In reality, the hardware will switch to another hyperthread
    * and may not return to dispatching our thread for a while even after
    * we're unblocked.  After this, we have the time when the chosen
    * instruction will start executing.
    */
   current.time = MAX2(current.time, chosen->tmp.unblocked_time);

   /* Update the clock for how soon an instruction could start after the
    * chosen one.
    */
   current.time += chosen->issue_time;

   if (debug) {
      fprintf(stderr, "clock %4d, scheduled: ", current.time);
      bs->dump_instruction(chosen->inst);
   }
}
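
/* Timeline sketch with assumed numbers: if current.time is 10 when we
 * pick a node with tmp.unblocked_time = 14 and issue_time = 2, then
 *
 *    current.time = MAX2(10, 14) + 2 = 16
 *
 * i.e. the clock first jumps to the point where the instruction can
 * dispatch at all, then advances by its issue cost.
 */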

void
elk_instruction_scheduler::update_children(elk_schedule_node *chosen)
{
   /* Now that we've scheduled a new instruction, some of its
    * children can be promoted to the list of instructions ready to
    * be scheduled.  Update the children's unblocked time for this
    * DAG edge as we do so.
    */
   for (int i = chosen->children_count - 1; i >= 0; i--) {
      elk_schedule_node_child *child = &chosen->children[i];

      child->n->tmp.unblocked_time = MAX2(child->n->tmp.unblocked_time,
                                          current.time + child->effective_latency);

      if (debug) {
         fprintf(stderr, "\tchild %d, %d parents: ", i, child->n->tmp.parent_count);
         bs->dump_instruction(child->n->inst);
      }

      child->n->tmp.cand_generation = current.cand_generation;
      child->n->tmp.parent_count--;
      if (child->n->tmp.parent_count == 0) {
         if (debug) {
            fprintf(stderr, "\t\tnow available\n");
         }
         current.available.push_head(child->n);
      }
   }
   current.cand_generation++;

   /* Shared resource: the mathbox.  There's one mathbox per EU on Gfx6+
    * but it's more limited pre-gfx6, so if we send something off to it then
    * the next math instruction isn't going to make progress until the first
    * is done.
    */
   if (bs->devinfo->ver < 6 && chosen->inst->is_math()) {
      foreach_in_list(elk_schedule_node, n, &current.available) {
         if (n->inst->is_math())
            n->tmp.unblocked_time = MAX2(n->tmp.unblocked_time,
                                         current.time + chosen->latency);
      }
   }
}
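
/* Propagation sketch with assumed numbers: if the clock reads 16 after
 * scheduling the chosen node, and an edge to one of its children has
 * effective_latency = 12, that child's tmp.unblocked_time becomes
 *
 *    MAX2(old_unblocked_time, 16 + 12) = 28
 *
 * and the child only enters current.available once its final
 * unscheduled parent is gone (tmp.parent_count reaches 0).
 */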

void
elk_fs_instruction_scheduler::schedule_instructions()
{
   if (!post_reg_alloc)
      reg_pressure = reg_pressure_in[current.block->num];

   assert(current.available.is_empty());
   for (elk_schedule_node *n = current.start; n < current.end; n++) {
      reset_node_tmp(n);

      /* Add DAG heads to the list of available instructions. */
      if (n->tmp.parent_count == 0)
         current.available.push_tail(n);
   }

   current.block->instructions.make_empty();

   while (!current.available.is_empty()) {
      elk_schedule_node *chosen = choose_instruction_to_schedule();
      schedule(chosen);

      if (!post_reg_alloc) {
         reg_pressure -= get_register_pressure_benefit(chosen->inst);
         update_register_pressure(chosen->inst);
         if (debug)
            fprintf(stderr, "(register pressure %d)\n", reg_pressure);
      }

      update_children(chosen);
   }
}
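
/* Pressure-tracking sketch with assumed numbers: entering a block where
 * reg_pressure_in is 64, scheduling an instruction whose
 * get_register_pressure_benefit() is 2 (the last reads of a 2-reg value
 * happen here) drops the running estimate to 62 before the next pick;
 * this is the quantity the pre-RA heuristics in
 * choose_instruction_to_schedule() are trying to drive down.
 */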

void
elk_fs_instruction_scheduler::run(instruction_scheduler_mode mode)
{
   this->mode = mode;

   if (debug && !post_reg_alloc) {
      fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
              post_reg_alloc);
      bs->dump_instructions();
   }

   if (!post_reg_alloc) {
      memset(reads_remaining, 0, grf_count * sizeof(*reads_remaining));
      memset(hw_reads_remaining, 0, hw_reg_count * sizeof(*hw_reads_remaining));
      memset(written, 0, grf_count * sizeof(*written));
   }

   foreach_block(block, v->cfg) {
      set_current_block(block);

      if (!post_reg_alloc) {
         for (elk_schedule_node *n = current.start; n < current.end; n++)
            count_reads_remaining(n->inst);
      }

      schedule_instructions();
   }

   if (debug && !post_reg_alloc) {
      fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
              post_reg_alloc);
      bs->dump_instructions();
   }
}

void
elk_vec4_instruction_scheduler::run()
{
   foreach_block(block, v->cfg) {
      set_current_block(block);

      for (elk_schedule_node *n = current.start; n < current.end; n++) {
         /* We always execute as two vec4s in parallel. */
         n->issue_time = 2;
      }

      calculate_deps();

      compute_delays();
      compute_exits();

      assert(current.available.is_empty());
      for (elk_schedule_node *n = current.start; n < current.end; n++) {
         reset_node_tmp(n);

         /* Add DAG heads to the list of available instructions. */
         if (n->tmp.parent_count == 0)
            current.available.push_tail(n);
      }

      current.block->instructions.make_empty();

      while (!current.available.is_empty()) {
         elk_schedule_node *chosen = choose_instruction_to_schedule();
         schedule(chosen);
         update_children(chosen);
      }
   }
}

elk_fs_instruction_scheduler *
elk_fs_visitor::prepare_scheduler(void *mem_ctx)
{
   const int grf_count = alloc.count;

   elk_fs_instruction_scheduler *empty = rzalloc(mem_ctx, elk_fs_instruction_scheduler);
   return new (empty) elk_fs_instruction_scheduler(mem_ctx, this, grf_count, first_non_payload_grf,
                                                   cfg->num_blocks, /* post_reg_alloc */ false);
}

void
elk_fs_visitor::schedule_instructions_pre_ra(elk_fs_instruction_scheduler *sched,
                                             instruction_scheduler_mode mode)
{
   if (mode == SCHEDULE_NONE)
      return;

   sched->run(mode);

   invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
}
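
/* Usage note: callers are expected to build the scheduler once with
 * prepare_scheduler() and may re-run schedule_instructions_pre_ra()
 * with progressively more pressure-averse modes (e.g. SCHEDULE_PRE,
 * then the LIFO variants) when register allocation would otherwise
 * spill; that retry policy lives in the caller, not here.
 */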

void
elk_fs_visitor::schedule_instructions_post_ra()
{
   const bool post_reg_alloc = true;
   const int grf_count = reg_unit(devinfo) * grf_used;

   void *mem_ctx = ralloc_context(NULL);

   elk_fs_instruction_scheduler sched(mem_ctx, this, grf_count, first_non_payload_grf,
                                      cfg->num_blocks, post_reg_alloc);
   sched.run(SCHEDULE_POST);

   ralloc_free(mem_ctx);

   invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
}

void
vec4_visitor::opt_schedule_instructions()
{
   void *mem_ctx = ralloc_context(NULL);

   elk_vec4_instruction_scheduler sched(mem_ctx, this, prog_data->total_grf);
   sched.run();

   ralloc_free(mem_ctx);

   invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
}