/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// At the moment, this implements more-or-less traditional linear scan register
// allocation.
//
// Input is a list of virtual register lifetimes, sorted by begin. Each
// lifetime is a list of contiguous live ranges with lifetime holes in between.
// Each live range tracks the insns that actually use the virtual register.
//
// The allocator walks the sorted lifetime list and allocates lifetimes to hard
// registers. When lifetimes do not interfere, i.e. live ranges of one lifetime
// fit into the holes of another lifetime, both lifetimes can be allocated to
// the same hard register.
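//
// A schematic illustration (register names and insn indices are made up for
// this example only) of two non-interfering lifetimes sharing one hard
// register:
//
//   insn index:  1    2    3    4    5    6
//   v1:          [----]              [----]   <- hole at insns 3..4
//   v2:                    [----]             <- fits into v1's hole
//
// v1 and v2 are never live at the same insn, so both may get the same
// hard register.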
//
// If there is no available hard register, the allocator selects a hard
// register to free. All lifetimes allocated to that hard register that
// interfere with the lifetime being allocated are spilled.
//
// A lifetime being spilled is split into tiny lifetimes, one for each insn
// where that virtual register is used. If the register is read by the insn, a
// reload insn is added before it, and if the register is written by the insn,
// a spill insn is added after it.
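//
// Schematically (reload/spill are shown as pseudo insns here; the real insns
// are target specific), for a register v spilled to slot S and an insn that
// both reads and writes v:
//
//   reload v, [S]   <- inserted before the insn, because v is read
//   v = op v, ...
//   spill [S], v    <- inserted after the insn, because v is written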
//
// Tiny lifetimes originating from spilling still need to be allocated. Tiny
// lifetimes that end before the begin of the lifetime in favor of which they
// were spilled reuse the previously allocated hard register. Tiny lifetimes
// that begin after it are merged into the list of not yet allocated lifetimes.
//
// The problematic case is when a tiny lifetime overlaps the begin of the
// lifetime in favor of which it was spilled. In this case the previously
// allocated hard register can't be used (otherwise it would not become free),
// and the tiny lifetime can't be allocated later according to the order by
// begin. Such a spill is considered impossible.
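//
// Schematic example of the impossible case (made-up registers and indices):
//
//   insn index:  1    2    3    4
//   v1:          [---------X----]   <- X marks a use of v1 at insn 3
//   v2:                    [-----]  <- v2 begins at insn 3
//
// The tiny lifetime around X can neither keep v1's hard register (it must be
// freed for v2) nor be queued for later allocation (it does not begin after
// v2's begin), so spilling v1 in favor of v2 is rejected.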
//
// The above is the most significant difference from classic linear scan
// register allocation algorithms, which usually handle the allocation of tiny
// lifetimes either by backtracking or by using reserved registers. Hopefully
// this new approach works as long as there are more suitable hard registers
// than can be used in one insn, so there is always a suitable register that is
// not used at the point of spill.
//
// TODO(b/232598137): this might blow up when lifetimes compete for some single
// specific register (ecx for x86 shift insns)! Can be solved by generating
// code that minimizes lifetimes of such registers - a move in right before and
// a move out right after the insn using such a register.
//
// TODO(b/232598137): investigate how code quality is affected when there are
// few available hard registers (x86_32)!

#include "berberis/backend/common/reg_alloc.h"

#include <iterator>  // std::next()
#include <string>

#include "berberis/backend/common/lifetime.h"
#include "berberis/backend/common/lifetime_analysis.h"
#include "berberis/backend/common/machine_ir.h"
#include "berberis/base/arena_alloc.h"
#include "berberis/base/arena_list.h"
#include "berberis/base/arena_vector.h"
#include "berberis/base/config.h"
#include "berberis/base/logging.h"

// #define LOG_REG_ALLOC(...) ALOGE(__VA_ARGS__)
#define LOG_REG_ALLOC(...) ((void)0)

namespace berberis {

namespace {

// Lifetimes themselves are owned by lifetime list populated by lifetime
// analysis. Use list of pointers to track lifetimes currently allocated
// to particular hard register.
using VRegLifetimePtrList = ArenaList<VRegLifetime*>;

// How to spill one virtual register.
struct VRegLifetimeSpill {
  VRegLifetimePtrList::iterator lifetime;
  SplitPos realloc_pos;

  VRegLifetimeSpill(VRegLifetimePtrList::iterator i, const SplitPos& p)
      : lifetime(i), realloc_pos(p) {}
};

// Every possible spill should have some smaller weight (CHECKed).
const int kInfiniteSpillWeight = 99999;

// Track what virtual registers are currently allocated to this particular
// hard register, and how to spill them.
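//
// The expected call sequence (as used by VRegLifetimeAllocator::AllocateLifetime
// below): TryAssign first; if it returns false, ConsiderSpill to get a spill
// weight; then, for the register chosen by weight, SpillAndAssign.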
class HardRegAllocation {
 public:
  explicit HardRegAllocation(Arena* arena)
      : arena_(arena), lifetimes_(arena), new_lifetime_(nullptr), spills_(arena) {}

  HardRegAllocation(const HardRegAllocation& other) = default;

  // If new_lifetime doesn't interfere with lifetimes currently allocated
  // to this hard register, allocate new_lifetime to this register as well.
  bool TryAssign(VRegLifetime* new_lifetime);

  // If TryAssign returned false:
  // Check if it is possible to spill all lifetimes allocated to this hard
  // register that interfere with new_lifetime, and return spill weight.
  int ConsiderSpill(VRegLifetime* new_lifetime);

  // If ConsiderSpill returned non-infinite weight:
  // Given spill is possible, actually spill lifetimes that interfere with
  // new_lifetime to spill_slot. Insert newly created tiny lifetimes into
  // 'lifetimes' list, starting at position 'pos'.
  void SpillAndAssign(VRegLifetime* new_lifetime,
                      int spill_slot,
                      VRegLifetimeList* lifetimes,
                      VRegLifetimeList::iterator pos);

 private:
  // Arena for allocations.
  Arena* arena_;
  // Lifetimes currently allocated to this hard register.
  VRegLifetimePtrList lifetimes_;

  // Last lifetime being allocated, for CHECKing.
  // TODO(b/232598137): probably use this for ConsiderSpill and SpillAndAssign?
  // This looks more natural...
  VRegLifetime* new_lifetime_;

  // How to free this register for last considered new lifetime.
  // This is here for the following reasons:
  // - it is highly coupled with lifetimes_
  // - to avoid reallocating this for every spill consideration
  ArenaVector<VRegLifetimeSpill> spills_;
};

bool HardRegAllocation::TryAssign(VRegLifetime* new_lifetime) {
  // TODO(b/232598137): had to disable the check below! The problem is that when
  // new_lifetime_ is split so that there remain no live ranges, we can't
  // call begin() for it. Seems this place requires some rethinking, as such a
  // case means we can simply reorder lifetimes instead of actually splitting...
  // Check lifetimes are processed in order by increasing begin.
  // CHECK(!new_lifetime_ || new_lifetime_->begin() <= new_lifetime->begin());
  new_lifetime_ = new_lifetime;

  for (auto curr = lifetimes_.begin(); curr != lifetimes_.end();) {
    VRegLifetime* curr_lifetime = *curr;

    if (curr_lifetime->end() <= new_lifetime->begin()) {
      // Curr lifetime ends before new lifetime starts, expire it.
      curr = lifetimes_.erase(curr);
    } else if (curr_lifetime->TestInterference(*new_lifetime)) {
      // Lifetimes interfere, can't assign.
      return false;
    } else {
      ++curr;
    }
  }

  // No lifetimes interfere with new, can assign.
  lifetimes_.push_back(new_lifetime);
  return true;
}

int HardRegAllocation::ConsiderSpill(VRegLifetime* new_lifetime) {
  CHECK_EQ(new_lifetime_, new_lifetime);

  spills_.clear();
  int weight = 0;

  for (auto curr = lifetimes_.begin(); curr != lifetimes_.end(); ++curr) {
    VRegLifetime* curr_lifetime = *curr;

    if (!curr_lifetime->TestInterference(*new_lifetime)) {
      // No interference, no need to spill.
      continue;
    }

    SplitPos split_pos;
    SplitKind split_kind = curr_lifetime->FindSplitPos(new_lifetime->begin(), &split_pos);
    if (split_kind == SPLIT_IMPOSSIBLE) {
      // Lifetimes interfere in such a way that spill is not possible.
      return kInfiniteSpillWeight;
    } else if (split_kind == SPLIT_CONFLICT) {
      // A use within this lifetime conflicts with the first use in 'new_lifetime'.
      // If we spill it, it will compete with 'new_lifetime' at reallocation,
      // and if it can only use registers that are also suitable for
      // 'new_lifetime', 'new_lifetime' can be evicted back, resulting in a
      // double spill.
      if (curr_lifetime->GetRegClass()->IsSubsetOf(new_lifetime->GetRegClass())) {
        return kInfiniteSpillWeight;
      }
    }

    // Record spill.
    spills_.push_back(VRegLifetimeSpill(curr, split_pos));
    // Evicting an already-spilled (tiny) lifetime is free; only lifetimes
    // without a spill slot yet add to the weight.
    if (curr_lifetime->GetSpill() == -1) {
      weight += curr_lifetime->spill_weight();
    }
  }

  CHECK_LT(weight, kInfiniteSpillWeight);
  return weight;
}

// Same as std::list::merge, but merging into 'dst' starts at position 'dst_pos'.
void MergeVRegLifetimeList(VRegLifetimeList* dst,
                           VRegLifetimeList::iterator dst_pos,
                           VRegLifetimeList* src) {
  while (!src->empty()) {
    auto curr = src->begin();
    for (; dst_pos != dst->end(); ++dst_pos) {
      if (curr->begin() < dst_pos->begin()) {
        break;
      }
    }
    dst->splice(dst_pos, *src, curr);
  }
}

void HardRegAllocation::SpillAndAssign(VRegLifetime* new_lifetime,
                                       int spill_slot,
                                       VRegLifetimeList* lifetimes,
                                       VRegLifetimeList::iterator pos) {
  CHECK_EQ(new_lifetime_, new_lifetime);
  CHECK(!spills_.empty());

  for (const auto& spill : spills_) {
    VRegLifetime* spill_lifetime = *spill.lifetime;

    // Assign spill slot.
    // Lifetimes being spilled do not interfere, so they can share a spill slot!
    // TODO(b/232598137): evicted tiny lifetimes have a spill slot already.
    // If we only evict tiny lifetimes, we might not need a new spill_slot!
    // Allocate the spill slot here only when needed.
    if (spill_lifetime->GetSpill() == -1) {
      spill_lifetime->SetSpill(spill_slot);
    }

    // Split spilled lifetime into tiny lifetimes, enqueue them for allocation.
    VRegLifetimeList split(arena_);
    spill_lifetime->Split(spill.realloc_pos, &split);
    MergeVRegLifetimeList(lifetimes, pos, &split);

    // Expire spilled lifetime.
    lifetimes_.erase(spill.lifetime);
  }

  // Spilled all interfering lifetimes, can assign.
  lifetimes_.push_back(new_lifetime);
}

// Simple register allocator.
// Walks the list of lifetimes sorted by begin and allocates them in order.
// Modifies lifetimes that have been spilled and adds tiny lifetimes split
// from spilled lifetimes to the same list.
class VRegLifetimeAllocator {
 public:
  VRegLifetimeAllocator(MachineIR* machine_ir, VRegLifetimeList* lifetimes)
      : machine_ir_(machine_ir),
        lifetimes_(lifetimes),
        allocations_(config::kMaxHardRegs,
                     HardRegAllocation(machine_ir->arena()),
                     machine_ir->arena()) {}

  void Allocate();

 private:
  void AllocateLifetime(VRegLifetimeList::iterator lifetime_it);

  bool TryAssignHardReg(VRegLifetime* lifetime, MachineReg hard_reg);

  int ConsiderSpillHardReg(MachineReg hard_reg, VRegLifetime* lifetime);

  void SpillAndAssignHardReg(MachineReg hard_reg, VRegLifetimeList::iterator curr);

  void RewriteAllocatedLifetimes();

  MachineIR* machine_ir_;

  VRegLifetimeList* lifetimes_;

  ArenaVector<HardRegAllocation> allocations_;
};

int VRegLifetimeAllocator::ConsiderSpillHardReg(MachineReg hard_reg, VRegLifetime* lifetime) {
  return allocations_[hard_reg.reg()].ConsiderSpill(lifetime);
}

bool VRegLifetimeAllocator::TryAssignHardReg(VRegLifetime* curr_lifetime, MachineReg hard_reg) {
  if (allocations_[hard_reg.reg()].TryAssign(curr_lifetime)) {
    curr_lifetime->set_hard_reg(hard_reg);
    LOG_REG_ALLOC(".. to %s\n", GetMachineHardRegDebugName(hard_reg));
    return true;
  }
  return false;
}

void VRegLifetimeAllocator::SpillAndAssignHardReg(MachineReg hard_reg,
                                                  VRegLifetimeList::iterator curr) {
  auto next = std::next(curr);
  allocations_[hard_reg.reg()].SpillAndAssign(&*curr, machine_ir_->AllocSpill(), lifetimes_, next);
  curr->set_hard_reg(hard_reg);
  LOG_REG_ALLOC(".. to %s (after spill)\n", GetMachineHardRegDebugName(hard_reg));
}

void VRegLifetimeAllocator::AllocateLifetime(VRegLifetimeList::iterator lifetime_it) {
  VRegLifetime* lifetime = &*lifetime_it;
  const MachineRegClass* reg_class = lifetime->GetRegClass();

  LOG_REG_ALLOC(
      "allocating lifetime %s:\n%s", reg_class->GetDebugName(), lifetime->GetDebugString().c_str());

  // First try preferred register.
  MachineReg pref_reg = lifetime->FindMoveHint()->hard_reg();
  if (reg_class->HasReg(pref_reg) && TryAssignHardReg(lifetime, pref_reg)) {
    return;
  }

  // Walk registers from reg class.
  for (MachineReg hard_reg : *reg_class) {
    if (hard_reg != pref_reg && TryAssignHardReg(lifetime, hard_reg)) {
      return;
    }
  }

  LOG_REG_ALLOC("... failed to find free hard reg, will try spilling");

  // Walk registers again, consider each for spilling.
  int best_spill_weight = kInfiniteSpillWeight;
  MachineReg best_reg{0};
  for (MachineReg hard_reg : *reg_class) {
    int spill_weight = ConsiderSpillHardReg(hard_reg, lifetime);
    LOG_REG_ALLOC(
        "... consider spilling %s, weight %d", GetMachineHardRegDebugName(hard_reg), spill_weight);
    if (best_spill_weight > spill_weight) {
      best_spill_weight = spill_weight;
      best_reg = hard_reg;
    }
  }

  // Spill register with best spill weight.
  CHECK_LT(best_spill_weight, kInfiniteSpillWeight);
  SpillAndAssignHardReg(best_reg, lifetime_it);
}

void VRegLifetimeAllocator::RewriteAllocatedLifetimes() {
  for (auto& lifetime : *lifetimes_) {
    lifetime.Rewrite(machine_ir_);
  }
}

void VRegLifetimeAllocator::Allocate() {
  for (auto lifetime_it = lifetimes_->begin(); lifetime_it != lifetimes_->end(); ++lifetime_it) {
    AllocateLifetime(lifetime_it);
  }
  RewriteAllocatedLifetimes();
}

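// Collect lifetimes for all virtual registers in machine_ir into 'lifetimes',
// walking each basic block's live-in set, insns, and live-out set in order.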
void CollectLifetimes(const MachineIR* machine_ir, VRegLifetimeList* lifetimes) {
  VRegLifetimeAnalysis lifetime_analysis(machine_ir->arena(), 2 * machine_ir->NumVReg(), lifetimes);

  // Not 'const' because we need pointer to modifiable bb->insn_list().
  for (auto* bb : machine_ir->bb_list()) {
    for (const auto reg : bb->live_in()) {
      lifetime_analysis.SetLiveIn(reg);
    }

    for (auto insn_it = bb->insn_list().begin(); insn_it != bb->insn_list().end(); ++insn_it) {
      lifetime_analysis.AddInsn(MachineInsnListPosition(&(bb->insn_list()), insn_it));
    }

    for (const auto reg : bb->live_out()) {
      lifetime_analysis.SetLiveOut(reg);
    }
    lifetime_analysis.EndBasicBlock();
  }
}

}  // namespace

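// Public entry point: collect lifetimes for machine_ir and run the linear
// scan allocator over them (see VRegLifetimeAllocator::Allocate above).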
void AllocRegs(MachineIR* machine_ir) {
  VRegLifetimeList lifetimes(machine_ir->arena());
  CollectLifetimes(machine_ir, &lifetimes);

  VRegLifetimeAllocator allocator(machine_ir, &lifetimes);
  allocator.Allocate();
}

}  // namespace berberis