LLVM 22.0.0git
RegAllocFast.cpp
Go to the documentation of this file.
1//===- RegAllocFast.cpp - A fast register allocator for debug code --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file This register allocator allocates registers to a basic block at a
10/// time, attempting to keep values in registers and reusing registers as
11/// appropriate.
12//
13//===----------------------------------------------------------------------===//
14
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/IndexedMap.h"
19#include "llvm/ADT/MapVector.h"
20#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/SparseSet.h"
23#include "llvm/ADT/Statistic.h"
41#include "llvm/Pass.h"
42#include "llvm/Support/Debug.h"
45#include <cassert>
46#include <tuple>
47#include <vector>
48
49using namespace llvm;
50
51#define DEBUG_TYPE "regalloc"
52
53STATISTIC(NumStores, "Number of stores added");
54STATISTIC(NumLoads, "Number of loads added");
55STATISTIC(NumCoalesced, "Number of copies coalesced");
56
57// FIXME: Remove this switch when all testcases are fixed!
58static cl::opt<bool> IgnoreMissingDefs("rafast-ignore-missing-defs",
60
61static RegisterRegAlloc fastRegAlloc("fast", "fast register allocator",
63
64namespace {
65
66/// Assign ascending index for instructions in machine basic block. The index
67/// can be used to determine dominance between instructions in same MBB.
68class InstrPosIndexes {
69public:
70 void unsetInitialized() { IsInitialized = false; }
71
72 void init(const MachineBasicBlock &MBB) {
73 CurMBB = &MBB;
74 Instr2PosIndex.clear();
75 uint64_t LastIndex = 0;
76 for (const MachineInstr &MI : MBB) {
77 LastIndex += InstrDist;
78 Instr2PosIndex[&MI] = LastIndex;
79 }
80 }
81
82 /// Set \p Index to index of \p MI. If \p MI is new inserted, it try to assign
83 /// index without affecting existing instruction's index. Return true if all
84 /// instructions index has been reassigned.
85 bool getIndex(const MachineInstr &MI, uint64_t &Index) {
86 if (!IsInitialized) {
87 init(*MI.getParent());
88 IsInitialized = true;
89 Index = Instr2PosIndex.at(&MI);
90 return true;
91 }
92
93 assert(MI.getParent() == CurMBB && "MI is not in CurMBB");
94 auto It = Instr2PosIndex.find(&MI);
95 if (It != Instr2PosIndex.end()) {
96 Index = It->second;
97 return false;
98 }
99
100 // Distance is the number of consecutive unassigned instructions including
101 // MI. Start is the first instruction of them. End is the next of last
102 // instruction of them.
103 // e.g.
104 // |Instruction| A | B | C | MI | D | E |
105 // | Index | 1024 | | | | | 2048 |
106 //
107 // In this case, B, C, MI, D are unassigned. Distance is 4, Start is B, End
108 // is E.
109 unsigned Distance = 1;
111 End = std::next(Start);
112 while (Start != CurMBB->begin() &&
113 !Instr2PosIndex.count(&*std::prev(Start))) {
114 --Start;
115 ++Distance;
116 }
117 while (End != CurMBB->end() && !Instr2PosIndex.count(&*(End))) {
118 ++End;
119 ++Distance;
120 }
121
122 // LastIndex is initialized to last used index prior to MI or zero.
123 // In previous example, LastIndex is 1024, EndIndex is 2048;
124 uint64_t LastIndex =
125 Start == CurMBB->begin() ? 0 : Instr2PosIndex.at(&*std::prev(Start));
126 uint64_t Step;
127 if (End == CurMBB->end())
128 Step = static_cast<uint64_t>(InstrDist);
129 else {
130 // No instruction uses index zero.
131 uint64_t EndIndex = Instr2PosIndex.at(&*End);
132 assert(EndIndex > LastIndex && "Index must be ascending order");
133 unsigned NumAvailableIndexes = EndIndex - LastIndex - 1;
134 // We want index gap between two adjacent MI is as same as possible. Given
135 // total A available indexes, D is number of consecutive unassigned
136 // instructions, S is the step.
137 // |<- S-1 -> MI <- S-1 -> MI <- A-S*D ->|
138 // There're S-1 available indexes between unassigned instruction and its
139 // predecessor. There're A-S*D available indexes between the last
140 // unassigned instruction and its successor.
141 // Ideally, we want
142 // S-1 = A-S*D
143 // then
144 // S = (A+1)/(D+1)
145 // An valid S must be integer greater than zero, so
146 // S <= (A+1)/(D+1)
147 // =>
148 // A-S*D >= 0
149 // That means we can safely use (A+1)/(D+1) as step.
150 // In previous example, Step is 204, Index of B, C, MI, D is 1228, 1432,
151 // 1636, 1840.
152 Step = (NumAvailableIndexes + 1) / (Distance + 1);
153 }
154
155 // Reassign index for all instructions if number of new inserted
156 // instructions exceed slot or all instructions are new.
157 if (LLVM_UNLIKELY(!Step || (!LastIndex && Step == InstrDist))) {
158 init(*CurMBB);
159 Index = Instr2PosIndex.at(&MI);
160 return true;
161 }
162
163 for (auto I = Start; I != End; ++I) {
164 LastIndex += Step;
165 Instr2PosIndex[&*I] = LastIndex;
166 }
167 Index = Instr2PosIndex.at(&MI);
168 return false;
169 }
170
171private:
172 bool IsInitialized = false;
173 enum { InstrDist = 1024 };
174 const MachineBasicBlock *CurMBB = nullptr;
175 DenseMap<const MachineInstr *, uint64_t> Instr2PosIndex;
176};
177
178class RegAllocFastImpl {
179public:
180 RegAllocFastImpl(const RegAllocFilterFunc F = nullptr,
181 bool ClearVirtRegs_ = true)
182 : ShouldAllocateRegisterImpl(F), StackSlotForVirtReg(-1),
183 ClearVirtRegs(ClearVirtRegs_) {}
184
185private:
186 MachineFrameInfo *MFI = nullptr;
187 MachineRegisterInfo *MRI = nullptr;
188 const TargetRegisterInfo *TRI = nullptr;
189 const TargetInstrInfo *TII = nullptr;
190 RegisterClassInfo RegClassInfo;
191 const RegAllocFilterFunc ShouldAllocateRegisterImpl;
192
193 /// Basic block currently being allocated.
194 MachineBasicBlock *MBB = nullptr;
195
196 /// Maps virtual regs to the frame index where these values are spilled.
197 IndexedMap<int, VirtReg2IndexFunctor> StackSlotForVirtReg;
198
199 /// Everything we know about a live virtual register.
200 struct LiveReg {
201 MachineInstr *LastUse = nullptr; ///< Last instr to use reg.
202 Register VirtReg; ///< Virtual register number.
203 MCPhysReg PhysReg = 0; ///< Currently held here.
204 bool LiveOut = false; ///< Register is possibly live out.
205 bool Reloaded = false; ///< Register was reloaded.
206 bool Error = false; ///< Could not allocate.
207
208 explicit LiveReg(Register VirtReg) : VirtReg(VirtReg) {}
209 explicit LiveReg() {}
210
211 unsigned getSparseSetIndex() const { return VirtReg.virtRegIndex(); }
212 };
213
214 using LiveRegMap = SparseSet<LiveReg, identity<unsigned>, uint16_t>;
215 /// This map contains entries for each virtual register that is currently
216 /// available in a physical register.
217 LiveRegMap LiveVirtRegs;
218
219 /// Stores assigned virtual registers present in the bundle MI.
220 DenseMap<Register, LiveReg> BundleVirtRegsMap;
221
222 DenseMap<Register, SmallVector<MachineOperand *, 2>> LiveDbgValueMap;
223 /// List of DBG_VALUE that we encountered without the vreg being assigned
224 /// because they were placed after the last use of the vreg.
225 DenseMap<Register, SmallVector<MachineInstr *, 1>> DanglingDbgValues;
226
227 /// Has a bit set for every virtual register for which it was determined
228 /// that it is alive across blocks.
229 BitVector MayLiveAcrossBlocks;
230
231 /// State of a register unit.
232 enum RegUnitState {
233 /// A free register is not currently in use and can be allocated
234 /// immediately without checking aliases.
235 regFree,
236
237 /// A pre-assigned register has been assigned before register allocation
238 /// (e.g., setting up a call parameter).
239 regPreAssigned,
240
241 /// Used temporarily in reloadAtBegin() to mark register units that are
242 /// live-in to the basic block.
243 regLiveIn,
244
245 /// A register state may also be a virtual register number, indication
246 /// that the physical register is currently allocated to a virtual
247 /// register. In that case, LiveVirtRegs contains the inverse mapping.
248 };
249
250 /// Maps each physical register to a RegUnitState enum or virtual register.
251 std::vector<unsigned> RegUnitStates;
252
254
255 /// Track register units that are used in the current instruction, and so
256 /// cannot be allocated.
257 ///
258 /// In the first phase (tied defs/early clobber), we consider also physical
259 /// uses, afterwards, we don't. If the lowest bit isn't set, it's a solely
260 /// physical use (markPhysRegUsedInInstr), otherwise, it's a normal use. To
261 /// avoid resetting the entire vector after every instruction, we track the
262 /// instruction "generation" in the remaining 31 bits -- this means, that if
263 /// UsedInInstr[Idx] < InstrGen, the register unit is unused. InstrGen is
264 /// never zero and always incremented by two.
265 ///
266 /// Don't allocate inline storage: the number of register units is typically
267 /// quite large (e.g., AArch64 > 100, X86 > 200, AMDGPU > 1000).
268 uint32_t InstrGen;
269 SmallVector<unsigned, 0> UsedInInstr;
270
271 SmallVector<unsigned, 8> DefOperandIndexes;
272 // Register masks attached to the current instruction.
274
275 // Assign index for each instruction to quickly determine dominance.
276 InstrPosIndexes PosIndexes;
277
278 void setPhysRegState(MCRegister PhysReg, unsigned NewState);
279 bool isPhysRegFree(MCRegister PhysReg) const;
280
281 /// Mark a physreg as used in this instruction.
282 void markRegUsedInInstr(MCPhysReg PhysReg) {
283 for (MCRegUnit Unit : TRI->regunits(PhysReg))
284 UsedInInstr[Unit] = InstrGen | 1;
285 }
286
287 // Check if physreg is clobbered by instruction's regmask(s).
288 bool isClobberedByRegMasks(MCRegister PhysReg) const {
289 return llvm::any_of(RegMasks, [PhysReg](const uint32_t *Mask) {
290 return MachineOperand::clobbersPhysReg(Mask, PhysReg);
291 });
292 }
293
294 /// Check if a physreg or any of its aliases are used in this instruction.
295 bool isRegUsedInInstr(MCRegister PhysReg, bool LookAtPhysRegUses) const {
296 if (LookAtPhysRegUses && isClobberedByRegMasks(PhysReg))
297 return true;
298 for (MCRegUnit Unit : TRI->regunits(PhysReg))
299 if (UsedInInstr[Unit] >= (InstrGen | !LookAtPhysRegUses))
300 return true;
301 return false;
302 }
303
304 /// Mark physical register as being used in a register use operand.
305 /// This is only used by the special livethrough handling code.
306 void markPhysRegUsedInInstr(MCRegister PhysReg) {
307 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
308 assert(UsedInInstr[Unit] <= InstrGen && "non-phys use before phys use?");
309 UsedInInstr[Unit] = InstrGen;
310 }
311 }
312
313 /// Remove mark of physical register being used in the instruction.
314 void unmarkRegUsedInInstr(MCRegister PhysReg) {
315 for (MCRegUnit Unit : TRI->regunits(PhysReg))
316 UsedInInstr[Unit] = 0;
317 }
318
319 enum : unsigned {
320 spillClean = 50,
321 spillDirty = 100,
322 spillPrefBonus = 20,
323 spillImpossible = ~0u
324 };
325
326public:
327 bool ClearVirtRegs;
328
329 bool runOnMachineFunction(MachineFunction &MF);
330
331private:
332 void allocateBasicBlock(MachineBasicBlock &MBB);
333
334 void addRegClassDefCounts(MutableArrayRef<unsigned> RegClassDefCounts,
335 Register Reg) const;
336
337 void findAndSortDefOperandIndexes(const MachineInstr &MI);
338
339 void allocateInstruction(MachineInstr &MI);
340 void handleDebugValue(MachineInstr &MI);
341 void handleBundle(MachineInstr &MI);
342
343 bool usePhysReg(MachineInstr &MI, MCRegister PhysReg);
344 bool definePhysReg(MachineInstr &MI, MCRegister PhysReg);
345 bool displacePhysReg(MachineInstr &MI, MCRegister PhysReg);
346 void freePhysReg(MCRegister PhysReg);
347
348 unsigned calcSpillCost(MCPhysReg PhysReg) const;
349
350 LiveRegMap::iterator findLiveVirtReg(Register VirtReg) {
351 return LiveVirtRegs.find(VirtReg.virtRegIndex());
352 }
353
354 LiveRegMap::const_iterator findLiveVirtReg(Register VirtReg) const {
355 return LiveVirtRegs.find(VirtReg.virtRegIndex());
356 }
357
358 void assignVirtToPhysReg(MachineInstr &MI, LiveReg &, MCRegister PhysReg);
359 void allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint,
360 bool LookAtPhysRegUses = false);
361 void allocVirtRegUndef(MachineOperand &MO);
362 void assignDanglingDebugValues(MachineInstr &Def, Register VirtReg,
363 MCRegister Reg);
364 bool defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum,
365 Register VirtReg);
366 bool defineVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg,
367 bool LookAtPhysRegUses = false);
368 bool useVirtReg(MachineInstr &MI, MachineOperand &MO, Register VirtReg);
369
370 MCPhysReg getErrorAssignment(const LiveReg &LR, MachineInstr &MI,
371 const TargetRegisterClass &RC);
372
374 getMBBBeginInsertionPoint(MachineBasicBlock &MBB,
375 SmallSet<Register, 2> &PrologLiveIns) const;
376
377 void reloadAtBegin(MachineBasicBlock &MBB);
378 bool setPhysReg(MachineInstr &MI, MachineOperand &MO,
379 const LiveReg &Assignment);
380
381 Register traceCopies(Register VirtReg) const;
382 Register traceCopyChain(Register Reg) const;
383
384 bool shouldAllocateRegister(const Register Reg) const;
385 int getStackSpaceFor(Register VirtReg);
386 void spill(MachineBasicBlock::iterator Before, Register VirtReg,
387 MCPhysReg AssignedReg, bool Kill, bool LiveOut);
388 void reload(MachineBasicBlock::iterator Before, Register VirtReg,
389 MCPhysReg PhysReg);
390
391 bool mayLiveOut(Register VirtReg);
392 bool mayLiveIn(Register VirtReg);
393
394 bool mayBeSpillFromInlineAsmBr(const MachineInstr &MI) const;
395
396 void dumpState() const;
397};
398
399class RegAllocFast : public MachineFunctionPass {
400 RegAllocFastImpl Impl;
401
402public:
403 static char ID;
404
405 RegAllocFast(const RegAllocFilterFunc F = nullptr, bool ClearVirtRegs_ = true)
406 : MachineFunctionPass(ID), Impl(F, ClearVirtRegs_) {}
407
408 bool runOnMachineFunction(MachineFunction &MF) override {
409 return Impl.runOnMachineFunction(MF);
410 }
411
412 StringRef getPassName() const override { return "Fast Register Allocator"; }
413
414 void getAnalysisUsage(AnalysisUsage &AU) const override {
415 AU.setPreservesCFG();
417 }
418
419 MachineFunctionProperties getRequiredProperties() const override {
420 return MachineFunctionProperties().setNoPHIs();
421 }
422
423 MachineFunctionProperties getSetProperties() const override {
424 if (Impl.ClearVirtRegs) {
425 return MachineFunctionProperties().setNoVRegs();
426 }
427
428 return MachineFunctionProperties();
429 }
430
431 MachineFunctionProperties getClearedProperties() const override {
432 return MachineFunctionProperties().setIsSSA();
433 }
434};
435
436} // end anonymous namespace
437
// Pass identification: the address of ID is the unique identifier used by the
// legacy pass manager.
char RegAllocFast::ID = 0;

INITIALIZE_PASS(RegAllocFast, "regallocfast", "Fast Register Allocator", false,
                false)
442
443bool RegAllocFastImpl::shouldAllocateRegister(const Register Reg) const {
444 assert(Reg.isVirtual());
445 if (!ShouldAllocateRegisterImpl)
446 return true;
447
448 return ShouldAllocateRegisterImpl(*TRI, *MRI, Reg);
449}
450
451void RegAllocFastImpl::setPhysRegState(MCRegister PhysReg, unsigned NewState) {
452 for (MCRegUnit Unit : TRI->regunits(PhysReg))
453 RegUnitStates[Unit] = NewState;
454}
455
456bool RegAllocFastImpl::isPhysRegFree(MCRegister PhysReg) const {
457 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
458 if (RegUnitStates[Unit] != regFree)
459 return false;
460 }
461 return true;
462}
463
464/// This allocates space for the specified virtual register to be held on the
465/// stack.
466int RegAllocFastImpl::getStackSpaceFor(Register VirtReg) {
467 // Find the location Reg would belong...
468 int SS = StackSlotForVirtReg[VirtReg];
469 // Already has space allocated?
470 if (SS != -1)
471 return SS;
472
473 // Allocate a new stack object for this spill location...
474 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
475 unsigned Size = TRI->getSpillSize(RC);
476 Align Alignment = TRI->getSpillAlign(RC);
477
478 const MachineFunction &MF = MRI->getMF();
479 auto &ST = MF.getSubtarget();
480 Align CurrentAlign = ST.getFrameLowering()->getStackAlign();
481 if (Alignment > CurrentAlign && !TRI->canRealignStack(MF))
482 Alignment = CurrentAlign;
483
484 int FrameIdx = MFI->CreateSpillStackObject(Size, Alignment);
485
486 // Assign the slot.
487 StackSlotForVirtReg[VirtReg] = FrameIdx;
488 return FrameIdx;
489}
490
/// Return true if \p A comes before \p B in their common basic block,
/// according to the cached position indexes in \p PosIndexes.
static bool dominates(InstrPosIndexes &PosIndexes, const MachineInstr &A,
                      const MachineInstr &B) {
  uint64_t IndexA, IndexB;
  PosIndexes.getIndex(A, IndexA);
  // getIndex returns true when it had to renumber the whole block; in that
  // case A's previously fetched index is stale and must be re-queried.
  if (LLVM_UNLIKELY(PosIndexes.getIndex(B, IndexB)))
    PosIndexes.getIndex(A, IndexA);
  return IndexA < IndexB;
}
499
500/// Returns true if \p MI is a spill of a live-in physical register in a block
501/// targeted by an INLINEASM_BR. Such spills must precede reloads of live-in
502/// virtual registers, so that we do not reload from an uninitialized stack
503/// slot.
504bool RegAllocFastImpl::mayBeSpillFromInlineAsmBr(const MachineInstr &MI) const {
505 int FI;
506 auto *MBB = MI.getParent();
508 MFI->isSpillSlotObjectIndex(FI))
509 for (const auto &Op : MI.operands())
510 if (Op.isReg() && MBB->isLiveIn(Op.getReg()))
511 return true;
512 return false;
513}
514
515/// Returns false if \p VirtReg is known to not live out of the current block.
516bool RegAllocFastImpl::mayLiveOut(Register VirtReg) {
517 if (MayLiveAcrossBlocks.test(VirtReg.virtRegIndex())) {
518 // Cannot be live-out if there are no successors.
519 return !MBB->succ_empty();
520 }
521
522 const MachineInstr *SelfLoopDef = nullptr;
523
524 // If this block loops back to itself, it is necessary to check whether the
525 // use comes after the def.
526 if (MBB->isSuccessor(MBB)) {
527 // Find the first def in the self loop MBB.
528 for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
529 if (DefInst.getParent() != MBB) {
530 MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
531 return true;
532 } else {
533 if (!SelfLoopDef || dominates(PosIndexes, DefInst, *SelfLoopDef))
534 SelfLoopDef = &DefInst;
535 }
536 }
537 if (!SelfLoopDef) {
538 MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
539 return true;
540 }
541 }
542
543 // See if the first \p Limit uses of the register are all in the current
544 // block.
545 static const unsigned Limit = 8;
546 unsigned C = 0;
547 for (const MachineInstr &UseInst : MRI->use_nodbg_instructions(VirtReg)) {
548 if (UseInst.getParent() != MBB || ++C >= Limit) {
549 MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
550 // Cannot be live-out if there are no successors.
551 return !MBB->succ_empty();
552 }
553
554 if (SelfLoopDef) {
555 // Try to handle some simple cases to avoid spilling and reloading every
556 // value inside a self looping block.
557 if (SelfLoopDef == &UseInst ||
558 !dominates(PosIndexes, *SelfLoopDef, UseInst)) {
559 MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
560 return true;
561 }
562 }
563 }
564
565 return false;
566}
567
568/// Returns false if \p VirtReg is known to not be live into the current block.
569bool RegAllocFastImpl::mayLiveIn(Register VirtReg) {
570 if (MayLiveAcrossBlocks.test(VirtReg.virtRegIndex()))
571 return !MBB->pred_empty();
572
573 // See if the first \p Limit def of the register are all in the current block.
574 static const unsigned Limit = 8;
575 unsigned C = 0;
576 for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
577 if (DefInst.getParent() != MBB || ++C >= Limit) {
578 MayLiveAcrossBlocks.set(VirtReg.virtRegIndex());
579 return !MBB->pred_empty();
580 }
581 }
582
583 return false;
584}
585
586/// Insert spill instruction for \p AssignedReg before \p Before. Update
587/// DBG_VALUEs with \p VirtReg operands with the stack slot.
588void RegAllocFastImpl::spill(MachineBasicBlock::iterator Before,
589 Register VirtReg, MCPhysReg AssignedReg, bool Kill,
590 bool LiveOut) {
591 LLVM_DEBUG(dbgs() << "Spilling " << printReg(VirtReg, TRI) << " in "
592 << printReg(AssignedReg, TRI));
593 int FI = getStackSpaceFor(VirtReg);
594 LLVM_DEBUG(dbgs() << " to stack slot #" << FI << '\n');
595
596 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
597 TII->storeRegToStackSlot(*MBB, Before, AssignedReg, Kill, FI, &RC, TRI,
598 VirtReg);
599 ++NumStores;
600
602
603 // When we spill a virtual register, we will have spill instructions behind
604 // every definition of it, meaning we can switch all the DBG_VALUEs over
605 // to just reference the stack slot.
606 SmallVectorImpl<MachineOperand *> &LRIDbgOperands = LiveDbgValueMap[VirtReg];
607 SmallMapVector<MachineInstr *, SmallVector<const MachineOperand *>, 2>
608 SpilledOperandsMap;
609 for (MachineOperand *MO : LRIDbgOperands)
610 SpilledOperandsMap[MO->getParent()].push_back(MO);
611 for (const auto &MISpilledOperands : SpilledOperandsMap) {
612 MachineInstr &DBG = *MISpilledOperands.first;
613 // We don't have enough support for tracking operands of DBG_VALUE_LISTs.
614 if (DBG.isDebugValueList())
615 continue;
616 MachineInstr *NewDV = buildDbgValueForSpill(
617 *MBB, Before, *MISpilledOperands.first, FI, MISpilledOperands.second);
618 assert(NewDV->getParent() == MBB && "dangling parent pointer");
619 (void)NewDV;
620 LLVM_DEBUG(dbgs() << "Inserting debug info due to spill:\n" << *NewDV);
621
622 if (LiveOut) {
623 // We need to insert a DBG_VALUE at the end of the block if the spill slot
624 // is live out, but there is another use of the value after the
625 // spill. This will allow LiveDebugValues to see the correct live out
626 // value to propagate to the successors.
627 MachineInstr *ClonedDV = MBB->getParent()->CloneMachineInstr(NewDV);
628 MBB->insert(FirstTerm, ClonedDV);
629 LLVM_DEBUG(dbgs() << "Cloning debug info due to live out spill\n");
630 }
631
632 // Rewrite unassigned dbg_values to use the stack slot.
633 // TODO We can potentially do this for list debug values as well if we know
634 // how the dbg_values are getting unassigned.
635 if (DBG.isNonListDebugValue()) {
636 MachineOperand &MO = DBG.getDebugOperand(0);
637 if (MO.isReg() && MO.getReg() == 0) {
638 updateDbgValueForSpill(DBG, FI, 0);
639 }
640 }
641 }
642 // Now this register is spilled there is should not be any DBG_VALUE
643 // pointing to this register because they are all pointing to spilled value
644 // now.
645 LRIDbgOperands.clear();
646}
647
648/// Insert reload instruction for \p PhysReg before \p Before.
649void RegAllocFastImpl::reload(MachineBasicBlock::iterator Before,
650 Register VirtReg, MCPhysReg PhysReg) {
651 LLVM_DEBUG(dbgs() << "Reloading " << printReg(VirtReg, TRI) << " into "
652 << printReg(PhysReg, TRI) << '\n');
653 int FI = getStackSpaceFor(VirtReg);
654 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
655 TII->loadRegFromStackSlot(*MBB, Before, PhysReg, FI, &RC, TRI, VirtReg);
656 ++NumLoads;
657}
658
659/// Get basic block begin insertion point.
660/// This is not just MBB.begin() because surprisingly we have EH_LABEL
661/// instructions marking the begin of a basic block. This means we must insert
662/// new instructions after such labels...
663MachineBasicBlock::iterator RegAllocFastImpl::getMBBBeginInsertionPoint(
664 MachineBasicBlock &MBB, SmallSet<Register, 2> &PrologLiveIns) const {
666 while (I != MBB.end()) {
667 if (I->isLabel()) {
668 ++I;
669 continue;
670 }
671
672 // Skip prologues and inlineasm_br spills to place reloads afterwards.
673 if (!TII->isBasicBlockPrologue(*I) && !mayBeSpillFromInlineAsmBr(*I))
674 break;
675
676 // However if a prolog instruction reads a register that needs to be
677 // reloaded, the reload should be inserted before the prolog.
678 for (MachineOperand &MO : I->operands()) {
679 if (MO.isReg())
680 PrologLiveIns.insert(MO.getReg());
681 }
682
683 ++I;
684 }
685
686 return I;
687}
688
689/// Reload all currently assigned virtual registers.
690void RegAllocFastImpl::reloadAtBegin(MachineBasicBlock &MBB) {
691 if (LiveVirtRegs.empty())
692 return;
693
694 for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
695 MCRegister Reg = P.PhysReg;
696 // Set state to live-in. This possibly overrides mappings to virtual
697 // registers but we don't care anymore at this point.
698 setPhysRegState(Reg, regLiveIn);
699 }
700
701 SmallSet<Register, 2> PrologLiveIns;
702
703 // The LiveRegMap is keyed by an unsigned (the virtreg number), so the order
704 // of spilling here is deterministic, if arbitrary.
705 MachineBasicBlock::iterator InsertBefore =
706 getMBBBeginInsertionPoint(MBB, PrologLiveIns);
707 for (const LiveReg &LR : LiveVirtRegs) {
708 MCPhysReg PhysReg = LR.PhysReg;
709 if (PhysReg == 0 || LR.Error)
710 continue;
711
712 MCRegUnit FirstUnit = *TRI->regunits(PhysReg).begin();
713 if (RegUnitStates[FirstUnit] == regLiveIn)
714 continue;
715
717 "no reload in start block. Missing vreg def?");
718
719 if (PrologLiveIns.count(PhysReg)) {
720 // FIXME: Theoretically this should use an insert point skipping labels
721 // but I'm not sure how labels should interact with prolog instruction
722 // that need reloads.
723 reload(MBB.begin(), LR.VirtReg, PhysReg);
724 } else
725 reload(InsertBefore, LR.VirtReg, PhysReg);
726 }
727 LiveVirtRegs.clear();
728}
729
730/// Handle the direct use of a physical register. Check that the register is
731/// not used by a virtreg. Kill the physreg, marking it free. This may add
732/// implicit kills to MO->getParent() and invalidate MO.
733bool RegAllocFastImpl::usePhysReg(MachineInstr &MI, MCRegister Reg) {
734 assert(Register::isPhysicalRegister(Reg) && "expected physreg");
735 bool displacedAny = displacePhysReg(MI, Reg);
736 setPhysRegState(Reg, regPreAssigned);
737 markRegUsedInInstr(Reg);
738 return displacedAny;
739}
740
741bool RegAllocFastImpl::definePhysReg(MachineInstr &MI, MCRegister Reg) {
742 bool displacedAny = displacePhysReg(MI, Reg);
743 setPhysRegState(Reg, regPreAssigned);
744 return displacedAny;
745}
746
747/// Mark PhysReg as reserved or free after spilling any virtregs. This is very
748/// similar to defineVirtReg except the physreg is reserved instead of
749/// allocated.
750bool RegAllocFastImpl::displacePhysReg(MachineInstr &MI, MCRegister PhysReg) {
751 bool displacedAny = false;
752
753 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
754 switch (unsigned VirtReg = RegUnitStates[Unit]) {
755 default: {
756 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
757 assert(LRI != LiveVirtRegs.end() && "datastructures in sync");
758 MachineBasicBlock::iterator ReloadBefore =
759 std::next((MachineBasicBlock::iterator)MI.getIterator());
760 while (mayBeSpillFromInlineAsmBr(*ReloadBefore))
761 ++ReloadBefore;
762 reload(ReloadBefore, VirtReg, LRI->PhysReg);
763
764 setPhysRegState(LRI->PhysReg, regFree);
765 LRI->PhysReg = 0;
766 LRI->Reloaded = true;
767 displacedAny = true;
768 break;
769 }
770 case regPreAssigned:
771 RegUnitStates[Unit] = regFree;
772 displacedAny = true;
773 break;
774 case regFree:
775 break;
776 }
777 }
778 return displacedAny;
779}
780
781void RegAllocFastImpl::freePhysReg(MCRegister PhysReg) {
782 LLVM_DEBUG(dbgs() << "Freeing " << printReg(PhysReg, TRI) << ':');
783
784 MCRegUnit FirstUnit = *TRI->regunits(PhysReg).begin();
785 switch (unsigned VirtReg = RegUnitStates[FirstUnit]) {
786 case regFree:
787 LLVM_DEBUG(dbgs() << '\n');
788 return;
789 case regPreAssigned:
790 LLVM_DEBUG(dbgs() << '\n');
791 setPhysRegState(PhysReg, regFree);
792 return;
793 default: {
794 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
795 assert(LRI != LiveVirtRegs.end());
796 LLVM_DEBUG(dbgs() << ' ' << printReg(LRI->VirtReg, TRI) << '\n');
797 setPhysRegState(LRI->PhysReg, regFree);
798 LRI->PhysReg = 0;
799 }
800 return;
801 }
802}
803
804/// Return the cost of spilling clearing out PhysReg and aliases so it is free
805/// for allocation. Returns 0 when PhysReg is free or disabled with all aliases
806/// disabled - it can be allocated directly.
807/// \returns spillImpossible when PhysReg or an alias can't be spilled.
808unsigned RegAllocFastImpl::calcSpillCost(MCPhysReg PhysReg) const {
809 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
810 switch (unsigned VirtReg = RegUnitStates[Unit]) {
811 case regFree:
812 break;
813 case regPreAssigned:
814 LLVM_DEBUG(dbgs() << "Cannot spill pre-assigned "
815 << printReg(PhysReg, TRI) << '\n');
816 return spillImpossible;
817 default: {
818 bool SureSpill = StackSlotForVirtReg[VirtReg] != -1 ||
819 findLiveVirtReg(VirtReg)->LiveOut;
820 return SureSpill ? spillClean : spillDirty;
821 }
822 }
823 }
824 return 0;
825}
826
827void RegAllocFastImpl::assignDanglingDebugValues(MachineInstr &Definition,
828 Register VirtReg,
829 MCRegister Reg) {
830 auto UDBGValIter = DanglingDbgValues.find(VirtReg);
831 if (UDBGValIter == DanglingDbgValues.end())
832 return;
833
834 SmallVectorImpl<MachineInstr *> &Dangling = UDBGValIter->second;
835 for (MachineInstr *DbgValue : Dangling) {
836 assert(DbgValue->isDebugValue());
837 if (!DbgValue->hasDebugOperandForReg(VirtReg))
838 continue;
839
840 // Test whether the physreg survives from the definition to the DBG_VALUE.
841 MCPhysReg SetToReg = Reg;
842 unsigned Limit = 20;
843 for (MachineBasicBlock::iterator I = std::next(Definition.getIterator()),
844 E = DbgValue->getIterator();
845 I != E; ++I) {
846 if (I->modifiesRegister(Reg, TRI) || --Limit == 0) {
847 LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
848 << '\n');
849 SetToReg = 0;
850 break;
851 }
852 }
853 for (MachineOperand &MO : DbgValue->getDebugOperandsForReg(VirtReg)) {
854 MO.setReg(SetToReg);
855 if (SetToReg != 0)
856 MO.setIsRenamable();
857 }
858 }
859 Dangling.clear();
860}
861
862/// This method updates local state so that we know that PhysReg is the
863/// proper container for VirtReg now. The physical register must not be used
864/// for anything else when this is called.
865void RegAllocFastImpl::assignVirtToPhysReg(MachineInstr &AtMI, LiveReg &LR,
866 MCRegister PhysReg) {
867 Register VirtReg = LR.VirtReg;
868 LLVM_DEBUG(dbgs() << "Assigning " << printReg(VirtReg, TRI) << " to "
869 << printReg(PhysReg, TRI) << '\n');
870 assert(LR.PhysReg == 0 && "Already assigned a physreg");
871 assert(PhysReg != 0 && "Trying to assign no register");
872 LR.PhysReg = PhysReg;
873 setPhysRegState(PhysReg, VirtReg.id());
874
875 assignDanglingDebugValues(AtMI, VirtReg, PhysReg);
876}
877
878static bool isCoalescable(const MachineInstr &MI) { return MI.isFullCopy(); }
879
880Register RegAllocFastImpl::traceCopyChain(Register Reg) const {
881 static const unsigned ChainLengthLimit = 3;
882 unsigned C = 0;
883 do {
884 if (Reg.isPhysical())
885 return Reg;
887
888 MachineInstr *VRegDef = MRI->getUniqueVRegDef(Reg);
889 if (!VRegDef || !isCoalescable(*VRegDef))
890 return 0;
891 Reg = VRegDef->getOperand(1).getReg();
892 } while (++C <= ChainLengthLimit);
893 return 0;
894}
895
896/// Check if any of \p VirtReg's definitions is a copy. If it is follow the
897/// chain of copies to check whether we reach a physical register we can
898/// coalesce with.
899Register RegAllocFastImpl::traceCopies(Register VirtReg) const {
900 static const unsigned DefLimit = 3;
901 unsigned C = 0;
902 for (const MachineInstr &MI : MRI->def_instructions(VirtReg)) {
903 if (isCoalescable(MI)) {
904 Register Reg = MI.getOperand(1).getReg();
905 Reg = traceCopyChain(Reg);
906 if (Reg.isValid())
907 return Reg;
908 }
909
910 if (++C >= DefLimit)
911 break;
912 }
913 return Register();
914}
915
916/// Allocates a physical register for VirtReg.
917void RegAllocFastImpl::allocVirtReg(MachineInstr &MI, LiveReg &LR,
918 Register Hint0, bool LookAtPhysRegUses) {
919 const Register VirtReg = LR.VirtReg;
920 assert(LR.PhysReg == 0);
921
922 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
923 LLVM_DEBUG(dbgs() << "Search register for " << printReg(VirtReg)
924 << " in class " << TRI->getRegClassName(&RC)
925 << " with hint " << printReg(Hint0, TRI) << '\n');
926
927 // Take hint when possible.
928 if (Hint0.isPhysical() && MRI->isAllocatable(Hint0) && RC.contains(Hint0) &&
929 !isRegUsedInInstr(Hint0, LookAtPhysRegUses)) {
930 // Take hint if the register is currently free.
931 if (isPhysRegFree(Hint0)) {
932 LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint0, TRI)
933 << '\n');
934 assignVirtToPhysReg(MI, LR, Hint0);
935 return;
936 } else {
937 LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint0, TRI)
938 << " occupied\n");
939 }
940 } else {
941 Hint0 = Register();
942 }
943
944 // Try other hint.
945 Register Hint1 = traceCopies(VirtReg);
946 if (Hint1.isPhysical() && MRI->isAllocatable(Hint1) && RC.contains(Hint1) &&
947 !isRegUsedInInstr(Hint1, LookAtPhysRegUses)) {
948 // Take hint if the register is currently free.
949 if (isPhysRegFree(Hint1)) {
950 LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint1, TRI)
951 << '\n');
952 assignVirtToPhysReg(MI, LR, Hint1);
953 return;
954 } else {
955 LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint1, TRI)
956 << " occupied\n");
957 }
958 } else {
959 Hint1 = Register();
960 }
961
962 MCPhysReg BestReg = 0;
963 unsigned BestCost = spillImpossible;
964 ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
965 for (MCPhysReg PhysReg : AllocationOrder) {
966 LLVM_DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << ' ');
967 if (isRegUsedInInstr(PhysReg, LookAtPhysRegUses)) {
968 LLVM_DEBUG(dbgs() << "already used in instr.\n");
969 continue;
970 }
971
972 unsigned Cost = calcSpillCost(PhysReg);
973 LLVM_DEBUG(dbgs() << "Cost: " << Cost << " BestCost: " << BestCost << '\n');
974 // Immediate take a register with cost 0.
975 if (Cost == 0) {
976 assignVirtToPhysReg(MI, LR, PhysReg);
977 return;
978 }
979
980 if (PhysReg == Hint0 || PhysReg == Hint1)
981 Cost -= spillPrefBonus;
982
983 if (Cost < BestCost) {
984 BestReg = PhysReg;
985 BestCost = Cost;
986 }
987 }
988
989 if (!BestReg) {
990 // Nothing we can do: Report an error and keep going with an invalid
991 // allocation.
992 LR.PhysReg = getErrorAssignment(LR, MI, RC);
993 LR.Error = true;
994 return;
995 }
996
997 displacePhysReg(MI, BestReg);
998 assignVirtToPhysReg(MI, LR, BestReg);
999}
1000
1001void RegAllocFastImpl::allocVirtRegUndef(MachineOperand &MO) {
1002 assert(MO.isUndef() && "expected undef use");
1003 Register VirtReg = MO.getReg();
1004 assert(VirtReg.isVirtual() && "Expected virtreg");
1005 if (!shouldAllocateRegister(VirtReg))
1006 return;
1007
1008 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
1009 MCPhysReg PhysReg;
1010 bool IsRenamable = true;
1011 if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
1012 PhysReg = LRI->PhysReg;
1013 } else {
1014 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
1015 ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
1016 if (AllocationOrder.empty()) {
1017 // All registers in the class were reserved.
1018 //
1019 // It might be OK to take any entry from the class as this is an undef
1020 // use, but accepting this would give different behavior than greedy and
1021 // basic.
1022 PhysReg = getErrorAssignment(*LRI, *MO.getParent(), RC);
1023 LRI->Error = true;
1024 IsRenamable = false;
1025 } else
1026 PhysReg = AllocationOrder.front();
1027 }
1028
1029 unsigned SubRegIdx = MO.getSubReg();
1030 if (SubRegIdx != 0) {
1031 PhysReg = TRI->getSubReg(PhysReg, SubRegIdx);
1032 MO.setSubReg(0);
1033 }
1034 MO.setReg(PhysReg);
1035 MO.setIsRenamable(IsRenamable);
1036}
1037
/// Variation of defineVirtReg() with special handling for livethrough regs
/// (tied or earlyclobber) that may interfere with preassigned uses.
/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFastImpl::defineLiveThroughVirtReg(MachineInstr &MI,
                                                unsigned OpNum,
                                                Register VirtReg) {
  if (!shouldAllocateRegister(VirtReg))
    return false;
  LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
  if (LRI != LiveVirtRegs.end()) {
    MCPhysReg PrevReg = LRI->PhysReg;
    if (PrevReg != 0 && isRegUsedInInstr(PrevReg, true)) {
      // The register currently holding the value is also (pre-)used by this
      // instruction, so a livethrough def here would clobber that use: move
      // the value to a freshly allocated register and emit a COPY back into
      // PrevReg after MI so readers below still find the value there.
      LLVM_DEBUG(dbgs() << "Need new assignment for " << printReg(PrevReg, TRI)
                        << " (tied/earlyclobber resolution)\n");
      freePhysReg(PrevReg);
      LRI->PhysReg = 0;
      allocVirtReg(MI, *LRI, 0, true);
      MachineBasicBlock::iterator InsertBefore =
          std::next((MachineBasicBlock::iterator)MI.getIterator());
      LLVM_DEBUG(dbgs() << "Copy " << printReg(LRI->PhysReg, TRI) << " to "
                        << printReg(PrevReg, TRI) << '\n');
      BuildMI(*MBB, InsertBefore, MI.getDebugLoc(),
              TII->get(TargetOpcode::COPY), PrevReg)
          .addReg(LRI->PhysReg, llvm::RegState::Kill);
    }
    MachineOperand &MO = MI.getOperand(OpNum);
    if (MO.getSubReg() && !MO.isUndef()) {
      // A partial (subregister) redefinition reads the remainder of the
      // register, so record MI as a use of the value.
      LRI->LastUse = &MI;
    }
  }
  return defineVirtReg(MI, OpNum, VirtReg, true);
}
1070
/// Allocates a register for VirtReg definition. Typically the register is
/// already assigned from a use of the virtreg, however we still need to
/// perform an allocation if:
/// - It is a dead definition without any uses.
/// - The value is live out and all uses are in different basic blocks.
///
/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFastImpl::defineVirtReg(MachineInstr &MI, unsigned OpNum,
                                     Register VirtReg, bool LookAtPhysRegUses) {
  assert(VirtReg.isVirtual() && "Not a virtual register");
  if (!shouldAllocateRegister(VirtReg))
    return false;
  MachineOperand &MO = MI.getOperand(OpNum);
  LiveRegMap::iterator LRI;
  bool New;
  std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
  if (New) {
    // The block is scanned bottom-up (see allocateBasicBlock), so seeing the
    // def first means there were no uses after it in this block.
    if (!MO.isDead()) {
      if (mayLiveOut(VirtReg)) {
        LRI->LiveOut = true;
      } else {
        // It is a dead def without the dead flag; add the flag now.
        MO.setIsDead(true);
      }
    }
  }
  if (LRI->PhysReg == 0) {
    allocVirtReg(MI, *LRI, 0, LookAtPhysRegUses);
  } else {
    assert((!isRegUsedInInstr(LRI->PhysReg, LookAtPhysRegUses) || LRI->Error) &&
           "TODO: preassign mismatch");
    LLVM_DEBUG(dbgs() << "In def of " << printReg(VirtReg, TRI)
                      << " use existing assignment to "
                      << printReg(LRI->PhysReg, TRI) << '\n');
  }

  MCPhysReg PhysReg = LRI->PhysReg;
  if (LRI->Reloaded || LRI->LiveOut) {
    // The value is needed beyond this point (live-out of the block, or
    // reloaded by a later instruction): spill it right after the def.
    if (!MI.isImplicitDef()) {
      MachineBasicBlock::iterator SpillBefore =
          std::next((MachineBasicBlock::iterator)MI.getIterator());
      LLVM_DEBUG(dbgs() << "Spill Reason: LO: " << LRI->LiveOut
                        << " RL: " << LRI->Reloaded << '\n');
      bool Kill = LRI->LastUse == nullptr;
      spill(SpillBefore, VirtReg, PhysReg, Kill, LRI->LiveOut);

      // We need to place additional spills for each indirect destination of an
      // INLINEASM_BR.
      if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) {
        int FI = StackSlotForVirtReg[VirtReg];
        const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
        for (MachineOperand &MO : MI.operands()) {
          if (MO.isMBB()) {
            MachineBasicBlock *Succ = MO.getMBB();
            TII->storeRegToStackSlot(*Succ, Succ->begin(), PhysReg, Kill, FI,
                                     &RC, TRI, VirtReg);
            ++NumStores;
            Succ->addLiveIn(PhysReg);
          }
        }
      }

      LRI->LastUse = nullptr;
    }
    LRI->LiveOut = false;
    LRI->Reloaded = false;
  }
  if (MI.getOpcode() == TargetOpcode::BUNDLE) {
    // Record the assignment so handleBundle() can rewrite bundled copies of
    // this operand.
    BundleVirtRegsMap[VirtReg] = *LRI;
  }
  markRegUsedInInstr(PhysReg);
  return setPhysReg(MI, MO, *LRI);
}
1144
/// Allocates a register for a VirtReg use.
/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFastImpl::useVirtReg(MachineInstr &MI, MachineOperand &MO,
                                  Register VirtReg) {
  assert(VirtReg.isVirtual() && "Not a virtual register");
  if (!shouldAllocateRegister(VirtReg))
    return false;
  LiveRegMap::iterator LRI;
  bool New;
  std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
  if (New) {
    // First time the vreg is seen; since the block is scanned bottom-up this
    // is its last use in the block.
    if (!MO.isKill()) {
      if (mayLiveOut(VirtReg)) {
        LRI->LiveOut = true;
      } else {
        // It is a last (killing) use without the kill flag; add the flag now.
        MO.setIsKill(true);
      }
    }
  } else {
    assert((!MO.isKill() || LRI->LastUse == &MI) && "Invalid kill flag");
  }

  // If necessary allocate a register.
  if (LRI->PhysReg == 0) {
    assert(!MO.isTied() && "tied op should be allocated");
    Register Hint;
    // For a full COPY, hint the already-assigned destination register so the
    // copy can later become an identity copy and be coalesced away.
    if (MI.isCopy() && MI.getOperand(1).getSubReg() == 0) {
      Hint = MI.getOperand(0).getReg();
      if (Hint.isVirtual()) {
        assert(!shouldAllocateRegister(Hint));
        Hint = Register();
      } else {
        assert(Hint.isPhysical() &&
               "Copy destination should already be assigned");
      }
    }
    allocVirtReg(MI, *LRI, Hint, false);
  }

  LRI->LastUse = &MI;

  if (MI.getOpcode() == TargetOpcode::BUNDLE) {
    // Record the assignment so handleBundle() can rewrite bundled operands.
    BundleVirtRegsMap[VirtReg] = *LRI;
  }
  markRegUsedInInstr(LRI->PhysReg);
  return setPhysReg(MI, MO, *LRI);
}
1193
1194/// Query a physical register to use as a filler in contexts where the
1195/// allocation has failed. This will raise an error, but not abort the
1196/// compilation.
1197MCPhysReg RegAllocFastImpl::getErrorAssignment(const LiveReg &LR,
1198 MachineInstr &MI,
1199 const TargetRegisterClass &RC) {
1200 MachineFunction &MF = *MI.getMF();
1201
1202 // Avoid repeating the error every time a register is used.
1203 bool EmitError = !MF.getProperties().hasFailedRegAlloc();
1204 if (EmitError)
1205 MF.getProperties().setFailedRegAlloc();
1206
1207 // If the allocation order was empty, all registers in the class were
1208 // probably reserved. Fall back to taking the first register in the class,
1209 // even if it's reserved.
1210 ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
1211 if (AllocationOrder.empty()) {
1212 const Function &Fn = MF.getFunction();
1213 if (EmitError) {
1214 Fn.getContext().diagnose(DiagnosticInfoRegAllocFailure(
1215 "no registers from class available to allocate", Fn,
1216 MI.getDebugLoc()));
1217 }
1218
1219 ArrayRef<MCPhysReg> RawRegs = RC.getRegisters();
1220 assert(!RawRegs.empty() && "register classes cannot have no registers");
1221 return RawRegs.front();
1222 }
1223
1224 if (!LR.Error && EmitError) {
1225 // Nothing we can do: Report an error and keep going with an invalid
1226 // allocation.
1227 if (MI.isInlineAsm()) {
1228 MI.emitInlineAsmError(
1229 "inline assembly requires more registers than available");
1230 } else {
1231 const Function &Fn = MBB->getParent()->getFunction();
1232 Fn.getContext().diagnose(DiagnosticInfoRegAllocFailure(
1233 "ran out of registers during register allocation", Fn,
1234 MI.getDebugLoc()));
1235 }
1236 }
1237
1238 return AllocationOrder.front();
1239}
1240
/// Changes operand OpNum in MI to refer to PhysReg, considering subregs.
/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFastImpl::setPhysReg(MachineInstr &MI, MachineOperand &MO,
                                  const LiveReg &Assignment) {
  MCPhysReg PhysReg = Assignment.PhysReg;
  assert(PhysReg && "assignments should always be to a valid physreg");

  if (LLVM_UNLIKELY(Assignment.Error)) {
    // Make sure we don't set renamable in error scenarios, as we may have
    // assigned to a reserved register.
    if (MO.isUse())
      MO.setIsUndef(true);
  }

  // Common case: no subregister index — rewrite the operand in place.
  if (!MO.getSubReg()) {
    MO.setReg(PhysReg);
    MO.setIsRenamable(!Assignment.Error);
    return false;
  }

  // Handle subregister index.
  MO.setReg(TRI->getSubReg(PhysReg, MO.getSubReg()));
  MO.setIsRenamable(!Assignment.Error);

  // Note: We leave the subreg number around a little longer in case of defs.
  // This is so that the register freeing logic in allocateInstruction can still
  // recognize this as subregister defs. The code there will clear the number.
  if (!MO.isDef())
    MO.setSubReg(0);

  // A kill flag implies killing the full register. Add corresponding super
  // register kill.
  if (MO.isKill()) {
    MI.addRegisterKilled(PhysReg, TRI, true);
    // Conservatively assume implicit MOs were re-arranged
    return true;
  }

  // A <def,read-undef> of a sub-register requires an implicit def of the full
  // register.
  if (MO.isDef() && MO.isUndef()) {
    if (MO.isDead())
      MI.addRegisterDead(PhysReg, TRI, true);
    else
      MI.addRegisterDefined(PhysReg, TRI);
    // Conservatively assume implicit MOs were re-arranged
    return true;
  }
  return false;
}
1291
1292#ifndef NDEBUG
1293
/// Debug helper (debug builds only): print the per-register-unit assignment
/// state and assert that RegUnitStates and LiveVirtRegs are consistent
/// inverse mappings of each other.
void RegAllocFastImpl::dumpState() const {
  for (unsigned Unit = 1, UnitE = TRI->getNumRegUnits(); Unit != UnitE;
       ++Unit) {
    switch (unsigned VirtReg = RegUnitStates[Unit]) {
    case regFree:
      break;
    case regPreAssigned:
      dbgs() << " " << printRegUnit(Unit, TRI) << "[P]";
      break;
    case regLiveIn:
      llvm_unreachable("Should not have regLiveIn in map");
    default: {
      // Any other value is the id of the virtual register occupying the unit.
      dbgs() << ' ' << printRegUnit(Unit, TRI) << '=' << printReg(VirtReg);
      LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
      assert(I != LiveVirtRegs.end() && "have LiveVirtRegs entry");
      if (I->LiveOut || I->Reloaded) {
        dbgs() << '[';
        if (I->LiveOut)
          dbgs() << 'O';
        if (I->Reloaded)
          dbgs() << 'R';
        dbgs() << ']';
      }
      assert(TRI->hasRegUnit(I->PhysReg, Unit) && "inverse mapping present");
      break;
    }
    }
  }
  dbgs() << '\n';
  // Check that LiveVirtRegs is the inverse.
  for (const LiveReg &LR : LiveVirtRegs) {
    Register VirtReg = LR.VirtReg;
    assert(VirtReg.isVirtual() && "Bad map key");
    MCPhysReg PhysReg = LR.PhysReg;
    if (PhysReg != 0) {
      assert(Register::isPhysicalRegister(PhysReg) && "mapped to physreg");
      for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
        assert(RegUnitStates[Unit] == VirtReg && "inverse map valid");
      }
    }
  }
}
1336#endif
1337
1338/// Count number of defs consumed from each register class by \p Reg
1339void RegAllocFastImpl::addRegClassDefCounts(
1340 MutableArrayRef<unsigned> RegClassDefCounts, Register Reg) const {
1341 assert(RegClassDefCounts.size() == TRI->getNumRegClasses());
1342
1343 if (Reg.isVirtual()) {
1344 if (!shouldAllocateRegister(Reg))
1345 return;
1346 const TargetRegisterClass *OpRC = MRI->getRegClass(Reg);
1347 for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
1348 RCIdx != RCIdxEnd; ++RCIdx) {
1349 const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
1350 // FIXME: Consider aliasing sub/super registers.
1351 if (OpRC->hasSubClassEq(IdxRC))
1352 ++RegClassDefCounts[RCIdx];
1353 }
1354
1355 return;
1356 }
1357
1358 for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
1359 RCIdx != RCIdxEnd; ++RCIdx) {
1360 const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
1361 for (MCRegAliasIterator Alias(Reg, TRI, true); Alias.isValid(); ++Alias) {
1362 if (IdxRC->contains(*Alias)) {
1363 ++RegClassDefCounts[RCIdx];
1364 break;
1365 }
1366 }
1367 }
1368}
1369
/// Compute \ref DefOperandIndexes so it contains the indices of "def" operands
/// that are to be allocated. Those are ordered in a way that small classes,
/// early clobbers and livethroughs are allocated first.
void RegAllocFastImpl::findAndSortDefOperandIndexes(const MachineInstr &MI) {
  DefOperandIndexes.clear();

  LLVM_DEBUG(dbgs() << "Need to assign livethroughs\n");
  for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (MO.readsReg()) {
      if (Reg.isPhysical()) {
        // Mark physreg reads so livethrough defs won't be assigned on top of
        // them.
        LLVM_DEBUG(dbgs() << "mark extra used: " << printReg(Reg, TRI) << '\n');
        markPhysRegUsedInInstr(Reg);
      }
    }

    if (MO.isDef() && Reg.isVirtual() && shouldAllocateRegister(Reg))
      DefOperandIndexes.push_back(I);
  }

  // Most instructions only have one virtual def, so there's no point in
  // computing the possible number of defs for every register class.
  if (DefOperandIndexes.size() <= 1)
    return;

  // Track number of defs which may consume a register from the class. This is
  // used to assign registers for possibly-too-small classes first. Example:
  // defs are eax, 3 * gr32_abcd, 2 * gr32 => we want to assign the gr32_abcd
  // registers first so that the gr32 don't use the gr32_abcd registers before
  // we assign these.
  SmallVector<unsigned> RegClassDefCounts(TRI->getNumRegClasses(), 0);

  for (const MachineOperand &MO : MI.all_defs())
    addRegClassDefCounts(RegClassDefCounts, MO.getReg());

  // Sort keys (in priority order): class exhausted by this instruction's
  // defs; early clobber / tied / full-register livethrough; operand index.
  llvm::sort(DefOperandIndexes, [&](unsigned I0, unsigned I1) {
    const MachineOperand &MO0 = MI.getOperand(I0);
    const MachineOperand &MO1 = MI.getOperand(I1);
    Register Reg0 = MO0.getReg();
    Register Reg1 = MO1.getReg();
    const TargetRegisterClass &RC0 = *MRI->getRegClass(Reg0);
    const TargetRegisterClass &RC1 = *MRI->getRegClass(Reg1);

    // Identify regclass that are easy to use up completely just in this
    // instruction.
    unsigned ClassSize0 = RegClassInfo.getOrder(&RC0).size();
    unsigned ClassSize1 = RegClassInfo.getOrder(&RC1).size();

    bool SmallClass0 = ClassSize0 < RegClassDefCounts[RC0.getID()];
    bool SmallClass1 = ClassSize1 < RegClassDefCounts[RC1.getID()];
    if (SmallClass0 > SmallClass1)
      return true;
    if (SmallClass0 < SmallClass1)
      return false;

    // Allocate early clobbers and livethrough operands first.
    bool Livethrough0 = MO0.isEarlyClobber() || MO0.isTied() ||
                        (MO0.getSubReg() == 0 && !MO0.isUndef());
    bool Livethrough1 = MO1.isEarlyClobber() || MO1.isTied() ||
                        (MO1.getSubReg() == 0 && !MO1.isUndef());
    if (Livethrough0 > Livethrough1)
      return true;
    if (Livethrough0 < Livethrough1)
      return false;

    // Tie-break rule: operand index.
    return I0 < I1;
  });
}
1442
1443// Returns true if MO is tied and the operand it's tied to is not Undef (not
1444// Undef is not the same thing as Def).
1445static bool isTiedToNotUndef(const MachineOperand &MO) {
1446 if (!MO.isTied())
1447 return false;
1448 const MachineInstr &MI = *MO.getParent();
1449 unsigned TiedIdx = MI.findTiedOperandIdx(MI.getOperandNo(&MO));
1450 const MachineOperand &TiedMO = MI.getOperand(TiedIdx);
1451 return !TiedMO.isUndef();
1452}
1453
/// Allocate all virtual register operands of a single instruction (the block
/// is walked bottom-up, so defs are processed before the uses feeding them).
void RegAllocFastImpl::allocateInstruction(MachineInstr &MI) {
  // The basic algorithm here is:
  // 1. Mark registers of def operands as free
  // 2. Allocate registers to use operands and place reload instructions for
  //    registers displaced by the allocation.
  //
  // However we need to handle some corner cases:
  // - pre-assigned defs and uses need to be handled before the other def/use
  //   operands are processed to avoid the allocation heuristics clashing with
  //   the pre-assignment.
  // - The "free def operands" step has to come last instead of first for tied
  //   operands and early-clobbers.

  // Start a new UsedInInstr generation for this instruction; entries stamped
  // with an older generation are implicitly stale.
  InstrGen += 2;
  // In the event we ever get more than 2**31 instructions...
  if (LLVM_UNLIKELY(InstrGen == 0)) {
    UsedInInstr.assign(UsedInInstr.size(), 0);
    InstrGen = 2;
  }
  RegMasks.clear();
  BundleVirtRegsMap.clear();

  // Scan for special cases; Apply pre-assigned register defs to state.
  bool HasPhysRegUse = false;
  bool HasRegMask = false;
  bool HasVRegDef = false;
  bool HasDef = false;
  bool HasEarlyClobber = false;
  bool NeedToAssignLiveThroughs = false;
  for (MachineOperand &MO : MI.operands()) {
    if (MO.isReg()) {
      Register Reg = MO.getReg();
      if (Reg.isVirtual()) {
        if (!shouldAllocateRegister(Reg))
          continue;
        if (MO.isDef()) {
          HasDef = true;
          HasVRegDef = true;
          if (MO.isEarlyClobber()) {
            HasEarlyClobber = true;
            NeedToAssignLiveThroughs = true;
          }
          // Tied (to a live input) and partial subregister defs also read
          // the register and need livethrough treatment.
          if (isTiedToNotUndef(MO) || (MO.getSubReg() != 0 && !MO.isUndef()))
            NeedToAssignLiveThroughs = true;
        }
      } else if (Reg.isPhysical()) {
        if (!MRI->isReserved(Reg)) {
          if (MO.isDef()) {
            HasDef = true;
            bool displacedAny = definePhysReg(MI, Reg);
            if (MO.isEarlyClobber())
              HasEarlyClobber = true;
            if (!displacedAny)
              MO.setIsDead(true);
          }
          if (MO.readsReg())
            HasPhysRegUse = true;
        }
      }
    } else if (MO.isRegMask()) {
      HasRegMask = true;
      RegMasks.push_back(MO.getRegMask());
    }
  }

  // Allocate virtreg defs.
  if (HasDef) {
    if (HasVRegDef) {
      // Note that Implicit MOs can get re-arranged by defineVirtReg(), so loop
      // multiple times to ensure no operand is missed.
      bool ReArrangedImplicitOps = true;

      // Special handling for early clobbers, tied operands or subregister defs:
      // Compared to "normal" defs these:
      // - Must not use a register that is pre-assigned for a use operand.
      // - In order to solve tricky inline assembly constraints we change the
      //   heuristic to figure out a good operand order before doing
      //   assignments.
      if (NeedToAssignLiveThroughs) {
        while (ReArrangedImplicitOps) {
          ReArrangedImplicitOps = false;
          findAndSortDefOperandIndexes(MI);
          for (unsigned OpIdx : DefOperandIndexes) {
            MachineOperand &MO = MI.getOperand(OpIdx);
            LLVM_DEBUG(dbgs() << "Allocating " << MO << '\n');
            Register Reg = MO.getReg();
            if (MO.isEarlyClobber() || isTiedToNotUndef(MO) ||
                (MO.getSubReg() && !MO.isUndef())) {
              ReArrangedImplicitOps = defineLiveThroughVirtReg(MI, OpIdx, Reg);
            } else {
              ReArrangedImplicitOps = defineVirtReg(MI, OpIdx, Reg);
            }
            // Implicit operands of MI were re-arranged,
            // re-compute DefOperandIndexes.
            if (ReArrangedImplicitOps)
              break;
          }
        }
      } else {
        // Assign virtual register defs.
        while (ReArrangedImplicitOps) {
          ReArrangedImplicitOps = false;
          for (MachineOperand &MO : MI.all_defs()) {
            Register Reg = MO.getReg();
            if (Reg.isVirtual()) {
              ReArrangedImplicitOps =
                  defineVirtReg(MI, MI.getOperandNo(&MO), Reg);
              if (ReArrangedImplicitOps)
                break;
            }
          }
        }
      }
    }

    // Free registers occupied by defs.
    // Iterate operands in reverse order, so we see the implicit super register
    // defs first (we added them earlier in case of <def,read-undef>).
    for (MachineOperand &MO : reverse(MI.all_defs())) {
      Register Reg = MO.getReg();

      // subreg defs don't free the full register. We left the subreg number
      // around as a marker in setPhysReg() to recognize this case here.
      if (Reg.isPhysical() && MO.getSubReg() != 0) {
        MO.setSubReg(0);
        continue;
      }

      assert((!MO.isTied() || !isClobberedByRegMasks(MO.getReg())) &&
             "tied def assigned to clobbered register");

      // Do not free tied operands and early clobbers.
      if (isTiedToNotUndef(MO) || MO.isEarlyClobber())
        continue;
      if (!Reg)
        continue;
      if (Reg.isVirtual()) {
        assert(!shouldAllocateRegister(Reg));
        continue;
      }

      if (MRI->isReserved(Reg))
        continue;
      freePhysReg(Reg);
      unmarkRegUsedInInstr(Reg);
    }
  }

  // Displace clobbered registers.
  if (HasRegMask) {
    assert(!RegMasks.empty() && "expected RegMask");
    // MRI bookkeeping.
    for (const auto *RM : RegMasks)
      MRI->addPhysRegsUsedFromRegMask(RM);

    // Displace clobbered registers.
    for (const LiveReg &LR : LiveVirtRegs) {
      MCPhysReg PhysReg = LR.PhysReg;
      if (PhysReg != 0 && isClobberedByRegMasks(PhysReg))
        displacePhysReg(MI, PhysReg);
    }
  }

  // Apply pre-assigned register uses to state.
  if (HasPhysRegUse) {
    for (MachineOperand &MO : MI.operands()) {
      if (!MO.isReg() || !MO.readsReg())
        continue;
      Register Reg = MO.getReg();
      if (!Reg.isPhysical())
        continue;
      if (MRI->isReserved(Reg))
        continue;
      if (!usePhysReg(MI, Reg))
        MO.setIsKill(true);
    }
  }

  // Allocate virtreg uses and insert reloads as necessary.
  // Implicit MOs can get moved/removed by useVirtReg(), so loop multiple
  // times to ensure no operand is missed.
  bool HasUndefUse = false;
  bool ReArrangedImplicitMOs = true;
  while (ReArrangedImplicitMOs) {
    ReArrangedImplicitMOs = false;
    for (MachineOperand &MO : MI.operands()) {
      if (!MO.isReg() || !MO.isUse())
        continue;
      Register Reg = MO.getReg();
      if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
        continue;

      // Undef uses are deferred to a separate pass below.
      if (MO.isUndef()) {
        HasUndefUse = true;
        continue;
      }

      // Populate MayLiveAcrossBlocks in case the use block is allocated before
      // the def block (removing the vreg uses).
      mayLiveIn(Reg);

      assert(!MO.isInternalRead() && "Bundles not supported");
      assert(MO.readsReg() && "reading use");
      ReArrangedImplicitMOs = useVirtReg(MI, MO, Reg);
      if (ReArrangedImplicitMOs)
        break;
    }
  }

  // Allocate undef operands. This is a separate step because in a situation
  // like ` = OP undef %X, %X` both operands need the same register assign
  // so we should perform the normal assignment first.
  if (HasUndefUse) {
    for (MachineOperand &MO : MI.all_uses()) {
      Register Reg = MO.getReg();
      if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
        continue;

      assert(MO.isUndef() && "Should only have undef virtreg uses left");
      allocVirtRegUndef(MO);
    }
  }

  // Free early clobbers.
  if (HasEarlyClobber) {
    for (MachineOperand &MO : reverse(MI.all_defs())) {
      if (!MO.isEarlyClobber())
        continue;
      assert(!MO.getSubReg() && "should be already handled in def processing");

      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      if (Reg.isVirtual()) {
        assert(!shouldAllocateRegister(Reg));
        continue;
      }
      assert(Reg.isPhysical() && "should have register assigned");

      // We sometimes get odd situations like:
      //   early-clobber %x0 = INSTRUCTION %x0
      // which is semantically questionable as the early-clobber should
      // apply before the use. But in practice we consider the use to
      // happen before the early clobber now. Don't free the early clobber
      // register in this case.
      if (MI.readsRegister(Reg, TRI))
        continue;

      freePhysReg(Reg);
    }
  }

  LLVM_DEBUG(dbgs() << "<< " << MI);
  // Identity copies are queued for deletion at the end of the block.
  if (MI.isCopy() && MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getNumOperands() == 2) {
    LLVM_DEBUG(dbgs() << "Mark identity copy for removal\n");
    Coalesced.push_back(&MI);
  }
}
1713
1714void RegAllocFastImpl::handleDebugValue(MachineInstr &MI) {
1715 // Ignore DBG_VALUEs that aren't based on virtual registers. These are
1716 // mostly constants and frame indices.
1717 assert(MI.isDebugValue() && "not a DBG_VALUE*");
1718 for (const auto &MO : MI.debug_operands()) {
1719 if (!MO.isReg())
1720 continue;
1721 Register Reg = MO.getReg();
1722 if (!Reg.isVirtual())
1723 continue;
1724 if (!shouldAllocateRegister(Reg))
1725 continue;
1726
1727 // Already spilled to a stackslot?
1728 int SS = StackSlotForVirtReg[Reg];
1729 if (SS != -1) {
1730 // Modify DBG_VALUE now that the value is in a spill slot.
1732 LLVM_DEBUG(dbgs() << "Rewrite DBG_VALUE for spilled memory: " << MI);
1733 continue;
1734 }
1735
1736 // See if this virtual register has already been allocated to a physical
1737 // register or spilled to a stack slot.
1738 LiveRegMap::iterator LRI = findLiveVirtReg(Reg);
1740 llvm::make_pointer_range(MI.getDebugOperandsForReg(Reg)));
1741
1742 if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
1743 // Update every use of Reg within MI.
1744 for (auto &RegMO : DbgOps)
1745 setPhysReg(MI, *RegMO, *LRI);
1746 } else {
1747 DanglingDbgValues[Reg].push_back(&MI);
1748 }
1749
1750 // If Reg hasn't been spilled, put this DBG_VALUE in LiveDbgValueMap so
1751 // that future spills of Reg will have DBG_VALUEs.
1752 LiveDbgValueMap[Reg].append(DbgOps.begin(), DbgOps.end());
1753 }
1754}
1755
1756void RegAllocFastImpl::handleBundle(MachineInstr &MI) {
1757 MachineBasicBlock::instr_iterator BundledMI = MI.getIterator();
1758 ++BundledMI;
1759 while (BundledMI->isBundledWithPred()) {
1760 for (MachineOperand &MO : BundledMI->operands()) {
1761 if (!MO.isReg())
1762 continue;
1763
1764 Register Reg = MO.getReg();
1765 if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
1766 continue;
1767
1768 DenseMap<Register, LiveReg>::iterator DI = BundleVirtRegsMap.find(Reg);
1769 assert(DI != BundleVirtRegsMap.end() && "Unassigned virtual register");
1770
1771 setPhysReg(MI, MO, DI->second);
1772 }
1773
1774 ++BundledMI;
1775 }
1776}
1777
/// Allocate all instructions of a single basic block, scanning it bottom-up.
void RegAllocFastImpl::allocateBasicBlock(MachineBasicBlock &MBB) {
  this->MBB = &MBB;
  LLVM_DEBUG(dbgs() << "\nAllocating " << MBB);

  // Invalidate cached instruction position indexes for the new block and
  // reset all register units to free.
  PosIndexes.unsetInitialized();
  RegUnitStates.assign(TRI->getNumRegUnits(), regFree);
  assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");

  // Registers live out of this block start out pre-assigned.
  for (const auto &LiveReg : MBB.liveouts())
    setPhysRegState(LiveReg.PhysReg, regPreAssigned);

  Coalesced.clear();

  // Traverse block in reverse order allocating instructions one by one.
  for (MachineInstr &MI : reverse(MBB)) {
    LLVM_DEBUG(dbgs() << "\n>> " << MI << "Regs:"; dumpState());

    // Special handling for debug values. Note that they are not allowed to
    // affect codegen of the other instructions in any way.
    if (MI.isDebugValue()) {
      handleDebugValue(MI);
      continue;
    }

    allocateInstruction(MI);

    // Once BUNDLE header is assigned registers, same assignments need to be
    // done for bundled MIs.
    if (MI.getOpcode() == TargetOpcode::BUNDLE) {
      handleBundle(MI);
    }
  }

  LLVM_DEBUG(dbgs() << "Begin Regs:"; dumpState());

  // Spill all physical registers holding virtual registers now.
  LLVM_DEBUG(dbgs() << "Loading live registers at begin of block.\n");
  reloadAtBegin(MBB);

  // Erase all the coalesced copies. We are delaying it until now because
  // LiveVirtRegs might refer to the instrs.
  for (MachineInstr *MI : Coalesced)
    MBB.erase(MI);
  NumCoalesced += Coalesced.size();

  // DBG_VALUEs whose vreg never received a register (and wasn't spilled)
  // have no valid location left; mark them undef.
  for (auto &UDBGPair : DanglingDbgValues) {
    for (MachineInstr *DbgValue : UDBGPair.second) {
      assert(DbgValue->isDebugValue() && "expected DBG_VALUE");
      // Nothing to do if the vreg was spilled in the meantime.
      if (!DbgValue->hasDebugOperandForReg(UDBGPair.first))
        continue;
      LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
                        << '\n');
      DbgValue->setDebugValueUndef();
    }
  }
  DanglingDbgValues.clear();

  LLVM_DEBUG(MBB.dump());
}
1838
1839bool RegAllocFastImpl::runOnMachineFunction(MachineFunction &MF) {
1840 LLVM_DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n"
1841 << "********** Function: " << MF.getName() << '\n');
1842 MRI = &MF.getRegInfo();
1843 const TargetSubtargetInfo &STI = MF.getSubtarget();
1844 TRI = STI.getRegisterInfo();
1845 TII = STI.getInstrInfo();
1846 MFI = &MF.getFrameInfo();
1847 MRI->freezeReservedRegs();
1848 RegClassInfo.runOnMachineFunction(MF);
1849 unsigned NumRegUnits = TRI->getNumRegUnits();
1850 InstrGen = 0;
1851 UsedInInstr.assign(NumRegUnits, 0);
1852
1853 // initialize the virtual->physical register map to have a 'null'
1854 // mapping for all virtual registers
1855 unsigned NumVirtRegs = MRI->getNumVirtRegs();
1856 StackSlotForVirtReg.resize(NumVirtRegs);
1857 LiveVirtRegs.setUniverse(NumVirtRegs);
1858 MayLiveAcrossBlocks.clear();
1859 MayLiveAcrossBlocks.resize(NumVirtRegs);
1860
1861 // Loop over all of the basic blocks, eliminating virtual register references
1862 for (MachineBasicBlock &MBB : MF)
1863 allocateBasicBlock(MBB);
1864
1865 if (ClearVirtRegs) {
1866 // All machine operands and other references to virtual registers have been
1867 // replaced. Remove the virtual registers.
1868 MRI->clearVirtRegs();
1869 }
1870
1871 StackSlotForVirtReg.clear();
1872 LiveDbgValueMap.clear();
1873 return true;
1874}
1875
1878 MFPropsModifier _(*this, MF);
1879 RegAllocFastImpl Impl(Opts.Filter, Opts.ClearVRegs);
1880 bool Changed = Impl.runOnMachineFunction(MF);
1881 if (!Changed)
1882 return PreservedAnalyses::all();
1884 PA.preserveSet<CFGAnalyses>();
1885 return PA;
1886}
1887
1889 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
1890 bool PrintFilterName = Opts.FilterName != "all";
1891 bool PrintNoClearVRegs = !Opts.ClearVRegs;
1892 bool PrintSemicolon = PrintFilterName && PrintNoClearVRegs;
1893
1894 OS << "regallocfast";
1895 if (PrintFilterName || PrintNoClearVRegs) {
1896 OS << '<';
1897 if (PrintFilterName)
1898 OS << "filter=" << Opts.FilterName;
1899 if (PrintSemicolon)
1900 OS << ';';
1901 if (PrintNoClearVRegs)
1902 OS << "no-clear-vregs";
1903 OS << '>';
1904 }
1905}
1906
1907FunctionPass *llvm::createFastRegisterAllocator() { return new RegAllocFast(); }
1908
1910 bool ClearVirtRegs) {
1911 return new RegAllocFast(Ftor, ClearVirtRegs);
1912}
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
MachineBasicBlock & MBB
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_UNLIKELY(EXPR)
Definition Compiler.h:336
This file defines the DenseMap class.
const HexagonInstrInfo * TII
#define _
IRTranslator LLVM IR MI
This file implements an indexed map.
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Register Reg
Register const TargetRegisterInfo * TRI
This file implements a map that provides insertion order iteration.
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
#define P(N)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static bool isCoalescable(const MachineInstr &MI)
static cl::opt< bool > IgnoreMissingDefs("rafast-ignore-missing-defs", cl::Hidden)
static bool dominates(InstrPosIndexes &PosIndexes, const MachineInstr &A, const MachineInstr &B)
static bool isTiedToNotUndef(const MachineOperand &MO)
static RegisterRegAlloc fastRegAlloc("fast", "fast register allocator", createFastRegisterAllocator)
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the SparseSet class derived from the version described in Briggs,...
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
const T & front() const
front - Get the first element.
Definition ArrayRef.h:150
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:142
bool test(unsigned Idx) const
Definition BitVector.h:461
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
Definition BitVector.h:341
void clear()
clear - Removes all bits from the bitvector.
Definition BitVector.h:335
BitVector & set()
Definition BitVector.h:351
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:165
iterator end()
Definition DenseMap.h:81
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
Store the specified register of the given register class to the specified stack frame index.
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
Load the specified register of the given register class from the specified stack frame index.
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
void resize(typename StorageT::size_type s)
Definition IndexedMap.h:58
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:33
An RAII based helper class to modify MachineFunctionProperties when running pass.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
iterator_range< liveout_iterator > liveouts() const
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator_range< livein_iterator > liveins() const
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
LLVM_ABI void dump() const
Instructions::iterator instr_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
LLVM_ABI bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
MachineInstrBundleIterator< MachineInstr > iterator
LLVM_ABI bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
LLVM_ABI int CreateSpillStackObject(uint64_t Size, Align Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
bool hasDebugOperandForReg(Register Reg) const
Returns whether this debug value has at least one debug operand with the register Reg.
bool isDebugValueList() const
void setDebugValueUndef()
Sets all register debug operands in this debug value instruction to be undef.
const MachineBasicBlock * getParent() const
bool isNonListDebugValue() const
bool isDebugValue() const
MachineOperand & getDebugOperand(unsigned Index)
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
LLVM_ABI void setIsRenamable(bool Val=true)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
void setIsDead(bool Val=true)
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
void setIsUndef(bool Val=true)
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI void runOnMachineFunction(const MachineFunction &MF, bool Rev=false)
runOnFunction - Prepare to answer questions about MF.
ArrayRef< MCPhysReg > getOrder(const TargetRegisterClass *RC) const
getOrder - Returns the preferred allocation order for RC.
Wrapper class representing virtual and physical registers.
Definition Register.h:19
unsigned virtRegIndex() const
Convert a virtual register number to a 0-based index.
Definition Register.h:82
constexpr bool isValid() const
Definition Register.h:107
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:74
constexpr unsigned id() const
Definition Register.h:95
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:78
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition SmallSet.h:175
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:181
void assign(size_type NumElts, ValueParamT Elt)
void push_back(const T &Elt)
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
ArrayRef< MCPhysReg > getRegisters() const
unsigned getID() const
Return the register class ID number.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
An efficient, type-erasing, non-owning reference to a callable.
self_iterator getIterator()
Definition ilist_node.h:134
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ Kill
The last use of a register.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
std::function< bool(const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, const Register Reg)> RegAllocFilterFunc
Filter function for register classes during regalloc.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
InstructionCost Cost
LLVM_ABI void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex, Register Reg)
Update a DBG_VALUE whose value has been spilled to FrameIndex.
LLVM_ABI Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1721
auto reverse(ContainerTy &&C)
Definition STLExtras.h:401
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1633
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
unsigned MCRegUnit
Register units are used to compute register aliasing.
Definition MCRegister.h:30
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
MutableArrayRef(T &OneElt) -> MutableArrayRef< T >
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI MachineInstr * buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I, const MachineInstr &Orig, int FrameIndex, Register SpillReg)
Clone a DBG_VALUE whose value has been spilled to FrameIndex.
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:363
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.