MipsInstructionSelector.cpp
//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// Mips.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterBankInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsMips.h"

#define DEBUG_TYPE "mips-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class MipsInstructionSelector : public InstructionSelector {
public:
  MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
                          const MipsRegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
  bool isRegInGprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool isRegInFprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool materialize32BitImm(Register DestReg, APInt Imm,
                           MachineIRBuilder &B) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  const TargetRegisterClass *
  getRegClassForTypeOnBank(Register Reg, MachineRegisterInfo &MRI) const;
  unsigned selectLoadStoreOpCode(MachineInstr &I,
                                 MachineRegisterInfo &MRI) const;
  bool buildUnalignedStore(MachineInstr &I, unsigned Opc,
                           MachineOperand &BaseAddr, unsigned Offset,
                           MachineMemOperand *MMO) const;
  bool buildUnalignedLoad(MachineInstr &I, unsigned Opc, Register Dest,
                          MachineOperand &BaseAddr, unsigned Offset,
                          Register TiedDest, MachineMemOperand *MMO) const;

  const MipsTargetMachine &TM;
  const MipsSubtarget &STI;
  const MipsInstrInfo &TII;
  const MipsRegisterInfo &TRI;
  const MipsRegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

MipsInstructionSelector::MipsInstructionSelector(
    const MipsTargetMachine &TM, const MipsSubtarget &STI,
    const MipsRegisterBankInfo &RBI)
    : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
      RBI(RBI),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

bool MipsInstructionSelector::isRegInGprb(Register Reg,
                                          MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::GPRBRegBankID;
}

bool MipsInstructionSelector::isRegInFprb(Register Reg,
                                          MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::FPRBRegBankID;
}

bool MipsInstructionSelector::selectCopy(MachineInstr &I,
                                         MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *RC = getRegClassForTypeOnBank(DstReg, MRI);
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}

const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
    Register Reg, MachineRegisterInfo &MRI) const {
  const LLT Ty = MRI.getType(Reg);
  const unsigned TySize = Ty.getSizeInBits();

  if (isRegInGprb(Reg, MRI)) {
    assert((Ty.isScalar() || Ty.isPointer()) && TySize == 32 &&
           "Register class not available for LLT, register bank combination");
    return &Mips::GPR32RegClass;
  }

  if (isRegInFprb(Reg, MRI)) {
    if (Ty.isScalar()) {
      assert((TySize == 32 || TySize == 64) &&
             "Register class not available for LLT, register bank combination");
      if (TySize == 32)
        return &Mips::FGR32RegClass;
      return STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    }
  }

  llvm_unreachable("Unsupported register bank.");
}

bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
                                                  MachineIRBuilder &B) const {
  assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
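  // Illustrative examples (not from the original source): 0x0000beef has its
  // high 16 bits clear, so the ORi case below applies; 0xbeef0000 has its low
  // 16 bits clear, so LUi applies; 0xfffffff0 (-16) fits in a signed 16-bit
  // immediate, so ADDiu applies; a value such as 0x0001abcd matches none of
  // these and falls through to the two-instruction LUi + ORi sequence.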
  // Ori zero extends immediate. Used for values with zeros in high 16 bits.
  if (Imm.getHiBits(16).isZero()) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Lui places immediate in high 16 bits and sets low 16 bits to zero.
  if (Imm.getLoBits(16).isZero()) {
    MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
                             .addImm(Imm.getHiBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // ADDiu sign extends immediate. Used for values with 1s in high 17 bits.
  if (Imm.isSignedIntN(16)) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Values that cannot be materialized with single immediate instruction.
  Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
  MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
                          .addImm(Imm.getHiBits(16).getLimitedValue());
  MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
                          .addImm(Imm.getLoBits(16).getLimitedValue());
  if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
    return false;
  if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
    return false;
  return true;
}

/// When I.getOpcode() is returned, we failed to select the MIPS instruction
/// opcode.
unsigned
MipsInstructionSelector::selectLoadStoreOpCode(MachineInstr &I,
                                               MachineRegisterInfo &MRI) const {
  const Register ValueReg = I.getOperand(0).getReg();
  const LLT Ty = MRI.getType(ValueReg);
  const unsigned TySize = Ty.getSizeInBits();
  const unsigned MemSizeInBytes =
      (*I.memoperands_begin())->getSize().getValue();
  unsigned Opc = I.getOpcode();
  const bool isStore = Opc == TargetOpcode::G_STORE;

  if (isRegInGprb(ValueReg, MRI)) {
    assert(((Ty.isScalar() && TySize == 32) ||
            (Ty.isPointer() && TySize == 32 && MemSizeInBytes == 4)) &&
           "Unsupported register bank, LLT, MemSizeInBytes combination");
    (void)TySize;
    if (isStore)
      switch (MemSizeInBytes) {
      case 4:
        return Mips::SW;
      case 2:
        return Mips::SH;
      case 1:
        return Mips::SB;
      default:
        return Opc;
      }
    else
      // Unspecified extending load is selected into zeroExtending load.
      switch (MemSizeInBytes) {
      case 4:
        return Mips::LW;
      case 2:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
      case 1:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
      default:
        return Opc;
      }
  }

  if (isRegInFprb(ValueReg, MRI)) {
    if (Ty.isScalar()) {
      assert(((TySize == 32 && MemSizeInBytes == 4) ||
              (TySize == 64 && MemSizeInBytes == 8)) &&
             "Unsupported register bank, LLT, MemSizeInBytes combination");

      if (MemSizeInBytes == 4)
        return isStore ? Mips::SWC1 : Mips::LWC1;

      if (STI.isFP64bit())
        return isStore ? Mips::SDC164 : Mips::LDC164;
      return isStore ? Mips::SDC1 : Mips::LDC1;
    }

    if (Ty.isVector()) {
      assert(STI.hasMSA() && "Vector instructions require target with MSA.");
      assert((TySize == 128 && MemSizeInBytes == 16) &&
             "Unsupported register bank, LLT, MemSizeInBytes combination");
      switch (Ty.getElementType().getSizeInBits()) {
      case 8:
        return isStore ? Mips::ST_B : Mips::LD_B;
      case 16:
        return isStore ? Mips::ST_H : Mips::LD_H;
      case 32:
        return isStore ? Mips::ST_W : Mips::LD_W;
      case 64:
        return isStore ? Mips::ST_D : Mips::LD_D;
      default:
        return Opc;
      }
    }
  }

  return Opc;
}

bool MipsInstructionSelector::buildUnalignedStore(
    MachineInstr &I, unsigned Opc, MachineOperand &BaseAddr, unsigned Offset,
    MachineMemOperand *MMO) const {
  MachineInstr *NewInst =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
          .add(I.getOperand(0))
          .add(BaseAddr)
          .addImm(Offset)
          .addMemOperand(MMO);
  if (!constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI))
    return false;
  return true;
}

bool MipsInstructionSelector::buildUnalignedLoad(
    MachineInstr &I, unsigned Opc, Register Dest, MachineOperand &BaseAddr,
    unsigned Offset, Register TiedDest, MachineMemOperand *MMO) const {
  MachineInstr *NewInst =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
          .addDef(Dest)
          .add(BaseAddr)
          .addImm(Offset)
          .addUse(TiedDest)
          .addMemOperand(*I.memoperands_begin());
  if (!constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI))
    return false;
  return true;
}

bool MipsInstructionSelector::select(MachineInstr &I) {

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  if (I.getOpcode() == Mips::G_MUL &&
      isRegInGprb(I.getOperand(0).getReg(), MRI)) {
    MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
                            .add(I.getOperand(0))
                            .add(I.getOperand(1))
                            .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*Mul, TII, TRI, RBI))
      return false;
    Mul->getOperand(3).setIsDead(true);
    Mul->getOperand(4).setIsDead(true);

    I.eraseFromParent();
    return true;
  }

  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineInstr *MI = nullptr;
  using namespace TargetOpcode;

  switch (I.getOpcode()) {
  case G_UMULH: {
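    // G_UMULH has no single MIPS32 instruction: the full 64-bit product is
    // computed into the HI/LO accumulator pair (ACC64) with PseudoMULTu, and
    // the high half is then read back into a GPR with PseudoMFHI.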
    Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    MachineInstr *PseudoMULTu, *PseudoMove;

    PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
                      .addDef(PseudoMULTuReg)
                      .add(I.getOperand(1))
                      .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(PseudoMULTuReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_PTR_ADD: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .add(I.getOperand(2));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    I.setDesc(TII.get(COPY));
    return selectCopy(I, MRI);
  }
  case G_FRAME_INDEX: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .addImm(0);
    break;
  }
  case G_BRJT: {
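    // The sequence built below scales the jump-table index by the entry size
    // (SLL), adds the jump-table address (ADDu), loads the branch target from
    // the table (LW), adds the global base register when compiling for PIC,
    // and finally jumps with PseudoIndirectBranch.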
    unsigned EntrySize =
        MF.getJumpTableInfo()->getEntrySize(MF.getDataLayout());
    assert(isPowerOf2_32(EntrySize) &&
           "Non-power-of-two jump-table entry size not supported.");

    Register JTIndex = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *SLL = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SLL))
                            .addDef(JTIndex)
                            .addUse(I.getOperand(2).getReg())
                            .addImm(Log2_32(EntrySize));
    if (!constrainSelectedInstRegOperands(*SLL, TII, TRI, RBI))
      return false;

    Register DestAddress = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                             .addDef(DestAddress)
                             .addUse(I.getOperand(0).getReg())
                             .addUse(JTIndex);
    if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
      return false;

    Register Dest = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LW =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
            .addDef(Dest)
            .addUse(DestAddress)
            .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_LO)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo(), MachineMemOperand::MOLoad, 4, Align(4)));
    if (!constrainSelectedInstRegOperands(*LW, TII, TRI, RBI))
      return false;

    if (MF.getTarget().isPositionIndependent()) {
      Register DestTmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      LW->getOperand(0).setReg(DestTmp);
      MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                               .addDef(Dest)
                               .addUse(DestTmp)
                               .addUse(MF.getInfo<MipsFunctionInfo>()
                                           ->getGlobalBaseRegForGlobalISel(MF));
      if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
        return false;
    }

    MachineInstr *Branch =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
            .addUse(Dest);
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_BRINDIRECT: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
             .add(I.getOperand(0));
    break;
  }
  case G_PHI: {
    const Register DestReg = I.getOperand(0).getReg();

    const TargetRegisterClass *DefRC = nullptr;
    if (DestReg.isPhysical())
      DefRC = TRI.getRegClass(DestReg);
    else
      DefRC = getRegClassForTypeOnBank(DestReg, MRI);

    I.setDesc(TII.get(TargetOpcode::PHI));
    return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
  }
  case G_STORE:
  case G_LOAD:
  case G_ZEXTLOAD:
  case G_SEXTLOAD: {
    auto MMO = *I.memoperands_begin();
    MachineOperand BaseAddr = I.getOperand(1);
    int64_t SignedOffset = 0;
    // Try to fold load/store + G_PTR_ADD + G_CONSTANT
    // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
    // %Addr:(p0) = G_PTR_ADD %BaseAddr, %SignedOffset
    // %LoadResult/%StoreSrc = load/store %Addr(p0)
    // into:
    // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate

    MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
    if (Addr->getOpcode() == G_PTR_ADD) {
      MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
      if (Offset->getOpcode() == G_CONSTANT) {
        APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
        if (OffsetValue.isSignedIntN(16)) {
          BaseAddr = Addr->getOperand(1);
          SignedOffset = OffsetValue.getSExtValue();
        }
      }
    }

    // Unaligned memory access
    if ((!MMO->getSize().hasValue() ||
         MMO->getAlign() < MMO->getSize().getValue()) &&
        !STI.systemSupportsUnalignedAccess()) {
      if (MMO->getSize() != 4 || !isRegInGprb(I.getOperand(0).getReg(), MRI))
        return false;

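      // The unaligned 32-bit access is split into a word-left/word-right
      // instruction pair (SWL/SWR for stores, LWL/LWR for loads). Each
      // instruction handles the bytes on its side of the word boundary, so the
      // pair together covers the four bytes at BaseAddr + SignedOffset, which
      // is why one instruction gets SignedOffset + 3 and the other SignedOffset.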
      if (I.getOpcode() == G_STORE) {
        if (!buildUnalignedStore(I, Mips::SWL, BaseAddr, SignedOffset + 3, MMO))
          return false;
        if (!buildUnalignedStore(I, Mips::SWR, BaseAddr, SignedOffset, MMO))
          return false;
        I.eraseFromParent();
        return true;
      }

      if (I.getOpcode() == G_LOAD) {
        Register ImplDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
            .addDef(ImplDef);
        Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        if (!buildUnalignedLoad(I, Mips::LWL, Tmp, BaseAddr, SignedOffset + 3,
                                ImplDef, MMO))
          return false;
        if (!buildUnalignedLoad(I, Mips::LWR, I.getOperand(0).getReg(),
                                BaseAddr, SignedOffset, Tmp, MMO))
          return false;
        I.eraseFromParent();
        return true;
      }

      return false;
    }

    const unsigned NewOpc = selectLoadStoreOpCode(I, MRI);
    if (NewOpc == I.getOpcode())
      return false;

    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
             .add(I.getOperand(0))
             .add(BaseAddr)
             .addImm(SignedOffset)
             .addMemOperand(MMO);
    break;
  }
  case G_UDIV:
  case G_UREM:
  case G_SDIV:
  case G_SREM: {
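    // All four opcodes share one pattern: PseudoSDIV/PseudoUDIV write both
    // results into the HI/LO accumulator pair, the quotient in LO and the
    // remainder in HI, so the follow-up move is PseudoMFLO for divisions and
    // PseudoMFHI for remainders.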
    Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
    bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;

    MachineInstr *PseudoDIV, *PseudoMove;
    PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
                        TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
                    .addDef(HILOReg)
                    .add(I.getOperand(1))
                    .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
                         TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(HILOReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_SELECT: {
    // Handle operands with pointer type.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
             .add(I.getOperand(0))
             .add(I.getOperand(2))
             .add(I.getOperand(1))
             .add(I.getOperand(3));
    break;
  }
  case G_UNMERGE_VALUES: {
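    // This handles splitting a 64-bit value that lives in an FPR into its two
    // 32-bit GPR halves: ExtractElementF64 with index 0 yields the low word
    // and index 1 the high word (ExtractElementF64_64 on FP64 subtargets).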
    if (I.getNumOperands() != 3)
      return false;
    Register Src = I.getOperand(2).getReg();
    Register Lo = I.getOperand(0).getReg();
    Register Hi = I.getOperand(1).getReg();
    if (!isRegInFprb(Src, MRI) ||
        !(isRegInGprb(Lo, MRI) && isRegInGprb(Hi, MRI)))
      return false;

    unsigned Opcode =
        STI.isFP64bit() ? Mips::ExtractElementF64_64 : Mips::ExtractElementF64;

    MachineInstr *ExtractLo = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                                  .addDef(Lo)
                                  .addUse(Src)
                                  .addImm(0);
    if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
      return false;

    MachineInstr *ExtractHi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                                  .addDef(Hi)
                                  .addUse(Src)
                                  .addImm(1);
    if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_IMPLICIT_DEF: {
    Register Dst = I.getOperand(0).getReg();
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
             .addDef(Dst);

    // Set class based on register bank, there can be fpr and gpr implicit def.
    MRI.setRegClass(Dst, getRegClassForTypeOnBank(Dst, MRI));
    break;
  }
  case G_CONSTANT: {
    MachineIRBuilder B(I);
    if (!materialize32BitImm(I.getOperand(0).getReg(),
                             I.getOperand(1).getCImm()->getValue(), B))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FCONSTANT: {
    const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
    APInt APImm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

    if (Size == 32) {
      Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRReg, APImm, B))
        return false;

      MachineInstrBuilder MTC1 =
          B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
      if (!MTC1.constrainAllUses(TII, TRI, RBI))
        return false;
    }
    if (Size == 64) {
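      // A 64-bit FP constant is not materialized with a single instruction:
      // its bit pattern is split into 32-bit high and low halves, each half is
      // materialized into a GPR, and BuildPairF64 (BuildPairF64_64 on FP64
      // subtargets) combines them into the destination FPR.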
      Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
        return false;
      if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
        return false;

      MachineInstrBuilder PairF64 = B.buildInstr(
          STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
          {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FABS: {
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    unsigned FABSOpcode =
        Size == 32 ? Mips::FABS_S
        : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
             .add(I.getOperand(0))
             .add(I.getOperand(1));
    break;
  }
  case G_FPTOSI: {
    unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    (void)ToSize;
    assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
    assert((FromSize == 32 || FromSize == 64) &&
           "Unsupported floating point size for G_FPTOSI");

    unsigned Opcode;
    if (FromSize == 32)
      Opcode = Mips::TRUNC_W_S;
    else
      Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
    Register ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
    MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                              .addDef(ResultInFPR)
                              .addUse(I.getOperand(1).getReg());
    if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(ResultInFPR);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_GLOBAL_VALUE: {
    const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
    if (MF.getTarget().isPositionIndependent()) {
      MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
                                .addDef(I.getOperand(0).getReg())
                                .addReg(MF.getInfo<MipsFunctionInfo>()
                                            ->getGlobalBaseRegForGlobalISel(MF))
                                .addGlobalAddress(GVal);
      // Global Values that don't have local linkage are handled differently
      // when they are part of call sequence. MipsCallLowering::lowerCall
      // creates G_GLOBAL_VALUE instruction as part of call sequence and adds
      // MO_GOT_CALL flag when Callee doesn't have local linkage.
      if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL);
      else
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT);
      LWGOT->addMemOperand(
          MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                      MachineMemOperand::MOLoad, 4, Align(4)));
      if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
        return false;

      if (GVal->hasLocalLinkage()) {
        Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        LWGOT->getOperand(0).setReg(LWGOTDef);

        MachineInstr *ADDiu =
            BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
                .addDef(I.getOperand(0).getReg())
                .addReg(LWGOTDef)
                .addGlobalAddress(GVal);
        ADDiu->getOperand(3).setTargetFlags(MipsII::MO_ABS_LO);
        if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
          return false;
      }
    } else {
      Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);

      MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
                              .addDef(LUiReg)
                              .addGlobalAddress(GVal);
      LUi->getOperand(2).setTargetFlags(MipsII::MO_ABS_HI);
      if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
        return false;

      MachineInstr *ADDiu =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
              .addDef(I.getOperand(0).getReg())
              .addUse(LUiReg)
              .addGlobalAddress(GVal);
      ADDiu->getOperand(3).setTargetFlags(MipsII::MO_ABS_LO);
      if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
        return false;
    }
    I.eraseFromParent();
    return true;
  }
  case G_JUMP_TABLE: {
    if (MF.getTarget().isPositionIndependent()) {
      MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
               .addDef(I.getOperand(0).getReg())
               .addReg(MF.getInfo<MipsFunctionInfo>()
                           ->getGlobalBaseRegForGlobalISel(MF))
               .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_GOT)
               .addMemOperand(MF.getMachineMemOperand(
                   MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad, 4,
                   Align(4)));
    } else {
      MI =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
              .addDef(I.getOperand(0).getReg())
              .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_HI);
    }
    break;
  }
  case G_ICMP: {
    struct Instr {
      unsigned Opcode;
      Register Def, LHS, RHS;
      Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
          : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};

      bool hasImm() const {
        if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
          return true;
        return false;
      }
    };

    SmallVector<struct Instr, 2> Instructions;
    Register ICMPReg = I.getOperand(0).getReg();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LHS = I.getOperand(2).getReg();
    Register RHS = I.getOperand(3).getReg();
    CmpInst::Predicate Cond =
        static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());

    switch (Cond) {
    case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
      break;
    case CmpInst::ICMP_UGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_ULT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SLT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    default:
      return false;
    }

    MachineIRBuilder B(I);
    for (const struct Instr &Instruction : Instructions) {
      MachineInstrBuilder MIB = B.buildInstr(
          Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});

      if (Instruction.hasImm())
        MIB.addImm(Instruction.RHS);
      else
        MIB.addUse(Instruction.RHS);

      if (!MIB.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FCMP: {
    unsigned MipsFCMPCondCode;
    bool isLogicallyNegated;
    switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
                I.getOperand(1).getPredicate())) {
    case CmpInst::FCMP_UNO: // Unordered
    case CmpInst::FCMP_ORD: // Ordered (OR)
      MipsFCMPCondCode = Mips::FCOND_UN;
      isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
      break;
    case CmpInst::FCMP_OEQ: // Equal
    case CmpInst::FCMP_UNE: // Not Equal (NEQ)
      MipsFCMPCondCode = Mips::FCOND_OEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
      break;
    case CmpInst::FCMP_UEQ: // Unordered or Equal
    case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
      MipsFCMPCondCode = Mips::FCOND_UEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
      break;
    case CmpInst::FCMP_OLT: // Ordered or Less Than
    case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
      MipsFCMPCondCode = Mips::FCOND_OLT;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
      break;
    case CmpInst::FCMP_ULT: // Unordered or Less Than
    case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
      MipsFCMPCondCode = Mips::FCOND_ULT;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
      break;
    case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
    case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
      MipsFCMPCondCode = Mips::FCOND_OLE;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
      break;
    case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
    case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
      MipsFCMPCondCode = Mips::FCOND_ULE;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
      break;
    default:
      return false;
    }

    // Default compare result in gpr register will be `true`.
    // We will move `false` (MIPS::Zero) to gpr result when fcmp gives false
    // using MOVF_I. When the original predicate (Cond) is the logically negated
    // MipsFCMPCondCode, the result is inverted, i.e. MOVT_I is used.
    unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;

    Register TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
        .addDef(TrueInReg)
        .addUse(Mips::ZERO)
        .addImm(1);

    unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    unsigned FCMPOpcode =
        Size == 32 ? Mips::FCMP_S32
        : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
    MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
                             .addUse(I.getOperand(2).getReg())
                             .addUse(I.getOperand(3).getReg())
                             .addImm(MipsFCMPCondCode);
    if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(Mips::ZERO)
                             .addUse(Mips::FCC0)
                             .addUse(TrueInReg);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FENCE: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SYNC)).addImm(0);
    break;
  }
  case G_VASTART: {
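    // G_VASTART is lowered by taking the address of the function's varargs
    // frame index with LEA_ADDiu and storing that address (SW) into the
    // va_list object pointed to by operand 0.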
    MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
    int FI = FuncInfo->getVarArgsFrameIndex();

    Register LeaReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LEA_ADDiu =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LEA_ADDiu))
            .addDef(LeaReg)
            .addFrameIndex(FI)
            .addImm(0);
    if (!constrainSelectedInstRegOperands(*LEA_ADDiu, TII, TRI, RBI))
      return false;

    MachineInstr *Store = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SW))
                              .addUse(LeaReg)
                              .addUse(I.getOperand(0).getReg())
                              .addImm(0);
    if (!constrainSelectedInstRegOperands(*Store, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  default:
    return false;
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
}

namespace llvm {
InstructionSelector *
createMipsInstructionSelector(const MipsTargetMachine &TM,
                              const MipsSubtarget &Subtarget,
                              const MipsRegisterBankInfo &RBI) {
  return new MipsInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm