// NOTE(review): fragmentary paste of the ARM fast-isel `Address` helper —
// original line numbers ("89", "92", ...) are embedded in the text and most
// interior lines are missing. Code left byte-identical; comments only.
// An address is either register-based (RegBase) or frame-index-based
// (FrameIndexBase); `Kind` selects which, defaulting to RegBase.
89 using BaseKind =
enum { RegBase, FrameIndexBase };
92 BaseKind Kind = RegBase;
// Trivial kind accessors.
104 void setKind(BaseKind K) { Kind =
K; }
105 BaseKind getKind()
const {
return Kind; }
106 bool isRegBase()
const {
return Kind == RegBase; }
107 bool isFIBase()
const {
return Kind == FrameIndexBase; }
// The asserts below guard the base-register / frame-index accessors
// (the accessor bodies themselves are not visible in this fragment).
110 assert(isRegBase() &&
"Invalid base register access!");
115 assert(isRegBase() &&
"Invalid base register access!");
120 assert(isFIBase() &&
"Invalid base frame index access!");
125 assert(isFIBase() &&
"Invalid base frame index access!");
// Offset setter; `Offset` is presumably an int member declared in a
// missing line — TODO confirm against the full file.
129 void setOffset(
int O) { Offset =
O; }
// NOTE(review): fragmentary declaration of the ARMFastISel class (final
// subclass of FastISel). Many member/parameter lines are missing from this
// paste; code left byte-identical, comments only.
133class ARMFastISel final :
public FastISel {
// Cached target objects, all derived from the current MachineFunction.
136 const ARMSubtarget *Subtarget;
138 const ARMBaseInstrInfo &TII;
139 const ARMTargetLowering &TLI;
140 const ARMBaseTargetMachine &TM;
141 ARMFunctionInfo *AFI;
145 LLVMContext *Context;
// Constructor: caches subtarget, instr-info, lowering, per-function info,
// and whether the function is Thumb (stored in `isThumb2`).
148 explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
149 const TargetLibraryInfo *libInfo)
150 : FastISel(funcInfo, libInfo),
151 Subtarget(&funcInfo.MF->getSubtarget<ARMSubtarget>()),
153 TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()),
155 AFI = funcInfo.
MF->
getInfo<ARMFunctionInfo>();
156 isThumb2 = AFI->isThumbFunction();
// Code-emission helpers: build a machine instruction of the given opcode
// in register class RC with register/immediate operands.
163 Register fastEmitInst_r(
unsigned MachineInstOpcode,
164 const TargetRegisterClass *RC,
Register Op0);
165 Register fastEmitInst_rr(
unsigned MachineInstOpcode,
166 const TargetRegisterClass *RC,
Register Op0,
168 Register fastEmitInst_ri(
unsigned MachineInstOpcode,
169 const TargetRegisterClass *RC,
Register Op0,
171 Register fastEmitInst_i(
unsigned MachineInstOpcode,
172 const TargetRegisterClass *RC, uint64_t Imm);
// FastISel interface overrides (backend entry points).
176 bool fastSelectInstruction(
const Instruction *
I)
override;
177 Register fastMaterializeConstant(
const Constant *
C)
override;
178 Register fastMaterializeAlloca(
const AllocaInst *AI)
override;
179 bool tryToFoldLoadIntoMI(MachineInstr *
MI,
unsigned OpNo,
180 const LoadInst *LI)
override;
181 bool fastLowerArguments()
override;
// TableGen-generated fast-isel helpers.
183#include "ARMGenFastISel.inc"
// Per-IR-opcode selection routines; each returns true on success.
187 bool SelectLoad(
const Instruction *
I);
188 bool SelectStore(
const Instruction *
I);
189 bool SelectBranch(
const Instruction *
I);
190 bool SelectIndirectBr(
const Instruction *
I);
191 bool SelectCmp(
const Instruction *
I);
192 bool SelectFPExt(
const Instruction *
I);
193 bool SelectFPTrunc(
const Instruction *
I);
194 bool SelectBinaryIntOp(
const Instruction *
I,
unsigned ISDOpcode);
195 bool SelectBinaryFPOp(
const Instruction *
I,
unsigned ISDOpcode);
196 bool SelectIToFP(
const Instruction *
I,
bool isSigned);
197 bool SelectFPToI(
const Instruction *
I,
bool isSigned);
198 bool SelectDiv(
const Instruction *
I,
bool isSigned);
199 bool SelectRem(
const Instruction *
I,
bool isSigned);
200 bool SelectCall(
const Instruction *
I,
const char *IntrMemName);
201 bool SelectIntrinsicCall(
const IntrinsicInst &
I);
202 bool SelectSelect(
const Instruction *
I);
203 bool SelectRet(
const Instruction *
I);
204 bool SelectTrunc(
const Instruction *
I);
205 bool SelectIntExt(
const Instruction *
I);
// Utility predicates and lowering helpers.
210 bool isPositionIndependent()
const;
211 bool isTypeLegal(
Type *Ty, MVT &VT);
212 bool isLoadTypeLegal(
Type *Ty, MVT &VT);
213 bool ARMEmitCmp(
const Value *Src1Value,
const Value *Src2Value,
216 MaybeAlign Alignment = std::nullopt,
bool isZExt =
true,
217 bool allocReg =
true);
219 MaybeAlign Alignment = std::nullopt);
220 bool ARMComputeAddress(
const Value *Obj,
Address &Addr);
221 void ARMSimplifyAddress(
Address &Addr, MVT VT,
bool useAM3);
222 bool ARMIsMemCpySmall(uint64_t Len);
223 bool ARMTryEmitSmallMemCpy(
Address Dest,
Address Src, uint64_t Len,
224 MaybeAlign Alignment);
225 Register ARMEmitIntExt(MVT SrcVT,
Register SrcReg, MVT DestVT,
bool isZExt);
// Constant materialization helpers, split by constant kind.
226 Register ARMMaterializeFP(
const ConstantFP *CFP, MVT VT);
227 Register ARMMaterializeInt(
const Constant *
C, MVT VT);
228 Register ARMMaterializeGV(
const GlobalValue *GV, MVT VT);
231 unsigned ARMSelectCallOp(
bool UseReg);
232 Register ARMLowerPICELF(
const GlobalValue *GV, MVT VT);
234 const TargetLowering *getTargetLowering() {
return &TLI; }
// Call lowering: calling-convention selection, argument marshalling,
// libcall support, and post-call result handling.
238 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
241 bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
242 SmallVectorImpl<Register> &ArgRegs,
243 SmallVectorImpl<MVT> &ArgVTs,
244 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
245 SmallVectorImpl<Register> &RegArgs,
249 Register getLibcallReg(
const Twine &Name);
250 bool FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
251 const Instruction *
I, CallingConv::ID CC,
252 unsigned &NumBytes,
bool isVarArg);
253 bool ARMEmitLibcall(
const Instruction *
I, RTLIB::Libcall
Call);
// Predication helpers used when building ARM machine instructions.
257 bool isARMNEONPred(
const MachineInstr *
MI);
258 bool DefinesOptionalPredicate(MachineInstr *
MI,
bool *CPSR);
259 const MachineInstrBuilder &AddOptionalDefs(
const MachineInstrBuilder &MIB);
260 void AddLoadStoreOperands(MVT VT,
Address &Addr,
261 const MachineInstrBuilder &MIB,
// Scans MI's operands for a CPSR register definition when the instruction
// has an optional def. NOTE(review): interior lines are missing from this
// paste (return statements not visible); code left byte-identical.
270bool ARMFastISel::DefinesOptionalPredicate(
MachineInstr *
MI,
bool *CPSR) {
// Bail out early when the opcode carries no optional def at all.
271 if (!
MI->hasOptionalDef())
275 for (
const MachineOperand &MO :
MI->operands()) {
276 if (!MO.isReg() || !MO.isDef())
continue;
// A def of ARM::CPSR is the optional predicate def being looked for.
277 if (MO.getReg() == ARM::CPSR)
// Determines whether MI should be treated as predicable by inspecting its
// MCInstrDesc operand info. NOTE(review): fragmentary — the condition
// guarding the early `return MI->isPredicable()` is not visible.
283bool ARMFastISel::isARMNEONPred(
const MachineInstr *
MI) {
284 const MCInstrDesc &MCID =
MI->getDesc();
289 return MI->isPredicable();
// Otherwise check whether any declared operand is a predicate operand.
291 for (
const MCOperandInfo &opInfo : MCID.
operands())
292 if (opInfo.isPredicate())
// Appends predicate / optional-def operands to a just-built instruction,
// consulting isARMNEONPred and DefinesOptionalPredicate. Returns the same
// builder for chaining. NOTE(review): operand-append lines are missing.
303const MachineInstrBuilder &
304ARMFastISel::AddOptionalDefs(
const MachineInstrBuilder &MIB) {
305 MachineInstr *
MI = &*MIB;
310 if (isARMNEONPred(
MI))
// CPSR is presumably set by DefinesOptionalPredicate via the out-param —
// its declaration line is not visible in this fragment.
316 if (DefinesOptionalPredicate(
MI, &CPSR))
// Emits an instruction with one register operand into a fresh result
// register of class RC. Three paths are visible: opcode defines a result
// directly, or it is built then the result is COPY'd out.
321Register ARMFastISel::fastEmitInst_r(
unsigned MachineInstOpcode,
322 const TargetRegisterClass *RC,
324 Register ResultReg = createResultReg(RC);
325 const MCInstrDesc &
II =
TII.get(MachineInstOpcode);
// Fast path: the target opcode itself produces at least one def.
330 if (
II.getNumDefs() >= 1) {
331 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II,
334 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
// No def on the opcode: materialize the result with an explicit COPY.
336 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
337 TII.get(TargetOpcode::COPY), ResultReg)
// Emits an instruction with two register operands; same def-vs-COPY
// structure as fastEmitInst_r. Fragmentary: operand-add lines missing.
343Register ARMFastISel::fastEmitInst_rr(
unsigned MachineInstOpcode,
344 const TargetRegisterClass *RC,
346 Register ResultReg = createResultReg(RC);
347 const MCInstrDesc &
II =
TII.get(MachineInstOpcode);
354 if (
II.getNumDefs() >= 1) {
356 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II, ResultReg)
360 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
// Result copied out when the opcode has no def of its own.
363 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
364 TII.get(TargetOpcode::COPY), ResultReg)
// Emits an instruction with a register and an immediate operand; mirrors
// fastEmitInst_rr's def-vs-COPY structure. Fragmentary paste.
370Register ARMFastISel::fastEmitInst_ri(
unsigned MachineInstOpcode,
371 const TargetRegisterClass *RC,
373 Register ResultReg = createResultReg(RC);
374 const MCInstrDesc &
II =
TII.get(MachineInstOpcode);
379 if (
II.getNumDefs() >= 1) {
381 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II, ResultReg)
385 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
388 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
389 TII.get(TargetOpcode::COPY), ResultReg)
// Emits an instruction with a single immediate operand; same structure as
// the other fastEmitInst_* helpers. Fragmentary paste.
395Register ARMFastISel::fastEmitInst_i(
unsigned MachineInstOpcode,
396 const TargetRegisterClass *RC,
398 Register ResultReg = createResultReg(RC);
399 const MCInstrDesc &
II =
TII.get(MachineInstOpcode);
401 if (
II.getNumDefs() >= 1) {
402 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II,
405 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
407 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
408 TII.get(TargetOpcode::COPY), ResultReg)
// NOTE(review): body fragments of two GPR<->FP move helpers whose headers
// are missing from this view: VMOVSR moves a core register into a
// single-precision FP register, VMOVRS the reverse.
421 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
422 TII.get(ARM::VMOVSR), MoveReg)
432 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
433 TII.get(ARM::VMOVRS), MoveReg)
// Materializes a floating-point constant: requires VFP2; falls back to a
// constant-pool load (VLDRD for f64, VLDRS for f32). Fragmentary paste.
441Register ARMFastISel::ARMMaterializeFP(
const ConstantFP *CFP, MVT VT) {
443 bool is64bit = VT == MVT::f64;
458 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
// Without a VFP2 base there is no FP register file to load into.
464 if (!Subtarget->hasVFP2Base())
return false;
// Place the constant in the machine constant pool and load it.
468 unsigned Idx = MCP.getConstantPoolIndex(
cast<Constant>(CFP), Alignment);
470 unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
474 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(
Opc), DestReg)
// Materializes an integer constant (i1..i32): MOVi16 when it fits, MVNi
// for negatives on v6T2+, movt pair when available, otherwise a
// constant-pool load (t2LDRpci / LDRcp). Fragmentary paste.
480Register ARMFastISel::ARMMaterializeInt(
const Constant *
C, MVT VT) {
481 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
// 16-bit immediate move path.
488 unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
489 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
491 Register ImmReg = createResultReg(RC);
492 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
// Negative i32 on v6T2+: use MVN of the bitwise complement.
499 if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->
isNegative()) {
504 unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
505 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
507 Register ImmReg = createResultReg(RC);
508 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
516 if (Subtarget->useMovt())
// Fallback: load the constant from the constant pool.
527 Align Alignment =
DL.getPrefTypeAlign(
C->getType());
528 unsigned Idx = MCP.getConstantPoolIndex(
C, Alignment);
531 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
532 TII.get(ARM::t2LDRpci), ResultReg)
537 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
538 TII.get(ARM::LDRcp), ResultReg)
// Header of the PIC predicate; the body (presumably querying the target
// machine's relocation model — TODO confirm) is missing from this paste.
545bool ARMFastISel::isPositionIndependent()
const {
// Materializes a global value's address. Visible strategies: movw/movt
// (MOVi32imm or *_ga_pcrel for PIC), ELF-PIC delegation to ARMLowerPICELF,
// constant-pool load plus PICLDR/PICADD fixup, and an extra indirection
// load (t2LDRi12/LDRi12) for GOT/MachO-indirect symbols. Fragmentary.
549Register ARMFastISel::ARMMaterializeGV(
const GlobalValue *GV, MVT VT) {
// ROPI/RWPI are unsupported here.
555 if (Subtarget->isROPI() || Subtarget->isRWPI())
558 bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
559 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
561 Register DestReg = createResultReg(RC);
// Thread-locals only handled for MachO targets here.
566 if (!Subtarget->isTargetMachO() && IsThreadLocal)
569 bool IsPositionIndependent = isPositionIndependent();
// Path 1: movw/movt immediate materialization.
572 if (Subtarget->useMovt() &&
573 (Subtarget->isTargetMachO() || !IsPositionIndependent)) {
575 unsigned char TF = 0;
576 if (Subtarget->isTargetMachO())
579 if (IsPositionIndependent)
580 Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
582 Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
583 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
// Path 2: ELF + PIC handled by a dedicated helper.
589 if (Subtarget->isTargetELF() && IsPositionIndependent)
590 return ARMLowerPICELF(GV, VT);
// Path 3: constant-pool load; PCAdj is the pc-relative fixup distance
// (4 for Thumb, 8 for ARM) when position independent.
593 unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
598 unsigned Idx = MCP.getConstantPoolIndex(CPV, Alignment);
601 MachineInstrBuilder MIB;
603 unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
604 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(
Opc),
606 if (IsPositionIndependent)
608 AddOptionalDefs(MIB);
612 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
613 TII.get(ARM::LDRcp), DestReg)
616 AddOptionalDefs(MIB);
// ARM-mode PIC needs an explicit pc-relative add/load fixup.
618 if (IsPositionIndependent) {
619 unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
622 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
623 MIMD,
TII.get(
Opc), NewDestReg)
626 AddOptionalDefs(MIB);
// Extra load through the GOT / MachO non-lazy pointer when required.
632 if ((Subtarget->isTargetELF() && Subtarget->isGVInGOT(GV)) ||
633 (Subtarget->isTargetMachO() && IsIndirect)) {
634 MachineInstrBuilder MIB;
637 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
638 TII.get(ARM::t2LDRi12), NewDestReg)
642 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
643 TII.get(ARM::LDRi12), NewDestReg)
646 DestReg = NewDestReg;
647 AddOptionalDefs(MIB);
// FastISel override: dispatches constant materialization by kind —
// FP constants, global values, then integer constants. The dyn_cast
// lines that bind CFP/GV are missing from this paste.
653Register ARMFastISel::fastMaterializeConstant(
const Constant *
C) {
662 return ARMMaterializeFP(CFP, VT);
664 return ARMMaterializeGV(GV, VT);
666 return ARMMaterializeInt(
C, VT);
// FastISel override: materializes the address of a static alloca as a
// frame-index ADD (t2ADDri / ADDri). Only statically-known allocas with a
// legal load type are handled. Fragmentary paste.
673Register ARMFastISel::fastMaterializeAlloca(
const AllocaInst *AI) {
// Only allocas assigned a fixed frame slot qualify.
675 if (!FuncInfo.StaticAllocaMap.count(AI))
679 if (!isLoadTypeLegal(AI->
getType(), VT))
682 DenseMap<const AllocaInst*, int>::iterator
SI =
683 FuncInfo.StaticAllocaMap.find(AI);
687 if (SI != FuncInfo.StaticAllocaMap.end()) {
688 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
690 Register ResultReg = createResultReg(RC);
693 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
// Returns whether Ty maps to a simple legal MVT; on success VT is set
// (the assignment line is missing from this paste).
703bool ARMFastISel::isTypeLegal(
Type *Ty, MVT &VT) {
707 if (evt == MVT::Other || !evt.
isSimple())
return false;
// Like isTypeLegal, but additionally accepts the sub-word types
// i1/i8/i16 that loads can extend from.
715bool ARMFastISel::isLoadTypeLegal(
Type *Ty, MVT &VT) {
716 if (isTypeLegal(Ty, VT))
return true;
720 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
// Computes an Address (base register or frame index + offset) for the IR
// value Obj, recursing through BitCast/IntToPtr/PtrToInt/GEP and handling
// static allocas as frame indices. Heavily fragmentary; code untouched.
727bool ARMFastISel::ARMComputeAddress(
const Value *Obj,
Address &Addr) {
729 const User *
U =
nullptr;
730 unsigned Opcode = Instruction::UserOp1;
// Only fold an instruction's addressing if it is a static alloca or it
// lives in the current block.
734 if (FuncInfo.StaticAllocaMap.count(
static_cast<const AllocaInst *
>(Obj)) ||
735 FuncInfo.getMBB(
I->getParent()) == FuncInfo.MBB) {
736 Opcode =
I->getOpcode();
740 Opcode =
C->getOpcode();
// Non-default address spaces (> 255) are rejected.
745 if (Ty->getAddressSpace() > 255)
753 case Instruction::BitCast:
// Casts that don't change the pointer value: look through them.
755 return ARMComputeAddress(
U->getOperand(0), Addr);
756 case Instruction::IntToPtr:
760 return ARMComputeAddress(
U->getOperand(0), Addr);
762 case Instruction::PtrToInt:
765 return ARMComputeAddress(
U->getOperand(0), Addr);
767 case Instruction::GetElementPtr: {
// Accumulate a constant byte offset across GEP indices.
769 int TmpOffset = Addr.getOffset();
775 i != e; ++i, ++GTI) {
778 const StructLayout *SL =
DL.getStructLayout(STy);
789 if (canFoldAddIntoGEP(U,
Op)) {
// Dynamic index that can't be folded: give up on this GEP.
799 goto unsupported_gep;
805 Addr.setOffset(TmpOffset);
806 if (ARMComputeAddress(
U->getOperand(0), Addr))
return true;
814 case Instruction::Alloca: {
816 DenseMap<const AllocaInst*, int>::iterator
SI =
817 FuncInfo.StaticAllocaMap.find(AI);
818 if (SI != FuncInfo.StaticAllocaMap.end()) {
// Static alloca: address is its frame index.
819 Addr.setKind(Address::FrameIndexBase);
820 Addr.setFI(
SI->second);
// Fallback: materialize Obj into a register and use it as the base.
829 Addr.setReg(getRegForValue(Obj));
830 return Addr.getReg();
// Rewrites Addr into a form encodable by the chosen load/store: checks the
// offset against the per-mode immediate ranges (12-bit, AM3 8-bit, ...),
// converts frame-index bases to a register via ADDri, and otherwise folds
// the offset with an ADD. Fragmentary paste.
833void ARMFastISel::ARMSimplifyAddress(
Address &Addr, MVT VT,
bool useAM3) {
834 bool needsLowering =
false;
// 12-bit unsigned offset check.
843 needsLowering = ((Addr.getOffset() & 0xfff) != Addr.getOffset());
// Thumb2 with v6T2 can still encode small negative offsets (-255..-1).
845 if (needsLowering && isThumb2)
846 needsLowering = !(Subtarget->hasV6T2Ops() && Addr.getOffset() < 0 &&
847 Addr.getOffset() > -256);
// AM3 addressing: +/-255 range.
850 needsLowering = (Addr.getOffset() > 255 || Addr.getOffset() < -255);
// 8-bit offset check.
856 needsLowering = ((Addr.getOffset() & 0xff) != Addr.getOffset());
// Frame-index base that can't be encoded: compute the address into a
// register with an explicit ADD.
863 if (needsLowering && Addr.isFIBase()) {
864 const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
866 Register ResultReg = createResultReg(RC);
867 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
869 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(
Opc), ResultReg)
872 Addr.setKind(Address::RegBase);
873 Addr.setReg(ResultReg);
// Otherwise fold the remaining offset into the base register via ADD.
879 Addr.setReg(fastEmit_ri_(MVT::i32,
ISD::ADD, Addr.getReg(),
880 Addr.getOffset(), MVT::i32));
// Appends the address operands (frame index + MMO, or base register +
// immediate) to a load/store being built, then predicates it. The VLDR/
// VSTR path scales the byte offset to words (/4). Fragmentary paste.
885void ARMFastISel::AddLoadStoreOperands(MVT VT,
Address &Addr,
886 const MachineInstrBuilder &MIB,
// FP load/store immediates are in words, not bytes.
892 Addr.setOffset(Addr.getOffset() / 4);
895 if (Addr.isFIBase()) {
896 int FI = Addr.getFI();
897 int Offset = Addr.getOffset();
// Attach a machine memory operand so later passes know size/alignment.
898 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
900 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
// AM3 encodes the sign in bit 8 of the immediate (0x100 | -offset).
907 int Imm = (Addr.getOffset() < 0) ? (0x100 | -Addr.getOffset())
912 MIB.
addImm(Addr.getOffset());
917 MIB.
addReg(Addr.getReg());
922 int Imm = (Addr.getOffset() < 0) ? (0x100 | -Addr.getOffset())
927 MIB.
addImm(Addr.getOffset());
930 AddOptionalDefs(MIB);
// Emits a load of VT from Addr into ResultReg, choosing the opcode by
// value type, sign/zero extension, offset range (i8 forms for small
// negative offsets on v6T2), and alignment (falling back to integer load
// + VMOVSR for under-aligned f32). Fragmentary paste; code untouched.
933bool ARMFastISel::ARMEmitLoad(MVT VT,
Register &ResultReg,
Address &Addr,
934 MaybeAlign Alignment,
bool isZExt,
938 bool needVMOV =
false;
939 const TargetRegisterClass *RC;
940 // switch over VT (switch header missing from this paste)
942 default:
return false;
// Byte loads: t2LDRBi8 for small negative offsets on v6T2, else i12 form.
946 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
947 Subtarget->hasV6T2Ops())
948 Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
950 Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
959 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
// Halfword loads require 2-byte alignment unless unaligned mem is allowed.
962 if (Alignment && *Alignment <
Align(2) &&
963 !Subtarget->allowsUnalignedMem())
967 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
968 Subtarget->hasV6T2Ops())
969 Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
971 Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
973 Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
976 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
// Word loads: 4-byte alignment requirement mirrors the above.
979 if (Alignment && *Alignment <
Align(4) &&
980 !Subtarget->allowsUnalignedMem())
984 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
985 Subtarget->hasV6T2Ops())
992 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
// f32: needs VFP2; under-aligned loads go through an integer load + VMOV.
995 if (!Subtarget->hasVFP2Base())
return false;
997 if (Alignment && *Alignment <
Align(4)) {
1000 Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
1001 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
// f64: needs VFP2 and 4-byte alignment.
1009 if (!Subtarget->hasVFP2Base())
return false;
1012 if (Alignment && *Alignment <
Align(4))
1020 ARMSimplifyAddress(Addr, VT, useAM3);
1024 ResultReg = createResultReg(RC);
1025 assert(ResultReg.
isVirtual() &&
"Expected an allocated virtual register.");
1026 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1027 TII.get(
Opc), ResultReg);
// Transfer the integer-loaded bits into an FP register when needed.
1034 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1035 TII.get(ARM::VMOVSR), MoveReg)
// Selects an IR load: rejects swifterror sources, verifies the load type
// is legal, computes the address, then (in missing lines) calls
// ARMEmitLoad and records the result. Fragmentary paste.
1042bool ARMFastISel::SelectLoad(
const Instruction *
I) {
1047 const Value *SV =
I->getOperand(0);
// swifterror values may not be loaded through fast-isel here.
1052 if (Arg->hasSwiftErrorAttr())
1057 if (Alloca->isSwiftError())
1064 if (!isLoadTypeLegal(
I->getType(), VT))
1069 if (!ARMComputeAddress(
I->getOperand(0), Addr))
return false;
1074 updateValueMap(
I, ResultReg);
// NOTE(review): body of a store-emission helper whose signature start is
// missing from this paste (presumably ARMEmitStore — TODO confirm).
// Chooses a store opcode by value type, offset range and alignment,
// masking i1 to a byte via AND, and moving FP values through VMOVRS when
// under-aligned. Code untouched.
1079 MaybeAlign Alignment) {
1081 bool useAM3 =
false;
1084 default:
return false;
// i1 path: mask the source down to one bit with AND before storing.
1086 Register Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
1087 : &ARM::GPRRegClass);
1088 unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
1090 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
// Byte stores: i8-offset form for small negatives on v6T2, else i12.
1098 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
1099 Subtarget->hasV6T2Ops())
1100 StrOpc = ARM::t2STRBi8;
1102 StrOpc = ARM::t2STRBi12;
1104 StrOpc = ARM::STRBi12;
// Halfword stores need 2-byte alignment unless unaligned mem is allowed.
1108 if (Alignment && *Alignment <
Align(2) &&
1109 !Subtarget->allowsUnalignedMem())
1113 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
1114 Subtarget->hasV6T2Ops())
1115 StrOpc = ARM::t2STRHi8;
1117 StrOpc = ARM::t2STRHi12;
// Word stores: 4-byte alignment requirement.
1124 if (Alignment && *Alignment <
Align(4) &&
1125 !Subtarget->allowsUnalignedMem())
1129 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
1130 Subtarget->hasV6T2Ops())
1131 StrOpc = ARM::t2STRi8;
1133 StrOpc = ARM::t2STRi12;
1135 StrOpc = ARM::STRi12;
// f32: needs VFP2; under-aligned goes via VMOVRS + integer store.
1139 if (!Subtarget->hasVFP2Base())
return false;
1141 if (Alignment && *Alignment <
Align(4)) {
1143 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1144 TII.get(ARM::VMOVRS), MoveReg)
1148 StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
1150 StrOpc = ARM::VSTRS;
// f64: needs VFP2 and 4-byte alignment; uses VSTRD.
1155 if (!Subtarget->hasVFP2Base())
return false;
1158 if (Alignment && *Alignment <
Align(4))
1161 StrOpc = ARM::VSTRD;
1165 ARMSimplifyAddress(Addr, VT, useAM3);
1169 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
// Selects an IR store: rejects swifterror destinations, checks the stored
// type, materializes the source value, and computes the destination
// address. Fragmentary paste.
1176bool ARMFastISel::SelectStore(
const Instruction *
I) {
1177 Value *Op0 =
I->getOperand(0);
1184 const Value *PtrV =
I->getOperand(1);
// swifterror pointers may not be stored through here.
1189 if (Arg->hasSwiftErrorAttr())
1194 if (Alloca->isSwiftError())
1201 if (!isLoadTypeLegal(
I->getOperand(0)->getType(), VT))
1205 SrcReg = getRegForValue(Op0);
1211 if (!ARMComputeAddress(
I->getOperand(1), Addr))
// Selects a conditional/unconditional branch. Visible sub-cases: fold a
// same-block, single-use icmp into Bcc/t2Bcc; fold a single-use trunc via
// TST of bit 0; constant condition becomes a direct branch; generic
// fallback tests the condition register with TST. Fragmentary paste.
1266bool ARMFastISel::SelectBranch(
const Instruction *
I) {
1269 MachineBasicBlock *FBB = FuncInfo.getMBB(BI->
getSuccessor(1));
// Case: condition is a compare used only by this branch, in this block.
1276 if (CI->
hasOneUse() && (CI->getParent() ==
I->getParent())) {
// Invert / skip the branch when the true block is the layout successor.
1280 if (FuncInfo.MBB->isLayoutSuccessor(
TBB)) {
1294 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1295 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(BrOpc))
// Case: condition is a single-use truncate — test bit 0 directly.
1302 if (TI->hasOneUse() && TI->getParent() ==
I->getParent() &&
1303 (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
1304 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1305 Register OpReg = getRegForValue(TI->getOperand(0));
1307 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1312 if (FuncInfo.MBB->isLayoutSuccessor(
TBB)) {
1317 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1318 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(BrOpc))
1324 }
// Case: constant condition — branch unconditionally to the known target.
else if (
const ConstantInt *CI =
1328 fastEmitBranch(Target, MIMD.getDL());
// Generic fallback: TST the materialized condition and branch on it.
1343 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1346 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(TstOpc))
1351 if (FuncInfo.MBB->isLayoutSuccessor(
TBB)) {
1356 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1357 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(BrOpc))
// Selects an indirectbr: materializes the target address, emits
// tBRIND/BX, and registers every successor block on the current MBB.
1363bool ARMFastISel::SelectIndirectBr(
const Instruction *
I) {
1364 Register AddrReg = getRegForValue(
I->getOperand(0));
1368 unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
// BX requires v4T in ARM mode.
1369 assert(isThumb2 || Subtarget->hasV4TOps());
1371 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
// CFG bookkeeping: every IR successor becomes an MBB successor.
1375 for (
const BasicBlock *SuccBB :
IB->successors())
1376 FuncInfo.MBB->addSuccessor(FuncInfo.getMBB(SuccBB));
// Emits a compare of Src1Value against Src2Value, selecting VCMP* for FP
// (FMSTAT to transfer flags), CMP/CMN for integers, with immediate forms
// (CMN for negated negative immediates) and sub-word extension to i32.
// Fragmentary paste; code untouched.
1381bool ARMFastISel::ARMEmitCmp(
const Value *Src1Value,
const Value *Src2Value,
1385 if (!SrcEVT.
isSimple())
return false;
// FP compares require VFP2 (and FP64 for doubles).
1388 if (Ty->
isFloatTy() && !Subtarget->hasVFP2Base())
1391 if (Ty->
isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))
1397 bool UseImm =
false;
1398 bool isNegativeImm =
false;
// Integer constant RHS: try to use an immediate compare.
1402 if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
1404 const APInt &CIVal = ConstInt->getValue();
// INT_MIN can't be negated, so it never takes the CMN path.
1409 if (Imm < 0 && Imm != (
int)0x80000000) {
1410 isNegativeImm =
true;
// FP: only +0.0 can be compared against as an immediate (VCMPZ*).
1417 if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
1418 if (ConstFP->isZero() && !ConstFP->isNegative())
1424 bool needsExt =
false;
1426 default:
return false;
1430 CmpOpc = UseImm ? ARM::VCMPZS : ARM::VCMPS;
1434 CmpOpc = UseImm ? ARM::VCMPZD : ARM::VCMPD;
// Integer compare opcodes, Thumb2 vs ARM, CMN for negative immediates.
1444 CmpOpc = ARM::t2CMPrr;
1446 CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
1449 CmpOpc = ARM::CMPrr;
1451 CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
1456 Register SrcReg1 = getRegForValue(Src1Value);
1462 SrcReg2 = getRegForValue(Src2Value);
// Sub-word operands are extended to i32 before comparing.
1469 SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
1473 SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
1479 const MCInstrDesc &
II =
TII.get(CmpOpc);
1483 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
1486 MachineInstrBuilder MIB;
1487 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
1493 AddOptionalDefs(MIB);
// FP compares: copy FPSCR flags to CPSR via FMSTAT.
1499 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1500 TII.get(ARM::FMSTAT)));
// Selects an IR compare: after the flag-setting compare (emitted in
// missing lines, presumably via ARMEmitCmp — TODO confirm), materializes
// the boolean with a conditional move over a zero register.
1504bool ARMFastISel::SelectCmp(
const Instruction *
I) {
1519 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1520 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
1521 : &ARM::GPRRegClass;
1522 Register DestReg = createResultReg(RC);
1524 Register ZeroReg = fastMaterializeConstant(Zero);
// MOVCC writes 1 over the zero base when the condition holds.
1526 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(MovCCOpc), DestReg)
1530 updateValueMap(
I, DestReg);
// Selects fpext float->double via VCVTDS; requires VFP2 and FP64.
1534bool ARMFastISel::SelectFPExt(
const Instruction *
I) {
1536 if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64())
return false;
// Only the exact float -> double widening is handled.
1539 if (!
I->getType()->isDoubleTy() ||
1540 !
V->getType()->isFloatTy())
return false;
1547 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1548 TII.get(ARM::VCVTDS), Result)
1550 updateValueMap(
I, Result);
// Selects fptrunc double->float via VCVTSD; mirror image of SelectFPExt.
1554bool ARMFastISel::SelectFPTrunc(
const Instruction *
I) {
1556 if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64())
return false;
// Only the exact double -> float narrowing is handled.
1559 if (!(
I->getType()->isFloatTy() &&
1560 V->getType()->isDoubleTy()))
return false;
1567 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1568 TII.get(ARM::VCVTSD), Result)
1570 updateValueMap(
I, Result);
// Selects sitofp/uitofp: extends sub-word sources to i32, moves the bits
// into an FP register, then converts (opcode chosen in missing lines;
// double path requires FP64). `isSigned` selects the signed conversion.
1574bool ARMFastISel::SelectIToFP(
const Instruction *
I,
bool isSigned) {
1576 if (!Subtarget->hasVFP2Base())
return false;
1579 Type *Ty =
I->getType();
1580 if (!isTypeLegal(Ty, DstVT))
1583 Value *Src =
I->getOperand(0);
// Only i8/i16/i32 sources are supported.
1588 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
1591 Register SrcReg = getRegForValue(Src);
// Widen sub-word integers to i32 before the conversion.
1596 if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
1597 SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,
// The integer bits travel through an f32 register for VCVT.
1605 Register FP = ARMMoveToFPReg(MVT::f32, SrcReg);
1611 else if (Ty->
isDoubleTy() && Subtarget->hasFP64())
1616 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1618 updateValueMap(
I, ResultReg);
// Selects fptosi/fptoui: converts in an FP register then moves the result
// bits back to an integer register (ARMMoveToIntReg). Double sources need
// FP64; `isSigned` selects the signed conversion.
1622bool ARMFastISel::SelectFPToI(
const Instruction *
I,
bool isSigned) {
1624 if (!Subtarget->hasVFP2Base())
return false;
1627 Type *RetTy =
I->getType();
1628 if (!isTypeLegal(RetTy, DstVT))
1636 Type *OpTy =
I->getOperand(0)->getType();
1638 else if (OpTy->
isDoubleTy() && Subtarget->hasFP64())
1644 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
// The converted value sits in an FP register; move it to a GPR.
1649 Register IntReg = ARMMoveToIntReg(DstVT, ResultReg);
1653 updateValueMap(
I, IntReg);
// Selects an i32 `select`: TSTs bit 0 of the condition, then emits a
// conditional move — MOVCCr for register operands, MOVCCi for small
// immediates, MVNCCi for negative immediates. Fragmentary paste.
1657bool ARMFastISel::SelectSelect(
const Instruction *
I) {
1659 if (!isTypeLegal(
I->getType(), VT))
// Only i32 selects are supported here.
1663 if (VT != MVT::i32)
return false;
1665 Register CondReg = getRegForValue(
I->getOperand(0));
1668 Register Op1Reg = getRegForValue(
I->getOperand(1));
// Try to encode operand 2 as an immediate (negating it for MVNCCi).
1674 bool UseImm =
false;
1675 bool isNegativeImm =
false;
1677 assert(VT == MVT::i32 &&
"Expecting an i32.");
1678 Imm = (int)ConstInt->getValue().getZExtValue();
1680 isNegativeImm =
true;
1689 Op2Reg = getRegForValue(
I->getOperand(2));
// Test the low bit of the condition value.
1694 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1697 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(TstOpc))
// Pick register class and conditional-move opcode per operand form.
1702 const TargetRegisterClass *RC;
1704 RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
1705 MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
1707 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
1709 MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1711 MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
1713 Register ResultReg = createResultReg(RC);
1717 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(MovCCOpc),
1725 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(MovCCOpc),
1732 updateValueMap(
I, ResultReg);
// Selects signed/unsigned division: when no hardware divide is available,
// routes to the RTLIB __{s,u}div libcall matching the value width.
1736bool ARMFastISel::SelectDiv(
const Instruction *
I,
bool isSigned) {
1738 Type *Ty =
I->getType();
1739 if (!isTypeLegal(Ty, VT))
// Hardware SDIV/UDIV path (body in missing lines).
1745 if (Subtarget->hasDivideInThumbMode())
// Libcall fallback, one entry per width.
1749 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1751 LC =
isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
1752 else if (VT == MVT::i16)
1753 LC =
isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
1754 else if (VT == MVT::i32)
1755 LC =
isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
1756 else if (VT == MVT::i64)
1757 LC =
isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
1758 else if (VT == MVT::i128)
1759 LC =
isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
1760 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
"Unsupported SDIV!");
1762 return ARMEmitLibcall(
I, LC);
// Selects signed/unsigned remainder via the RTLIB __{s,u}rem libcall
// matching the value width; structural twin of SelectDiv.
1765bool ARMFastISel::SelectRem(
const Instruction *
I,
bool isSigned) {
1767 Type *Ty =
I->getType();
1768 if (!isTypeLegal(Ty, VT))
1778 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1780 LC =
isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
1781 else if (VT == MVT::i16)
1782 LC =
isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
1783 else if (VT == MVT::i32)
1784 LC =
isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
1785 else if (VT == MVT::i64)
1786 LC =
isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
1787 else if (VT == MVT::i128)
1788 LC =
isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
1789 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
"Unsupported SREM!");
1791 return ARMEmitLibcall(
I, LC);
// Selects a sub-word integer binary op (ADD/OR/SUB per ISDOpcode) into a
// register-register ARM/Thumb2 instruction. Fragmentary paste.
1794bool ARMFastISel::SelectBinaryIntOp(
const Instruction *
I,
unsigned ISDOpcode) {
// Only the narrow types are handled on this path.
1799 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1803 switch (ISDOpcode) {
1804 default:
return false;
1806 Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
1809 Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
1812 Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
1816 Register SrcReg1 = getRegForValue(
I->getOperand(0));
1822 Register SrcReg2 = getRegForValue(
I->getOperand(1));
1826 Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
1829 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1832 updateValueMap(
I, ResultReg);
// Selects an FP binary op (VADD/VSUB/VMUL, S or D form by width).
// Requires VFP2, and FP64 for doubles. Fragmentary paste.
1836bool ARMFastISel::SelectBinaryFPOp(
const Instruction *
I,
unsigned ISDOpcode) {
1838 if (!FPVT.
isSimple())
return false;
1849 Type *Ty =
I->getType();
1850 if (Ty->
isFloatTy() && !Subtarget->hasVFP2Base())
1852 if (Ty->
isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))
// i64 shares the 64-bit (D-register) opcode selection with f64.
1856 bool is64bit = VT == MVT::f64 || VT == MVT::i64;
1857 switch (ISDOpcode) {
1858 default:
return false;
1860 Opc = is64bit ? ARM::VADDD : ARM::VADDS;
1863 Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
1866 Opc = is64bit ? ARM::VMULD : ARM::VMULS;
1869 Register Op1 = getRegForValue(
I->getOperand(0));
1873 Register Op2 = getRegForValue(
I->getOperand(1));
1878 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1881 updateValueMap(
I, ResultReg);
// Maps an IR calling convention to the CC_ARM_* assignment function
// (return values chosen in lines missing from this paste), taking the
// AAPCS-vs-APCS ABI, VFP availability, and vararg-ness into account.
1889CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
1895 case CallingConv::Fast:
// Fast CC can use VFP registers only for non-vararg calls.
1896 if (Subtarget->hasVFP2Base() && !isVarArg) {
1897 if (!
TM.isAAPCS_ABI())
1903 case CallingConv::C:
1904 case CallingConv::CXX_FAST_TLS:
1906 if (
TM.isAAPCS_ABI()) {
1907 if (Subtarget->hasFPRegs() &&
// Conventions that share the AAPCS-VFP treatment.
1915 case CallingConv::ARM_AAPCS_VFP:
1916 case CallingConv::Swift:
1917 case CallingConv::SwiftTail:
1923 case CallingConv::ARM_AAPCS:
1925 case CallingConv::ARM_APCS:
1927 case CallingConv::GHC:
1932 case CallingConv::CFGuard_Check:
// Marshals call arguments: runs the calling convention over ArgVTs to get
// locations, reserves stack (call-frame-setup pseudo), then per-argument
// sign/zero-extends, bitcasts, splits f64 across register pairs, and
// stores stack-located arguments relative to SP. Fragmentary paste.
1937bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
1938 SmallVectorImpl<Register> &ArgRegs,
1939 SmallVectorImpl<MVT> &ArgVTs,
1940 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
1941 SmallVectorImpl<Register> &RegArgs,
1947 for (
Value *Arg : Args)
// Compute argument locations with the non-return assignment function.
1949 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *
Context);
1950 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, OrigTys,
1951 CCAssignFnForCall(CC,
false, isVarArg));
// First pass: validate that every location is something we can handle.
1955 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i) {
1956 CCValAssign &VA = ArgLocs[i];
// Custom (split) values must occupy two consecutive register locs.
1970 !VA.
isRegLoc() || !ArgLocs[++i].isRegLoc())
1982 if (!Subtarget->hasVFP2Base())
1986 if (!Subtarget->hasVFP2Base())
1996 NumBytes = CCInfo.getStackSize();
// Open the call frame.
1999 unsigned AdjStackDown =
TII.getCallFrameSetupOpcode();
2000 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2001 TII.get(AdjStackDown))
// Second pass: place each argument.
2005 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i) {
2006 CCValAssign &VA = ArgLocs[i];
2012 "We don't handle NEON/vector parameters yet.");
// SExt promotion to the destination width.
2019 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT,
false);
2020 assert(Arg &&
"Failed to emit a sext");
// ZExt promotion to the destination width.
2028 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT,
true);
2029 assert(Arg &&
"Failed to emit a zext");
2035 assert(BC &&
"Failed to emit a bitcast!");
2045 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2051 "Custom lowering for v2f64 args not available");
// f64 split across a GPR pair: consume the partner location.
2054 CCValAssign &NextVA = ArgLocs[++i];
2057 "We only handle register args!");
2059 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
// Stack argument: store relative to SP at the assigned offset.
2074 Addr.setKind(Address::RegBase);
2075 Addr.setReg(ARM::SP);
2078 bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
2079 assert(EmitRet &&
"Could not emit a store for argument!");
// Post-call cleanup: closes the call frame, then copies the return value
// out of its physical register(s) — VMOVDRR joins a two-register f64,
// otherwise a single COPY — and records consumed regs in UsedRegs.
2086bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
2087 const Instruction *
I, CallingConv::ID CC,
2088 unsigned &NumBytes,
bool isVarArg) {
// Close the call frame opened in ProcessCallArgs.
2090 unsigned AdjStackUp =
TII.getCallFrameDestroyOpcode();
2091 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2092 TII.get(AdjStackUp))
2096 if (RetVT != MVT::isVoid) {
2098 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *
Context);
2099 CCInfo.AnalyzeCallResult(RetVT,
I->getType(),
2100 CCAssignFnForCall(CC,
true, isVarArg));
// f64 returned in two core registers: join them with VMOVDRR.
2103 if (RVLocs.
size() == 2 && RetVT == MVT::f64) {
2106 MVT DestVT = RVLocs[0].getValVT();
2108 Register ResultReg = createResultReg(DstRC);
2109 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2110 TII.get(ARM::VMOVDRR), ResultReg)
2111 .
addReg(RVLocs[0].getLocReg())
2112 .
addReg(RVLocs[1].getLocReg()));
2114 UsedRegs.
push_back(RVLocs[0].getLocReg());
2115 UsedRegs.
push_back(RVLocs[1].getLocReg());
2118 updateValueMap(
I, ResultReg);
// Single-location return: plain COPY out of the result register.
2120 assert(RVLocs.
size() == 1 &&
"Can't handle non-double multi-reg retvals!");
2121 MVT CopyVT = RVLocs[0].getValVT();
// Sub-word returns are copied at i32 width (widening in missing lines).
2124 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
2129 Register ResultReg = createResultReg(DstRC);
2130 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2131 TII.get(TargetOpcode::COPY),
2132 ResultReg).
addReg(RVLocs[0].getLocReg());
2133 UsedRegs.
push_back(RVLocs[0].getLocReg());
2136 updateValueMap(
I, ResultReg);
// Selects a `ret`: analyzes the return convention, extends sub-word
// values to i32 per the zext/sext flags, COPYs into the assigned physical
// register, and emits the return (tBXNS_RET for CMSE non-secure entry,
// else the subtarget's return opcode). Fragmentary paste.
2143bool ARMFastISel::SelectRet(
const Instruction *
I) {
2145 const Function &
F = *
I->getParent()->getParent();
2146 const bool IsCmseNSEntry =
F.hasFnAttribute(
"cmse_nonsecure_entry");
2148 if (!FuncInfo.CanLowerReturn)
// Functions using swifterror are not handled here.
2152 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
2161 CallingConv::ID CC =
F.getCallingConv();
2162 if (
Ret->getNumOperands() > 0) {
2168 CCState CCInfo(CC,
F.isVarArg(), *FuncInfo.MF, ValLocs,
I->getContext());
2169 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC,
true ,
2172 const Value *RV =
Ret->getOperand(0);
// Only single-location returns are supported.
2178 if (ValLocs.
size() != 1)
2181 CCValAssign &VA = ValLocs[0];
2192 if (!RVEVT.
isSimple())
return false;
// Mismatched widths: only sub-word -> i32 extension is allowed.
2196 if (RVVT != DestVT) {
2197 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
2200 assert(DestVT == MVT::i32 &&
"ARM should always ext to i32");
2204 if (Outs[0].
Flags.isZExt() || Outs[0].Flags.isSExt()) {
2205 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].
Flags.isZExt());
2213 const TargetRegisterClass* SrcRC =
MRI.getRegClass(SrcReg);
// Move the value into the convention's physical return register.
2217 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2218 TII.get(TargetOpcode::COPY), DstReg).
addReg(SrcReg);
// CMSE non-secure entries must return through BXNS.
2227 RetOpc = ARM::tBXNS_RET;
2231 RetOpc = Subtarget->getReturnOpcode();
2233 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2235 AddOptionalDefs(MIB);
2241unsigned ARMFastISel::ARMSelectCallOp(
bool UseReg) {
2245 return isThumb2 ? ARM::tBL : ARM::BL;
2248Register ARMFastISel::getLibcallReg(
const Twine &Name) {
2255 GlobalValue *GV =
M.getNamedGlobal(
Name.str());
2257 GV =
new GlobalVariable(M, Type::getInt32Ty(*
Context),
false,
2260 return ARMMaterializeGV(GV, LCREVT.
getSimpleVT());
2270bool ARMFastISel::ARMEmitLibcall(
const Instruction *
I, RTLIB::Libcall
Call) {
2274 Type *RetTy =
I->getType();
2277 RetVT = MVT::isVoid;
2278 else if (!isTypeLegal(RetTy, RetVT))
2282 if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
2284 CCState CCInfo(CC,
false, *FuncInfo.MF, RVLocs, *
Context);
2285 CCInfo.AnalyzeCallResult(RetVT, RetTy, CCAssignFnForCall(CC,
true,
false));
2286 if (RVLocs.
size() >= 2 && RetVT != MVT::f64)
2291 SmallVector<Value*, 8>
Args;
2295 Args.reserve(
I->getNumOperands());
2296 ArgRegs.
reserve(
I->getNumOperands());
2297 ArgVTs.
reserve(
I->getNumOperands());
2298 ArgFlags.
reserve(
I->getNumOperands());
2304 Type *ArgTy =
Op->getType();
2306 if (!isTypeLegal(ArgTy, ArgVT))
return false;
2308 ISD::ArgFlagsTy
Flags;
2309 Flags.setOrigAlign(
DL.getABITypeAlign(ArgTy));
2320 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2321 RegArgs, CC, NumBytes,
false))
2325 if (Subtarget->genLongCalls()) {
2332 unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls());
2333 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2334 MIMD,
TII.get(CallOpc));
2338 if (Subtarget->genLongCalls()) {
2355 if (!FinishCall(RetVT, UsedRegs,
I, CC, NumBytes,
false))
return false;
2358 static_cast<MachineInstr *
>(MIB)->setPhysRegsDeadExcept(UsedRegs,
TRI);
2363bool ARMFastISel::SelectCall(
const Instruction *
I,
2364 const char *IntrMemName =
nullptr) {
2380 bool isVarArg = FTy->isVarArg();
2383 Type *RetTy =
I->getType();
2386 RetVT = MVT::isVoid;
2387 else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
2388 RetVT != MVT::i8 && RetVT != MVT::i1)
2392 if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
2393 RetVT != MVT::i16 && RetVT != MVT::i32) {
2395 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *
Context);
2396 CCInfo.AnalyzeCallResult(RetVT, RetTy,
2397 CCAssignFnForCall(CC,
true, isVarArg));
2398 if (RVLocs.
size() >= 2 && RetVT != MVT::f64)
2403 SmallVector<Value*, 8>
Args;
2407 unsigned arg_size = CI->
arg_size();
2408 Args.reserve(arg_size);
2412 for (
auto ArgI = CI->
arg_begin(), ArgE = CI->
arg_end(); ArgI != ArgE; ++ArgI) {
2415 if (IntrMemName && ArgE - ArgI <= 1)
2418 ISD::ArgFlagsTy
Flags;
2419 unsigned ArgIdx = ArgI - CI->
arg_begin();
2434 Type *ArgTy = (*ArgI)->getType();
2436 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
2440 Register Arg = getRegForValue(*ArgI);
2444 Flags.setOrigAlign(
DL.getABITypeAlign(ArgTy));
2446 Args.push_back(*ArgI);
2455 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2456 RegArgs, CC, NumBytes, isVarArg))
2461 if (!GV || Subtarget->genLongCalls())
UseReg =
true;
2466 CalleeReg = getLibcallReg(IntrMemName);
2468 CalleeReg = getRegForValue(Callee);
2475 unsigned CallOpc = ARMSelectCallOp(
UseReg);
2476 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2477 MIMD,
TII.get(CallOpc));
2486 }
else if (!IntrMemName)
2501 if (!FinishCall(RetVT, UsedRegs,
I, CC, NumBytes, isVarArg))
2505 static_cast<MachineInstr *
>(MIB)->setPhysRegsDeadExcept(UsedRegs,
TRI);
2511bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
2515bool ARMFastISel::ARMTryEmitSmallMemCpy(
Address Dest,
Address Src, uint64_t Len,
2516 MaybeAlign Alignment) {
2518 if (!ARMIsMemCpySmall(Len))
2523 if (!Alignment || *Alignment >= 4) {
2529 assert(Len == 1 &&
"Expected a length of 1!");
2533 assert(Alignment &&
"Alignment is set in this branch");
2535 if (Len >= 2 && *Alignment == 2)
2544 RV = ARMEmitLoad(VT, ResultReg, Src);
2545 assert(RV &&
"Should be able to handle this load.");
2546 RV = ARMEmitStore(VT, ResultReg, Dest);
2547 assert(RV &&
"Should be able to handle this store.");
2552 Dest.setOffset(Dest.getOffset() +
Size);
2553 Src.setOffset(Src.getOffset() +
Size);
2559bool ARMFastISel::SelectIntrinsicCall(
const IntrinsicInst &
I) {
2561 switch (
I.getIntrinsicID()) {
2562 default:
return false;
2563 case Intrinsic::frameaddress: {
2564 MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
2567 unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
2568 const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
2569 : &ARM::GPRRegClass;
2571 const ARMBaseRegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2583 DestReg = createResultReg(RC);
2584 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2585 TII.get(LdrOpc), DestReg)
2589 updateValueMap(&
I, SrcReg);
2592 case Intrinsic::memcpy:
2593 case Intrinsic::memmove: {
2601 bool isMemCpy = (
I.getIntrinsicID() == Intrinsic::memcpy);
2606 if (ARMIsMemCpySmall(Len)) {
2608 if (!ARMComputeAddress(MTI.
getRawDest(), Dest) ||
2611 MaybeAlign Alignment;
2615 if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
2627 return SelectCall(&
I, IntrMemName);
2629 case Intrinsic::memset: {
2641 return SelectCall(&
I,
"memset");
2643 case Intrinsic::trap: {
2644 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2645 TII.get(Subtarget->isThumb() ? ARM::tTRAP : ARM::TRAP));
2651bool ARMFastISel::SelectTrunc(
const Instruction *
I) {
2660 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
2662 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
2666 if (!SrcReg)
return false;
2670 updateValueMap(
I, SrcReg);
2676 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
2678 if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1)
2683 static const uint8_t isSingleInstrTbl[3][2][2][2] = {
2687 { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },
2688 { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },
2689 { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }
2696 static const TargetRegisterClass *RCTbl[2][2] = {
2698 { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
2699 { &ARM::tGPRRegClass, &ARM::rGPRRegClass }
2703 static const struct InstructionTable {
2708 }
IT[2][2][3][2] = {
2750 assert((SrcBits < DestBits) &&
"can only extend to larger types");
2751 assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
2752 "other sizes unimplemented");
2753 assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
2754 "other sizes unimplemented");
2756 bool hasV6Ops = Subtarget->hasV6Ops();
2757 unsigned Bitness = SrcBits / 8;
2758 assert((Bitness < 3) &&
"sanity-check table bounds");
2760 bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
2761 const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr];
2762 const InstructionTable *ITP = &
IT[isSingleInstr][isThumb2][Bitness][isZExt];
2763 unsigned Opc = ITP->Opc;
2764 assert(ARM::KILL !=
Opc &&
"Invalid table entry");
2765 unsigned hasS = ITP->hasS;
2768 "only MOVsi has shift operand addressing mode");
2769 unsigned Imm = ITP->Imm;
2772 bool setsCPSR = &ARM::tGPRRegClass == RC;
2773 unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
2788 unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
2789 for (
unsigned Instr = 0;
Instr != NumInstrsEmitted; ++
Instr) {
2790 ResultReg = createResultReg(RC);
2791 bool isLsl = (0 ==
Instr) && !isSingleInstr;
2792 unsigned Opcode = isLsl ? LSLOpc :
Opc;
2795 bool isKill = 1 ==
Instr;
2796 MachineInstrBuilder MIB =
BuildMI(
2797 *FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(Opcode), ResultReg);
2813bool ARMFastISel::SelectIntExt(
const Instruction *
I) {
2816 Type *DestTy =
I->getType();
2817 Value *Src =
I->getOperand(0);
2818 Type *SrcTy = Src->getType();
2821 Register SrcReg = getRegForValue(Src);
2822 if (!SrcReg)
return false;
2824 EVT SrcEVT, DestEVT;
2827 if (!SrcEVT.
isSimple())
return false;
2828 if (!DestEVT.
isSimple())
return false;
2832 Register ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
2835 updateValueMap(
I, ResultReg);
2839bool ARMFastISel::SelectShift(
const Instruction *
I,
2848 if (DestVT != MVT::i32)
2851 unsigned Opc = ARM::MOVsr;
2853 Value *Src2Value =
I->getOperand(1);
2855 ShiftImm = CI->getZExtValue();
2859 if (ShiftImm == 0 || ShiftImm >=32)
2865 Value *Src1Value =
I->getOperand(0);
2866 Register Reg1 = getRegForValue(Src1Value);
2871 if (
Opc == ARM::MOVsr) {
2872 Reg2 = getRegForValue(Src2Value);
2877 Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
2881 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2885 if (
Opc == ARM::MOVsi)
2887 else if (
Opc == ARM::MOVsr) {
2892 AddOptionalDefs(MIB);
2893 updateValueMap(
I, ResultReg);
2898bool ARMFastISel::fastSelectInstruction(
const Instruction *
I) {
2899 switch (
I->getOpcode()) {
2900 case Instruction::Load:
2901 return SelectLoad(
I);
2902 case Instruction::Store:
2903 return SelectStore(
I);
2904 case Instruction::Br:
2905 return SelectBranch(
I);
2906 case Instruction::IndirectBr:
2907 return SelectIndirectBr(
I);
2908 case Instruction::ICmp:
2909 case Instruction::FCmp:
2910 return SelectCmp(
I);
2911 case Instruction::FPExt:
2912 return SelectFPExt(
I);
2913 case Instruction::FPTrunc:
2914 return SelectFPTrunc(
I);
2915 case Instruction::SIToFP:
2916 return SelectIToFP(
I,
true);
2917 case Instruction::UIToFP:
2918 return SelectIToFP(
I,
false);
2919 case Instruction::FPToSI:
2920 return SelectFPToI(
I,
true);
2921 case Instruction::FPToUI:
2922 return SelectFPToI(
I,
false);
2923 case Instruction::Add:
2925 case Instruction::Or:
2926 return SelectBinaryIntOp(
I,
ISD::OR);
2927 case Instruction::Sub:
2929 case Instruction::FAdd:
2931 case Instruction::FSub:
2933 case Instruction::FMul:
2935 case Instruction::SDiv:
2936 return SelectDiv(
I,
true);
2937 case Instruction::UDiv:
2938 return SelectDiv(
I,
false);
2939 case Instruction::SRem:
2940 return SelectRem(
I,
true);
2941 case Instruction::URem:
2942 return SelectRem(
I,
false);
2943 case Instruction::Call:
2945 return SelectIntrinsicCall(*
II);
2946 return SelectCall(
I);
2947 case Instruction::Select:
2948 return SelectSelect(
I);
2949 case Instruction::Ret:
2950 return SelectRet(
I);
2951 case Instruction::Trunc:
2952 return SelectTrunc(
I);
2953 case Instruction::ZExt:
2954 case Instruction::SExt:
2955 return SelectIntExt(
I);
2956 case Instruction::Shl:
2958 case Instruction::LShr:
2960 case Instruction::AShr:
2977 { { ARM::SXTH, ARM::t2SXTH }, 0, 0, MVT::i16 },
2978 { { ARM::UXTH, ARM::t2UXTH }, 0, 1, MVT::i16 },
2979 { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8 },
2980 { { ARM::SXTB, ARM::t2SXTB }, 0, 0, MVT::i8 },
2981 { { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 }
2988bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *
MI,
unsigned OpNo,
2989 const LoadInst *LI) {
2992 if (!isLoadTypeLegal(LI->
getType(), VT))
2999 if (
MI->getNumOperands() < 3 || !
MI->getOperand(2).isImm())
3001 const uint64_t
Imm =
MI->getOperand(2).getImm();
3006 if (FLE.Opc[isThumb2] ==
MI->getOpcode() &&
3007 (uint64_t)FLE.ExpectedImm ==
Imm &&
3010 isZExt = FLE.isZExt;
3013 if (!Found)
return false;
3017 if (!ARMComputeAddress(LI->
getOperand(0), Addr))
return false;
3019 Register ResultReg =
MI->getOperand(0).getReg();
3020 if (!ARMEmitLoad(VT, ResultReg, Addr, LI->
getAlign(), isZExt,
false))
3023 removeDeadCode(
I, std::next(
I));
3027Register ARMFastISel::ARMLowerPICELF(
const GlobalValue *GV, MVT VT) {
3029 LLVMContext *
Context = &MF->getFunction().getContext();
3031 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
3038 MF->getDataLayout().getPrefTypeAlign(PointerType::get(*
Context, 0));
3039 unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign);
3040 MachineMemOperand *CPMMO =
3044 Register TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);
3045 unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp;
3046 MachineInstrBuilder MIB =
3047 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(
Opc), TempReg)
3050 if (
Opc == ARM::LDRcp)
3056 Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR
3059 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(
Opc), DestReg)
3061 .
addImm(ARMPCLabelIndex);
3063 if (!Subtarget->isThumb())
3066 if (UseGOT_PREL && Subtarget->isThumb()) {
3068 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3069 TII.get(ARM::t2LDRi12), NewDestReg)
3072 DestReg = NewDestReg;
3073 AddOptionalDefs(MIB);
3078bool ARMFastISel::fastLowerArguments() {
3079 if (!FuncInfo.CanLowerReturn)
3086 CallingConv::ID CC =
F->getCallingConv();
3090 case CallingConv::Fast:
3091 case CallingConv::C:
3092 case CallingConv::ARM_AAPCS_VFP:
3093 case CallingConv::ARM_AAPCS:
3094 case CallingConv::ARM_APCS:
3095 case CallingConv::Swift:
3096 case CallingConv::SwiftTail:
3102 for (
const Argument &Arg :
F->args()) {
3103 if (Arg.getArgNo() >= 4)
3106 if (Arg.hasAttribute(Attribute::InReg) ||
3107 Arg.hasAttribute(Attribute::StructRet) ||
3108 Arg.hasAttribute(Attribute::SwiftSelf) ||
3109 Arg.hasAttribute(Attribute::SwiftError) ||
3110 Arg.hasAttribute(Attribute::ByVal))
3113 Type *ArgTy = Arg.getType();
3118 if (!ArgVT.
isSimple())
return false;
3130 ARM::R0, ARM::R1, ARM::R2, ARM::R3
3133 const TargetRegisterClass *RC = &ARM::rGPRRegClass;
3134 for (
const Argument &Arg :
F->args()) {
3135 unsigned ArgNo = Arg.getArgNo();
3137 Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
3141 Register ResultReg = createResultReg(RC);
3142 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3143 TII.get(TargetOpcode::COPY),
3145 updateValueMap(&Arg, ResultReg);
3156 return new ARMFastISel(funcInfo, libInfo);
unsigned const MachineRegisterInfo * MRI
static const MCPhysReg GPRArgRegs[]
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred)
static const struct FoldableLoadExtendsStruct FoldableLoadExtends[]
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
static bool isSigned(unsigned int Opcode)
This file defines the FastISel class.
static Register UseReg(const MachineOperand &MO)
const HexagonInstrInfo * TII
static MaybeAlign getAlign(Value *Ptr)
Module.h This file contains the declarations for the Module class.
Machine Check Debug Module
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
uint64_t IntrinsicInst * II
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const GCNTargetMachine & getTM(const GCNSubtarget *STI)
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static const unsigned FramePtr
uint64_t getZExtValue() const
Get zero extended value.
int64_t getSExtValue() const
Get sign extended value.
Register getFrameRegister(const MachineFunction &MF) const override
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
bool isThumb2Function() const
unsigned createPICLabelUId()
bool useFastISel() const
True if fast-isel is used.
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize=false) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively.
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const override
getRegClassFor - Return the register class that should be used for the specified value type.
bool hasStandaloneRem(EVT VT) const override
Return true if the target can handle a standalone remainder operation.
PointerType * getType() const
Overload to return most specific pointer type.
BasicBlock * getSuccessor(unsigned i) const
Value * getCondition() const
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
unsigned getValNo() const
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Value * getCalledOperand() const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
FunctionType * getFunctionType() const
unsigned arg_size() const
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
const APFloat & getValueAPF() const
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
PointerType * getType() const
Global values are always pointers.
@ ExternalLinkage
Externally visible function.
Align getAlign() const
Return the alignment of the access that is being performed.
ArrayRef< MCOperandInfo > operands() const
bool isVector() const
Return true if this is a vector value type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
MachineInstrBundleIterator< MachineInstr > iterator
void setFrameAddressIsTaken(bool T)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
Value * getLength() const
Value * getRawDest() const
MaybeAlign getDestAlign() const
unsigned getDestAddressSpace() const
Value * getRawSource() const
Return the arguments to the instruction.
unsigned getSourceAddressSpace() const
MaybeAlign getSourceAlign() const
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr unsigned id() const
void reserve(size_type N)
void push_back(const T &Elt)
TypeSize getElementOffset(unsigned Idx) const
Provides information about what library functions are available for the current target.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
bool isPositionIndependent() const
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isArrayTy() const
True if this is an instance of ArrayType.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
bool isStructTy() const
True if this is an instance of StructType.
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isVoidTy() const
Return true if this is 'void'.
const Use * const_op_iterator
Value * getOperand(unsigned i) const
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
StructType * getStructTypeOrNull() const
TypeSize getSequentialElementStride(const DataLayout &DL) const
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ GOT_PREL
Thread Local Storage (General Dynamic Mode)
@ MO_NONLAZY
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which,...
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
@ C
The default llvm calling convention, compatible with C.
@ ADD
Simple integer binary arithmetic operators.
@ FADD
Simple binary floating point operators.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ User
could "use" a pointer
NodeAddr< InstrNode * > Instr
This is an optimization pass for GlobalISel generic memory operations.
bool RetFastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
FunctionAddr VTableAddr Value
LLVM_ABI Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
bool CC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
bool RetCC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
bool RetCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
bool RetCC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
bool CC_ARM_APCS_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool FastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
bool CC_ARM_Win32_CFGuard_Check(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
generic_gep_type_iterator<> gep_type_iterator
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
unsigned getKillRegState(bool B)
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
DWARFExpression::Operation Op
static MachineOperand t1CondCodeOp(bool isDead=false)
Get the operand corresponding to the conditional code result for Thumb1.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
gep_type_iterator gep_type_begin(const User *GEP)
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
unsigned gettBLXrOpcode(const MachineFunction &MF)
bool CC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
unsigned getBLXOpcode(const MachineFunction &MF)
bool CC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static LLVM_ABI MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.