#define GET_GICOMBINER_DEPS
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_DEPS

#define DEBUG_TYPE "aarch64-postlegalizer-lowering"

#define GET_GICOMBINER_TYPES
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_TYPES
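/// Represents a target pseudo which will replace a matched G_SHUFFLE_VECTOR
/// (e.g. G_REV, G_TRN1/2, G_UZP1/2, G_ZIP1/2, G_DUP, G_EXT).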
struct ShuffleVectorPseudo {
  unsigned Opc;                 ///< Opcode for the instruction. (E.g. G_ZIP1)
  Register Dst;                 ///< Destination register.
  SmallVector<SrcOp, 2> SrcOps; ///< Source registers.
  ShuffleVectorPseudo(unsigned Opc, Register Dst,
                      std::initializer_list<SrcOp> SrcOps)
      : Opc(Opc), Dst(Dst), SrcOps(SrcOps) {}
  ShuffleVectorPseudo() = default;
};
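/// Check whether a G_EXT instruction can handle shuffle mask \p M when the two
/// sources of the shuffle differ. Returns whether the sources must be swapped
/// and the EXT lane offset, or std::nullopt if no G_EXT applies.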
std::optional<std::pair<bool, uint64_t>> getExtMask(ArrayRef<int> M,
                                                    unsigned NumElts) {
  // Look for the first non-undef element.
  auto FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; });
  if (FirstRealElt == M.end())
    return std::nullopt;

  // Use APInt to handle overflow when calculating the expected element.
  unsigned MaskBits = APInt(32, NumElts * 2).logBase2();
  APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1, false, true);

  // The remaining shuffle indices must be the successive elements after the
  // first real element.
  if (any_of(
          make_range(std::next(FirstRealElt), M.end()),
          [&ExpectedElt](int Elt) { return Elt != ExpectedElt++ && Elt >= 0; }))
    return std::nullopt;

  bool ReverseExt = false;
  return std::make_pair(ReverseExt, Imm);
}
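/// Determine if \p M is a shuffle mask where all but one element comes
/// unchanged from one input vector, i.e. a mask that an INS (insert lane) can
/// implement. Returns {DstIsLeft, AnomalyLane} on success.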
std::optional<std::pair<bool, int>> isINSMask(ArrayRef<int> M,
                                              int NumInputElements) {
  if (M.size() != static_cast<size_t>(NumInputElements))
    return std::nullopt;
  int NumLHSMatch = 0, NumRHSMatch = 0;
  int LastLHSMismatch = -1, LastRHSMismatch = -1;
  for (int Idx = 0; Idx < NumInputElements; ++Idx) {
    // Undef lanes can match either input.
    if (M[Idx] == -1) {
      ++NumLHSMatch;
      ++NumRHSMatch;
      continue;
    }
    M[Idx] == Idx ? ++NumLHSMatch : LastLHSMismatch = Idx;
    M[Idx] == Idx + NumInputElements ? ++NumRHSMatch : LastRHSMismatch = Idx;
  }
  const int NumNeededToMatch = NumInputElements - 1;
  if (NumLHSMatch == NumNeededToMatch)
    return std::make_pair(true, LastLHSMismatch);
  if (NumRHSMatch == NumNeededToMatch)
    return std::make_pair(false, LastRHSMismatch);
  return std::nullopt;
}
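/// \returns true if \p MI is a G_SHUFFLE_VECTOR whose mask corresponds to a
/// REV64/REV32/REV16 lane reversal; the chosen opcode is recorded in
/// \p MatchInfo.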
bool matchREV(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(Dst);
  unsigned EltSize = Ty.getScalarSizeInBits();
  unsigned NumElts = Ty.getNumElements();

  // Try each REV lane size in turn.
  for (unsigned LaneSize : {64U, 32U, 16U}) {
    if (isREVMask(ShuffleMask, EltSize, NumElts, LaneSize)) {
      unsigned Opcode;
      if (LaneSize == 64U)
        Opcode = AArch64::G_REV64;
      else if (LaneSize == 32U)
        Opcode = AArch64::G_REV32;
      else
        Opcode = AArch64::G_REV16;

      MatchInfo = ShuffleVectorPseudo(Opcode, Dst, {Src});
      return true;
    }
  }
  return false;
}
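/// \returns true if \p MI is a G_SHUFFLE_VECTOR whose mask is a TRN1/TRN2
/// (transpose) pattern.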
bool matchTRN(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isTRNMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_TRN1 : AArch64::G_TRN2;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
}
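/// \returns true if \p MI is a G_SHUFFLE_VECTOR whose mask is a UZP1/UZP2
/// (unzip) pattern.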
bool matchUZP(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isUZPMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_UZP1 : AArch64::G_UZP2;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
}
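/// \returns true if \p MI is a G_SHUFFLE_VECTOR whose mask is a ZIP1/ZIP2
/// (interleave) pattern.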
bool matchZIP(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isZIPMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_ZIP1 : AArch64::G_ZIP2;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
}
/// Helper for matchDup: match a splat of a single inserted scalar, which can
/// be selected as a G_DUP of that scalar.
bool matchDupFromInsertVectorElt(int Lane, MachineInstr &MI,
                                 MachineRegisterInfo &MRI,
                                 ShuffleVectorPseudo &MatchInfo) {
  // Begin matching the insert.
  auto *InsMI = getOpcodeDef(TargetOpcode::G_INSERT_VECTOR_ELT,
                             MI.getOperand(1).getReg(), MRI);
  if (!InsMI)
    return false;
  // Match the undef vector operand.
  if (!getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, InsMI->getOperand(1).getReg(),
                    MRI))
    return false;
  MatchInfo = ShuffleVectorPseudo(AArch64::G_DUP, MI.getOperand(0).getReg(),
                                  {InsMI->getOperand(2).getReg()});
  return true;
}
/// Helper for matchDup: match a splat of one lane of a G_BUILD_VECTOR, which
/// can be selected as a G_DUP of that lane's scalar definition.
bool matchDupFromBuildVector(int Lane, MachineInstr &MI,
                             MachineRegisterInfo &MRI,
                             ShuffleVectorPseudo &MatchInfo) {
  assert(Lane >= 0 && "Expected positive lane?");
  int NumElements = MRI.getType(MI.getOperand(1).getReg()).getNumElements();
  // The splat may reference either input vector; look at the relevant one.
  auto *BuildVecMI =
      getOpcodeDef(TargetOpcode::G_BUILD_VECTOR,
                   MI.getOperand(Lane < NumElements ? 1 : 2).getReg(), MRI);
  if (NumElements <= Lane)
    Lane -= NumElements;
  if (!BuildVecMI)
    return false;
  Register Reg = BuildVecMI->getOperand(Lane + 1).getReg();
  MatchInfo =
      ShuffleVectorPseudo(AArch64::G_DUP, MI.getOperand(0).getReg(), {Reg});
  return true;
}
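/// \returns true if \p MI is a G_SHUFFLE_VECTOR which splats a single lane and
/// can therefore be replaced with a G_DUP.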
bool matchDup(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  auto MaybeLane = getSplatIndex(MI);
  if (!MaybeLane)
    return false;
  int Lane = *MaybeLane;
  // If this is an undef splat, lane 0 works just as well.
  if (Lane < 0)
    Lane = 0;
  if (matchDupFromInsertVectorElt(Lane, MI, MRI, MatchInfo))
    return true;
  if (matchDupFromBuildVector(Lane, MI, MRI, MatchInfo))
    return true;
  return false;
}
/// Check whether an EXT instruction can handle shuffle mask \p M when both
/// sources of the shuffle are the same.
bool isSingletonExtMask(ArrayRef<int> M, LLT Ty) {
  unsigned NumElts = Ty.getNumElements();

  // The shuffle indices must be the successive elements after the first one,
  // wrapping around to the start of the vector.
  unsigned ExpectedElt = M[0];
  for (unsigned I = 1; I < NumElts; ++I) {
    ++ExpectedElt;
    if (ExpectedElt == NumElts)
      ExpectedElt = 0;
    if (M[I] < 0)
      continue; // Ignore undef indices.
    if (ExpectedElt != static_cast<unsigned>(M[I]))
      return false;
  }
  return true;
}
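/// \returns true if \p MI is a G_SHUFFLE_VECTOR which can be lowered to a
/// G_EXT (extract from a concatenated register pair) with a byte offset.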
bool matchEXT(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  Register Dst = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(Dst);
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  auto Mask = MI.getOperand(3).getShuffleMask();
  uint64_t Imm;
  auto ExtInfo = getExtMask(Mask, DstTy.getNumElements());
  uint64_t ExtFactor = MRI.getType(V1).getScalarSizeInBits() / 8;

  if (!ExtInfo) {
    // Try the single-source form: V2 must be undef.
    if (!getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, V2, MRI) ||
        !isSingletonExtMask(Mask, DstTy))
      return false;

    Imm = Mask[0] * ExtFactor;
    MatchInfo = ShuffleVectorPseudo(AArch64::G_EXT, Dst, {V1, V1, Imm});
    return true;
  }
  bool ReverseExt;
  std::tie(ReverseExt, Imm) = *ExtInfo;
  if (ReverseExt)
    std::swap(V1, V2);
  Imm *= ExtFactor;
  MatchInfo = ShuffleVectorPseudo(AArch64::G_EXT, Dst, {V1, V2, Imm});
  return true;
}
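/// Replace a matched G_SHUFFLE_VECTOR with the target pseudo described by
/// \p MatchInfo.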
void applyShuffleVectorPseudo(MachineInstr &MI,
                              ShuffleVectorPseudo &MatchInfo) {
  MachineIRBuilder MIRBuilder(MI);
  MIRBuilder.buildInstr(MatchInfo.Opc, {MatchInfo.Dst}, MatchInfo.SrcOps);
  MI.eraseFromParent();
}
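/// Replace a matched G_SHUFFLE_VECTOR with G_EXT. The immediate operand must
/// be emitted as a G_CONSTANT so the imported selection patterns can match it;
/// an offset of zero degenerates to a plain copy.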
void applyEXT(MachineInstr &MI, ShuffleVectorPseudo &MatchInfo) {
  MachineIRBuilder MIRBuilder(MI);
  if (MatchInfo.SrcOps[2].getImm() == 0)
    MIRBuilder.buildCopy(MatchInfo.Dst, MatchInfo.SrcOps[0]);
  else {
    auto Cst = MIRBuilder.buildConstant(LLT::scalar(32),
                                        MatchInfo.SrcOps[2].getImm());
    MIRBuilder.buildInstr(MatchInfo.Opc, {MatchInfo.Dst},
                          {MatchInfo.SrcOps[0], MatchInfo.SrcOps[1], Cst});
  }
  MI.eraseFromParent();
}
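/// Lower a full 128-bit vector reversal as a G_REV64 of the source followed by
/// a G_EXT which swaps the two 64-bit halves.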
void applyFullRev(MachineInstr &MI, MachineRegisterInfo &MRI) {
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  LLT DstTy = MRI.getType(Dst);
  assert(DstTy.getSizeInBits() == 128 &&
         "Expected 128bit vector in applyFullRev");
  MachineIRBuilder MIRBuilder(MI);
  auto Cst = MIRBuilder.buildConstant(LLT::scalar(32), 8);
  auto Rev = MIRBuilder.buildInstr(AArch64::G_REV64, {DstTy}, {Src});
  MIRBuilder.buildInstr(AArch64::G_EXT, {Dst}, {Rev, Rev, Cst});
  MI.eraseFromParent();
}
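/// Lower a G_INSERT_VECTOR_ELT whose index is not a compile-time constant by
/// spilling the vector to a stack slot, storing the element at the computed
/// offset, and reloading the whole vector.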
  assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT);

  Builder.setInstrAndDebugLoc(Insert);

  auto StackTemp = Builder.buildFrameIndex(FramePtrTy, FrameIdx);

  Builder.buildStore(Insert.getOperand(1), StackTemp, PtrInfo, Align(8));

  assert(isPowerOf2_64(VecTy.getNumElements()) &&
         "Expected a power-2 vector size");
  auto EltSize = Builder.buildConstant(IdxTy, EltTy.getSizeInBytes());
  Register EltPtr =
      Builder.buildPtrAdd(MRI.getType(StackTemp.getReg(0)), StackTemp, Mul)
          .getReg(0);

  Builder.buildStore(Insert.getElementReg(), EltPtr, PtrInfo, Align(1));
  Builder.buildLoad(Insert.getReg(0), StackTemp, PtrInfo, Align(8));
  Insert.eraseFromParent();
}
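/// Match a G_SHUFFLE_VECTOR whose mask copies one vector wholesale except for
/// a single lane, which can be lowered to an extract/insert element pair (INS).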
bool matchINS(MachineInstr &MI, MachineRegisterInfo &MRI,
              std::tuple<Register, int, Register, int> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  int NumElts = MRI.getType(Dst).getNumElements();
  auto DstIsLeftAndDstLane = isINSMask(ShuffleMask, NumElts);
  if (!DstIsLeftAndDstLane)
    return false;
  bool DstIsLeft;
  int DstLane;
  std::tie(DstIsLeft, DstLane) = *DstIsLeftAndDstLane;
  Register Left = MI.getOperand(1).getReg();
  Register Right = MI.getOperand(2).getReg();
  Register DstVec = DstIsLeft ? Left : Right;
  Register SrcVec = Left;
  int SrcLane = ShuffleMask[DstLane];
  if (SrcLane >= NumElts) {
    SrcVec = Right;
    SrcLane -= NumElts;
  }
  MatchInfo = std::make_tuple(DstVec, DstLane, SrcVec, SrcLane);
  return true;
}
void applyINS(MachineInstr &MI, MachineRegisterInfo &MRI,
              MachineIRBuilder &Builder,
              std::tuple<Register, int, Register, int> &MatchInfo) {
  Builder.setInstrAndDebugLoc(MI);
  Register Dst = MI.getOperand(0).getReg();
  auto ScalarTy = MRI.getType(Dst).getElementType();
  Register DstVec, SrcVec;
  int DstLane, SrcLane;
  std::tie(DstVec, DstLane, SrcVec, SrcLane) = MatchInfo;
  auto SrcCst = Builder.buildConstant(LLT::scalar(64), SrcLane);
  auto Extract = Builder.buildExtractVectorElement(ScalarTy, SrcVec, SrcCst);
  auto DstCst = Builder.buildConstant(LLT::scalar(64), DstLane);
  Builder.buildInsertVectorElement(Dst, DstVec, Extract, DstCst);
  MI.eraseFromParent();
}
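/// isVShiftRImm - Check if \p Reg is a splat of a constant that is a valid
/// immediate for a vector shift right: 1 <= Cnt <= element size in bits.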
bool isVShiftRImm(Register Reg, MachineRegisterInfo &MRI, LLT Ty,
                  int64_t &Cnt) {
  assert(Ty.isVector() && "vector shift count is not a vector type");
  auto Cst = getAArch64VectorSplatScalar(*MRI.getVRegDef(Reg), MRI);
  if (!Cst)
    return false;
  Cnt = *Cst;
  int64_t ElementBits = Ty.getScalarSizeInBits();
  return Cnt >= 1 && Cnt <= ElementBits;
}

bool matchVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
                       int64_t &Imm) {
  assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
         MI.getOpcode() == TargetOpcode::G_LSHR);
  LLT Ty = MRI.getType(MI.getOperand(1).getReg());
  if (!Ty.isVector())
    return false;
  return isVShiftRImm(MI.getOperand(2).getReg(), MRI, Ty, Imm);
}
void applyVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
                       int64_t &Imm) {
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_ASHR || Opc == TargetOpcode::G_LSHR);
  unsigned NewOpc =
      Opc == TargetOpcode::G_ASHR ? AArch64::G_VASHR : AArch64::G_VLSHR;
  MachineIRBuilder MIB(MI);
  auto ImmDef = MIB.buildConstant(LLT::scalar(32), Imm);
  MIB.buildInstr(NewOpc, {MI.getOperand(0)}, {MI.getOperand(1), ImmDef});
  MI.eraseFromParent();
}
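/// Determine whether a G_ICMP against a constant can use a cheaper constant by
/// adjusting the immediate and predicate together (e.g. x slt c -> x sle c-1).
/// Returns the new immediate and predicate, or std::nullopt if no profitable
/// adjustment exists.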
std::optional<std::pair<uint64_t, CmpInst::Predicate>>
tryAdjustICmpImmAndPred(Register RHS, CmpInst::Predicate P,
                        const MachineRegisterInfo &MRI) {
  const auto &Ty = MRI.getType(RHS);
  unsigned Size = Ty.getSizeInBits();
  assert((Size == 32 || Size == 64) && "Expected 32 or 64 bit compare only?");

  auto ValAndVReg = getIConstantVRegValWithLookThrough(RHS, MRI);
  if (!ValAndVReg)
    return std::nullopt;
  uint64_t OriginalC = ValAndVReg->Value.getZExtValue();
  uint64_t C = OriginalC;

  // Depending on the predicate, the immediate is bumped by one and the
  // predicate relaxed or strengthened; the checks below reject values that
  // would wrap or that become more expensive to materialize.
  if ((Size == 64 && static_cast<int64_t>(C) == INT64_MIN) ||
      (Size == 32 && static_cast<int32_t>(C) == INT32_MIN))
    return std::nullopt;

  assert(C != 0 && "C should not be zero here!");

  if ((Size == 32 && static_cast<int32_t>(C) == INT32_MAX) ||
      (Size == 64 && static_cast<int64_t>(C) == INT64_MAX))
    return std::nullopt;

  if ((Size == 32 && static_cast<uint32_t>(C) == UINT32_MAX) ||
      (Size == 64 && C == UINT64_MAX))
    return std::nullopt;

  if (NumberOfInstrToLoadImm(OriginalC) > NumberOfInstrToLoadImm(C))
    return std::nullopt;

  return {{C, P}};
}
bool matchAdjustICmpImmAndPred(
    MachineInstr &MI, const MachineRegisterInfo &MRI,
    std::pair<uint64_t, CmpInst::Predicate> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  Register RHS = MI.getOperand(3).getReg();
  auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
  if (auto MaybeNewImmAndPred = tryAdjustICmpImmAndPred(RHS, Pred, MRI)) {
    MatchInfo = *MaybeNewImmAndPred;
    return true;
  }
  return false;
}
void applyAdjustICmpImmAndPred(
    MachineInstr &MI, std::pair<uint64_t, CmpInst::Predicate> &MatchInfo,
    MachineIRBuilder &MIB, GISelChangeObserver &Observer) {
  MachineOperand &RHS = MI.getOperand(3);
  auto Cst = MIB.buildConstant(MIB.getMRI()->cloneVirtualRegister(RHS.getReg()),
                               MatchInfo.first);
  Observer.changingInstr(MI);
  RHS.setReg(Cst->getOperand(0).getReg());
  MI.getOperand(1).setPredicate(MatchInfo.second);
  Observer.changedInstr(MI);
}
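/// Match a G_SHUFFLE_VECTOR which splats a single lane of its first source so
/// it can be selected as a G_DUPLANE{8,16,32,64} of that lane.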
bool matchDupLane(MachineInstr &MI, MachineRegisterInfo &MRI,
                  std::pair<unsigned, int> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  Register Src1Reg = MI.getOperand(1).getReg();
  const LLT SrcTy = MRI.getType(Src1Reg);
  const LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  auto LaneIdx = getSplatIndex(MI);
  if (!LaneIdx)
    return false;
  // The lane index must be within the first source vector.
  if (*LaneIdx >= SrcTy.getNumElements())
    return false;
  if (DstTy != SrcTy)
    return false;

  unsigned ScalarSize = SrcTy.getElementType().getSizeInBits();
  unsigned Opc = 0;
  switch (SrcTy.getNumElements()) {
  case 2:
    if (ScalarSize == 64)
      Opc = AArch64::G_DUPLANE64;
    else if (ScalarSize == 32)
      Opc = AArch64::G_DUPLANE32;
    break;
  case 4:
    if (ScalarSize == 32)
      Opc = AArch64::G_DUPLANE32;
    else if (ScalarSize == 16)
      Opc = AArch64::G_DUPLANE16;
    break;
  case 8:
    if (ScalarSize == 8)
      Opc = AArch64::G_DUPLANE8;
    else if (ScalarSize == 16)
      Opc = AArch64::G_DUPLANE16;
    break;
  case 16:
    if (ScalarSize == 8)
      Opc = AArch64::G_DUPLANE8;
    break;
  default:
    break;
  }
  if (!Opc)
    return false;

  MatchInfo.first = Opc;
  MatchInfo.second = *LaneIdx;
  return true;
}
void applyDupLane(MachineInstr &MI, MachineRegisterInfo &MRI,
                  MachineIRBuilder &B, std::pair<unsigned, int> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  Register Src1Reg = MI.getOperand(1).getReg();
  const LLT SrcTy = MRI.getType(Src1Reg);

  B.setInstrAndDebugLoc(MI);
  auto Lane = B.buildConstant(LLT::scalar(64), MatchInfo.second);

  Register DupSrc = MI.getOperand(1).getReg();
  // DUPLANE takes a 128-bit input; widen 64-bit sources with a concat against
  // undef.
  if (SrcTy.getSizeInBits() == 64) {
    auto Undef = B.buildUndef(SrcTy);
    DupSrc = B.buildConcatVectors(SrcTy.multiplyElements(2),
                                  {Src1Reg, Undef.getReg(0)})
                 .getReg(0);
  }
  B.buildInstr(MatchInfo.first, {MI.getOperand(0).getReg()}, {DupSrc, Lane});
  MI.eraseFromParent();
}
bool matchScalarizeVectorUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI) {
  auto &Unmerge = cast<GUnmerge>(MI);
  Register Src1Reg = Unmerge.getReg(Unmerge.getNumOperands() - 1);
  const LLT SrcTy = MRI.getType(Src1Reg);
  if (SrcTy.getSizeInBits() != 128 && SrcTy.getSizeInBits() != 64)
    return false;
  return SrcTy.isVector() && !SrcTy.isScalable() &&
         Unmerge.getNumOperands() == (unsigned)SrcTy.getNumElements() + 1;
}
void applyScalarizeVectorUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI,
                                 MachineIRBuilder &B) {
  auto &Unmerge = cast<GUnmerge>(MI);
  Register Src1Reg = Unmerge.getReg(Unmerge.getNumOperands() - 1);
  const LLT SrcTy = MRI.getType(Src1Reg);
  assert((SrcTy.isVector() && !SrcTy.isScalable()) &&
         "Expected a fixed length vector");

  for (int I = 0; I < SrcTy.getNumElements(); ++I)
    B.buildExtractVectorElementConstant(Unmerge.getReg(I), Src1Reg, I);
  MI.eraseFromParent();
}
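/// Lower a G_BUILD_VECTOR which splats a single non-constant value into a
/// G_DUP of that value.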
bool matchBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI) {
  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
  auto Splat = getAArch64VectorSplat(MI, MRI);
  if (!Splat)
    return false;
  if (Splat->isReg())
    return true;
  // Leave all-zeros/all-ones splats alone; selection has patterns for those.
  int64_t Cst = Splat->getCst();
  return (Cst != 0 && Cst != -1);
}

void applyBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI,
                           MachineIRBuilder &B) {
  B.setInstrAndDebugLoc(MI);
  B.buildInstr(AArch64::G_DUP, {MI.getOperand(0).getReg()},
               {MI.getOperand(1).getReg()});
  MI.eraseFromParent();
}
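/// \returns an estimate of how many instructions folding a G_ICMP operand's
/// shift and/or extend into the compare would save.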
unsigned getCmpOperandFoldingProfit(Register CmpOp, MachineRegisterInfo &MRI) {
  // Nothing to save unless the operand has exactly one use.
  if (!MRI.hasOneNonDBGUse(CmpOp))
    return 0;

  auto IsSupportedExtend = [&](MachineInstr &MI) {
    if (MI.getOpcode() == TargetOpcode::G_SEXT_INREG)
      return true;
    if (MI.getOpcode() != TargetOpcode::G_AND)
      return false;
    auto ValAndVReg =
        getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
    if (!ValAndVReg)
      return false;
    uint64_t Mask = ValAndVReg->Value.getZExtValue();
    return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF);
  };

  MachineInstr *Def = getDefIgnoringCopies(CmpOp, MRI);
  if (IsSupportedExtend(*Def))
    return 1;

  unsigned Opc = Def->getOpcode();
  if (Opc != TargetOpcode::G_SHL && Opc != TargetOpcode::G_ASHR &&
      Opc != TargetOpcode::G_LSHR)
    return 0;

  auto MaybeShiftAmt =
      getIConstantVRegValWithLookThrough(Def->getOperand(2).getReg(), MRI);
  if (!MaybeShiftAmt)
    return 0;
  uint64_t ShiftAmt = MaybeShiftAmt->Value.getZExtValue();
  MachineInstr *ShiftLHS =
      getDefIgnoringCopies(Def->getOperand(1).getReg(), MRI);

  // An extend folded together with a shift is only free for small shifts.
  if (IsSupportedExtend(*ShiftLHS))
    return (ShiftAmt <= 4) ? 2 : 1;

  LLT Ty = MRI.getType(Def->getOperand(0).getReg());
  if (Ty.isVector())
    return 0;
  unsigned ShiftSize = Ty.getSizeInBits();
  if ((ShiftSize == 32 && ShiftAmt <= 31) ||
      (ShiftSize == 64 && ShiftAmt <= 63))
    return 1;
  return 0;
}
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);

  MI.getOperand(2).setReg(RHS);
  MI.getOperand(3).setReg(LHS);

  assert(DstTy == MRI.getType(RHS) && "Src and Dst types must match!");
/// Check whether a vector G_FCMP can be lowered to AArch64-specific vector
/// compare pseudos for the given subtarget and element size.
bool matchLowerVectorFCMP(MachineInstr &MI, MachineRegisterInfo &MRI,
                          MachineIRBuilder &MIB) {
  assert(MI.getOpcode() == TargetOpcode::G_FCMP);
  const auto &ST = MI.getMF()->getSubtarget<AArch64Subtarget>();

  Register Dst = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(Dst);
  if (!DstTy.isVector() || !ST.hasNEON())
    return false;
  Register LHS = MI.getOperand(2).getReg();
  unsigned EltSize = MRI.getType(LHS).getScalarSizeInBits();
  if (EltSize == 16 && !ST.hasFullFP16())
    return false;
  if (EltSize != 16 && EltSize != 32 && EltSize != 64)
    return false;
  return true;
}
/// Lower a vector G_FCMP to one or two AArch64 vector compares, OR-ing and
/// inverting the results as required by the condition-code mapping.
void applyLowerVectorFCMP(MachineInstr &MI, MachineRegisterInfo &MRI,
                          MachineIRBuilder &MIB) {
  assert(MI.getOpcode() == TargetOpcode::G_FCMP);
  Register Dst = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(Dst);

  bool Invert = false;

  const bool NoNans =
      ST.getTargetLowering()->getTargetMachine().Options.NoNaNsFPMath;

  // When two condition codes are needed, emit both compares and OR them.
  auto Cmp2 = getVectorFCMP(CC2, LHS, RHS, NoNans, MRI);
  auto Cmp2Dst = Cmp2(MIB);
  auto Cmp1Dst = Cmp(MIB);
  Register CmpRes = MIB.buildOr(DstTy, Cmp1Dst, Cmp2Dst).getReg(0);

  MRI.replaceRegWith(Dst, CmpRes);
  MI.eraseFromParent();
}
  for (unsigned I = 0; I < GBuildVec->getNumSources(); ++I) {
    if (!ConstVal.has_value())
  // Build the result by inserting each source into an undef vector, one lane
  // at a time, then copy into the original destination.
  LLT DstTy = MRI.getType(GBuildVec->getReg(0));
  Register DstReg = B.buildUndef(DstTy).getReg(0);

  for (unsigned I = 0; I < GBuildVec->getNumSources(); ++I) {
    Register SrcReg = GBuildVec->getSourceReg(I);
    Register IdxReg = B.buildConstant(LLT::scalar(64), I).getReg(0);
    DstReg =
        B.buildInsertVectorElement(DstTy, DstReg, SrcReg, IdxReg).getReg(0);
  }
  B.buildCopy(GBuildVec->getReg(0), DstReg);
  GBuildVec->eraseFromParent();
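/// Form a truncating store by storing the source of a G_TRUNC directly, for
/// scalar values of at most 64 bits.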
bool matchFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI,
                         Register &SrcReg) {
  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  Register DstReg = MI.getOperand(0).getReg();
  if (MRI.getType(DstReg).isVector())
    return false;
  if (!mi_match(DstReg, MRI, m_GTrunc(m_Reg(SrcReg))))
    return false;
  return MRI.getType(SrcReg).getSizeInBits() <= 64;
}

void applyFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI,
                         MachineIRBuilder &B, GISelChangeObserver &Observer,
                         Register &SrcReg) {
  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  Observer.changingInstr(MI);
  MI.getOperand(0).setReg(SrcReg);
  Observer.changedInstr(MI);
}
// Lower vector G_SEXT_INREG back to shifts for selection.
bool matchVectorSextInReg(MachineInstr &MI, MachineRegisterInfo &MRI) {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);
  return DstTy.isVector();
}

void applyVectorSextInReg(MachineInstr &MI, MachineRegisterInfo &MRI,
                          MachineIRBuilder &B, GISelChangeObserver &Observer) {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  B.setInstrAndDebugLoc(MI);
  LegalizerHelper Helper(*MI.getMF(), Observer, B);
  Helper.lower(MI, 0, /* Unused hint type */ LLT());
}
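/// Combine away a G_EXT of a vector against undef feeding a two-result
/// unmerge: unmerge the original vector instead and swap which result is used.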
  if (Unmerge.getNumDefs() != 2)
    return false;
  if (!MRI.use_nodbg_empty(Unmerge.getReg(1)))
    return false;

  LLT DstTy = MRI.getType(Unmerge.getReg(0));
  if (!DstTy.isVector())
    return false;

  if (!LowestVal || LowestVal->Value.getZExtValue() != DstTy.getSizeInBytes())
    return false;

  MatchInfo = ExtSrc1;

  // Swap the unmerge's destination registers and point it at the new source.
  Register Dst1 = MI.getOperand(0).getReg();
  MI.getOperand(0).setReg(MI.getOperand(1).getReg());
  MI.getOperand(1).setReg(Dst1);
  MI.getOperand(2).setReg(SrcReg);
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  assert(MI.getOpcode() == TargetOpcode::G_MUL &&
         "Expected a G_MUL instruction");

  // Break an expensive v2s64 G_MUL down into scalar multiplies.
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  LegalizerHelper Helper(*MI.getMF(), Observer, B);
  Helper.fewerElementsVector(
      MI, 0, DstTy.changeElementCount(
                 DstTy.getElementCount().divideCoefficientBy(2)));
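/// Post-legalization lowering combiner: runs the target-specific lowering
/// rules generated from AArch64Combine.td plus the matchers and appliers
/// defined above.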
class AArch64PostLegalizerLoweringImpl : public Combiner {
protected:
  const CombinerHelper Helper;
  const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig;
  const AArch64Subtarget &STI;

public:
  AArch64PostLegalizerLoweringImpl(
      MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
      GISelCSEInfo *CSEInfo,
      const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig,
      const AArch64Subtarget &STI);
  static const char *getName() { return "AArch64PostLegalizerLowering"; }
  bool tryCombineAll(MachineInstr &I) const override;

private:
#define GET_GICOMBINER_CLASS_MEMBERS
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_CLASS_MEMBERS
};

#define GET_GICOMBINER_IMPL
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_IMPL
AArch64PostLegalizerLoweringImpl::AArch64PostLegalizerLoweringImpl(
    MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
    GISelCSEInfo *CSEInfo,
    const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig,
    const AArch64Subtarget &STI)
    : Combiner(MF, CInfo, TPC, nullptr, CSEInfo),
      Helper(Observer, B, true), RuleConfig(RuleConfig), STI(STI),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_CONSTRUCTOR_INITS
{
}
  AArch64PostLegalizerLowering();

  StringRef getPassName() const override {
    return "AArch64PostLegalizerLowering";
  }

  AArch64PostLegalizerLoweringImplRuleConfig RuleConfig;

void AArch64PostLegalizerLowering::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.setPreservesCFG();
  MachineFunctionPass::getAnalysisUsage(AU);
}

AArch64PostLegalizerLowering::AArch64PostLegalizerLowering()
    : MachineFunctionPass(ID) {
  if (!RuleConfig.parseCommandLineOption())
    report_fatal_error("Invalid rule identifier");
}
bool AArch64PostLegalizerLowering::runOnMachineFunction(MachineFunction &MF) {
  auto *TPC = &getAnalysis<TargetPassConfig>();
  const Function &F = MF.getFunction();
  const AArch64Subtarget &ST = MF.getSubtarget<AArch64Subtarget>();
  CombinerInfo CInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
                     /*LegalizerInfo*/ nullptr, /*OptEnabled=*/true,
                     F.hasOptSize(), F.hasMinSize());
  // Disable fixed-point iteration to reduce compile time.
  CInfo.MaxIterations = 1;
  // The combiner performs DCE itself, so a separate full DCE pass is not
  // needed.
  CInfo.EnableFullDCE = false;
  AArch64PostLegalizerLoweringImpl Impl(MF, CInfo, TPC, /*CSEInfo*/ nullptr,
                                        RuleConfig, ST);
  return Impl.combineMachineInstrs();
}
char AArch64PostLegalizerLowering::ID = 0;
INITIALIZE_PASS_BEGIN(AArch64PostLegalizerLowering, DEBUG_TYPE,
                      "Lower AArch64 MachineInstrs after legalization", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AArch64PostLegalizerLowering, DEBUG_TYPE,
                    "Lower AArch64 MachineInstrs after legalization", false,
                    false)

FunctionPass *llvm::createAArch64PostLegalizerLowering() {
  return new AArch64PostLegalizerLowering();
}