//===----- CodeGen/ExpandVectorPredication.cpp - Expand VP intrinsics -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements IR expansion for vector predication intrinsics, allowing
// targets to enable vector predication until just before codegen.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ExpandVectorPredication.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <optional>

using namespace llvm;

using VPLegalization = TargetTransformInfo::VPLegalization;
using VPTransform = TargetTransformInfo::VPLegalization::VPTransform;

// Keep this in sync with TargetTransformInfo::VPLegalization.
#define VPINTERNAL_VPLEGAL_CASES                                               \
  VPINTERNAL_CASE(Legal)                                                       \
  VPINTERNAL_CASE(Discard)                                                     \
  VPINTERNAL_CASE(Convert)

#define VPINTERNAL_CASE(X) "|" #X
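// With this first definition of VPINTERNAL_CASE, the case list above expands
// to adjacent string literals that concatenate to "|Legal|Discard|Convert",
// which is appended to the help text of the override options below.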

// Override options.
static cl::opt<std::string> EVLTransformOverride(
    "expandvp-override-evl-transform", cl::init(""), cl::Hidden,
    cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
             ". If non-empty, ignore "
             "TargetTransformInfo and "
             "always use this transformation for the %evl parameter (Used in "
             "testing)."));

static cl::opt<std::string> MaskTransformOverride(
    "expandvp-override-mask-transform", cl::init(""), cl::Hidden,
    cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
             ". If non-empty, ignore "
             "TargetTransformInfo and "
             "always use this transformation for the %mask parameter (Used in "
             "testing)."));

#undef VPINTERNAL_CASE
#define VPINTERNAL_CASE(X) .Case(#X, VPLegalization::X)

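// After the redefinition above, the same VPINTERNAL_VPLEGAL_CASES list expands
// to chained .Case("Legal", VPLegalization::Legal), .Case("Discard", ...),
// .Case("Convert", ...) calls, so the StringSwitch below maps the override
// strings back to the corresponding VPLegalization values.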
static VPTransform parseOverrideOption(const std::string &TextOpt) {
  return StringSwitch<VPTransform>(TextOpt) VPINTERNAL_VPLEGAL_CASES;
}

#undef VPINTERNAL_VPLEGAL_CASES

// Whether any override options are set.
static bool anyExpandVPOverridesSet() {
  return !EVLTransformOverride.empty() || !MaskTransformOverride.empty();
}

#define DEBUG_TYPE "expandvp"

STATISTIC(NumFoldedVL, "Number of folded vector length params");
STATISTIC(NumLoweredVPOps, "Number of folded vector predication operations");

///// Helpers {

/// \returns Whether the vector mask \p MaskVal has all lane bits set.
static bool isAllTrueMask(Value *MaskVal) {
  if (Value *SplattedVal = getSplatValue(MaskVal))
    if (auto *ConstValue = dyn_cast<Constant>(SplattedVal))
      return ConstValue->isAllOnesValue();

  return false;
}

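// A divisor of 1 never traps: it avoids division by zero for udiv/urem and,
// unlike -1, cannot trigger the INT_MIN / -1 overflow case for sdiv/srem,
// which makes it a safe substitute on masked-off lanes.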
/// \returns A non-excepting divisor constant for this type.
static Constant *getSafeDivisor(Type *DivTy) {
  assert(DivTy->isIntOrIntVectorTy() && "Unsupported divisor type");
  return ConstantInt::get(DivTy, 1u, false);
}

/// Transfer operation properties from \p VPI to \p NewVal.
static void transferDecorations(Value &NewVal, VPIntrinsic &VPI) {
  auto *NewInst = dyn_cast<Instruction>(&NewVal);
  if (!NewInst || !isa<FPMathOperator>(NewVal))
    return;

  auto *OldFMOp = dyn_cast<FPMathOperator>(&VPI);
  if (!OldFMOp)
    return;

  NewInst->setFastMathFlags(OldFMOp->getFastMathFlags());
}

/// Transfer all properties from \p OldOp to \p NewOp and replace all uses.
/// \p OldOp gets erased.
static void replaceOperation(Value &NewOp, VPIntrinsic &OldOp) {
  transferDecorations(NewOp, OldOp);

  if (isa<Instruction>(NewOp) && !NewOp.hasName() && OldOp.hasName())
    NewOp.takeName(&OldOp);

  OldOp.replaceAllUsesWith(&NewOp);
  OldOp.eraseFromParent();
}

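// Whether it is safe to compute all lanes of this VP operation while ignoring
// %mask and %evl. For example, llvm.vp.add may be evaluated on every lane
// (results in disabled lanes are simply unused), while llvm.vp.udiv may not,
// because a disabled lane could divide by zero. VP reductions are never
// treated as speculatable here since their scalar result depends on exactly
// which lanes participate.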
static bool maySpeculateLanes(VPIntrinsic &VPI) {
  // The result of VP reductions depends on the mask and evl.
  if (isa<VPReductionIntrinsic>(VPI))
    return false;
  // Fallback to whether the intrinsic is speculatable.
  if (auto IntrID = VPI.getFunctionalIntrinsicID())
    return Intrinsic::getFnAttributes(VPI.getContext(), *IntrID)
        .hasAttribute(Attribute::AttrKind::Speculatable);
  if (auto Opc = VPI.getFunctionalOpcode())
    return isSafeToSpeculativelyExecuteWithOpcode(*Opc, &VPI);
  return false;
}

//// } Helpers

namespace {

// Expansion pass state at function scope.
struct CachingVPExpander {
  const TargetTransformInfo &TTI;

  /// \returns A bitmask that is true where the lane position is less-than \p
  /// EVLParam
  ///
  /// \p Builder
  ///    Used for instruction creation.
  /// \p EVLParam
  ///    The explicit vector length parameter to test against the lane
  ///    positions.
  /// \p ElemCount
  ///    Static (potentially scalable) number of vector elements.
  Value *convertEVLToMask(IRBuilder<> &Builder, Value *EVLParam,
                          ElementCount ElemCount);

  /// If needed, folds the EVL in the mask operand and discards the EVL
  /// parameter. Returns true if the mask was actually folded.
  bool foldEVLIntoMask(VPIntrinsic &VPI);

  /// "Remove" the %evl parameter of \p PI by setting it to the static vector
  /// length of the operation. Returns true if the %evl (if any) was effectively
  /// changed.
  bool discardEVLParameter(VPIntrinsic &PI);

  /// Lower this VP binary operator to an unpredicated binary operator.
  bool expandPredicationInBinaryOperator(IRBuilder<> &Builder, VPIntrinsic &PI);

  /// Lower this VP int call to an unpredicated int call.
  bool expandPredicationToIntCall(IRBuilder<> &Builder, VPIntrinsic &PI);

  /// Lower this VP fp call to an unpredicated fp call.
  bool expandPredicationToFPCall(IRBuilder<> &Builder, VPIntrinsic &PI,
                                 unsigned UnpredicatedIntrinsicID);

  /// Lower this VP reduction to a call to an unpredicated reduction intrinsic.
  bool expandPredicationInReduction(IRBuilder<> &Builder,
                                    VPReductionIntrinsic &PI);

  /// Lower this VP cast operation to a non-VP intrinsic.
  bool expandPredicationToCastIntrinsic(IRBuilder<> &Builder, VPIntrinsic &VPI);

  /// Lower this VP memory operation to a non-VP intrinsic.
  bool expandPredicationInMemoryIntrinsic(IRBuilder<> &Builder,
                                          VPIntrinsic &VPI);

  /// Lower this VP comparison to a call to an unpredicated comparison.
  bool expandPredicationInComparison(IRBuilder<> &Builder, VPCmpIntrinsic &PI);

  /// Query TTI and expand the vector predication in \p PI accordingly.
  bool expandPredication(VPIntrinsic &PI);

  /// Determine how and whether the VPIntrinsic \p VPI shall be expanded. This
  /// overrides TTI with the cl::opts listed at the top of this file.
  VPLegalization getVPLegalizationStrategy(const VPIntrinsic &VPI) const;
  bool UsingTTIOverrides;

public:
  CachingVPExpander(const TargetTransformInfo &TTI)
      : TTI(TTI), UsingTTIOverrides(anyExpandVPOverridesSet()) {}

  /// Expand llvm.vp.* intrinsics as requested by \p TTI.
  /// Returns the details of the expansion.
  VPExpansionDetails expandVectorPredication(VPIntrinsic &VPI);
};

//// CachingVPExpander {

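// For example (illustrative IR): expanding %evl for a fixed-width <4 x i32>
// operation with %evl = 2 yields
//   %mask = icmp ult <4 x i32> <i32 0, i32 1, i32 2, i32 3>, splat (i32 2)
// i.e. <1, 1, 0, 0>, while scalable types express the same less-than
// comparison with llvm.get_active_lane_mask(i32 0, %evl).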
Value *CachingVPExpander::convertEVLToMask(IRBuilder<> &Builder,
                                           Value *EVLParam,
                                           ElementCount ElemCount) {
  // TODO add caching
  // Scalable vector %evl conversion.
  if (ElemCount.isScalable()) {
    Type *BoolVecTy = VectorType::get(Builder.getInt1Ty(), ElemCount);
    // `get_active_lane_mask` performs an implicit less-than comparison.
    Value *ConstZero = Builder.getInt32(0);
    return Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
                                   {BoolVecTy, EVLParam->getType()},
                                   {ConstZero, EVLParam});
  }

  // Fixed vector %evl conversion.
  Type *LaneTy = EVLParam->getType();
  unsigned NumElems = ElemCount.getFixedValue();
  Value *VLSplat = Builder.CreateVectorSplat(NumElems, EVLParam);
  Value *IdxVec = Builder.CreateStepVector(VectorType::get(LaneTy, ElemCount));
  return Builder.CreateICmp(CmpInst::ICMP_ULT, IdxVec, VLSplat);
}

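// Illustrative expansion of a masked divide: once %evl has been folded into
// the mask (or discarded), a call such as
//   %r = call <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32> %a, <4 x i32> %b,
//                                           <4 x i1> %m, i32 4)
// is rewritten below into
//   %safe.b = select <4 x i1> %m, <4 x i32> %b, <4 x i32> splat (i32 1)
//   %r      = udiv <4 x i32> %a, %safe.b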
bool CachingVPExpander::expandPredicationInBinaryOperator(IRBuilder<> &Builder,
                                                          VPIntrinsic &VPI) {
  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  auto OC = static_cast<Instruction::BinaryOps>(*VPI.getFunctionalOpcode());
  assert(Instruction::isBinaryOp(OC));

  Value *Op0 = VPI.getOperand(0);
  Value *Op1 = VPI.getOperand(1);
  Value *Mask = VPI.getMaskParam();

  // Blend in safe operands.
  if (Mask && !isAllTrueMask(Mask)) {
    switch (OC) {
    default:
      // Can safely ignore the predicate.
      break;

    // Division operators need a safe divisor on masked-off lanes (1).
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
      // 2nd operand must not be zero.
      Value *SafeDivisor = getSafeDivisor(VPI.getType());
      Op1 = Builder.CreateSelect(Mask, Op1, SafeDivisor);
    }
  }

  Value *NewBinOp = Builder.CreateBinOp(OC, Op0, Op1);

  replaceOperation(*NewBinOp, VPI);
  return true;
}

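// Note on the operand count below: VP intrinsic calls carry the data operands
// first, followed by %mask and %evl, and getNumOperands() on a call also
// counts the called function itself, so getNumOperands() - 3 is exactly the
// number of data operands to forward to the unpredicated intrinsic.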
bool CachingVPExpander::expandPredicationToIntCall(IRBuilder<> &Builder,
                                                   VPIntrinsic &VPI) {
  std::optional<unsigned> FID = VPI.getFunctionalIntrinsicID();
  if (!FID)
    return false;
  SmallVector<Value *, 2> Argument;
  for (unsigned i = 0; i < VPI.getNumOperands() - 3; i++) {
    Argument.push_back(VPI.getOperand(i));
  }
  Value *NewOp =
      Builder.CreateIntrinsic(FID.value(), {VPI.getType()}, Argument);
  replaceOperation(*NewOp, VPI);
  return true;
}

bool CachingVPExpander::expandPredicationToFPCall(
    IRBuilder<> &Builder, VPIntrinsic &VPI, unsigned UnpredicatedIntrinsicID) {
  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  switch (UnpredicatedIntrinsicID) {
  case Intrinsic::fabs:
  case Intrinsic::sqrt:
  case Intrinsic::maxnum:
  case Intrinsic::minnum: {
    SmallVector<Value *, 2> Argument;
    for (unsigned i = 0; i < VPI.getNumOperands() - 3; i++) {
      Argument.push_back(VPI.getOperand(i));
    }
    Value *NewOp = Builder.CreateIntrinsic(UnpredicatedIntrinsicID,
                                           {VPI.getType()}, Argument);
    replaceOperation(*NewOp, VPI);
    return true;
  }
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::experimental_constrained_fma:
  case Intrinsic::experimental_constrained_fmuladd: {
    Value *Op0 = VPI.getOperand(0);
    Value *Op1 = VPI.getOperand(1);
    Value *Op2 = VPI.getOperand(2);
    Function *Fn = Intrinsic::getOrInsertDeclaration(
        VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
    Value *NewOp;
    if (Intrinsic::isConstrainedFPIntrinsic(UnpredicatedIntrinsicID))
      NewOp = Builder.CreateConstrainedFPCall(Fn, {Op0, Op1, Op2});
    else
      NewOp = Builder.CreateCall(Fn, {Op0, Op1, Op2});
    replaceOperation(*NewOp, VPI);
    return true;
  }
  }

  return false;
}

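// The neutral element is the value that leaves the reduction unchanged, e.g.
// 0 for vp.reduce.add, 1 for vp.reduce.mul, all-ones for vp.reduce.and. It is
// splatted into masked-off lanes so that an ordinary unpredicated reduction
// over the whole vector computes the same result as the predicated one.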
static Value *getNeutralReductionElement(const VPReductionIntrinsic &VPI,
                                         Type *EltTy) {
  Intrinsic::ID RdxID = *VPI.getFunctionalIntrinsicID();
  FastMathFlags FMF;
  if (isa<FPMathOperator>(VPI))
    FMF = VPI.getFastMathFlags();
  return getReductionIdentity(RdxID, EltTy, FMF);
}

bool CachingVPExpander::expandPredicationInReduction(
    IRBuilder<> &Builder, VPReductionIntrinsic &VPI) {
  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  Value *Mask = VPI.getMaskParam();
  Value *RedOp = VPI.getOperand(VPI.getVectorParamPos());

  // Insert neutral element in masked-out positions
  if (Mask && !isAllTrueMask(Mask)) {
    auto *NeutralElt = getNeutralReductionElement(VPI, VPI.getType());
    auto *NeutralVector = Builder.CreateVectorSplat(
        cast<VectorType>(RedOp->getType())->getElementCount(), NeutralElt);
    RedOp = Builder.CreateSelect(Mask, RedOp, NeutralVector);
  }

  Value *Reduction;
  Value *Start = VPI.getOperand(VPI.getStartParamPos());

  switch (VPI.getIntrinsicID()) {
  default:
    llvm_unreachable("Impossible reduction kind");
  case Intrinsic::vp_reduce_add:
  case Intrinsic::vp_reduce_mul:
  case Intrinsic::vp_reduce_and:
  case Intrinsic::vp_reduce_or:
  case Intrinsic::vp_reduce_xor: {
    Intrinsic::ID RedID = *VPI.getFunctionalIntrinsicID();
    unsigned Opc = getArithmeticReductionInstruction(RedID);
    assert(Instruction::isBinaryOp(Opc));
    Reduction = Builder.CreateUnaryIntrinsic(RedID, RedOp);
    Reduction =
        Builder.CreateBinOp((Instruction::BinaryOps)Opc, Reduction, Start);
    break;
  }
  case Intrinsic::vp_reduce_smax:
  case Intrinsic::vp_reduce_smin:
  case Intrinsic::vp_reduce_umax:
  case Intrinsic::vp_reduce_umin:
  case Intrinsic::vp_reduce_fmax:
  case Intrinsic::vp_reduce_fmin:
  case Intrinsic::vp_reduce_fmaximum:
  case Intrinsic::vp_reduce_fminimum: {
    Intrinsic::ID RedID = *VPI.getFunctionalIntrinsicID();
    Intrinsic::ID ScalarID = getMinMaxReductionIntrinsicOp(RedID);
    Reduction = Builder.CreateUnaryIntrinsic(RedID, RedOp);
    transferDecorations(*Reduction, VPI);
    Reduction = Builder.CreateBinaryIntrinsic(ScalarID, Reduction, Start);
    break;
  }
  case Intrinsic::vp_reduce_fadd:
    Reduction = Builder.CreateFAddReduce(Start, RedOp);
    break;
  case Intrinsic::vp_reduce_fmul:
    Reduction = Builder.CreateFMulReduce(Start, RedOp);
    break;
  }

  replaceOperation(*Reduction, VPI);
  return true;
}

bool CachingVPExpander::expandPredicationToCastIntrinsic(IRBuilder<> &Builder,
                                                         VPIntrinsic &VPI) {
  Intrinsic::ID VPID = VPI.getIntrinsicID();
  unsigned CastOpcode = VPIntrinsic::getFunctionalOpcodeForVP(VPID).value();
  assert(Instruction::isCast(CastOpcode));
  Value *CastOp = Builder.CreateCast(Instruction::CastOps(CastOpcode),
                                     VPI.getOperand(0), VPI.getType());

  replaceOperation(*CastOp, VPI);
  return true;
}

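// Memory VP intrinsics either drop the mask entirely (when it is known
// all-true, a plain load/store suffices) or are rewritten onto the
// llvm.masked.* family: vp.load/vp.store become masked.load/masked.store and
// vp.gather/vp.scatter become masked.gather/masked.scatter. The %evl effect
// must already be folded into the mask (or be ignorable) at this point.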
bool CachingVPExpander::expandPredicationInMemoryIntrinsic(IRBuilder<> &Builder,
                                                           VPIntrinsic &VPI) {
  assert(VPI.canIgnoreVectorLengthParam());

  const auto &DL = VPI.getDataLayout();

  Value *MaskParam = VPI.getMaskParam();
  Value *PtrParam = VPI.getMemoryPointerParam();
  Value *DataParam = VPI.getMemoryDataParam();
  bool IsUnmasked = isAllTrueMask(MaskParam);

  MaybeAlign AlignOpt = VPI.getPointerAlignment();

  Value *NewMemoryInst = nullptr;
  switch (VPI.getIntrinsicID()) {
  default:
    llvm_unreachable("Not a VP memory intrinsic");
  case Intrinsic::vp_store:
    if (IsUnmasked) {
      StoreInst *NewStore =
          Builder.CreateStore(DataParam, PtrParam, /*IsVolatile*/ false);
      if (AlignOpt.has_value())
        NewStore->setAlignment(*AlignOpt);
      NewMemoryInst = NewStore;
    } else
      NewMemoryInst = Builder.CreateMaskedStore(
          DataParam, PtrParam, AlignOpt.valueOrOne(), MaskParam);

    break;
  case Intrinsic::vp_load:
    if (IsUnmasked) {
      LoadInst *NewLoad =
          Builder.CreateLoad(VPI.getType(), PtrParam, /*IsVolatile*/ false);
      if (AlignOpt.has_value())
        NewLoad->setAlignment(*AlignOpt);
      NewMemoryInst = NewLoad;
    } else
      NewMemoryInst = Builder.CreateMaskedLoad(
          VPI.getType(), PtrParam, AlignOpt.valueOrOne(), MaskParam);

    break;
  case Intrinsic::vp_scatter: {
    auto *ElementType =
        cast<VectorType>(DataParam->getType())->getElementType();
    NewMemoryInst = Builder.CreateMaskedScatter(
        DataParam, PtrParam,
        AlignOpt.value_or(DL.getPrefTypeAlign(ElementType)), MaskParam);
    break;
  }
  case Intrinsic::vp_gather: {
    auto *ElementType = cast<VectorType>(VPI.getType())->getElementType();
    NewMemoryInst = Builder.CreateMaskedGather(
        VPI.getType(), PtrParam,
        AlignOpt.value_or(DL.getPrefTypeAlign(ElementType)), MaskParam,
        nullptr);
    break;
  }
  }

  assert(NewMemoryInst);
  replaceOperation(*NewMemoryInst, VPI);
  return true;
}

bool CachingVPExpander::expandPredicationInComparison(IRBuilder<> &Builder,
                                                      VPCmpIntrinsic &VPI) {
  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  assert(*VPI.getFunctionalOpcode() == Instruction::ICmp ||
         *VPI.getFunctionalOpcode() == Instruction::FCmp);

  Value *Op0 = VPI.getOperand(0);
  Value *Op1 = VPI.getOperand(1);
  auto Pred = VPI.getPredicate();

  auto *NewCmp = Builder.CreateCmp(Pred, Op0, Op1);

  replaceOperation(*NewCmp, VPI);
  return true;
}

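// For example, discarding %evl on a <vscale x 2 x i32> operation replaces it
// with vscale * 2 (the full runtime vector length), and on a fixed <4 x i32>
// operation with the constant 4; the operation then behaves as if no explicit
// vector length had been given.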
bool CachingVPExpander::discardEVLParameter(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Discard EVL parameter in " << VPI << "\n");

  if (VPI.canIgnoreVectorLengthParam())
    return false;

  Value *EVLParam = VPI.getVectorLengthParam();
  if (!EVLParam)
    return false;

  ElementCount StaticElemCount = VPI.getStaticVectorLength();
  Value *MaxEVL = nullptr;
  Type *Int32Ty = Type::getInt32Ty(VPI.getContext());
  if (StaticElemCount.isScalable()) {
    // TODO add caching
    IRBuilder<> Builder(VPI.getParent(), VPI.getIterator());
    Value *FactorConst = Builder.getInt32(StaticElemCount.getKnownMinValue());
    Value *VScale = Builder.CreateVScale(Int32Ty, "vscale");
    MaxEVL = Builder.CreateNUWMul(VScale, FactorConst, "scalable_size");
  } else {
    MaxEVL = ConstantInt::get(Int32Ty, StaticElemCount.getFixedValue(), false);
  }
  VPI.setVectorLengthParam(MaxEVL);
  return true;
}

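// Rough shape of the fold performed below (illustrative): the %evl is turned
// into a lane mask via convertEVLToMask and merged into the existing mask,
//   %evlmask = ... lanes with index < %evl ...
//   %newmask = and %evlmask, %oldmask
// after which %evl itself is reset to the full vector length so later stages
// can ignore it.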
bool CachingVPExpander::foldEVLIntoMask(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Folding vlen for " << VPI << '\n');

  IRBuilder<> Builder(&VPI);

  // Ineffective %evl parameter and so nothing to do here.
  if (VPI.canIgnoreVectorLengthParam())
    return false;

  // Only VP intrinsics can have an %evl parameter.
  Value *OldMaskParam = VPI.getMaskParam();
  if (!OldMaskParam) {
    assert((VPI.getIntrinsicID() == Intrinsic::vp_merge ||
            VPI.getIntrinsicID() == Intrinsic::vp_select) &&
           "Unexpected VP intrinsic without mask operand");
    OldMaskParam = VPI.getArgOperand(0);
  }

  Value *OldEVLParam = VPI.getVectorLengthParam();
  assert(OldMaskParam && "no mask param to fold the vl param into");
  assert(OldEVLParam && "no EVL param to fold away");

  LLVM_DEBUG(dbgs() << "OLD evl: " << *OldEVLParam << '\n');
  LLVM_DEBUG(dbgs() << "OLD mask: " << *OldMaskParam << '\n');

  // Convert the %evl predication into vector mask predication.
  ElementCount ElemCount = VPI.getStaticVectorLength();
  Value *VLMask = convertEVLToMask(Builder, OldEVLParam, ElemCount);
  Value *NewMaskParam = Builder.CreateAnd(VLMask, OldMaskParam);
  if (VPI.getIntrinsicID() == Intrinsic::vp_merge ||
      VPI.getIntrinsicID() == Intrinsic::vp_select)
    VPI.setArgOperand(0, NewMaskParam);
  else
    VPI.setMaskParam(NewMaskParam);

  // Drop the %evl parameter.
  discardEVLParameter(VPI);
  assert(VPI.canIgnoreVectorLengthParam() &&
         "transformation did not render the evl param ineffective!");

  // Reassess the modified instruction.
  return true;
}

bool CachingVPExpander::expandPredication(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Lowering to unpredicated op: " << VPI << '\n');

  IRBuilder<> Builder(&VPI);

  // Try lowering to an LLVM instruction first.
  auto OC = VPI.getFunctionalOpcode();

  if (OC && Instruction::isBinaryOp(*OC))
    return expandPredicationInBinaryOperator(Builder, VPI);

  if (auto *VPRI = dyn_cast<VPReductionIntrinsic>(&VPI))
    return expandPredicationInReduction(Builder, *VPRI);

  if (auto *VPCmp = dyn_cast<VPCmpIntrinsic>(&VPI))
    return expandPredicationInComparison(Builder, *VPCmp);

  if (VPIntrinsic::isVPCast(VPI.getIntrinsicID()))
    return expandPredicationToCastIntrinsic(Builder, VPI);

  switch (VPI.getIntrinsicID()) {
  default:
    break;
  case Intrinsic::vp_fneg: {
    Value *NewNegOp = Builder.CreateFNeg(VPI.getOperand(0));
    replaceOperation(*NewNegOp, VPI);
    return NewNegOp;
  }
  case Intrinsic::vp_select:
  case Intrinsic::vp_merge: {
    assert(maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam());
    Value *NewSelectOp = Builder.CreateSelect(
        VPI.getOperand(0), VPI.getOperand(1), VPI.getOperand(2));
    replaceOperation(*NewSelectOp, VPI);
    return NewSelectOp;
  }
  case Intrinsic::vp_abs:
  case Intrinsic::vp_smax:
  case Intrinsic::vp_smin:
  case Intrinsic::vp_umax:
  case Intrinsic::vp_umin:
  case Intrinsic::vp_bswap:
  case Intrinsic::vp_bitreverse:
  case Intrinsic::vp_ctpop:
  case Intrinsic::vp_ctlz:
  case Intrinsic::vp_cttz:
  case Intrinsic::vp_sadd_sat:
  case Intrinsic::vp_uadd_sat:
  case Intrinsic::vp_ssub_sat:
  case Intrinsic::vp_usub_sat:
  case Intrinsic::vp_fshl:
  case Intrinsic::vp_fshr:
    return expandPredicationToIntCall(Builder, VPI);
  case Intrinsic::vp_fabs:
  case Intrinsic::vp_sqrt:
  case Intrinsic::vp_maxnum:
  case Intrinsic::vp_minnum:
  case Intrinsic::vp_maximum:
  case Intrinsic::vp_minimum:
  case Intrinsic::vp_fma:
  case Intrinsic::vp_fmuladd:
    return expandPredicationToFPCall(Builder, VPI,
                                     VPI.getFunctionalIntrinsicID().value());
  case Intrinsic::vp_load:
  case Intrinsic::vp_store:
  case Intrinsic::vp_gather:
  case Intrinsic::vp_scatter:
    return expandPredicationInMemoryIntrinsic(Builder, VPI);
  }

  if (auto CID = VPI.getConstrainedIntrinsicID())
    if (expandPredicationToFPCall(Builder, VPI, *CID))
      return true;

  return false;
}

//// } CachingVPExpander

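// A VPLegalization decision has two parts: EVLParamStrategy (what to do with
// the %evl operand) and OpStrategy (what to do with the operation itself).
// Legal leaves the corresponding piece as is; Discard replaces %evl with the
// full vector length; Convert folds %evl into the mask or, for OpStrategy,
// expands the whole operation to non-VP IR.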
void sanitizeStrategy(VPIntrinsic &VPI, VPLegalization &LegalizeStrat) {
  // Operations with speculatable lanes do not strictly need predication.
  if (maySpeculateLanes(VPI)) {
    // Converting a speculatable VP intrinsic means dropping %mask and %evl.
    // No need to expand %evl into the %mask only to ignore that code.
    if (LegalizeStrat.OpStrategy == VPLegalization::Convert)
      LegalizeStrat.EVLParamStrategy = VPLegalization::Discard;
    return;
  }

  // We have to preserve the predicating effect of %evl for this
  // non-speculatable VP intrinsic.
  // 1) Never discard %evl.
  // 2) If this VP intrinsic will be expanded to non-VP code, make sure that
  //    %evl gets folded into %mask.
  if ((LegalizeStrat.EVLParamStrategy == VPLegalization::Discard) ||
      (LegalizeStrat.OpStrategy == VPLegalization::Convert)) {
    LegalizeStrat.EVLParamStrategy = VPLegalization::Convert;
  }
}

VPLegalization
CachingVPExpander::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
  auto VPStrat = TTI.getVPLegalizationStrategy(VPI);
  if (LLVM_LIKELY(!UsingTTIOverrides)) {
    // No overrides - we are in production.
    return VPStrat;
  }

  // Overrides set - we are in testing, the following does not need to be
  // efficient.
  VPStrat.EVLParamStrategy = parseOverrideOption(EVLTransformOverride);
  VPStrat.OpStrategy = parseOverrideOption(MaskTransformOverride);
  return VPStrat;
}

VPExpansionDetails
CachingVPExpander::expandVectorPredication(VPIntrinsic &VPI) {
  auto Strategy = getVPLegalizationStrategy(VPI);
  sanitizeStrategy(VPI, Strategy);

  VPExpansionDetails Changed = VPExpansionDetails::IntrinsicUnchanged;

  // Transform the EVL parameter.
  switch (Strategy.EVLParamStrategy) {
  case VPLegalization::Legal:
    break;
  case VPLegalization::Discard:
    if (discardEVLParameter(VPI))
      Changed = VPExpansionDetails::IntrinsicUpdated;
    break;
  case VPLegalization::Convert:
    if (foldEVLIntoMask(VPI)) {
      Changed = VPExpansionDetails::IntrinsicUpdated;
      ++NumFoldedVL;
    }
    break;
  }

  // Replace with a non-predicated operation.
  switch (Strategy.OpStrategy) {
  case VPLegalization::Legal:
    break;
  case VPLegalization::Discard:
    llvm_unreachable("Invalid strategy for operators.");
  case VPLegalization::Convert:
    if (expandPredication(VPI)) {
      ++NumLoweredVPOps;
      Changed = VPExpansionDetails::IntrinsicReplaced;
    }
    break;
  }

  return Changed;
}
} // namespace

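// Typical use (illustrative sketch): a lowering pass walks the function and
// invokes this entry point on each VP intrinsic it finds, e.g.
//   for (Instruction &I : llvm::make_early_inc_range(instructions(F)))
//     if (auto *VPI = dyn_cast<VPIntrinsic>(&I))
//       expandVectorPredicationIntrinsic(*VPI, TTI);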
VPExpansionDetails
llvm::expandVectorPredicationIntrinsic(VPIntrinsic &VPI,
                                       const TargetTransformInfo &TTI) {
  return CachingVPExpander(TTI).expandVectorPredication(VPI);
}