LLVM 22.0.0git
SpillUtils.cpp
Go to the documentation of this file.
1//===- SpillUtils.cpp - Utilities for checking for spills ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
10#include "CoroInternal.h"
11#include "llvm/Analysis/CFG.h"
13#include "llvm/IR/CFG.h"
14#include "llvm/IR/DebugInfo.h"
15#include "llvm/IR/Dominators.h"
18
19namespace llvm {
20
21namespace coro {
22
23namespace {
24
25typedef SmallPtrSet<BasicBlock *, 8> VisitedBlocksSet;
26
27static bool isNonSpilledIntrinsic(Instruction &I) {
28 // Structural coroutine intrinsics that should not be spilled into the
29 // coroutine frame.
31}
32
33/// Does control flow starting at the given block ever reach a suspend
34/// instruction before reaching a block in VisitedOrFreeBBs?
35static bool isSuspendReachableFrom(BasicBlock *From,
36 VisitedBlocksSet &VisitedOrFreeBBs) {
37 // Eagerly try to add this block to the visited set. If it's already
38 // there, stop recursing; this path doesn't reach a suspend before
39 // either looping or reaching a freeing block.
40 if (!VisitedOrFreeBBs.insert(From).second)
41 return false;
42
43 // We assume that we'll already have split suspends into their own blocks.
44 if (coro::isSuspendBlock(From))
45 return true;
46
47 // Recurse on the successors.
48 for (auto *Succ : successors(From)) {
49 if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
50 return true;
51 }
52
53 return false;
54}
55
56/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
57/// suspend point?
58static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
59 // Seed the visited set with all the basic blocks containing a free
60 // so that we won't pass them up.
61 VisitedBlocksSet VisitedOrFreeBBs;
62 for (auto *User : AI->users()) {
63 if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
64 VisitedOrFreeBBs.insert(FI->getParent());
65 }
66
67 return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
68}
69
70/// Turn the given coro.alloca.alloc call into a dynamic allocation.
71/// This happens during the all-instructions iteration, so it must not
72/// delete the call.
73static Instruction *
74lowerNonLocalAlloca(CoroAllocaAllocInst *AI, const coro::Shape &Shape,
75 SmallVectorImpl<Instruction *> &DeadInsts) {
76 IRBuilder<> Builder(AI);
77 auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
78
79 for (User *U : AI->users()) {
81 U->replaceAllUsesWith(Alloc);
82 } else {
83 auto FI = cast<CoroAllocaFreeInst>(U);
84 Builder.SetInsertPoint(FI);
85 Shape.emitDealloc(Builder, Alloc, nullptr);
86 }
87 DeadInsts.push_back(cast<Instruction>(U));
88 }
89
90 // Push this on last so that it gets deleted after all the others.
91 DeadInsts.push_back(AI);
92
93 // Return the new allocation value so that we can check for needed spills.
95}
96
97// We need to make room to insert a spill after initial PHIs, but before
98// catchswitch instruction. Placing it before violates the requirement that
99// catchswitch, like all other EHPads must be the first nonPHI in a block.
100//
101// Split away catchswitch into a separate block and insert in its place:
102//
103// cleanuppad <InsertPt> cleanupret.
104//
105// cleanupret instruction will act as an insert point for the spill.
106static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
107 BasicBlock *CurrentBlock = CatchSwitch->getParent();
108 BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
109 CurrentBlock->getTerminator()->eraseFromParent();
110
111 auto *CleanupPad =
112 CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
113 auto *CleanupRet =
114 CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
115 return CleanupRet;
116}
117
118// We use a pointer use visitor to track how an alloca is being used.
119// The goal is to be able to answer the following three questions:
120// 1. Should this alloca be allocated on the frame instead.
121// 2. Could the content of the alloca be modified prior to CoroBegin, which
122// would require copying the data from the alloca to the frame after
123// CoroBegin.
124// 3. Are there any aliases created for this alloca prior to CoroBegin, but
125// used after CoroBegin. In that case, we will need to recreate the alias
126// after CoroBegin based off the frame.
127//
128// To answer question 1, we track two things:
129// A. List of all BasicBlocks that use this alloca or any of the aliases of
130// the alloca. In the end, we check if there exists any two basic blocks that
131// cross suspension points. If so, this alloca must be put on the frame.
132// B. Whether the alloca or any alias of the alloca is escaped at some point,
133// either by storing the address somewhere, or the address is used in a
134// function call that might capture. If it's ever escaped, this alloca must be
135// put on the frame conservatively.
136//
// To answer question 2, we track through the variable MayWriteBeforeCoroBegin.
138// Whenever a potential write happens, either through a store instruction, a
139// function call or any of the memory intrinsics, we check whether this
140// instruction is prior to CoroBegin.
141//
142// To answer question 3, we track the offsets of all aliases created for the
143// alloca prior to CoroBegin but used after CoroBegin. std::optional is used to
144// be able to represent the case when the offset is unknown (e.g. when you have
145// a PHINode that takes in different offset values). We cannot handle unknown
146// offsets and will assert. This is the potential issue left out. An ideal
147// solution would likely require a significant redesign.
148
149namespace {
struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
  using Base = PtrUseVisitor<AllocaUseVisitor>;
  // DL/DT/CoroShape/Checker are borrowed for the lifetime of the visitor.
  // ShouldUseLifetimeStartInfo controls whether lifetime.start markers may be
  // used to bound the alloca's live range (callers disable it for ABIs where
  // that analysis is unreliable).
  AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
                   const coro::Shape &CoroShape,
                   const SuspendCrossingInfo &Checker,
                   bool ShouldUseLifetimeStartInfo)
      : PtrUseVisitor(DL), DT(DT), CoroShape(CoroShape), Checker(Checker),
        ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {
    // Cache suspend-containing blocks; computeShouldLiveOnFrame runs a
    // reachability query against this set.
    for (AnyCoroSuspendInst *SuspendInst : CoroShape.CoroSuspends)
      CoroSuspendBBs.insert(SuspendInst->getParent());
  }

  // Record every visited instruction as a user of the alloca (or one of its
  // aliases), then dispatch to the base pointer-use visitor.
  void visit(Instruction &I) {
    Users.insert(&I);
    Base::visit(I);
    // If the pointer is escaped prior to CoroBegin, we have to assume it would
    // be written into before CoroBegin as well.
    if (PI.isEscaped() &&
        !DT.dominates(CoroShape.CoroBegin, PI.getEscapingInst())) {
      MayWriteBeforeCoroBegin = true;
    }
  }
  // We need to provide this overload as PtrUseVisitor uses a pointer based
  // visiting function.
  void visit(Instruction *I) { return visit(*I); }

  // PHIs, selects and insert{element,value} propagate the pointer: the result
  // is another alias of the alloca, and its users must be walked too.
  void visitPHINode(PHINode &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitSelectInst(SelectInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitInsertElementInst(InsertElementInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitInsertValueInst(InsertValueInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitStoreInst(StoreInst &SI) {
    // Regardless whether the alias of the alloca is the value operand or the
    // pointer operand, we need to assume the alloca has been written.
    handleMayWrite(SI);

    if (SI.getValueOperand() != U->get())
      return;

    // We are storing the pointer into a memory location, potentially escaping.
    // As an optimization, we try to detect simple cases where it doesn't
    // actually escape, for example:
    //     %ptr = alloca ..
    //     %addr = alloca ..
    //     store %ptr, %addr
    //     %x = load %addr
    //     ..
    // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, and not considering %ptr being escaped.
    auto IsSimpleStoreThenLoad = [&]() {
      auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
      // If the memory location we are storing to is not an alloca, it
      // could be an alias of some other memory locations, which is difficult
      // to analyze.
      if (!AI)
        return false;
      // StoreAliases contains aliases of the memory location stored into.
      SmallVector<Instruction *, 4> StoreAliases = {AI};
      while (!StoreAliases.empty()) {
        Instruction *I = StoreAliases.pop_back_val();
        for (User *U : I->users()) {
          // If we are loading from the memory location, we are creating an
          // alias of the original pointer.
          if (auto *LI = dyn_cast<LoadInst>(U)) {
            enqueueUsers(*LI);
            handleAlias(*LI);
            continue;
          }
          // If we are overriding the memory location, the pointer certainly
          // won't escape.
          if (auto *S = dyn_cast<StoreInst>(U))
            if (S->getPointerOperand() == I)
              continue;
          // Lifetime markers don't read or propagate the pointer.
          if (isa<LifetimeIntrinsic>(U))
            continue;
          // BitCastInst creates aliases of the memory location being stored
          // into.
          if (auto *BI = dyn_cast<BitCastInst>(U)) {
            StoreAliases.push_back(BI);
            continue;
          }
          // Any other user could read or capture the stored pointer: give up.
          return false;
        }
      }

      return true;
    };

    if (!IsSimpleStoreThenLoad())
      PI.setEscaped(&SI);
  }

  // All mem intrinsics modify the data.
  void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }

  void visitBitCastInst(BitCastInst &BC) {
    Base::visitBitCastInst(BC);
    handleAlias(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    Base::visitAddrSpaceCastInst(ASC);
    handleAlias(ASC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    // The base visitor will adjust Offset accordingly.
    Base::visitGetElementPtrInst(GEPI);
    handleAlias(GEPI);
  }

  // Track lifetime markers so computeShouldLiveOnFrame can bound the live
  // range; every other intrinsic goes through the base visitor.
  void visitIntrinsicInst(IntrinsicInst &II) {
    switch (II.getIntrinsicID()) {
    default:
      return Base::visitIntrinsicInst(II);
    case Intrinsic::lifetime_start:
      LifetimeStarts.insert(&II);
      LifetimeStartBBs.push_back(II.getParent());
      break;
    case Intrinsic::lifetime_end:
      LifetimeEndBBs.insert(II.getParent());
      break;
    }
  }

  // Passing the pointer to a call that may capture it escapes the alloca;
  // any call may also write through it.
  void visitCallBase(CallBase &CB) {
    for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
      if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
        PI.setEscaped(&CB);
    handleMayWrite(CB);
  }

  // Lazily computed and cached answer to "must this alloca live on the
  // coroutine frame?" (question 1 in the file comment above).
  bool getShouldLiveOnFrame() const {
    if (!ShouldLiveOnFrame)
      ShouldLiveOnFrame = computeShouldLiveOnFrame();
    return *ShouldLiveOnFrame;
  }

  bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }

  // Returns a copy of the alias->offset map; aborts if any alias has an
  // unknown offset, since such aliases cannot be recreated off the frame.
  DenseMap<Instruction *, std::optional<APInt>> getAliasesCopy() const {
    assert(getShouldLiveOnFrame() && "This method should only be called if the "
                                     "alloca needs to live on the frame.");
    for (const auto &P : AliasOffetMap)
      if (!P.second)
        report_fatal_error("Unable to handle an alias with unknown offset "
                           "created before CoroBegin.");
    return AliasOffetMap;
  }

private:
  const DominatorTree &DT;
  const coro::Shape &CoroShape;
  const SuspendCrossingInfo &Checker;
  // All alias to the original AllocaInst, created before CoroBegin and used
  // after CoroBegin. Each entry contains the instruction and the offset in the
  // original Alloca. They need to be recreated after CoroBegin off the frame.
  DenseMap<Instruction *, std::optional<APInt>> AliasOffetMap{};
  // Every instruction that uses the alloca or one of its aliases.
  SmallPtrSet<Instruction *, 4> Users{};
  // lifetime.start intrinsics seen, plus the blocks containing them and the
  // blocks containing lifetime.end, for the live-range reachability check.
  SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
  SmallVector<BasicBlock *> LifetimeStartBBs{};
  SmallPtrSet<BasicBlock *, 2> LifetimeEndBBs{};
  // Blocks containing the coroutine's suspend instructions (from CoroShape).
  SmallPtrSet<const BasicBlock *, 2> CoroSuspendBBs{};
  bool MayWriteBeforeCoroBegin{false};
  bool ShouldUseLifetimeStartInfo{true};

  // Cache for getShouldLiveOnFrame(); mutable so the const getter can fill it.
  mutable std::optional<bool> ShouldLiveOnFrame{};

  bool computeShouldLiveOnFrame() const {
    // If lifetime information is available, we check it first since it's
    // more precise. We look at every pair of lifetime.start intrinsic and
    // every basic block that uses the pointer to see if they cross suspension
    // points. The uses cover both direct uses as well as indirect uses.
    if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
      // If there is no explicit lifetime.end, then assume the address can
      // cross suspension points.
      if (LifetimeEndBBs.empty())
        return true;

      // If there is a path from a lifetime.start to a suspend without a
      // corresponding lifetime.end, then the alloca's lifetime persists
      // beyond that suspension point and the alloca must go on the frame.
      llvm::SmallVector<BasicBlock *> Worklist(LifetimeStartBBs);
      if (isManyPotentiallyReachableFromMany(Worklist, CoroSuspendBBs,
                                             &LifetimeEndBBs, &DT))
        return true;

      // Addresses are guaranteed to be identical after every lifetime.start so
      // we cannot use the local stack if the address escaped and there is a
      // suspend point between lifetime markers. This should also cover the
      // case of a single lifetime.start intrinsic in a loop with suspend point.
      if (PI.isEscaped()) {
        for (auto *A : LifetimeStarts) {
          for (auto *B : LifetimeStarts) {
            if (Checker.hasPathOrLoopCrossingSuspendPoint(A->getParent(),
                                                          B->getParent()))
              return true;
          }
        }
      }
      return false;
    }
    // FIXME: Ideally the isEscaped check should come at the beginning.
    // However there are a few loose ends that need to be fixed first before
    // we can do that. We need to make sure we are not over-conservative, so
    // that the data accessed in-between await_suspend and symmetric transfer
    // is always put on the stack, and also data accessed after coro.end is
    // always put on the stack (esp the return object). To fix that, we need
    // to:
    // 1) Potentially treat sret as nocapture in calls
    // 2) Special handle the return object and put it on the stack
    // 3) Utilize lifetime.end intrinsic
    if (PI.isEscaped())
      return true;

    // Without lifetime info: any pair of uses separated by a suspend forces
    // the alloca onto the frame.
    for (auto *U1 : Users)
      for (auto *U2 : Users)
        if (Checker.isDefinitionAcrossSuspend(*U1, U2))
          return true;

    return false;
  }

  // A write not dominated by CoroBegin may happen before CoroBegin (answers
  // question 2 in the file comment above).
  void handleMayWrite(const Instruction &I) {
    if (!DT.dominates(CoroShape.CoroBegin, &I))
      MayWriteBeforeCoroBegin = true;
  }

  bool usedAfterCoroBegin(Instruction &I) {
    for (auto &U : I.uses())
      if (DT.dominates(CoroShape.CoroBegin, U))
        return true;
    return false;
  }

  void handleAlias(Instruction &I) {
    // We track all aliases created prior to CoroBegin but used after.
    // These aliases may need to be recreated after CoroBegin if the alloca
    // need to live on the frame.
    if (DT.dominates(CoroShape.CoroBegin, &I) || !usedAfterCoroBegin(I))
      return;

    if (!IsOffsetKnown) {
      AliasOffetMap[&I].reset();
    } else {
      auto [Itr, Inserted] = AliasOffetMap.try_emplace(&I, Offset);
      if (!Inserted && Itr->second && *Itr->second != Offset) {
        // If we have seen two different possible values for this alias, we set
        // it to empty.
        Itr->second.reset();
      }
    }
  }
};
419} // namespace
420
421static void collectFrameAlloca(AllocaInst *AI, const coro::Shape &Shape,
422 const SuspendCrossingInfo &Checker,
423 SmallVectorImpl<AllocaInfo> &Allocas,
424 const DominatorTree &DT) {
425 if (Shape.CoroSuspends.empty())
426 return;
427
428 // The PromiseAlloca will be specially handled since it needs to be in a
429 // fixed position in the frame.
431 return;
432
433 // The __coro_gro alloca should outlive the promise, make sure we
434 // keep it outside the frame.
435 if (AI->hasMetadata(LLVMContext::MD_coro_outside_frame))
436 return;
437
438 // The code that uses lifetime.start intrinsic does not work for functions
439 // with loops without exit. Disable it on ABIs we know to generate such
440 // code.
441 bool ShouldUseLifetimeStartInfo =
444 AllocaUseVisitor Visitor{AI->getDataLayout(), DT, Shape, Checker,
445 ShouldUseLifetimeStartInfo};
446 Visitor.visitPtr(*AI);
447 if (!Visitor.getShouldLiveOnFrame())
448 return;
449 Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
450 Visitor.getMayWriteBeforeCoroBegin());
451}
452
453} // namespace
454
456 const SuspendCrossingInfo &Checker) {
457 // Collect the spills for arguments and other not-materializable values.
458 for (Argument &A : F.args())
459 for (User *U : A.users())
460 if (Checker.isDefinitionAcrossSuspend(A, U))
461 Spills[&A].push_back(cast<Instruction>(U));
462}
463
465 SpillInfo &Spills, SmallVector<AllocaInfo, 8> &Allocas,
466 SmallVector<Instruction *, 4> &DeadInstructions,
468 const SuspendCrossingInfo &Checker, const DominatorTree &DT,
469 const coro::Shape &Shape) {
470
471 for (Instruction &I : instructions(F)) {
472 // Values returned from coroutine structure intrinsics should not be part
473 // of the Coroutine Frame.
474 if (isNonSpilledIntrinsic(I) || &I == Shape.CoroBegin)
475 continue;
476
477 // Handle alloca.alloc specially here.
478 if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
479 // Check whether the alloca's lifetime is bounded by suspend points.
480 if (isLocalAlloca(AI)) {
481 LocalAllocas.push_back(AI);
482 continue;
483 }
484
485 // If not, do a quick rewrite of the alloca and then add spills of
486 // the rewritten value. The rewrite doesn't invalidate anything in
487 // Spills because the other alloca intrinsics have no other operands
488 // besides AI, and it doesn't invalidate the iteration because we delay
489 // erasing AI.
490 auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
491
492 for (User *U : Alloc->users()) {
493 if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
494 Spills[Alloc].push_back(cast<Instruction>(U));
495 }
496 continue;
497 }
498
499 // Ignore alloca.get; we process this as part of coro.alloca.alloc.
501 continue;
502
503 if (auto *AI = dyn_cast<AllocaInst>(&I)) {
504 collectFrameAlloca(AI, Shape, Checker, Allocas, DT);
505 continue;
506 }
507
508 for (User *U : I.users())
509 if (Checker.isDefinitionAcrossSuspend(I, U)) {
510 // We cannot spill a token.
511 if (I.getType()->isTokenTy())
513 "token definition is separated from the use by a suspend point");
514 Spills[&I].push_back(cast<Instruction>(U));
515 }
516 }
517}
518
520 const SuspendCrossingInfo &Checker) {
521 // We don't want the layout of coroutine frame to be affected
522 // by debug information. So we only choose to salvage dbg.values for
523 // whose value is already in the frame.
524 // We would handle the dbg.values for allocas specially
525 for (auto &Iter : Spills) {
526 auto *V = Iter.first;
528 findDbgValues(V, DVRs);
529 // Add the instructions which carry debug info that is in the frame.
530 for (DbgVariableRecord *DVR : DVRs)
531 if (Checker.isDefinitionAcrossSuspend(*V, DVR->Marker->MarkedInstr))
532 Spills[V].push_back(DVR->Marker->MarkedInstr);
533 }
534}
535
536/// Async and Retcon{Once} conventions assume that all spill uses can be sunk
537/// after the coro.begin intrinsic.
539 CoroBeginInst *CoroBegin,
540 coro::SpillInfo &Spills,
544
545 // Collect all users that precede coro.begin.
546 auto collectUsers = [&](Value *Def) {
547 for (User *U : Def->users()) {
548 auto Inst = cast<Instruction>(U);
549 if (Inst->getParent() != CoroBegin->getParent() ||
550 Dom.dominates(CoroBegin, Inst))
551 continue;
552 if (ToMove.insert(Inst))
553 Worklist.push_back(Inst);
554 }
555 };
556 for (auto &I : Spills)
557 collectUsers(I.first);
558 for (auto &I : Allocas)
559 collectUsers(I.Alloca);
560
561 // Recursively collect users before coro.begin.
562 while (!Worklist.empty()) {
563 auto *Def = Worklist.pop_back_val();
564 for (User *U : Def->users()) {
565 auto Inst = cast<Instruction>(U);
566 if (Dom.dominates(CoroBegin, Inst))
567 continue;
568 if (ToMove.insert(Inst))
569 Worklist.push_back(Inst);
570 }
571 }
572
573 // Sort by dominance.
574 SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
575 llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
576 // If a dominates b it should precede (<) b.
577 return Dom.dominates(A, B);
578 });
579
580 Instruction *InsertPt = CoroBegin->getNextNode();
581 for (Instruction *Inst : InsertionList)
582 Inst->moveBefore(InsertPt->getIterator());
583}
584
586 const DominatorTree &DT) {
587 BasicBlock::iterator InsertPt;
588 if (auto *Arg = dyn_cast<Argument>(Def)) {
589 // For arguments, we will place the store instruction right after
590 // the coroutine frame pointer instruction, i.e. coro.begin.
591 InsertPt = Shape.getInsertPtAfterFramePtr();
592
593 // If we're spilling an Argument, make sure we clear 'captures'
594 // from the coroutine function.
595 Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::Captures);
596 } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
597 // Don't spill immediately after a suspend; splitting assumes
598 // that the suspend will be followed by a branch.
599 InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHIIt();
600 } else {
601 auto *I = cast<Instruction>(Def);
602 if (!DT.dominates(Shape.CoroBegin, I)) {
603 // If it is not dominated by CoroBegin, then spill should be
604 // inserted immediately after CoroFrame is computed.
605 InsertPt = Shape.getInsertPtAfterFramePtr();
606 } else if (auto *II = dyn_cast<InvokeInst>(I)) {
607 // If we are spilling the result of the invoke instruction, split
608 // the normal edge and insert the spill in the new block.
609 auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
610 InsertPt = NewBB->getTerminator()->getIterator();
611 } else if (isa<PHINode>(I)) {
612 // Skip the PHINodes and EH pads instructions.
613 BasicBlock *DefBlock = I->getParent();
614 if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
615 InsertPt = splitBeforeCatchSwitch(CSI)->getIterator();
616 else
617 InsertPt = DefBlock->getFirstInsertionPt();
618 } else {
619 assert(!I->isTerminator() && "unexpected terminator");
620 // For all other values, the spill is placed immediately after
621 // the definition.
622 InsertPt = I->getNextNode()->getIterator();
623 }
624 }
625
626 return InsertPt;
627}
628
629} // End namespace coro.
630
631} // End namespace llvm.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
Definition IVUsers.cpp:48
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
uint64_t IntrinsicInst * II
#define P(N)
StandardInstrumentations SI(Mod->getContext(), Debug, VerifyEach)
This file provides a collection of visitors which walk the (instruction) uses of a pointer.
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
static CleanupPadInst * Create(Value *ParentPad, ArrayRef< Value * > Args={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
This class represents the llvm.coro.begin or llvm.coro.begin.custom.abi instructions.
Definition CoroInstr.h:449
Record of a variable value-assignment, aka a non instruction representation of the dbg....
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:165
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
iterator end()
Get an iterator to the end of the SetVector.
Definition SetVector.h:119
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition SetVector.h:109
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:168
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:356
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const
LLVM Value Representation.
Definition Value.h:75
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:359
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
SmallMapVector< Value *, SmallVector< Instruction *, 2 >, 8 > SpillInfo
Definition SpillUtils.h:20
@ Async
The "async continuation" lowering, where each suspend point creates a single continuation function.
Definition CoroShape.h:48
@ RetconOnce
The "unique returned-continuation" lowering, where each suspend point creates a single continuation f...
Definition CoroShape.h:43
@ Retcon
The "returned-continuation" lowering, where each suspend point creates a single continuation function...
Definition CoroShape.h:36
BasicBlock::iterator getSpillInsertionPt(const coro::Shape &, Value *Def, const DominatorTree &DT)
bool isSuspendBlock(BasicBlock *BB)
void sinkSpillUsesAfterCoroBegin(const DominatorTree &DT, CoroBeginInst *CoroBegin, coro::SpillInfo &Spills, SmallVectorImpl< coro::AllocaInfo > &Allocas)
Async and Retcon{Once} conventions assume that all spill uses can be sunk after the coro....
void collectSpillsFromArgs(SpillInfo &Spills, Function &F, const SuspendCrossingInfo &Checker)
void collectSpillsFromDbgInfo(SpillInfo &Spills, Function &F, const SuspendCrossingInfo &Checker)
void collectSpillsAndAllocasFromInsts(SpillInfo &Spills, SmallVector< AllocaInfo, 8 > &Allocas, SmallVector< Instruction *, 4 > &DeadInstructions, SmallVector< CoroAllocaAllocInst *, 4 > &LocalAllocas, Function &F, const SuspendCrossingInfo &Checker, const DominatorTree &DT, const coro::Shape &Shape)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
auto successors(const MachineBasicBlock *BB)
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1632
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
LLVM_ABI bool isManyPotentiallyReachableFromMany(SmallVectorImpl< BasicBlock * > &Worklist, const SmallPtrSetImpl< const BasicBlock * > &StopSet, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether there is a potentially a path from at least one block in 'Worklist' to at least one...
Definition CFG.cpp:249
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
LLVM_ABI BasicBlock * SplitEdge(BasicBlock *From, BasicBlock *To, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the edge connecting the specified blocks, and return the newly created basic block between From...
SmallVector< AnyCoroSuspendInst *, 4 > CoroSuspends
Definition CoroShape.h:58
LLVM_ABI Value * emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const
Allocate memory according to the rules of the active lowering.
SwitchLoweringStorage SwitchLowering
Definition CoroShape.h:153
CoroBeginInst * CoroBegin
Definition CoroShape.h:54
BasicBlock::iterator getInsertPtAfterFramePtr() const
Definition CoroShape.h:250
LLVM_ABI void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const
Deallocate memory according to the rules of the active lowering.