//===- SafeStack.cpp - Safe Stack Insertion -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass splits the stack into the safe stack (kept as-is for LLVM backend)
// and the unsafe stack (explicitly allocated and managed through the runtime
// support library).
//
// http://clang.llvm.org/docs/SafeStack.html
//
//===----------------------------------------------------------------------===//

#include "SafeStackLayout.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>

using namespace llvm;
using namespace llvm::safestack;

#define DEBUG_TYPE "safe-stack"

namespace llvm {

STATISTIC(NumFunctions, "Total number of functions");
STATISTIC(NumUnsafeStackFunctions, "Number of functions with unsafe stack");
STATISTIC(NumUnsafeStackRestorePointsFunctions,
          "Number of functions that use setjmp or exceptions");

STATISTIC(NumAllocas, "Total number of allocas");
STATISTIC(NumUnsafeStaticAllocas, "Number of unsafe static allocas");
STATISTIC(NumUnsafeDynamicAllocas, "Number of unsafe dynamic allocas");
STATISTIC(NumUnsafeByValArguments, "Number of unsafe byval arguments");
STATISTIC(NumUnsafeStackRestorePoints, "Number of setjmps and landingpads");

} // namespace llvm

/// Use __safestack_pointer_address even if the platform has a faster way of
/// accessing the safe stack pointer.
static cl::opt<bool>
    SafeStackUsePointerAddress("safestack-use-pointer-address",
                               cl::init(false), cl::Hidden);

static cl::opt<bool> ClColoring("safe-stack-coloring",
                                cl::desc("enable safe stack coloring"),
                                cl::Hidden, cl::init(true));

namespace {

/// The SafeStack pass splits the stack of each function into the safe
/// stack, which is only accessed through memory safe dereferences (as
/// determined statically), and the unsafe stack, which contains all
/// local variables that are accessed in ways that we can't prove to
/// be safe.
class SafeStack {
  Function &F;
  const TargetLoweringBase &TL;
  const DataLayout &DL;
  DomTreeUpdater *DTU;
  ScalarEvolution &SE;

  Type *StackPtrTy;
  Type *IntPtrTy;
  Type *Int32Ty;

  Value *UnsafeStackPtr = nullptr;

  /// Unsafe stack alignment. Each stack frame must ensure that the stack is
  /// aligned to this value. We need to re-align the unsafe stack if the
  /// alignment of any object on the stack exceeds this value.
  ///
  /// 16 seems like a reasonable upper bound on the alignment of objects that
  /// we might expect to appear on the stack on most common targets.
  static constexpr Align StackAlignment = Align::Constant<16>();

  /// Return the value of the stack canary.
  Value *getStackGuard(IRBuilder<> &IRB, Function &F);

  /// Load stack guard from the frame and check if it has changed.
  void checkStackGuard(IRBuilder<> &IRB, Function &F, Instruction &RI,
                       AllocaInst *StackGuardSlot, Value *StackGuard);

  /// Find all static allocas, dynamic allocas, return instructions and
  /// stack restore points (exception unwind blocks and setjmp calls) in the
  /// given function and append them to the respective vectors.
  void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas,
                 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
                 SmallVectorImpl<Argument *> &ByValArguments,
                 SmallVectorImpl<Instruction *> &Returns,
                 SmallVectorImpl<Instruction *> &StackRestorePoints);

  /// Calculate the allocation size of a given alloca. Returns 0 if the
  /// size cannot be statically determined.
  uint64_t getStaticAllocaAllocationSize(const AllocaInst* AI);

  /// Allocate space for all static allocas in \p StaticAllocas,
  /// replace allocas with pointers into the unsafe stack.
  ///
  /// \returns A pointer to the top of the unsafe stack after all unsafe static
  /// allocas are allocated.
  Value *moveStaticAllocasToUnsafeStack(IRBuilder<> &IRB, Function &F,
                                        ArrayRef<AllocaInst *> StaticAllocas,
                                        ArrayRef<Argument *> ByValArguments,
                                        Instruction *BasePointer,
                                        AllocaInst *StackGuardSlot);

  /// Generate code to restore the stack after all stack restore points
  /// in \p StackRestorePoints.
  ///
  /// \returns A local variable in which to maintain the dynamic top of the
  /// unsafe stack if needed.
  AllocaInst *
  createStackRestorePoints(IRBuilder<> &IRB, Function &F,
                           ArrayRef<Instruction *> StackRestorePoints,
                           Value *StaticTop, bool NeedDynamicTop);

  /// Replace all allocas in \p DynamicAllocas with code to allocate
  /// space dynamically on the unsafe stack and store the dynamic unsafe stack
  /// top to \p DynamicTop if non-null.
  void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr,
                                       AllocaInst *DynamicTop,
                                       ArrayRef<AllocaInst *> DynamicAllocas);

  bool IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize);

  bool IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
                          const Value *AllocaPtr, uint64_t AllocaSize);
  bool IsAccessSafe(Value *Addr, uint64_t Size, const Value *AllocaPtr,
                    uint64_t AllocaSize);

  bool ShouldInlinePointerAddress(CallInst &CI);
  void TryInlinePointerAddress();

public:
  SafeStack(Function &F, const TargetLoweringBase &TL, const DataLayout &DL,
            DomTreeUpdater *DTU, ScalarEvolution &SE)
      : F(F), TL(TL), DL(DL), DTU(DTU), SE(SE),
        StackPtrTy(DL.getAllocaPtrType(F.getContext())),
        IntPtrTy(DL.getIntPtrType(F.getContext())),
        Int32Ty(Type::getInt32Ty(F.getContext())) {}

  // Run the transformation on the associated function.
  // Returns whether the function was changed.
  bool run();
};

constexpr Align SafeStack::StackAlignment;

uint64_t SafeStack::getStaticAllocaAllocationSize(const AllocaInst* AI) {
  uint64_t Size = DL.getTypeAllocSize(AI->getAllocatedType());
  if (AI->isArrayAllocation()) {
    auto C = dyn_cast<ConstantInt>(AI->getArraySize());
    if (!C)
      return 0;
    Size *= C->getZExtValue();
  }
  return Size;
}

bool SafeStack::IsAccessSafe(Value *Addr, uint64_t AccessSize,
                             const Value *AllocaPtr, uint64_t AllocaSize) {
  const SCEV *AddrExpr = SE.getSCEV(Addr);
  const auto *Base = dyn_cast<SCEVUnknown>(SE.getPointerBase(AddrExpr));
  if (!Base || Base->getValue() != AllocaPtr) {
    LLVM_DEBUG(
        dbgs() << "[SafeStack] "
               << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
               << *AllocaPtr << "\n"
               << "SCEV " << *AddrExpr << " not directly based on alloca\n");
    return false;
  }

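  // The access is considered safe only if, for every value the offset
  // expression can take, the accessed byte range
  // [offset, offset + AccessSize) lies within the object's byte range
  // [0, AllocaSize).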
  const SCEV *Expr = SE.removePointerBase(AddrExpr);
  uint64_t BitWidth = SE.getTypeSizeInBits(Expr->getType());
  ConstantRange AccessStartRange = SE.getUnsignedRange(Expr);
  ConstantRange SizeRange =
      ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AccessSize));
  ConstantRange AccessRange = AccessStartRange.add(SizeRange);
  ConstantRange AllocaRange =
      ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AllocaSize));
  bool Safe = AllocaRange.contains(AccessRange);

  LLVM_DEBUG(
      dbgs() << "[SafeStack] "
             << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
             << *AllocaPtr << "\n"
             << " Access " << *Addr << "\n"
             << " SCEV " << *Expr
             << " U: " << SE.getUnsignedRange(Expr)
             << ", S: " << SE.getSignedRange(Expr) << "\n"
             << " Range " << AccessRange << "\n"
             << " AllocaRange " << AllocaRange << "\n"
             << " " << (Safe ? "safe" : "unsafe") << "\n");

  return Safe;
}

bool SafeStack::IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
                                   const Value *AllocaPtr,
                                   uint64_t AllocaSize) {
  if (auto MTI = dyn_cast<MemTransferInst>(MI)) {
    if (MTI->getRawSource() != U && MTI->getRawDest() != U)
      return true;
  } else {
    if (MI->getRawDest() != U)
      return true;
  }

  auto Len = MI->getLengthInBytes();
  // Non-constant size => unsafe. FIXME: try SCEV getRange.
  if (!Len) return false;
  return IsAccessSafe(U, Len->getZExtValue(), AllocaPtr, AllocaSize);
}

/// Check whether a given allocation must be put on the safe
/// stack or not. The function analyzes all uses of AI and checks whether it is
/// only accessed in a memory safe way (as decided statically).
bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) {
  // Go through all uses of this alloca and check whether all accesses to the
  // allocated object are statically known to be memory safe and, hence, the
  // object can be placed on the safe stack.
  SmallPtrSet<const Value *, 16> Visited;
  SmallVector<const Value *, 8> WorkList;
  WorkList.push_back(AllocaPtr);

  // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc.
  while (!WorkList.empty()) {
    const Value *V = WorkList.pop_back_val();
    for (const Use &UI : V->uses()) {
      auto I = cast<const Instruction>(UI.getUser());
      assert(V == UI.get());

      switch (I->getOpcode()) {
      case Instruction::Load:
        if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getType()), AllocaPtr,
                          AllocaSize))
          return false;
        break;

      case Instruction::VAArg:
        // "va-arg" from a pointer is safe.
        break;
      case Instruction::Store:
        if (V == I->getOperand(0)) {
          // Stored the pointer - conservatively assume it may be unsafe.
          LLVM_DEBUG(dbgs()
                     << "[SafeStack] Unsafe alloca: " << *AllocaPtr
                     << "\n store of address: " << *I << "\n");
          return false;
        }

        if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getOperand(0)->getType()),
                          AllocaPtr, AllocaSize))
          return false;
        break;

      case Instruction::Ret:
        // Information leak.
        return false;

      case Instruction::Call:
      case Instruction::Invoke: {
        const CallBase &CS = *cast<CallBase>(I);

        if (I->isLifetimeStartOrEnd())
          continue;

        if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
          if (!IsMemIntrinsicSafe(MI, UI, AllocaPtr, AllocaSize)) {
            LLVM_DEBUG(dbgs()
                       << "[SafeStack] Unsafe alloca: " << *AllocaPtr
                       << "\n unsafe memintrinsic: " << *I << "\n");
            return false;
          }
          continue;
        }

        // LLVM 'nocapture' attribute is only set for arguments whose address
        // is not stored, passed around, or used in any other non-trivial way.
        // We assume that passing a pointer to an object as a 'nocapture
        // readnone' argument is safe.
        // FIXME: a more precise solution would require an interprocedural
        // analysis here, which would look at all uses of an argument inside
        // the function being called.
        auto B = CS.arg_begin(), E = CS.arg_end();
        for (const auto *A = B; A != E; ++A)
          if (A->get() == V)
            if (!(CS.doesNotCapture(A - B) && (CS.doesNotAccessMemory(A - B) ||
                                               CS.doesNotAccessMemory()))) {
              LLVM_DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
                                << "\n unsafe call: " << *I << "\n");
              return false;
            }
        continue;
      }

      default:
        if (Visited.insert(I).second)
          WorkList.push_back(cast<const Instruction>(I));
      }
    }
  }

  // All uses of the alloca are safe, we can place it on the safe stack.
  return true;
}

Value *SafeStack::getStackGuard(IRBuilder<> &IRB, Function &F) {
  Value *StackGuardVar = TL.getIRStackGuard(IRB);
  Module *M = F.getParent();

  if (!StackGuardVar) {
    TL.insertSSPDeclarations(*M);
    return IRB.CreateIntrinsic(Intrinsic::stackguard, {});
  }

  return IRB.CreateLoad(StackPtrTy, StackGuardVar, "StackGuard");
}

void SafeStack::findInsts(Function &F,
                          SmallVectorImpl<AllocaInst *> &StaticAllocas,
                          SmallVectorImpl<AllocaInst *> &DynamicAllocas,
                          SmallVectorImpl<Argument *> &ByValArguments,
                          SmallVectorImpl<Instruction *> &Returns,
                          SmallVectorImpl<Instruction *> &StackRestorePoints) {
  for (Instruction &I : instructions(&F)) {
    if (auto AI = dyn_cast<AllocaInst>(&I)) {
      ++NumAllocas;

      uint64_t Size = getStaticAllocaAllocationSize(AI);
      if (IsSafeStackAlloca(AI, Size))
        continue;

      if (AI->isStaticAlloca()) {
        ++NumUnsafeStaticAllocas;
        StaticAllocas.push_back(AI);
      } else {
        ++NumUnsafeDynamicAllocas;
        DynamicAllocas.push_back(AI);
      }
    } else if (auto RI = dyn_cast<ReturnInst>(&I)) {
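      // A musttail call must be immediately followed by the return, so the
      // unsafe stack pointer has to be restored before the call rather than
      // before the ret; record the call itself as the point to instrument.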
      if (CallInst *CI = I.getParent()->getTerminatingMustTailCall())
        Returns.push_back(CI);
      else
        Returns.push_back(RI);
    } else if (auto CI = dyn_cast<CallInst>(&I)) {
      // setjmps require stack restore.
      if (CI->getCalledFunction() && CI->canReturnTwice())
        StackRestorePoints.push_back(CI);
    } else if (auto LP = dyn_cast<LandingPadInst>(&I)) {
      // Exception landing pads require stack restore.
      StackRestorePoints.push_back(LP);
    } else if (auto II = dyn_cast<IntrinsicInst>(&I)) {
      if (II->getIntrinsicID() == Intrinsic::gcroot)
        report_fatal_error(
            "gcroot intrinsic not compatible with safestack attribute");
    }
  }
  for (Argument &Arg : F.args()) {
    if (!Arg.hasByValAttr())
      continue;
    uint64_t Size = DL.getTypeStoreSize(Arg.getParamByValType());
    if (IsSafeStackAlloca(&Arg, Size))
      continue;

    ++NumUnsafeByValArguments;
    ByValArguments.push_back(&Arg);
  }
}

AllocaInst *
SafeStack::createStackRestorePoints(IRBuilder<> &IRB, Function &F,
                                    ArrayRef<Instruction *> StackRestorePoints,
                                    Value *StaticTop, bool NeedDynamicTop) {
  assert(StaticTop && "The stack top isn't set.");

  if (StackRestorePoints.empty())
    return nullptr;

  // We need the current value of the shadow stack pointer to restore
  // after longjmp or exception catching.

  // FIXME: On some platforms this could be handled by the longjmp/exception
  // runtime itself.

  AllocaInst *DynamicTop = nullptr;
  if (NeedDynamicTop) {
    // If we also have dynamic alloca's, the stack pointer value changes
    // throughout the function. For now we store it in an alloca.
    DynamicTop = IRB.CreateAlloca(StackPtrTy, /*ArraySize=*/nullptr,
                                  "unsafe_stack_dynamic_ptr");
    IRB.CreateStore(StaticTop, DynamicTop);
  }

  // Restore current stack pointer after longjmp/exception catch.
  for (Instruction *I : StackRestorePoints) {
    ++NumUnsafeStackRestorePoints;

    IRB.SetInsertPoint(I->getNextNode());
    Value *CurrentTop =
        DynamicTop ? IRB.CreateLoad(StackPtrTy, DynamicTop) : StaticTop;
    IRB.CreateStore(CurrentTop, UnsafeStackPtr);
  }

  return DynamicTop;
}

void SafeStack::checkStackGuard(IRBuilder<> &IRB, Function &F, Instruction &RI,
                                AllocaInst *StackGuardSlot, Value *StackGuard) {
  Value *V = IRB.CreateLoad(StackPtrTy, StackGuardSlot);
  Value *Cmp = IRB.CreateICmpNE(StackGuard, V);

  auto SuccessProb = BranchProbabilityInfo::getBranchProbStackProtector(true);
  auto FailureProb = BranchProbabilityInfo::getBranchProbStackProtector(false);
  MDNode *Weights = MDBuilder(F.getContext())
                        .createBranchWeights(SuccessProb.getNumerator(),
                                             FailureProb.getNumerator());
  Instruction *CheckTerm =
      SplitBlockAndInsertIfThen(Cmp, &RI, /* Unreachable */ true, Weights, DTU);
  IRBuilder<> IRBFail(CheckTerm);
  // FIXME: respect -fsanitize-trap / -ftrap-function here?
  const char *StackChkFailName =
      TL.getLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL);
  if (!StackChkFailName) {
    F.getContext().emitError(
        "no libcall available for stackprotector check fail");
    return;
  }

  FunctionCallee StackChkFail =
      F.getParent()->getOrInsertFunction(StackChkFailName, IRB.getVoidTy());
  IRBFail.CreateCall(StackChkFail, {});
}

/// We explicitly compute and set the unsafe stack layout for all unsafe
/// static alloca instructions. We save the unsafe "base pointer" in the
/// prologue into a local variable and restore it in the epilogue.
Value *SafeStack::moveStaticAllocasToUnsafeStack(
    IRBuilder<> &IRB, Function &F, ArrayRef<AllocaInst *> StaticAllocas,
    ArrayRef<Argument *> ByValArguments, Instruction *BasePointer,
    AllocaInst *StackGuardSlot) {
  if (StaticAllocas.empty() && ByValArguments.empty())
    return BasePointer;

  DIBuilder DIB(*F.getParent());

  StackLifetime SSC(F, StaticAllocas, StackLifetime::LivenessType::May);
  static const StackLifetime::LiveRange NoColoringRange(1, true);
  if (ClColoring)
    SSC.run();

  for (const auto *I : SSC.getMarkers()) {
    auto *Op = dyn_cast<Instruction>(I->getOperand(1));
    const_cast<IntrinsicInst *>(I)->eraseFromParent();
    // Remove the operand bitcast, too, if it has no more uses left.
    if (Op && Op->use_empty())
      Op->eraseFromParent();
  }

  // Unsafe stack always grows down.
  StackLayout SSL(StackAlignment);
  if (StackGuardSlot) {
    Type *Ty = StackGuardSlot->getAllocatedType();
    Align Align = std::max(DL.getPrefTypeAlign(Ty), StackGuardSlot->getAlign());
    SSL.addObject(StackGuardSlot, getStaticAllocaAllocationSize(StackGuardSlot),
                  Align, SSC.getFullLiveRange());
  }

  for (Argument *Arg : ByValArguments) {
    Type *Ty = Arg->getParamByValType();
    uint64_t Size = DL.getTypeStoreSize(Ty);
    if (Size == 0)
      Size = 1; // Don't create zero-sized stack objects.

    // Ensure the object is properly aligned.
    Align Align = DL.getPrefTypeAlign(Ty);
    if (auto A = Arg->getParamAlign())
      Align = std::max(Align, *A);
    SSL.addObject(Arg, Size, Align, SSC.getFullLiveRange());
  }

  for (AllocaInst *AI : StaticAllocas) {
    Type *Ty = AI->getAllocatedType();
    uint64_t Size = getStaticAllocaAllocationSize(AI);
    if (Size == 0)
      Size = 1; // Don't create zero-sized stack objects.

    // Ensure the object is properly aligned.
    Align Align = std::max(DL.getPrefTypeAlign(Ty), AI->getAlign());

    SSL.addObject(AI, Size, Align,
                  ClColoring ? SSC.getLiveRange(AI) : NoColoringRange);
  }

  SSL.computeLayout();
  Align FrameAlignment = SSL.getFrameAlignment();

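  // If any object needs more than the default unsafe-stack alignment, the
  // base pointer is re-aligned below by masking off its low bits; the unsafe
  // stack grows down, so rounding down only consumes unused space below the
  // previous top.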
  // FIXME: tell SSL that we start at a less-than-MaxAlignment aligned location
  // (AlignmentSkew).
  if (FrameAlignment > StackAlignment) {
    // Re-align the base pointer according to the max requested alignment.
    IRB.SetInsertPoint(BasePointer->getNextNode());
    BasePointer = cast<Instruction>(IRB.CreateIntToPtr(
        IRB.CreateAnd(
            IRB.CreatePtrToInt(BasePointer, IntPtrTy),
            ConstantInt::get(IntPtrTy, ~(FrameAlignment.value() - 1))),
        StackPtrTy));
  }

  IRB.SetInsertPoint(BasePointer->getNextNode());

  if (StackGuardSlot) {
    unsigned Offset = SSL.getObjectOffset(StackGuardSlot);
    Value *Off =
        IRB.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -Offset));
    Value *NewAI =
        IRB.CreateBitCast(Off, StackGuardSlot->getType(), "StackGuardSlot");

    // Replace alloc with the new location.
    StackGuardSlot->replaceAllUsesWith(NewAI);
    StackGuardSlot->eraseFromParent();
  }

  for (Argument *Arg : ByValArguments) {
    unsigned Offset = SSL.getObjectOffset(Arg);
    MaybeAlign Align(SSL.getObjectAlignment(Arg));
    Type *Ty = Arg->getParamByValType();

    uint64_t Size = DL.getTypeStoreSize(Ty);
    if (Size == 0)
      Size = 1; // Don't create zero-sized stack objects.

    Value *Off =
        IRB.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -Offset));
    Value *NewArg = IRB.CreateBitCast(Off, Arg->getType(),
                                      Arg->getName() + ".unsafe-byval");

    // Replace alloc with the new location.
    replaceDbgDeclare(Arg, BasePointer, DIB, DIExpression::ApplyOffset,
                      -Offset);
    Arg->replaceAllUsesWith(NewArg);
    IRB.SetInsertPoint(cast<Instruction>(NewArg)->getNextNode());
    IRB.CreateMemCpy(Off, Align, Arg, Arg->getParamAlign(), Size);
  }

  // Allocate space for every unsafe static AllocaInst on the unsafe stack.
  for (AllocaInst *AI : StaticAllocas) {
    IRB.SetInsertPoint(AI);
    unsigned Offset = SSL.getObjectOffset(AI);

    replaceDbgDeclare(AI, BasePointer, DIB, DIExpression::ApplyOffset, -Offset);
    replaceDbgValueForAlloca(AI, BasePointer, DIB, -Offset);

    // Replace uses of the alloca with the new location.
    // Insert address calculation close to each use to work around PR27844.
    std::string Name = std::string(AI->getName()) + ".unsafe";
    while (!AI->use_empty()) {
      Use &U = *AI->use_begin();
      Instruction *User = cast<Instruction>(U.getUser());

      // Drop lifetime markers now that this is no longer an alloca.
      // SafeStack has already performed its own stack coloring.
      if (User->isLifetimeStartOrEnd()) {
        User->eraseFromParent();
        continue;
      }

      Instruction *InsertBefore;
      if (auto *PHI = dyn_cast<PHINode>(User))
        InsertBefore = PHI->getIncomingBlock(U)->getTerminator();
      else
        InsertBefore = User;

      IRBuilder<> IRBUser(InsertBefore);
      Value *Off =
          IRBUser.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -Offset));
      Value *Replacement =
          IRBUser.CreateAddrSpaceCast(Off, AI->getType(), Name);

      if (auto *PHI = dyn_cast<PHINode>(User))
        // PHI nodes may have multiple incoming edges from the same BB (why??),
        // all must be updated at once with the same incoming value.
        PHI->setIncomingValueForBlock(PHI->getIncomingBlock(U), Replacement);
      else
        U.set(Replacement);
    }

    AI->eraseFromParent();
  }

  // Re-align BasePointer so that our callees would see it aligned as
  // expected.
  // FIXME: no need to update BasePointer in leaf functions.
  unsigned FrameSize = alignTo(SSL.getFrameSize(), StackAlignment);

  MDBuilder MDB(F.getContext());
  SmallVector<Metadata *, 2> Data;
  Data.push_back(MDB.createString("unsafe-stack-size"));
  Data.push_back(MDB.createConstant(ConstantInt::get(Int32Ty, FrameSize)));
  MDNode *MD = MDTuple::get(F.getContext(), Data);
  F.setMetadata(LLVMContext::MD_annotation, MD);

  // Update shadow stack pointer in the function epilogue.
  IRB.SetInsertPoint(BasePointer->getNextNode());

  Value *StaticTop =
      IRB.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -FrameSize),
                       "unsafe_stack_static_top");
  IRB.CreateStore(StaticTop, UnsafeStackPtr);
  return StaticTop;
}

void SafeStack::moveDynamicAllocasToUnsafeStack(
    Function &F, Value *UnsafeStackPtr, AllocaInst *DynamicTop,
    ArrayRef<AllocaInst *> DynamicAllocas) {
  DIBuilder DIB(*F.getParent());

  for (AllocaInst *AI : DynamicAllocas) {
    IRBuilder<> IRB(AI);

    // Compute the new SP value (after AI).
    Value *ArraySize = AI->getArraySize();
    if (ArraySize->getType() != IntPtrTy)
      ArraySize = IRB.CreateIntCast(ArraySize, IntPtrTy, false);

    Type *Ty = AI->getAllocatedType();
    uint64_t TySize = DL.getTypeAllocSize(Ty);
    Value *Size = IRB.CreateMul(ArraySize, ConstantInt::get(IntPtrTy, TySize));

    Value *SP = IRB.CreatePtrToInt(IRB.CreateLoad(StackPtrTy, UnsafeStackPtr),
                                   IntPtrTy);
    SP = IRB.CreateSub(SP, Size);

    // Align the SP value to satisfy the AllocaInst, type and stack alignments.
    auto Align = std::max(std::max(DL.getPrefTypeAlign(Ty), AI->getAlign()),
                          StackAlignment);

    Value *NewTop = IRB.CreateIntToPtr(
        IRB.CreateAnd(SP,
                      ConstantInt::get(IntPtrTy, ~uint64_t(Align.value() - 1))),
        StackPtrTy);

    // Save the stack pointer.
    IRB.CreateStore(NewTop, UnsafeStackPtr);
    if (DynamicTop)
      IRB.CreateStore(NewTop, DynamicTop);

    Value *NewAI = IRB.CreatePointerCast(NewTop, AI->getType());
    if (AI->hasName() && isa<Instruction>(NewAI))
      NewAI->takeName(AI);

    replaceDbgDeclare(AI, NewAI, DIB, DIExpression::ApplyOffset, 0);
    AI->replaceAllUsesWith(NewAI);
    AI->eraseFromParent();
  }

  if (!DynamicAllocas.empty()) {
    // Now go through the instructions again, replacing stacksave/stackrestore.
    for (Instruction &I : llvm::make_early_inc_range(instructions(&F))) {
      auto *II = dyn_cast<IntrinsicInst>(&I);
      if (!II)
        continue;

      if (II->getIntrinsicID() == Intrinsic::stacksave) {
        IRBuilder<> IRB(II);
        Instruction *LI = IRB.CreateLoad(StackPtrTy, UnsafeStackPtr);
        LI->takeName(II);
        II->replaceAllUsesWith(LI);
        II->eraseFromParent();
      } else if (II->getIntrinsicID() == Intrinsic::stackrestore) {
        IRBuilder<> IRB(II);
        Instruction *SI = IRB.CreateStore(II->getArgOperand(0), UnsafeStackPtr);
        SI->takeName(II);
        assert(II->use_empty());
        II->eraseFromParent();
      }
    }
  }
}

bool SafeStack::ShouldInlinePointerAddress(CallInst &CI) {
  Function *Callee = CI.getCalledFunction();
  if (CI.hasFnAttr(Attribute::AlwaysInline) &&
      isInlineViable(*Callee).isSuccess())
    return true;
  if (Callee->isInterposable() || Callee->hasFnAttribute(Attribute::NoInline) ||
      CI.isNoInline())
    return false;
  return true;
}

void SafeStack::TryInlinePointerAddress() {
  auto *CI = dyn_cast<CallInst>(UnsafeStackPtr);
  if (!CI)
    return;

  if (F.hasOptNone())
    return;

  Function *Callee = CI->getCalledFunction();
  if (!Callee || Callee->isDeclaration())
    return;

  if (!ShouldInlinePointerAddress(*CI))
    return;

  InlineFunctionInfo IFI;
  InlineFunction(*CI, IFI);
}

bool SafeStack::run() {
  assert(F.hasFnAttribute(Attribute::SafeStack) &&
         "Can't run SafeStack on a function without the attribute");
  assert(!F.isDeclaration() && "Can't run SafeStack on a function declaration");

  ++NumFunctions;

  SmallVector<AllocaInst *, 16> StaticAllocas;
  SmallVector<AllocaInst *, 4> DynamicAllocas;
  SmallVector<Argument *, 4> ByValArguments;
  SmallVector<Instruction *, 4> Returns;

  // Collect all points where stack gets unwound and needs to be restored.
  // This is only necessary because the runtime (setjmp and unwind code) is
  // not aware of the unsafe stack and won't unwind/restore it properly.
  // To work around this problem without changing the runtime, we insert
  // instrumentation to restore the unsafe stack pointer when necessary.
  SmallVector<Instruction *, 4> StackRestorePoints;

  // Find all static and dynamic alloca instructions that must be moved to the
  // unsafe stack, all return instructions and stack restore points.
  findInsts(F, StaticAllocas, DynamicAllocas, ByValArguments, Returns,
            StackRestorePoints);

  if (StaticAllocas.empty() && DynamicAllocas.empty() &&
      ByValArguments.empty() && StackRestorePoints.empty())
    return false; // Nothing to do in this function.

  if (!StaticAllocas.empty() || !DynamicAllocas.empty() ||
      !ByValArguments.empty())
    ++NumUnsafeStackFunctions; // This function has the unsafe stack.

  if (!StackRestorePoints.empty())
    ++NumUnsafeStackRestorePointsFunctions;

  IRBuilder<> IRB(&F.front(), F.begin()->getFirstInsertionPt());
  // Calls must always have a debug location, or else inlining breaks. So
  // we explicitly set an artificial debug location here.
  if (DISubprogram *SP = F.getSubprogram())
    IRB.SetCurrentDebugLocation(
        DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP));
  if (SafeStackUsePointerAddress) {
    const char *SafestackPointerAddressName =
        TL.getLibcallName(RTLIB::SAFESTACK_POINTER_ADDRESS);
    if (!SafestackPointerAddressName) {
      F.getContext().emitError(
          "no libcall available for safestack pointer address");
      return false;
    }

    FunctionCallee Fn = F.getParent()->getOrInsertFunction(
        SafestackPointerAddressName, IRB.getPtrTy(0));
    UnsafeStackPtr = IRB.CreateCall(Fn);
  } else {
    UnsafeStackPtr = TL.getSafeStackPointerLocation(IRB);
  }

  // Load the current stack pointer (we'll also use it as a base pointer).
  // FIXME: use a dedicated register for it ?
  Instruction *BasePointer =
      IRB.CreateLoad(StackPtrTy, UnsafeStackPtr, false, "unsafe_stack_ptr");
  assert(BasePointer->getType() == StackPtrTy);

  AllocaInst *StackGuardSlot = nullptr;
  // FIXME: implement weaker forms of stack protector.
  if (F.hasFnAttribute(Attribute::StackProtect) ||
      F.hasFnAttribute(Attribute::StackProtectStrong) ||
      F.hasFnAttribute(Attribute::StackProtectReq)) {
    Value *StackGuard = getStackGuard(IRB, F);
    StackGuardSlot = IRB.CreateAlloca(StackPtrTy, nullptr);
    IRB.CreateStore(StackGuard, StackGuardSlot);

    for (Instruction *RI : Returns) {
      IRBuilder<> IRBRet(RI);
      checkStackGuard(IRBRet, F, *RI, StackGuardSlot, StackGuard);
    }
  }

  // The top of the unsafe stack after all unsafe static allocas are
  // allocated.
  Value *StaticTop = moveStaticAllocasToUnsafeStack(
      IRB, F, StaticAllocas, ByValArguments, BasePointer, StackGuardSlot);

  // Safe stack object that stores the current unsafe stack top. It is updated
  // as unsafe dynamic (non-constant-sized) allocas are allocated and freed.
  // This is only needed if we need to restore stack pointer after longjmp
  // or exceptions, and we have dynamic allocations.
  // FIXME: a better alternative might be to store the unsafe stack pointer
  // before setjmp / invoke instructions.
  AllocaInst *DynamicTop = createStackRestorePoints(
      IRB, F, StackRestorePoints, StaticTop, !DynamicAllocas.empty());

  // Handle dynamic allocas.
  moveDynamicAllocasToUnsafeStack(F, UnsafeStackPtr, DynamicTop,
                                  DynamicAllocas);

  // Restore the unsafe stack pointer before each return.
  for (Instruction *RI : Returns) {
    IRB.SetInsertPoint(RI);
    IRB.CreateStore(BasePointer, UnsafeStackPtr);
  }

  TryInlinePointerAddress();

  LLVM_DEBUG(dbgs() << "[SafeStack] safestack applied\n");
  return true;
}

class SafeStackLegacyPass : public FunctionPass {
  const TargetMachine *TM = nullptr;

public:
  static char ID; // Pass identification, replacement for typeid.

  SafeStackLegacyPass() : FunctionPass(ID) {
    initializeSafeStackLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addPreserved<DominatorTreeWrapperPass>();
  }

  bool runOnFunction(Function &F) override {
    LLVM_DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");

    if (!F.hasFnAttribute(Attribute::SafeStack)) {
      LLVM_DEBUG(dbgs() << "[SafeStack] safestack is not requested"
                           " for this function\n");
      return false;
    }

    if (F.isDeclaration()) {
      LLVM_DEBUG(dbgs() << "[SafeStack] function definition"
                           " is not available\n");
      return false;
    }

    TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
    auto *TL = TM->getSubtargetImpl(F)->getTargetLowering();
    if (!TL)
      report_fatal_error("TargetLowering instance is required");

    auto *DL = &F.getDataLayout();
    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    auto &ACT = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);

    // Compute DT and LI only for functions that have the attribute.
    // This is only useful because the legacy pass manager doesn't let us
    // compute analyses lazily.

    DominatorTree *DT;
    bool ShouldPreserveDominatorTree;
    std::optional<DominatorTree> LazilyComputedDomTree;

    // Do we already have a DominatorTree available from the previous pass?
    // Note that we should *NOT* require it, to avoid the case where we end up
    // not needing it, but the legacy PM would have computed it for us anyways.
    if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
      DT = &DTWP->getDomTree();
      ShouldPreserveDominatorTree = true;
    } else {
      // Otherwise, we need to compute it.
      LazilyComputedDomTree.emplace(F);
      DT = &*LazilyComputedDomTree;
      ShouldPreserveDominatorTree = false;
    }

    // Likewise, lazily compute loop info.
    LoopInfo LI(*DT);

    DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);

    ScalarEvolution SE(F, TLI, ACT, *DT, LI);

    return SafeStack(F, *TL, *DL, ShouldPreserveDominatorTree ? &DTU : nullptr,
                     SE)
        .run();
  }
};

} // end anonymous namespace

PreservedAnalyses SafeStackPass::run(Function &F,
                                     FunctionAnalysisManager &FAM) {
  LLVM_DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");

  if (!F.hasFnAttribute(Attribute::SafeStack)) {
    LLVM_DEBUG(dbgs() << "[SafeStack] safestack is not requested"
                         " for this function\n");
    return PreservedAnalyses::all();
  }

  if (F.isDeclaration()) {
    LLVM_DEBUG(dbgs() << "[SafeStack] function definition"
                         " is not available\n");
    return PreservedAnalyses::all();
  }

  auto *TL = TM->getSubtargetImpl(F)->getTargetLowering();
  if (!TL)
    report_fatal_error("TargetLowering instance is required");

  auto &DL = F.getDataLayout();

  // preserve DominatorTree
  auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);

  bool Changed = SafeStack(F, *TL, DL, &DTU, SE).run();

  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}

char SafeStackLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(SafeStackLegacyPass, DEBUG_TYPE,
                      "Safe Stack instrumentation pass", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(SafeStackLegacyPass, DEBUG_TYPE,
                    "Safe Stack instrumentation pass", false, false)

FunctionPass *llvm::createSafeStackPass() { return new SafeStackLegacyPass(); }