clang 22.0.0git
CGBuiltin.cpp
Go to the documentation of this file.
1//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGBuiltin.h"
14#include "ABIInfo.h"
15#include "CGCUDARuntime.h"
16#include "CGCXXABI.h"
17#include "CGDebugInfo.h"
18#include "CGObjCRuntime.h"
19#include "CGOpenCLRuntime.h"
20#include "CGRecordLayout.h"
21#include "CGValue.h"
22#include "CodeGenFunction.h"
23#include "CodeGenModule.h"
24#include "ConstantEmitter.h"
25#include "PatternInit.h"
26#include "TargetInfo.h"
27#include "clang/AST/OSLog.h"
31#include "llvm/IR/InlineAsm.h"
32#include "llvm/IR/Instruction.h"
33#include "llvm/IR/Intrinsics.h"
34#include "llvm/IR/IntrinsicsX86.h"
35#include "llvm/IR/MatrixBuilder.h"
36#include "llvm/Support/ConvertUTF.h"
37#include "llvm/Support/ScopedPrinter.h"
38#include <optional>
39#include <utility>
40
41using namespace clang;
42using namespace CodeGen;
43using namespace llvm;
44
45/// Some builtins do not have library implementation on some targets and
46/// are instead emitted as LLVM IRs by some target builtin emitters.
47/// FIXME: Remove this when library support is added
48static bool shouldEmitBuiltinAsIR(unsigned BuiltinID,
49 const Builtin::Context &BI,
50 const CodeGenFunction &CGF) {
51 if (!CGF.CGM.getLangOpts().MathErrno &&
55 switch (BuiltinID) {
56 default:
57 return false;
58 case Builtin::BIlogbf:
59 case Builtin::BI__builtin_logbf:
60 case Builtin::BIlogb:
61 case Builtin::BI__builtin_logb:
62 case Builtin::BIscalbnf:
63 case Builtin::BI__builtin_scalbnf:
64 case Builtin::BIscalbn:
65 case Builtin::BI__builtin_scalbn:
66 return true;
67 }
68 }
69 return false;
70}
71
73 unsigned BuiltinID, const CallExpr *E,
74 ReturnValueSlot ReturnValue,
75 llvm::Triple::ArchType Arch) {
76 // When compiling in HipStdPar mode we have to be conservative in rejecting
77 // target specific features in the FE, and defer the possible error to the
78 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
79 // referenced by an accelerator executable function, we emit an error.
80 // Returning nullptr here leads to the builtin being handled in
81 // EmitStdParUnsupportedBuiltin.
82 if (CGF->getLangOpts().HIPStdPar && CGF->getLangOpts().CUDAIsDevice &&
83 Arch != CGF->getTarget().getTriple().getArch())
84 return nullptr;
85
86 switch (Arch) {
87 case llvm::Triple::arm:
88 case llvm::Triple::armeb:
89 case llvm::Triple::thumb:
90 case llvm::Triple::thumbeb:
91 return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
92 case llvm::Triple::aarch64:
93 case llvm::Triple::aarch64_32:
94 case llvm::Triple::aarch64_be:
95 return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
96 case llvm::Triple::bpfeb:
97 case llvm::Triple::bpfel:
98 return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
99 case llvm::Triple::dxil:
100 return CGF->EmitDirectXBuiltinExpr(BuiltinID, E);
101 case llvm::Triple::x86:
102 case llvm::Triple::x86_64:
103 return CGF->EmitX86BuiltinExpr(BuiltinID, E);
104 case llvm::Triple::ppc:
105 case llvm::Triple::ppcle:
106 case llvm::Triple::ppc64:
107 case llvm::Triple::ppc64le:
108 return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
109 case llvm::Triple::r600:
110 case llvm::Triple::amdgcn:
111 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
112 case llvm::Triple::systemz:
113 return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
114 case llvm::Triple::nvptx:
115 case llvm::Triple::nvptx64:
116 return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
117 case llvm::Triple::wasm32:
118 case llvm::Triple::wasm64:
119 return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
120 case llvm::Triple::hexagon:
121 return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
122 case llvm::Triple::riscv32:
123 case llvm::Triple::riscv64:
124 return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
125 case llvm::Triple::spirv32:
126 case llvm::Triple::spirv64:
127 if (CGF->getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
128 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
129 [[fallthrough]];
130 case llvm::Triple::spirv:
131 return CGF->EmitSPIRVBuiltinExpr(BuiltinID, E);
132 default:
133 return nullptr;
134 }
135}
136
138 const CallExpr *E,
140 if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
141 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
143 this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
144 ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
145 }
146
147 return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
148 getTarget().getTriple().getArch());
149}
150
151static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
152 Align AlignmentInBytes) {
153 ConstantInt *Byte;
154 switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
156 // Nothing to initialize.
157 return;
159 Byte = CGF.Builder.getInt8(0x00);
160 break;
162 llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
163 Byte = llvm::dyn_cast<llvm::ConstantInt>(
164 initializationPatternFor(CGF.CGM, Int8));
165 break;
166 }
167 }
168 if (CGF.CGM.stopAutoInit())
169 return;
170 auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
171 I->addAnnotationMetadata("auto-init");
172}
173
174/// getBuiltinLibFunction - Given a builtin id for a function like
175/// "__builtin_fabsf", return a Function* for "fabsf".
177 unsigned BuiltinID) {
178 assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
179
180 // Get the name, skip over the __builtin_ prefix (if necessary). We may have
181 // to build this up so provide a small stack buffer to handle the vast
182 // majority of names.
184 GlobalDecl D(FD);
185
186 // TODO: This list should be expanded or refactored after all GCC-compatible
187 // std libcall builtins are implemented.
188 static SmallDenseMap<unsigned, StringRef, 64> F128Builtins{
189 {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128"},
190 {Builtin::BI__builtin___printf_chk, "__printf_chkieee128"},
191 {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128"},
192 {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128"},
193 {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128"},
194 {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128"},
195 {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128"},
196 {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128"},
197 {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
198 {Builtin::BI__builtin_printf, "__printfieee128"},
199 {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
200 {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
201 {Builtin::BI__builtin_vfprintf, "__vfprintfieee128"},
202 {Builtin::BI__builtin_vprintf, "__vprintfieee128"},
203 {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
204 {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
205 {Builtin::BI__builtin_fscanf, "__fscanfieee128"},
206 {Builtin::BI__builtin_scanf, "__scanfieee128"},
207 {Builtin::BI__builtin_sscanf, "__sscanfieee128"},
208 {Builtin::BI__builtin_vfscanf, "__vfscanfieee128"},
209 {Builtin::BI__builtin_vscanf, "__vscanfieee128"},
210 {Builtin::BI__builtin_vsscanf, "__vsscanfieee128"},
211 {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
212 };
213
214 // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit
215 // IBM 'long double' (i.e. __ibm128). Map to the 'double' versions
216 // if it is 64-bit 'long double' mode.
217 static SmallDenseMap<unsigned, StringRef, 4> AIXLongDouble64Builtins{
218 {Builtin::BI__builtin_frexpl, "frexp"},
219 {Builtin::BI__builtin_ldexpl, "ldexp"},
220 {Builtin::BI__builtin_modfl, "modf"},
221 };
222
223 // If the builtin has been declared explicitly with an assembler label,
224 // use the mangled name. This differs from the plain label on platforms
225 // that prefix labels.
226 if (FD->hasAttr<AsmLabelAttr>())
227 Name = getMangledName(D);
228 else {
229 // TODO: This mutation should also be applied to other targets other than
230 // PPC, after backend supports IEEE 128-bit style libcalls.
231 if (getTriple().isPPC64() &&
232 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
233 F128Builtins.contains(BuiltinID))
234 Name = F128Builtins[BuiltinID];
235 else if (getTriple().isOSAIX() &&
236 &getTarget().getLongDoubleFormat() ==
237 &llvm::APFloat::IEEEdouble() &&
238 AIXLongDouble64Builtins.contains(BuiltinID))
239 Name = AIXLongDouble64Builtins[BuiltinID];
240 else
241 Name = Context.BuiltinInfo.getName(BuiltinID).substr(10);
242 }
243
244 llvm::FunctionType *Ty =
245 cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
246
247 return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
248}
249
250/// Emit the conversions required to turn the given value into an
251/// integer of the given size.
252Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
253 QualType T, llvm::IntegerType *IntType) {
254 V = CGF.EmitToMemory(V, T);
255
256 if (V->getType()->isPointerTy())
257 return CGF.Builder.CreatePtrToInt(V, IntType);
258
259 assert(V->getType() == IntType);
260 return V;
261}
262
263Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
264 QualType T, llvm::Type *ResultType) {
265 V = CGF.EmitFromMemory(V, T);
266
267 if (ResultType->isPointerTy())
268 return CGF.Builder.CreateIntToPtr(V, ResultType);
269
270 assert(V->getType() == ResultType);
271 return V;
272}
273
275 ASTContext &Ctx = CGF.getContext();
276 Address Ptr = CGF.EmitPointerWithAlignment(E->getArg(0));
277 const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
278 unsigned Bytes = Ptr.getElementType()->isPointerTy()
280 : DL.getTypeStoreSize(Ptr.getElementType());
281 unsigned Align = Ptr.getAlignment().getQuantity();
282 if (Align % Bytes != 0) {
283 DiagnosticsEngine &Diags = CGF.CGM.getDiags();
284 Diags.Report(E->getBeginLoc(), diag::warn_sync_op_misaligned);
285 // Force address to be at least naturally-aligned.
286 return Ptr.withAlignment(CharUnits::fromQuantity(Bytes));
287 }
288 return Ptr;
289}
290
291/// Utility to insert an atomic instruction based on Intrinsic::ID
292/// and the expression node.
294 CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
295 AtomicOrdering Ordering) {
296
297 QualType T = E->getType();
298 assert(E->getArg(0)->getType()->isPointerType());
300 E->getArg(0)->getType()->getPointeeType()));
301 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
302
303 Address DestAddr = CheckAtomicAlignment(CGF, E);
304
305 llvm::IntegerType *IntType = llvm::IntegerType::get(
306 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
307
308 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
309 llvm::Type *ValueType = Val->getType();
310 Val = EmitToInt(CGF, Val, T, IntType);
311
312 llvm::Value *Result =
313 CGF.Builder.CreateAtomicRMW(Kind, DestAddr, Val, Ordering);
314 return EmitFromInt(CGF, Result, T, ValueType);
315}
316
318 Value *Val = CGF.EmitScalarExpr(E->getArg(0));
320
321 Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
322 LValue LV = CGF.MakeAddrLValue(Addr, E->getArg(0)->getType());
323 LV.setNontemporal(true);
324 CGF.EmitStoreOfScalar(Val, LV, false);
325 return nullptr;
326}
327
330
331 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
332 LV.setNontemporal(true);
333 return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
334}
335
337 llvm::AtomicRMWInst::BinOp Kind,
338 const CallExpr *E) {
339 return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
340}
341
342/// Utility to insert an atomic instruction based Intrinsic::ID and
343/// the expression node, where the return value is the result of the
344/// operation.
346 llvm::AtomicRMWInst::BinOp Kind,
347 const CallExpr *E,
348 Instruction::BinaryOps Op,
349 bool Invert = false) {
350 QualType T = E->getType();
351 assert(E->getArg(0)->getType()->isPointerType());
353 E->getArg(0)->getType()->getPointeeType()));
354 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
355
356 Address DestAddr = CheckAtomicAlignment(CGF, E);
357
358 llvm::IntegerType *IntType = llvm::IntegerType::get(
359 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
360
361 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
362 llvm::Type *ValueType = Val->getType();
363 Val = EmitToInt(CGF, Val, T, IntType);
364
365 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
366 Kind, DestAddr, Val, llvm::AtomicOrdering::SequentiallyConsistent);
367 Result = CGF.Builder.CreateBinOp(Op, Result, Val);
368 if (Invert)
369 Result =
370 CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
371 llvm::ConstantInt::getAllOnesValue(IntType));
372 Result = EmitFromInt(CGF, Result, T, ValueType);
373 return RValue::get(Result);
374}
375
376/// Utility to insert an atomic cmpxchg instruction.
377///
378/// @param CGF The current codegen function.
379/// @param E Builtin call expression to convert to cmpxchg.
380/// arg0 - address to operate on
381/// arg1 - value to compare with
382/// arg2 - new value
383/// @param ReturnBool Specifies whether to return success flag of
384/// cmpxchg result or the old value.
385///
386/// @returns result of cmpxchg, according to ReturnBool
387///
388/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics
389/// invoke the function EmitAtomicCmpXchgForMSIntrin.
391 bool ReturnBool) {
392 QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
393 Address DestAddr = CheckAtomicAlignment(CGF, E);
394
395 llvm::IntegerType *IntType = llvm::IntegerType::get(
396 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
397
398 Value *Cmp = CGF.EmitScalarExpr(E->getArg(1));
399 llvm::Type *ValueType = Cmp->getType();
400 Cmp = EmitToInt(CGF, Cmp, T, IntType);
401 Value *New = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
402
404 DestAddr, Cmp, New, llvm::AtomicOrdering::SequentiallyConsistent,
405 llvm::AtomicOrdering::SequentiallyConsistent);
406 if (ReturnBool)
407 // Extract boolean success flag and zext it to int.
408 return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
409 CGF.ConvertType(E->getType()));
410 else
411 // Extract old value and emit it using the same type as compare value.
412 return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
413 ValueType);
414}
415
416/// This function should be invoked to emit atomic cmpxchg for Microsoft's
417/// _InterlockedCompareExchange* intrinsics which have the following signature:
418/// T _InterlockedCompareExchange(T volatile *Destination,
419/// T Exchange,
420/// T Comparand);
421///
422/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
423/// cmpxchg *Destination, Comparand, Exchange.
424/// So we need to swap Comparand and Exchange when invoking
425/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
426/// function MakeAtomicCmpXchgValue since it expects the arguments to be
427/// already swapped.
428
429static
431 AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
432 assert(E->getArg(0)->getType()->isPointerType());
434 E->getType(), E->getArg(0)->getType()->getPointeeType()));
435 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
436 E->getArg(1)->getType()));
437 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
438 E->getArg(2)->getType()));
439
440 Address DestAddr = CheckAtomicAlignment(CGF, E);
441
442 auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
443 auto *RTy = Exchange->getType();
444
445 auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
446
447 if (RTy->isPointerTy()) {
448 Exchange = CGF.Builder.CreatePtrToInt(Exchange, CGF.IntPtrTy);
449 Comparand = CGF.Builder.CreatePtrToInt(Comparand, CGF.IntPtrTy);
450 }
451
452 // For Release ordering, the failure ordering should be Monotonic.
453 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
454 AtomicOrdering::Monotonic :
455 SuccessOrdering;
456
457 // The atomic instruction is marked volatile for consistency with MSVC. This
458 // blocks the few atomics optimizations that LLVM has. If we want to optimize
459 // _Interlocked* operations in the future, we will have to remove the volatile
460 // marker.
461 auto *CmpXchg = CGF.Builder.CreateAtomicCmpXchg(
462 DestAddr, Comparand, Exchange, SuccessOrdering, FailureOrdering);
463 CmpXchg->setVolatile(true);
464
465 auto *Result = CGF.Builder.CreateExtractValue(CmpXchg, 0);
466 if (RTy->isPointerTy()) {
467 Result = CGF.Builder.CreateIntToPtr(Result, RTy);
468 }
469
470 return Result;
471}
472
473// 64-bit Microsoft platforms support 128 bit cmpxchg operations. They are
474// prototyped like this:
475//
476// unsigned char _InterlockedCompareExchange128...(
477// __int64 volatile * _Destination,
478// __int64 _ExchangeHigh,
479// __int64 _ExchangeLow,
480// __int64 * _ComparandResult);
481//
482// Note that Destination is assumed to be at least 16-byte aligned, despite
483// being typed int64.
484
486 const CallExpr *E,
487 AtomicOrdering SuccessOrdering) {
488 assert(E->getNumArgs() == 4);
489 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
490 llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
491 llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
492 Address ComparandAddr = CGF.EmitPointerWithAlignment(E->getArg(3));
493
494 assert(DestPtr->getType()->isPointerTy());
495 assert(!ExchangeHigh->getType()->isPointerTy());
496 assert(!ExchangeLow->getType()->isPointerTy());
497
498 // For Release ordering, the failure ordering should be Monotonic.
499 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
500 ? AtomicOrdering::Monotonic
501 : SuccessOrdering;
502
503 // Convert to i128 pointers and values. Alignment is also overridden for
504 // destination pointer.
505 llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
506 Address DestAddr(DestPtr, Int128Ty,
508 ComparandAddr = ComparandAddr.withElementType(Int128Ty);
509
510 // (((i128)hi) << 64) | ((i128)lo)
511 ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
512 ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
513 ExchangeHigh =
514 CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
515 llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);
516
517 // Load the comparand for the instruction.
518 llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandAddr);
519
520 auto *CXI = CGF.Builder.CreateAtomicCmpXchg(DestAddr, Comparand, Exchange,
521 SuccessOrdering, FailureOrdering);
522
523 // The atomic instruction is marked volatile for consistency with MSVC. This
524 // blocks the few atomics optimizations that LLVM has. If we want to optimize
525 // _Interlocked* operations in the future, we will have to remove the volatile
526 // marker.
527 CXI->setVolatile(true);
528
529 // Store the result as an outparameter.
530 CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
531 ComparandAddr);
532
533 // Get the success boolean and zero extend it to i8.
534 Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
535 return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
536}
537
539 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
540 assert(E->getArg(0)->getType()->isPointerType());
541
542 auto *IntTy = CGF.ConvertType(E->getType());
543 Address DestAddr = CheckAtomicAlignment(CGF, E);
544 auto *Result = CGF.Builder.CreateAtomicRMW(
545 AtomicRMWInst::Add, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
546 return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
547}
548
550 CodeGenFunction &CGF, const CallExpr *E,
551 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
552 assert(E->getArg(0)->getType()->isPointerType());
553
554 auto *IntTy = CGF.ConvertType(E->getType());
555 Address DestAddr = CheckAtomicAlignment(CGF, E);
556 auto *Result = CGF.Builder.CreateAtomicRMW(
557 AtomicRMWInst::Sub, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
558 return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
559}
560
561// Build a plain volatile load.
563 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
564 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
565 CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
566 llvm::Type *ITy =
567 llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
568 llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
569 Load->setVolatile(true);
570 return Load;
571}
572
573// Build a plain volatile store.
575 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
576 Value *Value = CGF.EmitScalarExpr(E->getArg(1));
577 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
578 CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
579 llvm::StoreInst *Store =
580 CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
581 Store->setVolatile(true);
582 return Store;
583}
584
585// Emit a simple mangled intrinsic that has 1 argument and a return type
586// matching the argument type. Depending on mode, this may be a constrained
587// floating-point intrinsic.
589 const CallExpr *E, unsigned IntrinsicID,
590 unsigned ConstrainedIntrinsicID) {
591 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
592
593 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
594 if (CGF.Builder.getIsFPConstrained()) {
595 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
596 return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
597 } else {
598 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
599 return CGF.Builder.CreateCall(F, Src0);
600 }
601}
602
603// Emit an intrinsic that has 2 operands of the same type as its result.
604// Depending on mode, this may be a constrained floating-point intrinsic.
606 const CallExpr *E, unsigned IntrinsicID,
607 unsigned ConstrainedIntrinsicID) {
608 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
609 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
610
611 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
612 if (CGF.Builder.getIsFPConstrained()) {
613 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
614 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
615 } else {
616 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
617 return CGF.Builder.CreateCall(F, { Src0, Src1 });
618 }
619}
620
621// Has second type mangled argument.
622static Value *
624 Intrinsic::ID IntrinsicID,
625 Intrinsic::ID ConstrainedIntrinsicID) {
626 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
627 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
628
629 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
630 if (CGF.Builder.getIsFPConstrained()) {
631 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
632 {Src0->getType(), Src1->getType()});
633 return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
634 }
635
636 Function *F =
637 CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()});
638 return CGF.Builder.CreateCall(F, {Src0, Src1});
639}
640
641// Emit an intrinsic that has 3 operands of the same type as its result.
642// Depending on mode, this may be a constrained floating-point intrinsic.
644 const CallExpr *E, unsigned IntrinsicID,
645 unsigned ConstrainedIntrinsicID) {
646 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
647 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
648 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
649
650 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
651 if (CGF.Builder.getIsFPConstrained()) {
652 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
653 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
654 } else {
655 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
656 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
657 }
658}
659
660// Emit an intrinsic that has overloaded integer result and fp operand.
661static Value *
663 unsigned IntrinsicID,
664 unsigned ConstrainedIntrinsicID) {
665 llvm::Type *ResultType = CGF.ConvertType(E->getType());
666 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
667
668 if (CGF.Builder.getIsFPConstrained()) {
669 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
670 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
671 {ResultType, Src0->getType()});
672 return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
673 } else {
674 Function *F =
675 CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
676 return CGF.Builder.CreateCall(F, Src0);
677 }
678}
679
681 Intrinsic::ID IntrinsicID) {
682 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
683 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
684
685 QualType IntPtrTy = E->getArg(1)->getType()->getPointeeType();
686 llvm::Type *IntTy = CGF.ConvertType(IntPtrTy);
687 llvm::Function *F =
688 CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), IntTy});
689 llvm::Value *Call = CGF.Builder.CreateCall(F, Src0);
690
691 llvm::Value *Exp = CGF.Builder.CreateExtractValue(Call, 1);
692 LValue LV = CGF.MakeNaturalAlignAddrLValue(Src1, IntPtrTy);
693 CGF.EmitStoreOfScalar(Exp, LV);
694
695 return CGF.Builder.CreateExtractValue(Call, 0);
696}
697
698static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E,
699 Intrinsic::ID IntrinsicID) {
700 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
701 llvm::Value *Dest0 = CGF.EmitScalarExpr(E->getArg(1));
702 llvm::Value *Dest1 = CGF.EmitScalarExpr(E->getArg(2));
703
704 llvm::Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {Val->getType()});
705 llvm::Value *Call = CGF.Builder.CreateCall(F, Val);
706
707 llvm::Value *SinResult = CGF.Builder.CreateExtractValue(Call, 0);
708 llvm::Value *CosResult = CGF.Builder.CreateExtractValue(Call, 1);
709
710 QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
711 LValue SinLV = CGF.MakeNaturalAlignAddrLValue(Dest0, DestPtrType);
712 LValue CosLV = CGF.MakeNaturalAlignAddrLValue(Dest1, DestPtrType);
713
714 llvm::StoreInst *StoreSin =
715 CGF.Builder.CreateStore(SinResult, SinLV.getAddress());
716 llvm::StoreInst *StoreCos =
717 CGF.Builder.CreateStore(CosResult, CosLV.getAddress());
718
719 // Mark the two stores as non-aliasing with each other. The order of stores
720 // emitted by this builtin is arbitrary, enforcing a particular order will
721 // prevent optimizations later on.
722 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
723 MDNode *Domain = MDHelper.createAnonymousAliasScopeDomain();
724 MDNode *AliasScope = MDHelper.createAnonymousAliasScope(Domain);
725 MDNode *AliasScopeList = MDNode::get(Call->getContext(), AliasScope);
726 StoreSin->setMetadata(LLVMContext::MD_alias_scope, AliasScopeList);
727 StoreCos->setMetadata(LLVMContext::MD_noalias, AliasScopeList);
728}
729
730static llvm::Value *emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E,
731 Intrinsic::ID IntrinsicID) {
732 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
733 llvm::Value *IntPartDest = CGF.EmitScalarExpr(E->getArg(1));
734
735 llvm::Value *Call =
736 CGF.Builder.CreateIntrinsic(IntrinsicID, {Val->getType()}, Val);
737
738 llvm::Value *FractionalResult = CGF.Builder.CreateExtractValue(Call, 0);
739 llvm::Value *IntegralResult = CGF.Builder.CreateExtractValue(Call, 1);
740
741 QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
742 LValue IntegralLV = CGF.MakeNaturalAlignAddrLValue(IntPartDest, DestPtrType);
743 CGF.EmitStoreOfScalar(IntegralResult, IntegralLV);
744
745 return FractionalResult;
746}
747
748/// EmitFAbs - Emit a call to @llvm.fabs().
750 Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
751 llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
752 Call->setDoesNotAccessMemory();
753 return Call;
754}
755
756/// Emit the computation of the sign bit for a floating point value. Returns
757/// the i1 sign bit value.
759 LLVMContext &C = CGF.CGM.getLLVMContext();
760
761 llvm::Type *Ty = V->getType();
762 int Width = Ty->getPrimitiveSizeInBits();
763 llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
764 V = CGF.Builder.CreateBitCast(V, IntTy);
765 if (Ty->isPPC_FP128Ty()) {
766 // We want the sign bit of the higher-order double. The bitcast we just
767 // did works as if the double-double was stored to memory and then
768 // read as an i128. The "store" will put the higher-order double in the
769 // lower address in both little- and big-Endian modes, but the "load"
770 // will treat those bits as a different part of the i128: the low bits in
771 // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
772 // we need to shift the high bits down to the low before truncating.
773 Width >>= 1;
774 if (CGF.getTarget().isBigEndian()) {
775 Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
776 V = CGF.Builder.CreateLShr(V, ShiftCst);
777 }
778 // We are truncating value in order to extract the higher-order
779 // double, which we will be using to extract the sign from.
780 IntTy = llvm::IntegerType::get(C, Width);
781 V = CGF.Builder.CreateTrunc(V, IntTy);
782 }
783 Value *Zero = llvm::Constant::getNullValue(IntTy);
784 return CGF.Builder.CreateICmpSLT(V, Zero);
785}
786
787/// Checks no arguments or results are passed indirectly in the ABI (i.e. via a
788/// hidden pointer). This is used to check annotating FP libcalls (that could
789/// set `errno`) with "int" TBAA metadata is safe. If any floating-point
790/// arguments are passed indirectly, setup for the call could be incorrectly
791/// optimized out.
793 auto IsIndirect = [&](ABIArgInfo const &info) {
794 return info.isIndirect() || info.isIndirectAliased() || info.isInAlloca();
795 };
796 return !IsIndirect(FnInfo.getReturnInfo()) &&
797 llvm::none_of(FnInfo.arguments(),
798 [&](CGFunctionInfoArgInfo const &ArgInfo) {
799 return IsIndirect(ArgInfo.info);
800 });
801}
802
804 const CallExpr *E, llvm::Constant *calleeValue) {
805 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
806 CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
807 llvm::CallBase *callOrInvoke = nullptr;
808 CGFunctionInfo const *FnInfo = nullptr;
809 RValue Call =
810 CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot(),
811 /*Chain=*/nullptr, &callOrInvoke, &FnInfo);
812
813 if (unsigned BuiltinID = FD->getBuiltinID()) {
814 // Check whether a FP math builtin function, such as BI__builtin_expf
815 ASTContext &Context = CGF.getContext();
816 bool ConstWithoutErrnoAndExceptions =
818 // Restrict to target with errno, for example, MacOS doesn't set errno.
819 // TODO: Support builtin function with complex type returned, eg: cacosh
820 if (ConstWithoutErrnoAndExceptions && CGF.CGM.getLangOpts().MathErrno &&
821 !CGF.Builder.getIsFPConstrained() && Call.isScalar() &&
823 // Emit "int" TBAA metadata on FP math libcalls.
824 clang::QualType IntTy = Context.IntTy;
825 TBAAAccessInfo TBAAInfo = CGF.CGM.getTBAAAccessInfo(IntTy);
826 CGF.CGM.DecorateInstructionWithTBAA(callOrInvoke, TBAAInfo);
827 }
828 }
829 return Call;
830}
831
832/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
833/// depending on IntrinsicID.
834///
835/// \arg CGF The current codegen function.
836/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
837/// \arg X The first argument to the llvm.*.with.overflow.*.
838/// \arg Y The second argument to the llvm.*.with.overflow.*.
839/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
840/// \returns The result (i.e. sum/product) returned by the intrinsic.
842 const Intrinsic::ID IntrinsicID,
843 llvm::Value *X, llvm::Value *Y,
844 llvm::Value *&Carry) {
845 // Make sure we have integers of the same width.
846 assert(X->getType() == Y->getType() &&
847 "Arguments must be the same type. (Did you forget to make sure both "
848 "arguments have the same integer width?)");
849
850 Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
851 llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
852 Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
853 return CGF.Builder.CreateExtractValue(Tmp, 0);
854}
855
856namespace {
857 struct WidthAndSignedness {
858 unsigned Width;
859 bool Signed;
860 };
861}
862
863static WidthAndSignedness
865 const clang::QualType Type) {
866 assert(Type->isIntegerType() && "Given type is not an integer.");
867 unsigned Width = context.getIntWidth(Type);
869 return {Width, Signed};
870}
871
872// Given one or more integer types, this function produces an integer type that
873// encompasses them: any value in one of the given types could be expressed in
874// the encompassing type.
875static struct WidthAndSignedness
876EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
877 assert(Types.size() > 0 && "Empty list of types.");
878
879 // If any of the given types is signed, we must return a signed type.
880 bool Signed = false;
881 for (const auto &Type : Types) {
882 Signed |= Type.Signed;
883 }
884
885 // The encompassing type must have a width greater than or equal to the width
886 // of the specified types. Additionally, if the encompassing type is signed,
887 // its width must be strictly greater than the width of any unsigned types
888 // given.
889 unsigned Width = 0;
890 for (const auto &Type : Types) {
891 unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
892 if (Width < MinWidth) {
893 Width = MinWidth;
894 }
895 }
896
897 return {Width, Signed};
898}
899
900Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
901 Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
902 return Builder.CreateCall(CGM.getIntrinsic(inst, {ArgValue->getType()}),
903 ArgValue);
904}
905
906/// Checks if using the result of __builtin_object_size(p, @p From) in place of
907/// __builtin_object_size(p, @p To) is correct
908static bool areBOSTypesCompatible(int From, int To) {
909 // Note: Our __builtin_object_size implementation currently treats Type=0 and
910 // Type=2 identically. Encoding this implementation detail here may make
911 // improving __builtin_object_size difficult in the future, so it's omitted.
912 return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
913}
914
915static llvm::Value *
916getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
917 return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
918}
919
920llvm::Value *
921CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
922 llvm::IntegerType *ResType,
923 llvm::Value *EmittedE,
924 bool IsDynamic) {
925 uint64_t ObjectSize;
926 if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
927 return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
928 return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
929}
930
931namespace {
932
933/// StructFieldAccess is a simple visitor class to grab the first MemberExpr
934/// from an Expr. It records any ArraySubscriptExpr we meet along the way.
935class StructFieldAccess
936 : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
937 bool AddrOfSeen = false;
938
939public:
940 const Expr *ArrayIndex = nullptr;
941 QualType ArrayElementTy;
942
943 const Expr *VisitMemberExpr(const MemberExpr *E) {
944 if (AddrOfSeen && E->getType()->isArrayType())
945 // Avoid forms like '&ptr->array'.
946 return nullptr;
947 return E;
948 }
949
950 const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
951 if (ArrayIndex)
952 // We don't support multiple subscripts.
953 return nullptr;
954
955 AddrOfSeen = false; // '&ptr->array[idx]' is okay.
956 ArrayIndex = E->getIdx();
957 ArrayElementTy = E->getBase()->getType();
958 return Visit(E->getBase());
959 }
960 const Expr *VisitCastExpr(const CastExpr *E) {
961 if (E->getCastKind() == CK_LValueToRValue)
962 return E;
963 return Visit(E->getSubExpr());
964 }
965 const Expr *VisitParenExpr(const ParenExpr *E) {
966 return Visit(E->getSubExpr());
967 }
968 const Expr *VisitUnaryAddrOf(const clang::UnaryOperator *E) {
969 AddrOfSeen = true;
970 return Visit(E->getSubExpr());
971 }
972 const Expr *VisitUnaryDeref(const clang::UnaryOperator *E) {
973 AddrOfSeen = false;
974 return Visit(E->getSubExpr());
975 }
976 const Expr *VisitBinaryOperator(const clang::BinaryOperator *Op) {
977 return Op->isCommaOp() ? Visit(Op->getRHS()) : nullptr;
978 }
979};
980
981} // end anonymous namespace
982
983/// Find a struct's flexible array member. It may be embedded inside multiple
984/// sub-structs, but must still be the last field.
986 ASTContext &Ctx,
987 const RecordDecl *RD) {
988 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
989 CGF.getLangOpts().getStrictFlexArraysLevel();
990
991 if (RD->isImplicit())
992 return nullptr;
993
994 for (const FieldDecl *FD : RD->fields()) {
996 Ctx, FD, FD->getType(), StrictFlexArraysLevel,
997 /*IgnoreTemplateOrMacroSubstitution=*/true))
998 return FD;
999
1000 if (const auto *RD = FD->getType()->getAsRecordDecl())
1001 if (const FieldDecl *FD = FindFlexibleArrayMemberField(CGF, Ctx, RD))
1002 return FD;
1003 }
1004
1005 return nullptr;
1006}
1007
1008/// Calculate the offset of a struct field. It may be embedded inside multiple
1009/// sub-structs.
1010static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD,
1011 const FieldDecl *FD, int64_t &Offset) {
1012 if (RD->isImplicit())
1013 return false;
1014
1015 // Keep track of the field number ourselves, because the other methods
1016 // (CGRecordLayout::getLLVMFieldNo) aren't always equivalent to how the AST
1017 // is laid out.
1018 uint32_t FieldNo = 0;
1019 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
1020
1021 for (const FieldDecl *Field : RD->fields()) {
1022 if (Field == FD) {
1023 Offset += Layout.getFieldOffset(FieldNo);
1024 return true;
1025 }
1026
1027 if (const auto *RD = Field->getType()->getAsRecordDecl()) {
1028 if (GetFieldOffset(Ctx, RD, FD, Offset)) {
1029 Offset += Layout.getFieldOffset(FieldNo);
1030 return true;
1031 }
1032 }
1033
1034 if (!RD->isUnion())
1035 ++FieldNo;
1036 }
1037
1038 return false;
1039}
1040
1041static std::optional<int64_t>
1042GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD) {
1043 int64_t Offset = 0;
1044
1045 if (GetFieldOffset(Ctx, RD, FD, Offset))
1046 return std::optional<int64_t>(Offset);
1047
1048 return std::nullopt;
1049}
1050
1051llvm::Value *CodeGenFunction::emitCountedBySize(const Expr *E,
1052 llvm::Value *EmittedE,
1053 unsigned Type,
1054 llvm::IntegerType *ResType) {
1055 // Note: If the whole struct is specificed in the __bdos (i.e. Visitor
1056 // returns a DeclRefExpr). The calculation of the whole size of the structure
1057 // with a flexible array member can be done in two ways:
1058 //
1059 // 1) sizeof(struct S) + count * sizeof(typeof(fam))
1060 // 2) offsetof(struct S, fam) + count * sizeof(typeof(fam))
1061 //
1062 // The first will add additional padding after the end of the array
1063 // allocation while the second method is more precise, but not quite expected
1064 // from programmers. See
1065 // https://lore.kernel.org/lkml/ZvV6X5FPBBW7CO1f@archlinux/ for a discussion
1066 // of the topic.
1067 //
1068 // GCC isn't (currently) able to calculate __bdos on a pointer to the whole
1069 // structure. Therefore, because of the above issue, we choose to match what
1070 // GCC does for consistency's sake.
1071
1072 StructFieldAccess Visitor;
1073 E = Visitor.Visit(E);
1074 if (!E)
1075 return nullptr;
1076
1077 const Expr *Idx = Visitor.ArrayIndex;
1078 if (Idx) {
1079 if (Idx->HasSideEffects(getContext()))
1080 // We can't have side-effects.
1081 return getDefaultBuiltinObjectSizeResult(Type, ResType);
1082
1083 if (const auto *IL = dyn_cast<IntegerLiteral>(Idx)) {
1084 int64_t Val = IL->getValue().getSExtValue();
1085 if (Val < 0)
1086 return getDefaultBuiltinObjectSizeResult(Type, ResType);
1087
1088 // The index is 0, so we don't need to take it into account.
1089 if (Val == 0)
1090 Idx = nullptr;
1091 }
1092 }
1093
1094 // __counted_by on either a flexible array member or a pointer into a struct
1095 // with a flexible array member.
1096 if (const auto *ME = dyn_cast<MemberExpr>(E))
1097 return emitCountedByMemberSize(ME, Idx, EmittedE, Visitor.ArrayElementTy,
1098 Type, ResType);
1099
1100 // __counted_by on a pointer in a struct.
1101 if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E);
1102 ICE && ICE->getCastKind() == CK_LValueToRValue)
1103 return emitCountedByPointerSize(ICE, Idx, EmittedE, Visitor.ArrayElementTy,
1104 Type, ResType);
1105
1106 return nullptr;
1107}
1108
1110 llvm::Value *Res,
1111 llvm::Value *Index,
1112 llvm::IntegerType *ResType,
1113 bool IsSigned) {
1114 // cmp = (array_size >= 0)
1115 Value *Cmp = CGF.Builder.CreateIsNotNeg(Res);
1116 if (Index)
1117 // cmp = (cmp && index >= 0)
1118 Cmp = CGF.Builder.CreateAnd(CGF.Builder.CreateIsNotNeg(Index), Cmp);
1119
1120 // return cmp ? result : 0
1121 return CGF.Builder.CreateSelect(Cmp, Res,
1122 ConstantInt::get(ResType, 0, IsSigned));
1123}
1124
1125static std::pair<llvm::Value *, llvm::Value *>
1127 const FieldDecl *ArrayFD, const FieldDecl *CountFD,
1128 const Expr *Idx, llvm::IntegerType *ResType,
1129 bool IsSigned) {
1130 // count = ptr->count;
1131 Value *Count = CGF.EmitLoadOfCountedByField(ME, ArrayFD, CountFD);
1132 if (!Count)
1133 return std::make_pair<Value *>(nullptr, nullptr);
1134 Count = CGF.Builder.CreateIntCast(Count, ResType, IsSigned, "count");
1135
1136 // index = ptr->index;
1137 Value *Index = nullptr;
1138 if (Idx) {
1139 bool IdxSigned = Idx->getType()->isSignedIntegerType();
1140 Index = CGF.EmitScalarExpr(Idx);
1141 Index = CGF.Builder.CreateIntCast(Index, ResType, IdxSigned, "index");
1142 }
1143
1144 return std::make_pair(Count, Index);
1145}
1146
1147llvm::Value *CodeGenFunction::emitCountedByPointerSize(
1148 const ImplicitCastExpr *E, const Expr *Idx, llvm::Value *EmittedE,
1149 QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
1150 assert(E->getCastKind() == CK_LValueToRValue &&
1151 "must be an LValue to RValue cast");
1152
1153 const MemberExpr *ME =
1154 dyn_cast<MemberExpr>(E->getSubExpr()->IgnoreParenNoopCasts(getContext()));
1155 if (!ME)
1156 return nullptr;
1157
1158 const auto *ArrayBaseFD = dyn_cast<FieldDecl>(ME->getMemberDecl());
1159 if (!ArrayBaseFD || !ArrayBaseFD->getType()->isPointerType() ||
1160 !ArrayBaseFD->getType()->isCountAttributedType())
1161 return nullptr;
1162
1163 // Get the 'count' FieldDecl.
1164 const FieldDecl *CountFD = ArrayBaseFD->findCountedByField();
1165 if (!CountFD)
1166 // Can't find the field referenced by the "counted_by" attribute.
1167 return nullptr;
1168
1169 // Calculate the array's object size using these formulae. (Note: if the
1170 // calculation is negative, we return 0.):
1171 //
1172 // struct p;
1173 // struct s {
1174 // /* ... */
1175 // struct p **array __attribute__((counted_by(count)));
1176 // int count;
1177 // };
1178 //
1179 // 1) 'ptr->array':
1180 //
1181 // count = ptr->count;
1182 //
1183 // array_element_size = sizeof (*ptr->array);
1184 // array_size = count * array_element_size;
1185 //
1186 // result = array_size;
1187 //
1188 // cmp = (result >= 0)
1189 // return cmp ? result : 0;
1190 //
1191 // 2) '&((cast) ptr->array)[idx]':
1192 //
1193 // count = ptr->count;
1194 // index = idx;
1195 //
1196 // array_element_size = sizeof (*ptr->array);
1197 // array_size = count * array_element_size;
1198 //
1199 // casted_array_element_size = sizeof (*((cast) ptr->array));
1200 //
1201 // index_size = index * casted_array_element_size;
1202 // result = array_size - index_size;
1203 //
1204 // cmp = (result >= 0)
1205 // if (index)
1206 // cmp = (cmp && index > 0)
1207 // return cmp ? result : 0;
1208
1209 auto GetElementBaseSize = [&](QualType ElementTy) {
1210 CharUnits ElementSize =
1211 getContext().getTypeSizeInChars(ElementTy->getPointeeType());
1212
1213 if (ElementSize.isZero()) {
1214 // This might be a __sized_by on a 'void *', which counts bytes, not
1215 // elements.
1216 auto *CAT = ElementTy->getAs<CountAttributedType>();
1217 if (!CAT || (CAT->getKind() != CountAttributedType::SizedBy &&
1218 CAT->getKind() != CountAttributedType::SizedByOrNull))
1219 // Okay, not sure what it is now.
1220 // FIXME: Should this be an assert?
1221 return std::optional<CharUnits>();
1222
1223 ElementSize = CharUnits::One();
1224 }
1225
1226 return std::optional<CharUnits>(ElementSize);
1227 };
1228
1229 // Get the sizes of the original array element and the casted array element,
1230 // if different.
1231 std::optional<CharUnits> ArrayElementBaseSize =
1232 GetElementBaseSize(ArrayBaseFD->getType());
1233 if (!ArrayElementBaseSize)
1234 return nullptr;
1235
1236 std::optional<CharUnits> CastedArrayElementBaseSize = ArrayElementBaseSize;
1237 if (!CastedArrayElementTy.isNull() && CastedArrayElementTy->isPointerType()) {
1238 CastedArrayElementBaseSize = GetElementBaseSize(CastedArrayElementTy);
1239 if (!CastedArrayElementBaseSize)
1240 return nullptr;
1241 }
1242
1243 bool IsSigned = CountFD->getType()->isSignedIntegerType();
1244
1245 // count = ptr->count;
1246 // index = ptr->index;
1247 Value *Count, *Index;
1248 std::tie(Count, Index) = GetCountFieldAndIndex(
1249 *this, ME, ArrayBaseFD, CountFD, Idx, ResType, IsSigned);
1250 if (!Count)
1251 return nullptr;
1252
1253 // array_element_size = sizeof (*ptr->array)
1254 auto *ArrayElementSize = llvm::ConstantInt::get(
1255 ResType, ArrayElementBaseSize->getQuantity(), IsSigned);
1256
1257 // casted_array_element_size = sizeof (*((cast) ptr->array));
1258 auto *CastedArrayElementSize = llvm::ConstantInt::get(
1259 ResType, CastedArrayElementBaseSize->getQuantity(), IsSigned);
1260
1261 // array_size = count * array_element_size;
1262 Value *ArraySize = Builder.CreateMul(Count, ArrayElementSize, "array_size",
1263 !IsSigned, IsSigned);
1264
1265 // Option (1) 'ptr->array'
1266 // result = array_size
1267 Value *Result = ArraySize;
1268
1269 if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
1270 // index_size = index * casted_array_element_size;
1271 Value *IndexSize = Builder.CreateMul(Index, CastedArrayElementSize,
1272 "index_size", !IsSigned, IsSigned);
1273
1274 // result = result - index_size;
1275 Result =
1276 Builder.CreateSub(Result, IndexSize, "result", !IsSigned, IsSigned);
1277 }
1278
1279 return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
1280}
1281
1282llvm::Value *CodeGenFunction::emitCountedByMemberSize(
1283 const MemberExpr *ME, const Expr *Idx, llvm::Value *EmittedE,
1284 QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
1285 const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
1286 if (!FD)
1287 return nullptr;
1288
1289 // Find the flexible array member and check that it has the __counted_by
1290 // attribute.
1291 ASTContext &Ctx = getContext();
1292 const RecordDecl *RD = FD->getDeclContext()->getOuterLexicalRecordContext();
1293 const FieldDecl *FlexibleArrayMemberFD = nullptr;
1294
1296 Ctx, FD, FD->getType(), getLangOpts().getStrictFlexArraysLevel(),
1297 /*IgnoreTemplateOrMacroSubstitution=*/true))
1298 FlexibleArrayMemberFD = FD;
1299 else
1300 FlexibleArrayMemberFD = FindFlexibleArrayMemberField(*this, Ctx, RD);
1301
1302 if (!FlexibleArrayMemberFD ||
1303 !FlexibleArrayMemberFD->getType()->isCountAttributedType())
1304 return nullptr;
1305
1306 // Get the 'count' FieldDecl.
1307 const FieldDecl *CountFD = FlexibleArrayMemberFD->findCountedByField();
1308 if (!CountFD)
1309 // Can't find the field referenced by the "counted_by" attribute.
1310 return nullptr;
1311
1312 // Calculate the flexible array member's object size using these formulae.
1313 // (Note: if the calculation is negative, we return 0.):
1314 //
1315 // struct p;
1316 // struct s {
1317 // /* ... */
1318 // int count;
1319 // struct p *array[] __attribute__((counted_by(count)));
1320 // };
1321 //
1322 // 1) 'ptr->array':
1323 //
1324 // count = ptr->count;
1325 //
1326 // flexible_array_member_element_size = sizeof (*ptr->array);
1327 // flexible_array_member_size =
1328 // count * flexible_array_member_element_size;
1329 //
1330 // result = flexible_array_member_size;
1331 //
1332 // cmp = (result >= 0)
1333 // return cmp ? result : 0;
1334 //
1335 // 2) '&((cast) ptr->array)[idx]':
1336 //
1337 // count = ptr->count;
1338 // index = idx;
1339 //
1340 // flexible_array_member_element_size = sizeof (*ptr->array);
1341 // flexible_array_member_size =
1342 // count * flexible_array_member_element_size;
1343 //
1344 // casted_flexible_array_member_element_size =
1345 // sizeof (*((cast) ptr->array));
1346 // index_size = index * casted_flexible_array_member_element_size;
1347 //
1348 // result = flexible_array_member_size - index_size;
1349 //
1350 // cmp = (result >= 0)
1351 // if (index != 0)
1352 // cmp = (cmp && index >= 0)
1353 // return cmp ? result : 0;
1354 //
1355 // 3) '&ptr->field':
1356 //
1357 // count = ptr->count;
1358 // sizeof_struct = sizeof (struct s);
1359 //
1360 // flexible_array_member_element_size = sizeof (*ptr->array);
1361 // flexible_array_member_size =
1362 // count * flexible_array_member_element_size;
1363 //
1364 // field_offset = offsetof (struct s, field);
1365 // offset_diff = sizeof_struct - field_offset;
1366 //
1367 // result = offset_diff + flexible_array_member_size;
1368 //
1369 // cmp = (result >= 0)
1370 // return cmp ? result : 0;
1371 //
1372 // 4) '&((cast) ptr->field_array)[idx]':
1373 //
1374 // count = ptr->count;
1375 // index = idx;
1376 // sizeof_struct = sizeof (struct s);
1377 //
1378 // flexible_array_member_element_size = sizeof (*ptr->array);
1379 // flexible_array_member_size =
1380 // count * flexible_array_member_element_size;
1381 //
1382 // casted_field_element_size = sizeof (*((cast) ptr->field_array));
1383 // field_offset = offsetof (struct s, field)
1384 // field_offset += index * casted_field_element_size;
1385 //
1386 // offset_diff = sizeof_struct - field_offset;
1387 //
1388 // result = offset_diff + flexible_array_member_size;
1389 //
1390 // cmp = (result >= 0)
1391 // if (index != 0)
1392 // cmp = (cmp && index >= 0)
1393 // return cmp ? result : 0;
1394
1395 bool IsSigned = CountFD->getType()->isSignedIntegerType();
1396
1397 QualType FlexibleArrayMemberTy = FlexibleArrayMemberFD->getType();
1398
1399 // Explicit cast because otherwise the CharWidth will promote an i32's into
1400 // u64's leading to overflows.
1401 int64_t CharWidth = static_cast<int64_t>(CGM.getContext().getCharWidth());
1402
1403 // field_offset = offsetof (struct s, field);
1404 Value *FieldOffset = nullptr;
1405 if (FlexibleArrayMemberFD != FD) {
1406 std::optional<int64_t> Offset = GetFieldOffset(Ctx, RD, FD);
1407 if (!Offset)
1408 return nullptr;
1409 FieldOffset =
1410 llvm::ConstantInt::get(ResType, *Offset / CharWidth, IsSigned);
1411 }
1412
1413 // count = ptr->count;
1414 // index = ptr->index;
1415 Value *Count, *Index;
1416 std::tie(Count, Index) = GetCountFieldAndIndex(
1417 *this, ME, FlexibleArrayMemberFD, CountFD, Idx, ResType, IsSigned);
1418 if (!Count)
1419 return nullptr;
1420
1421 // flexible_array_member_element_size = sizeof (*ptr->array);
1422 const ArrayType *ArrayTy = Ctx.getAsArrayType(FlexibleArrayMemberTy);
1423 CharUnits BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
1424 auto *FlexibleArrayMemberElementSize =
1425 llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
1426
1427 // flexible_array_member_size = count * flexible_array_member_element_size;
1428 Value *FlexibleArrayMemberSize =
1429 Builder.CreateMul(Count, FlexibleArrayMemberElementSize,
1430 "flexible_array_member_size", !IsSigned, IsSigned);
1431
1432 Value *Result = nullptr;
1433 if (FlexibleArrayMemberFD == FD) {
1434 if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
1435 // casted_flexible_array_member_element_size =
1436 // sizeof (*((cast) ptr->array));
1437 llvm::ConstantInt *CastedFlexibleArrayMemberElementSize =
1438 FlexibleArrayMemberElementSize;
1439 if (!CastedArrayElementTy.isNull() &&
1440 CastedArrayElementTy->isPointerType()) {
1441 CharUnits BaseSize =
1442 Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
1443 CastedFlexibleArrayMemberElementSize =
1444 llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
1445 }
1446
1447 // index_size = index * casted_flexible_array_member_element_size;
1448 Value *IndexSize =
1449 Builder.CreateMul(Index, CastedFlexibleArrayMemberElementSize,
1450 "index_size", !IsSigned, IsSigned);
1451
1452 // result = flexible_array_member_size - index_size;
1453 Result = Builder.CreateSub(FlexibleArrayMemberSize, IndexSize, "result",
1454 !IsSigned, IsSigned);
1455 } else { // Option (1) 'ptr->array'
1456 // result = flexible_array_member_size;
1457 Result = FlexibleArrayMemberSize;
1458 }
1459 } else {
1460 // sizeof_struct = sizeof (struct s);
1461 llvm::StructType *StructTy = getTypes().getCGRecordLayout(RD).getLLVMType();
1462 const llvm::DataLayout &Layout = CGM.getDataLayout();
1463 TypeSize Size = Layout.getTypeSizeInBits(StructTy);
1464 Value *SizeofStruct =
1465 llvm::ConstantInt::get(ResType, Size.getKnownMinValue() / CharWidth);
1466
1467 if (Idx) { // Option (4) '&((cast) ptr->field_array)[idx]'
1468 // casted_field_element_size = sizeof (*((cast) ptr->field_array));
1469 CharUnits BaseSize;
1470 if (!CastedArrayElementTy.isNull() &&
1471 CastedArrayElementTy->isPointerType()) {
1472 BaseSize =
1473 Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
1474 } else {
1475 const ArrayType *ArrayTy = Ctx.getAsArrayType(FD->getType());
1476 BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
1477 }
1478
1479 llvm::ConstantInt *CastedFieldElementSize =
1480 llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
1481
1482 // field_offset += index * casted_field_element_size;
1483 Value *Mul = Builder.CreateMul(Index, CastedFieldElementSize,
1484 "field_offset", !IsSigned, IsSigned);
1485 FieldOffset = Builder.CreateAdd(FieldOffset, Mul);
1486 }
1487 // Option (3) '&ptr->field', and Option (4) continuation.
1488 // offset_diff = flexible_array_member_offset - field_offset;
1489 Value *OffsetDiff = Builder.CreateSub(SizeofStruct, FieldOffset,
1490 "offset_diff", !IsSigned, IsSigned);
1491
1492 // result = offset_diff + flexible_array_member_size;
1493 Result = Builder.CreateAdd(FlexibleArrayMemberSize, OffsetDiff, "result");
1494 }
1495
1496 return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
1497}
1498
1499/// Returns a Value corresponding to the size of the given expression.
1500/// This Value may be either of the following:
1501/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
1502/// it)
1503/// - A call to the @llvm.objectsize intrinsic
1504///
1505/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
1506/// and we wouldn't otherwise try to reference a pass_object_size parameter,
1507/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
1508llvm::Value *
1509CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
1510 llvm::IntegerType *ResType,
1511 llvm::Value *EmittedE, bool IsDynamic) {
1512 // We need to reference an argument if the pointer is a parameter with the
1513 // pass_object_size attribute.
1514 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
1515 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
1516 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
1517 if (Param != nullptr && PS != nullptr &&
1518 areBOSTypesCompatible(PS->getType(), Type)) {
1519 auto Iter = SizeArguments.find(Param);
1520 assert(Iter != SizeArguments.end());
1521
1522 const ImplicitParamDecl *D = Iter->second;
1523 auto DIter = LocalDeclMap.find(D);
1524 assert(DIter != LocalDeclMap.end());
1525
1526 return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
1527 getContext().getSizeType(), E->getBeginLoc());
1528 }
1529 }
1530
1531 // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
1532 // evaluate E for side-effects. In either case, we shouldn't lower to
1533 // @llvm.objectsize.
1534 if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
1535 return getDefaultBuiltinObjectSizeResult(Type, ResType);
1536
1537 Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
1538 assert(Ptr->getType()->isPointerTy() &&
1539 "Non-pointer passed to __builtin_object_size?");
1540
1541 if (IsDynamic)
1542 // Emit special code for a flexible array member with the "counted_by"
1543 // attribute.
1544 if (Value *V = emitCountedBySize(E, Ptr, Type, ResType))
1545 return V;
1546
1547 Function *F =
1548 CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
1549
1550 // LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
1551 Value *Min = Builder.getInt1((Type & 2) != 0);
1552 // For GCC compatibility, __builtin_object_size treat NULL as unknown size.
1553 Value *NullIsUnknown = Builder.getTrue();
1554 Value *Dynamic = Builder.getInt1(IsDynamic);
1555 return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
1556}
1557
1558namespace {
1559/// A struct to generically describe a bit test intrinsic.
1560struct BitTest {
1561 enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
1562 enum InterlockingKind : uint8_t {
1563 Unlocked,
1564 Sequential,
1565 Acquire,
1566 Release,
1567 NoFence
1568 };
1569
1570 ActionKind Action;
1571 InterlockingKind Interlocking;
1572 bool Is64Bit;
1573
1574 static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
1575};
1576
1577} // namespace
1578
1579BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
1580 switch (BuiltinID) {
1581 // Main portable variants.
1582 case Builtin::BI_bittest:
1583 return {TestOnly, Unlocked, false};
1584 case Builtin::BI_bittestandcomplement:
1585 return {Complement, Unlocked, false};
1586 case Builtin::BI_bittestandreset:
1587 return {Reset, Unlocked, false};
1588 case Builtin::BI_bittestandset:
1589 return {Set, Unlocked, false};
1590 case Builtin::BI_interlockedbittestandreset:
1591 return {Reset, Sequential, false};
1592 case Builtin::BI_interlockedbittestandset:
1593 return {Set, Sequential, false};
1594
1595 // 64-bit variants.
1596 case Builtin::BI_bittest64:
1597 return {TestOnly, Unlocked, true};
1598 case Builtin::BI_bittestandcomplement64:
1599 return {Complement, Unlocked, true};
1600 case Builtin::BI_bittestandreset64:
1601 return {Reset, Unlocked, true};
1602 case Builtin::BI_bittestandset64:
1603 return {Set, Unlocked, true};
1604 case Builtin::BI_interlockedbittestandreset64:
1605 return {Reset, Sequential, true};
1606 case Builtin::BI_interlockedbittestandset64:
1607 return {Set, Sequential, true};
1608
1609 // ARM/AArch64-specific ordering variants.
1610 case Builtin::BI_interlockedbittestandset_acq:
1611 return {Set, Acquire, false};
1612 case Builtin::BI_interlockedbittestandset_rel:
1613 return {Set, Release, false};
1614 case Builtin::BI_interlockedbittestandset_nf:
1615 return {Set, NoFence, false};
1616 case Builtin::BI_interlockedbittestandreset_acq:
1617 return {Reset, Acquire, false};
1618 case Builtin::BI_interlockedbittestandreset_rel:
1619 return {Reset, Release, false};
1620 case Builtin::BI_interlockedbittestandreset_nf:
1621 return {Reset, NoFence, false};
1622 case Builtin::BI_interlockedbittestandreset64_acq:
1623 return {Reset, Acquire, false};
1624 case Builtin::BI_interlockedbittestandreset64_rel:
1625 return {Reset, Release, false};
1626 case Builtin::BI_interlockedbittestandreset64_nf:
1627 return {Reset, NoFence, false};
1628 case Builtin::BI_interlockedbittestandset64_acq:
1629 return {Set, Acquire, false};
1630 case Builtin::BI_interlockedbittestandset64_rel:
1631 return {Set, Release, false};
1632 case Builtin::BI_interlockedbittestandset64_nf:
1633 return {Set, NoFence, false};
1634 }
1635 llvm_unreachable("expected only bittest intrinsics");
1636}
1637
1638static char bitActionToX86BTCode(BitTest::ActionKind A) {
1639 switch (A) {
1640 case BitTest::TestOnly: return '\0';
1641 case BitTest::Complement: return 'c';
1642 case BitTest::Reset: return 'r';
1643 case BitTest::Set: return 's';
1644 }
1645 llvm_unreachable("invalid action");
1646}
1647
1649 BitTest BT,
1650 const CallExpr *E, Value *BitBase,
1651 Value *BitPos) {
1652 char Action = bitActionToX86BTCode(BT.Action);
1653 char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
1654
1655 // Build the assembly.
1657 raw_svector_ostream AsmOS(Asm);
1658 if (BT.Interlocking != BitTest::Unlocked)
1659 AsmOS << "lock ";
1660 AsmOS << "bt";
1661 if (Action)
1662 AsmOS << Action;
1663 AsmOS << SizeSuffix << " $2, ($1)";
1664
1665 // Build the constraints. FIXME: We should support immediates when possible.
1666 std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
1667 std::string_view MachineClobbers = CGF.getTarget().getClobbers();
1668 if (!MachineClobbers.empty()) {
1669 Constraints += ',';
1670 Constraints += MachineClobbers;
1671 }
1672 llvm::IntegerType *IntType = llvm::IntegerType::get(
1673 CGF.getLLVMContext(),
1674 CGF.getContext().getTypeSize(E->getArg(1)->getType()));
1675 llvm::FunctionType *FTy =
1676 llvm::FunctionType::get(CGF.Int8Ty, {CGF.UnqualPtrTy, IntType}, false);
1677
1678 llvm::InlineAsm *IA =
1679 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1680 return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
1681}
1682
1683static llvm::AtomicOrdering
1684getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
1685 switch (I) {
1686 case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
1687 case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
1688 case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
1689 case BitTest::Release: return llvm::AtomicOrdering::Release;
1690 case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
1691 }
1692 llvm_unreachable("invalid interlocking");
1693}
1694
1695static llvm::Value *EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E) {
1696 llvm::Value *ArgValue = CGF.EmitScalarExpr(E);
1697 llvm::Type *ArgType = ArgValue->getType();
1698
1699 // Boolean vectors can be casted directly to its bitfield representation. We
1700 // intentionally do not round up to the next power of two size and let LLVM
1701 // handle the trailing bits.
1702 if (auto *VT = dyn_cast<llvm::FixedVectorType>(ArgType);
1703 VT && VT->getElementType()->isIntegerTy(1)) {
1704 llvm::Type *StorageType =
1705 llvm::Type::getIntNTy(CGF.getLLVMContext(), VT->getNumElements());
1706 ArgValue = CGF.Builder.CreateBitCast(ArgValue, StorageType);
1707 }
1708
1709 return ArgValue;
1710}
1711
1712/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
1713/// bits and a bit position and read and optionally modify the bit at that
1714/// position. The position index can be arbitrarily large, i.e. it can be larger
1715/// than 31 or 63, so we need an indexed load in the general case.
1716static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
1717 unsigned BuiltinID,
1718 const CallExpr *E) {
1719 Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
1720 Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
1721
1722 BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
1723
1724 // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
1725 // indexing operation internally. Use them if possible.
1726 if (CGF.getTarget().getTriple().isX86())
1727 return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
1728
1729 // Otherwise, use generic code to load one byte and test the bit. Use all but
1730 // the bottom three bits as the array index, and the bottom three bits to form
1731 // a mask.
1732 // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
1733 Value *ByteIndex = CGF.Builder.CreateAShr(
1734 BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
1735 Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBase, ByteIndex,
1736 "bittest.byteaddr"),
1737 CGF.Int8Ty, CharUnits::One());
1738 Value *PosLow =
1739 CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
1740 llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
1741
1742 // The updating instructions will need a mask.
1743 Value *Mask = nullptr;
1744 if (BT.Action != BitTest::TestOnly) {
1745 Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
1746 "bittest.mask");
1747 }
1748
1749 // Check the action and ordering of the interlocked intrinsics.
1750 llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
1751
1752 Value *OldByte = nullptr;
1753 if (Ordering != llvm::AtomicOrdering::NotAtomic) {
1754 // Emit a combined atomicrmw load/store operation for the interlocked
1755 // intrinsics.
1756 llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
1757 if (BT.Action == BitTest::Reset) {
1758 Mask = CGF.Builder.CreateNot(Mask);
1759 RMWOp = llvm::AtomicRMWInst::And;
1760 }
1761 OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr, Mask, Ordering);
1762 } else {
1763 // Emit a plain load for the non-interlocked intrinsics.
1764 OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
1765 Value *NewByte = nullptr;
1766 switch (BT.Action) {
1767 case BitTest::TestOnly:
1768 // Don't store anything.
1769 break;
1770 case BitTest::Complement:
1771 NewByte = CGF.Builder.CreateXor(OldByte, Mask);
1772 break;
1773 case BitTest::Reset:
1774 NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
1775 break;
1776 case BitTest::Set:
1777 NewByte = CGF.Builder.CreateOr(OldByte, Mask);
1778 break;
1779 }
1780 if (NewByte)
1781 CGF.Builder.CreateStore(NewByte, ByteAddr);
1782 }
1783
1784 // However we loaded the old byte, either by plain load or atomicrmw, shift
1785 // the bit into the low position and mask it to 0 or 1.
1786 Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
1787 return CGF.Builder.CreateAnd(
1788 ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
1789}
1790
1791namespace {
1792enum class MSVCSetJmpKind {
1793 _setjmpex,
1794 _setjmp3,
1795 _setjmp
1796};
1797}
1798
1799/// MSVC handles setjmp a bit differently on different platforms. On every
1800/// architecture except 32-bit x86, the frame address is passed. On x86, extra
1801/// parameters can be passed as variadic arguments, but we always pass none.
1802static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
1803 const CallExpr *E) {
1804 llvm::Value *Arg1 = nullptr;
1805 llvm::Type *Arg1Ty = nullptr;
1806 StringRef Name;
1807 bool IsVarArg = false;
1808 if (SJKind == MSVCSetJmpKind::_setjmp3) {
1809 Name = "_setjmp3";
1810 Arg1Ty = CGF.Int32Ty;
1811 Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
1812 IsVarArg = true;
1813 } else {
1814 Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
1815 Arg1Ty = CGF.Int8PtrTy;
1816 if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
1817 Arg1 = CGF.Builder.CreateCall(
1818 CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
1819 } else
1820 Arg1 = CGF.Builder.CreateCall(
1821 CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
1822 llvm::ConstantInt::get(CGF.Int32Ty, 0));
1823 }
1824
1825 // Mark the call site and declaration with ReturnsTwice.
1826 llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
1827 llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
1828 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
1829 llvm::Attribute::ReturnsTwice);
1830 llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
1831 llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
1832 ReturnsTwiceAttr, /*Local=*/true);
1833
1834 llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
1835 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
1836 llvm::Value *Args[] = {Buf, Arg1};
1837 llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
1838 CB->setAttributes(ReturnsTwiceAttr);
1839 return RValue::get(CB);
1840}
1841
1842// Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
1844 const CallExpr *E) {
1845 switch (BuiltinID) {
1848 Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
1849 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1850
1851 llvm::Type *ArgType = ArgValue->getType();
1852 llvm::Type *IndexType = IndexAddress.getElementType();
1853 llvm::Type *ResultType = ConvertType(E->getType());
1854
1855 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1856 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1857 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1858
1859 BasicBlock *Begin = Builder.GetInsertBlock();
1860 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1861 Builder.SetInsertPoint(End);
1862 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1863
1864 Builder.SetInsertPoint(Begin);
1865 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1866 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1867 Builder.CreateCondBr(IsZero, End, NotZero);
1868 Result->addIncoming(ResZero, Begin);
1869
1870 Builder.SetInsertPoint(NotZero);
1871
1872 if (BuiltinID == MSVCIntrin::_BitScanForward) {
1873 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1874 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1875 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1876 Builder.CreateStore(ZeroCount, IndexAddress, false);
1877 } else {
1878 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1879 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1880
1881 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1882 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1883 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1884 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1885 Builder.CreateStore(Index, IndexAddress, false);
1886 }
1887 Builder.CreateBr(End);
1888 Result->addIncoming(ResOne, NotZero);
1889
1890 Builder.SetInsertPoint(End);
1891 return Result;
1892 }
1894 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1896 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1898 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1900 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1902 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1904 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
1906 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1907 AtomicOrdering::Acquire);
1909 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1910 AtomicOrdering::Release);
1912 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1913 AtomicOrdering::Monotonic);
1915 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1916 AtomicOrdering::Acquire);
1918 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1919 AtomicOrdering::Release);
1921 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1922 AtomicOrdering::Monotonic);
1924 return EmitAtomicCmpXchgForMSIntrin(*this, E);
1926 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1928 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1930 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1933 *this, E, AtomicOrdering::SequentiallyConsistent);
1935 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
1937 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
1939 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1941 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1942 AtomicOrdering::Acquire);
1944 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1945 AtomicOrdering::Release);
1947 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1948 AtomicOrdering::Monotonic);
1950 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1951 AtomicOrdering::Acquire);
1953 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1954 AtomicOrdering::Release);
1956 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1957 AtomicOrdering::Monotonic);
1959 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1960 AtomicOrdering::Acquire);
1962 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1963 AtomicOrdering::Release);
1965 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1966 AtomicOrdering::Monotonic);
1968 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1970 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1972 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1974 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1976 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1978 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1979
1981 return EmitAtomicDecrementValue(*this, E);
1983 return EmitAtomicIncrementValue(*this, E);
1984
1986 // Request immediate process termination from the kernel. The instruction
1987 // sequences to do this are documented on MSDN:
1988 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
1989 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1990 StringRef Asm, Constraints;
1991 switch (ISA) {
1992 default:
1993 ErrorUnsupported(E, "__fastfail call for this architecture");
1994 break;
1995 case llvm::Triple::x86:
1996 case llvm::Triple::x86_64:
1997 Asm = "int $$0x29";
1998 Constraints = "{cx}";
1999 break;
2000 case llvm::Triple::thumb:
2001 Asm = "udf #251";
2002 Constraints = "{r0}";
2003 break;
2004 case llvm::Triple::aarch64:
2005 Asm = "brk #0xF003";
2006 Constraints = "{w0}";
2007 }
2008 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
2009 llvm::InlineAsm *IA =
2010 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
2011 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
2012 getLLVMContext(), llvm::AttributeList::FunctionIndex,
2013 llvm::Attribute::NoReturn);
2014 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
2015 CI->setAttributes(NoReturnAttr);
2016 return CI;
2017 }
2018 }
2019 llvm_unreachable("Incorrect MSVC intrinsic!");
2020}
2021
2022namespace {
2023// ARC cleanup for __builtin_os_log_format
2024struct CallObjCArcUse final : EHScopeStack::Cleanup {
2025 CallObjCArcUse(llvm::Value *object) : object(object) {}
2026 llvm::Value *object;
2027
2028 void Emit(CodeGenFunction &CGF, Flags flags) override {
2029 CGF.EmitARCIntrinsicUse(object);
2030 }
2031};
2032}
2033
2035 BuiltinCheckKind Kind) {
2036 assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) &&
2037 "Unsupported builtin check kind");
2038
2039 Value *ArgValue = EmitBitCountExpr(*this, E);
2040 if (!SanOpts.has(SanitizerKind::Builtin))
2041 return ArgValue;
2042
2043 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2044 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2045 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2046 Value *Cond = Builder.CreateICmpNE(
2047 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
2048 EmitCheck(std::make_pair(Cond, CheckOrdinal), CheckHandler,
2050 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
2051 {});
2052 return ArgValue;
2053}
2054
2056 Value *ArgValue = EvaluateExprAsBool(E);
2057 if (!SanOpts.has(SanitizerKind::Builtin))
2058 return ArgValue;
2059
2060 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2061 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2062 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2063 EmitCheck(
2064 std::make_pair(ArgValue, CheckOrdinal), CheckHandler,
2066 llvm::ConstantInt::get(Builder.getInt8Ty(), BCK_AssumePassedFalse)},
2067 {});
2068 return ArgValue;
2069}
2070
2071static Value *EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW) {
2072 return CGF.Builder.CreateBinaryIntrinsic(
2073 Intrinsic::abs, ArgValue,
2074 ConstantInt::get(CGF.Builder.getInt1Ty(), HasNSW));
2075}
2076
2078 bool SanitizeOverflow) {
2079 Value *ArgValue = CGF.EmitScalarExpr(E->getArg(0));
2080
2081 // Try to eliminate overflow check.
2082 if (const auto *VCI = dyn_cast<llvm::ConstantInt>(ArgValue)) {
2083 if (!VCI->isMinSignedValue())
2084 return EmitAbs(CGF, ArgValue, true);
2085 }
2086
2088 SanitizerHandler CheckHandler;
2089 if (SanitizeOverflow) {
2090 Ordinals.push_back(SanitizerKind::SO_SignedIntegerOverflow);
2091 CheckHandler = SanitizerHandler::NegateOverflow;
2092 } else
2093 CheckHandler = SanitizerHandler::SubOverflow;
2094
2095 SanitizerDebugLocation SanScope(&CGF, Ordinals, CheckHandler);
2096
2097 Constant *Zero = Constant::getNullValue(ArgValue->getType());
2098 Value *ResultAndOverflow = CGF.Builder.CreateBinaryIntrinsic(
2099 Intrinsic::ssub_with_overflow, Zero, ArgValue);
2100 Value *Result = CGF.Builder.CreateExtractValue(ResultAndOverflow, 0);
2101 Value *NotOverflow = CGF.Builder.CreateNot(
2102 CGF.Builder.CreateExtractValue(ResultAndOverflow, 1));
2103
2104 // TODO: support -ftrapv-handler.
2105 if (SanitizeOverflow) {
2106 CGF.EmitCheck({{NotOverflow, SanitizerKind::SO_SignedIntegerOverflow}},
2107 CheckHandler,
2110 {ArgValue});
2111 } else
2112 CGF.EmitTrapCheck(NotOverflow, CheckHandler);
2113
2114 Value *CmpResult = CGF.Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
2115 return CGF.Builder.CreateSelect(CmpResult, Result, ArgValue, "abs");
2116}
2117
2118/// Get the argument type for arguments to os_log_helper.
2120 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
2121 return C.getCanonicalType(UnsignedTy);
2122}
2123
2126 CharUnits BufferAlignment) {
2127 ASTContext &Ctx = getContext();
2128
2130 {
2131 raw_svector_ostream OS(Name);
2132 OS << "__os_log_helper";
2133 OS << "_" << BufferAlignment.getQuantity();
2134 OS << "_" << int(Layout.getSummaryByte());
2135 OS << "_" << int(Layout.getNumArgsByte());
2136 for (const auto &Item : Layout.Items)
2137 OS << "_" << int(Item.getSizeByte()) << "_"
2138 << int(Item.getDescriptorByte());
2139 }
2140
2141 if (llvm::Function *F = CGM.getModule().getFunction(Name))
2142 return F;
2143
2145 FunctionArgList Args;
2146 Args.push_back(ImplicitParamDecl::Create(
2147 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
2149 ArgTys.emplace_back(Ctx.VoidPtrTy);
2150
2151 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
2152 char Size = Layout.Items[I].getSizeByte();
2153 if (!Size)
2154 continue;
2155
2156 QualType ArgTy = getOSLogArgType(Ctx, Size);
2157 Args.push_back(ImplicitParamDecl::Create(
2158 Ctx, nullptr, SourceLocation(),
2159 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
2161 ArgTys.emplace_back(ArgTy);
2162 }
2163
2164 QualType ReturnTy = Ctx.VoidTy;
2165
2166 // The helper function has linkonce_odr linkage to enable the linker to merge
2167 // identical functions. To ensure the merging always happens, 'noinline' is
2168 // attached to the function when compiling with -Oz.
2169 const CGFunctionInfo &FI =
2170 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
2171 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
2172 llvm::Function *Fn = llvm::Function::Create(
2173 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
2174 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
2175 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
2176 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
2177 Fn->setDoesNotThrow();
2178
2179 // Attach 'noinline' at -Oz.
2180 if (CGM.getCodeGenOpts().OptimizeSize == 2)
2181 Fn->addFnAttr(llvm::Attribute::NoInline);
2182
2183 auto NL = ApplyDebugLocation::CreateEmpty(*this);
2184 StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args);
2185
2186 // Create a scope with an artificial location for the body of this function.
2187 auto AL = ApplyDebugLocation::CreateArtificial(*this);
2188
2189 CharUnits Offset;
2191 Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Ctx.VoidTy,
2192 BufferAlignment);
2193 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
2194 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
2195 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
2196 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
2197
2198 unsigned I = 1;
2199 for (const auto &Item : Layout.Items) {
2200 Builder.CreateStore(
2201 Builder.getInt8(Item.getDescriptorByte()),
2202 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
2203 Builder.CreateStore(
2204 Builder.getInt8(Item.getSizeByte()),
2205 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
2206
2207 CharUnits Size = Item.size();
2208 if (!Size.getQuantity())
2209 continue;
2210
2211 Address Arg = GetAddrOfLocalVar(Args[I]);
2212 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
2213 Addr = Addr.withElementType(Arg.getElementType());
2214 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
2215 Offset += Size;
2216 ++I;
2217 }
2218
2220
2221 return Fn;
2222}
2223
2225 assert(E.getNumArgs() >= 2 &&
2226 "__builtin_os_log_format takes at least 2 arguments");
2227 ASTContext &Ctx = getContext();
2230 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
2231
2232 // Ignore argument 1, the format string. It is not currently used.
2233 CallArgList Args;
2234 Args.add(RValue::get(BufAddr.emitRawPointer(*this)), Ctx.VoidPtrTy);
2235
2236 for (const auto &Item : Layout.Items) {
2237 int Size = Item.getSizeByte();
2238 if (!Size)
2239 continue;
2240
2241 llvm::Value *ArgVal;
2242
2243 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
2244 uint64_t Val = 0;
2245 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
2246 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
2247 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
2248 } else if (const Expr *TheExpr = Item.getExpr()) {
2249 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
2250
2251 // If a temporary object that requires destruction after the full
2252 // expression is passed, push a lifetime-extended cleanup to extend its
2253 // lifetime to the end of the enclosing block scope.
2254 auto LifetimeExtendObject = [&](const Expr *E) {
2255 E = E->IgnoreParenCasts();
2256 // Extend lifetimes of objects returned by function calls and message
2257 // sends.
2258
2259 // FIXME: We should do this in other cases in which temporaries are
2260 // created including arguments of non-ARC types (e.g., C++
2261 // temporaries).
2263 return true;
2264 return false;
2265 };
2266
2267 if (TheExpr->getType()->isObjCRetainableType() &&
2268 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
2269 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
2270 "Only scalar can be a ObjC retainable type");
2271 if (!isa<Constant>(ArgVal)) {
2272 CleanupKind Cleanup = getARCCleanupKind();
2273 QualType Ty = TheExpr->getType();
2275 RawAddress Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
2276 ArgVal = EmitARCRetain(Ty, ArgVal);
2277 Builder.CreateStore(ArgVal, Addr);
2278 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
2280 Cleanup & EHCleanup);
2281
2282 // Push a clang.arc.use call to ensure ARC optimizer knows that the
2283 // argument has to be alive.
2284 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
2286 }
2287 }
2288 } else {
2289 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
2290 }
2291
2292 unsigned ArgValSize =
2293 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
2294 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
2295 ArgValSize);
2296 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
2297 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
2298 // If ArgVal has type x86_fp80, zero-extend ArgVal.
2299 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
2300 Args.add(RValue::get(ArgVal), ArgTy);
2301 }
2302
2303 const CGFunctionInfo &FI =
2304 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
2305 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
2306 Layout, BufAddr.getAlignment());
2308 return RValue::get(BufAddr, *this);
2309}
2310
2312 unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
2313 WidthAndSignedness ResultInfo) {
2314 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2315 Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
2316 !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
2317}
2318
2320 CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
2321 const clang::Expr *Op2, WidthAndSignedness Op2Info,
2322 const clang::Expr *ResultArg, QualType ResultQTy,
2323 WidthAndSignedness ResultInfo) {
2325 Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
2326 "Cannot specialize this multiply");
2327
2328 llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
2329 llvm::Value *V2 = CGF.EmitScalarExpr(Op2);
2330
2331 llvm::Value *HasOverflow;
2332 llvm::Value *Result = EmitOverflowIntrinsic(
2333 CGF, Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
2334
2335 // The intrinsic call will detect overflow when the value is > UINT_MAX,
2336 // however, since the original builtin had a signed result, we need to report
2337 // an overflow when the result is greater than INT_MAX.
2338 auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
2339 llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);
2340
2341 llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
2342 HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);
2343
2344 bool isVolatile =
2345 ResultArg->getType()->getPointeeType().isVolatileQualified();
2346 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2347 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2348 isVolatile);
2349 return RValue::get(HasOverflow);
2350}
2351
2352/// Determine if a binop is a checked mixed-sign multiply we can specialize.
2353static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
2354 WidthAndSignedness Op1Info,
2355 WidthAndSignedness Op2Info,
2356 WidthAndSignedness ResultInfo) {
2357 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2358 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
2359 Op1Info.Signed != Op2Info.Signed;
2360}
2361
2362/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
2363/// the generic checked-binop irgen.
2364static RValue
2366 WidthAndSignedness Op1Info, const clang::Expr *Op2,
2367 WidthAndSignedness Op2Info,
2368 const clang::Expr *ResultArg, QualType ResultQTy,
2369 WidthAndSignedness ResultInfo) {
2370 assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
2371 Op2Info, ResultInfo) &&
2372 "Not a mixed-sign multipliction we can specialize");
2373
2374 // Emit the signed and unsigned operands.
2375 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
2376 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
2377 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
2378 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
2379 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
2380 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
2381
2382 // One of the operands may be smaller than the other. If so, [s|z]ext it.
2383 if (SignedOpWidth < UnsignedOpWidth)
2384 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
2385 if (UnsignedOpWidth < SignedOpWidth)
2386 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
2387
2388 llvm::Type *OpTy = Signed->getType();
2389 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
2390 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2391 llvm::Type *ResTy = CGF.getTypes().ConvertType(ResultQTy);
2392 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
2393
2394 // Take the absolute value of the signed operand.
2395 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
2396 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
2397 llvm::Value *AbsSigned =
2398 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
2399
2400 // Perform a checked unsigned multiplication.
2401 llvm::Value *UnsignedOverflow;
2402 llvm::Value *UnsignedResult =
2403 EmitOverflowIntrinsic(CGF, Intrinsic::umul_with_overflow, AbsSigned,
2404 Unsigned, UnsignedOverflow);
2405
2406 llvm::Value *Overflow, *Result;
2407 if (ResultInfo.Signed) {
2408 // Signed overflow occurs if the result is greater than INT_MAX or lesser
2409 // than INT_MIN, i.e when |Result| > (INT_MAX + IsNegative).
2410 auto IntMax =
2411 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zext(OpWidth);
2412 llvm::Value *MaxResult =
2413 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
2414 CGF.Builder.CreateZExt(IsNegative, OpTy));
2415 llvm::Value *SignedOverflow =
2416 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
2417 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
2418
2419 // Prepare the signed result (possibly by negating it).
2420 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
2421 llvm::Value *SignedResult =
2422 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
2423 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
2424 } else {
2425 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
2426 llvm::Value *Underflow = CGF.Builder.CreateAnd(
2427 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
2428 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
2429 if (ResultInfo.Width < OpWidth) {
2430 auto IntMax =
2431 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
2432 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
2433 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
2434 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
2435 }
2436
2437 // Negate the product if it would be negative in infinite precision.
2438 Result = CGF.Builder.CreateSelect(
2439 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
2440
2441 Result = CGF.Builder.CreateTrunc(Result, ResTy);
2442 }
2443 assert(Overflow && Result && "Missing overflow or result");
2444
2445 bool isVolatile =
2446 ResultArg->getType()->getPointeeType().isVolatileQualified();
2447 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2448 isVolatile);
2449 return RValue::get(Overflow);
2450}
2451
2452static bool
2454 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
2455 if (const auto *Arr = Ctx.getAsArrayType(Ty))
2456 Ty = Ctx.getBaseElementType(Arr);
2457
2458 const auto *Record = Ty->getAsCXXRecordDecl();
2459 if (!Record)
2460 return false;
2461
2462 // We've already checked this type, or are in the process of checking it.
2463 if (!Seen.insert(Record).second)
2464 return false;
2465
2466 assert(Record->hasDefinition() &&
2467 "Incomplete types should already be diagnosed");
2468
2469 if (Record->isDynamicClass())
2470 return true;
2471
2472 for (FieldDecl *F : Record->fields()) {
2473 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
2474 return true;
2475 }
2476 return false;
2477}
2478
2479/// Determine if the specified type requires laundering by checking if it is a
2480/// dynamic class type or contains a subobject which is a dynamic class type.
2482 if (!CGM.getCodeGenOpts().StrictVTablePointers)
2483 return false;
2485 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
2486}
2487
2488RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
2489 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
2490 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
2491
2492 // The builtin's shift arg may have a different type than the source arg and
2493 // result, but the LLVM intrinsic uses the same type for all values.
2494 llvm::Type *Ty = Src->getType();
2495 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
2496
2497 // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
2498 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
2499 Function *F = CGM.getIntrinsic(IID, Ty);
2500 return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
2501}
2502
2503// Map math builtins for long-double to f128 version.
2504static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
2505 switch (BuiltinID) {
2506#define MUTATE_LDBL(func) \
2507 case Builtin::BI__builtin_##func##l: \
2508 return Builtin::BI__builtin_##func##f128;
2539 MUTATE_LDBL(nans)
2540 MUTATE_LDBL(inf)
2559 MUTATE_LDBL(huge_val)
2569#undef MUTATE_LDBL
2570 default:
2571 return BuiltinID;
2572 }
2573}
2574
2575static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID,
2576 Value *V) {
2577 if (CGF.Builder.getIsFPConstrained() &&
2578 CGF.Builder.getDefaultConstrainedExcept() != fp::ebIgnore) {
2579 if (Value *Result =
2580 CGF.getTargetHooks().testFPKind(V, BuiltinID, CGF.Builder, CGF.CGM))
2581 return Result;
2582 }
2583 return nullptr;
2584}
2585
2587 const FunctionDecl *FD) {
2588 auto Name = FD->getNameAsString() + "__hipstdpar_unsupported";
2589 auto FnTy = CGF->CGM.getTypes().GetFunctionType(FD);
2590 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2591
2593 for (auto &&FormalTy : FnTy->params())
2594 Args.push_back(llvm::PoisonValue::get(FormalTy));
2595
2596 return RValue::get(CGF->Builder.CreateCall(UBF, Args));
2597}
2598
2600 const CallExpr *E,
2602 assert(!getContext().BuiltinInfo.isImmediate(BuiltinID) &&
2603 "Should not codegen for consteval builtins");
2604
2605 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
2606 // See if we can constant fold this builtin. If so, don't emit it at all.
2607 // TODO: Extend this handling to all builtin calls that we can constant-fold.
2609 if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) &&
2610 !Result.hasSideEffects()) {
2611 if (Result.Val.isInt())
2612 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
2613 Result.Val.getInt()));
2614 if (Result.Val.isFloat())
2615 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
2616 Result.Val.getFloat()));
2617 }
2618
2619 // If current long-double semantics is IEEE 128-bit, replace math builtins
2620 // of long-double with f128 equivalent.
2621 // TODO: This mutation should also be applied to other targets other than PPC,
2622 // after backend supports IEEE 128-bit style libcalls.
2623 if (getTarget().getTriple().isPPC64() &&
2624 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
2625 BuiltinID = mutateLongDoubleBuiltin(BuiltinID);
2626
2627 // If the builtin has been declared explicitly with an assembler label,
2628 // disable the specialized emitting below. Ideally we should communicate the
2629 // rename in IR, or at least avoid generating the intrinsic calls that are
2630 // likely to get lowered to the renamed library functions.
2631 const unsigned BuiltinIDIfNoAsmLabel =
2632 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
2633
2634 std::optional<bool> ErrnoOverriden;
2635 // ErrnoOverriden is true if math-errno is overriden via the
2636 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
2637 // which implies math-errno.
2638 if (E->hasStoredFPFeatures()) {
2640 if (OP.hasMathErrnoOverride())
2641 ErrnoOverriden = OP.getMathErrnoOverride();
2642 }
2643 // True if 'attribute__((optnone))' is used. This attribute overrides
2644 // fast-math which implies math-errno.
2645 bool OptNone = CurFuncDecl && CurFuncDecl->hasAttr<OptimizeNoneAttr>();
2646
2647 // True if we are compiling at -O2 and errno has been disabled
2648 // using the '#pragma float_control(precise, off)', and
2649 // attribute opt-none hasn't been seen.
2650 bool ErrnoOverridenToFalseWithOpt =
2651 ErrnoOverriden.has_value() && !ErrnoOverriden.value() && !OptNone &&
2652 CGM.getCodeGenOpts().OptimizationLevel != 0;
2653
2654 // There are LLVM math intrinsics/instructions corresponding to math library
2655 // functions except the LLVM op will never set errno while the math library
2656 // might. Also, math builtins have the same semantics as their math library
2657 // twins. Thus, we can transform math library and builtin calls to their
2658 // LLVM counterparts if the call is marked 'const' (known to never set errno).
2659 // In case FP exceptions are enabled, the experimental versions of the
2660 // intrinsics model those.
2661 bool ConstAlways =
2662 getContext().BuiltinInfo.isConst(BuiltinID);
2663
2664 // There's a special case with the fma builtins where they are always const
2665 // if the target environment is GNU or the target is OS is Windows and we're
2666 // targeting the MSVCRT.dll environment.
2667 // FIXME: This list can be become outdated. Need to find a way to get it some
2668 // other way.
2669 switch (BuiltinID) {
2670 case Builtin::BI__builtin_fma:
2671 case Builtin::BI__builtin_fmaf:
2672 case Builtin::BI__builtin_fmal:
2673 case Builtin::BI__builtin_fmaf16:
2674 case Builtin::BIfma:
2675 case Builtin::BIfmaf:
2676 case Builtin::BIfmal: {
2677 auto &Trip = CGM.getTriple();
2678 if (Trip.isGNUEnvironment() || Trip.isOSMSVCRT())
2679 ConstAlways = true;
2680 break;
2681 }
2682 default:
2683 break;
2684 }
2685
2686 bool ConstWithoutErrnoAndExceptions =
2688 bool ConstWithoutExceptions =
2690
2691 // ConstAttr is enabled in fast-math mode. In fast-math mode, math-errno is
2692 // disabled.
2693 // Math intrinsics are generated only when math-errno is disabled. Any pragmas
2694 // or attributes that affect math-errno should prevent or allow math
2695 // intrinsics to be generated. Intrinsics are generated:
2696 // 1- In fast math mode, unless math-errno is overriden
2697 // via '#pragma float_control(precise, on)', or via an
2698 // 'attribute__((optnone))'.
2699 // 2- If math-errno was enabled on command line but overriden
2700 // to false via '#pragma float_control(precise, off))' and
2701 // 'attribute__((optnone))' hasn't been used.
2702 // 3- If we are compiling with optimization and errno has been disabled
2703 // via '#pragma float_control(precise, off)', and
2704 // 'attribute__((optnone))' hasn't been used.
2705
2706 bool ConstWithoutErrnoOrExceptions =
2707 ConstWithoutErrnoAndExceptions || ConstWithoutExceptions;
2708 bool GenerateIntrinsics =
2709 (ConstAlways && !OptNone) ||
2710 (!getLangOpts().MathErrno &&
2711 !(ErrnoOverriden.has_value() && ErrnoOverriden.value()) && !OptNone);
2712 if (!GenerateIntrinsics) {
2713 GenerateIntrinsics =
2714 ConstWithoutErrnoOrExceptions && !ConstWithoutErrnoAndExceptions;
2715 if (!GenerateIntrinsics)
2716 GenerateIntrinsics =
2717 ConstWithoutErrnoOrExceptions &&
2718 (!getLangOpts().MathErrno &&
2719 !(ErrnoOverriden.has_value() && ErrnoOverriden.value()) && !OptNone);
2720 if (!GenerateIntrinsics)
2721 GenerateIntrinsics =
2722 ConstWithoutErrnoOrExceptions && ErrnoOverridenToFalseWithOpt;
2723 }
2724 if (GenerateIntrinsics) {
2725 switch (BuiltinIDIfNoAsmLabel) {
2726 case Builtin::BIacos:
2727 case Builtin::BIacosf:
2728 case Builtin::BIacosl:
2729 case Builtin::BI__builtin_acos:
2730 case Builtin::BI__builtin_acosf:
2731 case Builtin::BI__builtin_acosf16:
2732 case Builtin::BI__builtin_acosl:
2733 case Builtin::BI__builtin_acosf128:
2735 *this, E, Intrinsic::acos, Intrinsic::experimental_constrained_acos));
2736
2737 case Builtin::BIasin:
2738 case Builtin::BIasinf:
2739 case Builtin::BIasinl:
2740 case Builtin::BI__builtin_asin:
2741 case Builtin::BI__builtin_asinf:
2742 case Builtin::BI__builtin_asinf16:
2743 case Builtin::BI__builtin_asinl:
2744 case Builtin::BI__builtin_asinf128:
2746 *this, E, Intrinsic::asin, Intrinsic::experimental_constrained_asin));
2747
2748 case Builtin::BIatan:
2749 case Builtin::BIatanf:
2750 case Builtin::BIatanl:
2751 case Builtin::BI__builtin_atan:
2752 case Builtin::BI__builtin_atanf:
2753 case Builtin::BI__builtin_atanf16:
2754 case Builtin::BI__builtin_atanl:
2755 case Builtin::BI__builtin_atanf128:
2757 *this, E, Intrinsic::atan, Intrinsic::experimental_constrained_atan));
2758
2759 case Builtin::BIatan2:
2760 case Builtin::BIatan2f:
2761 case Builtin::BIatan2l:
2762 case Builtin::BI__builtin_atan2:
2763 case Builtin::BI__builtin_atan2f:
2764 case Builtin::BI__builtin_atan2f16:
2765 case Builtin::BI__builtin_atan2l:
2766 case Builtin::BI__builtin_atan2f128:
2768 *this, E, Intrinsic::atan2,
2769 Intrinsic::experimental_constrained_atan2));
2770
2771 case Builtin::BIceil:
2772 case Builtin::BIceilf:
2773 case Builtin::BIceill:
2774 case Builtin::BI__builtin_ceil:
2775 case Builtin::BI__builtin_ceilf:
2776 case Builtin::BI__builtin_ceilf16:
2777 case Builtin::BI__builtin_ceill:
2778 case Builtin::BI__builtin_ceilf128:
2780 Intrinsic::ceil,
2781 Intrinsic::experimental_constrained_ceil));
2782
2783 case Builtin::BIcopysign:
2784 case Builtin::BIcopysignf:
2785 case Builtin::BIcopysignl:
2786 case Builtin::BI__builtin_copysign:
2787 case Builtin::BI__builtin_copysignf:
2788 case Builtin::BI__builtin_copysignf16:
2789 case Builtin::BI__builtin_copysignl:
2790 case Builtin::BI__builtin_copysignf128:
2791 return RValue::get(
2792 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
2793
2794 case Builtin::BIcos:
2795 case Builtin::BIcosf:
2796 case Builtin::BIcosl:
2797 case Builtin::BI__builtin_cos:
2798 case Builtin::BI__builtin_cosf:
2799 case Builtin::BI__builtin_cosf16:
2800 case Builtin::BI__builtin_cosl:
2801 case Builtin::BI__builtin_cosf128:
2803 Intrinsic::cos,
2804 Intrinsic::experimental_constrained_cos));
2805
2806 case Builtin::BIcosh:
2807 case Builtin::BIcoshf:
2808 case Builtin::BIcoshl:
2809 case Builtin::BI__builtin_cosh:
2810 case Builtin::BI__builtin_coshf:
2811 case Builtin::BI__builtin_coshf16:
2812 case Builtin::BI__builtin_coshl:
2813 case Builtin::BI__builtin_coshf128:
2815 *this, E, Intrinsic::cosh, Intrinsic::experimental_constrained_cosh));
2816
2817 case Builtin::BIexp:
2818 case Builtin::BIexpf:
2819 case Builtin::BIexpl:
2820 case Builtin::BI__builtin_exp:
2821 case Builtin::BI__builtin_expf:
2822 case Builtin::BI__builtin_expf16:
2823 case Builtin::BI__builtin_expl:
2824 case Builtin::BI__builtin_expf128:
2826 Intrinsic::exp,
2827 Intrinsic::experimental_constrained_exp));
2828
2829 case Builtin::BIexp2:
2830 case Builtin::BIexp2f:
2831 case Builtin::BIexp2l:
2832 case Builtin::BI__builtin_exp2:
2833 case Builtin::BI__builtin_exp2f:
2834 case Builtin::BI__builtin_exp2f16:
2835 case Builtin::BI__builtin_exp2l:
2836 case Builtin::BI__builtin_exp2f128:
2838 Intrinsic::exp2,
2839 Intrinsic::experimental_constrained_exp2));
2840 case Builtin::BI__builtin_exp10:
2841 case Builtin::BI__builtin_exp10f:
2842 case Builtin::BI__builtin_exp10f16:
2843 case Builtin::BI__builtin_exp10l:
2844 case Builtin::BI__builtin_exp10f128: {
2845 // TODO: strictfp support
2846 if (Builder.getIsFPConstrained())
2847 break;
2848 return RValue::get(
2849 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::exp10));
2850 }
2851 case Builtin::BIfabs:
2852 case Builtin::BIfabsf:
2853 case Builtin::BIfabsl:
2854 case Builtin::BI__builtin_fabs:
2855 case Builtin::BI__builtin_fabsf:
2856 case Builtin::BI__builtin_fabsf16:
2857 case Builtin::BI__builtin_fabsl:
2858 case Builtin::BI__builtin_fabsf128:
2859 return RValue::get(
2860 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs));
2861
2862 case Builtin::BIfloor:
2863 case Builtin::BIfloorf:
2864 case Builtin::BIfloorl:
2865 case Builtin::BI__builtin_floor:
2866 case Builtin::BI__builtin_floorf:
2867 case Builtin::BI__builtin_floorf16:
2868 case Builtin::BI__builtin_floorl:
2869 case Builtin::BI__builtin_floorf128:
2871 Intrinsic::floor,
2872 Intrinsic::experimental_constrained_floor));
2873
2874 case Builtin::BIfma:
2875 case Builtin::BIfmaf:
2876 case Builtin::BIfmal:
2877 case Builtin::BI__builtin_fma:
2878 case Builtin::BI__builtin_fmaf:
2879 case Builtin::BI__builtin_fmaf16:
2880 case Builtin::BI__builtin_fmal:
2881 case Builtin::BI__builtin_fmaf128:
2883 Intrinsic::fma,
2884 Intrinsic::experimental_constrained_fma));
2885
2886 case Builtin::BIfmax:
2887 case Builtin::BIfmaxf:
2888 case Builtin::BIfmaxl:
2889 case Builtin::BI__builtin_fmax:
2890 case Builtin::BI__builtin_fmaxf:
2891 case Builtin::BI__builtin_fmaxf16:
2892 case Builtin::BI__builtin_fmaxl:
2893 case Builtin::BI__builtin_fmaxf128:
2895 Intrinsic::maxnum,
2896 Intrinsic::experimental_constrained_maxnum));
2897
2898 case Builtin::BIfmin:
2899 case Builtin::BIfminf:
2900 case Builtin::BIfminl:
2901 case Builtin::BI__builtin_fmin:
2902 case Builtin::BI__builtin_fminf:
2903 case Builtin::BI__builtin_fminf16:
2904 case Builtin::BI__builtin_fminl:
2905 case Builtin::BI__builtin_fminf128:
2907 Intrinsic::minnum,
2908 Intrinsic::experimental_constrained_minnum));
2909
2910 case Builtin::BIfmaximum_num:
2911 case Builtin::BIfmaximum_numf:
2912 case Builtin::BIfmaximum_numl:
2913 case Builtin::BI__builtin_fmaximum_num:
2914 case Builtin::BI__builtin_fmaximum_numf:
2915 case Builtin::BI__builtin_fmaximum_numf16:
2916 case Builtin::BI__builtin_fmaximum_numl:
2917 case Builtin::BI__builtin_fmaximum_numf128:
2918 return RValue::get(
2919 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::maximumnum));
2920
2921 case Builtin::BIfminimum_num:
2922 case Builtin::BIfminimum_numf:
2923 case Builtin::BIfminimum_numl:
2924 case Builtin::BI__builtin_fminimum_num:
2925 case Builtin::BI__builtin_fminimum_numf:
2926 case Builtin::BI__builtin_fminimum_numf16:
2927 case Builtin::BI__builtin_fminimum_numl:
2928 case Builtin::BI__builtin_fminimum_numf128:
2929 return RValue::get(
2930 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::minimumnum));
2931
2932 // fmod() is a special-case. It maps to the frem instruction rather than an
2933 // LLVM intrinsic.
2934 case Builtin::BIfmod:
2935 case Builtin::BIfmodf:
2936 case Builtin::BIfmodl:
2937 case Builtin::BI__builtin_fmod:
2938 case Builtin::BI__builtin_fmodf:
2939 case Builtin::BI__builtin_fmodf16:
2940 case Builtin::BI__builtin_fmodl:
2941 case Builtin::BI__builtin_fmodf128:
2942 case Builtin::BI__builtin_elementwise_fmod: {
2943 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2944 Value *Arg1 = EmitScalarExpr(E->getArg(0));
2945 Value *Arg2 = EmitScalarExpr(E->getArg(1));
2946 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
2947 }
2948
2949 case Builtin::BIlog:
2950 case Builtin::BIlogf:
2951 case Builtin::BIlogl:
2952 case Builtin::BI__builtin_log:
2953 case Builtin::BI__builtin_logf:
2954 case Builtin::BI__builtin_logf16:
2955 case Builtin::BI__builtin_logl:
2956 case Builtin::BI__builtin_logf128:
2958 Intrinsic::log,
2959 Intrinsic::experimental_constrained_log));
2960
2961 case Builtin::BIlog10:
2962 case Builtin::BIlog10f:
2963 case Builtin::BIlog10l:
2964 case Builtin::BI__builtin_log10:
2965 case Builtin::BI__builtin_log10f:
2966 case Builtin::BI__builtin_log10f16:
2967 case Builtin::BI__builtin_log10l:
2968 case Builtin::BI__builtin_log10f128:
2970 Intrinsic::log10,
2971 Intrinsic::experimental_constrained_log10));
2972
2973 case Builtin::BIlog2:
2974 case Builtin::BIlog2f:
2975 case Builtin::BIlog2l:
2976 case Builtin::BI__builtin_log2:
2977 case Builtin::BI__builtin_log2f:
2978 case Builtin::BI__builtin_log2f16:
2979 case Builtin::BI__builtin_log2l:
2980 case Builtin::BI__builtin_log2f128:
2982 Intrinsic::log2,
2983 Intrinsic::experimental_constrained_log2));
2984
2985 case Builtin::BInearbyint:
2986 case Builtin::BInearbyintf:
2987 case Builtin::BInearbyintl:
2988 case Builtin::BI__builtin_nearbyint:
2989 case Builtin::BI__builtin_nearbyintf:
2990 case Builtin::BI__builtin_nearbyintl:
2991 case Builtin::BI__builtin_nearbyintf128:
2993 Intrinsic::nearbyint,
2994 Intrinsic::experimental_constrained_nearbyint));
2995
2996 case Builtin::BIpow:
2997 case Builtin::BIpowf:
2998 case Builtin::BIpowl:
2999 case Builtin::BI__builtin_pow:
3000 case Builtin::BI__builtin_powf:
3001 case Builtin::BI__builtin_powf16:
3002 case Builtin::BI__builtin_powl:
3003 case Builtin::BI__builtin_powf128:
3005 Intrinsic::pow,
3006 Intrinsic::experimental_constrained_pow));
3007
3008 case Builtin::BIrint:
3009 case Builtin::BIrintf:
3010 case Builtin::BIrintl:
3011 case Builtin::BI__builtin_rint:
3012 case Builtin::BI__builtin_rintf:
3013 case Builtin::BI__builtin_rintf16:
3014 case Builtin::BI__builtin_rintl:
3015 case Builtin::BI__builtin_rintf128:
3017 Intrinsic::rint,
3018 Intrinsic::experimental_constrained_rint));
3019
3020 case Builtin::BIround:
3021 case Builtin::BIroundf:
3022 case Builtin::BIroundl:
3023 case Builtin::BI__builtin_round:
3024 case Builtin::BI__builtin_roundf:
3025 case Builtin::BI__builtin_roundf16:
3026 case Builtin::BI__builtin_roundl:
3027 case Builtin::BI__builtin_roundf128:
3029 Intrinsic::round,
3030 Intrinsic::experimental_constrained_round));
3031
3032 case Builtin::BIroundeven:
3033 case Builtin::BIroundevenf:
3034 case Builtin::BIroundevenl:
3035 case Builtin::BI__builtin_roundeven:
3036 case Builtin::BI__builtin_roundevenf:
3037 case Builtin::BI__builtin_roundevenf16:
3038 case Builtin::BI__builtin_roundevenl:
3039 case Builtin::BI__builtin_roundevenf128:
3041 Intrinsic::roundeven,
3042 Intrinsic::experimental_constrained_roundeven));
3043
3044 case Builtin::BIsin:
3045 case Builtin::BIsinf:
3046 case Builtin::BIsinl:
3047 case Builtin::BI__builtin_sin:
3048 case Builtin::BI__builtin_sinf:
3049 case Builtin::BI__builtin_sinf16:
3050 case Builtin::BI__builtin_sinl:
3051 case Builtin::BI__builtin_sinf128:
3053 Intrinsic::sin,
3054 Intrinsic::experimental_constrained_sin));
3055
3056 case Builtin::BIsinh:
3057 case Builtin::BIsinhf:
3058 case Builtin::BIsinhl:
3059 case Builtin::BI__builtin_sinh:
3060 case Builtin::BI__builtin_sinhf:
3061 case Builtin::BI__builtin_sinhf16:
3062 case Builtin::BI__builtin_sinhl:
3063 case Builtin::BI__builtin_sinhf128:
3065 *this, E, Intrinsic::sinh, Intrinsic::experimental_constrained_sinh));
3066
3067 case Builtin::BI__builtin_sincospi:
3068 case Builtin::BI__builtin_sincospif:
3069 case Builtin::BI__builtin_sincospil:
3070 if (Builder.getIsFPConstrained())
3071 break; // TODO: Emit constrained sincospi intrinsic once one exists.
3072 emitSincosBuiltin(*this, E, Intrinsic::sincospi);
3073 return RValue::get(nullptr);
3074
3075 case Builtin::BIsincos:
3076 case Builtin::BIsincosf:
3077 case Builtin::BIsincosl:
3078 case Builtin::BI__builtin_sincos:
3079 case Builtin::BI__builtin_sincosf:
3080 case Builtin::BI__builtin_sincosf16:
3081 case Builtin::BI__builtin_sincosl:
3082 case Builtin::BI__builtin_sincosf128:
3083 if (Builder.getIsFPConstrained())
3084 break; // TODO: Emit constrained sincos intrinsic once one exists.
3085 emitSincosBuiltin(*this, E, Intrinsic::sincos);
3086 return RValue::get(nullptr);
3087
3088 case Builtin::BIsqrt:
3089 case Builtin::BIsqrtf:
3090 case Builtin::BIsqrtl:
3091 case Builtin::BI__builtin_sqrt:
3092 case Builtin::BI__builtin_sqrtf:
3093 case Builtin::BI__builtin_sqrtf16:
3094 case Builtin::BI__builtin_sqrtl:
3095 case Builtin::BI__builtin_sqrtf128:
3096 case Builtin::BI__builtin_elementwise_sqrt: {
3098 *this, E, Intrinsic::sqrt, Intrinsic::experimental_constrained_sqrt);
3100 return RValue::get(Call);
3101 }
3102
3103 case Builtin::BItan:
3104 case Builtin::BItanf:
3105 case Builtin::BItanl:
3106 case Builtin::BI__builtin_tan:
3107 case Builtin::BI__builtin_tanf:
3108 case Builtin::BI__builtin_tanf16:
3109 case Builtin::BI__builtin_tanl:
3110 case Builtin::BI__builtin_tanf128:
3112 *this, E, Intrinsic::tan, Intrinsic::experimental_constrained_tan));
3113
3114 case Builtin::BItanh:
3115 case Builtin::BItanhf:
3116 case Builtin::BItanhl:
3117 case Builtin::BI__builtin_tanh:
3118 case Builtin::BI__builtin_tanhf:
3119 case Builtin::BI__builtin_tanhf16:
3120 case Builtin::BI__builtin_tanhl:
3121 case Builtin::BI__builtin_tanhf128:
3123 *this, E, Intrinsic::tanh, Intrinsic::experimental_constrained_tanh));
3124
3125 case Builtin::BItrunc:
3126 case Builtin::BItruncf:
3127 case Builtin::BItruncl:
3128 case Builtin::BI__builtin_trunc:
3129 case Builtin::BI__builtin_truncf:
3130 case Builtin::BI__builtin_truncf16:
3131 case Builtin::BI__builtin_truncl:
3132 case Builtin::BI__builtin_truncf128:
3134 Intrinsic::trunc,
3135 Intrinsic::experimental_constrained_trunc));
3136
3137 case Builtin::BIlround:
3138 case Builtin::BIlroundf:
3139 case Builtin::BIlroundl:
3140 case Builtin::BI__builtin_lround:
3141 case Builtin::BI__builtin_lroundf:
3142 case Builtin::BI__builtin_lroundl:
3143 case Builtin::BI__builtin_lroundf128:
3145 *this, E, Intrinsic::lround,
3146 Intrinsic::experimental_constrained_lround));
3147
3148 case Builtin::BIllround:
3149 case Builtin::BIllroundf:
3150 case Builtin::BIllroundl:
3151 case Builtin::BI__builtin_llround:
3152 case Builtin::BI__builtin_llroundf:
3153 case Builtin::BI__builtin_llroundl:
3154 case Builtin::BI__builtin_llroundf128:
3156 *this, E, Intrinsic::llround,
3157 Intrinsic::experimental_constrained_llround));
3158
3159 case Builtin::BIlrint:
3160 case Builtin::BIlrintf:
3161 case Builtin::BIlrintl:
3162 case Builtin::BI__builtin_lrint:
3163 case Builtin::BI__builtin_lrintf:
3164 case Builtin::BI__builtin_lrintl:
3165 case Builtin::BI__builtin_lrintf128:
3167 *this, E, Intrinsic::lrint,
3168 Intrinsic::experimental_constrained_lrint));
3169
3170 case Builtin::BIllrint:
3171 case Builtin::BIllrintf:
3172 case Builtin::BIllrintl:
3173 case Builtin::BI__builtin_llrint:
3174 case Builtin::BI__builtin_llrintf:
3175 case Builtin::BI__builtin_llrintl:
3176 case Builtin::BI__builtin_llrintf128:
3178 *this, E, Intrinsic::llrint,
3179 Intrinsic::experimental_constrained_llrint));
3180 case Builtin::BI__builtin_ldexp:
3181 case Builtin::BI__builtin_ldexpf:
3182 case Builtin::BI__builtin_ldexpl:
3183 case Builtin::BI__builtin_ldexpf16:
3184 case Builtin::BI__builtin_ldexpf128: {
3186 *this, E, Intrinsic::ldexp,
3187 Intrinsic::experimental_constrained_ldexp));
3188 }
3189 default:
3190 break;
3191 }
3192 }
3193
3194 // Check NonnullAttribute/NullabilityArg and Alignment.
3195 auto EmitArgCheck = [&](TypeCheckKind Kind, Address A, const Expr *Arg,
3196 unsigned ParmNum) {
3197 Value *Val = A.emitRawPointer(*this);
3198 EmitNonNullArgCheck(RValue::get(Val), Arg->getType(), Arg->getExprLoc(), FD,
3199 ParmNum);
3200
3201 if (SanOpts.has(SanitizerKind::Alignment)) {
3202 SanitizerSet SkippedChecks;
3203 SkippedChecks.set(SanitizerKind::All);
3204 SkippedChecks.clear(SanitizerKind::Alignment);
3205 SourceLocation Loc = Arg->getExprLoc();
3206 // Strip an implicit cast.
3207 if (auto *CE = dyn_cast<ImplicitCastExpr>(Arg))
3208 if (CE->getCastKind() == CK_BitCast)
3209 Arg = CE->getSubExpr();
3210 EmitTypeCheck(Kind, Loc, Val, Arg->getType(), A.getAlignment(),
3211 SkippedChecks);
3212 }
3213 };
3214
3215 switch (BuiltinIDIfNoAsmLabel) {
3216 default: break;
3217 case Builtin::BI__builtin___CFStringMakeConstantString:
3218 case Builtin::BI__builtin___NSStringMakeConstantString:
3219 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
3220 case Builtin::BI__builtin_stdarg_start:
3221 case Builtin::BI__builtin_va_start:
3222 case Builtin::BI__va_start:
3223 case Builtin::BI__builtin_c23_va_start:
3224 case Builtin::BI__builtin_va_end:
3225 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
3226 ? EmitScalarExpr(E->getArg(0))
3227 : EmitVAListRef(E->getArg(0)).emitRawPointer(*this),
3228 BuiltinID != Builtin::BI__builtin_va_end);
3229 return RValue::get(nullptr);
3230 case Builtin::BI__builtin_va_copy: {
3231 Value *DstPtr = EmitVAListRef(E->getArg(0)).emitRawPointer(*this);
3232 Value *SrcPtr = EmitVAListRef(E->getArg(1)).emitRawPointer(*this);
3233 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy, {DstPtr->getType()}),
3234 {DstPtr, SrcPtr});
3235 return RValue::get(nullptr);
3236 }
3237 case Builtin::BIabs:
3238 case Builtin::BIlabs:
3239 case Builtin::BIllabs:
3240 case Builtin::BI__builtin_abs:
3241 case Builtin::BI__builtin_labs:
3242 case Builtin::BI__builtin_llabs: {
3243 bool SanitizeOverflow = SanOpts.has(SanitizerKind::SignedIntegerOverflow);
3244
3245 Value *Result;
3246 switch (getLangOpts().getSignedOverflowBehavior()) {
3248 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), false);
3249 break;
3251 if (!SanitizeOverflow) {
3252 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), true);
3253 break;
3254 }
3255 [[fallthrough]];
3257 // TODO: Somehow handle the corner case when the address of abs is taken.
3258 Result = EmitOverflowCheckedAbs(*this, E, SanitizeOverflow);
3259 break;
3260 }
3261 return RValue::get(Result);
3262 }
3263 case Builtin::BI__builtin_complex: {
3264 Value *Real = EmitScalarExpr(E->getArg(0));
3265 Value *Imag = EmitScalarExpr(E->getArg(1));
3266 return RValue::getComplex({Real, Imag});
3267 }
3268 case Builtin::BI__builtin_conj:
3269 case Builtin::BI__builtin_conjf:
3270 case Builtin::BI__builtin_conjl:
3271 case Builtin::BIconj:
3272 case Builtin::BIconjf:
3273 case Builtin::BIconjl: {
3274 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3275 Value *Real = ComplexVal.first;
3276 Value *Imag = ComplexVal.second;
3277 Imag = Builder.CreateFNeg(Imag, "neg");
3278 return RValue::getComplex(std::make_pair(Real, Imag));
3279 }
3280 case Builtin::BI__builtin_creal:
3281 case Builtin::BI__builtin_crealf:
3282 case Builtin::BI__builtin_creall:
3283 case Builtin::BIcreal:
3284 case Builtin::BIcrealf:
3285 case Builtin::BIcreall: {
3286 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3287 return RValue::get(ComplexVal.first);
3288 }
3289
3290 case Builtin::BI__builtin_preserve_access_index: {
3291 // Only enabled preserved access index region when debuginfo
3292 // is available as debuginfo is needed to preserve user-level
3293 // access pattern.
3294 if (!getDebugInfo()) {
3295 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
3296 return RValue::get(EmitScalarExpr(E->getArg(0)));
3297 }
3298
3299 // Nested builtin_preserve_access_index() not supported
3301 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
3302 return RValue::get(EmitScalarExpr(E->getArg(0)));
3303 }
3304
3305 IsInPreservedAIRegion = true;
3306 Value *Res = EmitScalarExpr(E->getArg(0));
3307 IsInPreservedAIRegion = false;
3308 return RValue::get(Res);
3309 }
3310
3311 case Builtin::BI__builtin_cimag:
3312 case Builtin::BI__builtin_cimagf:
3313 case Builtin::BI__builtin_cimagl:
3314 case Builtin::BIcimag:
3315 case Builtin::BIcimagf:
3316 case Builtin::BIcimagl: {
3317 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3318 return RValue::get(ComplexVal.second);
3319 }
3320
3321 case Builtin::BI__builtin_clrsb:
3322 case Builtin::BI__builtin_clrsbl:
3323 case Builtin::BI__builtin_clrsbll: {
3324 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1 or
3325 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3326
3327 llvm::Type *ArgType = ArgValue->getType();
3328 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3329
3330 llvm::Type *ResultType = ConvertType(E->getType());
3331 Value *Zero = llvm::Constant::getNullValue(ArgType);
3332 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
3333 Value *Inverse = Builder.CreateNot(ArgValue, "not");
3334 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
3335 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
3336 Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
3337 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3338 "cast");
3339 return RValue::get(Result);
3340 }
3341 case Builtin::BI__builtin_ctzs:
3342 case Builtin::BI__builtin_ctz:
3343 case Builtin::BI__builtin_ctzl:
3344 case Builtin::BI__builtin_ctzll:
3345 case Builtin::BI__builtin_ctzg:
3346 case Builtin::BI__builtin_elementwise_cttz: {
3347 bool HasFallback =
3348 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_ctzg ||
3349 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_cttz) &&
3350 E->getNumArgs() > 1;
3351
3352 Value *ArgValue =
3353 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3355
3356 llvm::Type *ArgType = ArgValue->getType();
3357 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3358
3359 llvm::Type *ResultType = ConvertType(E->getType());
3360 // The elementwise builtins always exhibit zero-is-undef behaviour
3361 Value *ZeroUndef = Builder.getInt1(
3362 HasFallback || getTarget().isCLZForZeroUndef() ||
3363 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_cttz);
3364 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3365 if (Result->getType() != ResultType)
3366 Result =
3367 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3368 if (!HasFallback)
3369 return RValue::get(Result);
3370
3371 Value *Zero = Constant::getNullValue(ArgType);
3372 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3373 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3374 Value *ResultOrFallback =
3375 Builder.CreateSelect(IsZero, FallbackValue, Result, "ctzg");
3376 return RValue::get(ResultOrFallback);
3377 }
3378 case Builtin::BI__builtin_clzs:
3379 case Builtin::BI__builtin_clz:
3380 case Builtin::BI__builtin_clzl:
3381 case Builtin::BI__builtin_clzll:
3382 case Builtin::BI__builtin_clzg:
3383 case Builtin::BI__builtin_elementwise_ctlz: {
3384 bool HasFallback =
3385 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_clzg ||
3386 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctlz) &&
3387 E->getNumArgs() > 1;
3388
3389 Value *ArgValue =
3390 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3392
3393 llvm::Type *ArgType = ArgValue->getType();
3394 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3395
3396 llvm::Type *ResultType = ConvertType(E->getType());
3397 // The elementwise builtins always exhibit zero-is-undef behaviour
3398 Value *ZeroUndef = Builder.getInt1(
3399 HasFallback || getTarget().isCLZForZeroUndef() ||
3400 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctlz);
3401 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3402 if (Result->getType() != ResultType)
3403 Result =
3404 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3405 if (!HasFallback)
3406 return RValue::get(Result);
3407
3408 Value *Zero = Constant::getNullValue(ArgType);
3409 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3410 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3411 Value *ResultOrFallback =
3412 Builder.CreateSelect(IsZero, FallbackValue, Result, "clzg");
3413 return RValue::get(ResultOrFallback);
3414 }
3415 case Builtin::BI__builtin_ffs:
3416 case Builtin::BI__builtin_ffsl:
3417 case Builtin::BI__builtin_ffsll: {
3418 // ffs(x) -> x ? cttz(x) + 1 : 0
3419 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3420
3421 llvm::Type *ArgType = ArgValue->getType();
3422 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3423
3424 llvm::Type *ResultType = ConvertType(E->getType());
3425 Value *Tmp =
3426 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
3427 llvm::ConstantInt::get(ArgType, 1));
3428 Value *Zero = llvm::Constant::getNullValue(ArgType);
3429 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3430 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
3431 if (Result->getType() != ResultType)
3432 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3433 "cast");
3434 return RValue::get(Result);
3435 }
3436 case Builtin::BI__builtin_parity:
3437 case Builtin::BI__builtin_parityl:
3438 case Builtin::BI__builtin_parityll: {
3439 // parity(x) -> ctpop(x) & 1
3440 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3441
3442 llvm::Type *ArgType = ArgValue->getType();
3443 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3444
3445 llvm::Type *ResultType = ConvertType(E->getType());
3446 Value *Tmp = Builder.CreateCall(F, ArgValue);
3447 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
3448 if (Result->getType() != ResultType)
3449 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3450 "cast");
3451 return RValue::get(Result);
3452 }
3453 case Builtin::BI__lzcnt16:
3454 case Builtin::BI__lzcnt:
3455 case Builtin::BI__lzcnt64: {
3456 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3457
3458 llvm::Type *ArgType = ArgValue->getType();
3459 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3460
3461 llvm::Type *ResultType = ConvertType(E->getType());
3462 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
3463 if (Result->getType() != ResultType)
3464 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3465 "cast");
3466 return RValue::get(Result);
3467 }
3468 case Builtin::BI__popcnt16:
3469 case Builtin::BI__popcnt:
3470 case Builtin::BI__popcnt64:
3471 case Builtin::BI__builtin_popcount:
3472 case Builtin::BI__builtin_popcountl:
3473 case Builtin::BI__builtin_popcountll:
3474 case Builtin::BI__builtin_popcountg: {
3475 Value *ArgValue = EmitBitCountExpr(*this, E->getArg(0));
3476
3477 llvm::Type *ArgType = ArgValue->getType();
3478 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3479
3480 llvm::Type *ResultType = ConvertType(E->getType());
3481 Value *Result = Builder.CreateCall(F, ArgValue);
3482 if (Result->getType() != ResultType)
3483 Result =
3484 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3485 return RValue::get(Result);
3486 }
3487 case Builtin::BI__builtin_unpredictable: {
3488 // Always return the argument of __builtin_unpredictable. LLVM does not
3489 // handle this builtin. Metadata for this builtin should be added directly
3490 // to instructions such as branches or switches that use it.
3491 return RValue::get(EmitScalarExpr(E->getArg(0)));
3492 }
3493 case Builtin::BI__builtin_expect: {
3494 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3495 llvm::Type *ArgType = ArgValue->getType();
3496
3497 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3498 // Don't generate llvm.expect on -O0 as the backend won't use it for
3499 // anything.
3500 // Note, we still IRGen ExpectedValue because it could have side-effects.
3501 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3502 return RValue::get(ArgValue);
3503
3504 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
3505 Value *Result =
3506 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
3507 return RValue::get(Result);
3508 }
3509 case Builtin::BI__builtin_expect_with_probability: {
3510 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3511 llvm::Type *ArgType = ArgValue->getType();
3512
3513 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3514 llvm::APFloat Probability(0.0);
3515 const Expr *ProbArg = E->getArg(2);
3516 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
3517 assert(EvalSucceed && "probability should be able to evaluate as float");
3518 (void)EvalSucceed;
3519 bool LoseInfo = false;
3520 Probability.convert(llvm::APFloat::IEEEdouble(),
3521 llvm::RoundingMode::Dynamic, &LoseInfo);
3522 llvm::Type *Ty = ConvertType(ProbArg->getType());
3523 Constant *Confidence = ConstantFP::get(Ty, Probability);
3524 // Don't generate llvm.expect.with.probability on -O0 as the backend
3525 // won't use it for anything.
3526 // Note, we still IRGen ExpectedValue because it could have side-effects.
3527 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3528 return RValue::get(ArgValue);
3529
3530 Function *FnExpect =
3531 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
3532 Value *Result = Builder.CreateCall(
3533 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
3534 return RValue::get(Result);
3535 }
3536 case Builtin::BI__builtin_assume_aligned: {
3537 const Expr *Ptr = E->getArg(0);
3538 Value *PtrValue = EmitScalarExpr(Ptr);
3539 Value *OffsetValue =
3540 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
3541
3542 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
3543 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
3544 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
3545 AlignmentCI = ConstantInt::get(AlignmentCI->getIntegerType(),
3546 llvm::Value::MaximumAlignment);
3547
3548 emitAlignmentAssumption(PtrValue, Ptr,
3549 /*The expr loc is sufficient.*/ SourceLocation(),
3550 AlignmentCI, OffsetValue);
3551 return RValue::get(PtrValue);
3552 }
3553 case Builtin::BI__builtin_assume_dereferenceable: {
3554 const Expr *Ptr = E->getArg(0);
3555 const Expr *Size = E->getArg(1);
3556 Value *PtrValue = EmitScalarExpr(Ptr);
3557 Value *SizeValue = EmitScalarExpr(Size);
3558 if (SizeValue->getType() != IntPtrTy)
3559 SizeValue =
3560 Builder.CreateIntCast(SizeValue, IntPtrTy, false, "casted.size");
3561 Builder.CreateDereferenceableAssumption(PtrValue, SizeValue);
3562 return RValue::get(nullptr);
3563 }
3564 case Builtin::BI__assume:
3565 case Builtin::BI__builtin_assume: {
3566 if (E->getArg(0)->HasSideEffects(getContext()))
3567 return RValue::get(nullptr);
3568
3569 Value *ArgValue = EmitCheckedArgForAssume(E->getArg(0));
3570 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
3571 Builder.CreateCall(FnAssume, ArgValue);
3572 return RValue::get(nullptr);
3573 }
3574 case Builtin::BI__builtin_assume_separate_storage: {
3575 const Expr *Arg0 = E->getArg(0);
3576 const Expr *Arg1 = E->getArg(1);
3577
3578 Value *Value0 = EmitScalarExpr(Arg0);
3579 Value *Value1 = EmitScalarExpr(Arg1);
3580
3581 Value *Values[] = {Value0, Value1};
3582 OperandBundleDefT<Value *> OBD("separate_storage", Values);
3583 Builder.CreateAssumption(ConstantInt::getTrue(getLLVMContext()), {OBD});
3584 return RValue::get(nullptr);
3585 }
3586 case Builtin::BI__builtin_allow_runtime_check: {
3587 StringRef Kind =
3588 cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
3589 LLVMContext &Ctx = CGM.getLLVMContext();
3590 llvm::Value *Allow = Builder.CreateCall(
3591 CGM.getIntrinsic(Intrinsic::allow_runtime_check),
3592 llvm::MetadataAsValue::get(Ctx, llvm::MDString::get(Ctx, Kind)));
3593 return RValue::get(Allow);
3594 }
3595 case Builtin::BI__arithmetic_fence: {
3596 // Create the builtin call if FastMath is selected, and the target
3597 // supports the builtin, otherwise just return the argument.
3598 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3599 llvm::FastMathFlags FMF = Builder.getFastMathFlags();
3600 bool isArithmeticFenceEnabled =
3601 FMF.allowReassoc() &&
3603 QualType ArgType = E->getArg(0)->getType();
3604 if (ArgType->isComplexType()) {
3605 if (isArithmeticFenceEnabled) {
3606 QualType ElementType = ArgType->castAs<ComplexType>()->getElementType();
3607 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3608 Value *Real = Builder.CreateArithmeticFence(ComplexVal.first,
3609 ConvertType(ElementType));
3610 Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second,
3611 ConvertType(ElementType));
3612 return RValue::getComplex(std::make_pair(Real, Imag));
3613 }
3614 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3615 Value *Real = ComplexVal.first;
3616 Value *Imag = ComplexVal.second;
3617 return RValue::getComplex(std::make_pair(Real, Imag));
3618 }
3619 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3620 if (isArithmeticFenceEnabled)
3621 return RValue::get(
3622 Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType)));
3623 return RValue::get(ArgValue);
3624 }
3625 case Builtin::BI__builtin_bswap16:
3626 case Builtin::BI__builtin_bswap32:
3627 case Builtin::BI__builtin_bswap64:
3628 case Builtin::BI_byteswap_ushort:
3629 case Builtin::BI_byteswap_ulong:
3630 case Builtin::BI_byteswap_uint64: {
3631 return RValue::get(
3632 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3633 }
3634 case Builtin::BI__builtin_bitreverse8:
3635 case Builtin::BI__builtin_bitreverse16:
3636 case Builtin::BI__builtin_bitreverse32:
3637 case Builtin::BI__builtin_bitreverse64: {
3638 return RValue::get(
3639 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bitreverse));
3640 }
3641 case Builtin::BI__builtin_rotateleft8:
3642 case Builtin::BI__builtin_rotateleft16:
3643 case Builtin::BI__builtin_rotateleft32:
3644 case Builtin::BI__builtin_rotateleft64:
3645 case Builtin::BI_rotl8: // Microsoft variants of rotate left
3646 case Builtin::BI_rotl16:
3647 case Builtin::BI_rotl:
3648 case Builtin::BI_lrotl:
3649 case Builtin::BI_rotl64:
3650 return emitRotate(E, false);
3651
3652 case Builtin::BI__builtin_rotateright8:
3653 case Builtin::BI__builtin_rotateright16:
3654 case Builtin::BI__builtin_rotateright32:
3655 case Builtin::BI__builtin_rotateright64:
3656 case Builtin::BI_rotr8: // Microsoft variants of rotate right
3657 case Builtin::BI_rotr16:
3658 case Builtin::BI_rotr:
3659 case Builtin::BI_lrotr:
3660 case Builtin::BI_rotr64:
3661 return emitRotate(E, true);
3662
3663 case Builtin::BI__builtin_constant_p: {
3664 llvm::Type *ResultType = ConvertType(E->getType());
3665
3666 const Expr *Arg = E->getArg(0);
3667 QualType ArgType = Arg->getType();
3668 // FIXME: The allowance for Obj-C pointers and block pointers is historical
3669 // and likely a mistake.
3670 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
3671 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
3672 // Per the GCC documentation, only numeric constants are recognized after
3673 // inlining.
3674 return RValue::get(ConstantInt::get(ResultType, 0));
3675
3676 if (Arg->HasSideEffects(getContext()))
3677 // The argument is unevaluated, so be conservative if it might have
3678 // side-effects.
3679 return RValue::get(ConstantInt::get(ResultType, 0));
3680
3681 Value *ArgValue = EmitScalarExpr(Arg);
3682 if (ArgType->isObjCObjectPointerType()) {
3683 // Convert Objective-C objects to id because we cannot distinguish between
3684 // LLVM types for Obj-C classes as they are opaque.
3685 ArgType = CGM.getContext().getObjCIdType();
3686 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
3687 }
3688 Function *F =
3689 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
3690 Value *Result = Builder.CreateCall(F, ArgValue);
3691 if (Result->getType() != ResultType)
3692 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
3693 return RValue::get(Result);
3694 }
3695 case Builtin::BI__builtin_dynamic_object_size:
3696 case Builtin::BI__builtin_object_size: {
3697 unsigned Type =
3698 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
3699 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
3700
3701 // We pass this builtin onto the optimizer so that it can figure out the
3702 // object size in more complex cases.
3703 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
3704 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
3705 /*EmittedE=*/nullptr, IsDynamic));
3706 }
3707 case Builtin::BI__builtin_counted_by_ref: {
3708 // Default to returning '(void *) 0'.
3709 llvm::Value *Result = llvm::ConstantPointerNull::get(
3710 llvm::PointerType::getUnqual(getLLVMContext()));
3711
3712 const Expr *Arg = E->getArg(0)->IgnoreParenImpCasts();
3713
3714 if (auto *UO = dyn_cast<UnaryOperator>(Arg);
3715 UO && UO->getOpcode() == UO_AddrOf) {
3716 Arg = UO->getSubExpr()->IgnoreParenImpCasts();
3717
3718 if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Arg))
3719 Arg = ASE->getBase()->IgnoreParenImpCasts();
3720 }
3721
3722 if (const MemberExpr *ME = dyn_cast_if_present<MemberExpr>(Arg)) {
3723 if (auto *CATy =
3725 CATy && CATy->getKind() == CountAttributedType::CountedBy) {
3726 const auto *FAMDecl = cast<FieldDecl>(ME->getMemberDecl());
3727 if (const FieldDecl *CountFD = FAMDecl->findCountedByField())
3728 Result = GetCountedByFieldExprGEP(Arg, FAMDecl, CountFD);
3729 else
3730 llvm::report_fatal_error("Cannot find the counted_by 'count' field");
3731 }
3732 }
3733
3734 return RValue::get(Result);
3735 }
3736 case Builtin::BI__builtin_prefetch: {
3737 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
3738 // FIXME: Technically these constants should of type 'int', yes?
3739 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
3740 llvm::ConstantInt::get(Int32Ty, 0);
3741 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
3742 llvm::ConstantInt::get(Int32Ty, 3);
3743 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
3744 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
3745 Builder.CreateCall(F, {Address, RW, Locality, Data});
3746 return RValue::get(nullptr);
3747 }
3748 case Builtin::BI__builtin_readcyclecounter: {
3749 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
3750 return RValue::get(Builder.CreateCall(F));
3751 }
3752 case Builtin::BI__builtin_readsteadycounter: {
3753 Function *F = CGM.getIntrinsic(Intrinsic::readsteadycounter);
3754 return RValue::get(Builder.CreateCall(F));
3755 }
3756 case Builtin::BI__builtin___clear_cache: {
3757 Value *Begin = EmitScalarExpr(E->getArg(0));
3758 Value *End = EmitScalarExpr(E->getArg(1));
3759 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
3760 return RValue::get(Builder.CreateCall(F, {Begin, End}));
3761 }
3762 case Builtin::BI__builtin_trap:
3763 EmitTrapCall(Intrinsic::trap);
3764 return RValue::get(nullptr);
3765 case Builtin::BI__builtin_verbose_trap: {
3766 llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
3767 if (getDebugInfo()) {
3768 TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
3769 TrapLocation, *E->getArg(0)->tryEvaluateString(getContext()),
3771 }
3772 ApplyDebugLocation ApplyTrapDI(*this, TrapLocation);
3773 // Currently no attempt is made to prevent traps from being merged.
3774 EmitTrapCall(Intrinsic::trap);
3775 return RValue::get(nullptr);
3776 }
3777 case Builtin::BI__debugbreak:
3778 EmitTrapCall(Intrinsic::debugtrap);
3779 return RValue::get(nullptr);
3780 case Builtin::BI__builtin_unreachable: {
3782
3783 // We do need to preserve an insertion point.
3784 EmitBlock(createBasicBlock("unreachable.cont"));
3785
3786 return RValue::get(nullptr);
3787 }
3788
3789 case Builtin::BI__builtin_powi:
3790 case Builtin::BI__builtin_powif:
3791 case Builtin::BI__builtin_powil: {
3792 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
3793 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
3794
3795 if (Builder.getIsFPConstrained()) {
3796 // FIXME: llvm.powi has 2 mangling types,
3797 // llvm.experimental.constrained.powi has one.
3798 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3799 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
3800 Src0->getType());
3801 return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
3802 }
3803
3804 Function *F = CGM.getIntrinsic(Intrinsic::powi,
3805 { Src0->getType(), Src1->getType() });
3806 return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
3807 }
3808 case Builtin::BI__builtin_frexpl: {
3809 // Linux PPC will not be adding additional PPCDoubleDouble support.
3810 // WIP to switch default to IEEE long double. Will emit libcall for
3811 // frexpl instead of legalizing this type in the BE.
3812 if (&getTarget().getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble())
3813 break;
3814 [[fallthrough]];
3815 }
3816 case Builtin::BI__builtin_frexp:
3817 case Builtin::BI__builtin_frexpf:
3818 case Builtin::BI__builtin_frexpf128:
3819 case Builtin::BI__builtin_frexpf16:
3820 return RValue::get(emitFrexpBuiltin(*this, E, Intrinsic::frexp));
3821 case Builtin::BImodf:
3822 case Builtin::BImodff:
3823 case Builtin::BImodfl:
3824 case Builtin::BI__builtin_modf:
3825 case Builtin::BI__builtin_modff:
3826 case Builtin::BI__builtin_modfl:
3827 if (Builder.getIsFPConstrained())
3828 break; // TODO: Emit constrained modf intrinsic once one exists.
3829 return RValue::get(emitModfBuiltin(*this, E, Intrinsic::modf));
3830 case Builtin::BI__builtin_isgreater:
3831 case Builtin::BI__builtin_isgreaterequal:
3832 case Builtin::BI__builtin_isless:
3833 case Builtin::BI__builtin_islessequal:
3834 case Builtin::BI__builtin_islessgreater:
3835 case Builtin::BI__builtin_isunordered: {
3836 // Ordered comparisons: we know the arguments to these are matching scalar
3837 // floating point values.
3838 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3839 Value *LHS = EmitScalarExpr(E->getArg(0));
3840 Value *RHS = EmitScalarExpr(E->getArg(1));
3841
3842 switch (BuiltinID) {
3843 default: llvm_unreachable("Unknown ordered comparison");
3844 case Builtin::BI__builtin_isgreater:
3845 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
3846 break;
3847 case Builtin::BI__builtin_isgreaterequal:
3848 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
3849 break;
3850 case Builtin::BI__builtin_isless:
3851 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
3852 break;
3853 case Builtin::BI__builtin_islessequal:
3854 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
3855 break;
3856 case Builtin::BI__builtin_islessgreater:
3857 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
3858 break;
3859 case Builtin::BI__builtin_isunordered:
3860 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
3861 break;
3862 }
3863 // ZExt bool to int type.
3864 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
3865 }
3866
3867 case Builtin::BI__builtin_isnan: {
3868 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3869 Value *V = EmitScalarExpr(E->getArg(0));
3870 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3871 return RValue::get(Result);
3872 return RValue::get(
3873 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNan),
3874 ConvertType(E->getType())));
3875 }
3876
3877 case Builtin::BI__builtin_issignaling: {
3878 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3879 Value *V = EmitScalarExpr(E->getArg(0));
3880 return RValue::get(
3881 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSNan),
3882 ConvertType(E->getType())));
3883 }
3884
3885 case Builtin::BI__builtin_isinf: {
3886 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3887 Value *V = EmitScalarExpr(E->getArg(0));
3888 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3889 return RValue::get(Result);
3890 return RValue::get(
3891 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcInf),
3892 ConvertType(E->getType())));
3893 }
3894
3895 case Builtin::BIfinite:
3896 case Builtin::BI__finite:
3897 case Builtin::BIfinitef:
3898 case Builtin::BI__finitef:
3899 case Builtin::BIfinitel:
3900 case Builtin::BI__finitel:
3901 case Builtin::BI__builtin_isfinite: {
3902 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3903 Value *V = EmitScalarExpr(E->getArg(0));
3904 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3905 return RValue::get(Result);
3906 return RValue::get(
3907 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcFinite),
3908 ConvertType(E->getType())));
3909 }
3910
3911 case Builtin::BI__builtin_isnormal: {
3912 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3913 Value *V = EmitScalarExpr(E->getArg(0));
3914 return RValue::get(
3915 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNormal),
3916 ConvertType(E->getType())));
3917 }
3918
3919 case Builtin::BI__builtin_issubnormal: {
3920 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3921 Value *V = EmitScalarExpr(E->getArg(0));
3922 return RValue::get(
3923 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSubnormal),
3924 ConvertType(E->getType())));
3925 }
3926
3927 case Builtin::BI__builtin_iszero: {
3928 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3929 Value *V = EmitScalarExpr(E->getArg(0));
3930 return RValue::get(
3931 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcZero),
3932 ConvertType(E->getType())));
3933 }
3934
3935 case Builtin::BI__builtin_isfpclass: {
3937 if (!E->getArg(1)->EvaluateAsInt(Result, CGM.getContext()))
3938 break;
3939 uint64_t Test = Result.Val.getInt().getLimitedValue();
3940 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3941 Value *V = EmitScalarExpr(E->getArg(0));
3942 return RValue::get(Builder.CreateZExt(Builder.createIsFPClass(V, Test),
3943 ConvertType(E->getType())));
3944 }
3945
3946 case Builtin::BI__builtin_nondeterministic_value: {
3947 llvm::Type *Ty = ConvertType(E->getArg(0)->getType());
3948
3949 Value *Result = PoisonValue::get(Ty);
3950 Result = Builder.CreateFreeze(Result);
3951
3952 return RValue::get(Result);
3953 }
3954
3955 case Builtin::BI__builtin_elementwise_abs: {
3956 Value *Result;
3957 QualType QT = E->getArg(0)->getType();
3958
3959 if (auto *VecTy = QT->getAs<VectorType>())
3960 QT = VecTy->getElementType();
3961 if (QT->isIntegerType())
3962 Result = Builder.CreateBinaryIntrinsic(
3963 Intrinsic::abs, EmitScalarExpr(E->getArg(0)), Builder.getFalse(),
3964 nullptr, "elt.abs");
3965 else
3966 Result = emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs,
3967 "elt.abs");
3968
3969 return RValue::get(Result);
3970 }
3971 case Builtin::BI__builtin_elementwise_acos:
3973 *this, E, Intrinsic::acos, "elt.acos"));
3974 case Builtin::BI__builtin_elementwise_asin:
3976 *this, E, Intrinsic::asin, "elt.asin"));
3977 case Builtin::BI__builtin_elementwise_atan:
3979 *this, E, Intrinsic::atan, "elt.atan"));
3980 case Builtin::BI__builtin_elementwise_atan2:
3982 *this, E, Intrinsic::atan2, "elt.atan2"));
3983 case Builtin::BI__builtin_elementwise_ceil:
3985 *this, E, Intrinsic::ceil, "elt.ceil"));
3986 case Builtin::BI__builtin_elementwise_exp:
3988 *this, E, Intrinsic::exp, "elt.exp"));
3989 case Builtin::BI__builtin_elementwise_exp2:
3991 *this, E, Intrinsic::exp2, "elt.exp2"));
3992 case Builtin::BI__builtin_elementwise_exp10:
3994 *this, E, Intrinsic::exp10, "elt.exp10"));
3995 case Builtin::BI__builtin_elementwise_log:
3997 *this, E, Intrinsic::log, "elt.log"));
3998 case Builtin::BI__builtin_elementwise_log2:
4000 *this, E, Intrinsic::log2, "elt.log2"));
4001 case Builtin::BI__builtin_elementwise_log10:
4003 *this, E, Intrinsic::log10, "elt.log10"));
4004 case Builtin::BI__builtin_elementwise_pow: {
4005 return RValue::get(
4006 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::pow));
4007 }
4008 case Builtin::BI__builtin_elementwise_bitreverse:
4010 *this, E, Intrinsic::bitreverse, "elt.bitreverse"));
4011 case Builtin::BI__builtin_elementwise_cos:
4013 *this, E, Intrinsic::cos, "elt.cos"));
4014 case Builtin::BI__builtin_elementwise_cosh:
4016 *this, E, Intrinsic::cosh, "elt.cosh"));
4017 case Builtin::BI__builtin_elementwise_floor:
4019 *this, E, Intrinsic::floor, "elt.floor"));
4020 case Builtin::BI__builtin_elementwise_popcount:
4022 *this, E, Intrinsic::ctpop, "elt.ctpop"));
4023 case Builtin::BI__builtin_elementwise_roundeven:
4025 *this, E, Intrinsic::roundeven, "elt.roundeven"));
4026 case Builtin::BI__builtin_elementwise_round:
4028 *this, E, Intrinsic::round, "elt.round"));
4029 case Builtin::BI__builtin_elementwise_rint:
4031 *this, E, Intrinsic::rint, "elt.rint"));
4032 case Builtin::BI__builtin_elementwise_nearbyint:
4034 *this, E, Intrinsic::nearbyint, "elt.nearbyint"));
4035 case Builtin::BI__builtin_elementwise_sin:
4037 *this, E, Intrinsic::sin, "elt.sin"));
4038 case Builtin::BI__builtin_elementwise_sinh:
4040 *this, E, Intrinsic::sinh, "elt.sinh"));
4041 case Builtin::BI__builtin_elementwise_tan:
4043 *this, E, Intrinsic::tan, "elt.tan"));
4044 case Builtin::BI__builtin_elementwise_tanh:
4046 *this, E, Intrinsic::tanh, "elt.tanh"));
4047 case Builtin::BI__builtin_elementwise_trunc:
4049 *this, E, Intrinsic::trunc, "elt.trunc"));
4050 case Builtin::BI__builtin_elementwise_canonicalize:
4052 *this, E, Intrinsic::canonicalize, "elt.canonicalize"));
4053 case Builtin::BI__builtin_elementwise_copysign:
4054 return RValue::get(
4055 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
4056 case Builtin::BI__builtin_elementwise_fma:
4057 return RValue::get(
4058 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fma));
4059 case Builtin::BI__builtin_elementwise_fshl:
4060 return RValue::get(
4061 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshl));
4062 case Builtin::BI__builtin_elementwise_fshr:
4063 return RValue::get(
4064 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshr));
4065
4066 case Builtin::BI__builtin_elementwise_add_sat:
4067 case Builtin::BI__builtin_elementwise_sub_sat: {
4068 Value *Op0 = EmitScalarExpr(E->getArg(0));
4069 Value *Op1 = EmitScalarExpr(E->getArg(1));
4070 Value *Result;
4071 assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected");
4072 QualType Ty = E->getArg(0)->getType();
4073 if (auto *VecTy = Ty->getAs<VectorType>())
4074 Ty = VecTy->getElementType();
4075 bool IsSigned = Ty->isSignedIntegerType();
4076 unsigned Opc;
4077 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat)
4078 Opc = IsSigned ? Intrinsic::sadd_sat : Intrinsic::uadd_sat;
4079 else
4080 Opc = IsSigned ? Intrinsic::ssub_sat : Intrinsic::usub_sat;
4081 Result = Builder.CreateBinaryIntrinsic(Opc, Op0, Op1, nullptr, "elt.sat");
4082 return RValue::get(Result);
4083 }
4084
4085 case Builtin::BI__builtin_elementwise_max: {
4086 Value *Op0 = EmitScalarExpr(E->getArg(0));
4087 Value *Op1 = EmitScalarExpr(E->getArg(1));
4088 Value *Result;
4089 if (Op0->getType()->isIntOrIntVectorTy()) {
4090 QualType Ty = E->getArg(0)->getType();
4091 if (auto *VecTy = Ty->getAs<VectorType>())
4092 Ty = VecTy->getElementType();
4093 Result = Builder.CreateBinaryIntrinsic(
4094 Ty->isSignedIntegerType() ? Intrinsic::smax : Intrinsic::umax, Op0,
4095 Op1, nullptr, "elt.max");
4096 } else
4097 Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.max");
4098 return RValue::get(Result);
4099 }
4100 case Builtin::BI__builtin_elementwise_min: {
4101 Value *Op0 = EmitScalarExpr(E->getArg(0));
4102 Value *Op1 = EmitScalarExpr(E->getArg(1));
4103 Value *Result;
4104 if (Op0->getType()->isIntOrIntVectorTy()) {
4105 QualType Ty = E->getArg(0)->getType();
4106 if (auto *VecTy = Ty->getAs<VectorType>())
4107 Ty = VecTy->getElementType();
4108 Result = Builder.CreateBinaryIntrinsic(
4109 Ty->isSignedIntegerType() ? Intrinsic::smin : Intrinsic::umin, Op0,
4110 Op1, nullptr, "elt.min");
4111 } else
4112 Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.min");
4113 return RValue::get(Result);
4114 }
4115
4116 case Builtin::BI__builtin_elementwise_maxnum: {
4117 Value *Op0 = EmitScalarExpr(E->getArg(0));
4118 Value *Op1 = EmitScalarExpr(E->getArg(1));
4119 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::maxnum, Op0,
4120 Op1, nullptr, "elt.maxnum");
4121 return RValue::get(Result);
4122 }
4123
4124 case Builtin::BI__builtin_elementwise_minnum: {
4125 Value *Op0 = EmitScalarExpr(E->getArg(0));
4126 Value *Op1 = EmitScalarExpr(E->getArg(1));
4127 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::minnum, Op0,
4128 Op1, nullptr, "elt.minnum");
4129 return RValue::get(Result);
4130 }
4131
4132 case Builtin::BI__builtin_elementwise_maximum: {
4133 Value *Op0 = EmitScalarExpr(E->getArg(0));
4134 Value *Op1 = EmitScalarExpr(E->getArg(1));
4135 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::maximum, Op0, Op1,
4136 nullptr, "elt.maximum");
4137 return RValue::get(Result);
4138 }
4139
4140 case Builtin::BI__builtin_elementwise_minimum: {
4141 Value *Op0 = EmitScalarExpr(E->getArg(0));
4142 Value *Op1 = EmitScalarExpr(E->getArg(1));
4143 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::minimum, Op0, Op1,
4144 nullptr, "elt.minimum");
4145 return RValue::get(Result);
4146 }
4147
4148 case Builtin::BI__builtin_elementwise_maximumnum: {
4149 Value *Op0 = EmitScalarExpr(E->getArg(0));
4150 Value *Op1 = EmitScalarExpr(E->getArg(1));
4151 Value *Result = Builder.CreateBinaryIntrinsic(
4152 Intrinsic::maximumnum, Op0, Op1, nullptr, "elt.maximumnum");
4153 return RValue::get(Result);
4154 }
4155
4156 case Builtin::BI__builtin_elementwise_minimumnum: {
4157 Value *Op0 = EmitScalarExpr(E->getArg(0));
4158 Value *Op1 = EmitScalarExpr(E->getArg(1));
4159 Value *Result = Builder.CreateBinaryIntrinsic(
4160 Intrinsic::minimumnum, Op0, Op1, nullptr, "elt.minimumnum");
4161 return RValue::get(Result);
4162 }
4163
4164 case Builtin::BI__builtin_reduce_max: {
4165 auto GetIntrinsicID = [this](QualType QT) {
4166 if (auto *VecTy = QT->getAs<VectorType>())
4167 QT = VecTy->getElementType();
4168 else if (QT->isSizelessVectorType())
4169 QT = QT->getSizelessVectorEltType(CGM.getContext());
4170
4171 if (QT->isSignedIntegerType())
4172 return Intrinsic::vector_reduce_smax;
4173 if (QT->isUnsignedIntegerType())
4174 return Intrinsic::vector_reduce_umax;
4175 assert(QT->isFloatingType() && "must have a float here");
4176 return Intrinsic::vector_reduce_fmax;
4177 };
4179 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4180 }
4181
4182 case Builtin::BI__builtin_reduce_min: {
4183 auto GetIntrinsicID = [this](QualType QT) {
4184 if (auto *VecTy = QT->getAs<VectorType>())
4185 QT = VecTy->getElementType();
4186 else if (QT->isSizelessVectorType())
4187 QT = QT->getSizelessVectorEltType(CGM.getContext());
4188
4189 if (QT->isSignedIntegerType())
4190 return Intrinsic::vector_reduce_smin;
4191 if (QT->isUnsignedIntegerType())
4192 return Intrinsic::vector_reduce_umin;
4193 assert(QT->isFloatingType() && "must have a float here");
4194 return Intrinsic::vector_reduce_fmin;
4195 };
4196
4198 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4199 }
4200
4201 case Builtin::BI__builtin_reduce_add:
4203 *this, E, Intrinsic::vector_reduce_add, "rdx.add"));
4204 case Builtin::BI__builtin_reduce_mul:
4206 *this, E, Intrinsic::vector_reduce_mul, "rdx.mul"));
4207 case Builtin::BI__builtin_reduce_xor:
4209 *this, E, Intrinsic::vector_reduce_xor, "rdx.xor"));
4210 case Builtin::BI__builtin_reduce_or:
4212 *this, E, Intrinsic::vector_reduce_or, "rdx.or"));
4213 case Builtin::BI__builtin_reduce_and:
4215 *this, E, Intrinsic::vector_reduce_and, "rdx.and"));
4216 case Builtin::BI__builtin_reduce_maximum:
4218 *this, E, Intrinsic::vector_reduce_fmaximum, "rdx.maximum"));
4219 case Builtin::BI__builtin_reduce_minimum:
4221 *this, E, Intrinsic::vector_reduce_fminimum, "rdx.minimum"));
4222
4223 case Builtin::BI__builtin_matrix_transpose: {
4224 auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
4225 Value *MatValue = EmitScalarExpr(E->getArg(0));
4226 MatrixBuilder MB(Builder);
4227 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
4228 MatrixTy->getNumColumns());
4229 return RValue::get(Result);
4230 }
4231
4232 case Builtin::BI__builtin_matrix_column_major_load: {
4233 MatrixBuilder MB(Builder);
4234 // Emit everything that isn't dependent on the first parameter type
4235 Value *Stride = EmitScalarExpr(E->getArg(3));
4236 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
4237 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
4238 assert(PtrTy && "arg0 must be of pointer type");
4239 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4240
4243 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4244 0);
4245 Value *Result = MB.CreateColumnMajorLoad(
4246 Src.getElementType(), Src.emitRawPointer(*this),
4247 Align(Src.getAlignment().getQuantity()), Stride, IsVolatile,
4248 ResultTy->getNumRows(), ResultTy->getNumColumns(), "matrix");
4249 return RValue::get(Result);
4250 }
4251
4252 case Builtin::BI__builtin_matrix_column_major_store: {
4253 MatrixBuilder MB(Builder);
4254 Value *Matrix = EmitScalarExpr(E->getArg(0));
4256 Value *Stride = EmitScalarExpr(E->getArg(2));
4257
4258 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
4259 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
4260 assert(PtrTy && "arg1 must be of pointer type");
4261 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4262
4264 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4265 0);
4266 Value *Result = MB.CreateColumnMajorStore(
4267 Matrix, Dst.emitRawPointer(*this),
4268 Align(Dst.getAlignment().getQuantity()), Stride, IsVolatile,
4269 MatrixTy->getNumRows(), MatrixTy->getNumColumns());
4271 return RValue::get(Result);
4272 }
4273
4274 case Builtin::BI__builtin_masked_load:
4275 case Builtin::BI__builtin_masked_expand_load: {
4276 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4277 llvm::Value *Ptr = EmitScalarExpr(E->getArg(1));
4278
4279 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4280 CharUnits Align = CGM.getNaturalTypeAlignment(E->getType(), nullptr);
4281 llvm::Value *AlignVal =
4282 llvm::ConstantInt::get(Int32Ty, Align.getQuantity());
4283
4284 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4285 if (E->getNumArgs() > 2)
4286 PassThru = EmitScalarExpr(E->getArg(2));
4287
4288 llvm::Value *Result;
4289 if (BuiltinID == Builtin::BI__builtin_masked_load) {
4290 Function *F =
4291 CGM.getIntrinsic(Intrinsic::masked_load, {RetTy, UnqualPtrTy});
4292 Result =
4293 Builder.CreateCall(F, {Ptr, AlignVal, Mask, PassThru}, "masked_load");
4294 } else {
4295 Function *F = CGM.getIntrinsic(Intrinsic::masked_expandload, {RetTy});
4296 Result =
4297 Builder.CreateCall(F, {Ptr, Mask, PassThru}, "masked_expand_load");
4298 }
4299 return RValue::get(Result);
4300 };
4301 case Builtin::BI__builtin_masked_store:
4302 case Builtin::BI__builtin_masked_compress_store: {
4303 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4304 llvm::Value *Val = EmitScalarExpr(E->getArg(1));
4305 llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
4306
4307 QualType ValTy = E->getArg(1)->getType();
4308 llvm::Type *ValLLTy = CGM.getTypes().ConvertType(ValTy);
4309 llvm::Type *PtrTy = Ptr->getType();
4310
4311 CharUnits Align = CGM.getNaturalTypeAlignment(ValTy, nullptr);
4312 llvm::Value *AlignVal =
4313 llvm::ConstantInt::get(Int32Ty, Align.getQuantity());
4314
4315 if (BuiltinID == Builtin::BI__builtin_masked_store) {
4316 llvm::Function *F =
4317 CGM.getIntrinsic(llvm::Intrinsic::masked_store, {ValLLTy, PtrTy});
4318 Builder.CreateCall(F, {Val, Ptr, AlignVal, Mask});
4319 } else {
4320 llvm::Function *F =
4321 CGM.getIntrinsic(llvm::Intrinsic::masked_compressstore, {ValLLTy});
4322 Builder.CreateCall(F, {Val, Ptr, Mask});
4323 }
4324 return RValue::get(nullptr);
4325 }
4326
4327 case Builtin::BI__builtin_isinf_sign: {
4328 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
4329 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4330 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4331 Value *Arg = EmitScalarExpr(E->getArg(0));
4332 Value *AbsArg = EmitFAbs(*this, Arg);
4333 Value *IsInf = Builder.CreateFCmpOEQ(
4334 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
4335 Value *IsNeg = EmitSignBit(*this, Arg);
4336
4337 llvm::Type *IntTy = ConvertType(E->getType());
4338 Value *Zero = Constant::getNullValue(IntTy);
4339 Value *One = ConstantInt::get(IntTy, 1);
4340 Value *NegativeOne = ConstantInt::get(IntTy, -1);
4341 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
4342 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
4343 return RValue::get(Result);
4344 }
4345
4346 case Builtin::BI__builtin_flt_rounds: {
4347 Function *F = CGM.getIntrinsic(Intrinsic::get_rounding);
4348
4349 llvm::Type *ResultType = ConvertType(E->getType());
4350 Value *Result = Builder.CreateCall(F);
4351 if (Result->getType() != ResultType)
4352 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
4353 "cast");
4354 return RValue::get(Result);
4355 }
4356
4357 case Builtin::BI__builtin_set_flt_rounds: {
4358 Function *F = CGM.getIntrinsic(Intrinsic::set_rounding);
4359
4360 Value *V = EmitScalarExpr(E->getArg(0));
4361 Builder.CreateCall(F, V);
4362 return RValue::get(nullptr);
4363 }
4364
4365 case Builtin::BI__builtin_fpclassify: {
4366 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4367 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4368 Value *V = EmitScalarExpr(E->getArg(5));
4369 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
4370
4371 // Create Result
4372 BasicBlock *Begin = Builder.GetInsertBlock();
4373 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
4374 Builder.SetInsertPoint(End);
4375 PHINode *Result =
4376 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
4377 "fpclassify_result");
4378
4379 // if (V==0) return FP_ZERO
4380 Builder.SetInsertPoint(Begin);
4381 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
4382 "iszero");
4383 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
4384 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
4385 Builder.CreateCondBr(IsZero, End, NotZero);
4386 Result->addIncoming(ZeroLiteral, Begin);
4387
4388 // if (V != V) return FP_NAN
4389 Builder.SetInsertPoint(NotZero);
4390 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
4391 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
4392 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
4393 Builder.CreateCondBr(IsNan, End, NotNan);
4394 Result->addIncoming(NanLiteral, NotZero);
4395
4396 // if (fabs(V) == infinity) return FP_INFINITY
4397 Builder.SetInsertPoint(NotNan);
4398 Value *VAbs = EmitFAbs(*this, V);
4399 Value *IsInf =
4400 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
4401 "isinf");
4402 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
4403 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
4404 Builder.CreateCondBr(IsInf, End, NotInf);
4405 Result->addIncoming(InfLiteral, NotNan);
4406
4407 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
4408 Builder.SetInsertPoint(NotInf);
4409 APFloat Smallest = APFloat::getSmallestNormalized(
4410 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
4411 Value *IsNormal =
4412 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
4413 "isnormal");
4414 Value *NormalResult =
4415 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
4416 EmitScalarExpr(E->getArg(3)));
4417 Builder.CreateBr(End);
4418 Result->addIncoming(NormalResult, NotInf);
4419
4420 // return Result
4421 Builder.SetInsertPoint(End);
4422 return RValue::get(Result);
4423 }
4424
4425 // An alloca will always return a pointer to the alloca (stack) address
4426 // space. This address space need not be the same as the AST / Language
4427 // default (e.g. in C / C++ auto vars are in the generic address space). At
4428 // the AST level this is handled within CreateTempAlloca et al., but for the
4429 // builtin / dynamic alloca we have to handle it here. We use an explicit cast
4430 // instead of passing an AS to CreateAlloca so as to not inhibit optimisation.
4431 case Builtin::BIalloca:
4432 case Builtin::BI_alloca:
4433 case Builtin::BI__builtin_alloca_uninitialized:
4434 case Builtin::BI__builtin_alloca: {
4435 Value *Size = EmitScalarExpr(E->getArg(0));
4436 const TargetInfo &TI = getContext().getTargetInfo();
4437 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
4438 const Align SuitableAlignmentInBytes =
4439 CGM.getContext()
4440 .toCharUnitsFromBits(TI.getSuitableAlign())
4441 .getAsAlign();
4442 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4443 AI->setAlignment(SuitableAlignmentInBytes);
4444 if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized)
4445 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
4448 if (AAS != EAS) {
4449 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4450 return RValue::get(
4451 getTargetHooks().performAddrSpaceCast(*this, AI, AAS, Ty));
4452 }
4453 return RValue::get(AI);
4454 }
4455
4456 case Builtin::BI__builtin_alloca_with_align_uninitialized:
4457 case Builtin::BI__builtin_alloca_with_align: {
4458 Value *Size = EmitScalarExpr(E->getArg(0));
4459 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
4460 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
4461 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
4462 const Align AlignmentInBytes =
4463 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
4464 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4465 AI->setAlignment(AlignmentInBytes);
4466 if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized)
4467 initializeAlloca(*this, AI, Size, AlignmentInBytes);
4470 if (AAS != EAS) {
4471 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4472 return RValue::get(
4473 getTargetHooks().performAddrSpaceCast(*this, AI, AAS, Ty));
4474 }
4475 return RValue::get(AI);
4476 }
4477
4478 case Builtin::BIbzero:
4479 case Builtin::BI__builtin_bzero: {
4481 Value *SizeVal = EmitScalarExpr(E->getArg(1));
4482 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4483 E->getArg(0)->getExprLoc(), FD, 0);
4484 auto *I = Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
4485 addInstToNewSourceAtom(I, nullptr);
4486 return RValue::get(nullptr);
4487 }
4488
4489 case Builtin::BIbcopy:
4490 case Builtin::BI__builtin_bcopy: {
4493 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4495 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4496 0);
4498 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4499 0);
4500 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4501 addInstToNewSourceAtom(I, nullptr);
4502 return RValue::get(nullptr);
4503 }
4504
4505 case Builtin::BImemcpy:
4506 case Builtin::BI__builtin_memcpy:
4507 case Builtin::BImempcpy:
4508 case Builtin::BI__builtin_mempcpy: {
4511 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4512 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4513 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4514 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4515 addInstToNewSourceAtom(I, nullptr);
4516 if (BuiltinID == Builtin::BImempcpy ||
4517 BuiltinID == Builtin::BI__builtin_mempcpy)
4518 return RValue::get(Builder.CreateInBoundsGEP(
4519 Dest.getElementType(), Dest.emitRawPointer(*this), SizeVal));
4520 else
4521 return RValue::get(Dest, *this);
4522 }
4523
4524 case Builtin::BI__builtin_memcpy_inline: {
4527 uint64_t Size =
4528 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4529 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4530 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4531 auto *I = Builder.CreateMemCpyInline(Dest, Src, Size);
4532 addInstToNewSourceAtom(I, nullptr);
4533 return RValue::get(nullptr);
4534 }
4535
4536 case Builtin::BI__builtin_char_memchr:
4537 BuiltinID = Builtin::BI__builtin_memchr;
4538 break;
4539
4540 case Builtin::BI__builtin___memcpy_chk: {
4541 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
4542 Expr::EvalResult SizeResult, DstSizeResult;
4543 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4544 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4545 break;
4546 llvm::APSInt Size = SizeResult.Val.getInt();
4547 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4548 if (Size.ugt(DstSize))
4549 break;
4552 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4553 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4554 addInstToNewSourceAtom(I, nullptr);
4555 return RValue::get(Dest, *this);
4556 }
4557
4558 case Builtin::BI__builtin_objc_memmove_collectable: {
4559 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
4560 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
4561 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4562 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
4563 DestAddr, SrcAddr, SizeVal);
4564 return RValue::get(DestAddr, *this);
4565 }
4566
4567 case Builtin::BI__builtin___memmove_chk: {
4568 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
4569 Expr::EvalResult SizeResult, DstSizeResult;
4570 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4571 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4572 break;
4573 llvm::APSInt Size = SizeResult.Val.getInt();
4574 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4575 if (Size.ugt(DstSize))
4576 break;
4579 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4580 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4581 addInstToNewSourceAtom(I, nullptr);
4582 return RValue::get(Dest, *this);
4583 }
4584
4585 case Builtin::BI__builtin_trivially_relocate:
4586 case Builtin::BImemmove:
4587 case Builtin::BI__builtin_memmove: {
4590 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4591 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_trivially_relocate)
4592 SizeVal = Builder.CreateMul(
4593 SizeVal,
4594 ConstantInt::get(
4595 SizeVal->getType(),
4596 getContext()
4597 .getTypeSizeInChars(E->getArg(0)->getType()->getPointeeType())
4598 .getQuantity()));
4599 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4600 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4601 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4602 addInstToNewSourceAtom(I, nullptr);
4603 return RValue::get(Dest, *this);
4604 }
4605 case Builtin::BImemset:
4606 case Builtin::BI__builtin_memset: {
4608 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4609 Builder.getInt8Ty());
4610 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4611 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4612 E->getArg(0)->getExprLoc(), FD, 0);
4613 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4614 addInstToNewSourceAtom(I, ByteVal);
4615 return RValue::get(Dest, *this);
4616 }
4617 case Builtin::BI__builtin_memset_inline: {
4619 Value *ByteVal =
4620 Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
4621 uint64_t Size =
4622 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4624 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4625 0);
4626 auto *I = Builder.CreateMemSetInline(Dest, ByteVal, Size);
4627 addInstToNewSourceAtom(I, nullptr);
4628 return RValue::get(nullptr);
4629 }
4630 case Builtin::BI__builtin___memset_chk: {
4631 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
4632 Expr::EvalResult SizeResult, DstSizeResult;
4633 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4634 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4635 break;
4636 llvm::APSInt Size = SizeResult.Val.getInt();
4637 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4638 if (Size.ugt(DstSize))
4639 break;
4641 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4642 Builder.getInt8Ty());
4643 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4644 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4645 addInstToNewSourceAtom(I, nullptr);
4646 return RValue::get(Dest, *this);
4647 }
4648 case Builtin::BI__builtin_wmemchr: {
4649 // The MSVC runtime library does not provide a definition of wmemchr, so we
4650 // need an inline implementation.
4651 if (!getTarget().getTriple().isOSMSVCRT())
4652 break;
4653
4654 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4655 Value *Str = EmitScalarExpr(E->getArg(0));
4656 Value *Chr = EmitScalarExpr(E->getArg(1));
4657 Value *Size = EmitScalarExpr(E->getArg(2));
4658
4659 BasicBlock *Entry = Builder.GetInsertBlock();
4660 BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
4661 BasicBlock *Next = createBasicBlock("wmemchr.next");
4662 BasicBlock *Exit = createBasicBlock("wmemchr.exit");
4663 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4664 Builder.CreateCondBr(SizeEq0, Exit, CmpEq);
4665
4666 EmitBlock(CmpEq);
4667 PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
4668 StrPhi->addIncoming(Str, Entry);
4669 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4670 SizePhi->addIncoming(Size, Entry);
4671 CharUnits WCharAlign =
4673 Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
4674 Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
4675 Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
4676 Builder.CreateCondBr(StrEqChr, Exit, Next);
4677
4678 EmitBlock(Next);
4679 Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
4680 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
4681 Value *NextSizeEq0 =
4682 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
4683 Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
4684 StrPhi->addIncoming(NextStr, Next);
4685 SizePhi->addIncoming(NextSize, Next);
4686
4687 EmitBlock(Exit);
4688 PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
4689 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
4690 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
4691 Ret->addIncoming(FoundChr, CmpEq);
4692 return RValue::get(Ret);
4693 }
4694 case Builtin::BI__builtin_wmemcmp: {
4695 // The MSVC runtime library does not provide a definition of wmemcmp, so we
4696 // need an inline implementation.
4697 if (!getTarget().getTriple().isOSMSVCRT())
4698 break;
4699
4700 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4701
4702 Value *Dst = EmitScalarExpr(E->getArg(0));
4703 Value *Src = EmitScalarExpr(E->getArg(1));
4704 Value *Size = EmitScalarExpr(E->getArg(2));
4705
4706 BasicBlock *Entry = Builder.GetInsertBlock();
4707 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
4708 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
4709 BasicBlock *Next = createBasicBlock("wmemcmp.next");
4710 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
4711 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4712 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
4713
4714 EmitBlock(CmpGT);
4715 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
4716 DstPhi->addIncoming(Dst, Entry);
4717 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
4718 SrcPhi->addIncoming(Src, Entry);
4719 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4720 SizePhi->addIncoming(Size, Entry);
4721 CharUnits WCharAlign =
4723 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
4724 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
4725 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
4726 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
4727
4728 EmitBlock(CmpLT);
4729 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
4730 Builder.CreateCondBr(DstLtSrc, Exit, Next);
4731
4732 EmitBlock(Next);
4733 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
4734 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
4735 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
4736 Value *NextSizeEq0 =
4737 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
4738 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
4739 DstPhi->addIncoming(NextDst, Next);
4740 SrcPhi->addIncoming(NextSrc, Next);
4741 SizePhi->addIncoming(NextSize, Next);
4742
4743 EmitBlock(Exit);
4744 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
4745 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
4746 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
4747 Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
4748 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
4749 return RValue::get(Ret);
4750 }
4751 case Builtin::BI__builtin_dwarf_cfa: {
4752 // The offset in bytes from the first argument to the CFA.
4753 //
4754 // Why on earth is this in the frontend? Is there any reason at
4755 // all that the backend can't reasonably determine this while
4756 // lowering llvm.eh.dwarf.cfa()?
4757 //
4758 // TODO: If there's a satisfactory reason, add a target hook for
4759 // this instead of hard-coding 0, which is correct for most targets.
4760 int32_t Offset = 0;
4761
4762 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
4763 return RValue::get(Builder.CreateCall(F,
4764 llvm::ConstantInt::get(Int32Ty, Offset)));
4765 }
4766 case Builtin::BI__builtin_return_address: {
4767 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4768 getContext().UnsignedIntTy);
4769 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
4770 return RValue::get(Builder.CreateCall(F, Depth));
4771 }
4772 case Builtin::BI_ReturnAddress: {
4773 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
4774 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
4775 }
4776 case Builtin::BI__builtin_frame_address: {
4777 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4778 getContext().UnsignedIntTy);
4779 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
4780 return RValue::get(Builder.CreateCall(F, Depth));
4781 }
4782 case Builtin::BI__builtin_extract_return_addr: {
4785 return RValue::get(Result);
4786 }
4787 case Builtin::BI__builtin_frob_return_addr: {
4790 return RValue::get(Result);
4791 }
4792 case Builtin::BI__builtin_dwarf_sp_column: {
4793 llvm::IntegerType *Ty
4796 if (Column == -1) {
4797 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
4798 return RValue::get(llvm::UndefValue::get(Ty));
4799 }
4800 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
4801 }
4802 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
4804 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
4805 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
4806 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
4807 }
4808 case Builtin::BI__builtin_eh_return: {
4809 Value *Int = EmitScalarExpr(E->getArg(0));
4810 Value *Ptr = EmitScalarExpr(E->getArg(1));
4811
4812 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
4813 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
4814 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
4815 Function *F =
4816 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
4817 : Intrinsic::eh_return_i64);
4818 Builder.CreateCall(F, {Int, Ptr});
4819 Builder.CreateUnreachable();
4820
4821 // We do need to preserve an insertion point.
4822 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
4823
4824 return RValue::get(nullptr);
4825 }
4826 case Builtin::BI__builtin_unwind_init: {
4827 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
4828 Builder.CreateCall(F);
4829 return RValue::get(nullptr);
4830 }
4831 case Builtin::BI__builtin_extend_pointer: {
4832 // Extends a pointer to the size of an _Unwind_Word, which is
4833 // uint64_t on all platforms. Generally this gets poked into a
4834 // register and eventually used as an address, so if the
4835 // addressing registers are wider than pointers and the platform
4836 // doesn't implicitly ignore high-order bits when doing
4837 // addressing, we need to make sure we zext / sext based on
4838 // the platform's expectations.
4839 //
4840 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
4841
4842 // Cast the pointer to intptr_t.
4843 Value *Ptr = EmitScalarExpr(E->getArg(0));
4844 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
4845
4846 // If that's 64 bits, we're done.
4847 if (IntPtrTy->getBitWidth() == 64)
4848 return RValue::get(Result);
4849
4850 // Otherwise, ask the codegen data what to do.
4851 if (getTargetHooks().extendPointerWithSExt())
4852 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
4853 else
4854 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
4855 }
4856 case Builtin::BI__builtin_setjmp: {
4857 // Buffer is a void**.
4859
4860 if (getTarget().getTriple().getArch() == llvm::Triple::systemz) {
4861 // On this target, the back end fills in the context buffer completely.
4862 // It doesn't really matter if the frontend stores to the buffer before
4863 // calling setjmp, the back-end is going to overwrite them anyway.
4864 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
4865 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
4866 }
4867
4868 // Store the frame pointer to the setjmp buffer.
4869 Value *FrameAddr = Builder.CreateCall(
4870 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
4871 ConstantInt::get(Int32Ty, 0));
4872 Builder.CreateStore(FrameAddr, Buf);
4873
4874 // Store the stack pointer to the setjmp buffer.
4875 Value *StackAddr = Builder.CreateStackSave();
4876 assert(Buf.emitRawPointer(*this)->getType() == StackAddr->getType());
4877
4878 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
4879 Builder.CreateStore(StackAddr, StackSaveSlot);
4880
4881 // Call LLVM's EH setjmp, which is lightweight.
4882 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
4883 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
4884 }
4885 case Builtin::BI__builtin_longjmp: {
4886 Value *Buf = EmitScalarExpr(E->getArg(0));
4887
4888 // Call LLVM's EH longjmp, which is lightweight.
4889 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
4890
4891 // longjmp doesn't return; mark this as unreachable.
4892 Builder.CreateUnreachable();
4893
4894 // We do need to preserve an insertion point.
4895 EmitBlock(createBasicBlock("longjmp.cont"));
4896
4897 return RValue::get(nullptr);
4898 }
4899 case Builtin::BI__builtin_launder: {
4900 const Expr *Arg = E->getArg(0);
4901 QualType ArgTy = Arg->getType()->getPointeeType();
4902 Value *Ptr = EmitScalarExpr(Arg);
4903 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
4904 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
4905
4906 return RValue::get(Ptr);
4907 }
4908 case Builtin::BI__sync_fetch_and_add:
4909 case Builtin::BI__sync_fetch_and_sub:
4910 case Builtin::BI__sync_fetch_and_or:
4911 case Builtin::BI__sync_fetch_and_and:
4912 case Builtin::BI__sync_fetch_and_xor:
4913 case Builtin::BI__sync_fetch_and_nand:
4914 case Builtin::BI__sync_add_and_fetch:
4915 case Builtin::BI__sync_sub_and_fetch:
4916 case Builtin::BI__sync_and_and_fetch:
4917 case Builtin::BI__sync_or_and_fetch:
4918 case Builtin::BI__sync_xor_and_fetch:
4919 case Builtin::BI__sync_nand_and_fetch:
4920 case Builtin::BI__sync_val_compare_and_swap:
4921 case Builtin::BI__sync_bool_compare_and_swap:
4922 case Builtin::BI__sync_lock_test_and_set:
4923 case Builtin::BI__sync_lock_release:
4924 case Builtin::BI__sync_swap:
4925 llvm_unreachable("Shouldn't make it through sema");
4926 case Builtin::BI__sync_fetch_and_add_1:
4927 case Builtin::BI__sync_fetch_and_add_2:
4928 case Builtin::BI__sync_fetch_and_add_4:
4929 case Builtin::BI__sync_fetch_and_add_8:
4930 case Builtin::BI__sync_fetch_and_add_16:
4931 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
4932 case Builtin::BI__sync_fetch_and_sub_1:
4933 case Builtin::BI__sync_fetch_and_sub_2:
4934 case Builtin::BI__sync_fetch_and_sub_4:
4935 case Builtin::BI__sync_fetch_and_sub_8:
4936 case Builtin::BI__sync_fetch_and_sub_16:
4937 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
4938 case Builtin::BI__sync_fetch_and_or_1:
4939 case Builtin::BI__sync_fetch_and_or_2:
4940 case Builtin::BI__sync_fetch_and_or_4:
4941 case Builtin::BI__sync_fetch_and_or_8:
4942 case Builtin::BI__sync_fetch_and_or_16:
4943 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
4944 case Builtin::BI__sync_fetch_and_and_1:
4945 case Builtin::BI__sync_fetch_and_and_2:
4946 case Builtin::BI__sync_fetch_and_and_4:
4947 case Builtin::BI__sync_fetch_and_and_8:
4948 case Builtin::BI__sync_fetch_and_and_16:
4949 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
4950 case Builtin::BI__sync_fetch_and_xor_1:
4951 case Builtin::BI__sync_fetch_and_xor_2:
4952 case Builtin::BI__sync_fetch_and_xor_4:
4953 case Builtin::BI__sync_fetch_and_xor_8:
4954 case Builtin::BI__sync_fetch_and_xor_16:
4955 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
4956 case Builtin::BI__sync_fetch_and_nand_1:
4957 case Builtin::BI__sync_fetch_and_nand_2:
4958 case Builtin::BI__sync_fetch_and_nand_4:
4959 case Builtin::BI__sync_fetch_and_nand_8:
4960 case Builtin::BI__sync_fetch_and_nand_16:
4961 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
4962
4963 // Clang extensions: not overloaded yet.
4964 case Builtin::BI__sync_fetch_and_min:
4965 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
4966 case Builtin::BI__sync_fetch_and_max:
4967 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
4968 case Builtin::BI__sync_fetch_and_umin:
4969 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
4970 case Builtin::BI__sync_fetch_and_umax:
4971 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
4972
4973 case Builtin::BI__sync_add_and_fetch_1:
4974 case Builtin::BI__sync_add_and_fetch_2:
4975 case Builtin::BI__sync_add_and_fetch_4:
4976 case Builtin::BI__sync_add_and_fetch_8:
4977 case Builtin::BI__sync_add_and_fetch_16:
4978 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
4979 llvm::Instruction::Add);
4980 case Builtin::BI__sync_sub_and_fetch_1:
4981 case Builtin::BI__sync_sub_and_fetch_2:
4982 case Builtin::BI__sync_sub_and_fetch_4:
4983 case Builtin::BI__sync_sub_and_fetch_8:
4984 case Builtin::BI__sync_sub_and_fetch_16:
4985 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
4986 llvm::Instruction::Sub);
4987 case Builtin::BI__sync_and_and_fetch_1:
4988 case Builtin::BI__sync_and_and_fetch_2:
4989 case Builtin::BI__sync_and_and_fetch_4:
4990 case Builtin::BI__sync_and_and_fetch_8:
4991 case Builtin::BI__sync_and_and_fetch_16:
4992 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
4993 llvm::Instruction::And);
4994 case Builtin::BI__sync_or_and_fetch_1:
4995 case Builtin::BI__sync_or_and_fetch_2:
4996 case Builtin::BI__sync_or_and_fetch_4:
4997 case Builtin::BI__sync_or_and_fetch_8:
4998 case Builtin::BI__sync_or_and_fetch_16:
4999 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
5000 llvm::Instruction::Or);
5001 case Builtin::BI__sync_xor_and_fetch_1:
5002 case Builtin::BI__sync_xor_and_fetch_2:
5003 case Builtin::BI__sync_xor_and_fetch_4:
5004 case Builtin::BI__sync_xor_and_fetch_8:
5005 case Builtin::BI__sync_xor_and_fetch_16:
5006 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
5007 llvm::Instruction::Xor);
5008 case Builtin::BI__sync_nand_and_fetch_1:
5009 case Builtin::BI__sync_nand_and_fetch_2:
5010 case Builtin::BI__sync_nand_and_fetch_4:
5011 case Builtin::BI__sync_nand_and_fetch_8:
5012 case Builtin::BI__sync_nand_and_fetch_16:
5013 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
5014 llvm::Instruction::And, true);
5015
5016 case Builtin::BI__sync_val_compare_and_swap_1:
5017 case Builtin::BI__sync_val_compare_and_swap_2:
5018 case Builtin::BI__sync_val_compare_and_swap_4:
5019 case Builtin::BI__sync_val_compare_and_swap_8:
5020 case Builtin::BI__sync_val_compare_and_swap_16:
5021 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
5022
5023 case Builtin::BI__sync_bool_compare_and_swap_1:
5024 case Builtin::BI__sync_bool_compare_and_swap_2:
5025 case Builtin::BI__sync_bool_compare_and_swap_4:
5026 case Builtin::BI__sync_bool_compare_and_swap_8:
5027 case Builtin::BI__sync_bool_compare_and_swap_16:
5028 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
5029
5030 case Builtin::BI__sync_swap_1:
5031 case Builtin::BI__sync_swap_2:
5032 case Builtin::BI__sync_swap_4:
5033 case Builtin::BI__sync_swap_8:
5034 case Builtin::BI__sync_swap_16:
5035 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
5036
5037 case Builtin::BI__sync_lock_test_and_set_1:
5038 case Builtin::BI__sync_lock_test_and_set_2:
5039 case Builtin::BI__sync_lock_test_and_set_4:
5040 case Builtin::BI__sync_lock_test_and_set_8:
5041 case Builtin::BI__sync_lock_test_and_set_16:
5042 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
5043
5044 case Builtin::BI__sync_lock_release_1:
5045 case Builtin::BI__sync_lock_release_2:
5046 case Builtin::BI__sync_lock_release_4:
5047 case Builtin::BI__sync_lock_release_8:
5048 case Builtin::BI__sync_lock_release_16: {
5049 Address Ptr = CheckAtomicAlignment(*this, E);
5050 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
5051
5052 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
5053 getContext().getTypeSize(ElTy));
5054 llvm::StoreInst *Store =
5055 Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
5056 Store->setAtomic(llvm::AtomicOrdering::Release);
5057 return RValue::get(nullptr);
5058 }
5059
5060 case Builtin::BI__sync_synchronize: {
5061 // We assume this is supposed to correspond to a C++0x-style
5062 // sequentially-consistent fence (i.e. this is only usable for
5063 // synchronization, not device I/O or anything like that). This intrinsic
5064 // is really badly designed in the sense that in theory, there isn't
5065 // any way to safely use it... but in practice, it mostly works
5066 // to use it with non-atomic loads and stores to get acquire/release
5067 // semantics.
5068 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
5069 return RValue::get(nullptr);
5070 }
5071
5072 case Builtin::BI__builtin_nontemporal_load:
5073 return RValue::get(EmitNontemporalLoad(*this, E));
5074 case Builtin::BI__builtin_nontemporal_store:
5075 return RValue::get(EmitNontemporalStore(*this, E));
5076 case Builtin::BI__c11_atomic_is_lock_free:
5077 case Builtin::BI__atomic_is_lock_free: {
5078 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
5079 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
5080 // _Atomic(T) is always properly-aligned.
5081 const char *LibCallName = "__atomic_is_lock_free";
5082 CallArgList Args;
5083 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
5084 getContext().getSizeType());
5085 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
5086 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
5088 else
5089 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
5091 const CGFunctionInfo &FuncInfo =
5092 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
5093 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
5094 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
5095 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
5096 ReturnValueSlot(), Args);
5097 }
5098
5099 case Builtin::BI__atomic_thread_fence:
5100 case Builtin::BI__atomic_signal_fence:
5101 case Builtin::BI__c11_atomic_thread_fence:
5102 case Builtin::BI__c11_atomic_signal_fence: {
5103 llvm::SyncScope::ID SSID;
5104 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
5105 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
5106 SSID = llvm::SyncScope::SingleThread;
5107 else
5108 SSID = llvm::SyncScope::System;
5109 Value *Order = EmitScalarExpr(E->getArg(0));
5110 if (isa<llvm::ConstantInt>(Order)) {
5111 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
5112 switch (ord) {
5113 case 0: // memory_order_relaxed
5114 default: // invalid order
5115 break;
5116 case 1: // memory_order_consume
5117 case 2: // memory_order_acquire
5118 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5119 break;
5120 case 3: // memory_order_release
5121 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5122 break;
5123 case 4: // memory_order_acq_rel
5124 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5125 break;
5126 case 5: // memory_order_seq_cst
5127 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5128 break;
5129 }
5130 return RValue::get(nullptr);
5131 }
5132
5133 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
5134 AcquireBB = createBasicBlock("acquire", CurFn);
5135 ReleaseBB = createBasicBlock("release", CurFn);
5136 AcqRelBB = createBasicBlock("acqrel", CurFn);
5137 SeqCstBB = createBasicBlock("seqcst", CurFn);
5138 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
5139
5140 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5141 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5142
5143 Builder.SetInsertPoint(AcquireBB);
5144 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5145 Builder.CreateBr(ContBB);
5146 SI->addCase(Builder.getInt32(1), AcquireBB);
5147 SI->addCase(Builder.getInt32(2), AcquireBB);
5148
5149 Builder.SetInsertPoint(ReleaseBB);
5150 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5151 Builder.CreateBr(ContBB);
5152 SI->addCase(Builder.getInt32(3), ReleaseBB);
5153
5154 Builder.SetInsertPoint(AcqRelBB);
5155 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5156 Builder.CreateBr(ContBB);
5157 SI->addCase(Builder.getInt32(4), AcqRelBB);
5158
5159 Builder.SetInsertPoint(SeqCstBB);
5160 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5161 Builder.CreateBr(ContBB);
5162 SI->addCase(Builder.getInt32(5), SeqCstBB);
5163
5164 Builder.SetInsertPoint(ContBB);
5165 return RValue::get(nullptr);
5166 }
5167 case Builtin::BI__scoped_atomic_thread_fence: {
5169
5170 Value *Order = EmitScalarExpr(E->getArg(0));
5171 Value *Scope = EmitScalarExpr(E->getArg(1));
5172 auto Ord = dyn_cast<llvm::ConstantInt>(Order);
5173 auto Scp = dyn_cast<llvm::ConstantInt>(Scope);
5174 if (Ord && Scp) {
5175 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5176 ? ScopeModel->map(Scp->getZExtValue())
5177 : ScopeModel->map(ScopeModel->getFallBackValue());
5178 switch (Ord->getZExtValue()) {
5179 case 0: // memory_order_relaxed
5180 default: // invalid order
5181 break;
5182 case 1: // memory_order_consume
5183 case 2: // memory_order_acquire
5184 Builder.CreateFence(
5185 llvm::AtomicOrdering::Acquire,
5186 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5187 llvm::AtomicOrdering::Acquire,
5188 getLLVMContext()));
5189 break;
5190 case 3: // memory_order_release
5191 Builder.CreateFence(
5192 llvm::AtomicOrdering::Release,
5193 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5194 llvm::AtomicOrdering::Release,
5195 getLLVMContext()));
5196 break;
5197 case 4: // memory_order_acq_rel
5198 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease,
5199 getTargetHooks().getLLVMSyncScopeID(
5200 getLangOpts(), SS,
5201 llvm::AtomicOrdering::AcquireRelease,
5202 getLLVMContext()));
5203 break;
5204 case 5: // memory_order_seq_cst
5205 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
5206 getTargetHooks().getLLVMSyncScopeID(
5207 getLangOpts(), SS,
5208 llvm::AtomicOrdering::SequentiallyConsistent,
5209 getLLVMContext()));
5210 break;
5211 }
5212 return RValue::get(nullptr);
5213 }
5214
5215 llvm::BasicBlock *ContBB = createBasicBlock("atomic.scope.continue", CurFn);
5216
5218 OrderBBs;
5219 if (Ord) {
5220 switch (Ord->getZExtValue()) {
5221 case 0: // memory_order_relaxed
5222 default: // invalid order
5223 ContBB->eraseFromParent();
5224 return RValue::get(nullptr);
5225 case 1: // memory_order_consume
5226 case 2: // memory_order_acquire
5227 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5228 llvm::AtomicOrdering::Acquire);
5229 break;
5230 case 3: // memory_order_release
5231 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5232 llvm::AtomicOrdering::Release);
5233 break;
5234 case 4: // memory_order_acq_rel
5235 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5236 llvm::AtomicOrdering::AcquireRelease);
5237 break;
5238 case 5: // memory_order_seq_cst
5239 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5240 llvm::AtomicOrdering::SequentiallyConsistent);
5241 break;
5242 }
5243 } else {
5244 llvm::BasicBlock *AcquireBB = createBasicBlock("acquire", CurFn);
5245 llvm::BasicBlock *ReleaseBB = createBasicBlock("release", CurFn);
5246 llvm::BasicBlock *AcqRelBB = createBasicBlock("acqrel", CurFn);
5247 llvm::BasicBlock *SeqCstBB = createBasicBlock("seqcst", CurFn);
5248
5249 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5250 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5251 SI->addCase(Builder.getInt32(1), AcquireBB);
5252 SI->addCase(Builder.getInt32(2), AcquireBB);
5253 SI->addCase(Builder.getInt32(3), ReleaseBB);
5254 SI->addCase(Builder.getInt32(4), AcqRelBB);
5255 SI->addCase(Builder.getInt32(5), SeqCstBB);
5256
5257 OrderBBs.emplace_back(AcquireBB, llvm::AtomicOrdering::Acquire);
5258 OrderBBs.emplace_back(ReleaseBB, llvm::AtomicOrdering::Release);
5259 OrderBBs.emplace_back(AcqRelBB, llvm::AtomicOrdering::AcquireRelease);
5260 OrderBBs.emplace_back(SeqCstBB,
5261 llvm::AtomicOrdering::SequentiallyConsistent);
5262 }
5263
5264 for (auto &[OrderBB, Ordering] : OrderBBs) {
5265 Builder.SetInsertPoint(OrderBB);
5266 if (Scp) {
5267 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5268 ? ScopeModel->map(Scp->getZExtValue())
5269 : ScopeModel->map(ScopeModel->getFallBackValue());
5270 Builder.CreateFence(Ordering,
5271 getTargetHooks().getLLVMSyncScopeID(
5272 getLangOpts(), SS, Ordering, getLLVMContext()));
5273 Builder.CreateBr(ContBB);
5274 } else {
5275 llvm::DenseMap<unsigned, llvm::BasicBlock *> BBs;
5276 for (unsigned Scp : ScopeModel->getRuntimeValues())
5277 BBs[Scp] = createBasicBlock(getAsString(ScopeModel->map(Scp)), CurFn);
5278
5279 auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
5280 llvm::SwitchInst *SI = Builder.CreateSwitch(SC, ContBB);
5281 for (unsigned Scp : ScopeModel->getRuntimeValues()) {
5282 auto *B = BBs[Scp];
5283 SI->addCase(Builder.getInt32(Scp), B);
5284
5285 Builder.SetInsertPoint(B);
5286 Builder.CreateFence(Ordering, getTargetHooks().getLLVMSyncScopeID(
5287 getLangOpts(), ScopeModel->map(Scp),
5288 Ordering, getLLVMContext()));
5289 Builder.CreateBr(ContBB);
5290 }
5291 }
5292 }
5293
5294 Builder.SetInsertPoint(ContBB);
5295 return RValue::get(nullptr);
5296 }
5297
5298 case Builtin::BI__builtin_signbit:
5299 case Builtin::BI__builtin_signbitf:
5300 case Builtin::BI__builtin_signbitl: {
5301 return RValue::get(
5302 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
5303 ConvertType(E->getType())));
5304 }
5305 case Builtin::BI__warn_memset_zero_len:
5306 return RValue::getIgnored();
5307 case Builtin::BI__annotation: {
5308 // Re-encode each wide string to UTF8 and make an MDString.
5310 for (const Expr *Arg : E->arguments()) {
5311 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
5312 assert(Str->getCharByteWidth() == 2);
5313 StringRef WideBytes = Str->getBytes();
5314 std::string StrUtf8;
5315 if (!convertUTF16ToUTF8String(
5316 ArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
5317 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
5318 continue;
5319 }
5320 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
5321 }
5322
5323 // Build and MDTuple of MDStrings and emit the intrinsic call.
5324 llvm::Function *F = CGM.getIntrinsic(Intrinsic::codeview_annotation, {});
5325 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
5326 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
5327 return RValue::getIgnored();
5328 }
5329 case Builtin::BI__builtin_annotation: {
5330 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
5331 llvm::Function *F = CGM.getIntrinsic(
5332 Intrinsic::annotation, {AnnVal->getType(), CGM.ConstGlobalsPtrTy});
5333
5334 // Get the annotation string, go through casts. Sema requires this to be a
5335 // non-wide string literal, potentially casted, so the cast<> is safe.
5336 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
5337 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
5338 return RValue::get(
5339 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
5340 }
5341 case Builtin::BI__builtin_addcb:
5342 case Builtin::BI__builtin_addcs:
5343 case Builtin::BI__builtin_addc:
5344 case Builtin::BI__builtin_addcl:
5345 case Builtin::BI__builtin_addcll:
5346 case Builtin::BI__builtin_subcb:
5347 case Builtin::BI__builtin_subcs:
5348 case Builtin::BI__builtin_subc:
5349 case Builtin::BI__builtin_subcl:
5350 case Builtin::BI__builtin_subcll: {
5351
5352 // We translate all of these builtins from expressions of the form:
5353 // int x = ..., y = ..., carryin = ..., carryout, result;
5354 // result = __builtin_addc(x, y, carryin, &carryout);
5355 //
5356 // to LLVM IR of the form:
5357 //
5358 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
5359 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
5360 // %carry1 = extractvalue {i32, i1} %tmp1, 1
5361 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
5362 // i32 %carryin)
5363 // %result = extractvalue {i32, i1} %tmp2, 0
5364 // %carry2 = extractvalue {i32, i1} %tmp2, 1
5365 // %tmp3 = or i1 %carry1, %carry2
5366 // %tmp4 = zext i1 %tmp3 to i32
5367 // store i32 %tmp4, i32* %carryout
5368
5369 // Scalarize our inputs.
5370 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5371 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5372 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
5373 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
5374
5375 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
5376 Intrinsic::ID IntrinsicId;
5377 switch (BuiltinID) {
5378 default: llvm_unreachable("Unknown multiprecision builtin id.");
5379 case Builtin::BI__builtin_addcb:
5380 case Builtin::BI__builtin_addcs:
5381 case Builtin::BI__builtin_addc:
5382 case Builtin::BI__builtin_addcl:
5383 case Builtin::BI__builtin_addcll:
5384 IntrinsicId = Intrinsic::uadd_with_overflow;
5385 break;
5386 case Builtin::BI__builtin_subcb:
5387 case Builtin::BI__builtin_subcs:
5388 case Builtin::BI__builtin_subc:
5389 case Builtin::BI__builtin_subcl:
5390 case Builtin::BI__builtin_subcll:
5391 IntrinsicId = Intrinsic::usub_with_overflow;
5392 break;
5393 }
5394
5395 // Construct our resulting LLVM IR expression.
5396 llvm::Value *Carry1;
5397 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
5398 X, Y, Carry1);
5399 llvm::Value *Carry2;
5400 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
5401 Sum1, Carryin, Carry2);
5402 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
5403 X->getType());
5404 Builder.CreateStore(CarryOut, CarryOutPtr);
5405 return RValue::get(Sum2);
5406 }
5407
5408 case Builtin::BI__builtin_add_overflow:
5409 case Builtin::BI__builtin_sub_overflow:
5410 case Builtin::BI__builtin_mul_overflow: {
5411 const clang::Expr *LeftArg = E->getArg(0);
5412 const clang::Expr *RightArg = E->getArg(1);
5413 const clang::Expr *ResultArg = E->getArg(2);
5414
5415 clang::QualType ResultQTy =
5416 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
5417
5418 WidthAndSignedness LeftInfo =
5419 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
5420 WidthAndSignedness RightInfo =
5421 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
5422 WidthAndSignedness ResultInfo =
5423 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
5424
5425 // Handle mixed-sign multiplication as a special case, because adding
5426 // runtime or backend support for our generic irgen would be too expensive.
5427 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
5428 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
5429 RightInfo, ResultArg, ResultQTy,
5430 ResultInfo);
5431
5432 if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
5433 ResultInfo))
5435 *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
5436 ResultInfo);
5437
5438 WidthAndSignedness EncompassingInfo =
5439 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
5440
5441 llvm::Type *EncompassingLLVMTy =
5442 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
5443
5444 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
5445
5446 Intrinsic::ID IntrinsicId;
5447 switch (BuiltinID) {
5448 default:
5449 llvm_unreachable("Unknown overflow builtin id.");
5450 case Builtin::BI__builtin_add_overflow:
5451 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::sadd_with_overflow
5452 : Intrinsic::uadd_with_overflow;
5453 break;
5454 case Builtin::BI__builtin_sub_overflow:
5455 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::ssub_with_overflow
5456 : Intrinsic::usub_with_overflow;
5457 break;
5458 case Builtin::BI__builtin_mul_overflow:
5459 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::smul_with_overflow
5460 : Intrinsic::umul_with_overflow;
5461 break;
5462 }
5463
5464 llvm::Value *Left = EmitScalarExpr(LeftArg);
5465 llvm::Value *Right = EmitScalarExpr(RightArg);
5466 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
5467
5468 // Extend each operand to the encompassing type.
5469 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
5470 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
5471
5472 // Perform the operation on the extended values.
5473 llvm::Value *Overflow, *Result;
5474 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
5475
5476 if (EncompassingInfo.Width > ResultInfo.Width) {
5477 // The encompassing type is wider than the result type, so we need to
5478 // truncate it.
5479 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
5480
5481 // To see if the truncation caused an overflow, we will extend
5482 // the result and then compare it to the original result.
5483 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
5484 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
5485 llvm::Value *TruncationOverflow =
5486 Builder.CreateICmpNE(Result, ResultTruncExt);
5487
5488 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
5489 Result = ResultTrunc;
5490 }
5491
5492 // Finally, store the result using the pointer.
5493 bool isVolatile =
5494 ResultArg->getType()->getPointeeType().isVolatileQualified();
5495 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
5496
5497 return RValue::get(Overflow);
5498 }
5499
5500 case Builtin::BI__builtin_uadd_overflow:
5501 case Builtin::BI__builtin_uaddl_overflow:
5502 case Builtin::BI__builtin_uaddll_overflow:
5503 case Builtin::BI__builtin_usub_overflow:
5504 case Builtin::BI__builtin_usubl_overflow:
5505 case Builtin::BI__builtin_usubll_overflow:
5506 case Builtin::BI__builtin_umul_overflow:
5507 case Builtin::BI__builtin_umull_overflow:
5508 case Builtin::BI__builtin_umulll_overflow:
5509 case Builtin::BI__builtin_sadd_overflow:
5510 case Builtin::BI__builtin_saddl_overflow:
5511 case Builtin::BI__builtin_saddll_overflow:
5512 case Builtin::BI__builtin_ssub_overflow:
5513 case Builtin::BI__builtin_ssubl_overflow:
5514 case Builtin::BI__builtin_ssubll_overflow:
5515 case Builtin::BI__builtin_smul_overflow:
5516 case Builtin::BI__builtin_smull_overflow:
5517 case Builtin::BI__builtin_smulll_overflow: {
5518
5519 // We translate all of these builtins directly to the relevant llvm IR node.
5520
5521 // Scalarize our inputs.
5522 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5523 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5524 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
5525
5526 // Decide which of the overflow intrinsics we are lowering to:
5527 Intrinsic::ID IntrinsicId;
5528 switch (BuiltinID) {
5529 default: llvm_unreachable("Unknown overflow builtin id.");
5530 case Builtin::BI__builtin_uadd_overflow:
5531 case Builtin::BI__builtin_uaddl_overflow:
5532 case Builtin::BI__builtin_uaddll_overflow:
5533 IntrinsicId = Intrinsic::uadd_with_overflow;
5534 break;
5535 case Builtin::BI__builtin_usub_overflow:
5536 case Builtin::BI__builtin_usubl_overflow:
5537 case Builtin::BI__builtin_usubll_overflow:
5538 IntrinsicId = Intrinsic::usub_with_overflow;
5539 break;
5540 case Builtin::BI__builtin_umul_overflow:
5541 case Builtin::BI__builtin_umull_overflow:
5542 case Builtin::BI__builtin_umulll_overflow:
5543 IntrinsicId = Intrinsic::umul_with_overflow;
5544 break;
5545 case Builtin::BI__builtin_sadd_overflow:
5546 case Builtin::BI__builtin_saddl_overflow:
5547 case Builtin::BI__builtin_saddll_overflow:
5548 IntrinsicId = Intrinsic::sadd_with_overflow;
5549 break;
5550 case Builtin::BI__builtin_ssub_overflow:
5551 case Builtin::BI__builtin_ssubl_overflow:
5552 case Builtin::BI__builtin_ssubll_overflow:
5553 IntrinsicId = Intrinsic::ssub_with_overflow;
5554 break;
5555 case Builtin::BI__builtin_smul_overflow:
5556 case Builtin::BI__builtin_smull_overflow:
5557 case Builtin::BI__builtin_smulll_overflow:
5558 IntrinsicId = Intrinsic::smul_with_overflow;
5559 break;
5560 }
5561
5562
5563 llvm::Value *Carry;
5564 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
5565 Builder.CreateStore(Sum, SumOutPtr);
5566
5567 return RValue::get(Carry);
5568 }
5569 case Builtin::BIaddressof:
5570 case Builtin::BI__addressof:
5571 case Builtin::BI__builtin_addressof:
5572 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5573 case Builtin::BI__builtin_function_start:
5574 return RValue::get(CGM.GetFunctionStart(
5575 E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext())));
5576 case Builtin::BI__builtin_operator_new:
5578 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
5579 case Builtin::BI__builtin_operator_delete:
5581 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
5582 return RValue::get(nullptr);
5583
5584 case Builtin::BI__builtin_is_aligned:
5585 return EmitBuiltinIsAligned(E);
5586 case Builtin::BI__builtin_align_up:
5587 return EmitBuiltinAlignTo(E, true);
5588 case Builtin::BI__builtin_align_down:
5589 return EmitBuiltinAlignTo(E, false);
5590
5591 case Builtin::BI__noop:
5592 // __noop always evaluates to an integer literal zero.
5593 return RValue::get(ConstantInt::get(IntTy, 0));
5594 case Builtin::BI__builtin_call_with_static_chain: {
5595 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
5596 const Expr *Chain = E->getArg(1);
5597 return EmitCall(Call->getCallee()->getType(),
5598 EmitCallee(Call->getCallee()), Call, ReturnValue,
5599 EmitScalarExpr(Chain));
5600 }
5601 case Builtin::BI_InterlockedExchange8:
5602 case Builtin::BI_InterlockedExchange16:
5603 case Builtin::BI_InterlockedExchange:
5604 case Builtin::BI_InterlockedExchangePointer:
5605 return RValue::get(
5607 case Builtin::BI_InterlockedCompareExchangePointer:
5608 return RValue::get(
5610 case Builtin::BI_InterlockedCompareExchangePointer_nf:
5611 return RValue::get(
5613 case Builtin::BI_InterlockedCompareExchange8:
5614 case Builtin::BI_InterlockedCompareExchange16:
5615 case Builtin::BI_InterlockedCompareExchange:
5616 case Builtin::BI_InterlockedCompareExchange64:
5617 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
5618 case Builtin::BI_InterlockedIncrement16:
5619 case Builtin::BI_InterlockedIncrement:
5620 return RValue::get(
5622 case Builtin::BI_InterlockedDecrement16:
5623 case Builtin::BI_InterlockedDecrement:
5624 return RValue::get(
5626 case Builtin::BI_InterlockedAnd8:
5627 case Builtin::BI_InterlockedAnd16:
5628 case Builtin::BI_InterlockedAnd:
5630 case Builtin::BI_InterlockedExchangeAdd8:
5631 case Builtin::BI_InterlockedExchangeAdd16:
5632 case Builtin::BI_InterlockedExchangeAdd:
5633 return RValue::get(
5635 case Builtin::BI_InterlockedExchangeSub8:
5636 case Builtin::BI_InterlockedExchangeSub16:
5637 case Builtin::BI_InterlockedExchangeSub:
5638 return RValue::get(
5640 case Builtin::BI_InterlockedOr8:
5641 case Builtin::BI_InterlockedOr16:
5642 case Builtin::BI_InterlockedOr:
5644 case Builtin::BI_InterlockedXor8:
5645 case Builtin::BI_InterlockedXor16:
5646 case Builtin::BI_InterlockedXor:
5648
5649 case Builtin::BI_bittest64:
5650 case Builtin::BI_bittest:
5651 case Builtin::BI_bittestandcomplement64:
5652 case Builtin::BI_bittestandcomplement:
5653 case Builtin::BI_bittestandreset64:
5654 case Builtin::BI_bittestandreset:
5655 case Builtin::BI_bittestandset64:
5656 case Builtin::BI_bittestandset:
5657 case Builtin::BI_interlockedbittestandreset:
5658 case Builtin::BI_interlockedbittestandreset64:
5659 case Builtin::BI_interlockedbittestandreset64_acq:
5660 case Builtin::BI_interlockedbittestandreset64_rel:
5661 case Builtin::BI_interlockedbittestandreset64_nf:
5662 case Builtin::BI_interlockedbittestandset64:
5663 case Builtin::BI_interlockedbittestandset64_acq:
5664 case Builtin::BI_interlockedbittestandset64_rel:
5665 case Builtin::BI_interlockedbittestandset64_nf:
5666 case Builtin::BI_interlockedbittestandset:
5667 case Builtin::BI_interlockedbittestandset_acq:
5668 case Builtin::BI_interlockedbittestandset_rel:
5669 case Builtin::BI_interlockedbittestandset_nf:
5670 case Builtin::BI_interlockedbittestandreset_acq:
5671 case Builtin::BI_interlockedbittestandreset_rel:
5672 case Builtin::BI_interlockedbittestandreset_nf:
5673 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
5674
5675 // These builtins exist to emit regular volatile loads and stores not
5676 // affected by the -fms-volatile setting.
5677 case Builtin::BI__iso_volatile_load8:
5678 case Builtin::BI__iso_volatile_load16:
5679 case Builtin::BI__iso_volatile_load32:
5680 case Builtin::BI__iso_volatile_load64:
5681 return RValue::get(EmitISOVolatileLoad(*this, E));
5682 case Builtin::BI__iso_volatile_store8:
5683 case Builtin::BI__iso_volatile_store16:
5684 case Builtin::BI__iso_volatile_store32:
5685 case Builtin::BI__iso_volatile_store64:
5686 return RValue::get(EmitISOVolatileStore(*this, E));
5687
5688 case Builtin::BI__builtin_ptrauth_sign_constant:
5689 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
5690
5691 case Builtin::BI__builtin_ptrauth_auth:
5692 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5693 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5694 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5695 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5696 case Builtin::BI__builtin_ptrauth_strip: {
5697 // Emit the arguments.
5699 for (auto argExpr : E->arguments())
5700 Args.push_back(EmitScalarExpr(argExpr));
5701
5702 // Cast the value to intptr_t, saving its original type.
5703 llvm::Type *OrigValueType = Args[0]->getType();
5704 if (OrigValueType->isPointerTy())
5705 Args[0] = Builder.CreatePtrToInt(Args[0], IntPtrTy);
5706
5707 switch (BuiltinID) {
5708 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5709 if (Args[4]->getType()->isPointerTy())
5710 Args[4] = Builder.CreatePtrToInt(Args[4], IntPtrTy);
5711 [[fallthrough]];
5712
5713 case Builtin::BI__builtin_ptrauth_auth:
5714 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5715 if (Args[2]->getType()->isPointerTy())
5716 Args[2] = Builder.CreatePtrToInt(Args[2], IntPtrTy);
5717 break;
5718
5719 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5720 if (Args[1]->getType()->isPointerTy())
5721 Args[1] = Builder.CreatePtrToInt(Args[1], IntPtrTy);
5722 break;
5723
5724 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5725 case Builtin::BI__builtin_ptrauth_strip:
5726 break;
5727 }
5728
5729 // Call the intrinsic.
5730 auto IntrinsicID = [&]() -> unsigned {
5731 switch (BuiltinID) {
5732 case Builtin::BI__builtin_ptrauth_auth:
5733 return Intrinsic::ptrauth_auth;
5734 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5735 return Intrinsic::ptrauth_resign;
5736 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5737 return Intrinsic::ptrauth_blend;
5738 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5739 return Intrinsic::ptrauth_sign_generic;
5740 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5741 return Intrinsic::ptrauth_sign;
5742 case Builtin::BI__builtin_ptrauth_strip:
5743 return Intrinsic::ptrauth_strip;
5744 }
5745 llvm_unreachable("bad ptrauth intrinsic");
5746 }();
5747 auto Intrinsic = CGM.getIntrinsic(IntrinsicID);
5748 llvm::Value *Result = EmitRuntimeCall(Intrinsic, Args);
5749
5750 if (BuiltinID != Builtin::BI__builtin_ptrauth_sign_generic_data &&
5751 BuiltinID != Builtin::BI__builtin_ptrauth_blend_discriminator &&
5752 OrigValueType->isPointerTy()) {
5753 Result = Builder.CreateIntToPtr(Result, OrigValueType);
5754 }
5755 return RValue::get(Result);
5756 }
5757
5758 case Builtin::BI__builtin_get_vtable_pointer: {
5759 const Expr *Target = E->getArg(0);
5760 QualType TargetType = Target->getType();
5761 const CXXRecordDecl *Decl = TargetType->getPointeeCXXRecordDecl();
5762 assert(Decl);
5763 auto ThisAddress = EmitPointerWithAlignment(Target);
5764 assert(ThisAddress.isValid());
5765 llvm::Value *VTablePointer =
5767 return RValue::get(VTablePointer);
5768 }
5769
5770 case Builtin::BI__exception_code:
5771 case Builtin::BI_exception_code:
5773 case Builtin::BI__exception_info:
5774 case Builtin::BI_exception_info:
5776 case Builtin::BI__abnormal_termination:
5777 case Builtin::BI_abnormal_termination:
5779 case Builtin::BI_setjmpex:
5780 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5781 E->getArg(0)->getType()->isPointerType())
5782 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5783 break;
5784 case Builtin::BI_setjmp:
5785 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5786 E->getArg(0)->getType()->isPointerType()) {
5787 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
5788 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
5789 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
5790 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5791 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
5792 }
5793 break;
5794
5795 // C++ std:: builtins.
5796 case Builtin::BImove:
5797 case Builtin::BImove_if_noexcept:
5798 case Builtin::BIforward:
5799 case Builtin::BIforward_like:
5800 case Builtin::BIas_const:
5801 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5802 case Builtin::BI__GetExceptionInfo: {
5803 if (llvm::GlobalVariable *GV =
5804 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
5805 return RValue::get(GV);
5806 break;
5807 }
5808
5809 case Builtin::BI__fastfail:
5811
5812 case Builtin::BI__builtin_coro_id:
5813 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
5814 case Builtin::BI__builtin_coro_promise:
5815 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
5816 case Builtin::BI__builtin_coro_resume:
5817 EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
5818 return RValue::get(nullptr);
5819 case Builtin::BI__builtin_coro_frame:
5820 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
5821 case Builtin::BI__builtin_coro_noop:
5822 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
5823 case Builtin::BI__builtin_coro_free:
5824 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
5825 case Builtin::BI__builtin_coro_destroy:
5826 EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
5827 return RValue::get(nullptr);
5828 case Builtin::BI__builtin_coro_done:
5829 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
5830 case Builtin::BI__builtin_coro_alloc:
5831 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
5832 case Builtin::BI__builtin_coro_begin:
5833 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
5834 case Builtin::BI__builtin_coro_end:
5835 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
5836 case Builtin::BI__builtin_coro_suspend:
5837 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
5838 case Builtin::BI__builtin_coro_size:
5839 return EmitCoroutineIntrinsic(E, Intrinsic::coro_size);
5840 case Builtin::BI__builtin_coro_align:
5841 return EmitCoroutineIntrinsic(E, Intrinsic::coro_align);
5842
5843 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
5844 case Builtin::BIread_pipe:
5845 case Builtin::BIwrite_pipe: {
5846 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5847 *Arg1 = EmitScalarExpr(E->getArg(1));
5848 CGOpenCLRuntime OpenCLRT(CGM);
5849 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5850 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5851
5852 // Type of the generic packet parameter.
5853 unsigned GenericAS =
5855 llvm::Type *I8PTy = llvm::PointerType::get(getLLVMContext(), GenericAS);
5856
5857 // Testing which overloaded version we should generate the call for.
5858 if (2U == E->getNumArgs()) {
5859 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
5860 : "__write_pipe_2";
5861 // Creating a generic function type to be able to call with any builtin or
5862 // user defined type.
5863 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
5864 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5865 Value *ACast = Builder.CreateAddrSpaceCast(Arg1, I8PTy);
5866 return RValue::get(
5867 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5868 {Arg0, ACast, PacketSize, PacketAlign}));
5869 } else {
5870 assert(4 == E->getNumArgs() &&
5871 "Illegal number of parameters to pipe function");
5872 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
5873 : "__write_pipe_4";
5874
5875 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
5876 Int32Ty, Int32Ty};
5877 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
5878 *Arg3 = EmitScalarExpr(E->getArg(3));
5879 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5880 Value *ACast = Builder.CreateAddrSpaceCast(Arg3, I8PTy);
5881 // We know the third argument is an integer type, but we may need to cast
5882 // it to i32.
5883 if (Arg2->getType() != Int32Ty)
5884 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
5885 return RValue::get(
5886 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5887 {Arg0, Arg1, Arg2, ACast, PacketSize, PacketAlign}));
5888 }
5889 }
5890 // OpenCL v2.0 s6.13.16 ,s9.17.3.5 - Built-in pipe reserve read and write
5891 // functions
5892 case Builtin::BIreserve_read_pipe:
5893 case Builtin::BIreserve_write_pipe:
5894 case Builtin::BIwork_group_reserve_read_pipe:
5895 case Builtin::BIwork_group_reserve_write_pipe:
5896 case Builtin::BIsub_group_reserve_read_pipe:
5897 case Builtin::BIsub_group_reserve_write_pipe: {
5898 // Composing the mangled name for the function.
5899 const char *Name;
5900 if (BuiltinID == Builtin::BIreserve_read_pipe)
5901 Name = "__reserve_read_pipe";
5902 else if (BuiltinID == Builtin::BIreserve_write_pipe)
5903 Name = "__reserve_write_pipe";
5904 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
5905 Name = "__work_group_reserve_read_pipe";
5906 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
5907 Name = "__work_group_reserve_write_pipe";
5908 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
5909 Name = "__sub_group_reserve_read_pipe";
5910 else
5911 Name = "__sub_group_reserve_write_pipe";
5912
5913 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5914 *Arg1 = EmitScalarExpr(E->getArg(1));
5915 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
5916 CGOpenCLRuntime OpenCLRT(CGM);
5917 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5918 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5919
5920 // Building the generic function prototype.
5921 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
5922 llvm::FunctionType *FTy =
5923 llvm::FunctionType::get(ReservedIDTy, ArgTys, false);
5924 // We know the second argument is an integer type, but we may need to cast
5925 // it to i32.
5926 if (Arg1->getType() != Int32Ty)
5927 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
5928 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5929 {Arg0, Arg1, PacketSize, PacketAlign}));
5930 }
5931 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
5932 // functions
5933 case Builtin::BIcommit_read_pipe:
5934 case Builtin::BIcommit_write_pipe:
5935 case Builtin::BIwork_group_commit_read_pipe:
5936 case Builtin::BIwork_group_commit_write_pipe:
5937 case Builtin::BIsub_group_commit_read_pipe:
5938 case Builtin::BIsub_group_commit_write_pipe: {
5939 const char *Name;
5940 if (BuiltinID == Builtin::BIcommit_read_pipe)
5941 Name = "__commit_read_pipe";
5942 else if (BuiltinID == Builtin::BIcommit_write_pipe)
5943 Name = "__commit_write_pipe";
5944 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
5945 Name = "__work_group_commit_read_pipe";
5946 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
5947 Name = "__work_group_commit_write_pipe";
5948 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
5949 Name = "__sub_group_commit_read_pipe";
5950 else
5951 Name = "__sub_group_commit_write_pipe";
5952
5953 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5954 *Arg1 = EmitScalarExpr(E->getArg(1));
5955 CGOpenCLRuntime OpenCLRT(CGM);
5956 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5957 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5958
5959 // Building the generic function prototype.
5960 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
5961 llvm::FunctionType *FTy = llvm::FunctionType::get(
5962 llvm::Type::getVoidTy(getLLVMContext()), ArgTys, false);
5963
5964 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5965 {Arg0, Arg1, PacketSize, PacketAlign}));
5966 }
5967 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
5968 case Builtin::BIget_pipe_num_packets:
5969 case Builtin::BIget_pipe_max_packets: {
5970 const char *BaseName;
5971 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
5972 if (BuiltinID == Builtin::BIget_pipe_num_packets)
5973 BaseName = "__get_pipe_num_packets";
5974 else
5975 BaseName = "__get_pipe_max_packets";
5976 std::string Name = std::string(BaseName) +
5977 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
5978
5979 // Building the generic function prototype.
5980 Value *Arg0 = EmitScalarExpr(E->getArg(0));
5981 CGOpenCLRuntime OpenCLRT(CGM);
5982 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5983 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5984 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
5985 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5986
5987 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5988 {Arg0, PacketSize, PacketAlign}));
5989 }
5990
5991 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
5992 case Builtin::BIto_global:
5993 case Builtin::BIto_local:
5994 case Builtin::BIto_private: {
5995 auto Arg0 = EmitScalarExpr(E->getArg(0));
5996 auto NewArgT = llvm::PointerType::get(
5998 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
5999 auto NewRetT = llvm::PointerType::get(
6001 CGM.getContext().getTargetAddressSpace(
6003 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
6004 llvm::Value *NewArg;
6005 if (Arg0->getType()->getPointerAddressSpace() !=
6006 NewArgT->getPointerAddressSpace())
6007 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
6008 else
6009 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
6010 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
6011 auto NewCall =
6012 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
6013 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
6014 ConvertType(E->getType())));
6015 }
6016
6017 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
6018 // Table 6.13.17.1 specifies four overload forms of enqueue_kernel.
6019 // The code below expands the builtin call to a call to one of the following
6020 // functions that an OpenCL runtime library will have to provide:
6021 // __enqueue_kernel_basic
6022 // __enqueue_kernel_varargs
6023 // __enqueue_kernel_basic_events
6024 // __enqueue_kernel_events_varargs
6025 case Builtin::BIenqueue_kernel: {
6026 StringRef Name; // Generated function call name
6027 unsigned NumArgs = E->getNumArgs();
6028
6029 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
6030 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6031 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6032
6033 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
6034 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
6035 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
6036 llvm::Value *Range = NDRangeL.getAddress().emitRawPointer(*this);
6037
6038 // FIXME: Look through the addrspacecast which may exist to the stack
6039 // temporary as a hack.
6040 //
6041 // This is hardcoding the assumed ABI of the target function. This assumes
6042 // direct passing for every argument except NDRange, which is assumed to be
6043 // byval or byref indirect passed.
6044 //
6045 // This should be fixed to query a signature from CGOpenCLRuntime, and go
6046 // through EmitCallArgs to get the correct target ABI.
6047 Range = Range->stripPointerCasts();
6048
6049 llvm::Type *RangePtrTy = Range->getType();
6050
6051 if (NumArgs == 4) {
6052 // The most basic form of the call with parameters:
6053 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
6054 Name = "__enqueue_kernel_basic";
6055 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangePtrTy, GenericVoidPtrTy,
6056 GenericVoidPtrTy};
6057 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6058
6059 auto Info =
6060 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
6061 llvm::Value *Kernel =
6062 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6063 llvm::Value *Block =
6064 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6065
6066 auto RTCall = EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6067 {Queue, Flags, Range, Kernel, Block});
6068 return RValue::get(RTCall);
6069 }
6070 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
6071
6072 // Create a temporary array to hold the sizes of local pointer arguments
6073 // for the block. \p First is the position of the first size argument.
6074 auto CreateArrayForSizeVar =
6075 [=](unsigned First) -> std::pair<llvm::Value *, llvm::Value *> {
6076 llvm::APInt ArraySize(32, NumArgs - First);
6078 getContext().getSizeType(), ArraySize, nullptr,
6080 /*IndexTypeQuals=*/0);
6081 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
6082 llvm::Value *TmpPtr = Tmp.getPointer();
6083 // The EmitLifetime* pair expect a naked Alloca as their last argument,
6084 // however for cases where the default AS is not the Alloca AS, Tmp is
6085 // actually the Alloca ascasted to the default AS, hence the
6086 // stripPointerCasts()
6087 llvm::Value *Alloca = TmpPtr->stripPointerCasts();
6088 llvm::Value *ElemPtr;
6089 EmitLifetimeStart(Alloca);
6090 // Each of the following arguments specifies the size of the corresponding
6091 // argument passed to the enqueued block.
6092 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
6093 for (unsigned I = First; I < NumArgs; ++I) {
6094 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
6095 auto *GEP =
6096 Builder.CreateGEP(Tmp.getElementType(), Alloca, {Zero, Index});
6097 if (I == First)
6098 ElemPtr = GEP;
6099 auto *V =
6100 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
6101 Builder.CreateAlignedStore(
6102 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
6103 }
6104 // Return the Alloca itself rather than a potential ascast as this is only
6105 // used by the paired EmitLifetimeEnd.
6106 return {ElemPtr, Alloca};
6107 };
6108
6109 // Could have events and/or varargs.
6110 if (E->getArg(3)->getType()->isBlockPointerType()) {
6111 // No events passed, but has variadic arguments.
6112 Name = "__enqueue_kernel_varargs";
6113 auto Info =
6114 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
6115 llvm::Value *Kernel =
6116 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6117 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6118 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(4);
6119
6120 // Create a vector of the arguments, as well as a constant value to
6121 // express to the runtime the number of variadic arguments.
6122 llvm::Value *const Args[] = {Queue, Flags,
6123 Range, Kernel,
6124 Block, ConstantInt::get(IntTy, NumArgs - 4),
6125 ElemPtr};
6126 llvm::Type *const ArgTys[] = {
6127 QueueTy, IntTy, RangePtrTy, GenericVoidPtrTy,
6128 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
6129
6130 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6131 auto Call = RValue::get(
6132 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6133 EmitLifetimeEnd(TmpPtr);
6134 return Call;
6135 }
6136 // Any calls now have event arguments passed.
6137 if (NumArgs >= 7) {
6138 llvm::PointerType *PtrTy = llvm::PointerType::get(
6139 CGM.getLLVMContext(),
6140 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
6141
6142 llvm::Value *NumEvents =
6143 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
6144
6145 // Since SemaOpenCLBuiltinEnqueueKernel allows fifth and sixth arguments
6146 // to be a null pointer constant (including `0` literal), we can take it
6147 // into account and emit null pointer directly.
6148 llvm::Value *EventWaitList = nullptr;
6149 if (E->getArg(4)->isNullPointerConstant(
6151 EventWaitList = llvm::ConstantPointerNull::get(PtrTy);
6152 } else {
6153 EventWaitList =
6154 E->getArg(4)->getType()->isArrayType()
6156 : EmitScalarExpr(E->getArg(4));
6157 // Convert to generic address space.
6158 EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy);
6159 }
6160 llvm::Value *EventRet = nullptr;
6161 if (E->getArg(5)->isNullPointerConstant(
6163 EventRet = llvm::ConstantPointerNull::get(PtrTy);
6164 } else {
6165 EventRet =
6166 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), PtrTy);
6167 }
6168
6169 auto Info =
6170 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
6171 llvm::Value *Kernel =
6172 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6173 llvm::Value *Block =
6174 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6175
6176 std::vector<llvm::Type *> ArgTys = {
6177 QueueTy, Int32Ty, RangePtrTy, Int32Ty,
6178 PtrTy, PtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
6179
6180 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
6181 NumEvents, EventWaitList, EventRet,
6182 Kernel, Block};
6183
6184 if (NumArgs == 7) {
6185 // Has events but no variadics.
6186 Name = "__enqueue_kernel_basic_events";
6187 llvm::FunctionType *FTy =
6188 llvm::FunctionType::get(Int32Ty, ArgTys, false);
6189 return RValue::get(
6190 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6191 }
6192 // Has event info and variadics
6193 // Pass the number of variadics to the runtime function too.
6194 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
6195 ArgTys.push_back(Int32Ty);
6196 Name = "__enqueue_kernel_events_varargs";
6197
6198 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(7);
6199 Args.push_back(ElemPtr);
6200 ArgTys.push_back(ElemPtr->getType());
6201
6202 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6203 auto Call = RValue::get(
6204 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6205 EmitLifetimeEnd(TmpPtr);
6206 return Call;
6207 }
6208 llvm_unreachable("Unexpected enqueue_kernel signature");
6209 }
6210 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
6211 // parameter.
6212 case Builtin::BIget_kernel_work_group_size: {
6213 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6214 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6215 auto Info =
6216 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6217 Value *Kernel =
6218 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6219 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6221 CGM.CreateRuntimeFunction(
6222 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6223 false),
6224 "__get_kernel_work_group_size_impl"),
6225 {Kernel, Arg}));
6226 }
6227 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
6228 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6229 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6230 auto Info =
6231 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6232 Value *Kernel =
6233 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6234 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6236 CGM.CreateRuntimeFunction(
6237 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6238 false),
6239 "__get_kernel_preferred_work_group_size_multiple_impl"),
6240 {Kernel, Arg}));
6241 }
6242 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
6243 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
6244 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6245 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6246 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
6247 llvm::Value *NDRange = NDRangeL.getAddress().emitRawPointer(*this);
6248 auto Info =
6249 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
6250 Value *Kernel =
6251 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6252 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6253 const char *Name =
6254 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
6255 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
6256 : "__get_kernel_sub_group_count_for_ndrange_impl";
6258 CGM.CreateRuntimeFunction(
6259 llvm::FunctionType::get(
6260 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
6261 false),
6262 Name),
6263 {NDRange, Kernel, Block}));
6264 }
6265 case Builtin::BI__builtin_store_half:
6266 case Builtin::BI__builtin_store_halff: {
6267 Value *Val = EmitScalarExpr(E->getArg(0));
6269 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
6270 Builder.CreateStore(HalfVal, Address);
6271 return RValue::get(nullptr);
6272 }
6273 case Builtin::BI__builtin_load_half: {
6275 Value *HalfVal = Builder.CreateLoad(Address);
6276 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
6277 }
6278 case Builtin::BI__builtin_load_halff: {
6280 Value *HalfVal = Builder.CreateLoad(Address);
6281 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
6282 }
6283 case Builtin::BI__builtin_printf:
6284 case Builtin::BIprintf:
6285 if (getTarget().getTriple().isNVPTX() ||
6286 getTarget().getTriple().isAMDGCN() ||
6287 (getTarget().getTriple().isSPIRV() &&
6288 getTarget().getTriple().getVendor() == Triple::VendorType::AMD)) {
6289 if (getTarget().getTriple().isNVPTX())
6291 if ((getTarget().getTriple().isAMDGCN() ||
6292 getTarget().getTriple().isSPIRV()) &&
6293 getLangOpts().HIP)
6295 }
6296
6297 break;
6298 case Builtin::BI__builtin_canonicalize:
6299 case Builtin::BI__builtin_canonicalizef:
6300 case Builtin::BI__builtin_canonicalizef16:
6301 case Builtin::BI__builtin_canonicalizel:
6302 return RValue::get(
6303 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::canonicalize));
6304
6305 case Builtin::BI__builtin_thread_pointer: {
6306 if (!getContext().getTargetInfo().isTLSSupported())
6307 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
6308
6309 return RValue::get(Builder.CreateIntrinsic(llvm::Intrinsic::thread_pointer,
6310 {GlobalsInt8PtrTy}, {}));
6311 }
6312 case Builtin::BI__builtin_os_log_format:
6313 return emitBuiltinOSLogFormat(*E);
6314
6315 case Builtin::BI__xray_customevent: {
6317 return RValue::getIgnored();
6318
6319 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6321 return RValue::getIgnored();
6322
6323 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6324 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
6325 return RValue::getIgnored();
6326
6327 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
6328 auto FTy = F->getFunctionType();
6329 auto Arg0 = E->getArg(0);
6330 auto Arg0Val = EmitScalarExpr(Arg0);
6331 auto Arg0Ty = Arg0->getType();
6332 auto PTy0 = FTy->getParamType(0);
6333 if (PTy0 != Arg0Val->getType()) {
6334 if (Arg0Ty->isArrayType())
6335 Arg0Val = EmitArrayToPointerDecay(Arg0).emitRawPointer(*this);
6336 else
6337 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
6338 }
6339 auto Arg1 = EmitScalarExpr(E->getArg(1));
6340 auto PTy1 = FTy->getParamType(1);
6341 if (PTy1 != Arg1->getType())
6342 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
6343 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
6344 }
6345
6346 case Builtin::BI__xray_typedevent: {
6347 // TODO: There should be a way to always emit events even if the current
6348 // function is not instrumented. Losing events in a stream can cripple
6349 // a trace.
6351 return RValue::getIgnored();
6352
6353 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6355 return RValue::getIgnored();
6356
6357 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6358 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
6359 return RValue::getIgnored();
6360
6361 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
6362 auto FTy = F->getFunctionType();
6363 auto Arg0 = EmitScalarExpr(E->getArg(0));
6364 auto PTy0 = FTy->getParamType(0);
6365 if (PTy0 != Arg0->getType())
6366 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
6367 auto Arg1 = E->getArg(1);
6368 auto Arg1Val = EmitScalarExpr(Arg1);
6369 auto Arg1Ty = Arg1->getType();
6370 auto PTy1 = FTy->getParamType(1);
6371 if (PTy1 != Arg1Val->getType()) {
6372 if (Arg1Ty->isArrayType())
6373 Arg1Val = EmitArrayToPointerDecay(Arg1).emitRawPointer(*this);
6374 else
6375 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
6376 }
6377 auto Arg2 = EmitScalarExpr(E->getArg(2));
6378 auto PTy2 = FTy->getParamType(2);
6379 if (PTy2 != Arg2->getType())
6380 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
6381 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
6382 }
6383
6384 case Builtin::BI__builtin_ms_va_start:
6385 case Builtin::BI__builtin_ms_va_end:
6386 return RValue::get(
6388 BuiltinID == Builtin::BI__builtin_ms_va_start));
6389
6390 case Builtin::BI__builtin_ms_va_copy: {
6391 // Lower this manually. We can't reliably determine whether or not any
6392 // given va_copy() is for a Win64 va_list from the calling convention
6393 // alone, because it's legal to do this from a System V ABI function.
6394 // With opaque pointer types, we won't have enough information in LLVM
6395 // IR to determine this from the argument types, either. Best to do it
6396 // now, while we have enough information.
6397 Address DestAddr = EmitMSVAListRef(E->getArg(0));
6398 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
6399
6400 DestAddr = DestAddr.withElementType(Int8PtrTy);
6401 SrcAddr = SrcAddr.withElementType(Int8PtrTy);
6402
6403 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
6404 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
6405 }
6406
6407 case Builtin::BI__builtin_get_device_side_mangled_name: {
6408 auto Name = CGM.getCUDARuntime().getDeviceSideName(
6409 cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
6410 auto Str = CGM.GetAddrOfConstantCString(Name, "");
6411 return RValue::get(Str.getPointer());
6412 }
6413 }
6414
6415 // If this is an alias for a lib function (e.g. __builtin_sin), emit
6416 // the call using the normal call path, but using the unmangled
6417 // version of the function name.
6418 const auto &BI = getContext().BuiltinInfo;
6419 if (!shouldEmitBuiltinAsIR(BuiltinID, BI, *this) &&
6420 BI.isLibFunction(BuiltinID))
6421 return emitLibraryCall(*this, FD, E,
6422 CGM.getBuiltinLibFunction(FD, BuiltinID));
6423
6424 // If this is a predefined lib function (e.g. malloc), emit the call
6425 // using exactly the normal call path.
6426 if (BI.isPredefinedLibFunction(BuiltinID))
6427 return emitLibraryCall(*this, FD, E, CGM.getRawFunctionPointer(FD));
6428
6429 // Check that a call to a target specific builtin has the correct target
6430 // features.
6431 // This is down here to avoid non-target specific builtins, however, if
6432 // generic builtins start to require generic target features then we
6433 // can move this up to the beginning of the function.
6434 checkTargetFeatures(E, FD);
6435
6436 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
6437 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
6438
6439 // See if we have a target specific intrinsic.
6440 std::string Name = getContext().BuiltinInfo.getName(BuiltinID);
6441 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
6442 StringRef Prefix =
6443 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
6444 if (!Prefix.empty()) {
6445 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name);
6446 if (IntrinsicID == Intrinsic::not_intrinsic && Prefix == "spv" &&
6447 getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
6448 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin("amdgcn", Name);
6449 // NOTE we don't need to perform a compatibility flag check here since the
6450 // intrinsics are declared in Builtins*.def via LANGBUILTIN which filter the
6451 // MS builtins via ALL_MS_LANGUAGES and are filtered earlier.
6452 if (IntrinsicID == Intrinsic::not_intrinsic)
6453 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
6454 }
6455
6456 if (IntrinsicID != Intrinsic::not_intrinsic) {
6458
6459 // Find out if any arguments are required to be integer constant
6460 // expressions.
6461 unsigned ICEArguments = 0;
6463 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
6464 assert(Error == ASTContext::GE_None && "Should not codegen an error");
6465
6466 Function *F = CGM.getIntrinsic(IntrinsicID);
6467 llvm::FunctionType *FTy = F->getFunctionType();
6468
6469 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
6470 Value *ArgValue = EmitScalarOrConstFoldImmArg(ICEArguments, i, E);
6471 // If the intrinsic arg type is different from the builtin arg type
6472 // we need to do a bit cast.
6473 llvm::Type *PTy = FTy->getParamType(i);
6474 if (PTy != ArgValue->getType()) {
6475 // XXX - vector of pointers?
6476 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
6477 if (PtrTy->getAddressSpace() !=
6478 ArgValue->getType()->getPointerAddressSpace()) {
6479 ArgValue = Builder.CreateAddrSpaceCast(
6480 ArgValue, llvm::PointerType::get(getLLVMContext(),
6481 PtrTy->getAddressSpace()));
6482 }
6483 }
6484
6485 // Cast vector type (e.g., v256i32) to x86_amx, this only happen
6486 // in amx intrinsics.
6487 if (PTy->isX86_AMXTy())
6488 ArgValue = Builder.CreateIntrinsic(Intrinsic::x86_cast_vector_to_tile,
6489 {ArgValue->getType()}, {ArgValue});
6490 else
6491 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
6492 }
6493
6494 Args.push_back(ArgValue);
6495 }
6496
6497 Value *V = Builder.CreateCall(F, Args);
6498 QualType BuiltinRetType = E->getType();
6499
6500 llvm::Type *RetTy = VoidTy;
6501 if (!BuiltinRetType->isVoidType())
6502 RetTy = ConvertType(BuiltinRetType);
6503
6504 if (RetTy != V->getType()) {
6505 // XXX - vector of pointers?
6506 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
6507 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
6508 V = Builder.CreateAddrSpaceCast(
6509 V, llvm::PointerType::get(getLLVMContext(),
6510 PtrTy->getAddressSpace()));
6511 }
6512 }
6513
6514 // Cast x86_amx to vector type (e.g., v256i32), this only happen
6515 // in amx intrinsics.
6516 if (V->getType()->isX86_AMXTy())
6517 V = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector, {RetTy},
6518 {V});
6519 else
6520 V = Builder.CreateBitCast(V, RetTy);
6521 }
6522
6523 if (RetTy->isVoidTy())
6524 return RValue::get(nullptr);
6525
6526 return RValue::get(V);
6527 }
6528
6529 // Some target-specific builtins can have aggregate return values, e.g.
6530 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
6531 // ReturnValue to be non-null, so that the target-specific emission code can
6532 // always just emit into it.
6534 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
6535 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
6536 ReturnValue = ReturnValueSlot(DestPtr, false);
6537 }
6538
6539 // Now see if we can emit a target-specific builtin.
6540 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
6541 switch (EvalKind) {
6542 case TEK_Scalar:
6543 if (V->getType()->isVoidTy())
6544 return RValue::get(nullptr);
6545 return RValue::get(V);
6546 case TEK_Aggregate:
6547 return RValue::getAggregate(ReturnValue.getAddress(),
6548 ReturnValue.isVolatile());
6549 case TEK_Complex:
6550 llvm_unreachable("No current target builtin returns complex");
6551 }
6552 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6553 }
6554
6555 // EmitHLSLBuiltinExpr will check getLangOpts().HLSL
6556 if (Value *V = EmitHLSLBuiltinExpr(BuiltinID, E, ReturnValue)) {
6557 switch (EvalKind) {
6558 case TEK_Scalar:
6559 if (V->getType()->isVoidTy())
6560 return RValue::get(nullptr);
6561 return RValue::get(V);
6562 case TEK_Aggregate:
6563 return RValue::getAggregate(ReturnValue.getAddress(),
6564 ReturnValue.isVolatile());
6565 case TEK_Complex:
6566 llvm_unreachable("No current hlsl builtin returns complex");
6567 }
6568 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6569 }
6570
6571 if (getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice)
6572 return EmitHipStdParUnsupportedBuiltin(this, FD);
6573
6574 ErrorUnsupported(E, "builtin function");
6575
6576 // Unknown builtin, for now just dump it out and return undef.
6577 return GetUndefRValue(E->getType());
6578}
6579
6580namespace {
6581struct BuiltinAlignArgs {
6582 llvm::Value *Src = nullptr;
6583 llvm::Type *SrcType = nullptr;
6584 llvm::Value *Alignment = nullptr;
6585 llvm::Value *Mask = nullptr;
6586 llvm::IntegerType *IntType = nullptr;
6587
6588 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
6589 QualType AstType = E->getArg(0)->getType();
6590 if (AstType->isArrayType())
6591 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(CGF);
6592 else
6593 Src = CGF.EmitScalarExpr(E->getArg(0));
6594 SrcType = Src->getType();
6595 if (SrcType->isPointerTy()) {
6596 IntType = IntegerType::get(
6597 CGF.getLLVMContext(),
6598 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
6599 } else {
6600 assert(SrcType->isIntegerTy());
6601 IntType = cast<llvm::IntegerType>(SrcType);
6602 }
6603 Alignment = CGF.EmitScalarExpr(E->getArg(1));
6604 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
6605 auto *One = llvm::ConstantInt::get(IntType, 1);
6606 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
6607 }
6608};
6609} // namespace
6610
6611/// Generate (x & (y-1)) == 0.
6613 BuiltinAlignArgs Args(E, *this);
6614 llvm::Value *SrcAddress = Args.Src;
6615 if (Args.SrcType->isPointerTy())
6616 SrcAddress =
6617 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
6618 return RValue::get(Builder.CreateICmpEQ(
6619 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
6620 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
6621}
6622
6623/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
6624/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
6625/// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
6627 BuiltinAlignArgs Args(E, *this);
6628 llvm::Value *SrcForMask = Args.Src;
6629 if (AlignUp) {
6630 // When aligning up we have to first add the mask to ensure we go over the
6631 // next alignment value and then align down to the next valid multiple.
6632 // By adding the mask, we ensure that align_up on an already aligned
6633 // value will not change the value.
6634 if (Args.Src->getType()->isPointerTy()) {
6635 if (getLangOpts().PointerOverflowDefined)
6636 SrcForMask =
6637 Builder.CreateGEP(Int8Ty, SrcForMask, Args.Mask, "over_boundary");
6638 else
6639 SrcForMask = EmitCheckedInBoundsGEP(Int8Ty, SrcForMask, Args.Mask,
6640 /*SignedIndices=*/true,
6641 /*isSubtraction=*/false,
6642 E->getExprLoc(), "over_boundary");
6643 } else {
6644 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
6645 }
6646 }
6647 // Invert the mask to only clear the lower bits.
6648 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
6649 llvm::Value *Result = nullptr;
6650 if (Args.Src->getType()->isPointerTy()) {
6651 Result = Builder.CreateIntrinsic(
6652 Intrinsic::ptrmask, {Args.SrcType, Args.IntType},
6653 {SrcForMask, InvertedMask}, nullptr, "aligned_result");
6654 } else {
6655 Result = Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
6656 }
6657 assert(Result->getType() == Args.SrcType);
6658 return RValue::get(Result);
6659}
#define V(N, I)
static char bitActionToX86BTCode(BitTest::ActionKind A)
static Value * EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering)
static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static CanQualType getOSLogArgType(ASTContext &C, int Size)
Get the argument type for arguments to os_log_helper.
static Value * EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E, bool SanitizeOverflow)
static llvm::Value * EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E)
static Value * tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID, Value *V)
static bool areBOSTypesCompatible(int From, int To)
Checks if using the result of __builtin_object_size(p, From) in place of __builtin_object_size(p,...
static std::pair< llvm::Value *, llvm::Value * > GetCountFieldAndIndex(CodeGenFunction &CGF, const MemberExpr *ME, const FieldDecl *ArrayFD, const FieldDecl *CountFD, const Expr *Idx, llvm::IntegerType *ResType, bool IsSigned)
Value * EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::Type *ResultType)
static bool TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty, llvm::SmallPtrSetImpl< const Decl * > &Seen)
static Value * EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind, const CallExpr *E)
MSVC handles setjmp a bit differently on different platforms.
#define MUTATE_LDBL(func)
static Value * emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty)
Determine if the specified type requires laundering by checking if it is a dynamic class type or cont...
static Value * EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E)
static struct WidthAndSignedness EncompassingIntegerType(ArrayRef< struct WidthAndSignedness > Types)
static Value * EmitTargetArchBuiltinExpr(CodeGenFunction *CGF, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition CGBuiltin.cpp:72
static Value * emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, Instruction::BinaryOps Op, bool Invert=false)
Utility to insert an atomic instruction based Intrinsic::ID and the expression node,...
static bool HasNoIndirectArgumentsOrResults(CGFunctionInfo const &FnInfo)
Checks no arguments or results are passed indirectly in the ABI (i.e.
Value * EmitToInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::IntegerType *IntType)
Emit the conversions required to turn the given value into an integer of the given size.
static Value * emitBinaryExpMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID, Intrinsic::ID ConstrainedIntrinsicID)
static llvm::Value * EmitBitTestIntrinsic(CodeGenFunction &CGF, unsigned BuiltinID, const CallExpr *E)
Emit a _bittest* intrinsic.
static Value * EmitSignBit(CodeGenFunction &CGF, Value *V)
Emit the computation of the sign bit for a floating point value.
static Value * EmitFAbs(CodeGenFunction &CGF, Value *V)
EmitFAbs - Emit a call to @llvm.fabs().
static llvm::Value * EmitPositiveResultOrZero(CodeGenFunction &CGF, llvm::Value *Res, llvm::Value *Index, llvm::IntegerType *ResType, bool IsSigned)
static bool shouldEmitBuiltinAsIR(unsigned BuiltinID, const Builtin::Context &BI, const CodeGenFunction &CGF)
Some builtins do not have library implementation on some targets and are instead emitted as LLVM IRs ...
Definition CGBuiltin.cpp:48
static bool isSpecialUnsignedMultiplySignedResult(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
static llvm::Value * getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType)
static Value * emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static WidthAndSignedness getIntegerWidthAndSignedness(const clang::ASTContext &context, const clang::QualType Type)
static RValue EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Emit a checked mixed-sign multiply.
static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID)
Value * MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, bool ReturnBool)
Utility to insert an atomic cmpxchg instruction.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E)
static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size, Align AlignmentInBytes)
static Value * EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering=AtomicOrdering::SequentiallyConsistent)
This function should be invoked to emit atomic cmpxchg for Microsoft's _InterlockedCompareExchange* i...
static bool isSpecialMixedSignMultiply(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
Determine if a binop is a checked mixed-sign multiply we can specialize.
static Value * emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static llvm::Value * emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static Value * EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E)
static const FieldDecl * FindFlexibleArrayMemberField(CodeGenFunction &CGF, ASTContext &Ctx, const RecordDecl *RD)
Find a struct's flexible array member.
static Value * EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E)
static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF, const FunctionDecl *FD)
static llvm::Value * EmitX86BitTestIntrinsic(CodeGenFunction &CGF, BitTest BT, const CallExpr *E, Value *BitBase, Value *BitPos)
static RValue EmitCheckedUnsignedMultiplySignedResult(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E)
static llvm::AtomicOrdering getBitTestAtomicOrdering(BitTest::InterlockingKind I)
static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD, int64_t &Offset)
Calculate the offset of a struct field.
Value * MakeBinaryAtomicValue(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, AtomicOrdering Ordering)
Utility to insert an atomic instruction based on Intrinsic::ID and the expression node.
llvm::Value * EmitOverflowIntrinsic(CodeGenFunction &CGF, const Intrinsic::ID IntrinsicID, llvm::Value *X, llvm::Value *Y, llvm::Value *&Carry)
Emit a call to llvm.
static Value * EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW)
static Value * EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
llvm::Value * emitBuiltinWithOneOverloadedType(clang::CodeGen::CodeGenFunction &CGF, const clang::CallExpr *E, unsigned IntrinsicID, llvm::StringRef Name="")
Definition CGBuiltin.h:63
static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd, const CallExpr *e, mlir::Operation *calleeValue)
TokenType getType() const
Returns the token's type, e.g.
FormatToken * Next
The next token in the unwrapped line.
#define X(type, name)
Definition Value.h:97
static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target)
llvm::MachO::Record Record
Definition MachO.h:31
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
__DEVICE__ float modf(float __x, float *__iptr)
__DEVICE__ double nan(const char *)
APSInt & getInt()
Definition APValue.h:489
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:188
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType VoidPtrTy
IdentifierTable & Idents
Definition ASTContext.h:737
Builtin::Context & BuiltinInfo
Definition ASTContext.h:739
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
bool hasSameUnqualifiedType(QualType T1, QualType T2) const
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CanQualType VoidTy
QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error, unsigned *IntegerConstantArgs=nullptr) const
Return the type for the specified builtin.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:856
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
@ GE_None
No error.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
QualType getElementType() const
Definition TypeBase.h:3732
static std::unique_ptr< AtomicScopeModel > create(AtomicScopeModelKind K)
Create an atomic scope model by AtomicScopeModelKind.
Definition SyncScope.h:273
static bool isCommaOp(Opcode Opc)
Definition Expr.h:4077
Expr * getRHS() const
Definition Expr.h:4026
Holds information about both target-independent and target-specific builtins, allowing easy queries b...
Definition Builtins.h:228
bool isConstWithoutErrnoAndExceptions(unsigned ID) const
Return true if this function has no side effects and doesn't read memory, except for possibly errno o...
Definition Builtins.h:400
std::string getName(unsigned ID) const
Return the identifier name for the specified builtin, e.g.
Definition Builtins.cpp:80
bool isConstWithoutExceptions(unsigned ID) const
Definition Builtins.h:404
bool isConst(unsigned ID) const
Return true if this function has no side effects and doesn't read memory.
Definition Builtins.h:275
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2879
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3083
bool hasStoredFPFeatures() const
Definition Expr.h:3038
SourceLocation getBeginLoc() const
Definition Expr.h:3213
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3062
Expr * getCallee()
Definition Expr.h:3026
FPOptionsOverride getFPFeatures() const
Definition Expr.h:3178
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3070
arg_range arguments()
Definition Expr.h:3131
CastKind getCastKind() const
Definition Expr.h:3656
Expr * getSubExpr()
Definition Expr.h:3662
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
Address withAlignment(CharUnits NewAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:269
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition CGBuilder.h:140
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
Definition CGBuilder.h:147
llvm::AtomicRMWInst * CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Ordering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:184
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition CGBuilder.h:402
llvm::AtomicCmpXchgInst * CreateAtomicCmpXchg(Address Addr, llvm::Value *Cmp, llvm::Value *New, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:173
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:112
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition CGBuilder.h:132
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition CGBuilder.h:350
All available information about a concrete callee.
Definition CGCall.h:63
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition CGCall.h:137
llvm::DILocation * CreateTrapFailureMessageFor(llvm::DebugLoc TrapLocation, StringRef Category, StringRef FailureMsg)
Create a debug location from TrapLocation that adds an artificial inline frame where the frame name i...
CGFunctionInfo - Class to encapsulate the information about a function definition.
MutableArrayRef< ArgInfo > arguments()
virtual llvm::Value * getPipeElemAlign(const Expr *PipeArg)
virtual llvm::Value * getPipeElemSize(const Expr *PipeArg)
llvm::StructType * getLLVMType() const
Return the "complete object" LLVM type associated with this record.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition CGCall.h:274
void add(RValue rvalue, QualType type)
Definition CGCall.h:302
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Definition CGClass.cpp:2696
RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E)
RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID)
llvm::Value * EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, const CallExpr *E)
Definition AMDGPU.cpp:258
SanitizerSet SanOpts
Sanitizers enabled for this function.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Definition CGExpr.cpp:1185
llvm::Type * ConvertType(QualType T)
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
BuiltinCheckKind
Specifies which type of sanitizer check to apply when handling a particular builtin.
llvm::Value * EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SystemZ.cpp:39
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition CGCall.cpp:5059
llvm::Value * EmitSEHAbnormalTermination()
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
Produce the code to do a retain.
Definition CGObjC.cpp:2328
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
llvm::Value * EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart)
Emits a call to an LLVM variable-argument intrinsic, either llvm.va_start or llvm....
llvm::Value * EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition AMDGPU.cpp:332
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3648
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:6657
RValue emitBuiltinOSLogFormat(const CallExpr &E)
Emit IR for __builtin_os_log_format.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
llvm::Function * generateBuiltinOSLogHelperFunction(const analyze_os_log::OSLogBufferLayout &Layout, CharUnits BufferAlignment)
const LangOptions & getLangOpts() const
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Definition RISCV.cpp:1073
llvm::Value * EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind)
Emits an argument for a call to a builtin.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3538
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition CGCall.cpp:4558
const TargetInfo & getTarget() const
RValue emitRotate(const CallExpr *E, bool IsRotateRight)
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
llvm::Value * EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition ARM.cpp:2664
CGCallee EmitCallee(const Expr *E)
Definition CGExpr.cpp:6057
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
llvm::Value * EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition ARM.cpp:7962
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:223
llvm::Value * EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition PPC.cpp:73
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:3788
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition CGDecl.cpp:1357
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2153
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition CGCall.cpp:5215
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp)
Emit IR for __builtin_align_up/__builtin_align_down.
void EmitLifetimeEnd(llvm::Value *Addr)
Definition CGDecl.cpp:1369
llvm::Value * EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside presereved access index region.
llvm::Value * EmitDirectXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition DirectX.cpp:22
llvm::Value * EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::Triple::ArchType Arch)
Definition ARM.cpp:4974
llvm::Value * EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4228
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2332
llvm::Value * EmitSPIRVBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SPIR.cpp:22
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Address EmitVAListRef(const Expr *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
Definition CGExpr.cpp:1532
RValue EmitBuiltinIsAligned(const CallExpr *E)
Emit IR for __builtin_is_aligned.
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, const CallExpr *TheCallExpr, bool IsDelete)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition Hexagon.cpp:77
CodeGenTypes & getTypes() const
llvm::Value * EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition X86.cpp:737
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1515
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition CGExpr.cpp:186
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
Definition CGExpr.cpp:4213
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4140
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2183
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
Definition CGExpr.cpp:1228
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
llvm::Value * EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition NVPTX.cpp:414
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
Definition CGExpr.cpp:4128
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1631
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
EmitTargetBuiltinExpr - Emit the given builtin call.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition CGObjC.cpp:2167
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:652
This class organizes the cross-function state that is used while generating LLVM code.
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
llvm::Constant * getBuiltinLibFunction(const FunctionDecl *FD, unsigned BuiltinID)
Given a builtin id for a function like "__builtin_fabsf", return a Function* for "fabsf".
DiagnosticsEngine & getDiags() const
const LangOptions & getLangOpts() const
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
const llvm::Triple & getTriple() const
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
Definition CGCall.cpp:1701
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
LValue - This represents an lvalue references.
Definition CGValue.h:182
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition CGValue.h:361
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
static RValue getIgnored()
Definition CGValue.h:93
static RValue get(llvm::Value *V)
Definition CGValue.h:98
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:125
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition CGValue.h:108
An abstract representation of an aligned address.
Definition Address.h:42
static RawAddress invalid()
Definition Address.h:61
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition CGCall.h:379
virtual bool supportsLibCall() const
supportsLibCall - Query to whether or not target supports all lib calls.
Definition TargetInfo.h:76
virtual llvm::Value * encodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert the address of an instruction into a return address ...
Definition TargetInfo.h:174
virtual llvm::Value * decodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert a return address as stored by the system into the ac...
Definition TargetInfo.h:164
virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const
Determines the DWARF register number for the stack pointer, for exception-handling purposes.
Definition TargetInfo.h:146
virtual llvm::Value * testFPKind(llvm::Value *V, unsigned BuiltinID, CGBuilderTy &Builder, CodeGenModule &CGM) const
Performs a target specific test of a floating point value for things like IsNaN, Infinity,...
Definition TargetInfo.h:183
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3275
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4371
Represents a sugar type with __counted_by or __sized_by annotations, including their _or_null variant...
Definition TypeBase.h:3436
DynamicCountPointerKind getKind() const
Definition TypeBase.h:3466
static bool isFlexibleArrayMemberLike(const ASTContext &Context, const Decl *D, QualType Ty, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution)
Whether it resembles a flexible array member.
Definition DeclBase.cpp:437
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:593
FunctionDecl * getAsFunction() LLVM_READONLY
Returns the function itself, or the templated function if this is a function template.
Definition DeclBase.cpp:251
bool hasAttr() const
Definition DeclBase.h:577
Concrete class used by the front-end to report problems and issues.
Definition Diagnostic.h:231
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3100
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point...
Definition Expr.cpp:3078
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx, SmallVectorImpl< PartialDiagnosticAt > *Diag=nullptr) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3073
bool EvaluateAsFloat(llvm::APFloat &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsFloat - Return true if this is a constant which we can fold and convert to a floating point...
bool isPRValue() const
Definition Expr.h:285
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
Definition Expr.h:837
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3624
std::optional< std::string > tryEvaluateString(ASTContext &Ctx) const
If the current Expr can be evaluated to a pointer to a null-terminated constant string,...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3053
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Definition Expr.cpp:4001
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
bool tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx, unsigned Type) const
If the current Expr is a pointer, this will try to statically determine the number of bytes available...
const ValueDecl * getAsBuiltinConstantDeclRef(const ASTContext &Context) const
If this expression is an unambiguous reference to a single declaration, in the style of __builtin_fun...
Definition Expr.cpp:222
Represents difference between two FPOptions values.
LangOptions::FPExceptionModeKind getExceptionMode() const
Represents a member of a struct/union/class.
Definition Decl.h:3157
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition Decl.cpp:4796
Represents a function declaration or definition.
Definition Decl.h:1999
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2794
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition Decl.cpp:3703
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5264
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
IdentifierInfo & get(StringRef Name)
Return the identifier token info for the specified named identifier.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition Decl.cpp:5470
@ FPE_Ignore
Assume that floating-point exceptions are masked.
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3300
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3383
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:300
std::string getNameAsString() const
Get a human-readable name for the declaration, even if it is one of the special kinds of names (C++ c...
Definition Decl.h:316
const Expr * getSubExpr() const
Definition Expr.h:2201
PipeType - OpenCL20.
Definition TypeBase.h:8103
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8369
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8411
Represents a struct/union/class.
Definition Decl.h:4309
field_range fields() const
Definition Decl.h:4512
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:346
bool isUnion() const
Definition Decl.h:3919
Exposes information about the current target.
Definition TargetInfo.h:226
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isBigEndian() const
virtual bool checkArithmeticFenceSupported() const
Controls if __arithmetic_fence is supported in the targeted backend.
unsigned getSuitableAlign() const
Return the alignment that is the largest alignment ever used for any scalar/SIMD data type on the tar...
Definition TargetInfo.h:742
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
The base class of the type hierarchy.
Definition TypeBase.h:1833
bool isBlockPointerType() const
Definition TypeBase.h:8542
bool isVoidType() const
Definition TypeBase.h:8878
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2205
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8621
bool isCountAttributedType() const
Definition Type.cpp:741
bool isPointerType() const
Definition TypeBase.h:8522
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8922
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9168
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1909
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9101
Expr * getSubExpr() const
Definition Expr.h:2287
QualType getType() const
Definition Decl.h:722
QualType getType() const
Definition Value.cpp:237
Represents a GCC generic vector type.
Definition TypeBase.h:4173
SmallVector< OSLogBufferItem, 4 > Items
Definition OSLog.h:113
unsigned char getNumArgsByte() const
Definition OSLog.h:148
unsigned char getSummaryByte() const
Definition OSLog.h:139
Defines the clang::TargetInfo interface.
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:154
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:145
llvm::Constant * initializationPatternFor(CodeGenModule &, llvm::Type *)
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
constexpr XRayInstrMask Typed
Definition XRayInstr.h:42
constexpr XRayInstrMask Custom
Definition XRayInstr.h:41
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
bool Mul(InterpState &S, CodePtr OpPC)
Definition Interp.h:445
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ Success
Annotation was successful.
Definition Parser.h:65
nullptr
This class represents a compute construct, representing a 'Kind' of β€˜parallel’, 'serial',...
Expr * Cond
};
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
const FunctionProtoType * T
LangAS
Defines the address space values used by the address space qualifier of QualType.
SyncScope
Defines sync scope values used internally by clang.
Definition SyncScope.h:42
llvm::StringRef getAsString(SyncScope S)
Definition SyncScope.h:60
U cast(CodeGen::Address addr)
Definition Address.h:327
@ Other
Other implicit parameter.
Definition Decl.h:1745
unsigned long uint64_t
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
void clear(SanitizerMask K=SanitizerKind::All)
Disable the sanitizers specified in K.
Definition Sanitizers.h:195
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition Sanitizers.h:187
#define sinh(__x)
Definition tgmath.h:373
#define asin(__x)
Definition tgmath.h:112
#define scalbln(__x, __y)
Definition tgmath.h:1182
#define sqrt(__x)
Definition tgmath.h:520
#define acos(__x)
Definition tgmath.h:83
#define fmin(__x, __y)
Definition tgmath.h:780
#define exp(__x)
Definition tgmath.h:431
#define ilogb(__x)
Definition tgmath.h:851
#define copysign(__x, __y)
Definition tgmath.h:618
#define erf(__x)
Definition tgmath.h:636
#define atanh(__x)
Definition tgmath.h:228
#define remquo(__x, __y, __z)
Definition tgmath.h:1111
#define nextafter(__x, __y)
Definition tgmath.h:1055
#define frexp(__x, __y)
Definition tgmath.h:816
#define asinh(__x)
Definition tgmath.h:199
#define erfc(__x)
Definition tgmath.h:653
#define atan2(__x, __y)
Definition tgmath.h:566
#define nexttoward(__x, __y)
Definition tgmath.h:1073
#define hypot(__x, __y)
Definition tgmath.h:833
#define exp2(__x)
Definition tgmath.h:670
#define sin(__x)
Definition tgmath.h:286
#define cbrt(__x)
Definition tgmath.h:584
#define log2(__x)
Definition tgmath.h:970
#define llround(__x)
Definition tgmath.h:919
#define cosh(__x)
Definition tgmath.h:344
#define trunc(__x)
Definition tgmath.h:1216
#define fmax(__x, __y)
Definition tgmath.h:762
#define ldexp(__x, __y)
Definition tgmath.h:868
#define acosh(__x)
Definition tgmath.h:170
#define tgamma(__x)
Definition tgmath.h:1199
#define scalbn(__x, __y)
Definition tgmath.h:1165
#define round(__x)
Definition tgmath.h:1148
#define fmod(__x, __y)
Definition tgmath.h:798
#define llrint(__x)
Definition tgmath.h:902
#define tan(__x)
Definition tgmath.h:315
#define cos(__x)
Definition tgmath.h:257
#define log10(__x)
Definition tgmath.h:936
#define fabs(__x)
Definition tgmath.h:549
#define pow(__x, __y)
Definition tgmath.h:490
#define log1p(__x)
Definition tgmath.h:953
#define rint(__x)
Definition tgmath.h:1131
#define expm1(__x)
Definition tgmath.h:687
#define remainder(__x, __y)
Definition tgmath.h:1090
#define fdim(__x, __y)
Definition tgmath.h:704
#define lgamma(__x)
Definition tgmath.h:885
#define tanh(__x)
Definition tgmath.h:402
#define lrint(__x)
Definition tgmath.h:1004
#define atan(__x)
Definition tgmath.h:141
#define floor(__x)
Definition tgmath.h:722
#define ceil(__x)
Definition tgmath.h:601
#define log(__x)
Definition tgmath.h:460
#define logb(__x)
Definition tgmath.h:987
#define nearbyint(__x)
Definition tgmath.h:1038
#define lround(__x)
Definition tgmath.h:1021
#define fma(__x, __y, __z)
Definition tgmath.h:742