clang 22.0.0git
CIRGenFunction.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Internal per-function state used for AST-to-ClangIR code gen
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14
15#include "CIRGenCXXABI.h"
16#include "CIRGenCall.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/Location.h"
19#include "clang/AST/ExprCXX.h"
22
23#include <cassert>
24
namespace clang::CIRGen {

// NOTE(review): the constructor's opening signature line is truncated in this
// extraction; only the trailing parameter and the init list survive. The
// visible tail takes `suppressNewContext` and initializes the type cache,
// the module reference, and the builder reference.
    bool suppressNewContext)
    : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
  // Let the EH cleanup stack call back into this function's state.
  ehStack.setCGF(this);
}
32
34
// This is copied from clang/lib/CodeGen/CodeGenFunction.cpp
//
// NOTE(review): the signature line is truncated in this extraction; the
// member listing elsewhere in this page shows it as
//   static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
// Classifies how a value of `type` is represented during IR generation:
// scalar, complex, or aggregate. Atomic types are classified by their
// underlying value type (hence the loop).
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
// Non-canonical and dependent types must never reach IR generation; list
// them all via the TypeNodes machinery so the switch stays exhaustive.
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
    case Type::HLSLAttributedResource:
    case Type::HLSLInlineSpirv:
      return cir::TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return cir::TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ArrayParameter:
      return cir::TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}
94
// NOTE(review): the signature line is truncated in this extraction —
// presumably mlir::Type CIRGenFunction::convertTypeForMem(QualType t);
// verify against the header. Delegates to the module-level type converter
// for the in-memory (storage) representation of `t`.
  return cgm.getTypes().convertTypeForMem(t);
}
98
// NOTE(review): the signature line is truncated in this extraction; the
// member listing elsewhere in this page shows it as
//   mlir::Type convertType(clang::QualType t)
// Delegates to the module-level type converter for the value representation.
  return cgm.getTypes().convertType(t);
}
102
// Translate a clang SourceLocation into an MLIR file:line:col location.
// NOTE(review): the signature line and the declaration of `sm` (presumably a
// SourceManager reference obtained from the ASTContext) are truncated in
// this extraction — confirm against the header.
  // Some AST nodes might contain invalid source locations (e.g.
  // CXXDefaultArgExpr), workaround that to still get something out.
  if (srcLoc.isValid()) {
    // Presumed locations honor #line directives.
    PresumedLoc pLoc = sm.getPresumedLoc(srcLoc);
    StringRef filename = pLoc.getFilename();
    return mlir::FileLineColLoc::get(builder.getStringAttr(filename),
                                     pLoc.getLine(), pLoc.getColumn());
  }
  // Do our best...
  assert(currSrcLoc && "expected to inherit some source location");
  return *currSrcLoc;
}
117
118mlir::Location CIRGenFunction::getLoc(SourceRange srcLoc) {
119 // Some AST nodes might contain invalid source locations (e.g.
120 // CXXDefaultArgExpr), workaround that to still get something out.
121 if (srcLoc.isValid()) {
122 mlir::Location beg = getLoc(srcLoc.getBegin());
123 mlir::Location end = getLoc(srcLoc.getEnd());
124 SmallVector<mlir::Location, 2> locs = {beg, end};
125 mlir::Attribute metadata;
126 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
127 }
128 if (currSrcLoc) {
129 return *currSrcLoc;
130 }
131 // We're brave, but time to give up.
132 return builder.getUnknownLoc();
133}
134
135mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) {
136 SmallVector<mlir::Location, 2> locs = {lhs, rhs};
137 mlir::Attribute metadata;
138 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
139}
140
141bool CIRGenFunction::containsLabel(const Stmt *s, bool ignoreCaseStmts) {
142 // Null statement, not a label!
143 if (!s)
144 return false;
145
146 // If this is a label, we have to emit the code, consider something like:
147 // if (0) { ... foo: bar(); } goto foo;
148 //
149 // TODO: If anyone cared, we could track __label__'s, since we know that you
150 // can't jump to one from outside their declared region.
151 if (isa<LabelStmt>(s))
152 return true;
153
154 // If this is a case/default statement, and we haven't seen a switch, we
155 // have to emit the code.
156 if (isa<SwitchCase>(s) && !ignoreCaseStmts)
157 return true;
158
159 // If this is a switch statement, we want to ignore case statements when we
160 // recursively process the sub-statements of the switch. If we haven't
161 // encountered a switch statement, we treat case statements like labels, but
162 // if we are processing a switch statement, case statements are expected.
163 if (isa<SwitchStmt>(s))
164 ignoreCaseStmts = true;
165
166 // Scan subexpressions for verboten labels.
167 return std::any_of(s->child_begin(), s->child_end(),
168 [=](const Stmt *subStmt) {
169 return containsLabel(subStmt, ignoreCaseStmts);
170 });
171}
172
173/// If the specified expression does not fold to a constant, or if it does but
174/// contains a label, return false. If it constant folds return true and set
175/// the boolean result in Result.
176bool CIRGenFunction::constantFoldsToBool(const Expr *cond, bool &resultBool,
177 bool allowLabels) {
178 llvm::APSInt resultInt;
179 if (!constantFoldsToSimpleInteger(cond, resultInt, allowLabels))
180 return false;
181
182 resultBool = resultInt.getBoolValue();
183 return true;
184}
185
/// If the specified expression does not fold to a constant, or if it does
/// fold but contains a label, return false. If it constant folds, return
/// true and set the folded value.
// NOTE(review): the opening signature line is truncated in this extraction;
// the member listing elsewhere in this page shows it as
//   bool constantFoldsToSimpleInteger(const clang::Expr *cond,
//                                     llvm::APSInt &resultInt,
//                                     bool allowLabels = false)
                                               llvm::APSInt &resultInt,
                                               bool allowLabels) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult result;
  if (!cond->EvaluateAsInt(result, getContext()))
    return false; // Not foldable, not integer or not fully evaluatable.

  llvm::APSInt intValue = result.Val.getInt();
  // A contained label means the code must be emitted even though the
  // condition folds, unless the caller explicitly allows labels.
  if (!allowLabels && containsLabel(cond))
    return false; // Contains a label.

  resultInt = intValue;
  return true;
}
205
206void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
207 CharUnits alignment) {
208 if (!type->isVoidType()) {
209 mlir::Value addr = emitAlloca("__retval", convertType(type), loc, alignment,
210 /*insertIntoFnEntryBlock=*/false);
211 fnRetAlloca = addr;
212 returnValue = Address(addr, alignment);
213 }
214}
215
216void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
217 mlir::Location loc, CharUnits alignment,
218 bool isParam) {
219 assert(isa<NamedDecl>(var) && "Needs a named decl");
220 assert(!symbolTable.count(var) && "not supposed to be available just yet");
221
222 auto allocaOp = addrVal.getDefiningOp<cir::AllocaOp>();
223 assert(allocaOp && "expected cir::AllocaOp");
224
225 if (isParam)
226 allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
227 if (ty->isReferenceType() || ty.isConstQualified())
228 allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));
229
230 symbolTable.insert(var, allocaOp);
231}
232
// Tear down a lexical scope: run pending cleanups, materialize the return
// block (at function depth) or a terminating yield (for nested scopes), and
// tidy up empty blocks.
// NOTE(review): the opening signature line is truncated in this extraction —
// from the members used (performCleanup, returnBlock, emitReturn) this is
// presumably CIRGenFunction::LexicalScope::cleanup(); confirm against the
// header. One interior line after "// ApplyDebugLocation" also appears lost.
  CIRGenBuilderTy &builder = cgf.builder;
  LexicalScope *localScope = cgf.curLexScope;

  auto applyCleanup = [&]() {
    if (performCleanup) {
      // ApplyDebugLocation
      forceCleanup();
    }
  };

  if (returnBlock != nullptr) {
    // Write out the return block, which loads the value from `__retval` and
    // issues the `cir.return`.
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.setInsertionPointToEnd(returnBlock);
    (void)emitReturn(*returnLoc);
  }

  auto insertCleanupAndLeave = [&](mlir::Block *insPt) {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.setInsertionPointToEnd(insPt);

    // If we still don't have a cleanup block, it means that `applyCleanup`
    // below might be able to get us one.
    mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);

    // Leverage and defers to RunCleanupsScope's dtor and scope handling.
    applyCleanup();

    // If we now have one after `applyCleanup`, hook it up properly.
    if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
      cleanupBlock = localScope->getCleanupBlock(builder);
      builder.create<cir::BrOp>(insPt->back().getLoc(), cleanupBlock);
      if (!cleanupBlock->mightHaveTerminator()) {
        mlir::OpBuilder::InsertionGuard guard(builder);
        builder.setInsertionPointToEnd(cleanupBlock);
        builder.create<cir::YieldOp>(localScope->endLoc);
      }
    }

    if (localScope->depth == 0) {
      // Reached the end of the function.
      if (returnBlock != nullptr) {
        if (returnBlock->getUses().empty()) {
          returnBlock->erase();
        } else {
          // Thread return block via cleanup block.
          if (cleanupBlock) {
            for (mlir::BlockOperand &blockUse : returnBlock->getUses()) {
              cir::BrOp brOp = mlir::cast<cir::BrOp>(blockUse.getOwner());
              brOp.setSuccessor(cleanupBlock);
            }
          }

          builder.create<cir::BrOp>(*returnLoc, returnBlock);
          return;
        }
      }
      emitImplicitReturn();
      return;
    }

    // End of any local scope != function
    // Ternary ops have to deal with matching arms for yielding types
    // and do return a value, it must do its own cir.yield insertion.
    if (!localScope->isTernary() && !insPt->mightHaveTerminator()) {
      !retVal ? builder.create<cir::YieldOp>(localScope->endLoc)
              : builder.create<cir::YieldOp>(localScope->endLoc, retVal);
    }
  };

  // If a cleanup block has been created at some point, branch to it
  // and set the insertion point to continue at the cleanup block.
  // Terminators are then inserted either in the cleanup block or
  // inline in this current block.
  mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
  if (cleanupBlock)
    insertCleanupAndLeave(cleanupBlock);

  // Now deal with any pending block wrap up like implicit end of
  // scope.

  mlir::Block *curBlock = builder.getBlock();
  if (isGlobalInit() && !curBlock)
    return;
  if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
    return;

  // Get rid of any empty block at the end of the scope.
  bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
  if (!entryBlock && curBlock->empty()) {
    curBlock->erase();
    if (returnBlock != nullptr && returnBlock->getUses().empty())
      returnBlock->erase();
    return;
  }

  // If there's a cleanup block, branch to it, nothing else to do.
  if (cleanupBlock) {
    builder.create<cir::BrOp>(curBlock->back().getLoc(), cleanupBlock);
    return;
  }

  // No pre-existent cleanup block, emit cleanup code and yield/return.
  insertCleanupAndLeave(curBlock);
}
341
342cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
343 CIRGenBuilderTy &builder = cgf.getBuilder();
344
345 if (!cgf.curFn.getFunctionType().hasVoidReturn()) {
346 // Load the value from `__retval` and return it via the `cir.return` op.
347 auto value = builder.create<cir::LoadOp>(
348 loc, cgf.curFn.getFunctionType().getReturnType(), *cgf.fnRetAlloca);
349 return builder.create<cir::ReturnOp>(loc,
350 llvm::ArrayRef(value.getResult()));
351 }
352 return builder.create<cir::ReturnOp>(loc);
353}
354
355// This is copied from CodeGenModule::MayDropFunctionReturn. This is a
356// candidate for sharing between CIRGen and CodeGen.
357static bool mayDropFunctionReturn(const ASTContext &astContext,
358 QualType returnType) {
359 // We can't just discard the return value for a record type with a complex
360 // destructor or a non-trivially copyable type.
361 if (const auto *classDecl = returnType->getAsCXXRecordDecl())
362 return classDecl->hasTrivialDestructor();
363 return returnType.isTriviallyCopyableType(astContext);
364}
365
// Emit the terminator used when control flows off the end of the function
// body without an explicit return: trap/unreachable for UB cases in C++,
// otherwise an ordinary (implicit) return.
// NOTE(review): one interior line inside the `shouldEmitUnreachable` branch
// appears lost in this extraction (between the brace and the OptimizationLevel
// check); confirm against upstream.
void CIRGenFunction::LexicalScope::emitImplicitReturn() {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  LexicalScope *localScope = cgf.curLexScope;

  const auto *fd = cast<clang::FunctionDecl>(cgf.curGD.getDecl());

  // In C++, flowing off the end of a non-void function is always undefined
  // behavior. In C, flowing off the end of a non-void function is undefined
  // behavior only if the non-existent return value is used by the caller.
  // That influences whether the terminating op is trap, unreachable, or
  // return.
  if (cgf.getLangOpts().CPlusPlus && !fd->hasImplicitReturnZero() &&
      !cgf.sawAsmBlock && !fd->getReturnType()->isVoidType() &&
      builder.getInsertionBlock()) {
    bool shouldEmitUnreachable =
        cgf.cgm.getCodeGenOpts().StrictReturn ||
        !mayDropFunctionReturn(fd->getASTContext(), fd->getReturnType());

    if (shouldEmitUnreachable) {
      // At -O0 prefer a trap (debuggable); otherwise let the optimizer
      // exploit the unreachability.
      if (cgf.cgm.getCodeGenOpts().OptimizationLevel == 0)
        builder.create<cir::TrapOp>(localScope->endLoc);
      else
        builder.create<cir::UnreachableOp>(localScope->endLoc);
      builder.clearInsertionPoint();
      return;
    }
  }

  (void)emitReturn(localScope->endLoc);
}
397
// Set up per-function state before emitting the body: bind `curFn`, declare
// parameters into the symbol table (each backed by an entry-block alloca),
// create the return slot, and run the C++ instance-method prolog.
// NOTE(review): the opening signature line(s) are truncated in this
// extraction (the visible tail suggests startFunction(GlobalDecl gd,
// QualType returnType, cir::FuncOp fn, cir::FuncType funcType,
// FunctionArgList &args, SourceLocation loc, SourceLocation startLoc));
// several interior lines (after the `fd` declaration, inside the parameter
// loop, and in the method-prolog tail) also appear lost — confirm upstream.
                                  cir::FuncOp fn, cir::FuncType funcType,
                                  SourceLocation startLoc) {
  assert(!curFn &&
         "CIRGenFunction can only be used for one function at a time");

  curFn = fn;

  const Decl *d = gd.getDecl();
  const auto *fd = dyn_cast_or_null<FunctionDecl>(d);

  // Remember the cleanup depth so epilogue code can pop parameter cleanups.
  prologueCleanupDepth = ehStack.stable_begin();

  mlir::Block *entryBB = &fn.getBlocks().front();
  builder.setInsertionPointToStart(entryBB);

  // TODO(cir): this should live in `emitFunctionProlog
  // Declare all the function arguments in the symbol table.
  for (const auto nameValue : llvm::zip(args, entryBB->getArguments())) {
    const VarDecl *paramVar = std::get<0>(nameValue);
    mlir::Value paramVal = std::get<1>(nameValue);
    CharUnits alignment = getContext().getDeclAlign(paramVar);
    mlir::Location paramLoc = getLoc(paramVar->getSourceRange());
    paramVal.setLoc(paramLoc);

    mlir::Value addrVal =
        emitAlloca(cast<NamedDecl>(paramVar)->getName(),
                   convertType(paramVar->getType()), paramLoc, alignment,
                   /*insertIntoFnEntryBlock=*/true);

    declare(addrVal, paramVar, paramVar->getType(), paramLoc, alignment,
            /*isParam=*/true);

    setAddrOfLocalVar(paramVar, Address(addrVal, alignment));

    bool isPromoted = isa<ParmVarDecl>(paramVar) &&
                      cast<ParmVarDecl>(paramVar)->isKNRPromoted();
    if (isPromoted)
      cgm.errorNYI(fd->getSourceRange(), "Function argument demotion");

    // Location of the store to the param storage tracked as beginning of
    // the function body.
    mlir::Location fnBodyBegin = getLoc(fd->getBody()->getBeginLoc());
    builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addrVal);
  }
  assert(builder.getInsertionBlock() && "Should be valid");

  // When the current function is not void, create an address to store the
  // result value.
  if (!returnType->isVoidType())
    emitAndUpdateRetAlloca(returnType, getLoc(fd->getBody()->getEndLoc()),
                           getContext().getTypeAlignInChars(returnType));

  if (isa_and_nonnull<CXXMethodDecl>(d) &&
      cast<CXXMethodDecl>(d)->isInstance()) {
    cgm.getCXXABI().emitInstanceFunctionProlog(loc, *this);

    const auto *md = cast<CXXMethodDecl>(d);
    if (md->getParent()->isLambda() && md->getOverloadedOperator() == OO_Call) {
      cgm.errorNYI(loc, "lambda call operator");
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The fast
      // register allocator would be happier...
    }

  }
}
472
// Wrap up function emission by popping cleanups registered for the
// parameters.
// NOTE(review): the opening signature line is truncated in this extraction
// (presumably CIRGenFunction::finishFunction(SourceLocation endLoc)), and
// the body of the `hasCleanups` branch appears to have lost its statements —
// confirm against upstream before relying on this.
  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  // TODO(cir): Use prologueCleanupDepth here.
  bool hasCleanups = ehStack.stable_begin() != prologueCleanupDepth;
  if (hasCleanups) {
    // FIXME(cir): should we clearInsertionPoint? breaks many testcases
  }
}
486
// Emit the function's body statement. Compound statements are emitted
// without opening an extra scope, since the function-level scope already
// exists.
// NOTE(review): one line appears lost in this extraction right after the
// "function level scope" comment — confirm against upstream.
mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
  // We start with function level scope for variables.

  if (const CompoundStmt *block = dyn_cast<CompoundStmt>(body))
    return emitCompoundStmtWithoutScope(*block);

  return emitStmt(body, /*useCurrentScope=*/true);
}
496
497static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
498 // Remove any leftover blocks that are unreachable and empty, since they do
499 // not represent unreachable code useful for warnings nor anything deemed
500 // useful in general.
501 SmallVector<mlir::Block *> blocksToDelete;
502 for (mlir::Block &block : func.getBlocks()) {
503 if (block.empty() && block.getUses().empty())
504 blocksToDelete.push_back(&block);
505 }
506 for (mlir::Block *block : blocksToDelete)
507 block->erase();
508}
509
// Drive code generation for a single function definition: set up locations
// and the entry block, dispatch to the specialized body emitters
// (constructor/destructor/special members), and verify the result.
// NOTE(review): several interior lines are lost in this extraction (after
// the FusedLoc call's opening, around the lexical-scope comment, the
// constructor branch, and near the end before `return fn`) — confirm against
// upstream before relying on the exact control flow shown here.
cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
                                         cir::FuncType funcType) {
  const auto funcDecl = cast<FunctionDecl>(gd.getDecl());
  curGD = gd;

  SourceLocation loc = funcDecl->getLocation();
  Stmt *body = funcDecl->getBody();
  SourceRange bodyRange =
      body ? body->getSourceRange() : funcDecl->getLocation();

  SourceLocRAIIObject fnLoc{*this, loc.isValid() ? getLoc(loc)
                                                 : builder.getUnknownLoc()};

  auto validMLIRLoc = [&](clang::SourceLocation clangLoc) {
    return clangLoc.isValid() ? getLoc(clangLoc) : builder.getUnknownLoc();
  };
  const mlir::Location fusedLoc = mlir::FusedLoc::get(
      {validMLIRLoc(bodyRange.getBegin()), validMLIRLoc(bodyRange.getEnd())});
  mlir::Block *entryBB = fn.addEntryBlock();

  FunctionArgList args;
  QualType retTy = buildFunctionArgList(gd, args);

  // Create a scope in the symbol table to hold variable declarations.
  {
    LexicalScope lexScope(*this, fusedLoc, entryBB);

    startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin());

    if (isa<CXXDestructorDecl>(funcDecl)) {
      emitDestructorBody(args);
    } else if (isa<CXXConstructorDecl>(funcDecl)) {
    } else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice &&
               funcDecl->hasAttr<CUDAGlobalAttr>()) {
      getCIRGenModule().errorNYI(bodyRange, "CUDA kernel");
    } else if (isa<CXXMethodDecl>(funcDecl) &&
               cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker()) {
      getCIRGenModule().errorNYI(bodyRange, "Lambda static invoker");
    } else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) &&
               (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() ||
                cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator())) {
      // Implicit copy-assignment gets the same special treatment as implicit
      // copy-constructors.
    } else if (body) {
      if (mlir::failed(emitFunctionBody(body))) {
        return nullptr;
      }
    } else {
      // Anything without a body should have been handled above.
      llvm_unreachable("no definition for normal function");
    }

    if (mlir::failed(fn.verifyBody()))
      return nullptr;

    finishFunction(bodyRange.getEnd());
  }

  return fn;
}
575
// Emit the body of a C++ constructor: either delegate complete-to-base, or
// emit the prologue (base/member initializers) followed by the body.
// NOTE(review): the opening signature line is truncated in this extraction
// (presumably CIRGenFunction::emitConstructorBody(FunctionArgList &args)),
// and a line or two before `emitCtorPrologue` also appear lost — confirm
// against upstream.
  const auto *ctor = cast<CXXConstructorDecl>(curGD.getDecl());
  CXXCtorType ctorType = curGD.getCtorType();

  assert((cgm.getTarget().getCXXABI().hasConstructorVariants() ||
          ctorType == Ctor_Complete) &&
         "can only generate complete ctor for this ABI");

  // Complete ctors that can validly delegate simply call the base variant.
  if (ctorType == Ctor_Complete && isConstructorDelegationValid(ctor) &&
      cgm.getTarget().getCXXABI().hasConstructorVariants()) {
    emitDelegateCXXConstructorCall(ctor, Ctor_Base, args, ctor->getEndLoc());
    return;
  }

  const FunctionDecl *definition = nullptr;
  Stmt *body = ctor->getBody(definition);
  assert(definition == ctor && "emitting wrong constructor body");

  if (isa_and_nonnull<CXXTryStmt>(body)) {
    cgm.errorNYI(ctor->getSourceRange(), "emitConstructorBody: try body");
    return;
  }

  // TODO: in restricted cases, we can emit the vbase initializers of a
  // complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member initializers.
  emitCtorPrologue(ctor, ctorType, args);

  // TODO(cir): propagate this result via mlir::logical result. Just unreachable
  // now just to have it handled.
  if (mlir::failed(emitStmt(body, true))) {
    cgm.errorNYI(ctor->getSourceRange(),
                 "emitConstructorBody: emit body statement failed.");
    return;
  }
}
617
/// Emits the body of the current destructor.
// NOTE(review): the opening signature line is truncated in this extraction
// (presumably CIRGenFunction::emitDestructorBody(FunctionArgList &args)),
// along with several interior lines (after the `body` declaration, around
// the dtorType switch's Complete and Base cases, and before the try-exit) —
// confirm against upstream before relying on the exact statements shown.
  const CXXDestructorDecl *dtor = cast<CXXDestructorDecl>(curGD.getDecl());
  CXXDtorType dtorType = curGD.getDtorType();

  // For an abstract class, non-base destructors are never used (and can't
  // be emitted in general, because vbase dtors may not have been validated
  // by Sema), but the Itanium ABI doesn't make them optional and Clang may
  // in fact emit references to them from other compilations, so emit them
  // as functions containing a trap instruction.
  if (dtorType != Dtor_Base && dtor->getParent()->isAbstract()) {
    cgm.errorNYI(dtor->getSourceRange(), "abstract base class destructors");
    return;
  }

  Stmt *body = dtor->getBody();

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor. Do so.
  if (dtorType == Dtor_Deleting) {
    cgm.errorNYI(dtor->getSourceRange(), "deleting destructor");
    return;
  }

  // If the body is a function-try-block, enter the try before
  // anything else.
  const bool isTryBody = isa_and_nonnull<CXXTryStmt>(body);
  if (isTryBody)
    cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases. But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks. In the Microsoft ABI, we
  // always delegate because we might not have a definition in this TU.
  switch (dtorType) {
  case Dtor_Unified:
    llvm_unreachable("not expecting a unified dtor");
  case Dtor_Comdat:
    llvm_unreachable("not expecting a COMDAT");
  case Dtor_Deleting:
    llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    assert((body || getTarget().getCXXABI().isMicrosoft()) &&
           "can't emit a dtor without a body for non-Microsoft ABIs");

    if (!isTryBody) {
      // Delegate to the base variant; the epilogue destructs vbases.
      emitCXXDestructorCall(dtor, Dtor_Base, /*forVirtualBase=*/false,
                            /*delegating=*/false, loadCXXThisAddress(), thisTy);
      break;
    }

    // Fallthrough: act like we're in the base variant.
    [[fallthrough]];

  case Dtor_Base:
    assert(body);

    if (isTryBody) {
      cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
    } else if (body) {
      (void)emitStmt(body, /*useCurrentScope=*/true);
    } else {
      assert(dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.

    break;
  }

  // Exit the try if applicable.
  if (isTryBody)
    cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
}
710
/// Given a value of type T* that may not be to a complete object, construct
/// an l-vlaue withi the natural pointee alignment of T.
// NOTE(review): the opening signature line is truncated in this extraction;
// the member listing elsewhere in this page shows it as
//   LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
// One interior line before the alignment computation also appears lost.
  // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps
  // assert on the result type first.
  LValueBaseInfo baseInfo;
  CharUnits align = cgm.getNaturalTypeAlignment(ty, &baseInfo);
  return makeAddrLValue(Address(val, align), ty, baseInfo);
}
722
// Build an l-value for `val` at the natural alignment of `ty`, using the
// in-memory CIR type for the address.
// NOTE(review): the opening signature line is truncated in this extraction;
// the member listing elsewhere in this page shows it as
//   LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
                                               QualType ty) {
  LValueBaseInfo baseInfo;
  CharUnits alignment = cgm.getNaturalTypeAlignment(ty, &baseInfo);
  Address addr(val, convertTypeForMem(ty), alignment);
  return makeAddrLValue(addr, ty, baseInfo);
}
731
// Populate `args` with the declared and implicit parameters of `gd`
// (including the ABI's `this` and structor parameters) and return the
// function's declared return type.
// NOTE(review): the opening signature line is truncated in this extraction
// (presumably QualType CIRGenFunction::buildFunctionArgList(GlobalDecl gd,
// FunctionArgList &args)) — confirm against the header.
                                              FunctionArgList &args) {
  const auto *fd = cast<FunctionDecl>(gd.getDecl());
  QualType retTy = fd->getReturnType();

  const auto *md = dyn_cast<CXXMethodDecl>(fd);
  if (md && md->isInstance()) {
    if (cgm.getCXXABI().hasThisReturn(gd))
      cgm.errorNYI(fd->getSourceRange(), "this return");
    else if (cgm.getCXXABI().hasMostDerivedReturn(gd))
      cgm.errorNYI(fd->getSourceRange(), "most derived return");
    // The ABI prepends the implicit `this` parameter.
    cgm.getCXXABI().buildThisParam(*this, args);
  }

  if (const auto *cd = dyn_cast<CXXConstructorDecl>(fd))
    if (cd->getInheritedConstructor())
      cgm.errorNYI(fd->getSourceRange(),
                   "buildFunctionArgList: inherited constructor");

  for (auto *param : fd->parameters())
    args.push_back(param);

  // Constructors/destructors may carry extra implicit ABI parameters (e.g.
  // VTT); the ABI appends them here.
  if (md && (isa<CXXConstructorDecl>(md) || isa<CXXDestructorDecl>(md)))
    cgm.getCXXABI().addImplicitStructorParams(*this, retTy, args);

  return retTy;
}
759
/// Emit code to compute a designator that specifies the location
/// of the expression.
/// FIXME: document this function better.
// NOTE(review): this function is heavily truncated in this extraction — the
// signature line (LValue CIRGenFunction::emitLValue(const clang::Expr *e) per
// the member listing) and most of the per-case `return emit...LValue(...)`
// statements are lost, leaving bare case labels. Do not treat the fallthrough
// structure shown here as authoritative; reconstruct from upstream.
  // FIXME: ApplyDebugLocation DL(*this, e);
  switch (e->getStmtClass()) {
  default:
        std::string("l-value not implemented for '") +
        e->getStmtClassName() + "'");
    return LValue();
  case Expr::ArraySubscriptExprClass:
  case Expr::UnaryOperatorClass:
  case Expr::StringLiteralClass:
  case Expr::MemberExprClass:
  case Expr::CompoundLiteralExprClass:
  case Expr::BinaryOperatorClass:
  case Expr::CompoundAssignOperatorClass: {
    QualType ty = e->getType();
    if (ty->getAs<AtomicType>()) {
      cgm.errorNYI(e->getSourceRange(),
                   "CompoundAssignOperator with AtomicType");
      return LValue();
    }
    if (!ty->isAnyComplexType())

  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
  case Expr::ParenExprClass:
    return emitLValue(cast<ParenExpr>(e)->getSubExpr());
  case Expr::DeclRefExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::ImplicitCastExprClass:
  case Expr::MaterializeTemporaryExprClass:
  }
}
813
814static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) {
815 SmallString<256> buffer;
816 llvm::raw_svector_ostream out(buffer);
817 out << name << cnt;
818 return std::string(out.str());
819}
820
824
828
// Zero-initialize the object at `destPtr`: empty C++ classes and zero-size
// types are skipped; otherwise a #cir.zero constant is stored.
// NOTE(review): one interior line appears lost in this extraction right after
// the "Ignore empty classes" comment — confirm against upstream.
void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address destPtr,
                                            QualType ty) {
  // Ignore empty classes in C++.
  if (const auto *rd = ty->getAsCXXRecordDecl(); rd && rd->isEmpty())
    return;

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (builder.isInt8Ty(destPtr.getElementType())) {
    cgm.errorNYI(loc, "Cast the dest ptr to the appropriate i8 pointer type");
  }

  // Get size and alignment info for this aggregate.
  const CharUnits size = getContext().getTypeSizeInChars(ty);
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (isa<VariableArrayType>(getContext().getAsArrayType(ty))) {
      cgm.errorNYI(loc,
                   "emitNullInitialization for zero size VariableArrayType");
    } else {
      return;
    }
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!cgm.getTypes().isZeroInitializable(ty)) {
    cgm.errorNYI(loc, "type is not zero initializable");
  }

  // In LLVM Codegen: otherwise, just memset the whole thing to zero using
  // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the
  // respective address.
  // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
  const mlir::Value zeroValue = builder.getNullValue(convertType(ty), loc);
  builder.createStore(loc, zeroValue, destPtr);
}
868
// TODO(cir): should be shared with LLVM codegen.
// Decide whether a class-hierarchy cast needs a runtime null check on its
// operand. Returns false when the operand is provably non-null.
// NOTE(review): the opening signature line is truncated in this extraction —
// from the uses of `ce` (getCastKind, getSubExpr) this is presumably
// shouldNullCheckClassCastValue(const CastExpr *ce); confirm upstream.
  const Expr *e = ce->getSubExpr();

  // Unchecked derived-to-base casts never need a null check by definition.
  if (ce->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(e->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(ce)) {
    // And that glvalue casts are never null.
    if (ice->isGLValue())
      return false;
  }

  return true;
}
889
/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
// NOTE(review): the remainder of the signature is truncated in this
// extraction (presumably CIRGenFunction::emitArrayLength(const
// clang::ArrayType *origArrayType, QualType &baseType, Address &addr)),
// as are the VLA-check line and the loop's opening assertion — confirm
// against upstream.
mlir::Value
                                   QualType &baseType, Address &addr) {
  const clang::ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size. Note that
  // this is the size of the VLA in bytes, not its size in elements.
    cgm.errorNYI(*currSrcLoc, "VLAs");
    return builder.getConstInt(*currSrcLoc, SizeTy, 0);
  }

  uint64_t countFromCLAs = 1;
  QualType eltType;

  auto cirArrayType = mlir::dyn_cast<cir::ArrayType>(addr.getElementType());

  // Walk nested constant-length arrays in lock-step on the CIR and Clang
  // sides, multiplying the element counts together.
  while (cirArrayType) {
    countFromCLAs *= cirArrayType.getSize();
    eltType = arrayType->getElementType();

    cirArrayType =
        mlir::dyn_cast<cir::ArrayType>(cirArrayType.getElementType());

    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!cirArrayType || arrayType) &&
           "CIR and Clang types are out-of-sync");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    cgm.errorNYI(*currSrcLoc, "length for non-array underlying types");
  }

  baseType = eltType;
  return builder.getConstInt(*currSrcLoc, SizeTy, countFromCLAs);
}
933
// Emit a cir.assume_aligned op recording an alignment guarantee on
// `ptrValue`.
// NOTE(review): the opening line of the signature and one line before the
// op creation are truncated in this extraction — confirm against upstream.
    mlir::Value ptrValue, QualType ty, SourceLocation loc,
    SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue) {
  return cir::AssumeAlignedOp::create(builder, getLoc(assumptionLoc), ptrValue,
                                      alignment, offsetValue);
}
941
// Convenience overload: derive the type and source location from `expr`,
// then forward to the primary emitAlignmentAssumption.
// NOTE(review): the opening line of the signature is truncated in this
// extraction — confirm against upstream.
    mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc,
    int64_t alignment, mlir::Value offsetValue) {
  QualType ty = expr->getType();
  SourceLocation loc = expr->getExprLoc();
  return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment,
                                 offsetValue);
}
950
// TODO(cir): Most of this function can be shared between CIRGen
// and traditional LLVM codegen
//
// Walk down a variably-modified type, desugaring one level at a time, until
// the VLA size expressions (or a typeof expression) are reached and emitted.
// NOTE(review): the opening signature line is truncated in this extraction;
// the member listing elsewhere in this page shows it as
//   void emitVariablyModifiedType(QualType ty)
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {
    case Type::CountAttributed:
    case Type::PackIndexing:
    case Type::ArrayParameter:
    case Type::HLSLAttributedResource:
    case Type::HLSLInlineSpirv:
    case Type::PredefinedSugar:
      cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
      break;

// Dependent types must have been resolved by Sema before codegen.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable(
          "dependent type must be resolved before the CIR codegen");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::Record:
    case Type::Enum:
    case Type::Using:
    case Type::TemplateSpecialization:
    case Type::ObjCTypeParam:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
    case Type::BitInt:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<clang::AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<clang::DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<clang::PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<clang::BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<clang::ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<clang::MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<clang::ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType VLA");
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<clang::FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::BTFTagAttributed:
    case Type::SubstTemplateTypeParm:
    case Type::MacroQualified:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      emitIgnoredExpr(cast<clang::TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<clang::AtomicType>(ty)->getValueType();
      break;

    case Type::Pipe:
      type = cast<clang::PipeType>(ty)->getElementType();
      break;
    }
  } while (type->isVariablyModifiedType());
}
1073
1075 if (getContext().getBuiltinVaListType()->isArrayType())
1076 return emitPointerWithAlignment(e);
1077 return emitLValue(e).getAddress();
1078}
1079
1080} // namespace clang::CIRGen
Defines the clang::Expr interface and subclasses for C++ expressions.
__device__ __2f16 float __ockl_bool s
APSInt & getInt()
Definition APValue.h:489
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:188
SourceManager & getSourceManager()
Definition ASTContext.h:798
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3720
mlir::Type getElementType() const
Definition Address.h:101
void forceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
static bool isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor)
Checks whether the given constructor is a valid subject for the complete-to-base constructor delegati...
mlir::Type convertType(clang::QualType t)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
EHScopeStack::stable_iterator prologueCleanupDepth
The cleanup depth enclosing all the cleanups associated with the parameters.
cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn, cir::FuncType funcType)
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
const clang::LangOptions & getLangOpts() const
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not point to a complete object, construct an l-value with the natural pointee alignment of T.
LValue emitMemberExpr(const MemberExpr *e)
const TargetInfo & getTarget() const
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
const clang::Decl * curFuncDecl
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e)
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
LValue emitStringLiteralLValue(const StringLiteral *e)
void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, const FunctionArgList &args, clang::SourceLocation loc)
mlir::Value emitArrayLength(const clang::ArrayType *arrayType, QualType &baseType, Address &addr)
Computes the length of an array in elements, as well as the base element type and a properly-typed fi...
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
void emitImplicitAssignmentOperatorBody(FunctionArgList &args)
mlir::Type convertTypeForMem(QualType t)
clang::QualType buildFunctionArgList(clang::GlobalDecl gd, FunctionArgList &args)
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, FunctionArgList &args)
This routine generates necessary code to initialize base classes and non-static data members belongin...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
Address returnValue
The temporary alloca to hold the return value.
void finishFunction(SourceLocation endLoc)
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
void emitConstructorBody(FunctionArgList &args)
LValue emitCallExprLValue(const clang::CallExpr *e)
cir::FuncOp curFn
The function for which code is currently being generated.
bool shouldNullCheckClassCastValue(const CastExpr *ce)
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
void startFunction(clang::GlobalDecl gd, clang::QualType returnType, cir::FuncOp fn, cir::FuncType funcType, FunctionArgList args, clang::SourceLocation loc, clang::SourceLocation startLoc)
Emit code for the start of a function.
unsigned counterRefTmp
Hold counters for incrementally naming temporaries.
mlir::MLIRContext & getMLIRContext()
void emitDestructorBody(FunctionArgList &args)
Emits the body of the current destructor.
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
clang::ASTContext & getContext() const
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
Set the address of a local variable.
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth)
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
Type for representing both the decl and type of parameters to a function.
Definition CIRGenCall.h:191
Address getAddress() const
Represents a C++ destructor within a class.
Definition DeclCXX.h:2869
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition DeclCXX.h:2255
QualType getFunctionObjectParameterType() const
Definition DeclCXX.h:2279
bool isAbstract() const
Determine whether this class has a pure virtual function.
Definition DeclCXX.h:1221
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3612
CastKind getCastKind() const
Definition Expr.h:3656
Expr * getSubExpr()
Definition Expr.h:3662
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1720
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:593
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks,...
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3069
QualType getType() const
Definition Expr.h:144
Represents a function declaration or definition.
Definition Decl.h:1999
Stmt * getBody(const FunctionDecl *&Definition) const
Retrieve the body (definition) of the function.
Definition Decl.cpp:3271
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4490
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition Expr.h:3789
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
unsigned getLine() const
Return the presumed line number of this location.
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2867
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
This class handles loading and caching of source files into memory.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location that a SourceLocation specifies.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition Stmt.h:85
StmtClass getStmtClass() const
Definition Stmt.h:1472
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:334
const char * getStmtClassName() const
Definition Stmt.cpp:87
bool isVoidType() const
Definition TypeBase.h:8878
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isAnyComplexType() const
Definition TypeBase.h:8657
TypeClass getTypeClass() const
Definition TypeBase.h:2385
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9101
QualType getType() const
Definition Decl.h:722
Represents a variable declaration or definition.
Definition Decl.h:925
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:2190
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt)
static bool mayDropFunctionReturn(const ASTContext &astContext, QualType returnType)
static void eraseEmptyAndUnusedBlocks(cir::FuncOp func)
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
@ Ctor_Base
Base object ctor.
Definition ABI.h:26
@ Ctor_Complete
Complete object ctor.
Definition ABI.h:25
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
CXXDtorType
C++ destructor types.
Definition ABI.h:34
@ Dtor_Comdat
The COMDAT used for dtors.
Definition ABI.h:38
@ Dtor_Unified
GCC-style unified dtor.
Definition ABI.h:39
@ Dtor_Base
Base object dtor.
Definition ABI.h:37
@ Dtor_Complete
Complete object dtor.
Definition ABI.h:36
@ Dtor_Deleting
Deleting dtor.
Definition ABI.h:35
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool vtableInitialization()
static bool constructABIArgDirectExtend()
static bool dtorCleanups()
static bool runCleanupsScope()
static bool emitTypeCheck()
static bool generateDebugInfo()
static bool incrementProfileCounter()
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
mlir::Block * getCleanupBlock(mlir::OpBuilder &builder)
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647