clang 22.0.0git
CIRGenExprAggregate.cpp
Go to the documentation of this file.
1//===- CIRGenExprAggregrate.cpp - Emit CIR Code from Aggregate Expressions ===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Aggregate Expr nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenBuilder.h"
14#include "CIRGenFunction.h"
15#include "CIRGenValue.h"
17
18#include "clang/AST/Expr.h"
21#include <cstdint>
22
23using namespace clang;
24using namespace clang::CIRGen;
25
namespace {
/// Visitor that emits CIR for expressions of aggregate type (structs, unions,
/// arrays), writing the result into a destination AggValueSlot. Many of the
/// Visit* overloads below are still stubs that only report "not yet
/// implemented" (NYI) diagnostics.
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {

  CIRGenFunction &cgf; // Function being emitted; provides builder & context.
  AggValueSlot dest;   // Destination slot receiving the aggregate value.

  // Calls `fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *e,
                           llvm::function_ref<RValue(ReturnValueSlot)> fn);

  // Returns `dest` when it is usable; creating a temporary slot for an
  // ignored destination is not implemented yet (NYI diagnostic).
  AggValueSlot ensureSlot(mlir::Location loc, QualType t) {
    if (!dest.isIgnored())
      return dest;

    cgf.cgm.errorNYI(loc, "Slot for ignored address");
    return dest;
  }

public:
  AggExprEmitter(CIRGenFunction &cgf, AggValueSlot dest)
      : cgf(cgf), dest(dest) {}

  /// Given an expression with aggregate type that represents a value lvalue,
  /// this method emits the address of the lvalue, then loads the result into
  /// DestPtr.
  void emitAggLoadOfLValue(const Expr *e);

  /// Emit initialization of the array at `destPtr` from the explicit
  /// initializers in `args`, plus the optional `arrayFiller` for the
  /// remaining elements.
  void emitArrayInit(Address destPtr, cir::ArrayType arrayTy, QualType arrayQTy,
                     Expr *exprToVisit, ArrayRef<Expr *> args,
                     Expr *arrayFiller);

  /// Perform the final copy to DestPtr, if desired.
  void emitFinalDestCopy(QualType type, const LValue &src);

  /// Copy `src` into `dest`; both slots hold values of type `type`.
  void emitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  /// Emit the initializer expression `e` into the location described by `lv`.
  void emitInitializationToLValue(Expr *e, LValue lv);

  /// Store a null/zero value of lv's type into `lv`.
  void emitNullInitializationToLValue(mlir::Location loc, LValue lv);

  void Visit(Expr *e) { StmtVisitor<AggExprEmitter>::Visit(e); }

  void VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
    emitAggLoadOfLValue(e);
  }

  void VisitCallExpr(const CallExpr *e);
  void VisitStmtExpr(const StmtExpr *e) {
    // Emit the compound statement with a temporary for its result value,
    // passing `dest` as the slot for the trailing aggregate expression.
    CIRGenFunction::StmtExprEvaluation eval(cgf);
    Address retAlloca =
        cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
    (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca, dest);
  }

  void VisitDeclRefExpr(DeclRefExpr *e) { emitAggLoadOfLValue(e); }

  void VisitInitListExpr(InitListExpr *e);
  void VisitCXXConstructExpr(const CXXConstructExpr *e);

  void visitCXXParenListOrInitListExpr(Expr *e, ArrayRef<Expr *> args,
                                       FieldDecl *initializedFieldInUnion,
                                       Expr *arrayFiller);
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
    // Enter the scope in which the default member initializer was written.
    CIRGenFunction::CXXDefaultInitExprScope Scope(cgf, die);
    Visit(die->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *e) {
    Visit(e->getSubExpr());
  }

  // Stubs -- These should be moved up when they are implemented.
  void VisitCastExpr(CastExpr *e) {
    switch (e->getCastKind()) {
    case CK_LValueToRValue:
      // If we're loading from a volatile type, force the destination
      // into existence.
      // NOTE(review): upstream guards this NYI on the sub-expression being
      // volatile-qualified; that guard appears truncated in this copy --
      // confirm against the original source.
      cgf.cgm.errorNYI(e->getSourceRange(),
                       "AggExprEmitter: volatile lvalue-to-rvalue cast");
      [[fallthrough]];
    case CK_NoOp:
    case CK_UserDefinedConversion:
    case CK_ConstructorConversion:
      assert(cgf.getContext().hasSameUnqualifiedType(e->getSubExpr()->getType(),
                                                     e->getType()) &&
             "Implicit cast types must be compatible");
      Visit(e->getSubExpr());
      break;
    default:
      cgf.cgm.errorNYI(e->getSourceRange(),
                       std::string("AggExprEmitter: VisitCastExpr: ") +
                           e->getCastKindName());
      break;
    }
  }
  void VisitStmt(Stmt *s) {
    cgf.cgm.errorNYI(s->getSourceRange(),
                     std::string("AggExprEmitter::VisitStmt: ") +
                         s->getStmtClassName());
  }
  void VisitParenExpr(ParenExpr *pe) {
    cgf.cgm.errorNYI(pe->getSourceRange(), "AggExprEmitter: VisitParenExpr");
  }
  void VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
    cgf.cgm.errorNYI(ge->getSourceRange(),
                     "AggExprEmitter: VisitGenericSelectionExpr");
  }
  void VisitCoawaitExpr(CoawaitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoawaitExpr");
  }
  void VisitCoyieldExpr(CoyieldExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoyieldExpr");
  }
  void VisitUnaryCoawait(UnaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitUnaryCoawait");
  }
  void VisitUnaryExtension(UnaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitUnaryExtension");
  }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitSubstNonTypeTemplateParmExpr");
  }
  void VisitConstantExpr(ConstantExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitConstantExpr");
  }
  void VisitMemberExpr(MemberExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitMemberExpr");
  }
  void VisitUnaryDeref(UnaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitUnaryDeref");
  }
  void VisitStringLiteral(StringLiteral *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitStringLiteral");
  }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCompoundLiteralExpr");
  }
  void VisitPredefinedExpr(const PredefinedExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitPredefinedExpr");
  }
  void VisitBinaryOperator(const BinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitBinaryOperator");
  }
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitPointerToDataMemberBinaryOperator");
  }
  void VisitBinAssign(const BinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinAssign");
  }
  void VisitBinComma(const BinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinComma");
  }
  void VisitBinCmp(const BinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinCmp");
  }
  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCXXRewrittenBinaryOperator");
  }
  void VisitObjCMessageExpr(ObjCMessageExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitObjCMessageExpr");
  }
  void VisitObjCIVarRefExpr(ObjCIvarRefExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitObjCIVarRefExpr");
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitDesignatedInitUpdateExpr");
  }
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitAbstractConditionalOperator");
  }
  void VisitChooseExpr(const ChooseExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitChooseExpr");
  }
  void VisitCXXParenListInitExpr(CXXParenListInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCXXParenListInitExpr");
  }
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *e,
                              llvm::Value *outerBegin = nullptr) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitArrayInitLoopExpr");
  }
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitImplicitValueInitExpr");
  }
  void VisitNoInitExpr(NoInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitNoInitExpr");
  }
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) {
    cgf.cgm.errorNYI(dae->getSourceRange(),
                     "AggExprEmitter: VisitCXXDefaultArgExpr");
  }
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCXXInheritedCtorInitExpr");
  }
  void VisitLambdaExpr(LambdaExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitLambdaExpr");
  }
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCXXStdInitializerListExpr");
  }

  void VisitExprWithCleanups(ExprWithCleanups *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitExprWithCleanups");
  }
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCXXScalarValueInitExpr");
  }
  void VisitCXXTypeidExpr(CXXTypeidExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXTypeidExpr");
  }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitMaterializeTemporaryExpr");
  }
  void VisitOpaqueValueExpr(OpaqueValueExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitOpaqueValueExpr");
  }

  void VisitPseudoObjectExpr(PseudoObjectExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitPseudoObjectExpr");
  }

  void VisitVAArgExpr(VAArgExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitVAArgExpr");
  }

  void VisitCXXThrowExpr(const CXXThrowExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXThrowExpr");
  }
  void VisitAtomicExpr(AtomicExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitAtomicExpr");
  }
};

} // namespace
288
289static bool isTrivialFiller(Expr *e) {
290 if (!e)
291 return true;
292
294 return true;
295
296 if (auto *ile = dyn_cast<InitListExpr>(e)) {
297 if (ile->getNumInits())
298 return false;
299 return isTrivialFiller(ile->getArrayFiller());
300 }
301
302 if (const auto *cons = dyn_cast_or_null<CXXConstructExpr>(e))
303 return cons->getConstructor()->isDefaultConstructor() &&
304 cons->getConstructor()->isTrivial();
305
306 return false;
307}
308
309/// Given an expression with aggregate type that represents a value lvalue, this
310/// method emits the address of the lvalue, then loads the result into DestPtr.
311void AggExprEmitter::emitAggLoadOfLValue(const Expr *e) {
312 LValue lv = cgf.emitLValue(e);
313
314 // If the type of the l-value is atomic, then do an atomic load.
316
317 emitFinalDestCopy(e->getType(), lv);
318}
319
/// Emit initialization of the array at `destPtr` (of CIR type `arrayTy`,
/// Clang type `arrayQTy`) from the explicit initializers in `args`, then
/// initialize any remaining elements with `arrayFiller` (or zeros) via a
/// generated do-while loop. `e` supplies the source location.
void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
                                   QualType arrayQTy, Expr *e,
                                   ArrayRef<Expr *> args, Expr *arrayFiller) {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  const mlir::Location loc = cgf.getLoc(e->getSourceRange());

  const uint64_t numInitElements = args.size();

  const QualType elementType =
      cgf.getContext().getAsArrayType(arrayQTy)->getElementType();

  // Cleaning up already-initialized elements on exception unwind is NYI.
  if (elementType.isDestructedType() && cgf.cgm.getLangOpts().Exceptions) {
    cgf.cgm.errorNYI(loc, "initialized array requires destruction");
    return;
  }

  const QualType elementPtrType = cgf.getContext().getPointerType(elementType);

  const mlir::Type cirElementType = cgf.convertType(elementType);
  const cir::PointerType cirElementPtrType =
      builder.getPointerTo(cirElementType);

  // Decay the array to a pointer to its first element.
  auto begin = cir::CastOp::create(builder, loc, cirElementPtrType,
                                   cir::CastKind::array_to_ptrdecay,
                                   destPtr.getPointer());

  const CharUnits elementSize =
      cgf.getContext().getTypeSizeInChars(elementType);
  const CharUnits elementAlign =
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);

  // The 'current element to initialize'. The invariants on this
  // variable are complicated. Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  mlir::Value element = begin;

  // Don't build the 'one' before the cycle to avoid
  // emitting the redundant `cir.const 1` instrs.
  mlir::Value one;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != numInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      // NOTE: despite the name, `one` holds the index `i` here; each
      // element is addressed as a stride of `i` from `begin`.
      one = builder.getConstantInt(loc, cgf.PtrDiffTy, i);
      element = builder.createPtrStride(loc, begin, one);
    }

    const Address address = Address(element, cirElementType, elementAlign);
    const LValue elementLV = cgf.makeAddrLValue(address, elementType);
    emitInitializationToLValue(args[i], elementLV);
  }

  const uint64_t numArrayElements = arrayTy.getSize();

  // Check whether there's a non-trivial array-fill expression.
  const bool hasTrivialFiller = isTrivialFiller(arrayFiller);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression. We can skip this if the we're
  // emitting to zeroed memory.
  if (numInitElements != numArrayElements &&
      !(dest.isZeroed() && hasTrivialFiller &&
        cgf.getTypes().isZeroInitializable(elementType))) {
    // Advance to the start of the rest of the array.
    if (numInitElements) {
      one = builder.getConstantInt(loc, cgf.PtrDiffTy, 1);
      element = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
                                         element, one);
    }

    // Allocate the temporary variable
    // to store the pointer to first unitialized element
    const Address tmpAddr = cgf.createTempAlloca(
        cirElementPtrType, cgf.getPointerAlign(), loc, "arrayinit.temp");
    LValue tmpLV = cgf.makeAddrLValue(tmpAddr, elementPtrType);
    cgf.emitStoreThroughLValue(RValue::get(element), tmpLV);

    // Compute the end of array
    cir::ConstantOp numArrayElementsConst = builder.getConstInt(
        loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), numArrayElements);
    mlir::Value end = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
                                               begin, numArrayElementsConst);

    // Loop: while the current element pointer != end, fill the element and
    // advance the pointer stored in the temporary.
    builder.createDoWhile(
        loc,
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);
          mlir::Type boolTy = cgf.convertType(cgf.getContext().BoolTy);
          cir::CmpOp cmp = cir::CmpOp::create(
              builder, loc, boolTy, cir::CmpOpKind::ne, currentElement, end);
          builder.createCondition(cmp);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);

          // Emit the actual filler expression.
          LValue elementLV = cgf.makeAddrLValue(
              Address(currentElement, cirElementType, elementAlign),
              elementType);
          if (arrayFiller)
            emitInitializationToLValue(arrayFiller, elementLV);
          else
            emitNullInitializationToLValue(loc, elementLV);

          // Tell the EH cleanup that we finished with the last element.
          if (cgf.cgm.getLangOpts().Exceptions) {
            cgf.cgm.errorNYI(loc, "update destructed array element for EH");
            return;
          }

          // Advance pointer and store them to temporary variable
          cir::ConstantOp one = builder.getConstInt(
              loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), 1);
          auto nextElement = cir::PtrStrideOp::create(
              builder, loc, cirElementPtrType, currentElement, one);
          cgf.emitStoreThroughLValue(RValue::get(nextElement), tmpLV);

          builder.createYield(loc);
        });
  }
}
448
449/// Perform the final copy to destPtr, if desired.
450void AggExprEmitter::emitFinalDestCopy(QualType type, const LValue &src) {
451 // If dest is ignored, then we're evaluating an aggregate expression
452 // in a context that doesn't care about the result. Note that loads
453 // from volatile l-values force the existence of a non-ignored
454 // destination.
455 if (dest.isIgnored())
456 return;
457
461
462 AggValueSlot srcAgg = AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
465 emitCopy(type, dest, srcAgg);
466}
467
468/// Perform a copy from the source into the destination.
469///
470/// \param type - the type of the aggregate being copied; qualifiers are
471/// ignored
472void AggExprEmitter::emitCopy(QualType type, const AggValueSlot &dest,
473 const AggValueSlot &src) {
475
476 // If the result of the assignment is used, copy the LHS there also.
477 // It's volatile if either side is. Use the minimum alignment of
478 // the two sides.
479 LValue destLV = cgf.makeAddrLValue(dest.getAddress(), type);
480 LValue srcLV = cgf.makeAddrLValue(src.getAddress(), type);
482 cgf.emitAggregateCopy(destLV, srcLV, type, dest.mayOverlap());
483}
484
485void AggExprEmitter::emitInitializationToLValue(Expr *e, LValue lv) {
486 const QualType type = lv.getType();
487
489 const mlir::Location loc = e->getSourceRange().isValid()
490 ? cgf.getLoc(e->getSourceRange())
491 : *cgf.currSrcLoc;
492 return emitNullInitializationToLValue(loc, lv);
493 }
494
495 if (isa<NoInitExpr>(e))
496 return;
497
498 if (type->isReferenceType())
499 cgf.cgm.errorNYI("emitInitializationToLValue ReferenceType");
500
501 switch (cgf.getEvaluationKind(type)) {
502 case cir::TEK_Complex:
503 cgf.cgm.errorNYI("emitInitializationToLValue TEK_Complex");
504 break;
509 dest.isZeroed()));
510
511 return;
512 case cir::TEK_Scalar:
513 if (lv.isSimple())
514 cgf.emitScalarInit(e, cgf.getLoc(e->getSourceRange()), lv);
515 else
517 return;
518 }
519}
520
521void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *e) {
522 AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
523 cgf.emitCXXConstructExpr(e, slot);
524}
525
526void AggExprEmitter::emitNullInitializationToLValue(mlir::Location loc,
527 LValue lv) {
528 const QualType type = lv.getType();
529
530 // If the destination slot is already zeroed out before the aggregate is
531 // copied into it, we don't have to emit any zeros here.
532 if (dest.isZeroed() && cgf.getTypes().isZeroInitializable(type))
533 return;
534
535 if (cgf.hasScalarEvaluationKind(type)) {
536 // For non-aggregates, we can store the appropriate null constant.
537 mlir::Value null = cgf.cgm.emitNullConstant(type, loc);
538 if (lv.isSimple()) {
539 cgf.emitStoreOfScalar(null, lv, /* isInitialization */ true);
540 return;
541 }
542
543 cgf.cgm.errorNYI("emitStoreThroughBitfieldLValue");
544 return;
545 }
546
547 // There's a potential optimization opportunity in combining
548 // memsets; that would be easy for arrays, but relatively
549 // difficult for structures with the current code.
550 cgf.emitNullInitialization(loc, lv.getAddress(), lv.getType());
551}
552
553void AggExprEmitter::VisitCallExpr(const CallExpr *e) {
555 cgf.cgm.errorNYI(e->getSourceRange(), "reference return type");
556 return;
557 }
558
559 withReturnValueSlot(
560 e, [&](ReturnValueSlot slot) { return cgf.emitCallExpr(e, slot); });
561}
562
563void AggExprEmitter::withReturnValueSlot(
564 const Expr *e, llvm::function_ref<RValue(ReturnValueSlot)> fn) {
565 QualType retTy = e->getType();
566
568 bool requiresDestruction =
570 if (requiresDestruction)
571 cgf.cgm.errorNYI(
572 e->getSourceRange(),
573 "withReturnValueSlot: return value requiring destruction is NYI");
574
575 // If it makes no observable difference, save a memcpy + temporary.
576 //
577 // We need to always provide our own temporary if destruction is required.
578 // Otherwise, fn will emit its own, notice that it's "unused", and end its
579 // lifetime before we have the chance to emit a proper destructor call.
582
583 Address retAddr = dest.getAddress();
585
588 fn(ReturnValueSlot(retAddr));
589}
590
591void AggExprEmitter::VisitInitListExpr(InitListExpr *e) {
593 llvm_unreachable("GNU array range designator extension");
594
595 if (e->isTransparent())
596 return Visit(e->getInit(0));
597
598 visitCXXParenListOrInitListExpr(
600}
601
602void AggExprEmitter::visitCXXParenListOrInitListExpr(
603 Expr *e, ArrayRef<Expr *> args, FieldDecl *initializedFieldInUnion,
604 Expr *arrayFiller) {
605
606 const AggValueSlot dest =
607 ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
608
609 if (e->getType()->isConstantArrayType()) {
610 cir::ArrayType arrayTy =
612 emitArrayInit(dest.getAddress(), arrayTy, e->getType(), e, args,
613 arrayFiller);
614 return;
615 } else if (e->getType()->isVariableArrayType()) {
616 cgf.cgm.errorNYI(e->getSourceRange(),
617 "visitCXXParenListOrInitListExpr variable array type");
618 return;
619 }
620
621 if (e->getType()->isArrayType()) {
622 cgf.cgm.errorNYI(e->getSourceRange(),
623 "visitCXXParenListOrInitListExpr array type");
624 return;
625 }
626
627 assert(e->getType()->isRecordType() && "Only support structs/unions here!");
628
629 // Do struct initialization; this code just sets each individual member
630 // to the approprate value. This makes bitfield support automatic;
631 // the disadvantage is that the generated code is more difficult for
632 // the optimizer, especially with bitfields.
633 unsigned numInitElements = args.size();
634 auto *record = e->getType()->castAsRecordDecl();
635
636 // We'll need to enter cleanup scopes in case any of the element
637 // initializers throws an exception.
639
640 unsigned curInitIndex = 0;
641
642 // Emit initialization of base classes.
643 if (auto *cxxrd = dyn_cast<CXXRecordDecl>(record)) {
644 assert(numInitElements >= cxxrd->getNumBases() &&
645 "missing initializer for base class");
646 if (cxxrd->getNumBases() > 0) {
647 cgf.cgm.errorNYI(e->getSourceRange(),
648 "visitCXXParenListOrInitListExpr base class init");
649 return;
650 }
651 }
652
653 LValue destLV = cgf.makeAddrLValue(dest.getAddress(), e->getType());
654
655 if (record->isUnion()) {
656 cgf.cgm.errorNYI(e->getSourceRange(),
657 "visitCXXParenListOrInitListExpr union type");
658 return;
659 }
660
661 // Here we iterate over the fields; this makes it simpler to both
662 // default-initialize fields and skip over unnamed fields.
663 for (const FieldDecl *field : record->fields()) {
664 // We're done once we hit the flexible array member.
665 if (field->getType()->isIncompleteArrayType())
666 break;
667
668 // Always skip anonymous bitfields.
669 if (field->isUnnamedBitField())
670 continue;
671
672 // We're done if we reach the end of the explicit initializers, we
673 // have a zeroed object, and the rest of the fields are
674 // zero-initializable.
675 if (curInitIndex == numInitElements && dest.isZeroed() &&
677 break;
678 LValue lv =
679 cgf.emitLValueForFieldInitialization(destLV, field, field->getName());
680 // We never generate write-barriers for initialized fields.
682
683 if (curInitIndex < numInitElements) {
684 // Store the initializer into the field.
685 CIRGenFunction::SourceLocRAIIObject loc{
686 cgf, cgf.getLoc(record->getSourceRange())};
687 emitInitializationToLValue(args[curInitIndex++], lv);
688 } else {
689 // We're out of initializers; default-initialize to null
690 emitNullInitializationToLValue(cgf.getLoc(e->getSourceRange()), lv);
691 }
692
693 // Push a destructor if necessary.
694 // FIXME: if we have an array of structures, all explicitly
695 // initialized, we can end up pushing a linear number of cleanups.
696 if (field->getType().isDestructedType()) {
697 cgf.cgm.errorNYI(e->getSourceRange(),
698 "visitCXXParenListOrInitListExpr destructor");
699 return;
700 }
701
702 // From classic codegen, maybe not useful for CIR:
703 // If the GEP didn't get used because of a dead zero init or something
704 // else, clean it up for -O0 builds and general tidiness.
705 }
706}
707
708// TODO(cir): This could be shared with classic codegen.
710 const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual) {
711 // If the most-derived object is a field declared with [[no_unique_address]],
712 // the tail padding of any virtual base could be reused for other subobjects
713 // of that field's class.
714 if (isVirtual)
716
717 // If the base class is laid out entirely within the nvsize of the derived
718 // class, its tail padding cannot yet be initialized, so we can issue
719 // stores at the full width of the base class.
720 const ASTRecordLayout &layout = getContext().getASTRecordLayout(rd);
721 if (layout.getBaseClassOffset(baseRD) +
722 getContext().getASTRecordLayout(baseRD).getSize() <=
723 layout.getNonVirtualSize())
725
726 // The tail padding may contain values we need to preserve.
728}
729
731 AggExprEmitter(*this, slot).Visit(const_cast<Expr *>(e));
732}
733
735 AggValueSlot::Overlap_t mayOverlap) {
736 // TODO(cir): this function needs improvements, commented code for now since
737 // this will be touched again soon.
738 assert(!ty->isAnyComplexType() && "Unexpected copy of complex");
739
740 Address destPtr = dest.getAddress();
741 Address srcPtr = src.getAddress();
742
743 if (getLangOpts().CPlusPlus) {
744 if (auto *record = ty->getAsCXXRecordDecl()) {
745 assert((record->hasTrivialCopyConstructor() ||
746 record->hasTrivialCopyAssignment() ||
747 record->hasTrivialMoveConstructor() ||
748 record->hasTrivialMoveAssignment() ||
749 record->hasAttr<TrivialABIAttr>() || record->isUnion()) &&
750 "Trying to aggregate-copy a type without a trivial copy/move "
751 "constructor or assignment operator");
752 // Ignore empty classes in C++.
753 if (record->isEmpty())
754 return;
755 }
756 }
757
759
760 // Aggregate assignment turns into llvm.memcpy. This is almost valid per
761 // C99 6.5.16.1p3, which states "If the value being stored in an object is
762 // read from another object that overlaps in anyway the storage of the first
763 // object, then the overlap shall be exact and the two objects shall have
764 // qualified or unqualified versions of a compatible type."
765 //
766 // memcpy is not defined if the source and destination pointers are exactly
767 // equal, but other compilers do this optimization, and almost every memcpy
768 // implementation handles this case safely. If there is a libc that does not
769 // safely handle this, we can add a target hook.
770
771 // Get data size info for this aggregate. Don't copy the tail padding if this
772 // might be a potentially-overlapping subobject, since the tail padding might
773 // be occupied by a different object. Otherwise, copying it is fine.
774 TypeInfoChars typeInfo;
775 if (mayOverlap)
776 typeInfo = getContext().getTypeInfoDataSizeInChars(ty);
777 else
778 typeInfo = getContext().getTypeInfoInChars(ty);
779
781
782 // NOTE(cir): original codegen would normally convert destPtr and srcPtr to
783 // i8* since memcpy operates on bytes. We don't need that in CIR because
784 // cir.copy will operate on any CIR pointer that points to a sized type.
785
786 // Don't do any of the memmove_collectable tests if GC isn't set.
787 if (cgm.getLangOpts().getGC() != LangOptions::NonGC)
788 cgm.errorNYI("emitAggregateCopy: GC");
789
790 [[maybe_unused]] cir::CopyOp copyOp =
791 builder.createCopy(destPtr.getPointer(), srcPtr.getPointer());
792
794}
795
796// TODO(cir): This could be shared with classic codegen.
799 if (!fd->hasAttr<NoUniqueAddressAttr>() || !fd->getType()->isRecordType())
801
802 // If the field lies entirely within the enclosing class's nvsize, its tail
803 // padding cannot overlap any already-initialized object. (The only subobjects
804 // with greater addresses that might already be initialized are vbases.)
805 const RecordDecl *classRD = fd->getParent();
806 const ASTRecordLayout &layout = getContext().getASTRecordLayout(classRD);
807 if (layout.getFieldOffset(fd->getFieldIndex()) +
808 getContext().getTypeSize(fd->getType()) <=
809 (uint64_t)getContext().toBits(layout.getNonVirtualSize()))
811
812 // The tail padding may contain values we need to preserve.
814}
815
static bool isTrivialFiller(Expr *e)
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
cir::ConditionOp createCondition(mlir::Value condition)
Create a loop condition.
cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base, mlir::Value stride)
cir::PointerType getPointerTo(mlir::Type ty)
cir::DoWhileOp createDoWhile(mlir::Location loc, llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> condBuilder, llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> bodyBuilder)
Create a do-while operation.
cir::ConstantOp getConstantInt(mlir::Location loc, mlir::Type ty, int64_t value)
cir::YieldOp createYield(mlir::Location loc, mlir::ValueRange value={})
Create a yield operation.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType BoolTy
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
QualType getElementType() const
Definition TypeBase.h:3732
mlir::Value getPointer() const
Definition Address.h:81
mlir::Type getElementType() const
Definition Address.h:101
clang::CharUnits getAlignment() const
Definition Address.h:109
An aggregate value slot.
IsZeroed_t isZeroed() const
Overlap_t mayOverlap() const
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal)
cir::LoadOp createLoad(mlir::Location loc, Address addr, bool isVolatile=false)
static bool hasScalarEvaluationKind(clang::QualType type)
mlir::Type convertType(clang::QualType t)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
CIRGenTypes & getTypes() const
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, bool isInit=false, bool isNontemporal=false)
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *fd)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
LValue emitAggExprToLValue(const Expr *e)
static bool hasAggregateEvaluationKind(clang::QualType type)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, except that if the Field is a reference, this will return the address of the...
mlir::Value emitScalarExpr(const clang::Expr *e)
Emit the computation of the specified expression of scalar type.
CIRGenBuilderTy & getBuilder()
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual)
Determine whether a base class initialization may overlap some other object.
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
clang::ASTContext & getContext() const
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap)
Emit an aggregate copy.
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment and cast it to the defa...
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
const clang::LangOptions & getLangOpts() const
mlir::Value emitNullConstant(QualType t, mlir::Location loc)
Return the result of value-initializing the given type, i.e.
bool isZeroInitializable(clang::QualType ty)
Return whether a type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
Address getAddress() const
static RValue get(mlir::Value v)
Definition CIRGenValue.h:82
const Expr * getSubExpr() const
Definition ExprCXX.h:1516
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprCXX.h:5195
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprCXX.h:353
SourceRange getSourceRange() const LLVM_READONLY
Retrieve the source range of the expression.
Definition ExprCXX.h:828
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprCXX.h:902
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1599
CastKind getCastKind() const
Definition Expr.h:3656
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1946
Expr * getSubExpr()
Definition Expr.h:3662
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
bool hasAttr() const
Definition DeclBase.h:577
This represents one expression.
Definition Expr.h:112
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3157
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3242
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3393
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition Expr.cpp:2457
FieldDecl * getInitializedFieldInUnion()
If this initializes a union, specifies which field in the union to initialize.
Definition Expr.h:5361
bool hadArrayRangeDesignator() const
Definition Expr.h:5419
Expr * getArrayFiller()
If this initializer list initializes an array with more elements than there are initializers in the l...
Definition Expr.h:5337
const Expr * getInit(unsigned Init) const
Definition Expr.h:5289
ArrayRef< Expr * > inits()
Definition Expr.h:5285
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8369
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1545
Represents a struct/union/class.
Definition Decl.h:4309
CompoundStmt * getSubStmt()
Definition Expr.h:4548
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:334
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8625
bool isArrayType() const
Definition TypeBase.h:8621
bool isReferenceType() const
Definition TypeBase.h:8546
bool isVariableArrayType() const
Definition TypeBase.h:8633
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8657
bool isRecordType() const
Definition TypeBase.h:8649
QualType getType() const
Definition Decl.h:722
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
static bool emitLifetimeMarkers()
static bool aggValueSlotDestructedFlag()
static bool aggValueSlotGC()
static bool aggValueSlotAlias()
static bool opLoadStoreAtomic()
static bool aggEmitFinalDestCopyRValue()
static bool aggValueSlotVolatile()
static bool cudaSupport()
static bool requiresCleanups()
clang::CharUnits getPointerAlign() const