27 CharUnits atomicAlign;
34 AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
35 : cgf(cgf), loc(loc) {
36 assert(!lvalue.isGlobalReg());
37 ASTContext &ctx = cgf.getContext();
38 if (lvalue.isSimple()) {
39 atomicTy = lvalue.getType();
40 if (
auto *ty = atomicTy->getAs<AtomicType>())
41 valueTy = ty->getValueType();
44 evaluationKind = cgf.getEvaluationKind(valueTy);
47 TypeInfo atomicTypeInfo = ctx.
getTypeInfo(atomicTy);
50 valueSizeInBits = valueTypeInfo.
Width;
51 atomicSizeInBits = atomicTypeInfo.
Width;
52 assert(valueSizeInBits <= atomicSizeInBits);
53 assert(valueAlignInBits <= atomicAlignInBits);
57 if (lvalue.getAlignment().isZero())
58 lvalue.setAlignment(atomicAlign);
60 this->lvalue = lvalue;
63 cgf.cgm.errorNYI(loc,
"AtomicInfo: non-simple lvalue");
/// The underlying value type of the atomic object (the type actually
/// being operated on, stripped of the _Atomic wrapper).
QualType getValueType() const { return valueTy; }
/// The alignment of the atomic storage.
CharUnits getAtomicAlignment() const { return atomicAlign; }
72 mlir::Value getAtomicPointer()
const {
73 if (lvalue.isSimple())
74 return lvalue.getPointer();
78 Address getAtomicAddress()
const {
80 if (lvalue.isSimple()) {
81 elemTy = lvalue.getAddress().getElementType();
84 cgf.cgm.errorNYI(loc,
"AtomicInfo::getAtomicAddress: non-simple lvalue");
86 return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
95 bool hasPadding()
const {
return (valueSizeInBits != atomicSizeInBits); }
/// Emit a zero-initializing memset of the atomic storage if needed.
/// NOTE(review): the out-of-line definition currently hits errorNYI for
/// the actual memset emission.
bool emitMemSetZeroIfNecessary() const;
/// Cast the given address to a pointer to an integer type whose width
/// matches the atomic's size in bits.
Address castToAtomicIntPointer(Address addr) const;
/// Convert `addr` to an integer-typed atomic pointer; when the source
/// width differs from the atomic width a copy through a temporary is
/// required (that path is currently errorNYI in the definition).
Address convertToAtomicIntPointer(Address addr) const;
/// Copy an r-value into this atomic's memory. Only simple lvalues are
/// handled; aggregate and complex cases are currently errorNYI in the
/// definition.
void emitCopyIntoMemory(RValue rvalue) const;
112 LValue projectValue()
const {
113 assert(lvalue.isSimple());
114 Address addr = getAtomicAddress();
116 cgf.cgm.errorNYI(loc,
"AtomicInfo::projectValue: padding");
120 return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
/// Create a temporary alloca suitable for holding the atomic value
/// (bitfield lvalues are currently errorNYI in the definition).
Address createTempAlloca() const;
/// Whether a store of the given IR type requires an explicit zeroing of
/// the atomic storage first (i.e. the store would not cover the full
/// atomic width). Dispatches on the evaluation kind in the definition.
bool requiresMemSetZero(mlir::Type ty) const;
143 uint64_t expectedSize) {
150bool AtomicInfo::requiresMemSetZero(mlir::Type ty)
const {
156 switch (getEvaluationKind()) {
163 mlir::cast<cir::ComplexType>(ty).getElementType(),
164 atomicSizeInBits / 2);
169 llvm_unreachable(
"bad evaluation kind");
172Address AtomicInfo::convertToAtomicIntPointer(Address addr)
const {
175 if (sourceSizeInBits != atomicSizeInBits) {
178 "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
181 return castToAtomicIntPointer(addr);
184Address AtomicInfo::createTempAlloca()
const {
186 (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
188 getAtomicAlignment(), loc,
"atomic-temp");
191 if (lvalue.isBitField()) {
192 cgf.
cgm.
errorNYI(loc,
"AtomicInfo::createTempAlloca: bitfield lvalue");
198Address AtomicInfo::castToAtomicIntPointer(Address addr)
const {
201 if (intTy && intTy.getWidth() == atomicSizeInBits)
207bool AtomicInfo::emitMemSetZeroIfNecessary()
const {
208 assert(lvalue.isSimple());
209 Address addr = lvalue.getAddress();
214 "AtomicInfo::emitMemSetZeroIfNecessary: emit memset zero");
220void AtomicInfo::emitCopyIntoMemory(RValue rvalue)
const {
221 assert(lvalue.isSimple());
227 cgf.
cgm.
errorNYI(
"copying aggregate into atomic lvalue");
234 emitMemSetZeroIfNecessary();
237 LValue tempLValue = projectValue();
243 cgf.
cgm.
errorNYI(
"copying complex into atomic lvalue");
250 cir::MemOrder successOrder,
251 cir::MemOrder failureOrder) {
255 mlir::Value expected = builder.
createLoad(loc, val1);
256 mlir::Value desired = builder.
createLoad(loc, val2);
258 auto cmpxchg = cir::AtomicCmpXchg::create(
266 cmpxchg.setWeak(isWeak);
268 mlir::Value failed = builder.
createNot(cmpxchg.getSuccess());
269 cir::IfOp::create(builder, loc, failed,
false,
270 [&](mlir::OpBuilder &, mlir::Location) {
271 auto ptrTy = mlir::cast<cir::PointerType>(
290 Expr *failureOrderExpr, uint64_t size,
291 cir::MemOrder successOrder) {
294 uint64_t failureOrderInt = failureOrderEval.
Val.
getInt().getZExtValue();
296 cir::MemOrder failureOrder;
298 failureOrder = cir::MemOrder::Relaxed;
300 switch ((cir::MemOrder)failureOrderInt) {
301 case cir::MemOrder::Relaxed:
304 case cir::MemOrder::Release:
305 case cir::MemOrder::AcquireRelease:
306 failureOrder = cir::MemOrder::Relaxed;
308 case cir::MemOrder::Consume:
309 case cir::MemOrder::Acquire:
310 failureOrder = cir::MemOrder::Acquire;
312 case cir::MemOrder::SequentiallyConsistent:
313 failureOrder = cir::MemOrder::SequentiallyConsistent;
329 "emitAtomicCmpXchgFailureSet: non-constant failure order");
334 Expr *isWeakExpr,
Expr *failureOrderExpr, int64_t size,
335 cir::MemOrder order) {
336 std::unique_ptr<AtomicScopeModel> scopeModel =
expr->getScopeModel();
339 cgf.
cgm.
errorNYI(
expr->getSourceRange(),
"emitAtomicOp: atomic scope");
346 mlir::Location loc = cgf.
getLoc(
expr->getSourceRange());
347 auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
349 switch (
expr->getOp()) {
350 case AtomicExpr::AO__c11_atomic_init:
351 llvm_unreachable(
"already handled!");
353 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
355 val2, failureOrderExpr, size, order);
358 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
360 val2, failureOrderExpr, size, order);
363 case AtomicExpr::AO__atomic_compare_exchange:
364 case AtomicExpr::AO__atomic_compare_exchange_n: {
368 failureOrderExpr, size, order);
372 "emitAtomicOp: non-constant isWeak");
377 case AtomicExpr::AO__c11_atomic_load:
378 case AtomicExpr::AO__atomic_load_n:
379 case AtomicExpr::AO__atomic_load: {
385 load->setAttr(
"mem_order", orderAttr);
387 builder.
createStore(loc, load->getResult(0), dest);
391 case AtomicExpr::AO__c11_atomic_store:
392 case AtomicExpr::AO__atomic_store_n:
393 case AtomicExpr::AO__atomic_store: {
394 cir::LoadOp loadVal1 = builder.
createLoad(loc, val1);
399 mlir::IntegerAttr{}, orderAttr);
403 case AtomicExpr::AO__opencl_atomic_init:
405 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
406 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
408 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
409 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
411 case AtomicExpr::AO__scoped_atomic_compare_exchange:
412 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
414 case AtomicExpr::AO__opencl_atomic_load:
415 case AtomicExpr::AO__hip_atomic_load:
416 case AtomicExpr::AO__scoped_atomic_load_n:
417 case AtomicExpr::AO__scoped_atomic_load:
419 case AtomicExpr::AO__opencl_atomic_store:
420 case AtomicExpr::AO__hip_atomic_store:
421 case AtomicExpr::AO__scoped_atomic_store:
422 case AtomicExpr::AO__scoped_atomic_store_n:
424 case AtomicExpr::AO__c11_atomic_exchange:
425 case AtomicExpr::AO__hip_atomic_exchange:
426 case AtomicExpr::AO__opencl_atomic_exchange:
427 case AtomicExpr::AO__atomic_exchange_n:
428 case AtomicExpr::AO__atomic_exchange:
429 case AtomicExpr::AO__scoped_atomic_exchange_n:
430 case AtomicExpr::AO__scoped_atomic_exchange:
432 case AtomicExpr::AO__atomic_add_fetch:
433 case AtomicExpr::AO__scoped_atomic_add_fetch:
435 case AtomicExpr::AO__c11_atomic_fetch_add:
436 case AtomicExpr::AO__hip_atomic_fetch_add:
437 case AtomicExpr::AO__opencl_atomic_fetch_add:
438 case AtomicExpr::AO__atomic_fetch_add:
439 case AtomicExpr::AO__scoped_atomic_fetch_add:
441 case AtomicExpr::AO__atomic_sub_fetch:
442 case AtomicExpr::AO__scoped_atomic_sub_fetch:
444 case AtomicExpr::AO__c11_atomic_fetch_sub:
445 case AtomicExpr::AO__hip_atomic_fetch_sub:
446 case AtomicExpr::AO__opencl_atomic_fetch_sub:
447 case AtomicExpr::AO__atomic_fetch_sub:
448 case AtomicExpr::AO__scoped_atomic_fetch_sub:
450 case AtomicExpr::AO__atomic_min_fetch:
451 case AtomicExpr::AO__scoped_atomic_min_fetch:
453 case AtomicExpr::AO__c11_atomic_fetch_min:
454 case AtomicExpr::AO__hip_atomic_fetch_min:
455 case AtomicExpr::AO__opencl_atomic_fetch_min:
456 case AtomicExpr::AO__atomic_fetch_min:
457 case AtomicExpr::AO__scoped_atomic_fetch_min:
459 case AtomicExpr::AO__atomic_max_fetch:
460 case AtomicExpr::AO__scoped_atomic_max_fetch:
462 case AtomicExpr::AO__c11_atomic_fetch_max:
463 case AtomicExpr::AO__hip_atomic_fetch_max:
464 case AtomicExpr::AO__opencl_atomic_fetch_max:
465 case AtomicExpr::AO__atomic_fetch_max:
466 case AtomicExpr::AO__scoped_atomic_fetch_max:
468 case AtomicExpr::AO__atomic_and_fetch:
469 case AtomicExpr::AO__scoped_atomic_and_fetch:
471 case AtomicExpr::AO__c11_atomic_fetch_and:
472 case AtomicExpr::AO__hip_atomic_fetch_and:
473 case AtomicExpr::AO__opencl_atomic_fetch_and:
474 case AtomicExpr::AO__atomic_fetch_and:
475 case AtomicExpr::AO__scoped_atomic_fetch_and:
477 case AtomicExpr::AO__atomic_or_fetch:
478 case AtomicExpr::AO__scoped_atomic_or_fetch:
480 case AtomicExpr::AO__c11_atomic_fetch_or:
481 case AtomicExpr::AO__hip_atomic_fetch_or:
482 case AtomicExpr::AO__opencl_atomic_fetch_or:
483 case AtomicExpr::AO__atomic_fetch_or:
484 case AtomicExpr::AO__scoped_atomic_fetch_or:
486 case AtomicExpr::AO__atomic_xor_fetch:
487 case AtomicExpr::AO__scoped_atomic_xor_fetch:
489 case AtomicExpr::AO__c11_atomic_fetch_xor:
490 case AtomicExpr::AO__hip_atomic_fetch_xor:
491 case AtomicExpr::AO__opencl_atomic_fetch_xor:
492 case AtomicExpr::AO__atomic_fetch_xor:
493 case AtomicExpr::AO__scoped_atomic_fetch_xor:
495 case AtomicExpr::AO__atomic_nand_fetch:
496 case AtomicExpr::AO__scoped_atomic_nand_fetch:
498 case AtomicExpr::AO__c11_atomic_fetch_nand:
499 case AtomicExpr::AO__atomic_fetch_nand:
500 case AtomicExpr::AO__scoped_atomic_fetch_nand:
502 case AtomicExpr::AO__atomic_test_and_set:
504 case AtomicExpr::AO__atomic_clear:
513 auto memOrder =
static_cast<cir::MemOrder
>(order);
515 return memOrder != cir::MemOrder::Consume &&
516 memOrder != cir::MemOrder::Acquire &&
517 memOrder != cir::MemOrder::AcquireRelease;
519 return memOrder != cir::MemOrder::Release &&
520 memOrder != cir::MemOrder::AcquireRelease;
528 memTy = ty->getValueType();
530 Expr *isWeakExpr =
nullptr;
531 Expr *orderFailExpr =
nullptr;
539 if (e->
getOp() == AtomicExpr::AO__c11_atomic_init) {
553 bool shouldCastToIntPtrTy =
true;
555 switch (e->
getOp()) {
560 case AtomicExpr::AO__c11_atomic_init:
561 llvm_unreachable(
"already handled above with emitAtomicInit");
563 case AtomicExpr::AO__atomic_load_n:
564 case AtomicExpr::AO__c11_atomic_load:
567 case AtomicExpr::AO__atomic_load:
571 case AtomicExpr::AO__atomic_store:
575 case AtomicExpr::AO__atomic_compare_exchange:
576 case AtomicExpr::AO__atomic_compare_exchange_n:
577 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
578 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
580 if (e->
getOp() == AtomicExpr::AO__atomic_compare_exchange ||
581 e->
getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
586 if (e->
getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
587 e->
getOp() == AtomicExpr::AO__atomic_compare_exchange ||
588 e->
getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
589 e->
getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
593 case AtomicExpr::AO__atomic_store_n:
594 case AtomicExpr::AO__c11_atomic_store:
607 if (shouldCastToIntPtrTy) {
608 ptr = atomics.castToAtomicIntPointer(ptr);
610 val1 = atomics.convertToAtomicIntPointer(val1);
613 if (shouldCastToIntPtrTy)
614 dest = atomics.castToAtomicIntPointer(dest);
618 dest = atomics.createTempAlloca();
619 if (shouldCastToIntPtrTy)
620 dest = atomics.castToAtomicIntPointer(dest);
623 bool powerOf2Size = (size & (size - 1)) == 0;
624 bool useLibCall = !powerOf2Size || (size > 16);
641 bool isStore = e->
getOp() == AtomicExpr::AO__c11_atomic_store ||
642 e->
getOp() == AtomicExpr::AO__opencl_atomic_store ||
643 e->
getOp() == AtomicExpr::AO__hip_atomic_store ||
644 e->
getOp() == AtomicExpr::AO__atomic_store ||
645 e->
getOp() == AtomicExpr::AO__atomic_store_n ||
646 e->
getOp() == AtomicExpr::AO__scoped_atomic_store ||
647 e->
getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
648 e->
getOp() == AtomicExpr::AO__atomic_clear;
649 bool isLoad = e->
getOp() == AtomicExpr::AO__c11_atomic_load ||
650 e->
getOp() == AtomicExpr::AO__opencl_atomic_load ||
651 e->
getOp() == AtomicExpr::AO__hip_atomic_load ||
652 e->
getOp() == AtomicExpr::AO__atomic_load ||
653 e->
getOp() == AtomicExpr::AO__atomic_load_n ||
654 e->
getOp() == AtomicExpr::AO__scoped_atomic_load ||
655 e->
getOp() == AtomicExpr::AO__scoped_atomic_load_n;
661 uint64_t ord = orderConst.
Val.
getInt().getZExtValue();
663 emitAtomicOp(*
this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
664 size,
static_cast<cir::MemOrder
>(ord));
682 switch (atomics.getEvaluationKind()) {
700 llvm_unreachable(
"bad evaluation kind");
static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak, Address dest, Address ptr, Address val1, Address val2, uint64_t size, cir::MemOrder successOrder, cir::MemOrder failureOrder)
static Address emitValToTemp(CIRGenFunction &cgf, Expr *e)
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest, Address ptr, Address val1, Address val2, Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size, cir::MemOrder order)
static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad)
static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak, Address dest, Address ptr, Address val1, Address val2, Expr *failureOrderExpr, uint64_t size, cir::MemOrder successOrder)
static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty, uint64_t expectedSize)
Does a store of the given IR type modify the full expected width?
mlir::Value createNot(mlir::Value value)
mlir::Value createPtrBitcast(mlir::Value src, mlir::Type newPointeeTy)
cir::YieldOp createYield(mlir::Location loc, mlir::ValueRange value={})
Create a yield operation.
cir::BoolType getBoolTy()
llvm::TypeSize getTypeSizeInBits(mlir::Type ty) const
llvm::TypeSize getTypeStoreSize(mlir::Type ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
TypeInfo getTypeInfo(const Type *T) const
Get the size and alignment of the specified complete type in bits.
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Expr * getOrderFail() const
Address withPointer(mlir::Value newPtr) const
Return address with different pointer, but same element type and alignment.
mlir::Value getPointer() const
mlir::Type getElementType() const
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
clang::CharUnits getAlignment() const
cir::LoadOp createLoad(mlir::Location loc, Address addr, bool isVolatile=false)
cir::IntType getUIntNTy(int n)
cir::StoreOp createStore(mlir::Location loc, mlir::Value val, Address dst, bool isVolatile=false, mlir::IntegerAttr align={}, cir::MemOrderAttr order={})
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, bool isInit=false, bool isNontemporal=false)
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
RValue emitAtomicExpr(AtomicExpr *e)
mlir::Type convertTypeForMem(QualType t)
mlir::Value emitScalarExpr(const clang::Expr *e)
Emit the computation of the specified expression of scalar type.
CIRGenBuilderTy & getBuilder()
mlir::MLIRContext & getMLIRContext()
void emitAtomicInit(Expr *init, LValue dest)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
clang::ASTContext & getContext() const
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment and cast it to the defa...
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
const cir::CIRDataLayout getDataLayout() const
This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(mlir::Value v)
mlir::Value getValue() const
Return the value of this scalar value.
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
This represents one expression.
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool EvaluateAsBooleanCondition(bool &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsBooleanCondition - Return true if this is a constant which we can fold and convert to a boo...
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
A (possibly-)qualified type.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
const T * getAs() const
Member-template getAs<specific type>'.
bool isValidCIRAtomicOrderingCABI(Int value)
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
The JSON file list parser is used to communicate input to InstallAPI.
static bool atomicInfoGetAtomicPointer()
static bool atomicScope()
static bool atomicUseLibCall()
static bool atomicSyncScopeID()
static bool atomicInfoGetAtomicAddress()
EvalResult is a struct with detailed info about an evaluated expression.
APValue Val
Val - This is the value the expression can be folded to.