//===--- CIRGenAtomic.cpp - Emit CIR for atomic operations ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CIRGenFunction.h"
#include "clang/CIR/MissingFeatures.h"

using namespace clang;
using namespace clang::CIRGen;
using namespace cir;

namespace {
class AtomicInfo {
  CIRGenFunction &cgf;
  QualType atomicTy;
  QualType valueTy;
  uint64_t atomicSizeInBits = 0;
  uint64_t valueSizeInBits = 0;
  CharUnits atomicAlign;
  CharUnits valueAlign;
  TypeEvaluationKind evaluationKind = cir::TEK_Scalar;
  LValue lvalue;
  mlir::Location loc;

public:
  AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
      : cgf(cgf), loc(loc) {
    assert(!lvalue.isGlobalReg());
    ASTContext &ctx = cgf.getContext();
    if (lvalue.isSimple()) {
      atomicTy = lvalue.getType();
      if (auto *ty = atomicTy->getAs<AtomicType>())
        valueTy = ty->getValueType();
      else
        valueTy = atomicTy;
      evaluationKind = cgf.getEvaluationKind(valueTy);

      TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
      TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
      uint64_t valueAlignInBits = valueTypeInfo.Align;
      uint64_t atomicAlignInBits = atomicTypeInfo.Align;
      valueSizeInBits = valueTypeInfo.Width;
      atomicSizeInBits = atomicTypeInfo.Width;
      assert(valueSizeInBits <= atomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      atomicAlign = ctx.toCharUnitsFromBits(atomicAlignInBits);
      valueAlign = ctx.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(atomicAlign);

      this->lvalue = lvalue;
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
    }
  }

  QualType getValueType() const { return valueTy; }
  CharUnits getAtomicAlignment() const { return atomicAlign; }
  TypeEvaluationKind getEvaluationKind() const { return evaluationKind; }
  mlir::Value getAtomicPointer() const {
    if (lvalue.isSimple())
      return lvalue.getPointer();
    assert(!cir::MissingFeatures::atomicInfoGetAtomicPointer());
    return nullptr;
  }
  Address getAtomicAddress() const {
    mlir::Type elemTy;
    if (lvalue.isSimple()) {
      elemTy = lvalue.getAddress().getElementType();
    } else {
      assert(!cir::MissingFeatures::atomicInfoGetAtomicAddress());
      cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
    }
    return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
  }

  /// Is the atomic size larger than the underlying value type?
  ///
  /// Note that the absence of padding does not mean that atomic
  /// objects are completely interchangeable with non-atomic
  /// objects: we might have promoted the alignment of a type
  /// without making it bigger.
  bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }
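  // For example, a three-byte struct typically has its _Atomic width rounded
  // up to four bytes, so valueSizeInBits (24) differs from atomicSizeInBits
  // (32) and hasPadding() is true. Conversely, on 32-bit x86 an
  // _Atomic long long usually keeps its 8-byte size but has its alignment
  // promoted from 4 to 8: no padding, yet the layouts are not interchangeable.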

  bool emitMemSetZeroIfNecessary() const;

  /// Cast the given pointer to an integer pointer suitable for atomic
  /// operations on the source.
  Address castToAtomicIntPointer(Address addr) const;

  /// If addr is compatible with the iN that will be used for an atomic
  /// operation, bitcast it. Otherwise, create a temporary that is suitable and
  /// copy the value across.
  Address convertToAtomicIntPointer(Address addr) const;

  /// Copy an atomic r-value into atomic-layout memory.
  void emitCopyIntoMemory(RValue rvalue) const;

  /// Project an l-value down to the value field.
  LValue projectValue() const {
    assert(lvalue.isSimple());
    Address addr = getAtomicAddress();
    if (hasPadding()) {
      cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");
    }

    return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
  }

  /// Creates a temp alloca for intermediate operations on an atomic value.
  Address createTempAlloca() const;

private:
  bool requiresMemSetZero(mlir::Type ty) const;
};
} // namespace

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static Address emitValToTemp(CIRGenFunction &cgf, Expr *e) {
  Address declPtr = cgf.createMemTemp(
      e->getType(), cgf.getLoc(e->getSourceRange()), ".atomictmp");
  cgf.emitAnyExprToMem(e, declPtr, e->getType().getQualifiers(),
                       /*isInitializer=*/true);
  return declPtr;
}
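
// For example, for __atomic_store_n(p, x + 1, __ATOMIC_SEQ_CST) the value
// operand `x + 1` is evaluated into a fresh ".atomictmp" slot first; the
// atomic lowering below then reads the value back out of that temporary.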

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
                           uint64_t expectedSize) {
  return cgm.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize;
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding())
    return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case cir::TEK_Scalar:
    return !isFullSizeType(cgf.cgm, ty, atomicSizeInBits);
  case cir::TEK_Complex:
    return !isFullSizeType(cgf.cgm,
                           mlir::cast<cir::ComplexType>(ty).getElementType(),
                           atomicSizeInBits / 2);
  // Padding in structs has an undefined bit pattern. User beware.
  case cir::TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}
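
// A classic case: on x86-64, long double is stored as 10 bytes but
// _Atomic(long double) is typically 16 bytes wide, so isFullSizeType is false
// and the trailing padding bytes would need to be zeroed to get a
// deterministic bit pattern for compare-and-exchange style operations.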

Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
  mlir::Type ty = addr.getElementType();
  uint64_t sourceSizeInBits = cgf.cgm.getDataLayout().getTypeSizeInBits(ty);
  if (sourceSizeInBits != atomicSizeInBits) {
    cgf.cgm.errorNYI(
        loc,
        "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
  }

  return castToAtomicIntPointer(addr);
}

Address AtomicInfo::createTempAlloca() const {
  Address tempAlloca = cgf.createMemTemp(
      (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
                                                                  : atomicTy,
      getAtomicAlignment(), loc, "atomic-temp");

  // Cast to pointer to value type for bitfields.
  if (lvalue.isBitField()) {
    cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
  }

  return tempAlloca;
}

Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
  auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
  // Don't bother with int casts if the integer size is the same.
  if (intTy && intTy.getWidth() == atomicSizeInBits)
    return addr;
  auto ty = cgf.getBuilder().getUIntNTy(atomicSizeInBits);
  return addr.withElementType(cgf.getBuilder(), ty);
}
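
// The inline atomic ops operate on integers of the atomic width, so, for
// instance, an _Atomic(float) is accessed through a pointer whose element
// type is a 32-bit unsigned integer; addresses that already point at an
// integer of the right width are passed through unchanged.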

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(lvalue.isSimple());
  Address addr = lvalue.getAddress();
  if (!requiresMemSetZero(addr.getElementType()))
    return false;

  cgf.cgm.errorNYI(loc,
                   "AtomicInfo::emitMemSetZeroIfNecessary: emit memset zero");
  return false;
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(lvalue.isSimple());

  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding. Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue tempLValue = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    cgf.emitStoreOfScalar(rvalue.getValue(), tempLValue, /*isInit=*/true);
  } else {
    cgf.cgm.errorNYI("copying complex into atomic lvalue");
  }
}
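
// For example, __c11_atomic_init(&a, 42) reaches this function (via
// emitAtomicInit) with the scalar rvalue 42: the zero-padding check runs
// first (the actual memset is still NYI), and the value is then stored
// through projectValue() so that only the value bits of the atomic object
// are written.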

static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
                              Address dest, Address ptr, Address val1,
                              Address val2, uint64_t size,
                              cir::MemOrder successOrder,
                              cir::MemOrder failureOrder) {
  mlir::Location loc = cgf.getLoc(e->getSourceRange());

  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Value expected = builder.createLoad(loc, val1);
  mlir::Value desired = builder.createLoad(loc, val2);

  auto cmpxchg = cir::AtomicCmpXchg::create(
      builder, loc, expected.getType(), builder.getBoolTy(), ptr.getPointer(),
      expected, desired,
      cir::MemOrderAttr::get(&cgf.getMLIRContext(), successOrder),
      cir::MemOrderAttr::get(&cgf.getMLIRContext(), failureOrder),
      builder.getI64IntegerAttr(ptr.getAlignment().getAsAlign().value()));

  cmpxchg.setIsVolatile(e->isVolatile());
  cmpxchg.setWeak(isWeak);

  // If the exchange failed, write the value observed in memory back into the
  // 'expected' slot so the caller can see it.
  mlir::Value failed = builder.createNot(cmpxchg.getSuccess());
  cir::IfOp::create(builder, loc, failed, /*withElseRegion=*/false,
                    [&](mlir::OpBuilder &, mlir::Location) {
                      auto ptrTy = mlir::cast<cir::PointerType>(
                          val1.getPointer().getType());
                      if (val1.getElementType() != ptrTy.getPointee()) {
                        val1 = val1.withPointer(builder.createPtrBitcast(
                            val1.getPointer(), val1.getElementType()));
                      }
                      builder.createStore(loc, cmpxchg.getOld(), val1);
                      builder.createYield(loc);
                    });

  // Update the memory at Dest with Success's value.
  cgf.emitStoreOfScalar(cmpxchg.getSuccess(),
                        cgf.makeAddrLValue(dest, e->getType()),
                        /*isInit=*/false);
}
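
// This mirrors the C11 builtin contract: for
//   ok = __c11_atomic_compare_exchange_strong(p, &expected, desired, s, f);
// a failed exchange leaves the value observed in *p in 'expected', and the
// boolean result (cmpxchg.getSuccess() here) ends up in the destination
// temporary that becomes the expression's value.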

static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
                                        bool isWeak, Address dest, Address ptr,
                                        Address val1, Address val2,
                                        Expr *failureOrderExpr, uint64_t size,
                                        cir::MemOrder successOrder) {
  Expr::EvalResult failureOrderEval;
  if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
    uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();

    cir::MemOrder failureOrder;
    if (!cir::isValidCIRAtomicOrderingCABI(failureOrderInt)) {
      failureOrder = cir::MemOrder::Relaxed;
    } else {
      switch ((cir::MemOrder)failureOrderInt) {
      case cir::MemOrder::Relaxed:
        // 31.7.2.18: "The failure argument shall not be memory_order_release
        // nor memory_order_acq_rel". Fall back to monotonic.
      case cir::MemOrder::Release:
      case cir::MemOrder::AcquireRelease:
        failureOrder = cir::MemOrder::Relaxed;
        break;
      case cir::MemOrder::Consume:
      case cir::MemOrder::Acquire:
        failureOrder = cir::MemOrder::Acquire;
        break;
      case cir::MemOrder::SequentiallyConsistent:
        failureOrder = cir::MemOrder::SequentiallyConsistent;
        break;
      }
    }

    // Prior to C++17, "the failure argument shall be no stronger than the
    // success argument". This condition has been lifted and the only
    // precondition is 31.7.2.18. Effectively treat this as a DR and skip
    // language version checks.
    emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size, successOrder,
                      failureOrder);
    return;
  }

  cgf.cgm.errorNYI(e->getSourceRange(),
                   "emitAtomicCmpXchgFailureSet: non-constant failure order");
}
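
// In practice this means a call such as
//   __c11_atomic_compare_exchange_strong(p, &e, d, memory_order_acq_rel,
//                                        memory_order_release)
// has its failure ordering demoted to relaxed, while a 'consume' failure
// ordering is strengthened to acquire. Non-constant failure orderings are
// still NYI.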

static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                         Address ptr, Address val1, Address val2,
                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                         cir::MemOrder order) {
  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
  if (scopeModel) {
    assert(!cir::MissingFeatures::atomicScope());
    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: atomic scope");
    return;
  }

  assert(!cir::MissingFeatures::atomicSyncScopeID());

  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
  auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);

  switch (expr->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    bool isWeak = false;
    if (isWeakExpr->EvaluateAsBooleanCondition(isWeak, cgf.getContext())) {
      emitAtomicCmpXchgFailureSet(cgf, expr, isWeak, dest, ptr, val1, val2,
                                  failureOrderExpr, size, order);
    } else {
      cgf.cgm.errorNYI(expr->getSourceRange(),
                       "emitAtomicOp: non-constant isWeak");
    }
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    cir::LoadOp load =
        builder.createLoad(loc, ptr, /*isVolatile=*/expr->isVolatile());

    load->setAttr("mem_order", orderAttr);

    builder.createStore(loc, load->getResult(0), dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_store: {
    cir::LoadOp loadVal1 = builder.createLoad(loc, val1);

    builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
                        /*align=*/mlir::IntegerAttr{}, orderAttr);
    return;
  }

  case AtomicExpr::AO__opencl_atomic_init:

  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:

  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:

  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:

  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load:

  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n:

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:

  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_add_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:

  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:

  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:

  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:

  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_and_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:

  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:

  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:

  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:

  case AtomicExpr::AO__atomic_test_and_set:

  case AtomicExpr::AO__atomic_clear:
    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
    break;
  }
}
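
// At this point only C11-style init/load/store and the compare-exchange
// builtins are lowered; e.g. __c11_atomic_load(p, __ATOMIC_ACQUIRE) becomes a
// load tagged with an acquire mem_order whose result is stored to the result
// temporary. The scoped, OpenCL/HIP, and read-modify-write builtins above all
// fall through to the NYI diagnostic.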

static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
  if (!cir::isValidCIRAtomicOrderingCABI(order))
    return false;
  auto memOrder = static_cast<cir::MemOrder>(order);
  if (isStore)
    return memOrder != cir::MemOrder::Consume &&
           memOrder != cir::MemOrder::Acquire &&
           memOrder != cir::MemOrder::AcquireRelease;
  if (isLoad)
    return memOrder != cir::MemOrder::Release &&
           memOrder != cir::MemOrder::AcquireRelease;
  return true;
}
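
// For example, __c11_atomic_store(p, v, memory_order_acquire) and
// __c11_atomic_load(p, memory_order_release) are rejected here, so the
// caller below simply emits no atomic operation for them.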

RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
  QualType atomicTy = e->getPtr()->getType()->getPointeeType();
  QualType memTy = atomicTy;
  if (const auto *ty = atomicTy->getAs<AtomicType>())
    memTy = ty->getValueType();

  Expr *isWeakExpr = nullptr;
  Expr *orderFailExpr = nullptr;

  Address val1 = Address::invalid();
  Address val2 = Address::invalid();
  Address dest = Address::invalid();
  Address ptr = emitPointerWithAlignment(e->getPtr());

  if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
    LValue lvalue = makeAddrLValue(ptr, atomicTy);
    emitAtomicInit(e->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
  uint64_t size = typeInfo.Width.getQuantity();

  Expr::EvalResult orderConst;
  mlir::Value order;
  if (!e->getOrder()->EvaluateAsInt(orderConst, getContext()))
    order = emitScalarExpr(e->getOrder());

  bool shouldCastToIntPtrTy = true;

  switch (e->getOp()) {
  default:
    cgm.errorNYI(e->getSourceRange(), "atomic op NYI");
    return RValue::get(nullptr);

  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled above with emitAtomicInit");

  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
    break;

  case AtomicExpr::AO__atomic_load:
    dest = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    val1 = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    val1 = emitPointerWithAlignment(e->getVal1());
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      val2 = emitPointerWithAlignment(e->getVal2());
    else
      val2 = emitValToTemp(*this, e->getVal2());
    orderFailExpr = e->getOrderFail();
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      isWeakExpr = e->getWeak();
    break;

  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__c11_atomic_store:
    val1 = emitValToTemp(*this, e->getVal1());
    break;
  }

  QualType resultTy = e->getType().getUnqualifiedType();

  // The inlined atomics only operate on iN types, where N is a power of 2. We
  // need to make sure (via temporaries if necessary) that all incoming values
  // are compatible.
  LValue atomicValue = makeAddrLValue(ptr, atomicTy);
  AtomicInfo atomics(*this, atomicValue, getLoc(e->getSourceRange()));

  if (shouldCastToIntPtrTy) {
    ptr = atomics.castToAtomicIntPointer(ptr);
    if (val1.isValid())
      val1 = atomics.convertToAtomicIntPointer(val1);
  }
  if (dest.isValid()) {
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  } else if (e->isCmpXChg()) {
    dest = createMemTemp(resultTy, getLoc(e->getSourceRange()), "cmpxchg.bool");
  } else if (!resultTy->isVoidType()) {
    dest = atomics.createTempAlloca();
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  }

  bool powerOf2Size = (size & (size - 1)) == 0;
  bool useLibCall = !powerOf2Size || (size > 16);

  // For atomics larger than 16 bytes, emit a libcall from the frontend. This
  // avoids the overhead of dealing with excessively-large value types in IR.
  // Non-power-of-2 values also lower to a libcall here, as they are not
  // currently permitted in IR instructions (although that constraint could be
  // relaxed in the future). For other cases where a libcall is required on a
  // given platform, we let the backend handle it (this includes handling for
  // all of the size-optimized libcall variants, which are only valid up to 16
  // bytes).
  //
  // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
  if (useLibCall) {
    assert(!cir::MissingFeatures::atomicUseLibCall());
    cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: emit atomic lib call");
    return RValue::get(nullptr);
  }
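
  // Example: an 8-byte _Atomic(long) is lowered inline, while a 32-byte
  // atomic object, or one whose size is not a power of 2, would take the
  // libcall path above, which is not implemented yet.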

  bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 e->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 e->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__atomic_clear;
  bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
                e->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                e->getOp() == AtomicExpr::AO__hip_atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load_n ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;

  if (!order) {
    // We have evaluated the memory order as an integer constant in orderConst.
    // We should not ever get to a case where the ordering isn't a valid CABI
    // value, but it's hard to enforce that in general.
    uint64_t ord = orderConst.Val.getInt().getZExtValue();
    if (isMemOrderValid(ord, isStore, isLoad))
      emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
                   size, static_cast<cir::MemOrder>(ord));
  } else {
    cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: dynamic memory order");
    return RValue::get(nullptr);
  }

  if (resultTy->isVoidType())
    return RValue::get(nullptr);

  return convertTempToRValue(
      dest.withElementType(builder, convertTypeForMem(resultTy)), resultTy,
      e->getExprLoc());
}

void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));

  switch (atomics.getEvaluationKind()) {
  case cir::TEK_Scalar: {
    mlir::Value value = emitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case cir::TEK_Complex: {
    mlir::Value value = emitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case cir::TEK_Aggregate:
    cgm.errorNYI(init->getSourceRange(), "emitAtomicInit: aggregate type");
    return;
  }

  llvm_unreachable("bad evaluation kind");
}
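
// For instance, given
//   _Atomic float f;
//   __c11_atomic_init(&f, 1.0f);
// the initializer is a scalar, so the TEK_Scalar path evaluates 1.0f and
// copies it into the atomic storage via emitCopyIntoMemory; aggregate
// initializers still hit the NYI diagnostic above.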