LLVM 22.0.0git
AArch64AsmPrinter.cpp
Go to the documentation of this file.
1//===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains a printer that converts from our internal representation
10// of machine-dependent LLVM code to the AArch64 assembly language.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64.h"
15#include "AArch64MCInstLower.h"
17#include "AArch64RegisterInfo.h"
18#include "AArch64Subtarget.h"
27#include "llvm/ADT/DenseMap.h"
28#include "llvm/ADT/ScopeExit.h"
31#include "llvm/ADT/StringRef.h"
32#include "llvm/ADT/Twine.h"
46#include "llvm/IR/DataLayout.h"
48#include "llvm/IR/Mangler.h"
49#include "llvm/IR/Module.h"
50#include "llvm/MC/MCAsmInfo.h"
51#include "llvm/MC/MCContext.h"
52#include "llvm/MC/MCInst.h"
56#include "llvm/MC/MCStreamer.h"
57#include "llvm/MC/MCSymbol.h"
67#include <cassert>
68#include <cstdint>
69#include <map>
70#include <memory>
71
72using namespace llvm;
73
76 "aarch64-ptrauth-auth-checks", cl::Hidden,
77 cl::values(clEnumValN(Unchecked, "none", "don't test for failure"),
78 clEnumValN(Poison, "poison", "poison on failure"),
79 clEnumValN(Trap, "trap", "trap on failure")),
80 cl::desc("Check pointer authentication auth/resign failures"),
82
83#define DEBUG_TYPE "asm-printer"
84
85namespace {
86
/// AsmPrinter specialization for AArch64: lowers machine instructions
/// (including the target's many pseudo instructions — ptrauth, HWASan,
/// KCFI, XRay, jump tables) to MC and emits them via the streamer.
class AArch64AsmPrinter : public AsmPrinter {
  AArch64MCInstLower MCInstLowering;
  FaultMaps FM;
  // Per-function subtarget; set in runOnMachineFunction, and temporarily
  // repointed in emitHwasanMemaccessSymbols.
  const AArch64Subtarget *STI;
  bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;
#ifndef NDEBUG
  unsigned InstsEmitted;
#endif
  // Set in emitStartOfAsmFile from the "import-call-optimization" module flag.
  bool EnableImportCallOptimization = false;
  // NOTE(review): the type of this member (preceding declaration line) was
  // lost in extraction — restore from upstream before building.
  SectionToImportedFunctionCalls;

public:
  static char ID;

  AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
      : AsmPrinter(TM, std::move(Streamer), ID),
        MCInstLowering(OutContext, *this), FM(*this) {}

  StringRef getPassName() const override { return "AArch64 Assembly Printer"; }

  /// Wrapper for MCInstLowering.lowerOperand() for the
  /// tblgen'erated pseudo lowering.
  bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
    return MCInstLowering.lowerOperand(MO, MCOp);
  }

  const MCExpr *lowerConstantPtrAuth(const ConstantPtrAuth &CPA) override;

  const MCExpr *lowerBlockAddressConstant(const BlockAddress &BA) override;

  void emitStartOfAsmFile(Module &M) override;
  void emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
                         ArrayRef<unsigned> JumpTableIndices) override;
  // NOTE(review): the last element type of this tuple (one line) was lost in
  // extraction.
  std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
  getCodeViewJumpTableInfo(int JTI, const MachineInstr *BranchInstr,
                           const MCSymbol *BranchLabel) const override;

  void emitFunctionEntryLabel() override;

  void emitXXStructor(const DataLayout &DL, const Constant *CV) override;

  void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);

  void LowerHardenedBRJumpTable(const MachineInstr &MI);

  void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);

  void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
                     const MachineInstr &MI);
  void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                       const MachineInstr &MI);
  void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                       const MachineInstr &MI);
  void LowerFAULTING_OP(const MachineInstr &MI);

  void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
  void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
  void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
  void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, bool Typed);

  // Key for the cache of outlined HWASan check routines; see
  // LowerHWASAN_CHECK_MEMACCESS / emitHwasanMemaccessSymbols.
  typedef std::tuple<unsigned, bool, uint32_t, bool, uint64_t>
      HwasanMemaccessTuple;
  std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
  void LowerKCFI_CHECK(const MachineInstr &MI);
  void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
  void emitHwasanMemaccessSymbols(Module &M);

  void emitSled(const MachineInstr &MI, SledKind Kind);

  // Emit the sequence for BRA/BLRA (authenticate + branch/call).
  void emitPtrauthBranch(const MachineInstr *MI);

  // NOTE(review): two parameter lines of this declaration were lost in
  // extraction — restore from upstream.
  void emitPtrauthCheckAuthenticatedValue(Register TestedReg,
                                          Register ScratchReg,
                                          bool ShouldTrap,
                                          const MCSymbol *OnFailure);

  // Check authenticated LR before tail calling.
  void emitPtrauthTailCallHardening(const MachineInstr *TC);

  // Emit the sequence for AUT or AUTPAC.
  void emitPtrauthAuthResign(Register AUTVal, AArch64PACKey::ID AUTKey,
                             uint64_t AUTDisc,
                             const MachineOperand *AUTAddrDisc,
                             Register Scratch,
                             std::optional<AArch64PACKey::ID> PACKey,
                             uint64_t PACDisc, Register PACAddrDisc);

  // Emit the sequence for PAC.
  void emitPtrauthSign(const MachineInstr *MI);

  // Emit the sequence to compute the discriminator.
  //
  // The returned register is either unmodified AddrDisc or ScratchReg.
  //
  // If the expanded pseudo is allowed to clobber AddrDisc register, setting
  // MayUseAddrAsScratch may save one MOV instruction, provided the address
  // is already in x16/x17 (i.e. return x16/x17 which is the *modified* AddrDisc
  // register at the same time) or the OS doesn't make it safer to use x16/x17
  // (see AArch64Subtarget::isX16X17Safer()):
  //
  //   mov   x17, x16
  //   movk  x17, #1234, lsl #48
  //   ; x16 is not used anymore
  //
  // can be replaced by
  //
  //   movk  x16, #1234, lsl #48
  Register emitPtrauthDiscriminator(uint16_t Disc, Register AddrDisc,
                                    Register ScratchReg,
                                    bool MayUseAddrAsScratch = false);

  // Emit the sequence for LOADauthptrstatic
  void LowerLOADauthptrstatic(const MachineInstr &MI);

  // Emit the sequence for LOADgotPAC/MOVaddrPAC (either GOT adrp-ldr or
  // adrp-add followed by PAC sign)
  void LowerMOVaddrPAC(const MachineInstr &MI);

  // Emit the sequence for LOADgotAUTH (load signed pointer from signed ELF GOT
  // and authenticate it with, if FPAC bit is not set, check+trap sequence after
  // authenticating)
  void LowerLOADgotAUTH(const MachineInstr &MI);

  /// tblgen'erated driver function for lowering simple MI->MC
  /// pseudo instructions.
  bool lowerPseudoInstExpansion(const MachineInstr *MI, MCInst &Inst);

  // Emit Build Attributes
  void emitAttributes(unsigned Flags, uint64_t PAuthABIPlatform,
                      uint64_t PAuthABIVersion, AArch64TargetStreamer *TS);

  // Emit expansion of Compare-and-branch pseudo instructions
  void emitCBPseudoExpansion(const MachineInstr *MI);

  void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
  // Convenience overload targeting the printer's own output streamer.
  void EmitToStreamer(const MCInst &Inst) {
    EmitToStreamer(*OutStreamer, Inst);
  }

  void emitInstruction(const MachineInstr *MI) override;

  void emitFunctionHeaderComment() override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // NOTE(review): one line was lost here in extraction — presumably the
    // AsmPrinter::getAnalysisUsage(AU) delegation; confirm against upstream.
    AU.setPreservesAll();
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    if (auto *PSIW = getAnalysisIfAvailable<ProfileSummaryInfoWrapperPass>())
      PSI = &PSIW->getPSI();
    if (auto *SDPIW =
            getAnalysisIfAvailable<StaticDataProfileInfoWrapperPass>())
      SDPI = &SDPIW->getStaticDataProfileInfo();

    AArch64FI = MF.getInfo<AArch64FunctionInfo>();
    STI = &MF.getSubtarget<AArch64Subtarget>();

    SetupMachineFunction(MF);

    if (STI->isTargetCOFF()) {
      bool Local = MF.getFunction().hasLocalLinkage();
      // NOTE(review): the storage-class ('Scl') computation and the
      // right-hand side of 'Type' were lost in extraction — restore from
      // upstream before building.
      int Type =

      OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
      OutStreamer->emitCOFFSymbolStorageClass(Scl);
      OutStreamer->emitCOFFSymbolType(Type);
      OutStreamer->endCOFFSymbolDef();
    }

    // Emit the rest of the function body.
    emitFunctionBody();

    // Emit the XRay table for this function.
    emitXRayTable();

    // We didn't modify anything.
    return false;
  }

  const MCExpr *lowerConstant(const Constant *CV,
                              const Constant *BaseCV = nullptr,
                              uint64_t Offset = 0) override;

private:
  void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
  bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
  bool printAsmRegInClass(const MachineOperand &MO,
                          const TargetRegisterClass *RC, unsigned AltName,
                          raw_ostream &O);

  bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
                       const char *ExtraCode, raw_ostream &O) override;
  bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
                             const char *ExtraCode, raw_ostream &O) override;

  void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);

  void emitFunctionBodyEnd() override;
  void emitGlobalAlias(const Module &M, const GlobalAlias &GA) override;

  MCSymbol *GetCPISymbol(unsigned CPID) const override;
  void emitEndOfAsmFile(Module &M) override;

  AArch64FunctionInfo *AArch64FI = nullptr;

  /// Emit the LOHs contained in AArch64FI.
  void emitLOHs();

  // Small helpers for emitting common single instructions.
  void emitMovXReg(Register Dest, Register Src);
  void emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift);
  void emitMOVK(Register Dest, uint64_t Imm, unsigned Shift);

  /// Emit instruction to set float register to zero.
  void emitFMov0(const MachineInstr &MI);
  void emitFMov0AsFMov(const MachineInstr &MI, Register DestReg);

  using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;

  // Labels attached to instructions participating in Linker Optimization
  // Hints (LOHs); populated for use by emitLOHs.
  MInstToMCSymbol LOHInstToLabel;

  bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
    return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
  }

  const MCSubtargetInfo *getIFuncMCSubtargetInfo() const override {
    assert(STI);
    return STI;
  }
  void emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
                              MCSymbol *LazyPointer) override;
  void emitMachOIFuncStubHelperBody(Module &M, const GlobalIFunc &GI,
                                    MCSymbol *LazyPointer) override;

  /// Checks if this instruction is part of a sequence that is eligle for import
  /// call optimization and, if so, records it to be emitted in the import call
  /// section.
  void recordIfImportCall(const MachineInstr *BranchInst);
};
334
335} // end anonymous namespace
336
/// Module-level prologue: COFF feature/flag setup, then (ELF only) compute
/// the feature flags and PAuth ABI tuple and emit AArch64 build attributes
/// plus a .note.gnu.property section.
void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
  const Triple &TT = TM.getTargetTriple();

  if (TT.isOSBinFormatCOFF()) {
    emitCOFFFeatureSymbol(M);
    emitCOFFReplaceableFunctionData(M);

    // Import call optimization is opt-in via a module flag; the flag is
    // consumed again when the import call section is emitted.
    if (M.getModuleFlag("import-call-optimization"))
      EnableImportCallOptimization = true;
  }

  // Everything below is ELF-specific.
  if (!TT.isOSBinFormatELF())
    return;

  // For emitting build attributes and .note.gnu.property section
  auto *TS =
      static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
  // Assemble feature flags that may require creation of build attributes and a
  // note section.
  unsigned BAFlags = 0;
  unsigned GNUFlags = 0;
  if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("branch-target-enforcement"))) {
    if (!BTE->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_BTI_Flag;
      // NOTE(review): the matching 'GNUFlags |= ...' update (one line) was
      // lost in extraction — restore from upstream.
    }
  }

  if (const auto *GCS = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("guarded-control-stack"))) {
    if (!GCS->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_GCS_Flag;
      // NOTE(review): the matching 'GNUFlags |= ...' update (one line) was
      // lost in extraction — restore from upstream.
    }
  }

  if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("sign-return-address"))) {
    if (!Sign->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_PAC_Flag;
      // NOTE(review): the matching 'GNUFlags |= ...' update (one line) was
      // lost in extraction — restore from upstream.
    }
  }

  // uint64_t(-1) is used below as the "flag not present" sentinel.
  uint64_t PAuthABIPlatform = -1;
  if (const auto *PAP = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("aarch64-elf-pauthabi-platform"))) {
    PAuthABIPlatform = PAP->getZExtValue();
  }

  uint64_t PAuthABIVersion = -1;
  if (const auto *PAV = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("aarch64-elf-pauthabi-version"))) {
    PAuthABIVersion = PAV->getZExtValue();
  }

  // Emit AArch64 Build Attributes
  emitAttributes(BAFlags, PAuthABIPlatform, PAuthABIVersion, TS);
  // Emit a .note.gnu.property section with the flags.
  TS->emitNoteSection(GNUFlags, PAuthABIPlatform, PAuthABIVersion);
}
399
400void AArch64AsmPrinter::emitFunctionHeaderComment() {
401 const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
402 std::optional<std::string> OutlinerString = FI->getOutliningStyle();
403 if (OutlinerString != std::nullopt)
404 OutStreamer->getCommentOS() << ' ' << OutlinerString;
405}
406
407void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
408{
409 const Function &F = MF->getFunction();
410 if (F.hasFnAttribute("patchable-function-entry")) {
411 unsigned Num;
412 if (F.getFnAttribute("patchable-function-entry")
413 .getValueAsString()
414 .getAsInteger(10, Num))
415 return;
416 emitNops(Num);
417 return;
418 }
419
420 emitSled(MI, SledKind::FUNCTION_ENTER);
421}
422
/// Lower PATCHABLE_FUNCTION_EXIT by emitting an XRay FUNCTION_EXIT sled.
void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
  emitSled(MI, SledKind::FUNCTION_EXIT);
}
426
/// Lower PATCHABLE_TAIL_CALL by emitting an XRay TAIL_CALL sled.
void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
  emitSled(MI, SledKind::TAIL_CALL);
}
430
431void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
432 static const int8_t NoopsInSledCount = 7;
433 // We want to emit the following pattern:
434 //
435 // .Lxray_sled_N:
436 // ALIGN
437 // B #32
438 // ; 7 NOP instructions (28 bytes)
439 // .tmpN
440 //
441 // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
442 // over the full 32 bytes (8 instructions) with the following pattern:
443 //
444 // STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
445 // LDR W17, #12 ; W17 := function ID
446 // LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
447 // BLR X16 ; call the tracing trampoline
448 // ;DATA: 32 bits of function ID
449 // ;DATA: lower 32 bits of the address of the trampoline
450 // ;DATA: higher 32 bits of the address of the trampoline
451 // LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
452 //
453 OutStreamer->emitCodeAlignment(Align(4), &getSubtargetInfo());
454 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
455 OutStreamer->emitLabel(CurSled);
456 auto Target = OutContext.createTempSymbol();
457
458 // Emit "B #32" instruction, which jumps over the next 28 bytes.
459 // The operand has to be the number of 4-byte instructions to jump over,
460 // including the current instruction.
461 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
462
463 for (int8_t I = 0; I < NoopsInSledCount; I++)
464 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
465
466 OutStreamer->emitLabel(Target);
467 recordSled(CurSled, MI, Kind, 2);
468}
469
/// Emit the AArch64 build-attributes subsections: the (required) PAuthABI
/// platform/version pair when either is set, and the (optional)
/// feature-and-bits subsection for BTI/PAC/GCS. A value of uint64_t(-1) for
/// either PAuth field means "not provided" and is normalized to 0.
void AArch64AsmPrinter::emitAttributes(unsigned Flags,
                                       uint64_t PAuthABIPlatform,
                                       uint64_t PAuthABIVersion,
                                       AArch64TargetStreamer *TS) {

  PAuthABIPlatform = (uint64_t(-1) == PAuthABIPlatform) ? 0 : PAuthABIPlatform;
  PAuthABIVersion = (uint64_t(-1) == PAuthABIVersion) ? 0 : PAuthABIVersion;

  if (PAuthABIPlatform || PAuthABIVersion) {
    // NOTE(review): the TS->emitSubsection(...) / TS->emitAttribute(...) call
    // heads were lost in extraction; only trailing argument lines remain
    // below. Restore from upstream before building.
    AArch64BuildAttributes::SubsectionOptional::REQUIRED,
    AArch64BuildAttributes::SubsectionType::ULEB128);
    PAuthABIPlatform, "");
    "");
  }

  // NOTE(review): the initializers of the three values below (presumably
  // derived from the 'Flags' argument) were lost in extraction.
  unsigned BTIValue =
  unsigned PACValue =
  unsigned GCSValue =

  if (BTIValue || PACValue || GCSValue) {
    // NOTE(review): as above, the emitting call heads and attribute lines
    // were lost in extraction.
    AArch64BuildAttributes::SubsectionOptional::OPTIONAL,
    AArch64BuildAttributes::SubsectionType::ULEB128);
  }
}
518
519// Emit the following code for Intrinsic::{xray_customevent,xray_typedevent}
520// (built-in functions __xray_customevent/__xray_typedevent).
521//
522// .Lxray_event_sled_N:
523// b 1f
524// save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
525// set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
526// bl __xray_CustomEvent or __xray_TypedEvent
527// restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
528// 1:
529//
530// There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
531//
532// Then record a sled of kind CUSTOM_EVENT or TYPED_EVENT.
533// After patching, b .+N will become a nop.
534void AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
535 bool Typed) {
536 auto &O = *OutStreamer;
537 MCSymbol *CurSled = OutContext.createTempSymbol("xray_sled_", true);
538 O.emitLabel(CurSled);
539 bool MachO = TM.getTargetTriple().isOSBinFormatMachO();
540 auto *Sym = MCSymbolRefExpr::create(
541 OutContext.getOrCreateSymbol(
542 Twine(MachO ? "_" : "") +
543 (Typed ? "__xray_TypedEvent" : "__xray_CustomEvent")),
544 OutContext);
545 if (Typed) {
546 O.AddComment("Begin XRay typed event");
547 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(9));
548 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
549 .addReg(AArch64::SP)
550 .addReg(AArch64::X0)
551 .addReg(AArch64::X1)
552 .addReg(AArch64::SP)
553 .addImm(-4));
554 EmitToStreamer(O, MCInstBuilder(AArch64::STRXui)
555 .addReg(AArch64::X2)
556 .addReg(AArch64::SP)
557 .addImm(2));
558 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
559 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
560 emitMovXReg(AArch64::X2, MI.getOperand(2).getReg());
561 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
562 EmitToStreamer(O, MCInstBuilder(AArch64::LDRXui)
563 .addReg(AArch64::X2)
564 .addReg(AArch64::SP)
565 .addImm(2));
566 O.AddComment("End XRay typed event");
567 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
568 .addReg(AArch64::SP)
569 .addReg(AArch64::X0)
570 .addReg(AArch64::X1)
571 .addReg(AArch64::SP)
572 .addImm(4));
573
574 recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
575 } else {
576 O.AddComment("Begin XRay custom event");
577 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(6));
578 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
579 .addReg(AArch64::SP)
580 .addReg(AArch64::X0)
581 .addReg(AArch64::X1)
582 .addReg(AArch64::SP)
583 .addImm(-2));
584 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
585 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
586 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
587 O.AddComment("End XRay custom event");
588 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
589 .addReg(AArch64::SP)
590 .addReg(AArch64::X0)
591 .addReg(AArch64::X1)
592 .addReg(AArch64::SP)
593 .addImm(2));
594
595 recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
596 }
597}
598
/// Lower a KCFI_CHECK pseudo: load the type hash stored just before the
/// callee's entry point, compare it against the expected hash encoded in the
/// instruction, and emit a BRK with a register-describing ESR on mismatch.
/// The check must be immediately followed by the call it protects.
void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
  Register AddrReg = MI.getOperand(0).getReg();
  assert(std::next(MI.getIterator())->isCall() &&
         "KCFI_CHECK not followed by a call instruction");
  assert(std::next(MI.getIterator())->getOperand(0).getReg() == AddrReg &&
         "KCFI_CHECK call target doesn't match call operand");

  // Default to using the intra-procedure-call temporary registers for
  // comparing the hashes.
  unsigned ScratchRegs[] = {AArch64::W16, AArch64::W17};
  if (AddrReg == AArch64::XZR) {
    // Checking XZR makes no sense. Instead of emitting a load, zero
    // ScratchRegs[0] and use it for the ESR AddrIndex below.
    AddrReg = getXRegFromWReg(ScratchRegs[0]);
    emitMovXReg(AddrReg, AArch64::XZR);
  } else {
    // If one of the scratch registers is used for the call target (e.g.
    // with AArch64::TCRETURNriBTI), we can clobber another caller-saved
    // temporary register instead (in this case, AArch64::W9) as the check
    // is immediately followed by the call instruction.
    for (auto &Reg : ScratchRegs) {
      if (Reg == getWRegFromXReg(AddrReg)) {
        Reg = AArch64::W9;
        break;
      }
    }
    assert(ScratchRegs[0] != AddrReg && ScratchRegs[1] != AddrReg &&
           "Invalid scratch registers for KCFI_CHECK");

    // Adjust the offset for patchable-function-prefix. This assumes that
    // patchable-function-prefix is the same for all functions.
    // (getAsInteger leaves PrefixNops at 0 when the attribute is absent or
    // unparsable; the error return is deliberately ignored.)
    int64_t PrefixNops = 0;
    (void)MI.getMF()
        ->getFunction()
        .getFnAttribute("patchable-function-prefix")
        .getValueAsString()
        .getAsInteger(10, PrefixNops);

    // Load the target function type hash.
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDURWi)
                                     .addReg(ScratchRegs[0])
                                     .addReg(AddrReg)
                                     .addImm(-(PrefixNops * 4 + 4)));
  }

  // Load the expected type hash. The two MOVKs together define all 32 bits
  // of the scratch register.
  const int64_t Type = MI.getOperand(1).getImm();
  emitMOVK(ScratchRegs[1], Type & 0xFFFF, 0);
  emitMOVK(ScratchRegs[1], (Type >> 16) & 0xFFFF, 16);

  // Compare the hashes and trap if there's a mismatch.
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSWrs)
                                   .addReg(AArch64::WZR)
                                   .addReg(ScratchRegs[0])
                                   .addReg(ScratchRegs[1])
                                   .addImm(0));

  MCSymbol *Pass = OutContext.createTempSymbol();
  EmitToStreamer(*OutStreamer,
                 MCInstBuilder(AArch64::Bcc)
                     .addImm(AArch64CC::EQ)
                     .addExpr(MCSymbolRefExpr::create(Pass, OutContext)));

  // The base ESR is 0x8000 and the register information is encoded in bits
  // 0-9 as follows:
  // - 0-4: n, where the register Xn contains the target address
  // - 5-9: m, where the register Wm contains the expected type hash
  // Where n, m are in [0, 30].
  unsigned TypeIndex = ScratchRegs[1] - AArch64::W0;
  unsigned AddrIndex;
  switch (AddrReg) {
  default:
    AddrIndex = AddrReg - AArch64::X0;
    break;
  case AArch64::FP:
    AddrIndex = 29;
    break;
  case AArch64::LR:
    AddrIndex = 30;
    break;
  }

  assert(AddrIndex < 31 && TypeIndex < 31);

  unsigned ESR = 0x8000 | ((TypeIndex & 31) << 5) | (AddrIndex & 31);
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(ESR));
  OutStreamer->emitLabel(Pass);
}
687
688void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
689 Register Reg = MI.getOperand(0).getReg();
690
691 // The HWASan pass won't emit a CHECK_MEMACCESS intrinsic with a pointer
692 // statically known to be zero. However, conceivably, the HWASan pass may
693 // encounter a "cannot currently statically prove to be null" pointer (and is
694 // therefore unable to omit the intrinsic) that later optimization passes
695 // convert into a statically known-null pointer.
696 if (Reg == AArch64::XZR)
697 return;
698
699 bool IsShort =
700 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES) ||
701 (MI.getOpcode() ==
702 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
703 uint32_t AccessInfo = MI.getOperand(1).getImm();
704 bool IsFixedShadow =
705 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW) ||
706 (MI.getOpcode() ==
707 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
708 uint64_t FixedShadowOffset = IsFixedShadow ? MI.getOperand(2).getImm() : 0;
709
710 MCSymbol *&Sym = HwasanMemaccessSymbols[HwasanMemaccessTuple(
711 Reg, IsShort, AccessInfo, IsFixedShadow, FixedShadowOffset)];
712 if (!Sym) {
713 // FIXME: Make this work on non-ELF.
714 if (!TM.getTargetTriple().isOSBinFormatELF())
715 report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
716
717 std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
718 utostr(AccessInfo);
719 if (IsFixedShadow)
720 SymName += "_fixed_" + utostr(FixedShadowOffset);
721 if (IsShort)
722 SymName += "_short_v2";
723 Sym = OutContext.getOrCreateSymbol(SymName);
724 }
725
726 EmitToStreamer(*OutStreamer,
727 MCInstBuilder(AArch64::BL)
728 .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
729}
730
/// Emit the outlined HWASan check routines recorded by
/// LowerHWASAN_CHECK_MEMACCESS. Each routine compares the pointer's tag
/// against its shadow-memory tag and, on mismatch (modulo match-all tags and
/// short-granule handling), tail-calls the __hwasan_tag_mismatch runtime
/// handler. ELF-only.
void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
  if (HwasanMemaccessSymbols.empty())
    return;

  const Triple &TT = TM.getTargetTriple();
  assert(TT.isOSBinFormatELF());
  // These routines are emitted outside any function, so build a fresh MC
  // subtarget and point this->STI at it for the duration.
  std::unique_ptr<MCSubtargetInfo> STI(
      TM.getTarget().createMCSubtargetInfo(TT, "", ""));
  assert(STI && "Unable to create subtarget info");
  this->STI = static_cast<const AArch64Subtarget *>(&*STI);

  MCSymbol *HwasanTagMismatchV1Sym =
      OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
  MCSymbol *HwasanTagMismatchV2Sym =
      OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");

  const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
      MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
  const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
      MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);

  for (auto &P : HwasanMemaccessSymbols) {
    // Unpack the HwasanMemaccessTuple key.
    unsigned Reg = std::get<0>(P.first);
    bool IsShort = std::get<1>(P.first);
    uint32_t AccessInfo = std::get<2>(P.first);
    bool IsFixedShadow = std::get<3>(P.first);
    uint64_t FixedShadowOffset = std::get<4>(P.first);
    const MCSymbolRefExpr *HwasanTagMismatchRef =
        IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
    MCSymbol *Sym = P.second;

    // Decode the packed AccessInfo fields.
    bool HasMatchAllTag =
        (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
    uint8_t MatchAllTag =
        (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
    unsigned Size =
        1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
    bool CompileKernel =
        (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;

    // NOTE(review): the section-flags / group arguments of getELFSection
    // (one or more lines) were lost in extraction — restore from upstream.
    OutStreamer->switchSection(OutContext.getELFSection(
        ".text.hot", ELF::SHT_PROGBITS,
        /*IsComdat=*/true));

    OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
    OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
    OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
    OutStreamer->emitLabel(Sym);

    // x16 := shadow index derived from the tested pointer.
    EmitToStreamer(MCInstBuilder(AArch64::SBFMXri)
                       .addReg(AArch64::X16)
                       .addReg(Reg)
                       .addImm(4)
                       .addImm(55));

    if (IsFixedShadow) {
      // Aarch64 makes it difficult to embed large constants in the code.
      // Fortuitously, kShadowBaseAlignment == 32, so we use the 32-bit
      // left-shift option in the MOV instruction. Combined with the 16-bit
      // immediate, this is enough to represent any offset up to 2**48.
      emitMOVZ(AArch64::X17, FixedShadowOffset >> 32, 32);
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::X17)
                         .addReg(AArch64::X16)
                         .addImm(0)
                         .addImm(0));
    } else {
      // Non-fixed shadow: the shadow base lives in a register (x20 for the
      // short-granule ABI, x9 otherwise).
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
                         .addReg(AArch64::W16)
                         .addReg(IsShort ? AArch64::X20 : AArch64::X9)
                         .addReg(AArch64::X16)
                         .addImm(0)
                         .addImm(0));
    }

    // Compare the loaded shadow tag with the pointer's tag.
    // NOTE(review): this builder's trailing operand line (presumably
    // '.addImm(...));') was lost in extraction.
    EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
                       .addReg(AArch64::XZR)
                       .addReg(AArch64::X16)
                       .addReg(Reg)
    MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
    // NOTE(review): the '.addExpr(MCSymbolRefExpr::create(' line of this
    // builder was lost in extraction.
    EmitToStreamer(MCInstBuilder(AArch64::Bcc)
                       .addImm(AArch64CC::NE)
                       HandleMismatchOrPartialSym, OutContext)));
    // Tags match: return immediately.
    MCSymbol *ReturnSym = OutContext.createTempSymbol();
    OutStreamer->emitLabel(ReturnSym);
    EmitToStreamer(MCInstBuilder(AArch64::RET).addReg(AArch64::LR));
    OutStreamer->emitLabel(HandleMismatchOrPartialSym);

    if (HasMatchAllTag) {
      // x17 := pointer tag; if it equals the match-all tag, treat the access
      // as valid and return.
      EmitToStreamer(MCInstBuilder(AArch64::UBFMXri)
                         .addReg(AArch64::X17)
                         .addReg(Reg)
                         .addImm(56)
                         .addImm(63));
      EmitToStreamer(MCInstBuilder(AArch64::SUBSXri)
                         .addReg(AArch64::XZR)
                         .addReg(AArch64::X17)
                         .addImm(MatchAllTag)
                         .addImm(0));
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::EQ)
              .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
    }

    if (IsShort) {
      // Short-granule handling: shadow values 1..15 encode a partially
      // addressable granule; anything above 15 is a real mismatch.
      EmitToStreamer(MCInstBuilder(AArch64::SUBSWri)
                         .addReg(AArch64::WZR)
                         .addReg(AArch64::W16)
                         .addImm(15)
                         .addImm(0));
      MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::HI)
              .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));

      // x17 := offset of the last byte of the access within its granule.
      EmitToStreamer(MCInstBuilder(AArch64::ANDXri)
                         .addReg(AArch64::X17)
                         .addReg(Reg)
                         .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
      if (Size != 1)
        EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                           .addReg(AArch64::X17)
                           .addReg(AArch64::X17)
                           .addImm(Size - 1)
                           .addImm(0));
      EmitToStreamer(MCInstBuilder(AArch64::SUBSWrs)
                         .addReg(AArch64::WZR)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::W17)
                         .addImm(0));
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::LS)
              .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));

      // Compare the pointer's tag byte (stored in the granule's last byte)
      // against the pointer tag.
      EmitToStreamer(MCInstBuilder(AArch64::ORRXri)
                         .addReg(AArch64::X16)
                         .addReg(Reg)
                         .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBui)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::X16)
                         .addImm(0));
      // NOTE(review): this builder's trailing operand line (presumably
      // '.addImm(...));') was lost in extraction.
      EmitToStreamer(
          MCInstBuilder(AArch64::SUBSXrs)
              .addReg(AArch64::XZR)
              .addReg(AArch64::X16)
              .addReg(Reg)
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::EQ)
              .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));

      OutStreamer->emitLabel(HandleMismatchSym);
    }

    // Real mismatch: spill x0/x1 and the frame record, then hand off to the
    // runtime with x0 = faulting pointer, x1 = access info.
    EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
                       .addReg(AArch64::SP)
                       .addReg(AArch64::X0)
                       .addReg(AArch64::X1)
                       .addReg(AArch64::SP)
                       .addImm(-32));
    EmitToStreamer(MCInstBuilder(AArch64::STPXi)
                       .addReg(AArch64::FP)
                       .addReg(AArch64::LR)
                       .addReg(AArch64::SP)
                       .addImm(29));

    if (Reg != AArch64::X0)
      emitMovXReg(AArch64::X0, Reg);
    emitMOVZ(AArch64::X1, AccessInfo & HWASanAccessInfo::RuntimeMask, 0);

    if (CompileKernel) {
      // The Linux kernel's dynamic loader doesn't support GOT relative
      // relocations, but it doesn't support late binding either, so just call
      // the function directly.
      EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef));
    } else {
      // Intentionally load the GOT entry and branch to it, rather than possibly
      // late binding the function, which may clobber the registers before we
      // have a chance to save them.
      // NOTE(review): the relocation-specifier argument lines of the two
      // MCSpecifierExpr::create calls below were lost in extraction —
      // restore from upstream.
      EmitToStreamer(MCInstBuilder(AArch64::ADRP)
                         .addReg(AArch64::X16)
                         .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
                                                          OutContext)));
      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X16)
                         .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
                                                          OutContext)));
      EmitToStreamer(MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
    }
  }
  // Restore: the temporary MC subtarget dies with this scope.
  this->STI = nullptr;
}
935
/// Emit one authenticated-pointer stub: the stub's label followed by its
/// 8-byte signed-pointer expression.
static void emitAuthenticatedPointer(MCStreamer &OutStreamer,
                                     MCSymbol *StubLabel,
                                     const MCExpr *StubAuthPtrRef) {
  // sym$auth_ptr$key$disc:
  OutStreamer.emitLabel(StubLabel);
  OutStreamer.emitValue(StubAuthPtrRef, /*size=*/8);
}
943
// Module epilogue: flush HWASan check symbols, emit authenticated-pointer
// stubs (Mach-O and ELF), apply pauth ELF GOT symbol-type fixups, and emit
// the COFF import-call-optimization section when enabled.
void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
  emitHwasanMemaccessSymbols(M);

  const Triple &TT = TM.getTargetTriple();
  if (TT.isOSBinFormatMachO()) {
    // Output authenticated pointers as indirect symbols, if we have any.
    MachineModuleInfoMachO &MMIMacho =
        MMI->getObjFileInfo<MachineModuleInfoMachO>();

    auto Stubs = MMIMacho.getAuthGVStubList();

    if (!Stubs.empty()) {
      // Switch to the "__auth_ptr" section.
      OutStreamer->switchSection(
          OutContext.getMachOSection("__DATA", "__auth_ptr", MachO::S_REGULAR,
      emitAlignment(Align(8));

      // One label + 8-byte signed pointer per stub.
      for (const auto &Stub : Stubs)
        emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);

      OutStreamer->addBlankLine();
    }

    // Funny Darwin hack: This flag tells the linker that no global symbols
    // contain code that falls through to other global symbols (e.g. the obvious
    // implementation of multiple entry points). If this doesn't occur, the
    // linker can safely perform dead code stripping. Since LLVM never
    // generates code that does this, it is always safe to set.
    OutStreamer->emitSubsectionsViaSymbols();
  }

  if (TT.isOSBinFormatELF()) {
    // Output authenticated pointers as indirect symbols, if we have any.
    MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();

    auto Stubs = MMIELF.getAuthGVStubList();

    if (!Stubs.empty()) {
      const TargetLoweringObjectFile &TLOF = getObjFileLowering();
      OutStreamer->switchSection(TLOF.getDataSection());
      emitAlignment(Align(8));

      for (const auto &Stub : Stubs)
        emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);

      OutStreamer->addBlankLine();
    }

    // With signed ELF GOT enabled, the linker looks at the symbol type to
    // choose between keys IA (for STT_FUNC) and DA (for other types). Symbols
    // for functions not defined in the module have STT_NOTYPE type by default.
    // This makes linker to emit signing schema with DA key (instead of IA) for
    // corresponding R_AARCH64_AUTH_GLOB_DAT dynamic reloc. To avoid that, force
    // all function symbols used in the module to have STT_FUNC type. See
    // https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#default-signing-schema
    const auto *PtrAuthELFGOTFlag = mdconst::extract_or_null<ConstantInt>(
        M.getModuleFlag("ptrauth-elf-got"));
    if (PtrAuthELFGOTFlag && PtrAuthELFGOTFlag->getZExtValue() == 1)
      for (const GlobalValue &GV : M.global_values())
        if (!GV.use_empty() && isa<Function>(GV) &&
            !GV.getName().starts_with("llvm."))
          OutStreamer->emitSymbolAttribute(getSymbol(&GV),
  }

  // Emit stack and fault map information.

  // If import call optimization is enabled, emit the appropriate section.
  // We do this whether or not we recorded any import calls.
  if (EnableImportCallOptimization && TT.isOSBinFormatCOFF()) {
    OutStreamer->switchSection(getObjFileLowering().getImportCallSection());

    // Section always starts with some magic.
    constexpr char ImpCallMagic[12] = "Imp_Call_V1";
    OutStreamer->emitBytes(StringRef{ImpCallMagic, sizeof(ImpCallMagic)});

    // Layout of this section is:
    // Per section that contains calls to imported functions:
    //  uint32_t SectionSize: Size in bytes for information in this section.
    //  uint32_t Section Number
    //  Per call to imported function in section:
    //    uint32_t Kind: the kind of imported function.
    //    uint32_t BranchOffset: the offset of the branch instruction in its
    //                            parent section.
    //    uint32_t TargetSymbolId: the symbol id of the called function.
    for (auto &[Section, CallsToImportedFuncs] :
         SectionToImportedFunctionCalls) {
      // 2 header words plus 3 words per recorded call site.
      unsigned SectionSize =
          sizeof(uint32_t) * (2 + 3 * CallsToImportedFuncs.size());
      OutStreamer->emitInt32(SectionSize);
      OutStreamer->emitCOFFSecNumber(Section->getBeginSymbol());
      for (auto &[CallsiteSymbol, CalledSymbol] : CallsToImportedFuncs) {
        // Kind is always IMAGE_REL_ARM64_DYNAMIC_IMPORT_CALL (0x13).
        OutStreamer->emitInt32(0x13);
        OutStreamer->emitCOFFSecOffset(CallsiteSymbol);
        OutStreamer->emitCOFFSymbolIndex(CalledSymbol);
      }
    }
  }
}
1046
// Emit the Linker Optimization Hint (LOH) directives collected for the
// current function, mapping each recorded MachineInstr to the MC label
// that was emitted for it during instruction emission.
void AArch64AsmPrinter::emitLOHs() {

  for (const auto &D : AArch64FI->getLOHContainer()) {
    for (const MachineInstr *MI : D.getArgs()) {
      MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
      assert(LabelIt != LOHInstToLabel.end() &&
             "Label hasn't been inserted for LOH related instruction");
      MCArgs.push_back(LabelIt->second);
    }
    // One directive per LOH; the argument vector is reused across hints.
    OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
    MCArgs.clear();
  }
}
1061
1062void AArch64AsmPrinter::emitFunctionBodyEnd() {
1063 if (!AArch64FI->getLOHRelated().empty())
1064 emitLOHs();
1065}
1066
1067/// GetCPISymbol - Return the symbol for the specified constant pool entry.
1068MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
1069 // Darwin uses a linker-private symbol name for constant-pools (to
1070 // avoid addends on the relocation?), ELF has no such concept and
1071 // uses a normal private symbol.
1072 if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
1073 return OutContext.getOrCreateSymbol(
1074 Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
1075 Twine(getFunctionNumber()) + "_" + Twine(CPID));
1076
1077 return AsmPrinter::GetCPISymbol(CPID);
1078}
1079
// Print machine operand OpNum of MI to O for inline-asm / comment output.
// Handles register, immediate, symbol, and block-address operands; any
// other kind is a fatal error.
void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
                                     raw_ostream &O) {
  const MachineOperand &MO = MI->getOperand(OpNum);
  switch (MO.getType()) {
  default:
    llvm_unreachable("<unknown operand type>");
    Register Reg = MO.getReg();
    // Virtual registers and subregisters must be gone by emission time.
    assert(!MO.getSubReg() && "Subregs should be eliminated!");
    break;
  }
    O << MO.getImm();
    break;
  }
    PrintSymbolOperand(MO, O);
    break;
  }
    MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
    Sym->print(O, MAI);
    break;
  }
  }
}
1108
// Print the register in MO under an inline-asm size modifier:
//   'w' = 32-bit W form, 'x' = 64-bit X form, 't' = first X register of
//   an X-register tuple.
// Returns true for an unknown modifier (AsmPrinter error convention),
// false on success.
bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
                                          raw_ostream &O) {
  Register Reg = MO.getReg();
  switch (Mode) {
  default:
    return true; // Unknown mode.
  case 'w':
    break;
  case 'x':
    break;
  case 't':
    break;
  }

  return false;
}
1129
1130// Prints the register in MO using class RC using the offset in the
1131// new register class. This should not be used for cross class
1132// printing.
1133bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
1134 const TargetRegisterClass *RC,
1135 unsigned AltName, raw_ostream &O) {
1136 assert(MO.isReg() && "Should only get here with a register!");
1137 const TargetRegisterInfo *RI = STI->getRegisterInfo();
1138 Register Reg = MO.getReg();
1139 MCRegister RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
1140 if (!RI->regsOverlap(RegToPrint, Reg))
1141 return true;
1142 O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
1143 return false;
1144}
1145
// Print inline-asm operand OpNum of MI, honoring an optional single-letter
// size modifier ('w'/'x' for GPRs, 'b'/'h'/'s'/'d'/'q'/'z' for FP/SVE).
// Generic modifiers are delegated to the base AsmPrinter first. Returns
// true on error (unknown modifier), false on success.
bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
                                        const char *ExtraCode, raw_ostream &O) {
  const MachineOperand &MO = MI->getOperand(OpNum);

  // First try the generic code, which knows about modifiers like 'c' and 'n'.
  if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
    return false;

  // Does this asm operand have a single letter operand modifier?
  if (ExtraCode && ExtraCode[0]) {
    if (ExtraCode[1] != 0)
      return true; // Unknown modifier.

    switch (ExtraCode[0]) {
    default:
      return true; // Unknown modifier.
    case 'w': // Print W register
    case 'x': // Print X register
      if (MO.isReg())
        return printAsmMRegister(MO, ExtraCode[0], O);
      if (MO.isImm() && MO.getImm() == 0) {
        // Immediate zero is printed as the zero register of the right width.
        unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
        return false;
      }
      printOperand(MI, OpNum, O);
      return false;
    case 'b': // Print B register.
    case 'h': // Print H register.
    case 's': // Print S register.
    case 'd': // Print D register.
    case 'q': // Print Q register.
    case 'z': // Print Z register.
      if (MO.isReg()) {
        const TargetRegisterClass *RC;
        switch (ExtraCode[0]) {
        case 'b':
          RC = &AArch64::FPR8RegClass;
          break;
        case 'h':
          RC = &AArch64::FPR16RegClass;
          break;
        case 's':
          RC = &AArch64::FPR32RegClass;
          break;
        case 'd':
          RC = &AArch64::FPR64RegClass;
          break;
        case 'q':
          RC = &AArch64::FPR128RegClass;
          break;
        case 'z':
          RC = &AArch64::ZPRRegClass;
          break;
        default:
          return true;
        }
        return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
      }
      printOperand(MI, OpNum, O);
      return false;
    }
  }

  // According to ARM, we should emit x and v registers unless we have a
  // modifier.
  if (MO.isReg()) {
    Register Reg = MO.getReg();

    // If this is a w or x register, print an x register.
    if (AArch64::GPR32allRegClass.contains(Reg) ||
        AArch64::GPR64allRegClass.contains(Reg))
      return printAsmMRegister(MO, 'x', O);

    // If this is an x register tuple, print an x register.
    if (AArch64::GPR64x8ClassRegClass.contains(Reg))
      return printAsmMRegister(MO, 't', O);

    unsigned AltName = AArch64::NoRegAltName;
    const TargetRegisterClass *RegClass;
    if (AArch64::ZPRRegClass.contains(Reg)) {
      RegClass = &AArch64::ZPRRegClass;
    } else if (AArch64::PPRRegClass.contains(Reg)) {
      RegClass = &AArch64::PPRRegClass;
    } else if (AArch64::PNRRegClass.contains(Reg)) {
      RegClass = &AArch64::PNRRegClass;
    } else {
      RegClass = &AArch64::FPR128RegClass;
      AltName = AArch64::vreg;
    }

    // If this is a b, h, s, d, or q register, print it as a v register.
    return printAsmRegInClass(MO, RegClass, AltName, O);
  }

  printOperand(MI, OpNum, O);
  return false;
}
1244
1245bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
1246 unsigned OpNum,
1247 const char *ExtraCode,
1248 raw_ostream &O) {
1249 if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
1250 return true; // Unknown modifier.
1251
1252 const MachineOperand &MO = MI->getOperand(OpNum);
1253 assert(MO.isReg() && "unexpected inline asm memory operand");
1254 O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
1255 return false;
1256}
1257
1258void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
1259 raw_ostream &OS) {
1260 unsigned NOps = MI->getNumOperands();
1261 assert(NOps == 4);
1262 OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
1263 // cast away const; DIetc do not take const operands for some reason.
1264 OS << MI->getDebugVariable()->getName();
1265 OS << " <- ";
1266 // Frame address. Currently handles register +- offset only.
1267 assert(MI->isIndirectDebugValue());
1268 OS << '[';
1269 for (unsigned I = 0, E = std::distance(MI->debug_operands().begin(),
1270 MI->debug_operands().end());
1271 I < E; ++I) {
1272 if (I != 0)
1273 OS << ", ";
1274 printOperand(MI, I, OS);
1275 }
1276 OS << ']';
1277 OS << "+";
1278 printOperand(MI, NOps - 2, OS);
1279}
1280
// Emit the requested jump tables into the appropriate read-only section.
// Entries are PC-relative differences from a per-table base label; byte and
// halfword tables store the difference shifted right by 2 (instructions are
// 4-byte aligned), word tables store the raw difference.
void AArch64AsmPrinter::emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
                                          ArrayRef<unsigned> JumpTableIndices) {
  // Fast return if there is nothing to emit to avoid creating empty sections.
  if (JumpTableIndices.empty())
    return;
  const TargetLoweringObjectFile &TLOF = getObjFileLowering();
  const auto &F = MF->getFunction();

  MCSection *ReadOnlySec = nullptr;
  if (TM.Options.EnableStaticDataPartitioning) {
    // Partitioned static data: pick the section based on the first table.
    ReadOnlySec =
        TLOF.getSectionForJumpTable(F, TM, &JT[JumpTableIndices.front()]);
  } else {
    ReadOnlySec = TLOF.getSectionForJumpTable(F, TM);
  }
  OutStreamer->switchSection(ReadOnlySec);

  auto AFI = MF->getInfo<AArch64FunctionInfo>();
  for (unsigned JTI : JumpTableIndices) {
    const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;

    // If this jump table was deleted, ignore it.
    if (JTBBs.empty()) continue;

    unsigned Size = AFI->getJumpTableEntrySize(JTI);
    emitAlignment(Align(Size));
    OutStreamer->emitLabel(GetJTISymbol(JTI));

    const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
    const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);

    for (auto *JTBB : JTBBs) {
      const MCExpr *Value =
          MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);

      // Each entry is:
      //     .byte/.hword (LBB - Lbase)>>2
      // or plain:
      //     .word LBB - Lbase
      Value = MCBinaryExpr::createSub(Value, Base, OutContext);
      if (Size != 4)
            Value, MCConstantExpr::create(2, OutContext), OutContext);

      OutStreamer->emitValue(Value, Size);
    }
  }
}
1330
// Describe a jump table for CodeView debug info: base symbol, base offset
// (always 0 here), the branch label, and the CodeView entry-size encoding
// matching AArch64's 1/2/4-byte (shifted/plain) table entries.
std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
AArch64AsmPrinter::getCodeViewJumpTableInfo(int JTI,
                                            const MachineInstr *BranchInstr,
                                            const MCSymbol *BranchLabel) const {
  const auto AFI = MF->getInfo<AArch64FunctionInfo>();
  const auto Base = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
  switch (AFI->getJumpTableEntrySize(JTI)) {
  case 1:
    EntrySize = codeview::JumpTableEntrySize::UInt8ShiftLeft;
    break;
  case 2:
    EntrySize = codeview::JumpTableEntrySize::UInt16ShiftLeft;
    break;
  case 4:
    EntrySize = codeview::JumpTableEntrySize::Int32;
    break;
  default:
    llvm_unreachable("Unexpected jump table entry size");
  }
  return std::make_tuple(Base, 0, BranchLabel, EntrySize);
}
1354
// Emit the function entry label plus target decorations: a .variant_pcs
// directive on ELF for vector-call / SVE calling conventions, and, on
// Arm64EC, the weak anti-dependency aliases tying the mangled and
// unmangled names of an exported function together.
void AArch64AsmPrinter::emitFunctionEntryLabel() {
  const Triple &TT = TM.getTargetTriple();
  if (TT.isOSBinFormatELF() &&
      (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
       MF->getFunction().getCallingConv() ==
           CallingConv::AArch64_SVE_VectorCall ||
       MF->getInfo<AArch64FunctionInfo>()->isSVECC())) {
    auto *TS =
        static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
    TS->emitDirectiveVariantPCS(CurrentFnSym);
  }


  if (TT.isWindowsArm64EC() && !MF->getFunction().hasLocalLinkage()) {
    // For ARM64EC targets, a function definition's name is mangled differently
    // from the normal symbol, emit required aliases here.
    auto emitFunctionAlias = [&](MCSymbol *Src, MCSymbol *Dst) {
      OutStreamer->emitSymbolAttribute(Src, MCSA_WeakAntiDep);
      OutStreamer->emitAssignment(
          Src, MCSymbolRefExpr::create(Dst, MMI->getContext()));
    };

    // Resolve a symbol name stored in a single-operand MDString metadata
    // node on the function; returns nullptr if the metadata is absent.
    auto getSymbolFromMetadata = [&](StringRef Name) {
      MCSymbol *Sym = nullptr;
      if (MDNode *Node = MF->getFunction().getMetadata(Name)) {
        StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
        Sym = MMI->getContext().getOrCreateSymbol(NameStr);
      }
      return Sym;
    };

    SmallVector<MDNode *> UnmangledNames;
    MF->getFunction().getMetadata("arm64ec_unmangled_name", UnmangledNames);
    for (MDNode *Node : UnmangledNames) {
      StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
      MCSymbol *UnmangledSym = MMI->getContext().getOrCreateSymbol(NameStr);
      if (std::optional<std::string> MangledName =
              getArm64ECMangledFunctionName(UnmangledSym->getName())) {
        MCSymbol *ECMangledSym =
            MMI->getContext().getOrCreateSymbol(*MangledName);
        emitFunctionAlias(UnmangledSym, ECMangledSym);
      }
    }
    if (MCSymbol *ECMangledSym =
            getSymbolFromMetadata("arm64ec_ecmangled_name"))
      emitFunctionAlias(ECMangledSym, CurrentFnSym);
  }
}
1404
// Emit one ctors/dtors array entry. Signed (ptrauth) entries are only
// permitted with the special ctors/dtors address discriminator; any other
// address-discriminated constant is rejected.
void AArch64AsmPrinter::emitXXStructor(const DataLayout &DL,
                                       const Constant *CV) {
  if (const auto *CPA = dyn_cast<ConstantPtrAuth>(CV))
    if (CPA->hasAddressDiscriminator() &&
        !CPA->hasSpecialAddressDiscriminator(
            "unexpected address discrimination value for ctors/dtors entry, only "
            "'ptr inttoptr (i64 1 to ptr)' is allowed");
  // If we have signed pointers in xxstructors list, they'll be lowered to @AUTH
  // MCExpr's via AArch64AsmPrinter::lowerConstantPtrAuth. It does not look at
  // actual address discrimination value and only checks
  // hasAddressDiscriminator(), so it's OK to leave special address
  // discrimination value here.
}
1421
// Emit a global alias. On Arm64EC, an alias of a function carrying
// "arm64ec_exp_name" metadata is redirected to the "EXP+"-prefixed
// undefined symbol instead of a local definition (see comment below).
void AArch64AsmPrinter::emitGlobalAlias(const Module &M,
                                        const GlobalAlias &GA) {
  if (auto F = dyn_cast_or_null<Function>(GA.getAliasee())) {
    // Global aliases must point to a definition, but unmangled patchable
    // symbols are special and need to point to an undefined symbol with "EXP+"
    // prefix. Such undefined symbol is resolved by the linker by creating
    // x86 thunk that jumps back to the actual EC target.
    if (MDNode *Node = F->getMetadata("arm64ec_exp_name")) {
      StringRef ExpStr = cast<MDString>(Node->getOperand(0))->getString();
      MCSymbol *ExpSym = MMI->getContext().getOrCreateSymbol(ExpStr);
      MCSymbol *Sym = MMI->getContext().getOrCreateSymbol(GA.getName());

      OutStreamer->beginCOFFSymbolDef(ExpSym);
      OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
      OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
      OutStreamer->endCOFFSymbolDef();

      OutStreamer->beginCOFFSymbolDef(Sym);
      OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
      OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
      OutStreamer->endCOFFSymbolDef();
      OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
      OutStreamer->emitAssignment(
          Sym, MCSymbolRefExpr::create(ExpSym, MMI->getContext()));
      return;
    }
  }
}
1453
1454/// Small jump tables contain an unsigned byte or half, representing the offset
1455/// from the lowest-addressed possible destination to the desired basic
1456/// block. Since all instructions are 4-byte aligned, this is further compressed
1457/// by counting in instructions rather than bytes (i.e. divided by 4). So, to
1458/// materialize the correct destination we need:
1459///
1460/// adr xDest, .LBB0_0
1461/// ldrb wScratch, [xTable, xEntry] (with "lsl #1" for ldrh).
1462/// add xDest, xDest, xScratch (with "lsl #2" for smaller entries)
1463void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
1464 const llvm::MachineInstr &MI) {
1465 Register DestReg = MI.getOperand(0).getReg();
1466 Register ScratchReg = MI.getOperand(1).getReg();
1467 Register ScratchRegW =
1468 STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
1469 Register TableReg = MI.getOperand(2).getReg();
1470 Register EntryReg = MI.getOperand(3).getReg();
1471 int JTIdx = MI.getOperand(4).getIndex();
1472 int Size = AArch64FI->getJumpTableEntrySize(JTIdx);
1473
1474 // This has to be first because the compression pass based its reachability
1475 // calculations on the start of the JumpTableDest instruction.
1476 auto Label =
1477 MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);
1478
1479 // If we don't already have a symbol to use as the base, use the ADR
1480 // instruction itself.
1481 if (!Label) {
1482 Label = MF->getContext().createTempSymbol();
1483 AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
1484 OutStreamer.emitLabel(Label);
1485 }
1486
1487 auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
1488 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
1489 .addReg(DestReg)
1490 .addExpr(LabelExpr));
1491
1492 // Load the number of instruction-steps to offset from the label.
1493 unsigned LdrOpcode;
1494 switch (Size) {
1495 case 1: LdrOpcode = AArch64::LDRBBroX; break;
1496 case 2: LdrOpcode = AArch64::LDRHHroX; break;
1497 case 4: LdrOpcode = AArch64::LDRSWroX; break;
1498 default:
1499 llvm_unreachable("Unknown jump table size");
1500 }
1501
1502 EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
1503 .addReg(Size == 4 ? ScratchReg : ScratchRegW)
1504 .addReg(TableReg)
1505 .addReg(EntryReg)
1506 .addImm(0)
1507 .addImm(Size == 1 ? 0 : 1));
1508
1509 // Add to the already materialized base label address, multiplying by 4 if
1510 // compressed.
1511 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1512 .addReg(DestReg)
1513 .addReg(DestReg)
1514 .addReg(ScratchReg)
1515 .addImm(Size == 4 ? 0 : 2));
1516}
1517
// Lower the hardened jump-table dispatch pseudo: bounds-check the index in
// x16 against the table size (selecting entry #0 on overflow, see the CSEL
// below), then load the entry and compute/branch to the target. The exact
// instruction sequence is emitted manually; do not reorder the emissions.
void AArch64AsmPrinter::LowerHardenedBRJumpTable(const MachineInstr &MI) {
  const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
  assert(MJTI && "Can't lower jump-table dispatch without JTI");

  const std::vector<MachineJumpTableEntry> &JTs = MJTI->getJumpTables();
  assert(!JTs.empty() && "Invalid JT index for jump-table dispatch");

  // Emit:
  //     mov x17, #<size of table>     ; depending on table size, with MOVKs
  //     cmp x16, x17                  ; or #imm if table size fits in 12-bit
  //     csel x16, x16, xzr, ls        ; check for index overflow
  //
  //     adrp x17, Ltable@PAGE         ; materialize table address
  //     add x17, Ltable@PAGEOFF
  //     ldrsw x16, [x17, x16, lsl #2] ; load table entry
  //
  //   Lanchor:
  //     adr x17, Lanchor              ; compute target address
  //     add x16, x17, x16
  //     br x16                        ; branch to target

  MachineOperand JTOp = MI.getOperand(0);

  unsigned JTI = JTOp.getIndex();
  assert(!AArch64FI->getJumpTableEntryPCRelSymbol(JTI) &&
         "unsupported compressed jump table");

  const uint64_t NumTableEntries = JTs[JTI].MBBs.size();

  // cmp only supports a 12-bit immediate. If we need more, materialize the
  // immediate, using x17 as a scratch register.
  uint64_t MaxTableEntry = NumTableEntries - 1;
  if (isUInt<12>(MaxTableEntry)) {
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXri)
                                     .addReg(AArch64::XZR)
                                     .addReg(AArch64::X16)
                                     .addImm(MaxTableEntry)
                                     .addImm(0));
  } else {
    emitMOVZ(AArch64::X17, static_cast<uint16_t>(MaxTableEntry), 0);
    // It's sad that we have to manually materialize instructions, but we can't
    // trivially reuse the main pseudo expansion logic.
    // A MOVK sequence is easy enough to generate and handles the general case.
    for (int Offset = 16; Offset < 64; Offset += 16) {
      if ((MaxTableEntry >> Offset) == 0)
        break;
      emitMOVK(AArch64::X17, static_cast<uint16_t>(MaxTableEntry >> Offset),
               Offset);
    }
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXrs)
                                     .addReg(AArch64::XZR)
                                     .addReg(AArch64::X16)
                                     .addReg(AArch64::X17)
                                     .addImm(0));
  }

  // This picks entry #0 on failure.
  // We might want to trap instead.
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::CSELXr)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::XZR)
                                   .addImm(AArch64CC::LS));

  // Prepare the @PAGE/@PAGEOFF low/high operands.
  MachineOperand JTMOHi(JTOp), JTMOLo(JTOp);
  MCOperand JTMCHi, JTMCLo;

  JTMOHi.setTargetFlags(AArch64II::MO_PAGE);
  JTMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

  MCInstLowering.lowerOperand(JTMOHi, JTMCHi);
  MCInstLowering.lowerOperand(JTMOLo, JTMCLo);

  EmitToStreamer(
      *OutStreamer,
      MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(JTMCHi));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXri)
                                   .addReg(AArch64::X17)
                                   .addReg(AArch64::X17)
                                   .addOperand(JTMCLo)
                                   .addImm(0));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWroX)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::X17)
                                   .addReg(AArch64::X16)
                                   .addImm(0)
                                   .addImm(1));

  // Anchor label for the PC-relative target computation; also recorded as
  // this table's PC-relative base symbol (entry size 4).
  MCSymbol *AdrLabel = MF->getContext().createTempSymbol();
  const auto *AdrLabelE = MCSymbolRefExpr::create(AdrLabel, MF->getContext());
  AArch64FI->setJumpTableEntryInfo(JTI, 4, AdrLabel);

  OutStreamer->emitLabel(AdrLabel);
  EmitToStreamer(
      *OutStreamer,
      MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addExpr(AdrLabelE));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::X17)
                                   .addReg(AArch64::X16)
                                   .addImm(0));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
}
1626
1627void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
1628 const llvm::MachineInstr &MI) {
1629 unsigned Opcode = MI.getOpcode();
1630 assert(STI->hasMOPS());
1631 assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
1632
1633 const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
1634 if (Opcode == AArch64::MOPSMemoryCopyPseudo)
1635 return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
1636 if (Opcode == AArch64::MOPSMemoryMovePseudo)
1637 return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
1638 if (Opcode == AArch64::MOPSMemorySetPseudo)
1639 return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
1640 if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
1641 return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
1642 llvm_unreachable("Unhandled memory operation pseudo");
1643 }();
1644 const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
1645 Opcode == AArch64::MOPSMemorySetTaggingPseudo;
1646
1647 for (auto Op : Ops) {
1648 int i = 0;
1649 auto MCIB = MCInstBuilder(Op);
1650 // Destination registers
1651 MCIB.addReg(MI.getOperand(i++).getReg());
1652 MCIB.addReg(MI.getOperand(i++).getReg());
1653 if (!IsSet)
1654 MCIB.addReg(MI.getOperand(i++).getReg());
1655 // Input registers
1656 MCIB.addReg(MI.getOperand(i++).getReg());
1657 MCIB.addReg(MI.getOperand(i++).getReg());
1658 MCIB.addReg(MI.getOperand(i++).getReg());
1659
1660 EmitToStreamer(OutStreamer, MCIB);
1661 }
1662}
1663
// Lower a STACKMAP pseudo: record the stackmap at a fresh label, then emit
// the requested shadow as NOPs, trimming it by any following instructions
// that already provide shadow bytes (until a call/patch-related op).
void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
                                      const MachineInstr &MI) {
  unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();

  auto &Ctx = OutStreamer.getContext();
  MCSymbol *MILabel = Ctx.createTempSymbol();
  OutStreamer.emitLabel(MILabel);

  SM.recordStackMap(*MILabel, MI);
  assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");

  // Scan ahead to trim the shadow.
  const MachineBasicBlock &MBB = *MI.getParent();
  ++MII;
  while (NumNOPBytes > 0) {
    if (MII == MBB.end() || MII->isCall() ||
        MII->getOpcode() == AArch64::DBG_VALUE ||
        MII->getOpcode() == TargetOpcode::PATCHPOINT ||
        MII->getOpcode() == TargetOpcode::STACKMAP)
      break;
    ++MII;
    NumNOPBytes -= 4;
  }

  // Emit nops.
  for (unsigned i = 0; i < NumNOPBytes; i += 4)
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
}
1693
1694// Lower a patchpoint of the form:
1695// [<def>], <id>, <numBytes>, <target>, <numArgs>
1696void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1697 const MachineInstr &MI) {
1698 auto &Ctx = OutStreamer.getContext();
1699 MCSymbol *MILabel = Ctx.createTempSymbol();
1700 OutStreamer.emitLabel(MILabel);
1701 SM.recordPatchPoint(*MILabel, MI);
1702
1703 PatchPointOpers Opers(&MI);
1704
1705 int64_t CallTarget = Opers.getCallTarget().getImm();
1706 unsigned EncodedBytes = 0;
1707 if (CallTarget) {
1708 assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
1709 "High 16 bits of call target should be zero.");
1710 Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
1711 EncodedBytes = 16;
1712 // Materialize the jump address:
1713 emitMOVZ(ScratchReg, (CallTarget >> 32) & 0xFFFF, 32);
1714 emitMOVK(ScratchReg, (CallTarget >> 16) & 0xFFFF, 16);
1715 emitMOVK(ScratchReg, CallTarget & 0xFFFF, 0);
1716 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
1717 }
1718 // Emit padding.
1719 unsigned NumBytes = Opers.getNumPatchBytes();
1720 assert(NumBytes >= EncodedBytes &&
1721 "Patchpoint can't request size less than the length of a call.");
1722 assert((NumBytes - EncodedBytes) % 4 == 0 &&
1723 "Invalid number of NOP bytes requested!");
1724 for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
1725 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1726}
1727
// Lower a STATEPOINT pseudo: either emit the requested number of patchable
// NOPs, or lower the call target (symbol/immediate -> BL, register -> BLR)
// and emit the call. The statepoint is recorded at a label placed *after*
// the call so it identifies the return address.
void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                                        const MachineInstr &MI) {
  StatepointOpers SOpers(&MI);
  if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
    assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    for (unsigned i = 0; i < PatchBytes; i += 4)
      EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
  } else {
    // Lower call target and choose correct opcode
    const MachineOperand &CallTarget = SOpers.getCallTarget();
    MCOperand CallTargetMCOp;
    unsigned CallOpcode;
    switch (CallTarget.getType()) {
      MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
      CallOpcode = AArch64::BL;
      break;
      CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
      CallOpcode = AArch64::BL;
      break;
      CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
      CallOpcode = AArch64::BLR;
      break;
    default:
      llvm_unreachable("Unsupported operand type in statepoint call target");
      break;
    }

    EmitToStreamer(OutStreamer,
                   MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
  }

  auto &Ctx = OutStreamer.getContext();
  MCSymbol *MILabel = Ctx.createTempSymbol();
  OutStreamer.emitLabel(MILabel);
  SM.recordStatepoint(*MILabel, MI);
}
1768
// Lower a FAULTING_OP pseudo: record the faulting location and its handler
// in the fault map, then re-emit the wrapped instruction with its original
// opcode and remaining operands.
void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
  // FAULTING_LOAD_OP <def>, <faulting type>, <MBB handler>,
  //                  <opcode>, <operands>

  Register DefRegister = FaultingMI.getOperand(0).getReg();
      static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
  MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
  unsigned Opcode = FaultingMI.getOperand(3).getImm();
  unsigned OperandsBeginIdx = 4;

  auto &Ctx = OutStreamer->getContext();
  MCSymbol *FaultingLabel = Ctx.createTempSymbol();
  OutStreamer->emitLabel(FaultingLabel);

  assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
  FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);

  // Rebuild the real instruction from the pseudo's trailing operands.
  MCInst MI;
  MI.setOpcode(Opcode);

  if (DefRegister != (Register)0)
    MI.addOperand(MCOperand::createReg(DefRegister));

  for (const MachineOperand &MO :
       llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx)) {
    MCOperand Dest;
    lowerOperand(MO, Dest);
    MI.addOperand(Dest);
  }

  OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
  EmitToStreamer(MI);
}
1803
1804void AArch64AsmPrinter::emitMovXReg(Register Dest, Register Src) {
1805 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ORRXrs)
1806 .addReg(Dest)
1807 .addReg(AArch64::XZR)
1808 .addReg(Src)
1809 .addImm(0));
1810}
1811
1812void AArch64AsmPrinter::emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift) {
1813 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1814 EmitToStreamer(*OutStreamer,
1815 MCInstBuilder(Is64Bit ? AArch64::MOVZXi : AArch64::MOVZWi)
1816 .addReg(Dest)
1817 .addImm(Imm)
1818 .addImm(Shift));
1819}
1820
1821void AArch64AsmPrinter::emitMOVK(Register Dest, uint64_t Imm, unsigned Shift) {
1822 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1823 EmitToStreamer(*OutStreamer,
1824 MCInstBuilder(Is64Bit ? AArch64::MOVKXi : AArch64::MOVKWi)
1825 .addReg(Dest)
1826 .addReg(Dest)
1827 .addImm(Imm)
1828 .addImm(Shift));
1829}
1830
// Materialize FP +0.0 into the destination of an FMOVH0/FMOVS0/FMOVD0
// pseudo. When NEON is available (and the zeroing-FP workaround is off),
// prefer a vector MOVI into the widened D/Q register for zero-cycle
// zeroing; otherwise fall back to an integer->FP FMOV from WZR/XZR.
void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
  Register DestReg = MI.getOperand(0).getReg();
  if (!STI->hasZeroCycleZeroingFPWorkaround() && STI->isNeonAvailable()) {
    if (STI->hasZeroCycleZeroingFPR64()) {
      // Convert H/S register to corresponding D register
      const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
      if (AArch64::FPR16RegClass.contains(DestReg))
        DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
                                           &AArch64::FPR64RegClass);
      else if (AArch64::FPR32RegClass.contains(DestReg))
        DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
                                           &AArch64::FPR64RegClass);
      else
        assert(AArch64::FPR64RegClass.contains(DestReg));

      MCInst MOVI;
      MOVI.setOpcode(AArch64::MOVID);
      MOVI.addOperand(MCOperand::createReg(DestReg));
      EmitToStreamer(*OutStreamer, MOVI);
    } else if (STI->hasZeroCycleZeroingFPR128()) {
      // Convert H/S/D register to corresponding Q register
      const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
      if (AArch64::FPR16RegClass.contains(DestReg)) {
        DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
                                           &AArch64::FPR128RegClass);
      } else if (AArch64::FPR32RegClass.contains(DestReg)) {
        DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
                                           &AArch64::FPR128RegClass);
      } else {
        assert(AArch64::FPR64RegClass.contains(DestReg));
        DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::dsub,
                                           &AArch64::FPR128RegClass);
      }

      MCInst MOVI;
      MOVI.setOpcode(AArch64::MOVIv2d_ns);
      MOVI.addOperand(MCOperand::createReg(DestReg));
      EmitToStreamer(*OutStreamer, MOVI);
    } else {
      emitFMov0AsFMov(MI, DestReg);
    }
  } else {
    emitFMov0AsFMov(MI, DestReg);
  }
}
1878
1879void AArch64AsmPrinter::emitFMov0AsFMov(const MachineInstr &MI,
1880 Register DestReg) {
1881 MCInst FMov;
1882 switch (MI.getOpcode()) {
1883 default:
1884 llvm_unreachable("Unexpected opcode");
1885 case AArch64::FMOVH0:
1886 FMov.setOpcode(STI->hasFullFP16() ? AArch64::FMOVWHr : AArch64::FMOVWSr);
1887 if (!STI->hasFullFP16())
1888 DestReg = (AArch64::S0 + (DestReg - AArch64::H0));
1889 FMov.addOperand(MCOperand::createReg(DestReg));
1890 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1891 break;
1892 case AArch64::FMOVS0:
1893 FMov.setOpcode(AArch64::FMOVWSr);
1894 FMov.addOperand(MCOperand::createReg(DestReg));
1895 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1896 break;
1897 case AArch64::FMOVD0:
1898 FMov.setOpcode(AArch64::FMOVXDr);
1899 FMov.addOperand(MCOperand::createReg(DestReg));
1900 FMov.addOperand(MCOperand::createReg(AArch64::XZR));
1901 break;
1902 }
1903 EmitToStreamer(*OutStreamer, FMov);
1904}
1905
1906Register AArch64AsmPrinter::emitPtrauthDiscriminator(uint16_t Disc,
1907 Register AddrDisc,
1908 Register ScratchReg,
1909 bool MayUseAddrAsScratch) {
1910 assert(ScratchReg == AArch64::X16 || ScratchReg == AArch64::X17 ||
1911 !STI->isX16X17Safer());
1912 // So far we've used NoRegister in pseudos. Now we need real encodings.
1913 if (AddrDisc == AArch64::NoRegister)
1914 AddrDisc = AArch64::XZR;
1915
1916 // If there is no constant discriminator, there's no blend involved:
1917 // just use the address discriminator register as-is (XZR or not).
1918 if (!Disc)
1919 return AddrDisc;
1920
1921 // If there's only a constant discriminator, MOV it into the scratch register.
1922 if (AddrDisc == AArch64::XZR) {
1923 emitMOVZ(ScratchReg, Disc, 0);
1924 return ScratchReg;
1925 }
1926
1927 // If there are both, emit a blend into the scratch register.
1928
1929 // Check if we can save one MOV instruction.
1930 assert(MayUseAddrAsScratch || ScratchReg != AddrDisc);
1931 bool AddrDiscIsSafe = AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17 ||
1932 !STI->isX16X17Safer();
1933 if (MayUseAddrAsScratch && AddrDiscIsSafe)
1934 ScratchReg = AddrDisc;
1935 else
1936 emitMovXReg(ScratchReg, AddrDisc);
1937
1938 emitMOVK(ScratchReg, Disc, 48);
1939 return ScratchReg;
1940}
1941
1942/// Emits a code sequence to check an authenticated pointer value.
1943///
1944/// If OnFailure argument is passed, jump there on check failure instead
1945/// of proceeding to the next instruction (only if ShouldTrap is false).
1946void AArch64AsmPrinter::emitPtrauthCheckAuthenticatedValue(
1947 Register TestedReg, Register ScratchReg, AArch64PACKey::ID Key,
1948 AArch64PAuth::AuthCheckMethod Method, bool ShouldTrap,
1949 const MCSymbol *OnFailure) {
1950 // Insert a sequence to check if authentication of TestedReg succeeded,
1951 // such as:
1952 //
1953 // - checked and clearing:
1954 // ; x16 is TestedReg, x17 is ScratchReg
1955 // mov x17, x16
1956 // xpaci x17
1957 // cmp x16, x17
1958 // b.eq Lsuccess
1959 // mov x16, x17
1960 // b Lend
1961 // Lsuccess:
1962 // ; skipped if authentication failed
1963 // Lend:
1964 // ...
1965 //
1966 // - checked and trapping:
1967 // mov x17, x16
1968 // xpaci x17
1969 // cmp x16, x17
1970 // b.eq Lsuccess
1971 // brk #<0xc470 + aut key>
1972 // Lsuccess:
1973 // ...
1974 //
1975 // See the documentation on AuthCheckMethod enumeration constants for
1976 // the specific code sequences that can be used to perform the check.
1978
1979 if (Method == AuthCheckMethod::None)
1980 return;
1981 if (Method == AuthCheckMethod::DummyLoad) {
1982 EmitToStreamer(MCInstBuilder(AArch64::LDRWui)
1983 .addReg(getWRegFromXReg(ScratchReg))
1984 .addReg(TestedReg)
1985 .addImm(0));
1986 assert(ShouldTrap && !OnFailure && "DummyLoad always traps on error");
1987 return;
1988 }
1989
1990 MCSymbol *SuccessSym = createTempSymbol("auth_success_");
1991 if (Method == AuthCheckMethod::XPAC || Method == AuthCheckMethod::XPACHint) {
1992 // mov Xscratch, Xtested
1993 emitMovXReg(ScratchReg, TestedReg);
1994
1995 if (Method == AuthCheckMethod::XPAC) {
1996 // xpac(i|d) Xscratch
1997 unsigned XPACOpc = getXPACOpcodeForKey(Key);
1998 EmitToStreamer(
1999 MCInstBuilder(XPACOpc).addReg(ScratchReg).addReg(ScratchReg));
2000 } else {
2001 // xpaclri
2002
2003 // Note that this method applies XPAC to TestedReg instead of ScratchReg.
2004 assert(TestedReg == AArch64::LR &&
2005 "XPACHint mode is only compatible with checking the LR register");
2007 "XPACHint mode is only compatible with I-keys");
2008 EmitToStreamer(MCInstBuilder(AArch64::XPACLRI));
2009 }
2010
2011 // cmp Xtested, Xscratch
2012 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
2013 .addReg(AArch64::XZR)
2014 .addReg(TestedReg)
2015 .addReg(ScratchReg)
2016 .addImm(0));
2017
2018 // b.eq Lsuccess
2019 EmitToStreamer(
2020 MCInstBuilder(AArch64::Bcc)
2021 .addImm(AArch64CC::EQ)
2022 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2023 } else if (Method == AuthCheckMethod::HighBitsNoTBI) {
2024 // eor Xscratch, Xtested, Xtested, lsl #1
2025 EmitToStreamer(MCInstBuilder(AArch64::EORXrs)
2026 .addReg(ScratchReg)
2027 .addReg(TestedReg)
2028 .addReg(TestedReg)
2029 .addImm(1));
2030 // tbz Xscratch, #62, Lsuccess
2031 EmitToStreamer(
2032 MCInstBuilder(AArch64::TBZX)
2033 .addReg(ScratchReg)
2034 .addImm(62)
2035 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2036 } else {
2037 llvm_unreachable("Unsupported check method");
2038 }
2039
2040 if (ShouldTrap) {
2041 assert(!OnFailure && "Cannot specify OnFailure with ShouldTrap");
2042 // Trapping sequences do a 'brk'.
2043 // brk #<0xc470 + aut key>
2044 EmitToStreamer(MCInstBuilder(AArch64::BRK).addImm(0xc470 | Key));
2045 } else {
2046 // Non-trapping checked sequences return the stripped result in TestedReg,
2047 // skipping over success-only code (such as re-signing the pointer) if
2048 // there is one.
2049 // Note that this can introduce an authentication oracle (such as based on
2050 // the high bits of the re-signed value).
2051
2052 // FIXME: The XPAC method can be optimized by applying XPAC to TestedReg
2053 // instead of ScratchReg, thus eliminating one `mov` instruction.
2054 // Both XPAC and XPACHint can be further optimized by not using a
2055 // conditional branch jumping over an unconditional one.
2056
2057 switch (Method) {
2058 case AuthCheckMethod::XPACHint:
2059 // LR is already XPAC-ed at this point.
2060 break;
2061 case AuthCheckMethod::XPAC:
2062 // mov Xtested, Xscratch
2063 emitMovXReg(TestedReg, ScratchReg);
2064 break;
2065 default:
2066 // If Xtested was not XPAC-ed so far, emit XPAC here.
2067 // xpac(i|d) Xtested
2068 unsigned XPACOpc = getXPACOpcodeForKey(Key);
2069 EmitToStreamer(
2070 MCInstBuilder(XPACOpc).addReg(TestedReg).addReg(TestedReg));
2071 }
2072
2073 if (OnFailure) {
2074 // b Lend
2075 EmitToStreamer(
2076 MCInstBuilder(AArch64::B)
2077 .addExpr(MCSymbolRefExpr::create(OnFailure, OutContext)));
2078 }
2079 }
2080
2081 // If the auth check succeeds, we can continue.
2082 // Lsuccess:
2083 OutStreamer->emitLabel(SuccessSym);
2084}
2085
2086// With Pointer Authentication, it may be needed to explicitly check the
2087// authenticated value in LR before performing a tail call.
2088// Otherwise, the callee may re-sign the invalid return address,
2089// introducing a signing oracle.
2090void AArch64AsmPrinter::emitPtrauthTailCallHardening(const MachineInstr *TC) {
2091 if (!AArch64FI->shouldSignReturnAddress(*MF))
2092 return;
2093
2094 auto LRCheckMethod = STI->getAuthenticatedLRCheckMethod(*MF);
2095 if (LRCheckMethod == AArch64PAuth::AuthCheckMethod::None)
2096 return;
2097
2098 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
2099 Register ScratchReg =
2100 TC->readsRegister(AArch64::X16, TRI) ? AArch64::X17 : AArch64::X16;
2101 assert(!TC->readsRegister(ScratchReg, TRI) &&
2102 "Neither x16 nor x17 is available as a scratch register");
2105 emitPtrauthCheckAuthenticatedValue(
2106 AArch64::LR, ScratchReg, Key, LRCheckMethod,
2107 /*ShouldTrap=*/true, /*OnFailure=*/nullptr);
2108}
2109
2110void AArch64AsmPrinter::emitPtrauthAuthResign(
2111 Register AUTVal, AArch64PACKey::ID AUTKey, uint64_t AUTDisc,
2112 const MachineOperand *AUTAddrDisc, Register Scratch,
2113 std::optional<AArch64PACKey::ID> PACKey, uint64_t PACDisc,
2114 Register PACAddrDisc) {
2115 const bool IsAUTPAC = PACKey.has_value();
2116
2117 // We expand AUT/AUTPAC into a sequence of the form
2118 //
2119 // ; authenticate x16
2120 // ; check pointer in x16
2121 // Lsuccess:
2122 // ; sign x16 (if AUTPAC)
2123 // Lend: ; if not trapping on failure
2124 //
2125 // with the checking sequence chosen depending on whether/how we should check
2126 // the pointer and whether we should trap on failure.
2127
2128 // By default, auth/resign sequences check for auth failures.
2129 bool ShouldCheck = true;
2130 // In the checked sequence, we only trap if explicitly requested.
2131 bool ShouldTrap = MF->getFunction().hasFnAttribute("ptrauth-auth-traps");
2132
2133 // On an FPAC CPU, you get traps whether you want them or not: there's
2134 // no point in emitting checks or traps.
2135 if (STI->hasFPAC())
2136 ShouldCheck = ShouldTrap = false;
2137
2138 // However, command-line flags can override this, for experimentation.
2139 switch (PtrauthAuthChecks) {
2141 break;
2143 ShouldCheck = ShouldTrap = false;
2144 break;
2146 ShouldCheck = true;
2147 ShouldTrap = false;
2148 break;
2150 ShouldCheck = ShouldTrap = true;
2151 break;
2152 }
2153
2154 // Compute aut discriminator
2155 assert(isUInt<16>(AUTDisc));
2156 Register AUTDiscReg = emitPtrauthDiscriminator(
2157 AUTDisc, AUTAddrDisc->getReg(), Scratch, AUTAddrDisc->isKill());
2158 bool AUTZero = AUTDiscReg == AArch64::XZR;
2159 unsigned AUTOpc = getAUTOpcodeForKey(AUTKey, AUTZero);
2160
2161 // autiza x16 ; if AUTZero
2162 // autia x16, x17 ; if !AUTZero
2163 MCInst AUTInst;
2164 AUTInst.setOpcode(AUTOpc);
2165 AUTInst.addOperand(MCOperand::createReg(AUTVal));
2166 AUTInst.addOperand(MCOperand::createReg(AUTVal));
2167 if (!AUTZero)
2168 AUTInst.addOperand(MCOperand::createReg(AUTDiscReg));
2169 EmitToStreamer(*OutStreamer, AUTInst);
2170
2171 // Unchecked or checked-but-non-trapping AUT is just an "AUT": we're done.
2172 if (!IsAUTPAC && (!ShouldCheck || !ShouldTrap))
2173 return;
2174
2175 MCSymbol *EndSym = nullptr;
2176
2177 if (ShouldCheck) {
2178 if (IsAUTPAC && !ShouldTrap)
2179 EndSym = createTempSymbol("resign_end_");
2180
2181 emitPtrauthCheckAuthenticatedValue(AUTVal, Scratch, AUTKey,
2182 AArch64PAuth::AuthCheckMethod::XPAC,
2183 ShouldTrap, EndSym);
2184 }
2185
2186 // We already emitted unchecked and checked-but-non-trapping AUTs.
2187 // That left us with trapping AUTs, and AUTPACs.
2188 // Trapping AUTs don't need PAC: we're done.
2189 if (!IsAUTPAC)
2190 return;
2191
2192 // Compute pac discriminator
2193 assert(isUInt<16>(PACDisc));
2194 Register PACDiscReg =
2195 emitPtrauthDiscriminator(PACDisc, PACAddrDisc, Scratch);
2196 bool PACZero = PACDiscReg == AArch64::XZR;
2197 unsigned PACOpc = getPACOpcodeForKey(*PACKey, PACZero);
2198
2199 // pacizb x16 ; if PACZero
2200 // pacib x16, x17 ; if !PACZero
2201 MCInst PACInst;
2202 PACInst.setOpcode(PACOpc);
2203 PACInst.addOperand(MCOperand::createReg(AUTVal));
2204 PACInst.addOperand(MCOperand::createReg(AUTVal));
2205 if (!PACZero)
2206 PACInst.addOperand(MCOperand::createReg(PACDiscReg));
2207 EmitToStreamer(*OutStreamer, PACInst);
2208
2209 // Lend:
2210 if (EndSym)
2211 OutStreamer->emitLabel(EndSym);
2212}
2213
2214void AArch64AsmPrinter::emitPtrauthSign(const MachineInstr *MI) {
2215 Register Val = MI->getOperand(1).getReg();
2216 auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
2217 uint64_t Disc = MI->getOperand(3).getImm();
2218 Register AddrDisc = MI->getOperand(4).getReg();
2219 bool AddrDiscKilled = MI->getOperand(4).isKill();
2220
2221 // As long as at least one of Val and AddrDisc is in GPR64noip, a scratch
2222 // register is available.
2223 Register ScratchReg = Val == AArch64::X16 ? AArch64::X17 : AArch64::X16;
2224 assert(ScratchReg != AddrDisc &&
2225 "Neither X16 nor X17 is available as a scratch register");
2226
2227 // Compute pac discriminator
2228 assert(isUInt<16>(Disc));
2229 Register DiscReg = emitPtrauthDiscriminator(
2230 Disc, AddrDisc, ScratchReg, /*MayUseAddrAsScratch=*/AddrDiscKilled);
2231 bool IsZeroDisc = DiscReg == AArch64::XZR;
2232 unsigned Opc = getPACOpcodeForKey(Key, IsZeroDisc);
2233
2234 // paciza x16 ; if IsZeroDisc
2235 // pacia x16, x17 ; if !IsZeroDisc
2236 MCInst PACInst;
2237 PACInst.setOpcode(Opc);
2238 PACInst.addOperand(MCOperand::createReg(Val));
2239 PACInst.addOperand(MCOperand::createReg(Val));
2240 if (!IsZeroDisc)
2241 PACInst.addOperand(MCOperand::createReg(DiscReg));
2242 EmitToStreamer(*OutStreamer, PACInst);
2243}
2244
2245void AArch64AsmPrinter::emitPtrauthBranch(const MachineInstr *MI) {
2246 bool IsCall = MI->getOpcode() == AArch64::BLRA;
2247 unsigned BrTarget = MI->getOperand(0).getReg();
2248
2249 auto Key = (AArch64PACKey::ID)MI->getOperand(1).getImm();
2251 "Invalid auth call key");
2252
2253 uint64_t Disc = MI->getOperand(2).getImm();
2254 assert(isUInt<16>(Disc));
2255
2256 unsigned AddrDisc = MI->getOperand(3).getReg();
2257
2258 // Make sure AddrDisc is solely used to compute the discriminator.
2259 // While hardly meaningful, it is still possible to describe an authentication
2260 // of a pointer against its own value (instead of storage address) with
2261 // intrinsics, so use report_fatal_error instead of assert.
2262 if (BrTarget == AddrDisc)
2263 report_fatal_error("Branch target is signed with its own value");
2264
2265 // If we are printing BLRA pseudo, try to save one MOV by making use of the
2266 // fact that x16 and x17 are described as clobbered by the MI instruction and
2267 // AddrDisc is not used as any other input.
2268 //
2269 // Back in the day, emitPtrauthDiscriminator was restricted to only returning
2270 // either x16 or x17, meaning the returned register is always among the
2271 // implicit-def'ed registers of BLRA pseudo. Now this property can be violated
2272 // if isX16X17Safer predicate is false, thus manually check if AddrDisc is
2273 // among x16 and x17 to prevent clobbering unexpected registers.
2274 //
2275 // Unlike BLRA, BRA pseudo is used to perform computed goto, and thus not
2276 // declared as clobbering x16/x17.
2277 //
2278 // FIXME: Make use of `killed` flags and register masks instead.
2279 bool AddrDiscIsImplicitDef =
2280 IsCall && (AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17);
2281 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17,
2282 AddrDiscIsImplicitDef);
2283 bool IsZeroDisc = DiscReg == AArch64::XZR;
2284
2285 unsigned Opc;
2286 if (IsCall) {
2287 if (Key == AArch64PACKey::IA)
2288 Opc = IsZeroDisc ? AArch64::BLRAAZ : AArch64::BLRAA;
2289 else
2290 Opc = IsZeroDisc ? AArch64::BLRABZ : AArch64::BLRAB;
2291 } else {
2292 if (Key == AArch64PACKey::IA)
2293 Opc = IsZeroDisc ? AArch64::BRAAZ : AArch64::BRAA;
2294 else
2295 Opc = IsZeroDisc ? AArch64::BRABZ : AArch64::BRAB;
2296 }
2297
2298 MCInst BRInst;
2299 BRInst.setOpcode(Opc);
2300 BRInst.addOperand(MCOperand::createReg(BrTarget));
2301 if (!IsZeroDisc)
2302 BRInst.addOperand(MCOperand::createReg(DiscReg));
2303 EmitToStreamer(*OutStreamer, BRInst);
2304}
2305
2306const MCExpr *
2307AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) {
2308 MCContext &Ctx = OutContext;
2309
2310 // Figure out the base symbol and the addend, if any.
2311 APInt Offset(64, 0);
2312 const Value *BaseGV = CPA.getPointer()->stripAndAccumulateConstantOffsets(
2313 getDataLayout(), Offset, /*AllowNonInbounds=*/true);
2314
2315 auto *BaseGVB = dyn_cast<GlobalValue>(BaseGV);
2316
2317 // If we can't understand the referenced ConstantExpr, there's nothing
2318 // else we can do: emit an error.
2319 if (!BaseGVB) {
2320 BaseGV->getContext().emitError(
2321 "cannot resolve target base/addend of ptrauth constant");
2322 return nullptr;
2323 }
2324
2325 // If there is an addend, turn that into the appropriate MCExpr.
2326 const MCExpr *Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx);
2327 if (Offset.sgt(0))
2329 Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx);
2330 else if (Offset.slt(0))
2332 Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx);
2333
2334 uint64_t KeyID = CPA.getKey()->getZExtValue();
2335 // We later rely on valid KeyID value in AArch64PACKeyIDToString call from
2336 // AArch64AuthMCExpr::printImpl, so fail fast.
2337 if (KeyID > AArch64PACKey::LAST) {
2338 CPA.getContext().emitError("AArch64 PAC Key ID '" + Twine(KeyID) +
2339 "' out of range [0, " +
2340 Twine((unsigned)AArch64PACKey::LAST) + "]");
2341 KeyID = 0;
2342 }
2343
2344 uint64_t Disc = CPA.getDiscriminator()->getZExtValue();
2345 if (!isUInt<16>(Disc)) {
2346 CPA.getContext().emitError("AArch64 PAC Discriminator '" + Twine(Disc) +
2347 "' out of range [0, 0xFFFF]");
2348 Disc = 0;
2349 }
2350
2351 // Finally build the complete @AUTH expr.
2352 return AArch64AuthMCExpr::create(Sym, Disc, AArch64PACKey::ID(KeyID),
2353 CPA.hasAddressDiscriminator(), Ctx);
2354}
2355
2356void AArch64AsmPrinter::LowerLOADauthptrstatic(const MachineInstr &MI) {
2357 unsigned DstReg = MI.getOperand(0).getReg();
2358 const MachineOperand &GAOp = MI.getOperand(1);
2359 const uint64_t KeyC = MI.getOperand(2).getImm();
2360 assert(KeyC <= AArch64PACKey::LAST &&
2361 "key is out of range [0, AArch64PACKey::LAST]");
2362 const auto Key = (AArch64PACKey::ID)KeyC;
2363 const uint64_t Disc = MI.getOperand(3).getImm();
2364 assert(isUInt<16>(Disc) &&
2365 "constant discriminator is out of range [0, 0xffff]");
2366
2367 // Emit instruction sequence like the following:
2368 // ADRP x16, symbol$auth_ptr$key$disc
2369 // LDR x16, [x16, :lo12:symbol$auth_ptr$key$disc]
2370 //
2371 // Where the $auth_ptr$ symbol is the stub slot containing the signed pointer
2372 // to symbol.
2373 MCSymbol *AuthPtrStubSym;
2374 if (TM.getTargetTriple().isOSBinFormatELF()) {
2375 const auto &TLOF =
2376 static_cast<const AArch64_ELFTargetObjectFile &>(getObjFileLowering());
2377
2378 assert(GAOp.getOffset() == 0 &&
2379 "non-zero offset for $auth_ptr$ stub slots is not supported");
2380 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2381 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2382 } else {
2383 assert(TM.getTargetTriple().isOSBinFormatMachO() &&
2384 "LOADauthptrstatic is implemented only for MachO/ELF");
2385
2386 const auto &TLOF = static_cast<const AArch64_MachoTargetObjectFile &>(
2387 getObjFileLowering());
2388
2389 assert(GAOp.getOffset() == 0 &&
2390 "non-zero offset for $auth_ptr$ stub slots is not supported");
2391 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2392 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2393 }
2394
2395 MachineOperand StubMOHi =
2397 MachineOperand StubMOLo = MachineOperand::CreateMCSymbol(
2398 AuthPtrStubSym, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2399 MCOperand StubMCHi, StubMCLo;
2400
2401 MCInstLowering.lowerOperand(StubMOHi, StubMCHi);
2402 MCInstLowering.lowerOperand(StubMOLo, StubMCLo);
2403
2404 EmitToStreamer(
2405 *OutStreamer,
2406 MCInstBuilder(AArch64::ADRP).addReg(DstReg).addOperand(StubMCHi));
2407
2408 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRXui)
2409 .addReg(DstReg)
2410 .addReg(DstReg)
2411 .addOperand(StubMCLo));
2412}
2413
2414void AArch64AsmPrinter::LowerMOVaddrPAC(const MachineInstr &MI) {
2415 const bool IsGOTLoad = MI.getOpcode() == AArch64::LOADgotPAC;
2416 const bool IsELFSignedGOT = MI.getParent()
2417 ->getParent()
2418 ->getInfo<AArch64FunctionInfo>()
2419 ->hasELFSignedGOT();
2420 MachineOperand GAOp = MI.getOperand(0);
2421 const uint64_t KeyC = MI.getOperand(1).getImm();
2422 assert(KeyC <= AArch64PACKey::LAST &&
2423 "key is out of range [0, AArch64PACKey::LAST]");
2424 const auto Key = (AArch64PACKey::ID)KeyC;
2425 const unsigned AddrDisc = MI.getOperand(2).getReg();
2426 const uint64_t Disc = MI.getOperand(3).getImm();
2427 assert(isUInt<16>(Disc) &&
2428 "constant discriminator is out of range [0, 0xffff]");
2429
2430 const int64_t Offset = GAOp.getOffset();
2431 GAOp.setOffset(0);
2432
2433 // Emit:
2434 // target materialization:
2435 // - via GOT:
2436 // - unsigned GOT:
2437 // adrp x16, :got:target
2438 // ldr x16, [x16, :got_lo12:target]
2439 // add offset to x16 if offset != 0
2440 // - ELF signed GOT:
2441 // adrp x17, :got:target
2442 // add x17, x17, :got_auth_lo12:target
2443 // ldr x16, [x17]
2444 // aut{i|d}a x16, x17
2445 // check+trap sequence (if no FPAC)
2446 // add offset to x16 if offset != 0
2447 //
2448 // - direct:
2449 // adrp x16, target
2450 // add x16, x16, :lo12:target
2451 // add offset to x16 if offset != 0
2452 //
2453 // add offset to x16:
2454 // - abs(offset) fits 24 bits:
2455 // add/sub x16, x16, #<offset>[, #lsl 12] (up to 2 instructions)
2456 // - abs(offset) does not fit 24 bits:
2457 // - offset < 0:
2458 // movn+movk sequence filling x17 register with the offset (up to 4
2459 // instructions)
2460 // add x16, x16, x17
2461 // - offset > 0:
2462 // movz+movk sequence filling x17 register with the offset (up to 4
2463 // instructions)
2464 // add x16, x16, x17
2465 //
2466 // signing:
2467 // - 0 discriminator:
2468 // paciza x16
2469 // - Non-0 discriminator, no address discriminator:
2470 // mov x17, #Disc
2471 // pacia x16, x17
2472 // - address discriminator (with potentially folded immediate discriminator):
2473 // pacia x16, xAddrDisc
2474
2475 MachineOperand GAMOHi(GAOp), GAMOLo(GAOp);
2476 MCOperand GAMCHi, GAMCLo;
2477
2478 GAMOHi.setTargetFlags(AArch64II::MO_PAGE);
2479 GAMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2480 if (IsGOTLoad) {
2481 GAMOHi.addTargetFlag(AArch64II::MO_GOT);
2482 GAMOLo.addTargetFlag(AArch64II::MO_GOT);
2483 }
2484
2485 MCInstLowering.lowerOperand(GAMOHi, GAMCHi);
2486 MCInstLowering.lowerOperand(GAMOLo, GAMCLo);
2487
2488 EmitToStreamer(
2489 MCInstBuilder(AArch64::ADRP)
2490 .addReg(IsGOTLoad && IsELFSignedGOT ? AArch64::X17 : AArch64::X16)
2491 .addOperand(GAMCHi));
2492
2493 if (IsGOTLoad) {
2494 if (IsELFSignedGOT) {
2495 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2496 .addReg(AArch64::X17)
2497 .addReg(AArch64::X17)
2498 .addOperand(GAMCLo)
2499 .addImm(0));
2500
2501 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2502 .addReg(AArch64::X16)
2503 .addReg(AArch64::X17)
2504 .addImm(0));
2505
2506 assert(GAOp.isGlobal());
2507 assert(GAOp.getGlobal()->getValueType() != nullptr);
2508 unsigned AuthOpcode = GAOp.getGlobal()->getValueType()->isFunctionTy()
2509 ? AArch64::AUTIA
2510 : AArch64::AUTDA;
2511
2512 EmitToStreamer(MCInstBuilder(AuthOpcode)
2513 .addReg(AArch64::X16)
2514 .addReg(AArch64::X16)
2515 .addReg(AArch64::X17));
2516
2517 if (!STI->hasFPAC()) {
2518 auto AuthKey = (AuthOpcode == AArch64::AUTIA ? AArch64PACKey::IA
2520
2521 emitPtrauthCheckAuthenticatedValue(AArch64::X16, AArch64::X17, AuthKey,
2522 AArch64PAuth::AuthCheckMethod::XPAC,
2523 /*ShouldTrap=*/true,
2524 /*OnFailure=*/nullptr);
2525 }
2526 } else {
2527 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2528 .addReg(AArch64::X16)
2529 .addReg(AArch64::X16)
2530 .addOperand(GAMCLo));
2531 }
2532 } else {
2533 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2534 .addReg(AArch64::X16)
2535 .addReg(AArch64::X16)
2536 .addOperand(GAMCLo)
2537 .addImm(0));
2538 }
2539
2540 if (Offset != 0) {
2541 const uint64_t AbsOffset = (Offset > 0 ? Offset : -((uint64_t)Offset));
2542 const bool IsNeg = Offset < 0;
2543 if (isUInt<24>(AbsOffset)) {
2544 for (int BitPos = 0; BitPos != 24 && (AbsOffset >> BitPos);
2545 BitPos += 12) {
2546 EmitToStreamer(
2547 MCInstBuilder(IsNeg ? AArch64::SUBXri : AArch64::ADDXri)
2548 .addReg(AArch64::X16)
2549 .addReg(AArch64::X16)
2550 .addImm((AbsOffset >> BitPos) & 0xfff)
2551 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, BitPos)));
2552 }
2553 } else {
2554 const uint64_t UOffset = Offset;
2555 EmitToStreamer(MCInstBuilder(IsNeg ? AArch64::MOVNXi : AArch64::MOVZXi)
2556 .addReg(AArch64::X17)
2557 .addImm((IsNeg ? ~UOffset : UOffset) & 0xffff)
2558 .addImm(/*shift=*/0));
2559 auto NeedMovk = [IsNeg, UOffset](int BitPos) -> bool {
2560 assert(BitPos == 16 || BitPos == 32 || BitPos == 48);
2561 uint64_t Shifted = UOffset >> BitPos;
2562 if (!IsNeg)
2563 return Shifted != 0;
2564 for (int I = 0; I != 64 - BitPos; I += 16)
2565 if (((Shifted >> I) & 0xffff) != 0xffff)
2566 return true;
2567 return false;
2568 };
2569 for (int BitPos = 16; BitPos != 64 && NeedMovk(BitPos); BitPos += 16)
2570 emitMOVK(AArch64::X17, (UOffset >> BitPos) & 0xffff, BitPos);
2571
2572 EmitToStreamer(MCInstBuilder(AArch64::ADDXrs)
2573 .addReg(AArch64::X16)
2574 .addReg(AArch64::X16)
2575 .addReg(AArch64::X17)
2576 .addImm(/*shift=*/0));
2577 }
2578 }
2579
2580 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17);
2581
2582 auto MIB = MCInstBuilder(getPACOpcodeForKey(Key, DiscReg == AArch64::XZR))
2583 .addReg(AArch64::X16)
2584 .addReg(AArch64::X16);
2585 if (DiscReg != AArch64::XZR)
2586 MIB.addReg(DiscReg);
2587 EmitToStreamer(MIB);
2588}
2589
// Lower LOADgotAUTH: compute the GOT slot address into x17, load the signed
// entry, authenticate it against the slot address, and leave the result in
// the destination register.
void AArch64AsmPrinter::LowerLOADgotAUTH(const MachineInstr &MI) {
  Register DstReg = MI.getOperand(0).getReg();
  // Without FPAC we authenticate into x16 so the explicit check sequence can
  // inspect it before the result is moved into DstReg at the end.
  Register AuthResultReg = STI->hasFPAC() ? DstReg : AArch64::X16;
  const MachineOperand &GAMO = MI.getOperand(1);
  assert(GAMO.getOffset() == 0);

  if (MI.getMF()->getTarget().getCodeModel() == CodeModel::Tiny) {
    // Tiny code model: the GOT slot is reachable with a single ADR.
    //   adr x17, <slot>
    //   ldr xAuth, [x17]
    MCOperand GAMC;
    MCInstLowering.lowerOperand(GAMO, GAMC);
    EmitToStreamer(
        MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addOperand(GAMC));
    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  } else {
    // Otherwise: page + page-offset materialization of the slot address.
    //   adrp x17, <slot page>
    //   add  x17, x17, <slot lo12>
    //   ldr  xAuth, [x17]
    MachineOperand GAHiOp(GAMO);
    MachineOperand GALoOp(GAMO);
    GAHiOp.addTargetFlag(AArch64II::MO_PAGE);
    GALoOp.addTargetFlag(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

    MCOperand GAMCHi, GAMCLo;
    MCInstLowering.lowerOperand(GAHiOp, GAMCHi);
    MCInstLowering.lowerOperand(GALoOp, GAMCLo);

    EmitToStreamer(
        MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(GAMCHi));

    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(AArch64::X17)
                       .addReg(AArch64::X17)
                       .addOperand(GAMCLo)
                       .addImm(0));

    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  }

  assert(GAMO.isGlobal());
  MCSymbol *UndefWeakSym;
  if (GAMO.getGlobal()->hasExternalWeakLinkage()) {
    // An undefined extern_weak global yields a zero slot value; branch over
    // the authentication so a null pointer is returned as-is.
    UndefWeakSym = createTempSymbol("undef_weak");
    EmitToStreamer(
        MCInstBuilder(AArch64::CBZX)
            .addReg(AuthResultReg)
            .addExpr(MCSymbolRefExpr::create(UndefWeakSym, OutContext)));
  }

  // Authenticate with the I-key for function pointers, the D-key otherwise,
  // using x17 (the slot address) as the address discriminator.
  assert(GAMO.getGlobal()->getValueType() != nullptr);
  unsigned AuthOpcode = GAMO.getGlobal()->getValueType()->isFunctionTy()
                            ? AArch64::AUTIA
                            : AArch64::AUTDA;
  EmitToStreamer(MCInstBuilder(AuthOpcode)
                     .addReg(AuthResultReg)
                     .addReg(AuthResultReg)
                     .addReg(AArch64::X17));

  if (GAMO.getGlobal()->hasExternalWeakLinkage())
    OutStreamer->emitLabel(UndefWeakSym);

  if (!STI->hasFPAC()) {
    // No hardware faulting on auth failure: emit an explicit check-and-trap
    // sequence, then copy the verified value into the real destination.
    auto AuthKey =
        (AuthOpcode == AArch64::AUTIA ? AArch64PACKey::IA : AArch64PACKey::DA);

    emitPtrauthCheckAuthenticatedValue(AuthResultReg, AArch64::X17, AuthKey,
                                       AArch64PAuth::AuthCheckMethod::XPAC,
                                       /*ShouldTrap=*/true,
                                       /*OnFailure=*/nullptr);

    emitMovXReg(DstReg, AuthResultReg);
  }
}
2664
2665const MCExpr *
2666AArch64AsmPrinter::lowerBlockAddressConstant(const BlockAddress &BA) {
2667 const MCExpr *BAE = AsmPrinter::lowerBlockAddressConstant(BA);
2668 const Function &Fn = *BA.getFunction();
2669
2670 if (std::optional<uint16_t> BADisc =
2672 return AArch64AuthMCExpr::create(BAE, *BADisc, AArch64PACKey::IA,
2673 /*HasAddressDiversity=*/false, OutContext);
2674
2675 return BAE;
2676}
2677
2678void AArch64AsmPrinter::emitCBPseudoExpansion(const MachineInstr *MI) {
2679 bool IsImm = false;
2680 bool Is32Bit = false;
2681
2682 switch (MI->getOpcode()) {
2683 default:
2684 llvm_unreachable("This is not a CB pseudo instruction");
2685 case AArch64::CBWPrr:
2686 Is32Bit = true;
2687 break;
2688 case AArch64::CBXPrr:
2689 Is32Bit = false;
2690 break;
2691 case AArch64::CBWPri:
2692 IsImm = true;
2693 Is32Bit = true;
2694 break;
2695 case AArch64::CBXPri:
2696 IsImm = true;
2697 break;
2698 }
2699
2701 static_cast<AArch64CC::CondCode>(MI->getOperand(0).getImm());
2702 bool NeedsRegSwap = false;
2703 bool NeedsImmDec = false;
2704 bool NeedsImmInc = false;
2705
2706 // Decide if we need to either swap register operands or increment/decrement
2707 // immediate operands
2708 unsigned MCOpC;
2709 switch (CC) {
2710 default:
2711 llvm_unreachable("Invalid CB condition code");
2712 case AArch64CC::EQ:
2713 MCOpC = IsImm ? (Is32Bit ? AArch64::CBEQWri : AArch64::CBEQXri)
2714 : (Is32Bit ? AArch64::CBEQWrr : AArch64::CBEQXrr);
2715 break;
2716 case AArch64CC::NE:
2717 MCOpC = IsImm ? (Is32Bit ? AArch64::CBNEWri : AArch64::CBNEXri)
2718 : (Is32Bit ? AArch64::CBNEWrr : AArch64::CBNEXrr);
2719 break;
2720 case AArch64CC::HS:
2721 MCOpC = IsImm ? (Is32Bit ? AArch64::CBHIWri : AArch64::CBHIXri)
2722 : (Is32Bit ? AArch64::CBHSWrr : AArch64::CBHSXrr);
2723 NeedsImmDec = IsImm;
2724 break;
2725 case AArch64CC::LO:
2726 MCOpC = IsImm ? (Is32Bit ? AArch64::CBLOWri : AArch64::CBLOXri)
2727 : (Is32Bit ? AArch64::CBHIWrr : AArch64::CBHIXrr);
2728 NeedsRegSwap = !IsImm;
2729 break;
2730 case AArch64CC::HI:
2731 MCOpC = IsImm ? (Is32Bit ? AArch64::CBHIWri : AArch64::CBHIXri)
2732 : (Is32Bit ? AArch64::CBHIWrr : AArch64::CBHIXrr);
2733 break;
2734 case AArch64CC::LS:
2735 MCOpC = IsImm ? (Is32Bit ? AArch64::CBLOWri : AArch64::CBLOXri)
2736 : (Is32Bit ? AArch64::CBHSWrr : AArch64::CBHSXrr);
2737 NeedsRegSwap = !IsImm;
2738 NeedsImmInc = IsImm;
2739 break;
2740 case AArch64CC::GE:
2741 MCOpC = IsImm ? (Is32Bit ? AArch64::CBGTWri : AArch64::CBGTXri)
2742 : (Is32Bit ? AArch64::CBGEWrr : AArch64::CBGEXrr);
2743 NeedsImmDec = IsImm;
2744 break;
2745 case AArch64CC::LT:
2746 MCOpC = IsImm ? (Is32Bit ? AArch64::CBLTWri : AArch64::CBLTXri)
2747 : (Is32Bit ? AArch64::CBGTWrr : AArch64::CBGTXrr);
2748 NeedsRegSwap = !IsImm;
2749 break;
2750 case AArch64CC::GT:
2751 MCOpC = IsImm ? (Is32Bit ? AArch64::CBGTWri : AArch64::CBGTXri)
2752 : (Is32Bit ? AArch64::CBGTWrr : AArch64::CBGTXrr);
2753 break;
2754 case AArch64CC::LE:
2755 MCOpC = IsImm ? (Is32Bit ? AArch64::CBLTWri : AArch64::CBLTXri)
2756 : (Is32Bit ? AArch64::CBGEWrr : AArch64::CBGEXrr);
2757 NeedsRegSwap = !IsImm;
2758 NeedsImmInc = IsImm;
2759 break;
2760 }
2761
2762 MCInst Inst;
2763 Inst.setOpcode(MCOpC);
2764
2765 MCOperand Lhs, Rhs, Trgt;
2766 lowerOperand(MI->getOperand(1), Lhs);
2767 lowerOperand(MI->getOperand(2), Rhs);
2768 lowerOperand(MI->getOperand(3), Trgt);
2769
2770 // Now swap, increment or decrement
2771 if (NeedsRegSwap) {
2772 assert(Lhs.isReg() && "Expected register operand for CB");
2773 assert(Rhs.isReg() && "Expected register operand for CB");
2774 Inst.addOperand(Rhs);
2775 Inst.addOperand(Lhs);
2776 } else if (NeedsImmDec) {
2777 Rhs.setImm(Rhs.getImm() - 1);
2778 Inst.addOperand(Lhs);
2779 Inst.addOperand(Rhs);
2780 } else if (NeedsImmInc) {
2781 Rhs.setImm(Rhs.getImm() + 1);
2782 Inst.addOperand(Lhs);
2783 Inst.addOperand(Rhs);
2784 } else {
2785 Inst.addOperand(Lhs);
2786 Inst.addOperand(Rhs);
2787 }
2788
2789 assert((!IsImm || (Rhs.getImm() >= 0 && Rhs.getImm() < 64)) &&
2790 "CB immediate operand out-of-bounds");
2791
2792 Inst.addOperand(Trgt);
2793 EmitToStreamer(*OutStreamer, Inst);
2794}
2795
2796// Simple pseudo-instructions have their lowering (with expansion to real
2797// instructions) auto-generated.
2798#include "AArch64GenMCPseudoLowering.inc"
2799
// Emit a single MCInst to the streamer using the current subtarget. In debug
// builds it also counts emitted instructions so that emitInstruction() can
// assert an expansion never exceeds the size reported by getInstSizeInBytes.
void AArch64AsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
  S.emitInstruction(Inst, *STI);
#ifndef NDEBUG
  ++InstsEmitted;
#endif
}
2806
2807void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
2808 AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());
2809
2810#ifndef NDEBUG
2811 InstsEmitted = 0;
2812 auto CheckMISize = make_scope_exit([&]() {
2813 assert(STI->getInstrInfo()->getInstSizeInBytes(*MI) >= InstsEmitted * 4);
2814 });
2815#endif
2816
2817 // Do any auto-generated pseudo lowerings.
2818 if (MCInst OutInst; lowerPseudoInstExpansion(MI, OutInst)) {
2819 EmitToStreamer(*OutStreamer, OutInst);
2820 return;
2821 }
2822
2823 if (MI->getOpcode() == AArch64::ADRP) {
2824 for (auto &Opd : MI->operands()) {
2825 if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
2826 "swift_async_extendedFramePointerFlags") {
2827 ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
2828 }
2829 }
2830 }
2831
2832 if (AArch64FI->getLOHRelated().count(MI)) {
2833 // Generate a label for LOH related instruction
2834 MCSymbol *LOHLabel = createTempSymbol("loh");
2835 // Associate the instruction with the label
2836 LOHInstToLabel[MI] = LOHLabel;
2837 OutStreamer->emitLabel(LOHLabel);
2838 }
2839
2840 AArch64TargetStreamer *TS =
2841 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
2842 // Do any manual lowerings.
2843 switch (MI->getOpcode()) {
2844 default:
2846 "Unhandled tail call instruction");
2847 break;
2848 case AArch64::HINT: {
2849 // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
2850 // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
2851 // non-empty. If MI is the initial BTI, place the
2852 // __patchable_function_entries label after BTI.
2853 if (CurrentPatchableFunctionEntrySym &&
2854 CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
2855 MI == &MF->front().front()) {
2856 int64_t Imm = MI->getOperand(0).getImm();
2857 if ((Imm & 32) && (Imm & 6)) {
2858 MCInst Inst;
2859 MCInstLowering.Lower(MI, Inst);
2860 EmitToStreamer(*OutStreamer, Inst);
2861 CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
2862 OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
2863 return;
2864 }
2865 }
2866 break;
2867 }
2868 case AArch64::MOVMCSym: {
2869 Register DestReg = MI->getOperand(0).getReg();
2870 const MachineOperand &MO_Sym = MI->getOperand(1);
2871 MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
2872 MCOperand Hi_MCSym, Lo_MCSym;
2873
2874 Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
2875 Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
2876
2877 MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
2878 MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
2879
2880 MCInst MovZ;
2881 MovZ.setOpcode(AArch64::MOVZXi);
2882 MovZ.addOperand(MCOperand::createReg(DestReg));
2883 MovZ.addOperand(Hi_MCSym);
2885 EmitToStreamer(*OutStreamer, MovZ);
2886
2887 MCInst MovK;
2888 MovK.setOpcode(AArch64::MOVKXi);
2889 MovK.addOperand(MCOperand::createReg(DestReg));
2890 MovK.addOperand(MCOperand::createReg(DestReg));
2891 MovK.addOperand(Lo_MCSym);
2893 EmitToStreamer(*OutStreamer, MovK);
2894 return;
2895 }
2896 case AArch64::MOVIv2d_ns:
2897 // It is generally beneficial to rewrite "fmov s0, wzr" to "movi d0, #0".
2898 // as movi is more efficient across all cores. Newer cores can eliminate
2899 // fmovs early and there is no difference with movi, but this not true for
2900 // all implementations.
2901 //
2902 // The floating-point version doesn't quite work in rare cases on older
2903 // CPUs, so on those targets we lower this instruction to movi.16b instead.
2904 if (STI->hasZeroCycleZeroingFPWorkaround() &&
2905 MI->getOperand(1).getImm() == 0) {
2906 MCInst TmpInst;
2907 TmpInst.setOpcode(AArch64::MOVIv16b_ns);
2908 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
2909 TmpInst.addOperand(MCOperand::createImm(0));
2910 EmitToStreamer(*OutStreamer, TmpInst);
2911 return;
2912 }
2913 break;
2914
2915 case AArch64::DBG_VALUE:
2916 case AArch64::DBG_VALUE_LIST:
2917 if (isVerbose() && OutStreamer->hasRawTextSupport()) {
2918 SmallString<128> TmpStr;
2919 raw_svector_ostream OS(TmpStr);
2920 PrintDebugValueComment(MI, OS);
2921 OutStreamer->emitRawText(StringRef(OS.str()));
2922 }
2923 return;
2924
2925 case AArch64::EMITBKEY: {
2926 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
2927 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
2928 ExceptionHandlingType != ExceptionHandling::ARM)
2929 return;
2930
2931 if (getFunctionCFISectionType(*MF) == CFISection::None)
2932 return;
2933
2934 OutStreamer->emitCFIBKeyFrame();
2935 return;
2936 }
2937
2938 case AArch64::EMITMTETAGGED: {
2939 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
2940 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
2941 ExceptionHandlingType != ExceptionHandling::ARM)
2942 return;
2943
2944 if (getFunctionCFISectionType(*MF) != CFISection::None)
2945 OutStreamer->emitCFIMTETaggedFrame();
2946 return;
2947 }
2948
2949 case AArch64::AUTx16x17:
2950 emitPtrauthAuthResign(AArch64::X16,
2951 (AArch64PACKey::ID)MI->getOperand(0).getImm(),
2952 MI->getOperand(1).getImm(), &MI->getOperand(2),
2953 AArch64::X17, std::nullopt, 0, 0);
2954 return;
2955
2956 case AArch64::AUTxMxN:
2957 emitPtrauthAuthResign(MI->getOperand(0).getReg(),
2958 (AArch64PACKey::ID)MI->getOperand(3).getImm(),
2959 MI->getOperand(4).getImm(), &MI->getOperand(5),
2960 MI->getOperand(1).getReg(), std::nullopt, 0, 0);
2961 return;
2962
2963 case AArch64::AUTPAC:
2964 emitPtrauthAuthResign(
2965 AArch64::X16, (AArch64PACKey::ID)MI->getOperand(0).getImm(),
2966 MI->getOperand(1).getImm(), &MI->getOperand(2), AArch64::X17,
2967 (AArch64PACKey::ID)MI->getOperand(3).getImm(),
2968 MI->getOperand(4).getImm(), MI->getOperand(5).getReg());
2969 return;
2970
2971 case AArch64::PAC:
2972 emitPtrauthSign(MI);
2973 return;
2974
2975 case AArch64::LOADauthptrstatic:
2976 LowerLOADauthptrstatic(*MI);
2977 return;
2978
2979 case AArch64::LOADgotPAC:
2980 case AArch64::MOVaddrPAC:
2981 LowerMOVaddrPAC(*MI);
2982 return;
2983
2984 case AArch64::LOADgotAUTH:
2985 LowerLOADgotAUTH(*MI);
2986 return;
2987
2988 case AArch64::BRA:
2989 case AArch64::BLRA:
2990 emitPtrauthBranch(MI);
2991 return;
2992
2993 // Tail calls use pseudo instructions so they have the proper code-gen
2994 // attributes (isCall, isReturn, etc.). We lower them to the real
2995 // instruction here.
2996 case AArch64::AUTH_TCRETURN:
2997 case AArch64::AUTH_TCRETURN_BTI: {
2998 Register Callee = MI->getOperand(0).getReg();
2999 const uint64_t Key = MI->getOperand(2).getImm();
3001 "Invalid auth key for tail-call return");
3002
3003 const uint64_t Disc = MI->getOperand(3).getImm();
3004 assert(isUInt<16>(Disc) && "Integer discriminator is too wide");
3005
3006 Register AddrDisc = MI->getOperand(4).getReg();
3007
3008 Register ScratchReg = Callee == AArch64::X16 ? AArch64::X17 : AArch64::X16;
3009
3010 emitPtrauthTailCallHardening(MI);
3011
3012 // See the comments in emitPtrauthBranch.
3013 if (Callee == AddrDisc)
3014 report_fatal_error("Call target is signed with its own value");
3015
3016 // After isX16X17Safer predicate was introduced, emitPtrauthDiscriminator is
3017 // no longer restricted to only reusing AddrDisc when it is X16 or X17
3018 // (which are implicit-def'ed by AUTH_TCRETURN pseudos), thus impose this
3019 // restriction manually not to clobber an unexpected register.
3020 bool AddrDiscIsImplicitDef =
3021 AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17;
3022 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, ScratchReg,
3023 AddrDiscIsImplicitDef);
3024
3025 const bool IsZero = DiscReg == AArch64::XZR;
3026 const unsigned Opcodes[2][2] = {{AArch64::BRAA, AArch64::BRAAZ},
3027 {AArch64::BRAB, AArch64::BRABZ}};
3028
3029 MCInst TmpInst;
3030 TmpInst.setOpcode(Opcodes[Key][IsZero]);
3031 TmpInst.addOperand(MCOperand::createReg(Callee));
3032 if (!IsZero)
3033 TmpInst.addOperand(MCOperand::createReg(DiscReg));
3034 EmitToStreamer(*OutStreamer, TmpInst);
3035 return;
3036 }
3037
3038 case AArch64::TCRETURNri:
3039 case AArch64::TCRETURNrix16x17:
3040 case AArch64::TCRETURNrix17:
3041 case AArch64::TCRETURNrinotx16:
3042 case AArch64::TCRETURNriALL: {
3043 emitPtrauthTailCallHardening(MI);
3044
3045 recordIfImportCall(MI);
3046 MCInst TmpInst;
3047 TmpInst.setOpcode(AArch64::BR);
3048 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3049 EmitToStreamer(*OutStreamer, TmpInst);
3050 return;
3051 }
3052 case AArch64::TCRETURNdi: {
3053 emitPtrauthTailCallHardening(MI);
3054
3055 MCOperand Dest;
3056 MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
3057 recordIfImportCall(MI);
3058 MCInst TmpInst;
3059 TmpInst.setOpcode(AArch64::B);
3060 TmpInst.addOperand(Dest);
3061 EmitToStreamer(*OutStreamer, TmpInst);
3062 return;
3063 }
3064 case AArch64::SpeculationBarrierISBDSBEndBB: {
3065 // Print DSB SYS + ISB
3066 MCInst TmpInstDSB;
3067 TmpInstDSB.setOpcode(AArch64::DSB);
3068 TmpInstDSB.addOperand(MCOperand::createImm(0xf));
3069 EmitToStreamer(*OutStreamer, TmpInstDSB);
3070 MCInst TmpInstISB;
3071 TmpInstISB.setOpcode(AArch64::ISB);
3072 TmpInstISB.addOperand(MCOperand::createImm(0xf));
3073 EmitToStreamer(*OutStreamer, TmpInstISB);
3074 return;
3075 }
3076 case AArch64::SpeculationBarrierSBEndBB: {
3077 // Print SB
3078 MCInst TmpInstSB;
3079 TmpInstSB.setOpcode(AArch64::SB);
3080 EmitToStreamer(*OutStreamer, TmpInstSB);
3081 return;
3082 }
3083 case AArch64::TLSDESC_AUTH_CALLSEQ: {
3084 /// lower this to:
3085 /// adrp x0, :tlsdesc_auth:var
3086 /// ldr x16, [x0, #:tlsdesc_auth_lo12:var]
3087 /// add x0, x0, #:tlsdesc_auth_lo12:var
3088 /// blraa x16, x0
3089 /// (TPIDR_EL0 offset now in x0)
3090 const MachineOperand &MO_Sym = MI->getOperand(0);
3091 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3092 MCOperand SymTLSDescLo12, SymTLSDesc;
3093 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3094 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3095 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3096 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3097
3098 MCInst Adrp;
3099 Adrp.setOpcode(AArch64::ADRP);
3100 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3101 Adrp.addOperand(SymTLSDesc);
3102 EmitToStreamer(*OutStreamer, Adrp);
3103
3104 MCInst Ldr;
3105 Ldr.setOpcode(AArch64::LDRXui);
3106 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3107 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3108 Ldr.addOperand(SymTLSDescLo12);
3110 EmitToStreamer(*OutStreamer, Ldr);
3111
3112 MCInst Add;
3113 Add.setOpcode(AArch64::ADDXri);
3114 Add.addOperand(MCOperand::createReg(AArch64::X0));
3115 Add.addOperand(MCOperand::createReg(AArch64::X0));
3116 Add.addOperand(SymTLSDescLo12);
3118 EmitToStreamer(*OutStreamer, Add);
3119
3120 // Authenticated TLSDESC accesses are not relaxed.
3121 // Thus, do not emit .tlsdesccall for AUTH TLSDESC.
3122
3123 MCInst Blraa;
3124 Blraa.setOpcode(AArch64::BLRAA);
3125 Blraa.addOperand(MCOperand::createReg(AArch64::X16));
3126 Blraa.addOperand(MCOperand::createReg(AArch64::X0));
3127 EmitToStreamer(*OutStreamer, Blraa);
3128
3129 return;
3130 }
3131 case AArch64::TLSDESC_CALLSEQ: {
3132 /// lower this to:
3133 /// adrp x0, :tlsdesc:var
3134 /// ldr x1, [x0, #:tlsdesc_lo12:var]
3135 /// add x0, x0, #:tlsdesc_lo12:var
3136 /// .tlsdesccall var
3137 /// blr x1
3138 /// (TPIDR_EL0 offset now in x0)
3139 const MachineOperand &MO_Sym = MI->getOperand(0);
3140 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3141 MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
3142 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3143 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3144 MCInstLowering.lowerOperand(MO_Sym, Sym);
3145 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3146 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3147
3148 MCInst Adrp;
3149 Adrp.setOpcode(AArch64::ADRP);
3150 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3151 Adrp.addOperand(SymTLSDesc);
3152 EmitToStreamer(*OutStreamer, Adrp);
3153
3154 MCInst Ldr;
3155 if (STI->isTargetILP32()) {
3156 Ldr.setOpcode(AArch64::LDRWui);
3157 Ldr.addOperand(MCOperand::createReg(AArch64::W1));
3158 } else {
3159 Ldr.setOpcode(AArch64::LDRXui);
3160 Ldr.addOperand(MCOperand::createReg(AArch64::X1));
3161 }
3162 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3163 Ldr.addOperand(SymTLSDescLo12);
3165 EmitToStreamer(*OutStreamer, Ldr);
3166
3167 MCInst Add;
3168 if (STI->isTargetILP32()) {
3169 Add.setOpcode(AArch64::ADDWri);
3170 Add.addOperand(MCOperand::createReg(AArch64::W0));
3171 Add.addOperand(MCOperand::createReg(AArch64::W0));
3172 } else {
3173 Add.setOpcode(AArch64::ADDXri);
3174 Add.addOperand(MCOperand::createReg(AArch64::X0));
3175 Add.addOperand(MCOperand::createReg(AArch64::X0));
3176 }
3177 Add.addOperand(SymTLSDescLo12);
3179 EmitToStreamer(*OutStreamer, Add);
3180
3181 // Emit a relocation-annotation. This expands to no code, but requests
3182 // the following instruction gets an R_AARCH64_TLSDESC_CALL.
3183 MCInst TLSDescCall;
3184 TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
3185 TLSDescCall.addOperand(Sym);
3186 EmitToStreamer(*OutStreamer, TLSDescCall);
3187#ifndef NDEBUG
3188 --InstsEmitted; // no code emitted
3189#endif
3190
3191 MCInst Blr;
3192 Blr.setOpcode(AArch64::BLR);
3193 Blr.addOperand(MCOperand::createReg(AArch64::X1));
3194 EmitToStreamer(*OutStreamer, Blr);
3195
3196 return;
3197 }
3198
3199 case AArch64::JumpTableDest32:
3200 case AArch64::JumpTableDest16:
3201 case AArch64::JumpTableDest8:
3202 LowerJumpTableDest(*OutStreamer, *MI);
3203 return;
3204
3205 case AArch64::BR_JumpTable:
3206 LowerHardenedBRJumpTable(*MI);
3207 return;
3208
3209 case AArch64::FMOVH0:
3210 case AArch64::FMOVS0:
3211 case AArch64::FMOVD0:
3212 emitFMov0(*MI);
3213 return;
3214
3215 case AArch64::MOPSMemoryCopyPseudo:
3216 case AArch64::MOPSMemoryMovePseudo:
3217 case AArch64::MOPSMemorySetPseudo:
3218 case AArch64::MOPSMemorySetTaggingPseudo:
3219 LowerMOPS(*OutStreamer, *MI);
3220 return;
3221
3222 case TargetOpcode::STACKMAP:
3223 return LowerSTACKMAP(*OutStreamer, SM, *MI);
3224
3225 case TargetOpcode::PATCHPOINT:
3226 return LowerPATCHPOINT(*OutStreamer, SM, *MI);
3227
3228 case TargetOpcode::STATEPOINT:
3229 return LowerSTATEPOINT(*OutStreamer, SM, *MI);
3230
3231 case TargetOpcode::FAULTING_OP:
3232 return LowerFAULTING_OP(*MI);
3233
3234 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
3235 LowerPATCHABLE_FUNCTION_ENTER(*MI);
3236 return;
3237
3238 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
3239 LowerPATCHABLE_FUNCTION_EXIT(*MI);
3240 return;
3241
3242 case TargetOpcode::PATCHABLE_TAIL_CALL:
3243 LowerPATCHABLE_TAIL_CALL(*MI);
3244 return;
3245 case TargetOpcode::PATCHABLE_EVENT_CALL:
3246 return LowerPATCHABLE_EVENT_CALL(*MI, false);
3247 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
3248 return LowerPATCHABLE_EVENT_CALL(*MI, true);
3249
3250 case AArch64::KCFI_CHECK:
3251 LowerKCFI_CHECK(*MI);
3252 return;
3253
3254 case AArch64::HWASAN_CHECK_MEMACCESS:
3255 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
3256 case AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW:
3257 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW:
3258 LowerHWASAN_CHECK_MEMACCESS(*MI);
3259 return;
3260
3261 case AArch64::SEH_StackAlloc:
3262 TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
3263 return;
3264
3265 case AArch64::SEH_SaveFPLR:
3266 TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
3267 return;
3268
3269 case AArch64::SEH_SaveFPLR_X:
3270 assert(MI->getOperand(0).getImm() < 0 &&
3271 "Pre increment SEH opcode must have a negative offset");
3272 TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
3273 return;
3274
3275 case AArch64::SEH_SaveReg:
3276 TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
3277 MI->getOperand(1).getImm());
3278 return;
3279
3280 case AArch64::SEH_SaveReg_X:
3281 assert(MI->getOperand(1).getImm() < 0 &&
3282 "Pre increment SEH opcode must have a negative offset");
3283 TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
3284 -MI->getOperand(1).getImm());
3285 return;
3286
3287 case AArch64::SEH_SaveRegP:
3288 if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
3289 MI->getOperand(0).getImm() <= 28) {
3290 assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
3291 "Register paired with LR must be odd");
3292 TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
3293 MI->getOperand(2).getImm());
3294 return;
3295 }
3296 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3297 "Non-consecutive registers not allowed for save_regp");
3298 TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
3299 MI->getOperand(2).getImm());
3300 return;
3301
3302 case AArch64::SEH_SaveRegP_X:
3303 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3304 "Non-consecutive registers not allowed for save_regp_x");
3305 assert(MI->getOperand(2).getImm() < 0 &&
3306 "Pre increment SEH opcode must have a negative offset");
3307 TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
3308 -MI->getOperand(2).getImm());
3309 return;
3310
3311 case AArch64::SEH_SaveFReg:
3312 TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
3313 MI->getOperand(1).getImm());
3314 return;
3315
3316 case AArch64::SEH_SaveFReg_X:
3317 assert(MI->getOperand(1).getImm() < 0 &&
3318 "Pre increment SEH opcode must have a negative offset");
3319 TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
3320 -MI->getOperand(1).getImm());
3321 return;
3322
3323 case AArch64::SEH_SaveFRegP:
3324 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3325 "Non-consecutive registers not allowed for save_regp");
3326 TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
3327 MI->getOperand(2).getImm());
3328 return;
3329
3330 case AArch64::SEH_SaveFRegP_X:
3331 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3332 "Non-consecutive registers not allowed for save_regp_x");
3333 assert(MI->getOperand(2).getImm() < 0 &&
3334 "Pre increment SEH opcode must have a negative offset");
3335 TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
3336 -MI->getOperand(2).getImm());
3337 return;
3338
3339 case AArch64::SEH_SetFP:
3341 return;
3342
3343 case AArch64::SEH_AddFP:
3344 TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
3345 return;
3346
3347 case AArch64::SEH_Nop:
3348 TS->emitARM64WinCFINop();
3349 return;
3350
3351 case AArch64::SEH_PrologEnd:
3353 return;
3354
3355 case AArch64::SEH_EpilogStart:
3357 return;
3358
3359 case AArch64::SEH_EpilogEnd:
3361 return;
3362
3363 case AArch64::SEH_PACSignLR:
3365 return;
3366
3367 case AArch64::SEH_SaveAnyRegQP:
3368 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3369 "Non-consecutive registers not allowed for save_any_reg");
3370 assert(MI->getOperand(2).getImm() >= 0 &&
3371 "SaveAnyRegQP SEH opcode offset must be non-negative");
3372 assert(MI->getOperand(2).getImm() <= 1008 &&
3373 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3374 TS->emitARM64WinCFISaveAnyRegQP(MI->getOperand(0).getImm(),
3375 MI->getOperand(2).getImm());
3376 return;
3377
3378 case AArch64::SEH_SaveAnyRegQPX:
3379 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3380 "Non-consecutive registers not allowed for save_any_reg");
3381 assert(MI->getOperand(2).getImm() < 0 &&
3382 "SaveAnyRegQPX SEH opcode offset must be negative");
3383 assert(MI->getOperand(2).getImm() >= -1008 &&
3384 "SaveAnyRegQPX SEH opcode offset must fit into 6 bits");
3385 TS->emitARM64WinCFISaveAnyRegQPX(MI->getOperand(0).getImm(),
3386 -MI->getOperand(2).getImm());
3387 return;
3388
3389 case AArch64::SEH_AllocZ:
3390 assert(MI->getOperand(0).getImm() >= 0 &&
3391 "AllocZ SEH opcode offset must be non-negative");
3392 assert(MI->getOperand(0).getImm() <= 255 &&
3393 "AllocZ SEH opcode offset must fit into 8 bits");
3394 TS->emitARM64WinCFIAllocZ(MI->getOperand(0).getImm());
3395 return;
3396
3397 case AArch64::SEH_SaveZReg:
3398 assert(MI->getOperand(1).getImm() >= 0 &&
3399 "SaveZReg SEH opcode offset must be non-negative");
3400 assert(MI->getOperand(1).getImm() <= 255 &&
3401 "SaveZReg SEH opcode offset must fit into 8 bits");
3402 TS->emitARM64WinCFISaveZReg(MI->getOperand(0).getImm(),
3403 MI->getOperand(1).getImm());
3404 return;
3405
3406 case AArch64::SEH_SavePReg:
3407 assert(MI->getOperand(1).getImm() >= 0 &&
3408 "SavePReg SEH opcode offset must be non-negative");
3409 assert(MI->getOperand(1).getImm() <= 255 &&
3410 "SavePReg SEH opcode offset must fit into 8 bits");
3411 TS->emitARM64WinCFISavePReg(MI->getOperand(0).getImm(),
3412 MI->getOperand(1).getImm());
3413 return;
3414
3415 case AArch64::BLR:
3416 case AArch64::BR: {
3417 recordIfImportCall(MI);
3418 MCInst TmpInst;
3419 MCInstLowering.Lower(MI, TmpInst);
3420 EmitToStreamer(*OutStreamer, TmpInst);
3421 return;
3422 }
3423 case AArch64::CBWPri:
3424 case AArch64::CBXPri:
3425 case AArch64::CBWPrr:
3426 case AArch64::CBXPrr:
3427 emitCBPseudoExpansion(MI);
3428 return;
3429 }
3430
3431 // Finally, do the automated lowerings for everything else.
3432 MCInst TmpInst;
3433 MCInstLowering.Lower(MI, TmpInst);
3434 EmitToStreamer(*OutStreamer, TmpInst);
3435}
3436
3437void AArch64AsmPrinter::recordIfImportCall(
3438 const llvm::MachineInstr *BranchInst) {
3439 if (!EnableImportCallOptimization)
3440 return;
3441
3442 auto [GV, OpFlags] = BranchInst->getMF()->tryGetCalledGlobal(BranchInst);
3443 if (GV && GV->hasDLLImportStorageClass()) {
3444 auto *CallSiteSymbol = MMI->getContext().createNamedTempSymbol("impcall");
3445 OutStreamer->emitLabel(CallSiteSymbol);
3446
3447 auto *CalledSymbol = MCInstLowering.GetGlobalValueSymbol(GV, OpFlags);
3448 SectionToImportedFunctionCalls[OutStreamer->getCurrentSectionOnly()]
3449 .push_back({CallSiteSymbol, CalledSymbol});
3450 }
3451}
3452
3453void AArch64AsmPrinter::emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
3454 MCSymbol *LazyPointer) {
3455 // _ifunc:
3456 // adrp x16, lazy_pointer@GOTPAGE
3457 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3458 // ldr x16, [x16]
3459 // br x16
3460
3461 {
3462 MCInst Adrp;
3463 Adrp.setOpcode(AArch64::ADRP);
3464 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3465 MCOperand SymPage;
3466 MCInstLowering.lowerOperand(
3469 SymPage);
3470 Adrp.addOperand(SymPage);
3471 EmitToStreamer(Adrp);
3472 }
3473
3474 {
3475 MCInst Ldr;
3476 Ldr.setOpcode(AArch64::LDRXui);
3477 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3478 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3479 MCOperand SymPageOff;
3480 MCInstLowering.lowerOperand(
3483 SymPageOff);
3484 Ldr.addOperand(SymPageOff);
3486 EmitToStreamer(Ldr);
3487 }
3488
3489 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
3490 .addReg(AArch64::X16)
3491 .addReg(AArch64::X16)
3492 .addImm(0));
3493
3494 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3495 : AArch64::BR)
3496 .addReg(AArch64::X16));
3497}
3498
3499void AArch64AsmPrinter::emitMachOIFuncStubHelperBody(Module &M,
3500 const GlobalIFunc &GI,
3501 MCSymbol *LazyPointer) {
3502 // These stub helpers are only ever called once, so here we're optimizing for
3503 // minimum size by using the pre-indexed store variants, which saves a few
3504 // bytes of instructions to bump & restore sp.
3505
3506 // _ifunc.stub_helper:
3507 // stp fp, lr, [sp, #-16]!
3508 // mov fp, sp
3509 // stp x1, x0, [sp, #-16]!
3510 // stp x3, x2, [sp, #-16]!
3511 // stp x5, x4, [sp, #-16]!
3512 // stp x7, x6, [sp, #-16]!
3513 // stp d1, d0, [sp, #-16]!
3514 // stp d3, d2, [sp, #-16]!
3515 // stp d5, d4, [sp, #-16]!
3516 // stp d7, d6, [sp, #-16]!
3517 // bl _resolver
3518 // adrp x16, lazy_pointer@GOTPAGE
3519 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3520 // str x0, [x16]
3521 // mov x16, x0
3522 // ldp d7, d6, [sp], #16
3523 // ldp d5, d4, [sp], #16
3524 // ldp d3, d2, [sp], #16
3525 // ldp d1, d0, [sp], #16
3526 // ldp x7, x6, [sp], #16
3527 // ldp x5, x4, [sp], #16
3528 // ldp x3, x2, [sp], #16
3529 // ldp x1, x0, [sp], #16
3530 // ldp fp, lr, [sp], #16
3531 // br x16
3532
3533 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3534 .addReg(AArch64::SP)
3535 .addReg(AArch64::FP)
3536 .addReg(AArch64::LR)
3537 .addReg(AArch64::SP)
3538 .addImm(-2));
3539
3540 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3541 .addReg(AArch64::FP)
3542 .addReg(AArch64::SP)
3543 .addImm(0)
3544 .addImm(0));
3545
3546 for (int I = 0; I != 4; ++I)
3547 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3548 .addReg(AArch64::SP)
3549 .addReg(AArch64::X1 + 2 * I)
3550 .addReg(AArch64::X0 + 2 * I)
3551 .addReg(AArch64::SP)
3552 .addImm(-2));
3553
3554 for (int I = 0; I != 4; ++I)
3555 EmitToStreamer(MCInstBuilder(AArch64::STPDpre)
3556 .addReg(AArch64::SP)
3557 .addReg(AArch64::D1 + 2 * I)
3558 .addReg(AArch64::D0 + 2 * I)
3559 .addReg(AArch64::SP)
3560 .addImm(-2));
3561
3562 EmitToStreamer(
3563 MCInstBuilder(AArch64::BL)
3565
3566 {
3567 MCInst Adrp;
3568 Adrp.setOpcode(AArch64::ADRP);
3569 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3570 MCOperand SymPage;
3571 MCInstLowering.lowerOperand(
3572 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3574 SymPage);
3575 Adrp.addOperand(SymPage);
3576 EmitToStreamer(Adrp);
3577 }
3578
3579 {
3580 MCInst Ldr;
3581 Ldr.setOpcode(AArch64::LDRXui);
3582 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3583 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3584 MCOperand SymPageOff;
3585 MCInstLowering.lowerOperand(
3586 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3588 SymPageOff);
3589 Ldr.addOperand(SymPageOff);
3591 EmitToStreamer(Ldr);
3592 }
3593
3594 EmitToStreamer(MCInstBuilder(AArch64::STRXui)
3595 .addReg(AArch64::X0)
3596 .addReg(AArch64::X16)
3597 .addImm(0));
3598
3599 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3600 .addReg(AArch64::X16)
3601 .addReg(AArch64::X0)
3602 .addImm(0)
3603 .addImm(0));
3604
3605 for (int I = 3; I != -1; --I)
3606 EmitToStreamer(MCInstBuilder(AArch64::LDPDpost)
3607 .addReg(AArch64::SP)
3608 .addReg(AArch64::D1 + 2 * I)
3609 .addReg(AArch64::D0 + 2 * I)
3610 .addReg(AArch64::SP)
3611 .addImm(2));
3612
3613 for (int I = 3; I != -1; --I)
3614 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3615 .addReg(AArch64::SP)
3616 .addReg(AArch64::X1 + 2 * I)
3617 .addReg(AArch64::X0 + 2 * I)
3618 .addReg(AArch64::SP)
3619 .addImm(2));
3620
3621 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3622 .addReg(AArch64::SP)
3623 .addReg(AArch64::FP)
3624 .addReg(AArch64::LR)
3625 .addReg(AArch64::SP)
3626 .addImm(2));
3627
3628 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3629 : AArch64::BR)
3630 .addReg(AArch64::X16));
3631}
3632
3633const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV,
3634 const Constant *BaseCV,
3635 uint64_t Offset) {
3636 if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
3637 return MCSymbolRefExpr::create(MCInstLowering.GetGlobalValueSymbol(GV, 0),
3638 OutContext);
3639 }
3640
3641 return AsmPrinter::lowerConstant(CV, BaseCV, Offset);
3642}
3643
// Pass identification; the address of ID is used as a unique pass token.
char AArch64AsmPrinter::ID = 0;

INITIALIZE_PASS(AArch64AsmPrinter, "aarch64-asm-printer",
                "AArch64 Assembly Printer", false, false)
3648
3649// Force static initialization.
3650extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
3651LLVMInitializeAArch64AsmPrinter() {
3657}
static cl::opt< PtrauthCheckMode > PtrauthAuthChecks("aarch64-ptrauth-auth-checks", cl::Hidden, cl::values(clEnumValN(Unchecked, "none", "don't test for failure"), clEnumValN(Poison, "poison", "poison on failure"), clEnumValN(Trap, "trap", "trap on failure")), cl::desc("Check pointer authentication auth/resign failures"), cl::init(Default))
PtrauthCheckMode
@ Unchecked
static void emitAuthenticatedPointer(MCStreamer &OutStreamer, MCSymbol *StubLabel, const MCExpr *StubAuthPtrRef)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition Compiler.h:132
This file defines the DenseMap class.
@ Default
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Machine Check Debug Module
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
#define P(N)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:480
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool printOperand(raw_ostream &OS, const SelectionDAG *G, const SDValue Value)
Shrink Wrap Pass
This file defines the SmallString class.
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static bool printAsmMRegister(const X86AsmPrinter &P, const MachineOperand &MO, char Mode, raw_ostream &O)
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx, SMLoc Loc=SMLoc())
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
bool shouldSignReturnAddress(const MachineFunction &MF) const
const SetOfInstructions & getLOHRelated() const
unsigned getJumpTableEntrySize(int Idx) const
MCSymbol * getJumpTableEntryPCRelSymbol(int Idx) const
std::optional< std::string > getOutliningStyle() const
const MILOHContainer & getLOHContainer() const
void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym)
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Return the number of bytes of code the specified instruction may be.
AArch64MCInstLower - This class is used to lower an MachineInstr into an MCInst.
MCSymbol * GetGlobalValueSymbol(const GlobalValue *GV, unsigned TargetFlags) const
void Lower(const MachineInstr *MI, MCInst &OutMI) const
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const
const AArch64RegisterInfo * getRegisterInfo() const override
bool isNeonAvailable() const
Returns true if the target has NEON and the function at runtime is known to have NEON enabled (e....
const AArch64InstrInfo * getInstrInfo() const override
std::optional< uint16_t > getPtrAuthBlockAddressDiscriminatorIfEnabled(const Function &ParentFn) const
Compute the integer discriminator for a given BlockAddress constant, if blockaddress signing is enabl...
bool isX16X17Safer() const
Returns whether the operating system makes it safer to store sensitive values in x16 and x17 as oppos...
AArch64PAuth::AuthCheckMethod getAuthenticatedLRCheckMethod(const MachineFunction &MF) const
Choose a method of checking LR before performing a tail call.
virtual void emitARM64WinCFISaveRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQP(unsigned Reg, int Offset)
virtual void emitAttributesSubsection(StringRef VendorName, AArch64BuildAttributes::SubsectionOptional IsOptional, AArch64BuildAttributes::SubsectionType ParameterType)
Build attributes implementation.
virtual void emitARM64WinCFISavePReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFIAllocStack(unsigned Size)
virtual void emitARM64WinCFISaveFPLRX(int Offset)
virtual void emitARM64WinCFIAllocZ(int Offset)
virtual void emitDirectiveVariantPCS(MCSymbol *Symbol)
Callback used to implement the .variant_pcs directive.
virtual void emitARM64WinCFIAddFP(unsigned Size)
virtual void emitARM64WinCFISaveFPLR(int Offset)
virtual void emitARM64WinCFISaveFRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveZReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveLRPair(unsigned Reg, int Offset)
virtual void emitAttribute(StringRef VendorName, unsigned Tag, unsigned Value, std::string String)
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
const T & front() const
front - Get the first element.
Definition ArrayRef.h:150
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:142
This class is intended to be used as a driving class for all asm writers.
Definition AsmPrinter.h:90
virtual void emitGlobalAlias(const Module &M, const GlobalAlias &GA)
virtual MCSymbol * GetCPISymbol(unsigned CPID) const
Return the symbol for the specified constant pool entry.
virtual const MCExpr * lowerConstant(const Constant *CV, const Constant *BaseCV=nullptr, uint64_t Offset=0)
Lower the specified LLVM Constant to an MCExpr.
void getAnalysisUsage(AnalysisUsage &AU) const override
Record analysis usage.
virtual void emitXXStructor(const DataLayout &DL, const Constant *CV)
Targets can override this to change how global constants that are part of a C++ static/global constru...
Definition AsmPrinter.h:646
virtual void emitFunctionEntryLabel()
EmitFunctionEntryLabel - Emit the label that is the entrypoint for the function.
virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, const char *ExtraCode, raw_ostream &OS)
Print the specified operand of MI, an INLINEASM instruction, using the specified assembler variant.
virtual const MCExpr * lowerBlockAddressConstant(const BlockAddress &BA)
Lower the specified BlockAddress to an MCExpr.
The address of a basic block.
Definition Constants.h:899
Function * getFunction() const
Definition Constants.h:935
Conditional or Unconditional Branch instruction.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
A signed pointer, in the ptrauth sense.
Definition Constants.h:1032
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1059
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1062
bool hasAddressDiscriminator() const
Whether there is any non-null address discriminator.
Definition Constants.h:1077
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1065
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
void recordFaultingOp(FaultKind FaultTy, const MCSymbol *FaultingLabel, const MCSymbol *HandlerLabel)
Definition FaultMaps.cpp:28
void serializeToFaultMapSection()
Definition FaultMaps.cpp:45
const Constant * getAliasee() const
Definition GlobalAlias.h:87
const Constant * getResolver() const
Definition GlobalIFunc.h:73
bool hasLocalLinkage() const
bool hasExternalWeakLinkage() const
Type * getValueType() const
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
static const MCBinaryExpr * createLShr(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:423
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:343
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:428
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
Base class for the full range of assembler expressions which are needed for parsing.
Definition MCExpr.h:34
Instances of this class represent a single low-level machine instruction.
Definition MCInst.h:188
void addOperand(const MCOperand Op)
Definition MCInst.h:215
void setOpcode(unsigned Op)
Definition MCInst.h:201
MCSection * getDataSection() const
Instances of this class represent operands of the MCInst class.
Definition MCInst.h:40
void setImm(int64_t Val)
Definition MCInst.h:89
static MCOperand createExpr(const MCExpr *Val)
Definition MCInst.h:166
int64_t getImm() const
Definition MCInst.h:84
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
bool isReg() const
Definition MCInst.h:65
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
static const MCSpecifierExpr * create(const MCExpr *Expr, Spec S, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:743
Streaming machine code generation interface.
Definition MCStreamer.h:220
virtual void emitCFIBKeyFrame()
virtual void beginCOFFSymbolDef(const MCSymbol *Symbol)
Start emitting COFF symbol definition.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitCOFFSymbolType(int Type)
Emit the type of the symbol.
virtual bool hasRawTextSupport() const
Return true if this asm streamer supports emitting unformatted text to the .s file with EmitRawText.
Definition MCStreamer.h:368
virtual void endCOFFSymbolDef()
Marks the end of the symbol definition.
MCContext & getContext() const
Definition MCStreamer.h:314
virtual void AddComment(const Twine &T, bool EOL=true)
Add a textual comment.
Definition MCStreamer.h:387
virtual void emitCFIMTETaggedFrame()
void emitValue(const MCExpr *Value, unsigned Size, SMLoc Loc=SMLoc())
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
MCTargetStreamer * getTargetStreamer()
Definition MCStreamer.h:324
MCSection * getCurrentSectionOnly() const
Definition MCStreamer.h:421
void emitRawText(const Twine &String)
If this file is backed by a assembly streamer, this dumps the specified string in the output ....
virtual void emitCOFFSymbolStorageClass(int StorageClass)
Emit the storage class of the symbol.
Generic base class for all target subtargets.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
LLVM_ABI void print(raw_ostream &OS, const MCAsmInfo *MAI) const
print - Print the value to the stream OS.
Definition MCSymbol.cpp:59
StringRef getName() const
getName - Get the symbol name.
Definition MCSymbol.h:188
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
CalledGlobalInfo tryGetCalledGlobal(const MachineInstr *MI) const
Tries to get the global and target flags for a call site, if the instruction is a call to a global.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Representation of each machine instruction.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
mop_range operands()
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const MachineOperand & getOperand(unsigned i) const
const std::vector< MachineJumpTableEntry > & getJumpTables() const
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
static MachineOperand CreateMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
const GlobalValue * getGlobal() const
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
const BlockAddress * getBlockAddress() const
void setOffset(int64_t Offset)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
@ MO_Immediate
Immediate operand.
@ MO_GlobalAddress
Address of a global value.
@ MO_BlockAddress
Address of a basic block.
@ MO_Register
Register operand.
@ MO_ExternalSymbol
Name of external global symbol.
int64_t getOffset() const
Return the offset from the symbol in this operand.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:19
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:78
static SectionKind getMetadata()
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void push_back(const T &Elt)
LLVM_ABI void recordStatepoint(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a statepoint instruction.
LLVM_ABI void recordPatchPoint(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a patchpoint instruction.
LLVM_ABI void recordStackMap(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a stackmap instruction.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:148
virtual MCSection * getSectionForJumpTable(const Function &F, const TargetMachine &TM) const
Primary interface to the complete machine description for the target machine.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
bool regsOverlap(Register RegA, Register RegB) const
Returns true if the two registers are equal or alias each other.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition Type.h:258
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1101
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
StringRef getVendorName(unsigned const Vendor)
@ MO_NC
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
@ MO_G1
MO_G1 - A symbol operand with this flag (granule 1) represents the bits 16-31 of a 64-bit address,...
@ MO_S
MO_S - Indicates that the bits of the symbol operand represented by MO_G0 etc are signed.
@ MO_PAGEOFF
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
@ MO_G0
MO_G0 - A symbol operand with this flag (granule 0) represents the bits 0-15 of a 64-bit address,...
@ MO_PAGE
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
@ MO_TLS
MO_TLS - Indicates that the operand being accessed is some kind of thread-local symbol.
AuthCheckMethod
Variants of check performed on an authenticated pointer.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ SectionSize
Definition COFF.h:61
SymbolStorageClass
Storage class tells where and what the symbol represents.
Definition COFF.h:218
@ IMAGE_SYM_CLASS_EXTERNAL
External symbol.
Definition COFF.h:224
@ IMAGE_SYM_CLASS_STATIC
Static.
Definition COFF.h:225
@ IMAGE_SYM_DTYPE_FUNCTION
A function that returns a base type.
Definition COFF.h:276
@ SCT_COMPLEX_TYPE_SHIFT
Type is formed as (base + (derived << SCT_COMPLEX_TYPE_SHIFT))
Definition COFF.h:280
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ SHT_PROGBITS
Definition ELF.h:1140
@ SHF_ALLOC
Definition ELF.h:1240
@ SHF_GROUP
Definition ELF.h:1262
@ SHF_EXECINSTR
Definition ELF.h:1243
@ GNU_PROPERTY_AARCH64_FEATURE_1_BTI
Definition ELF.h:1850
@ GNU_PROPERTY_AARCH64_FEATURE_1_PAC
Definition ELF.h:1851
@ GNU_PROPERTY_AARCH64_FEATURE_1_GCS
Definition ELF.h:1852
@ S_REGULAR
S_REGULAR - Regular section.
Definition MachO.h:127
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:681
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
bool empty() const
Definition BasicBlock.h:101
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:310
@ Offset
Definition DWP.cpp:477
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI std::optional< std::string > getArm64ECMangledFunctionName(StringRef Name)
Returns the ARM64EC mangled function name unless the input is already mangled.
Definition Mangler.cpp:294
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
Definition ScopeExit.h:59
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
static unsigned getXPACOpcodeForKey(AArch64PACKey::ID K)
Return XPAC opcode to be used for a ptrauth strip using the given key.
ExceptionHandling
Definition CodeGen.h:53
Target & getTheAArch64beTarget()
std::string utostr(uint64_t X, bool isNeg=false)
Target & getTheAArch64leTarget()
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:759
Target & getTheAArch64_32Target()
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:198
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
Target & getTheARM64_32Target()
static MCRegister getXRegFromWReg(MCRegister Reg)
@ Add
Sum of integers.
Target & getTheARM64Target()
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static MCRegister getXRegFromXRegTuple(MCRegister RegTuple)
static unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return PAC opcode to be used for a ptrauth sign using the given key, or its PAC*Z variant that doesn'...
static MCRegister getWRegFromXReg(MCRegister Reg)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
static unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return AUT opcode to be used for a ptrauth auth using the given key, or its AUT*Z variant that doesn'...
@ MCSA_Weak
.weak
@ MCSA_WeakAntiDep
.weak_anti_dep (COFF)
@ MCSA_ELF_TypeFunction
.type _foo, STT_FUNC # aka @function
@ MCSA_Hidden
.hidden (ELF)
RegisterAsmPrinter - Helper template for registering a target specific assembly printer,...