//===-- VEInstrInfo.cpp - VE Instruction Information ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the VE implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "VEInstrInfo.h"
#include "VE.h"
#include "VEMachineFunctionInfo.h"
#include "VESubtarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "ve-instr-info"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "VEGenInstrInfo.inc"

// Pin the vtable to this file.
void VEInstrInfo::anchor() {}

VEInstrInfo::VEInstrInfo(const VESubtarget &ST)
    : VEGenInstrInfo(ST, VE::ADJCALLSTACKDOWN, VE::ADJCALLSTACKUP), RI() {}

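// Integer condition codes come first in the VECC::CondCode enum, ahead of
// CC_AF and the floating-point codes, so a simple ordering comparison is
// enough to recognize them.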
static bool IsIntegerCC(unsigned CC) { return (CC < VECC::CC_AF); }

static VECC::CondCode GetOppositeBranchCondition(VECC::CondCode CC) {
  switch (CC) {
  case VECC::CC_IG:
    return VECC::CC_ILE;
  case VECC::CC_IL:
    return VECC::CC_IGE;
  case VECC::CC_INE:
    return VECC::CC_IEQ;
  case VECC::CC_IEQ:
    return VECC::CC_INE;
  case VECC::CC_IGE:
    return VECC::CC_IL;
  case VECC::CC_ILE:
    return VECC::CC_IG;
  case VECC::CC_AF:
    return VECC::CC_AT;
  case VECC::CC_G:
    return VECC::CC_LENAN;
  case VECC::CC_L:
    return VECC::CC_GENAN;
  case VECC::CC_NE:
    return VECC::CC_EQNAN;
  case VECC::CC_EQ:
    return VECC::CC_NENAN;
  case VECC::CC_GE:
    return VECC::CC_LNAN;
  case VECC::CC_LE:
    return VECC::CC_GNAN;
  case VECC::CC_NUM:
    return VECC::CC_NAN;
  case VECC::CC_NAN:
    return VECC::CC_NUM;
  case VECC::CC_GNAN:
    return VECC::CC_LE;
  case VECC::CC_LNAN:
    return VECC::CC_GE;
  case VECC::CC_NENAN:
    return VECC::CC_EQ;
  case VECC::CC_EQNAN:
    return VECC::CC_NE;
  case VECC::CC_GENAN:
    return VECC::CC_L;
  case VECC::CC_LENAN:
    return VECC::CC_G;
  case VECC::CC_AT:
    return VECC::CC_AF;
  case VECC::UNKNOWN:
    return VECC::UNKNOWN;
  }
  llvm_unreachable("Invalid cond code");
}

// Treat a "branch relative long always" instruction as an unconditional
// branch; for example, br.l.t and br.l.
static bool isUncondBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME) (Opc == NAME##a || Opc == NAME##a_nt || Opc == NAME##a_t)
  // VE also has branch relative always instructions for word/double/float,
  // but we emit only long branches in our lowering, so check that here.
  assert(!BRKIND(BRCFW) && !BRKIND(BRCFD) && !BRKIND(BRCFS) &&
         "Branch relative word/double/float always instructions should not be "
         "used!");
  return BRKIND(BRCFL);
#undef BRKIND
}

// Treat branch relative conditional instructions as conditional branches.
// For example, brgt.l.t and brle.s.nt.
static bool isCondBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME)                                                           \
  (Opc == NAME##rr || Opc == NAME##rr_nt || Opc == NAME##rr_t ||               \
   Opc == NAME##ir || Opc == NAME##ir_nt || Opc == NAME##ir_t)
  return BRKIND(BRCFL) || BRKIND(BRCFW) || BRKIND(BRCFD) || BRKIND(BRCFS);
#undef BRKIND
}

// Treat "branch long always" instructions as indirect branches.
// For example, b.l.t and b.l.
static bool isIndirectBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME)                                                           \
  (Opc == NAME##ari || Opc == NAME##ari_nt || Opc == NAME##ari_t)
  // VE also has branch always instructions for word/double/float, but
  // we emit only long branches in our lowering, so check that here.
  assert(!BRKIND(BCFW) && !BRKIND(BCFD) && !BRKIND(BCFS) &&
         "Branch word/double/float always instructions should not be used!");
  return BRKIND(BCFL);
#undef BRKIND
}

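// Decompose a conditional branch of the form (BRCF<kind> CC, LHS, RHS, Target)
// into the three-operand Cond vector (condition code, LHS, RHS) used by
// analyzeBranch/insertBranch, plus the branch target block.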
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  Cond.push_back(MachineOperand::CreateImm(LastInst->getOperand(0).getImm()));
  Cond.push_back(LastInst->getOperand(1));
  Cond.push_back(LastInst->getOperand(2));
  Target = LastInst->getOperand(3).getMBB();
}

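// Analyze the terminators of MBB following the TargetInstrInfo::analyzeBranch
// contract: fill in TBB/FBB/Cond and return false when the control flow is
// understood, or return true when the block cannot be analyzed (for example,
// when it ends in an indirect branch).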
bool VEInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now; the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      }
      SecondLastInst = &*I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an unconditional
  // branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

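// Emit the branch sequence described by TBB/FBB/Cond at the end of MBB and
// return the number of instructions inserted: 1 for a single branch, 2 when
// both a conditional and an unconditional branch are needed.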
unsigned VEInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *TBB,
                                   MachineBasicBlock *FBB,
                                   ArrayRef<MachineOperand> Cond,
                                   const DebugLoc &DL, int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "VE branch conditions should have three components!");
  assert(!BytesAdded && "code size not handled");
  if (Cond.empty()) {
    // Unconditional branch.
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(VE::BRCFLa_t))
        .addMBB(TBB);
    return 1;
  }

  // Conditional branch
  //   (BRCFir CC sy sz addr)
  assert(Cond[0].isImm() && Cond[2].isReg() && "not implemented");

  unsigned opc[2];
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  Register Reg = Cond[2].getReg();
  if (IsIntegerCC(Cond[0].getImm())) {
    if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
      opc[0] = VE::BRCFWir;
      opc[1] = VE::BRCFWrr;
    } else {
      opc[0] = VE::BRCFLir;
      opc[1] = VE::BRCFLrr;
    }
  } else {
    if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
      opc[0] = VE::BRCFSir;
      opc[1] = VE::BRCFSrr;
    } else {
      opc[0] = VE::BRCFDir;
      opc[1] = VE::BRCFDrr;
    }
  }
  if (Cond[1].isImm()) {
    BuildMI(&MBB, DL, get(opc[0]))
        .add(Cond[0]) // condition code
        .add(Cond[1]) // lhs
        .add(Cond[2]) // rhs
        .addMBB(TBB);
  } else {
    BuildMI(&MBB, DL, get(opc[1]))
        .add(Cond[0])
        .add(Cond[1])
        .add(Cond[2])
        .addMBB(TBB);
  }

  if (!FBB)
    return 1;

  BuildMI(&MBB, DL, get(VE::BRCFLa_t))
      .addMBB(FBB);
  return 2;
}

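// Remove the branch instructions at the end of MBB (skipping debug values)
// and return how many were deleted.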
unsigned VEInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                   int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;
  while (I != MBB.begin()) {
    --I;

    if (I->isDebugValue())
      continue;

    if (!isUncondBranchOpcode(I->getOpcode()) &&
        !isCondBranchOpcode(I->getOpcode()))
      break; // Not a branch

    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }
  return Count;
}

bool VEInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  VECC::CondCode CC = static_cast<VECC::CondCode>(Cond[0].getImm());
  Cond[0].setImm(GetOppositeBranchCondition(CC));
  return false;
}

static bool IsAliasOfSX(Register Reg) {
  return VE::I32RegClass.contains(Reg) || VE::I64RegClass.contains(Reg) ||
         VE::F32RegClass.contains(Reg);
}

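// Copy a register pair (e.g. F128 or VM512) by copying each sub-register in
// turn with the supplied opcode (ORri for scalar pairs, ANDMmm for mask
// pairs), then mark the super-register as defined/killed on the last copy so
// liveness stays correct.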
static void copyPhysSubRegs(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator I, const DebugLoc &DL,
                            MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
                            const MCInstrDesc &MCID, unsigned int NumSubRegs,
                            const unsigned *SubRegIdx,
                            const TargetRegisterInfo *TRI) {
  MachineInstr *MovMI = nullptr;

  for (unsigned Idx = 0; Idx != NumSubRegs; ++Idx) {
    Register SubDest = TRI->getSubReg(DestReg, SubRegIdx[Idx]);
    Register SubSrc = TRI->getSubReg(SrcReg, SubRegIdx[Idx]);
    assert(SubDest && SubSrc && "Bad sub-register");

    if (MCID.getOpcode() == VE::ORri) {
      // Generate an "ORri dest, src, 0" instruction.
      MachineInstrBuilder MIB =
          BuildMI(MBB, I, DL, MCID, SubDest).addReg(SubSrc).addImm(0);
      MovMI = MIB.getInstr();
    } else if (MCID.getOpcode() == VE::ANDMmm) {
      // Generate an "ANDM dest, vm0, src" instruction.
      MachineInstrBuilder MIB =
          BuildMI(MBB, I, DL, MCID, SubDest).addReg(VE::VM0).addReg(SubSrc);
      MovMI = MIB.getInstr();
    } else {
      llvm_unreachable("Unexpected reg-to-reg copy instruction");
    }
  }
  // Add implicit super-register defs and kills to the last MovMI.
  MovMI->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    MovMI->addRegisterKilled(SrcReg, TRI, true);
}

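// Lower a COPY between physical registers: scalar (SX) copies use ORri with
// immediate 0, V64 vector copies materialize a vector length of 256 in SX16
// and use VORmvl, single mask registers use ANDMmm with VM0, and VM512/F128
// pairs are split into two sub-register copies.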
void VEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I, const DebugLoc &DL,
                              Register DestReg, Register SrcReg, bool KillSrc,
                              bool RenamableDest, bool RenamableSrc) const {

  if (IsAliasOfSX(SrcReg) && IsAliasOfSX(DestReg)) {
    BuildMI(MBB, I, DL, get(VE::ORri), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
  } else if (VE::V64RegClass.contains(DestReg, SrcReg)) {
    // Generate the following instructions:
    //   %sw16 = LEA32zii 256
    //   VORmvl %dest, (0)1, %src, %sw16
    // TODO: reuse a register if vl is already assigned to a register
    // FIXME: it would be better to scavenge a register here instead of
    //        reserving SX16 all of the time.
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    Register TmpReg = VE::SX16;
    Register SubTmp = TRI->getSubReg(TmpReg, VE::sub_i32);
    BuildMI(MBB, I, DL, get(VE::LEAzii), TmpReg)
        .addImm(0)
        .addImm(0)
        .addImm(256);
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(VE::VORmvl), DestReg)
                                  .addImm(M1(0)) // Represent (0)1.
                                  .addReg(SrcReg, getKillRegState(KillSrc))
                                  .addReg(SubTmp, getKillRegState(true));
    MIB.getInstr()->addRegisterKilled(TmpReg, TRI, true);
  } else if (VE::VMRegClass.contains(DestReg, SrcReg)) {
    BuildMI(MBB, I, DL, get(VE::ANDMmm), DestReg)
        .addReg(VE::VM0)
        .addReg(SrcReg, getKillRegState(KillSrc));
  } else if (VE::VM512RegClass.contains(DestReg, SrcReg)) {
    // Use two instructions.
    const unsigned SubRegIdx[] = {VE::sub_vm_even, VE::sub_vm_odd};
    unsigned int NumSubRegs = 2;
    copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::ANDMmm),
                    NumSubRegs, SubRegIdx, &getRegisterInfo());
  } else if (VE::F128RegClass.contains(DestReg, SrcReg)) {
    // Use two instructions.
    const unsigned SubRegIdx[] = {VE::sub_even, VE::sub_odd};
    unsigned int NumSubRegs = 2;
    copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::ORri),
                    NumSubRegs, SubRegIdx, &getRegisterInfo());
  } else {
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    dbgs() << "Impossible reg-to-reg copy from " << printReg(SrcReg, TRI)
           << " to " << printReg(DestReg, TRI) << "\n";
    llvm_unreachable("Impossible reg-to-reg copy");
  }
}

/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
Register VEInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  if (MI.getOpcode() == VE::LDrii ||     // I64
      MI.getOpcode() == VE::LDLSXrii ||  // I32
      MI.getOpcode() == VE::LDUrii ||    // F32
      MI.getOpcode() == VE::LDQrii ||    // F128 (pseudo)
      MI.getOpcode() == VE::LDVMrii ||   // VM (pseudo)
      MI.getOpcode() == VE::LDVM512rii   // VM512 (pseudo)
  ) {
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0 && MI.getOperand(3).isImm() &&
        MI.getOperand(3).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
  }
  return 0;
}

/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
Register VEInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                         int &FrameIndex) const {
  if (MI.getOpcode() == VE::STrii ||     // I64
      MI.getOpcode() == VE::STLrii ||    // I32
      MI.getOpcode() == VE::STUrii ||    // F32
      MI.getOpcode() == VE::STQrii ||    // F128 (pseudo)
      MI.getOpcode() == VE::STVMrii ||   // VM (pseudo)
      MI.getOpcode() == VE::STVM512rii   // VM512 (pseudo)
  ) {
    if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
        MI.getOperand(1).getImm() == 0 && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return MI.getOperand(3).getReg();
    }
  }
  return 0;
}

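// Store SrcReg to stack slot FI, choosing the store opcode by register class
// (STrii/STLrii/STUrii for scalars, STQrii for F128, STVMrii/STVM512rii for
// mask registers).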
void VEInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      Register SrcReg, bool isKill, int FI,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI,
                                      Register VReg,
                                      MachineInstr::MIFlag Flags) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  // On the order of operands here: think "[FrameIdx + 0] = SrcReg".
  if (RC == &VE::I64RegClass) {
    BuildMI(MBB, I, DL, get(VE::STrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::I32RegClass) {
    BuildMI(MBB, I, DL, get(VE::STLrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::F32RegClass) {
    BuildMI(MBB, I, DL, get(VE::STUrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (VE::F128RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::STQrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::VMRegClass) {
    BuildMI(MBB, I, DL, get(VE::STVMrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (VE::VM512RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::STVM512rii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else
    report_fatal_error("Can't store this register to stack slot");
}

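// Reload DestReg from stack slot FI, mirroring storeRegToStackSlot's opcode
// selection (LDrii/LDLSXrii/LDUrii/LDQrii/LDVMrii/LDVM512rii).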
void VEInstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
    int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
    Register VReg, MachineInstr::MIFlag Flags) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  if (RC == &VE::I64RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::I32RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDLSXrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::F32RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDUrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (VE::F128RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::LDQrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::VMRegClass) {
    BuildMI(MBB, I, DL, get(VE::LDVMrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (VE::VM512RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::LDVM512rii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else
    report_fatal_error("Can't load this register from stack slot");
}

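// Fold a small-immediate definition (ORim or LEAzii) produced by DefMI into
// UseMI, rewriting UseMI from its rr form to an ri/ir/rm form when the value
// fits a simm7 or mimm encoding. Returns true and erases DefMI (when it has
// no other non-debug uses) on success.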
bool VEInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                Register Reg, MachineRegisterInfo *MRI) const {
  LLVM_DEBUG(dbgs() << "foldImmediate\n");

  LLVM_DEBUG(dbgs() << "checking DefMI\n");
  int64_t ImmVal;
  switch (DefMI.getOpcode()) {
  default:
    return false;
  case VE::ORim:
    // General move small immediate instruction on VE.
    LLVM_DEBUG(dbgs() << "checking ORim\n");
    LLVM_DEBUG(DefMI.dump());
    // FIXME: We may need to support FPImm too.
    assert(DefMI.getOperand(1).isImm());
    assert(DefMI.getOperand(2).isImm());
    ImmVal =
        DefMI.getOperand(1).getImm() + mimm2Val(DefMI.getOperand(2).getImm());
    LLVM_DEBUG(dbgs() << "ImmVal is " << ImmVal << "\n");
    break;
  case VE::LEAzii:
    // General move immediate instruction on VE.
    LLVM_DEBUG(dbgs() << "checking LEAzii\n");
    LLVM_DEBUG(DefMI.dump());
    // FIXME: We may need to support FPImm too.
    assert(DefMI.getOperand(2).isImm());
    if (!DefMI.getOperand(3).isImm())
      // LEAzii may refer to a label.
      return false;
    ImmVal = DefMI.getOperand(2).getImm() + DefMI.getOperand(3).getImm();
    LLVM_DEBUG(dbgs() << "ImmVal is " << ImmVal << "\n");
    break;
  }

  // Try to fold like below:
  //   %1:i64 = ORim 0, 0(1)
  //   %2:i64 = CMPSLrr %0, %1
  // To
  //   %2:i64 = CMPSLrm %0, 0(1)
  //
  // Another example:
  //   %1:i64 = ORim 6, 0(1)
  //   %2:i64 = CMPSLrr %1, %0
  // To
  //   %2:i64 = CMPSLir 6, %0
  //
  // Support commutable instructions like below:
  //   %1:i64 = ORim 6, 0(1)
  //   %2:i64 = ADDSLrr %1, %0
  // To
  //   %2:i64 = ADDSLri %0, 6
  //
  // FIXME: Need to support i32. The current implementation requires
  //   EXTRACT_SUBREG, so the input contains a COPY like the following,
  //   which prevents folding:
  //     %1:i64 = ORim 6, 0(1)
  //     %2:i32 = COPY %1.sub_i32
  //     %3:i32 = ADDSWSXrr %0, %2
  // FIXME: Need to support shift, cmov, and more instructions.
  // FIXME: Need to support lvl too, but LVLGen runs after peephole-opt.

  LLVM_DEBUG(dbgs() << "checking UseMI\n");
  LLVM_DEBUG(UseMI.dump());
  unsigned NewUseOpcSImm7;
  unsigned NewUseOpcMImm;
  enum InstType {
    rr2ri_rm, // rr -> ri or rm, commutable
    rr2ir_rm, // rr -> ir or rm
  } InstType;

  using namespace llvm::VE;
#define INSTRKIND(NAME)                                                        \
  case NAME##rr:                                                               \
    NewUseOpcSImm7 = NAME##ri;                                                 \
    NewUseOpcMImm = NAME##rm;                                                  \
    InstType = rr2ri_rm;                                                       \
    break
#define NCINSTRKIND(NAME)                                                      \
  case NAME##rr:                                                               \
    NewUseOpcSImm7 = NAME##ir;                                                 \
    NewUseOpcMImm = NAME##rm;                                                  \
    InstType = rr2ir_rm;                                                       \
    break

  switch (UseMI.getOpcode()) {
  default:
    return false;

    INSTRKIND(ADDUL);
    INSTRKIND(ADDSWSX);
    INSTRKIND(ADDSWZX);
    INSTRKIND(ADDSL);
    NCINSTRKIND(SUBUL);
    NCINSTRKIND(SUBSWSX);
    NCINSTRKIND(SUBSWZX);
    NCINSTRKIND(SUBSL);
    INSTRKIND(MULUL);
    INSTRKIND(MULSWSX);
    INSTRKIND(MULSWZX);
    INSTRKIND(MULSL);
    NCINSTRKIND(DIVUL);
    NCINSTRKIND(DIVSWSX);
    NCINSTRKIND(DIVSWZX);
    NCINSTRKIND(DIVSL);
    NCINSTRKIND(CMPUL);
    NCINSTRKIND(CMPSWSX);
    NCINSTRKIND(CMPSWZX);
    NCINSTRKIND(CMPSL);
    INSTRKIND(MAXSWSX);
    INSTRKIND(MAXSWZX);
    INSTRKIND(MAXSL);
    INSTRKIND(MINSWSX);
    INSTRKIND(MINSWZX);
    INSTRKIND(MINSL);
    INSTRKIND(AND);
    INSTRKIND(OR);
    INSTRKIND(XOR);
    INSTRKIND(EQV);
    NCINSTRKIND(NND);
    NCINSTRKIND(MRG);
  }

#undef INSTRKIND

  unsigned NewUseOpc;
  unsigned UseIdx;
  bool Commute = false;
  LLVM_DEBUG(dbgs() << "checking UseMI operands\n");
  switch (InstType) {
  case rr2ri_rm:
    UseIdx = 2;
    if (UseMI.getOperand(1).getReg() == Reg) {
      Commute = true;
    } else {
      assert(UseMI.getOperand(2).getReg() == Reg);
    }
    if (isInt<7>(ImmVal)) {
      // This ImmVal fits the SImm7 slot, so change UseOpc to an instruction
      // that holds a simm7 slot.
      NewUseOpc = NewUseOpcSImm7;
    } else if (isMImmVal(ImmVal)) {
      // Similarly, change UseOpc to an instruction that holds a mimm slot.
      NewUseOpc = NewUseOpcMImm;
      ImmVal = val2MImm(ImmVal);
    } else
      return false;
    break;
  case rr2ir_rm:
    if (UseMI.getOperand(1).getReg() == Reg) {
      // Check whether the immediate value fits the UseMI instruction.
      if (!isInt<7>(ImmVal))
        return false;
      NewUseOpc = NewUseOpcSImm7;
      UseIdx = 1;
    } else {
      assert(UseMI.getOperand(2).getReg() == Reg);
      // Check whether the immediate value fits the UseMI instruction.
      if (!isMImmVal(ImmVal))
        return false;
      NewUseOpc = NewUseOpcMImm;
      ImmVal = val2MImm(ImmVal);
      UseIdx = 2;
    }
    break;
  }

  LLVM_DEBUG(dbgs() << "modifying UseMI\n");
  bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
  UseMI.setDesc(get(NewUseOpc));
  if (Commute) {
    UseMI.getOperand(1).setReg(UseMI.getOperand(UseIdx).getReg());
  }
  UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
  if (DeleteDef)
    DefMI.eraseFromParent();

  return true;
}

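// Return the global base register used for GOT-based addressing, inserting a
// GETGOT pseudo into the entry block the first time it is requested.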
Register VEInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
  VEMachineFunctionInfo *VEFI = MF->getInfo<VEMachineFunctionInfo>();
  Register GlobalBaseReg = VEFI->getGlobalBaseReg();
  if (GlobalBaseReg != 0)
    return GlobalBaseReg;

  // We use %s15 (%got) as a global base register.
  GlobalBaseReg = VE::SX15;

  // Insert a pseudo instruction to set the GlobalBaseReg into the first
  // MBB of the function.
  MachineBasicBlock &FirstMBB = MF->front();
  MachineBasicBlock::iterator MBBI = FirstMBB.begin();
  DebugLoc dl;
  BuildMI(FirstMBB, MBBI, dl, get(VE::GETGOT), GlobalBaseReg);
  VEFI->setGlobalBaseReg(GlobalBaseReg);
  return GlobalBaseReg;
}

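// A VM512 register VMP<n> is modeled as a pair of VM mask registers; these
// helpers return the even-numbered VM of the pair ("upper") and the
// odd-numbered one ("lower").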
static Register getVM512Upper(Register reg) {
  return (reg - VE::VMP0) * 2 + VE::VM0;
}

static Register getVM512Lower(Register reg) { return getVM512Upper(reg) + 1; }

// Expand pseudo logical vector instructions for VM512 registers.
static void expandPseudoLogM(MachineInstr &MI, const MCInstrDesc &MCID) {
  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  Register VMXu = getVM512Upper(MI.getOperand(0).getReg());
  Register VMXl = getVM512Lower(MI.getOperand(0).getReg());
  Register VMYu = getVM512Upper(MI.getOperand(1).getReg());
  Register VMYl = getVM512Lower(MI.getOperand(1).getReg());

  switch (MI.getOpcode()) {
  default: {
    Register VMZu = getVM512Upper(MI.getOperand(2).getReg());
    Register VMZl = getVM512Lower(MI.getOperand(2).getReg());
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu).addUse(VMZu);
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl).addUse(VMZl);
    break;
  }
  case VE::NEGMy:
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu);
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl);
    break;
  }
  MI.eraseFromParent();
}

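// Append the operands of a pseudo VFMK (which operates on a VM512 pair) to
// MIB, selecting the upper or lower half of each VM512 operand as requested.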
static void addOperandsForVFMK(MachineInstrBuilder &MIB, MachineInstr &MI,
                               bool Upper) {
  // VM512
  MIB.addReg(Upper ? getVM512Upper(MI.getOperand(0).getReg())
                   : getVM512Lower(MI.getOperand(0).getReg()));

  switch (MI.getNumExplicitOperands()) {
  default:
    report_fatal_error("unexpected number of operands for pvfmk");
  case 2: // _Ml: VM512, VL
    // VL
    MIB.addReg(MI.getOperand(1).getReg());
    break;
  case 4: // _Mvl: VM512, CC, VR, VL
    // CC
    MIB.addImm(MI.getOperand(1).getImm());
    // VR
    MIB.addReg(MI.getOperand(2).getReg());
    // VL
    MIB.addReg(MI.getOperand(3).getReg());
    break;
  case 5: // _MvMl: VM512, CC, VR, VM512, VL
    // CC
    MIB.addImm(MI.getOperand(1).getImm());
    // VR
    MIB.addReg(MI.getOperand(2).getReg());
    // VM512
    MIB.addReg(Upper ? getVM512Upper(MI.getOperand(3).getReg())
                     : getVM512Lower(MI.getOperand(3).getReg()));
    // VL
    MIB.addReg(MI.getOperand(4).getReg());
    break;
  }
}

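// Expand a pseudo VFMK on a VM512 pair into the corresponding upper/lower
// instructions (pvfmk.w/pvfmk.s variants, or VFMKL for the "all"/"not all"
// forms), one per VM half.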
static void expandPseudoVFMK(const TargetInstrInfo &TI, MachineInstr &MI) {
  // Replace the pseudo with pvfmk.w.up and pvfmk.w.lo,
  // or with pvfmk.s.up and pvfmk.s.lo.

  static const std::pair<unsigned, std::pair<unsigned, unsigned>> VFMKMap[] = {
      {VE::VFMKyal, {VE::VFMKLal, VE::VFMKLal}},
      {VE::VFMKynal, {VE::VFMKLnal, VE::VFMKLnal}},
      {VE::VFMKWyvl, {VE::PVFMKWUPvl, VE::PVFMKWLOvl}},
      {VE::VFMKWyvyl, {VE::PVFMKWUPvml, VE::PVFMKWLOvml}},
      {VE::VFMKSyvl, {VE::PVFMKSUPvl, VE::PVFMKSLOvl}},
      {VE::VFMKSyvyl, {VE::PVFMKSUPvml, VE::PVFMKSLOvml}},
  };

  unsigned Opcode = MI.getOpcode();

  const auto *Found =
      llvm::find_if(VFMKMap, [&](auto P) { return P.first == Opcode; });
  if (Found == std::end(VFMKMap))
    report_fatal_error("unexpected opcode for pseudo vfmk");

  unsigned OpcodeUpper = (*Found).second.first;
  unsigned OpcodeLower = (*Found).second.second;

  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  MachineInstrBuilder Bu = BuildMI(*MBB, MI, DL, TI.get(OpcodeUpper));
  addOperandsForVFMK(Bu, MI, /* Upper */ true);
  MachineInstrBuilder Bl = BuildMI(*MBB, MI, DL, TI.get(OpcodeLower));
  addOperandsForVFMK(Bl, MI, /* Upper */ false);

  MI.eraseFromParent();
}

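// Expand post-RA pseudo instructions: stack pseudos (EXTEND_STACK,
// EXTEND_STACK_GUARD, GETSTACKTOP), VM512 logical/LVM/SVM pseudos, and pseudo
// VFMK instructions. Returns true if MI was a pseudo that has been expanded.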
bool VEInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case VE::EXTEND_STACK: {
    return expandExtendStackPseudo(MI);
  }
  case VE::EXTEND_STACK_GUARD: {
    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return true;
  }
  case VE::GETSTACKTOP: {
    return expandGetStackTopPseudo(MI);
  }

  case VE::ANDMyy:
    expandPseudoLogM(MI, get(VE::ANDMmm));
    return true;
  case VE::ORMyy:
    expandPseudoLogM(MI, get(VE::ORMmm));
    return true;
  case VE::XORMyy:
    expandPseudoLogM(MI, get(VE::XORMmm));
    return true;
  case VE::EQVMyy:
    expandPseudoLogM(MI, get(VE::EQVMmm));
    return true;
  case VE::NNDMyy:
    expandPseudoLogM(MI, get(VE::NNDMmm));
    return true;
  case VE::NEGMy:
    expandPseudoLogM(MI, get(VE::NEGMm));
    return true;

  case VE::LVMyir:
  case VE::LVMyim:
  case VE::LVMyir_y:
  case VE::LVMyim_y: {
    Register VMXu = getVM512Upper(MI.getOperand(0).getReg());
    Register VMXl = getVM512Lower(MI.getOperand(0).getReg());
    int64_t Imm = MI.getOperand(1).getImm();
    bool IsSrcReg =
        MI.getOpcode() == VE::LVMyir || MI.getOpcode() == VE::LVMyir_y;
    Register Src = IsSrcReg ? MI.getOperand(2).getReg() : VE::NoRegister;
    int64_t MImm = IsSrcReg ? 0 : MI.getOperand(2).getImm();
    bool KillSrc = IsSrcReg ? MI.getOperand(2).isKill() : false;
    Register VMX = VMXl;
    if (Imm >= 4) {
      VMX = VMXu;
      Imm -= 4;
    }
    MachineBasicBlock *MBB = MI.getParent();
    DebugLoc DL = MI.getDebugLoc();
    switch (MI.getOpcode()) {
    case VE::LVMyir:
      BuildMI(*MBB, MI, DL, get(VE::LVMir))
          .addDef(VMX)
          .addImm(Imm)
          .addReg(Src, getKillRegState(KillSrc));
      break;
    case VE::LVMyim:
      BuildMI(*MBB, MI, DL, get(VE::LVMim))
          .addDef(VMX)
          .addImm(Imm)
          .addImm(MImm);
      break;
    case VE::LVMyir_y:
      assert(MI.getOperand(0).getReg() == MI.getOperand(3).getReg() &&
             "LVMyir_y has different register in 3rd operand");
      BuildMI(*MBB, MI, DL, get(VE::LVMir_m))
          .addDef(VMX)
          .addImm(Imm)
          .addReg(Src, getKillRegState(KillSrc))
          .addReg(VMX);
      break;
    case VE::LVMyim_y:
      assert(MI.getOperand(0).getReg() == MI.getOperand(3).getReg() &&
             "LVMyim_y has different register in 3rd operand");
      BuildMI(*MBB, MI, DL, get(VE::LVMim_m))
          .addDef(VMX)
          .addImm(Imm)
          .addImm(MImm)
          .addReg(VMX);
      break;
    }
    MI.eraseFromParent();
    return true;
  }
  case VE::SVMyi: {
    Register Dest = MI.getOperand(0).getReg();
    Register VMZu = getVM512Upper(MI.getOperand(1).getReg());
    Register VMZl = getVM512Lower(MI.getOperand(1).getReg());
    bool KillSrc = MI.getOperand(1).isKill();
    int64_t Imm = MI.getOperand(2).getImm();
    Register VMZ = VMZl;
    if (Imm >= 4) {
      VMZ = VMZu;
      Imm -= 4;
    }
    MachineBasicBlock *MBB = MI.getParent();
    DebugLoc DL = MI.getDebugLoc();
    MachineInstrBuilder MIB =
        BuildMI(*MBB, MI, DL, get(VE::SVMmi), Dest).addReg(VMZ).addImm(Imm);
    MachineInstr *Inst = MIB.getInstr();
    if (KillSrc) {
      const TargetRegisterInfo *TRI = &getRegisterInfo();
      Inst->addRegisterKilled(MI.getOperand(1).getReg(), TRI, true);
    }
    MI.eraseFromParent();
    return true;
  }
  case VE::VFMKyal:
  case VE::VFMKynal:
  case VE::VFMKWyvl:
  case VE::VFMKWyvyl:
  case VE::VFMKSyvl:
  case VE::VFMKSyvyl:
    expandPseudoVFMK(*this, MI);
    return true;
  }
  return false;
}

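// Expand EXTEND_STACK into an inline stack-limit check followed by a grow
// request, as described by the block comment below: when %sp has dropped
// below the stack limit %sl, a new syscallBB issues the grow syscall via the
// monc monitor call, and control then continues in sinkBB.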
bool VEInstrInfo::expandExtendStackPseudo(MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
  const VEInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc dl = MBB.findDebugLoc(MI);

  // Create the following instructions and multiple basic blocks.
  //
  // thisBB:
  //   brge.l.t %sp, %sl, sinkBB
  // syscallBB:
  //   ld    %s61, 0x18(, %tp)   // load param area
  //   or    %s62, 0, %s0        // spill the value of %s0
  //   lea   %s63, 0x13b         // syscall # of grow
  //   shm.l %s63, 0x0(%s61)     // store syscall # at addr:0
  //   shm.l %sl, 0x8(%s61)      // store old limit at addr:8
  //   shm.l %sp, 0x10(%s61)     // store new limit at addr:16
  //   monc                      // call monitor
  //   or    %s0, 0, %s62        // restore the value of %s0
  // sinkBB:

  // Create new MBBs.
  MachineBasicBlock *BB = &MBB;
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *syscallMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++(BB->getIterator());
  MF.insert(It, syscallMBB);
  MF.insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(std::next(MachineBasicBlock::iterator(MI))),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(syscallMBB);
  BB->addSuccessor(sinkMBB);
  BuildMI(BB, dl, TII.get(VE::BRCFLrr_t))
      .addImm(VECC::CC_IGE)
      .addReg(VE::SX11) // %sp
      .addReg(VE::SX8)  // %sl
      .addMBB(sinkMBB);

  BB = syscallMBB;

  // Update machine-CFG edges.
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(VE::LDrii), VE::SX61)
      .addReg(VE::SX14)
      .addImm(0)
      .addImm(0x18);
  BuildMI(BB, dl, TII.get(VE::ORri), VE::SX62)
      .addReg(VE::SX0)
      .addImm(0);
  BuildMI(BB, dl, TII.get(VE::LEAzii), VE::SX63)
      .addImm(0)
      .addImm(0)
      .addImm(0x13b);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(0)
      .addReg(VE::SX63);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(8)
      .addReg(VE::SX8);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(16)
      .addReg(VE::SX11);
  BuildMI(BB, dl, TII.get(VE::MONC));

  BuildMI(BB, dl, TII.get(VE::ORri), VE::SX0)
      .addReg(VE::SX62)
      .addImm(0);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return true;
}

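// Expand GETSTACKTOP into a LEArii that computes %sp plus the target-specific
// reserved frame area (and the outgoing parameter area, when a reserved call
// frame is used).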
bool VEInstrInfo::expandGetStackTopPseudo(MachineInstr &MI) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction &MF = *MBB->getParent();
  const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
  const VEInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc DL = MBB->findDebugLoc(MI);

  // Create the following instruction:
  //
  //   dst = %sp + target-specific frame + the size of the parameter area

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const VEFrameLowering &TFL = *STI.getFrameLowering();

  // The VE ABI requires a reserved area at the top of stack as described
  // in VEFrameLowering.cpp. So, we adjust it here.
  unsigned NumBytes = STI.getAdjustedFrameSize(0);

  // Also add the size of the parameter area.
  if (MFI.adjustsStack() && TFL.hasReservedCallFrame(MF))
    NumBytes += MFI.getMaxCallFrameSize();

  BuildMI(*MBB, MI, DL, TII.get(VE::LEArii))
      .addDef(MI.getOperand(0).getReg())
      .addReg(VE::SX11)
      .addImm(0)
      .addImm(NumBytes);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return true;
}