34 cl::desc(
"Force the use of resource intervals in the schedule model"));
37 return EnableSchedModel && SchedModel.hasInstrSchedModel();
41 return EnableSchedItins && !InstrItins.isEmpty();
45 bool EnableSModel,
bool EnableSItins) {
49 STI->initInstrItins(InstrItins);
51 EnableSchedModel = EnableSModel;
52 EnableSchedItins = EnableSItins;
54 unsigned NumRes = SchedModel.getNumProcResourceKinds();
55 ResourceFactors.resize(NumRes);
56 ResourceLCM = SchedModel.IssueWidth;
57 for (
unsigned Idx = 0; Idx < NumRes; ++Idx) {
58 unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
60 ResourceLCM = std::lcm(ResourceLCM, NumUnits);
62 MicroOpFactor = ResourceLCM / SchedModel.IssueWidth;
63 for (
unsigned Idx = 0; Idx < NumRes; ++Idx) {
64 unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
65 ResourceFactors[Idx] = NumUnits ? (ResourceLCM / NumUnits) : 0;
95 int UOps = InstrItins.getNumMicroOps(
MI->getDesc().getSchedClass());
96 return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, *
MI);
104 return MI->isTransient() ? 0 : 1;
112 return Cycles >= 0 ? Cycles : 1000;
120 unsigned SchedClass =
MI->getDesc().getSchedClass();
129 assert(++NIter < 6 &&
"Variants are nested deeper than the magic number");
131 SchedClass = STI->resolveSchedClass(SchedClass,
MI,
this);
132 SCDesc = SchedModel.getSchedClassDesc(SchedClass);
144 for (
unsigned i = 0; i != DefOperIdx; ++i) {
160 for (
unsigned i = 0; i != UseOperIdx; ++i) {
174 const unsigned InstrLatency = computeInstrLatency(
DefMI);
175 const unsigned DefaultDefLatency = TII->defaultDefLatency(SchedModel, *
DefMI);
178 return DefaultDefLatency;
181 std::optional<unsigned> OperLatency;
183 OperLatency = TII->getOperandLatency(&InstrItins, *
DefMI, DefOperIdx,
187 unsigned DefClass =
DefMI->getDesc().getSchedClass();
188 OperLatency = InstrItins.getOperandCycle(DefClass, DefOperIdx);
193 return OperLatency ? *OperLatency
194 : std::max(InstrLatency, DefaultDefLatency);
200 if (DefIdx < SCDesc->NumWriteLatencyEntries) {
203 STI->getWriteLatencyEntry(SCDesc, DefIdx);
214 int Advance = STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
215 if (Advance > 0 && (
unsigned)Advance >
Latency)
222 if (SCDesc->
isValid() && !
DefMI->getOperand(DefOperIdx).isImplicit() &&
223 !
DefMI->getDesc().operands()[DefOperIdx].isOptionalDef() &&
224 SchedModel.isComplete()) {
225 errs() <<
"DefIdx " << DefIdx <<
" exceeds machine model writes for "
226 << *
DefMI <<
" (Try with MCSchedModel.CompleteModel set to false)";
233 return DefMI->isTransient() ? 0 : DefaultDefLatency;
237TargetSchedModel::computeInstrLatency(
const MCSchedClassDesc &SCDesc)
const {
241unsigned TargetSchedModel::computeInstrLatency(
unsigned Opcode)
const {
243 unsigned SCIdx = TII->get(Opcode).getSchedClass();
244 return capLatency(SchedModel.computeInstrLatency(*STI, SCIdx));
247unsigned TargetSchedModel::computeInstrLatency(
const MCInst &Inst)
const {
249 return capLatency(SchedModel.computeInstrLatency(*STI, *TII, Inst));
250 return computeInstrLatency(Inst.
getOpcode());
255 bool UseDefaultDefLatency)
const {
260 return TII->getInstrLatency(&InstrItins, *
MI);
265 return computeInstrLatency(*SCDesc);
267 return TII->defaultDefLatency(SchedModel, *
MI);
273 if (!SchedModel.isOutOfOrder())
288 return computeInstrLatency(
DefMI);
296 *PRE = STI->getWriteProcResEnd(SCDesc); PRI != PRE; ++PRI) {
297 if (!SchedModel.getProcResource(PRI->ProcResourceIdx)->BufferSize)
308 unsigned SchedClass =
MI->getDesc().getSchedClass();
321 unsigned SchedClass = TII->get(Opcode).getSchedClass();
337 return SchedModel.getReciprocalThroughput(*STI, *TII,
MI);
345 return SchedModel.EnableIntervals;
MachineInstrBuilder & UseMI
MachineInstrBuilder & DefMI
assert(UImm && (UImm != ~static_cast<T>(0)) && "Invalid immediate!")
Register const TargetRegisterInfo * TRI
static unsigned findUseIdx(const MachineInstr *MI, unsigned UseOperIdx)
Find the use index of this operand.
static unsigned capLatency(int Cycles)
static unsigned findDefIdx(const MachineInstr *MI, unsigned DefOperIdx)
Find the def index of this operand.
static cl::opt< bool > ForceEnableIntervals("sched-model-force-enable-intervals", cl::Hidden, cl::init(false), cl::desc("Force the use of resource intervals in the schedule model"))
Instances of this class represent a single low-level machine instruction.
unsigned getOpcode() const
const MCSchedModel & getSchedModel() const
Get the machine model for this subtarget's CPU.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Representation of each machine instruction.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
MachineOperand class - Representation of each machine instruction operand.
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Wrapper class representing virtual and physical registers.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
LLVM_ABI bool mustEndGroup(const MachineInstr *MI, const MCSchedClassDesc *SC=nullptr) const
Return true if current group must end.
LLVM_ABI bool hasInstrSchedModel() const
Return true if this machine model includes an instruction-level scheduling model.
LLVM_ABI unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *DepMI) const
Output dependency latency of a pair of defs of the same register.
LLVM_ABI bool mustBeginGroup(const MachineInstr *MI, const MCSchedClassDesc *SC=nullptr) const
Return true if new group must begin.
LLVM_ABI void init(const TargetSubtargetInfo *TSInfo, bool EnableSModel=true, bool EnableSItins=true)
Initialize the machine model for instruction scheduling.
LLVM_ABI const MCSchedClassDesc * resolveSchedClass(const MachineInstr *MI) const
Return the MCSchedClassDesc for this instruction.
LLVM_ABI unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *UseMI, unsigned UseOperIdx) const
Compute operand latency based on the available machine model.
LLVM_ABI double computeReciprocalThroughput(const MachineInstr *MI) const
Compute the reciprocal throughput of the given instruction.
LLVM_ABI unsigned getNumMicroOps(const MachineInstr *MI, const MCSchedClassDesc *SC=nullptr) const
Return the number of issue slots required for this MI.
const InstrItineraryData * getInstrItineraries() const
LLVM_ABI bool enableIntervals() const
LLVM_ABI bool hasInstrItineraries() const
Return true if this machine model includes cycle-to-cycle itinerary data.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
Summarize the scheduling resources required for an instruction of a particular scheduling class.
uint16_t NumReadAdvanceEntries
static LLVM_ABI int computeInstrLatency(const MCSubtargetInfo &STI, const MCSchedClassDesc &SCDesc)
Returns the latency value for the scheduling class.
static LLVM_ABI double getReciprocalThroughput(const MCSubtargetInfo &STI, const MCSchedClassDesc &SCDesc)
Specify the latency in CPU cycles for a particular scheduling class and def index.
Identify one of the processor resource kinds consumed by a particular scheduling class for the specif...