#include "ABIInfoImpl.h"
#include "HLSLBufferLayoutBuilder.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;
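// SPIR and SPIR-V target lowering for clang CodeGen: ABIInfo subclasses that
// decide how kernel and non-kernel arguments are passed, plus
// TargetCodeGenInfo subclasses that map OpenCL and HLSL builtin types onto
// SPIR-V target extension types.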
class CommonSPIRABIInfo : public DefaultABIInfo {
public:
  CommonSPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); }

private:
  void setCCs();
};
class SPIRVABIInfo : public CommonSPIRABIInfo {
public:
  SPIRVABIInfo(CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {}
  void computeInfo(CGFunctionInfo &FI) const override;

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;
};
class CommonSPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  CommonSPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<CommonSPIRABIInfo>(CGT)) {}
  CommonSPIRTargetCodeGenInfo(std::unique_ptr<ABIInfo> ABIInfo)
      : TargetCodeGenInfo(std::move(ABIInfo)) {}

  LangAS getASTAllocaAddressSpace() const override {
    return getLangASFromTargetAS(
        getABIInfo().getDataLayout().getAllocaAddrSpace());
  }

  unsigned getDeviceKernelCallingConv() const override;
  llvm::Type *getOpenCLType(CodeGenModule &CGM, const Type *T) const override;
  llvm::Type *getHLSLType(CodeGenModule &CGM, const Type *Ty,
                          const SmallVector<int32_t> *Packoffsets =
                              nullptr) const override;
  llvm::Type *getSPIRVImageTypeFromHLSLResource(
      const HLSLAttributedResourceType::Attributes &attributes,
      QualType SampledType, CodeGenModule &CGM) const;
  void setOCLKernelStubCallingConvention(const FunctionType *&FT) const override;
};
class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo {
public:
  SPIRVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : CommonSPIRTargetCodeGenInfo(std::make_unique<SPIRVABIInfo>(CGT)) {}

  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override;
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
                                         SyncScope Scope,
                                         llvm::AtomicOrdering Ordering,
                                         llvm::LLVMContext &Ctx) const override;
  bool supportsLibCall() const override {
    return getABIInfo().getTarget().getTriple().getVendor() !=
           llvm::Triple::AMD;
  }
};

// Translates a Clang SyncScope into the LLVM sync-scope name consumed by
// getLLVMSyncScopeID() below. Only the single-thread case survives in this
// excerpt; the remaining scope mappings are elided.
static StringRef mapClangSyncScopeToLLVM(SyncScope Scope) {
  switch (Scope) {
  case SyncScope::SingleScope:
    return "singlethread";
  default:
    // ... other SyncScope values elided in this excerpt ...
    return "";
  }
}
void CommonSPIRABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);
  // Runtime helper calls use the SPIR function calling convention.
  RuntimeCC = llvm::CallingConv::SPIR_FUNC;
}
ABIArgInfo SPIRVABIInfo::classifyReturnType(QualType RetTy) const {
  // Non-AMD SPIR-V targets use the default return classification.
  if (getTarget().getTriple().getVendor() != llvm::Triple::AMD)
    return DefaultABIInfo::classifyReturnType(RetTy);
  // ... AMD-specific aggregate return handling elided in this excerpt ...
}
ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
  if (getContext().getLangOpts().isTargetDevice()) {
    // Pointer kernel arguments in the default address space are coerced to
    // global (CrossWorkGroup) pointers.
    llvm::Type *LTy = CGT.ConvertType(Ty);
    auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
    auto GlobalAS = getContext().getTargetAddressSpace(LangAS::opencl_global);
    auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(LTy);
    if (PtrTy && PtrTy->getAddressSpace() == DefaultAS) {
      LTy = llvm::PointerType::get(PtrTy->getContext(), GlobalAS);
      return ABIArgInfo::getDirect(LTy, /*Offset=*/0, /*Padding=*/nullptr,
                                   /*CanBeFlattened=*/false);
    }

    if (getTarget().getTriple().getVendor() == llvm::Triple::AMD) {
      // ... AMD-specific handling elided in this excerpt ...
    }

    // Force aggregate kernel arguments to be copied by value in the default
    // (0) address space.
    return getNaturalAlignIndirect(Ty, /*AddrSpace=*/0, /*ByVal=*/true);
  }
  // Otherwise fall back to the regular argument classification.
  return classifyArgumentType(Ty);
}
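// Illustration of the rules above: a HIP/OpenMP device kernel parameter
// 'int *p' declared in the default address space is passed as a pointer in
// the global/CrossWorkGroup address space, while a 'struct S' parameter is
// passed byval so the kernel receives its own copy.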
ABIArgInfo SPIRVABIInfo::classifyArgumentType(QualType Ty) const {
  if (getTarget().getTriple().getVendor() != llvm::Triple::AMD)
    return DefaultABIInfo::classifyArgumentType(Ty);

  // Records with non-trivial destructors or copy constructors are passed
  // indirectly (surrounding checks elided in this excerpt).
  if (const auto *RT = Ty->getAs<RecordType>())
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
      return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                     RAA == CGCXXABI::RAA_DirectInMemory);
  // ... remainder elided in this excerpt ...
}
void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  // Same as DefaultABIInfo, except that kernel arguments get their own
  // classification.
  llvm::CallingConv::ID CC = FI.getCallingConvention();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments()) {
    if (CC == llvm::CallingConv::SPIR_KERNEL) {
      I.info = classifyKernelArgumentType(I.type);
    } else {
      I.info = classifyArgumentType(I.type);
    }
  }
}

void CodeGen::computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  if (CGM.getTarget().getTriple().isSPIRV())
    SPIRVABIInfo(CGM.getTypes()).computeInfo(FI);
  else
    CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI);
}
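// computeSPIRKernelABIInfo is the entry point other parts of CodeGen call to
// apply the SPIR/SPIR-V kernel-argument rules above when building a
// CGFunctionInfo for a device kernel.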
unsigned CommonSPIRTargetCodeGenInfo::getDeviceKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}
void SPIRVTargetCodeGenInfo::setCUDAKernelCallingConvention(
    const FunctionType *&FT) const {
  // Convert HIP kernels to SPIR-V device kernels by switching the calling
  // convention (CC_DeviceKernel assumed; the call was truncated in this
  // excerpt).
  if (getABIInfo().getContext().getLangOpts().HIP) {
    FT = getABIInfo().getContext().adjustFunctionType(
        FT, FT->getExtInfo().withCallingConv(CC_DeviceKernel));
  }
}
void CommonSPIRTargetCodeGenInfo::setOCLKernelStubCallingConvention(
    const FunctionType *&FT) const {
  // Kernel stubs are plain SPIR functions (CC_SpirFunction assumed; the call
  // was truncated in this excerpt).
  FT = getABIInfo().getContext().adjustFunctionType(
      FT, FT->getExtInfo().withCallingConv(CC_SpirFunction));
}
LangAS
SPIRVTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                 const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  // Use sycl_global as the default global address space; it maps to SPIR-V's
  // CrossWorkGroup storage class (initialization partly elided in this
  // excerpt).
  LangAS DefaultGlobalAS = getLangASFromTargetAS(
      CGM.getContext().getTargetAddressSpace(LangAS::sycl_global));
  if (!D)
    return DefaultGlobalAS;

  LangAS AddrSpace = D->getType().getAddressSpace();
  if (AddrSpace != LangAS::Default)
    return AddrSpace;

  return DefaultGlobalAS;
}
void SPIRVTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  // Guard elided in this excerpt: only HIP compilations targeting AMD-vendored
  // SPIR-V are annotated.
  if (GV->isDeclaration())
    return;

  auto F = dyn_cast<llvm::Function>(GV);
  if (!F)
    return;

  auto FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;
  if (!FD->hasAttr<CUDAGlobalAttr>())
    return;

  unsigned N = M.getLangOpts().GPUMaxThreadsPerBlock;
  if (auto FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>())
    N = FlatWGS->getMax()->EvaluateKnownConstInt(M.getContext()).getExtValue();

  // Encode the maximum flat work-group size as a 3-element
  // max_work_group_size metadata node.
  auto Int32Ty = llvm::IntegerType::getInt32Ty(M.getLLVMContext());
  llvm::Metadata *AttrMDArgs[] = {
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, N)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1))};

  F->setMetadata("max_work_group_size",
                 llvm::MDNode::get(M.getLLVMContext(), AttrMDArgs));
}
llvm::SyncScope::ID
SPIRVTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &, SyncScope Scope,
                                           llvm::AtomicOrdering,
                                           llvm::LLVMContext &Ctx) const {
  return Ctx.getOrInsertSyncScopeID(mapClangSyncScopeToLLVM(Scope));
}
// Construct a SPIR-V target extension type for the given OpenCL image type.
static llvm::Type *getSPIRVImageType(llvm::LLVMContext &Ctx, StringRef BaseType,
                                     StringRef OpenCLName,
                                     unsigned AccessQualifier) {
  // The first six integer parameters mirror the operands of OpTypeImage
  // (Dim, Depth, Arrayed, MS, Sampled, Format); the access qualifier is
  // appended as a seventh parameter.
  // NOTE: the operand assignments below were elided in this excerpt and
  // follow the standard SPIR-V Dim and flag encodings.
  SmallVector<unsigned, 7> IntParams = {0, 0, 0, 0, 0, 0};

  // Select the dimensionality (SPIR-V Dim operand).
  if (OpenCLName.starts_with("image2d"))
    IntParams[0] = 1; // Dim2D
  else if (OpenCLName.starts_with("image3d"))
    IntParams[0] = 2; // Dim3D
  else if (OpenCLName == "image1d_buffer")
    IntParams[0] = 5; // DimBuffer
  else
    assert(OpenCLName.starts_with("image1d") && "Unknown image type");

  if (OpenCLName.contains("_depth"))
    IntParams[1] = 1;
  if (OpenCLName.contains("_array"))
    IntParams[2] = 1;
  if (OpenCLName.contains("_msaa"))
    IntParams[3] = 1;

  // Access qualifier.
  IntParams.push_back(AccessQualifier);

  return llvm::TargetExtType::get(Ctx, BaseType, {llvm::Type::getVoidTy(Ctx)},
                                  IntParams);
}
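// Illustration (assuming the encodings above): an OpenCL 'image2d_array_ro_t'
// lowers to target("spirv.Image", void, 1, 0, 1, 0, 0, 0, 0), i.e. Dim2D,
// arrayed, with the read-only access qualifier (0) appended last.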
llvm::Type *CommonSPIRTargetCodeGenInfo::getOpenCLType(CodeGenModule &CGM,
                                                       const Type *Ty) const {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
  if (auto *PipeTy = dyn_cast<PipeType>(Ty))
    return llvm::TargetExtType::get(Ctx, "spirv.Pipe", {},
                                    {!PipeTy->isReadOnly()});
  if (auto *BuiltinTy = dyn_cast<BuiltinType>(Ty)) {
    enum AccessQualifier : unsigned { AQ_ro = 0, AQ_wo = 1, AQ_rw = 2 };
    switch (BuiltinTy->getKind()) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
  case BuiltinType::Id:                                                        \
    return getSPIRVImageType(Ctx, "spirv.Image", #ImgType, AQ_##Suffix);
#include "clang/Basic/OpenCLImageTypes.def"
    case BuiltinType::OCLSampler:
      return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
    case BuiltinType::OCLEvent:
      return llvm::TargetExtType::get(Ctx, "spirv.Event");
    case BuiltinType::OCLClkEvent:
      return llvm::TargetExtType::get(Ctx, "spirv.DeviceEvent");
    case BuiltinType::OCLQueue:
      return llvm::TargetExtType::get(Ctx, "spirv.Queue");
    case BuiltinType::OCLReserveID:
      return llvm::TargetExtType::get(Ctx, "spirv.ReserveId");
#define INTEL_SUBGROUP_AVC_TYPE(Name, Id)                                      \
  case BuiltinType::OCLIntelSubgroupAVC##Id:                                   \
    return llvm::TargetExtType::get(Ctx, "spirv.Avc" #Id "INTEL");
#include "clang/Basic/OpenCLExtensionTypes.def"
    default:
      return nullptr;
    }
  }
  return nullptr;
}
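// Reading the mappings above: an OpenCL 'sampler_t' becomes
// target("spirv.Sampler") and a non-read-only pipe becomes
// target("spirv.Pipe", 1) in the generated IR.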
// Build a SPIR-V constant or literal target extension type holding Value.
static llvm::Type *getInlineSpirvConstant(CodeGenModule &CGM,
                                          llvm::Type *IntegralType,
                                          llvm::APInt Value) {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  // Split the value into 32-bit words, least significant first.
  llvm::SmallVector<uint32_t> Words;
  while (Value.ugt(0)) {
    uint32_t Word = Value.trunc(32).getZExtValue();
    Value.lshrInPlace(32);
    Words.push_back(Word);
  }
  if (Words.size() == 0)
    Words.push_back(0);

  // Typed constants become spirv.IntegralConstant; untyped ones are literals.
  if (IntegralType)
    return llvm::TargetExtType::get(Ctx, "spirv.IntegralConstant",
                                    {IntegralType}, Words);
  return llvm::TargetExtType::get(Ctx, "spirv.Literal", {}, Words);
}
// Lower an HLSL inline SPIR-V type into a "spirv.Type" target extension type.
// NOTE: parts of the operand plumbing below were elided in this excerpt and
// are reconstructed.
static llvm::Type *getInlineSpirvType(CodeGenModule &CGM,
                                      const HLSLInlineSpirvType *SpirvType) {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
  llvm::SmallVector<llvm::Type *> Operands;

  for (auto &Operand : SpirvType->getOperands()) {
    using SpirvOperandKind = SpirvOperand::SpirvOperandKind;

    llvm::Type *Result = nullptr;
    switch (Operand.getKind()) {
    case SpirvOperandKind::ConstantId: {
      llvm::Type *IntegralType =
          CGM.getTypes().ConvertType(Operand.getResultType());
      Result = getInlineSpirvConstant(CGM, IntegralType, Operand.getValue());
      break;
    }
    case SpirvOperandKind::Literal: {
      Result = getInlineSpirvConstant(CGM, /*IntegralType=*/nullptr,
                                      Operand.getValue());
      break;
    }
    case SpirvOperandKind::TypeId: {
      QualType TypeOperand = Operand.getResultType();
      if (auto *RD = TypeOperand->getAsRecordDecl()) {
        assert(RD->isCompleteDefinition() &&
               "Type completion should have been required in Sema");

        // If the record wraps a resource handle, lower the handle type
        // instead.
        const FieldDecl *HandleField = RD->findFirstNamedDataMember();
        if (HandleField) {
          QualType ResourceType = HandleField->getType();
          if (ResourceType->getAs<HLSLAttributedResourceType>()) {
            TypeOperand = ResourceType;
          }
        }
      }
      Result = CGM.getTypes().ConvertType(TypeOperand);
      break;
    }
    default:
      llvm_unreachable("HLSLInlineSpirvType had invalid operand!");
    }

    Operands.push_back(Result);
  }

  return llvm::TargetExtType::get(Ctx, "spirv.Type", Operands,
                                  {SpirvType->getOpcode(), SpirvType->getSize(),
                                   SpirvType->getAlignment()});
}
llvm::Type *CommonSPIRTargetCodeGenInfo::getHLSLType(
    CodeGenModule &CGM, const Type *Ty,
    const SmallVector<int32_t> *Packoffsets) const {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  if (auto *SpirvType = dyn_cast<HLSLInlineSpirvType>(Ty))
    return getInlineSpirvType(CGM, SpirvType);

  auto *ResType = dyn_cast<HLSLAttributedResourceType>(Ty);
  if (!ResType)
    return nullptr;

  const HLSLAttributedResourceType::Attributes &ResAttrs = ResType->getAttrs();
  switch (ResAttrs.ResourceClass) {
  case llvm::dxil::ResourceClass::UAV:
  case llvm::dxil::ResourceClass::SRV: {
    // Both typed and raw buffers need a contained element type.
    QualType ContainedTy = ResType->getContainedType();
    if (ContainedTy.isNull())
      return nullptr;

    assert(!ResAttrs.IsROV &&
           "Rasterizer order views not implemented for SPIR-V yet");

    if (!ResAttrs.RawBuffer) {
      // Typed buffers become SPIR-V image types.
      return getSPIRVImageTypeFromHLSLResource(ResAttrs, ContainedTy, CGM);
    }

    // Raw/structured buffers become runtime arrays inside a VulkanBuffer.
    llvm::Type *ElemType = CGM.getTypes().ConvertTypeForMem(ContainedTy);
    llvm::ArrayType *RuntimeArrayType = llvm::ArrayType::get(ElemType, 0);
    bool IsWritable = ResAttrs.ResourceClass == llvm::dxil::ResourceClass::UAV;
    // Trailing operands were truncated in this excerpt; 12 is the SPIR-V
    // StorageBuffer storage class.
    return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer",
                                    {RuntimeArrayType},
                                    {/*StorageClass=*/12, IsWritable});
  }
  case llvm::dxil::ResourceClass::CBuffer: {
    QualType ContainedTy = ResType->getContainedType();
    if (ContainedTy.isNull() || !ContainedTy->isStructureType())
      return nullptr;

    llvm::Type *BufferLayoutTy =
        HLSLBufferLayoutBuilder(CGM, "spirv.Layout")
            .createLayoutType(ContainedTy->castAsCanonical<RecordType>(),
                              Packoffsets);
    // Trailing operands were truncated in this excerpt; 2 is the SPIR-V
    // Uniform storage class.
    return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer", {BufferLayoutTy},
                                    {/*StorageClass=*/2, /*IsWritable=*/false});
  }
  case llvm::dxil::ResourceClass::Sampler:
    return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
  }
  return nullptr;
}
// Pick the SPIR-V ImageFormat operand for an HLSL typed-buffer element.
// NOTE: the individual return values were elided in this excerpt; the numbers
// below are the standard SPIR-V ImageFormat enumerants for the shown cases.
static unsigned getImageFormat(const LangOptions &LangOpts,
                               const HLSLAttributedResourceType::Attributes
                                   &attributes,
                               llvm::Type *SampledType, QualType Ty,
                               unsigned NumChannels) {
  // Use Unknown when requested by the language options or for anything that
  // is not a UAV.
  if (LangOpts.HLSLSpvUseUnknownImageFormat ||
      attributes.ResourceClass != llvm::dxil::ResourceClass::UAV) {
    return 0; // Unknown
  }

  if (SampledType->isIntegerTy(32)) {
    if (Ty->isSignedIntegerType()) {
      if (NumChannels == 1)
        return 24; // R32i
      if (NumChannels == 2)
        return 25; // Rg32i
      if (NumChannels == 4)
        return 21; // Rgba32i
    } else {
      if (NumChannels == 1)
        return 33; // R32ui
      if (NumChannels == 2)
        return 35; // Rg32ui
      if (NumChannels == 4)
        return 30; // Rgba32ui
    }
  } else if (SampledType->isIntegerTy(64)) {
    if (NumChannels == 1)
      return Ty->isSignedIntegerType() ? 41 /* R64i */ : 40 /* R64ui */;
  } else if (SampledType->isFloatTy()) {
    if (NumChannels == 1)
      return 3; // R32f
    if (NumChannels == 2)
      return 6; // Rg32f
    if (NumChannels == 4)
      return 1; // Rgba32f
  }
  return 0; // Unknown
}
llvm::Type *CommonSPIRTargetCodeGenInfo::getSPIRVImageTypeFromHLSLResource(
    const HLSLAttributedResourceType::Attributes &attributes, QualType Ty,
    CodeGenModule &CGM) const {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  Ty = Ty->getCanonicalTypeUnqualified();

  // A vector element type contributes its channel count; scalars have one.
  unsigned NumChannels = 1;
  if (const VectorType *V = dyn_cast<VectorType>(Ty)) {
    NumChannels = V->getNumElements();
    Ty = V->getElementType();
  }
  assert(!Ty->isVectorType() && "We still have a vector type.");

  llvm::Type *SampledType = CGM.getTypes().ConvertTypeForMem(Ty);
  assert((SampledType->isIntegerTy() || SampledType->isFloatingPointTy()) &&
         "The element type for a SPIR-V resource must be a scalar integer or "
         "floating point type.");

  // The six integer parameters are the OpTypeImage operands
  // (Dim, Depth, Arrayed, MS, Sampled, Format).
  SmallVector<unsigned, 6> IntParams(6, 0);
  // ... elided: Dim/Depth/Arrayed/MS operands and image-format selection ...

  // Sampled: storage (UAV) images use 2, sampled (SRV) images use 1.
  IntParams[4] =
      attributes.ResourceClass == llvm::dxil::ResourceClass::UAV ? 2 : 1;

  // The type name was elided in this excerpt; "spirv.Image" is the common
  // case.
  const char *Name = "spirv.Image";

  llvm::TargetExtType *ImageType =
      llvm::TargetExtType::get(Ctx, Name, {SampledType}, IntParams);
  return ImageType;
}
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createCommonSPIRTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<CommonSPIRTargetCodeGenInfo>(CGM.getTypes());
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createSPIRVTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<SPIRVTargetCodeGenInfo>(CGM.getTypes());
}
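// Illustrative only (not from the original file): callers in CodeGen pick the
// factory based on the target triple, roughly like
//   std::unique_ptr<TargetCodeGenInfo> Info =
//       Triple.isSPIRV() ? CodeGen::createSPIRVTargetCodeGenInfo(CGM)
//                        : CodeGen::createCommonSPIRTargetCodeGenInfo(CGM);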