//===- SPIR.cpp -----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "ABIInfoImpl.h"
#include "HLSLBufferLayoutBuilder.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;
15
//===----------------------------------------------------------------------===//
// Base ABI and target codegen info implementation common between SPIR and
// SPIR-V.
//===----------------------------------------------------------------------===//
21namespace {
22class CommonSPIRABIInfo : public DefaultABIInfo {
23public:
24 CommonSPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); }
25
26private:
27 void setCCs();
28};
29
30class SPIRVABIInfo : public CommonSPIRABIInfo {
31public:
32 SPIRVABIInfo(CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {}
33 void computeInfo(CGFunctionInfo &FI) const override;
34
35private:
36 ABIArgInfo classifyReturnType(QualType RetTy) const;
37 ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
38 ABIArgInfo classifyArgumentType(QualType Ty) const;
39};
40} // end anonymous namespace
41namespace {
42class CommonSPIRTargetCodeGenInfo : public TargetCodeGenInfo {
43public:
44 CommonSPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
45 : TargetCodeGenInfo(std::make_unique<CommonSPIRABIInfo>(CGT)) {}
46 CommonSPIRTargetCodeGenInfo(std::unique_ptr<ABIInfo> ABIInfo)
47 : TargetCodeGenInfo(std::move(ABIInfo)) {}
48
49 LangAS getASTAllocaAddressSpace() const override {
51 getABIInfo().getDataLayout().getAllocaAddrSpace());
52 }
53
54 unsigned getDeviceKernelCallingConv() const override;
55 llvm::Type *getOpenCLType(CodeGenModule &CGM, const Type *T) const override;
56 llvm::Type *
57 getHLSLType(CodeGenModule &CGM, const Type *Ty,
58 const SmallVector<int32_t> *Packoffsets = nullptr) const override;
59 llvm::Type *getSPIRVImageTypeFromHLSLResource(
60 const HLSLAttributedResourceType::Attributes &attributes,
61 QualType SampledType, CodeGenModule &CGM) const;
62 void
63 setOCLKernelStubCallingConvention(const FunctionType *&FT) const override;
64};
65class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo {
66public:
67 SPIRVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
68 : CommonSPIRTargetCodeGenInfo(std::make_unique<SPIRVABIInfo>(CGT)) {}
69 void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
70 LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
71 const VarDecl *D) const override;
72 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
73 CodeGen::CodeGenModule &M) const override;
74 llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
75 SyncScope Scope,
76 llvm::AtomicOrdering Ordering,
77 llvm::LLVMContext &Ctx) const override;
78 bool supportsLibCall() const override {
79 return getABIInfo().getTarget().getTriple().getVendor() !=
80 llvm::Triple::AMD;
81 }
82};
83
84inline StringRef mapClangSyncScopeToLLVM(SyncScope Scope) {
85 switch (Scope) {
88 return "singlethread";
92 return "subgroup";
96 return "workgroup";
100 return "device";
104 return "";
105 }
106 return "";
107}
108} // End anonymous namespace.
109
110void CommonSPIRABIInfo::setCCs() {
111 assert(getRuntimeCC() == llvm::CallingConv::C);
112 RuntimeCC = llvm::CallingConv::SPIR_FUNC;
113}
114
115ABIArgInfo SPIRVABIInfo::classifyReturnType(QualType RetTy) const {
116 if (getTarget().getTriple().getVendor() != llvm::Triple::AMD)
118 if (!isAggregateTypeForABI(RetTy) || getRecordArgABI(RetTy, getCXXABI()))
120
121 if (const auto *RD = RetTy->getAsRecordDecl();
122 RD && RD->hasFlexibleArrayMember())
124
125 // TODO: The AMDGPU ABI is non-trivial to represent in SPIR-V; in order to
126 // avoid encoding various architecture specific bits here we return everything
127 // as direct to retain type info for things like aggregates, for later perusal
128 // when translating back to LLVM/lowering in the BE. This is also why we
129 // disable flattening as the outcomes can mismatch between SPIR-V and AMDGPU.
130 // This will be revisited / optimised in the future.
131 return ABIArgInfo::getDirect(CGT.ConvertType(RetTy), 0u, nullptr, false);
132}
133
134ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
135 if (getContext().getLangOpts().isTargetDevice()) {
136 // Coerce pointer arguments with default address space to CrossWorkGroup
137 // pointers for target devices as default address space kernel arguments
138 // are not allowed. We use the opencl_global language address space which
139 // always maps to CrossWorkGroup.
140 llvm::Type *LTy = CGT.ConvertType(Ty);
141 auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
142 auto GlobalAS = getContext().getTargetAddressSpace(LangAS::opencl_global);
143 auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(LTy);
144 if (PtrTy && PtrTy->getAddressSpace() == DefaultAS) {
145 LTy = llvm::PointerType::get(PtrTy->getContext(), GlobalAS);
146 return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
147 }
148
149 if (isAggregateTypeForABI(Ty)) {
150 if (getTarget().getTriple().getVendor() == llvm::Triple::AMD)
151 // TODO: The AMDGPU kernel ABI passes aggregates byref, which is not
152 // currently expressible in SPIR-V; SPIR-V passes aggregates byval,
153 // which the AMDGPU kernel ABI does not allow. Passing aggregates as
154 // direct works around this impedance mismatch, as it retains type info
155 // and can be correctly handled, post reverse-translation, by the AMDGPU
156 // BE, which has to support this CC for legacy OpenCL purposes. It can
157 // be brittle and does lead to performance degradation in certain
158 // pathological cases. This will be revisited / optimised in the future,
159 // once a way to deal with the byref/byval impedance mismatch is
160 // identified.
161 return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
162 // Force copying aggregate type in kernel arguments by value when
163 // compiling CUDA targeting SPIR-V. This is required for the object
164 // copied to be valid on the device.
165 // This behavior follows the CUDA spec
166 // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#global-function-argument-processing,
167 // and matches the NVPTX implementation. TODO: hardcoding to 0 should be
168 // revisited if HIPSPV / byval starts making use of the AS of an indirect
169 // arg.
170 return getNaturalAlignIndirect(Ty, /*AddrSpace=*/0, /*byval=*/true);
171 }
172 }
173 return classifyArgumentType(Ty);
174}
175
176ABIArgInfo SPIRVABIInfo::classifyArgumentType(QualType Ty) const {
177 if (getTarget().getTriple().getVendor() != llvm::Triple::AMD)
179 if (!isAggregateTypeForABI(Ty))
181
182 // Records with non-trivial destructors/copy-constructors should not be
183 // passed by value.
184 if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
185 return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
187
188 if (const auto *RD = Ty->getAsRecordDecl();
189 RD && RD->hasFlexibleArrayMember())
191
192 return ABIArgInfo::getDirect(CGT.ConvertType(Ty), 0u, nullptr, false);
193}
194
195void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
196 // The logic is same as in DefaultABIInfo with an exception on the kernel
197 // arguments handling.
198 llvm::CallingConv::ID CC = FI.getCallingConvention();
199
200 if (!getCXXABI().classifyReturnType(FI))
202
203 for (auto &I : FI.arguments()) {
204 if (CC == llvm::CallingConv::SPIR_KERNEL) {
205 I.info = classifyKernelArgumentType(I.type);
206 } else {
207 I.info = classifyArgumentType(I.type);
208 }
209 }
210}
211
212namespace clang {
213namespace CodeGen {
215 if (CGM.getTarget().getTriple().isSPIRV())
216 SPIRVABIInfo(CGM.getTypes()).computeInfo(FI);
217 else
218 CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI);
219}
220}
221}
222
223unsigned CommonSPIRTargetCodeGenInfo::getDeviceKernelCallingConv() const {
224 return llvm::CallingConv::SPIR_KERNEL;
225}
226
227void SPIRVTargetCodeGenInfo::setCUDAKernelCallingConvention(
228 const FunctionType *&FT) const {
229 // Convert HIP kernels to SPIR-V kernels.
230 if (getABIInfo().getContext().getLangOpts().HIP) {
231 FT = getABIInfo().getContext().adjustFunctionType(
233 return;
234 }
235}
236
237void CommonSPIRTargetCodeGenInfo::setOCLKernelStubCallingConvention(
238 const FunctionType *&FT) const {
239 FT = getABIInfo().getContext().adjustFunctionType(
241}
242
243LangAS
244SPIRVTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
245 const VarDecl *D) const {
246 assert(!CGM.getLangOpts().OpenCL &&
247 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
248 "Address space agnostic languages only");
249 // If we're here it means that we're using the SPIRDefIsGen ASMap, hence for
250 // the global AS we can rely on either cuda_device or sycl_global to be
251 // correct; however, since this is not a CUDA Device context, we use
252 // sycl_global to prevent confusion with the assertion.
253 LangAS DefaultGlobalAS = getLangASFromTargetAS(
254 CGM.getContext().getTargetAddressSpace(LangAS::sycl_global));
255 if (!D)
256 return DefaultGlobalAS;
257
258 LangAS AddrSpace = D->getType().getAddressSpace();
259 if (AddrSpace != LangAS::Default)
260 return AddrSpace;
261
262 return DefaultGlobalAS;
263}
264
265void SPIRVTargetCodeGenInfo::setTargetAttributes(
266 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
267 if (!M.getLangOpts().HIP ||
268 M.getTarget().getTriple().getVendor() != llvm::Triple::AMD)
269 return;
270 if (GV->isDeclaration())
271 return;
272
273 auto F = dyn_cast<llvm::Function>(GV);
274 if (!F)
275 return;
276
277 auto FD = dyn_cast_or_null<FunctionDecl>(D);
278 if (!FD)
279 return;
280 if (!FD->hasAttr<CUDAGlobalAttr>())
281 return;
282
283 unsigned N = M.getLangOpts().GPUMaxThreadsPerBlock;
284 if (auto FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>())
285 N = FlatWGS->getMax()->EvaluateKnownConstInt(M.getContext()).getExtValue();
286
287 // We encode the maximum flat WG size in the first component of the 3D
288 // max_work_group_size attribute, which will get reverse translated into the
289 // original AMDGPU attribute when targeting AMDGPU.
290 auto Int32Ty = llvm::IntegerType::getInt32Ty(M.getLLVMContext());
291 llvm::Metadata *AttrMDArgs[] = {
292 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, N)),
293 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1)),
294 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1))};
295
296 F->setMetadata("max_work_group_size",
297 llvm::MDNode::get(M.getLLVMContext(), AttrMDArgs));
298}
299
300llvm::SyncScope::ID
301SPIRVTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &, SyncScope Scope,
302 llvm::AtomicOrdering,
303 llvm::LLVMContext &Ctx) const {
304 return Ctx.getOrInsertSyncScopeID(mapClangSyncScopeToLLVM(Scope));
305}
306
307/// Construct a SPIR-V target extension type for the given OpenCL image type.
308static llvm::Type *getSPIRVImageType(llvm::LLVMContext &Ctx, StringRef BaseType,
309 StringRef OpenCLName,
310 unsigned AccessQualifier) {
311 // These parameters compare to the operands of OpTypeImage (see
312 // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpTypeImage
313 // for more details). The first 6 integer parameters all default to 0, and
314 // will be changed to 1 only for the image type(s) that set the parameter to
315 // one. The 7th integer parameter is the access qualifier, which is tacked on
316 // at the end.
317 SmallVector<unsigned, 7> IntParams = {0, 0, 0, 0, 0, 0};
318
319 // Choose the dimension of the image--this corresponds to the Dim enum in
320 // SPIR-V (first integer parameter of OpTypeImage).
321 if (OpenCLName.starts_with("image2d"))
322 IntParams[0] = 1;
323 else if (OpenCLName.starts_with("image3d"))
324 IntParams[0] = 2;
325 else if (OpenCLName == "image1d_buffer")
326 IntParams[0] = 5; // Buffer
327 else
328 assert(OpenCLName.starts_with("image1d") && "Unknown image type");
329
330 // Set the other integer parameters of OpTypeImage if necessary. Note that the
331 // OpenCL image types don't provide any information for the Sampled or
332 // Image Format parameters.
333 if (OpenCLName.contains("_depth"))
334 IntParams[1] = 1;
335 if (OpenCLName.contains("_array"))
336 IntParams[2] = 1;
337 if (OpenCLName.contains("_msaa"))
338 IntParams[3] = 1;
339
340 // Access qualifier
341 IntParams.push_back(AccessQualifier);
342
343 return llvm::TargetExtType::get(Ctx, BaseType, {llvm::Type::getVoidTy(Ctx)},
344 IntParams);
345}
346
347llvm::Type *CommonSPIRTargetCodeGenInfo::getOpenCLType(CodeGenModule &CGM,
348 const Type *Ty) const {
349 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
350 if (auto *PipeTy = dyn_cast<PipeType>(Ty))
351 return llvm::TargetExtType::get(Ctx, "spirv.Pipe", {},
352 {!PipeTy->isReadOnly()});
353 if (auto *BuiltinTy = dyn_cast<BuiltinType>(Ty)) {
354 enum AccessQualifier : unsigned { AQ_ro = 0, AQ_wo = 1, AQ_rw = 2 };
355 switch (BuiltinTy->getKind()) {
356#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
357 case BuiltinType::Id: \
358 return getSPIRVImageType(Ctx, "spirv.Image", #ImgType, AQ_##Suffix);
359#include "clang/Basic/OpenCLImageTypes.def"
360 case BuiltinType::OCLSampler:
361 return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
362 case BuiltinType::OCLEvent:
363 return llvm::TargetExtType::get(Ctx, "spirv.Event");
364 case BuiltinType::OCLClkEvent:
365 return llvm::TargetExtType::get(Ctx, "spirv.DeviceEvent");
366 case BuiltinType::OCLQueue:
367 return llvm::TargetExtType::get(Ctx, "spirv.Queue");
368 case BuiltinType::OCLReserveID:
369 return llvm::TargetExtType::get(Ctx, "spirv.ReserveId");
370#define INTEL_SUBGROUP_AVC_TYPE(Name, Id) \
371 case BuiltinType::OCLIntelSubgroupAVC##Id: \
372 return llvm::TargetExtType::get(Ctx, "spirv.Avc" #Id "INTEL");
373#include "clang/Basic/OpenCLExtensionTypes.def"
374 default:
375 return nullptr;
376 }
377 }
378
379 return nullptr;
380}
381
382// Gets a spirv.IntegralConstant or spirv.Literal. If IntegralType is present,
383// returns an IntegralConstant, otherwise returns a Literal.
384static llvm::Type *getInlineSpirvConstant(CodeGenModule &CGM,
385 llvm::Type *IntegralType,
386 llvm::APInt Value) {
387 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
388
389 // Convert the APInt value to an array of uint32_t words
391
392 while (Value.ugt(0)) {
393 uint32_t Word = Value.trunc(32).getZExtValue();
394 Value.lshrInPlace(32);
395
396 Words.push_back(Word);
397 }
398 if (Words.size() == 0)
399 Words.push_back(0);
400
401 if (IntegralType)
402 return llvm::TargetExtType::get(Ctx, "spirv.IntegralConstant",
403 {IntegralType}, Words);
404 return llvm::TargetExtType::get(Ctx, "spirv.Literal", {}, Words);
405}
406
407static llvm::Type *getInlineSpirvType(CodeGenModule &CGM,
408 const HLSLInlineSpirvType *SpirvType) {
409 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
410
412
413 for (auto &Operand : SpirvType->getOperands()) {
414 using SpirvOperandKind = SpirvOperand::SpirvOperandKind;
415
416 llvm::Type *Result = nullptr;
417 switch (Operand.getKind()) {
418 case SpirvOperandKind::ConstantId: {
419 llvm::Type *IntegralType =
420 CGM.getTypes().ConvertType(Operand.getResultType());
421
422 Result = getInlineSpirvConstant(CGM, IntegralType, Operand.getValue());
423 break;
424 }
425 case SpirvOperandKind::Literal: {
426 Result = getInlineSpirvConstant(CGM, nullptr, Operand.getValue());
427 break;
428 }
429 case SpirvOperandKind::TypeId: {
430 QualType TypeOperand = Operand.getResultType();
431 if (const auto *RD = TypeOperand->getAsRecordDecl()) {
432 assert(RD->isCompleteDefinition() &&
433 "Type completion should have been required in Sema");
434
435 const FieldDecl *HandleField = RD->findFirstNamedDataMember();
436 if (HandleField) {
437 QualType ResourceType = HandleField->getType();
438 if (ResourceType->getAs<HLSLAttributedResourceType>()) {
439 TypeOperand = ResourceType;
440 }
441 }
442 }
443 Result = CGM.getTypes().ConvertType(TypeOperand);
444 break;
445 }
446 default:
447 llvm_unreachable("HLSLInlineSpirvType had invalid operand!");
448 break;
449 }
450
451 assert(Result);
452 Operands.push_back(Result);
453 }
454
455 return llvm::TargetExtType::get(Ctx, "spirv.Type", Operands,
456 {SpirvType->getOpcode(), SpirvType->getSize(),
457 SpirvType->getAlignment()});
458}
459
460llvm::Type *CommonSPIRTargetCodeGenInfo::getHLSLType(
461 CodeGenModule &CGM, const Type *Ty,
462 const SmallVector<int32_t> *Packoffsets) const {
463 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
464
465 if (auto *SpirvType = dyn_cast<HLSLInlineSpirvType>(Ty))
466 return getInlineSpirvType(CGM, SpirvType);
467
468 auto *ResType = dyn_cast<HLSLAttributedResourceType>(Ty);
469 if (!ResType)
470 return nullptr;
471
472 const HLSLAttributedResourceType::Attributes &ResAttrs = ResType->getAttrs();
473 switch (ResAttrs.ResourceClass) {
474 case llvm::dxil::ResourceClass::UAV:
475 case llvm::dxil::ResourceClass::SRV: {
476 // TypedBuffer and RawBuffer both need element type
477 QualType ContainedTy = ResType->getContainedType();
478 if (ContainedTy.isNull())
479 return nullptr;
480
481 assert(!ResAttrs.IsROV &&
482 "Rasterizer order views not implemented for SPIR-V yet");
483
484 if (!ResAttrs.RawBuffer) {
485 // convert element type
486 return getSPIRVImageTypeFromHLSLResource(ResAttrs, ContainedTy, CGM);
487 }
488
489 llvm::Type *ElemType = CGM.getTypes().ConvertTypeForMem(ContainedTy);
490 llvm::ArrayType *RuntimeArrayType = llvm::ArrayType::get(ElemType, 0);
491 uint32_t StorageClass = /* StorageBuffer storage class */ 12;
492 bool IsWritable = ResAttrs.ResourceClass == llvm::dxil::ResourceClass::UAV;
493 return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer",
494 {RuntimeArrayType},
495 {StorageClass, IsWritable});
496 }
497 case llvm::dxil::ResourceClass::CBuffer: {
498 QualType ContainedTy = ResType->getContainedType();
499 if (ContainedTy.isNull() || !ContainedTy->isStructureType())
500 return nullptr;
501
502 llvm::Type *BufferLayoutTy =
503 HLSLBufferLayoutBuilder(CGM, "spirv.Layout")
504 .createLayoutType(ContainedTy->castAsCanonical<RecordType>(),
505 Packoffsets);
506 uint32_t StorageClass = /* Uniform storage class */ 2;
507 return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer", {BufferLayoutTy},
508 {StorageClass, false});
509 break;
510 }
511 case llvm::dxil::ResourceClass::Sampler:
512 return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
513 }
514 return nullptr;
515}
516
517static unsigned
519 const HLSLAttributedResourceType::Attributes &attributes,
520 llvm::Type *SampledType, QualType Ty, unsigned NumChannels) {
521 // For images with `Sampled` operand equal to 2, there are restrictions on
522 // using the Unknown image format. To avoid these restrictions in common
523 // cases, we guess an image format for them based on the sampled type and the
524 // number of channels. This is intended to match the behaviour of DXC.
525 if (LangOpts.HLSLSpvUseUnknownImageFormat ||
526 attributes.ResourceClass != llvm::dxil::ResourceClass::UAV) {
527 return 0; // Unknown
528 }
529
530 if (SampledType->isIntegerTy(32)) {
531 if (Ty->isSignedIntegerType()) {
532 if (NumChannels == 1)
533 return 24; // R32i
534 if (NumChannels == 2)
535 return 25; // Rg32i
536 if (NumChannels == 4)
537 return 21; // Rgba32i
538 } else {
539 if (NumChannels == 1)
540 return 33; // R32ui
541 if (NumChannels == 2)
542 return 35; // Rg32ui
543 if (NumChannels == 4)
544 return 30; // Rgba32ui
545 }
546 } else if (SampledType->isIntegerTy(64)) {
547 if (NumChannels == 1) {
548 if (Ty->isSignedIntegerType()) {
549 return 41; // R64i
550 }
551 return 40; // R64ui
552 }
553 } else if (SampledType->isFloatTy()) {
554 if (NumChannels == 1)
555 return 3; // R32f
556 if (NumChannels == 2)
557 return 6; // Rg32f
558 if (NumChannels == 4)
559 return 1; // Rgba32f
560 }
561
562 return 0; // Unknown
563}
564
565llvm::Type *CommonSPIRTargetCodeGenInfo::getSPIRVImageTypeFromHLSLResource(
566 const HLSLAttributedResourceType::Attributes &attributes, QualType Ty,
567 CodeGenModule &CGM) const {
568 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
569
570 unsigned NumChannels = 1;
572 if (const VectorType *V = dyn_cast<VectorType>(Ty)) {
573 NumChannels = V->getNumElements();
574 Ty = V->getElementType();
575 }
576 assert(!Ty->isVectorType() && "We still have a vector type.");
577
578 llvm::Type *SampledType = CGM.getTypes().ConvertTypeForMem(Ty);
579
580 assert((SampledType->isIntegerTy() || SampledType->isFloatingPointTy()) &&
581 "The element type for a SPIR-V resource must be a scalar integer or "
582 "floating point type.");
583
584 // These parameters correspond to the operands to the OpTypeImage SPIR-V
585 // instruction. See
586 // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpTypeImage.
587 SmallVector<unsigned, 6> IntParams(6, 0);
588
589 const char *Name =
590 Ty->isSignedIntegerType() ? "spirv.SignedImage" : "spirv.Image";
591
592 // Dim
593 // For now we assume everything is a buffer.
594 IntParams[0] = 5;
595
596 // Depth
597 // HLSL does not indicate if it is a depth texture or not, so we use unknown.
598 IntParams[1] = 2;
599
600 // Arrayed
601 IntParams[2] = 0;
602
603 // MS
604 IntParams[3] = 0;
605
606 // Sampled
607 IntParams[4] =
608 attributes.ResourceClass == llvm::dxil::ResourceClass::UAV ? 2 : 1;
609
610 // Image format.
611 IntParams[5] = getImageFormat(CGM.getLangOpts(), attributes, SampledType, Ty,
612 NumChannels);
613
614 llvm::TargetExtType *ImageType =
615 llvm::TargetExtType::get(Ctx, Name, {SampledType}, IntParams);
616 return ImageType;
617}
618
619std::unique_ptr<TargetCodeGenInfo>
621 return std::make_unique<CommonSPIRTargetCodeGenInfo>(CGM.getTypes());
622}
623
624std::unique_ptr<TargetCodeGenInfo>
626 return std::make_unique<SPIRVTargetCodeGenInfo>(CGM.getTypes());
627}
#define V(N, I)
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
Definition CGCall.cpp:359
static llvm::Type * getInlineSpirvType(CodeGenModule &CGM, const HLSLInlineSpirvType *SpirvType)
Definition SPIR.cpp:407
static llvm::Type * getSPIRVImageType(llvm::LLVMContext &Ctx, StringRef BaseType, StringRef OpenCLName, unsigned AccessQualifier)
Construct a SPIR-V target extension type for the given OpenCL image type.
Definition SPIR.cpp:308
static unsigned getImageFormat(const LangOptions &LangOpts, const HLSLAttributedResourceType::Attributes &attributes, llvm::Type *SampledType, QualType Ty, unsigned NumChannels)
Definition SPIR.cpp:518
static llvm::Type * getInlineSpirvConstant(CodeGenModule &CGM, llvm::Type *IntegralType, llvm::APInt Value)
Definition SPIR.cpp:384
unsigned getTargetAddressSpace(LangAS AS) const
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
Definition CGCXXABI.h:158
CGFunctionInfo - Class to encapsulate the information about a function definition.
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
CanQualType getReturnType() const
MutableArrayRef< ArgInfo > arguments()
This class organizes the cross-function state that is used while generating LLVM code.
const LangOptions & getLangOpts() const
const TargetInfo & getTarget() const
ASTContext & getContext() const
llvm::LLVMContext & getLLVMContext()
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
DefaultABIInfo - The default implementation for ABI specific details.
Definition ABIInfoImpl.h:21
ABIArgInfo classifyArgumentType(QualType RetTy) const
ABIArgInfo classifyReturnType(QualType RetTy) const
TargetCodeGenInfo - This class organizes various target-specific codegeneration issues,...
Definition TargetInfo.h:47
Represents a member of a struct/union/class.
Definition Decl.h:3157
ExtInfo withCallingConv(CallingConv cc) const
Definition TypeBase.h:4683
ExtInfo getExtInfo() const
Definition TypeBase.h:4816
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8411
bool hasFlexibleArrayMember() const
Definition Decl.h:4342
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isStructureType() const
Definition Type.cpp:678
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2205
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
CanQualType getCanonicalTypeUnqualified() const
bool isVectorType() const
Definition TypeBase.h:8661
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
Definition TypeBase.h:2928
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9101
QualType getType() const
Definition Decl.h:722
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:145
CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Definition SPIR.cpp:214
bool isAggregateTypeForABI(QualType T)
std::unique_ptr< TargetCodeGenInfo > createSPIRVTargetCodeGenInfo(CodeGenModule &CGM)
Definition SPIR.cpp:625
std::unique_ptr< TargetCodeGenInfo > createCommonSPIRTargetCodeGenInfo(CodeGenModule &CGM)
Definition SPIR.cpp:620
The JSON file list parser is used to communicate input to InstallAPI.
StorageClass
Storage classes.
Definition Specifiers.h:248
const FunctionProtoType * T
@ Type
The name was classified as a type.
Definition Sema.h:562
LangAS
Defines the address space values used by the address space qualifier of QualType.
SyncScope
Defines sync scope values used internally by clang.
Definition SyncScope.h:42
@ CC_DeviceKernel
Definition Specifiers.h:292
@ CC_SpirFunction
Definition Specifiers.h:291
LangAS getLangASFromTargetAS(unsigned TargetAS)
unsigned int uint32_t