1//===-------- RISCV.cpp - Emit LLVM Code for builtins ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CodeGenFunction.h"
15#include "llvm/IR/IntrinsicsRISCV.h"
16#include "llvm/TargetParser/RISCVISAInfo.h"
17#include "llvm/TargetParser/RISCVTargetParser.h"
18
19using namespace clang;
20using namespace CodeGen;
21using namespace llvm;
22
23// The 0th bit simulates the `vta` of RVV
24// The 1st bit simulates the `vma` of RVV
25static constexpr unsigned RVV_VTA = 0x1;
26static constexpr unsigned RVV_VMA = 0x2;
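// The helpers below use these bits to decide whether a passthru (maskedoff)
// operand must be materialized: when the operation is tail/mask agnostic the
// passthru is simply poison. For masked forms, PolicyAttrs is also appended as
// the trailing `policy` operand of the LLVM intrinsic.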
27
28// RISC-V Vector builtin helper functions are marked NOINLINE to prevent
29// excessive inlining in CodeGenFunction::EmitRISCVBuiltinExpr's large switch
30// statement, which would significantly increase compilation time.
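// Fault-only-first loads (vleNffN). The LLVM intrinsic returns a {data, new_vl}
// pair; this helper extracts both, stores new_vl through the pointer argument
// of the builtin, and returns the data vector. Sketch of the unmasked form:
//   Builtin:   ([passthru,] ptr, new_vl_ptr, vl)
//   Intrinsic: (passthru, ptr, vl) -> {data, new_vl}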
31static LLVM_ATTRIBUTE_NOINLINE Value *
32emitRVVVLEFFBuiltin(CodeGenFunction *CGF, const CallExpr *E,
33 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
34 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
35 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
36 auto &Builder = CGF->Builder;
37 auto &CGM = CGF->CGM;
38 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
39 if (IsMasked) {
40 // Move mask to right before vl.
41 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
42 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
43 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
44 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
45 IntrinsicTypes = {ResultType, Ops[4]->getType(), Ops[2]->getType()};
46 } else {
47 if (PolicyAttrs & RVV_VTA)
48 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
49 IntrinsicTypes = {ResultType, Ops[3]->getType(), Ops[1]->getType()};
50 }
51 Value *NewVL = Ops[2];
52 Ops.erase(Ops.begin() + 2);
53 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
54 llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
55 llvm::Value *V = Builder.CreateExtractValue(LoadValue, {0});
56 // Store new_vl.
57 clang::CharUnits Align;
58 if (IsMasked)
59 Align = CGM.getNaturalPointeeTypeAlignment(
60 E->getArg(E->getNumArgs() - 2)->getType());
61 else
62 Align = CGM.getNaturalPointeeTypeAlignment(E->getArg(1)->getType());
63 llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {1});
64 Builder.CreateStore(Val, Address(NewVL, Val->getType(), Align));
65 return V;
66}
67
68static LLVM_ATTRIBUTE_NOINLINE Value *
69emitRVVVSSEBuiltin(CodeGenFunction *CGF, const CallExpr *E,
70 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
71 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
72 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
73 auto &Builder = CGF->Builder;
74 auto &CGM = CGF->CGM;
75 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
76 if (IsMasked) {
77 // Builtin: (mask, ptr, stride, value, vl). Intrinsic: (value, ptr, stride,
78 // mask, vl)
79 std::swap(Ops[0], Ops[3]);
80 } else {
81 // Builtin: (ptr, stride, value, vl). Intrinsic: (value, ptr, stride, vl)
82 std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
83 }
84 if (IsMasked)
85 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[4]->getType()};
86 else
87 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[3]->getType()};
88 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
89 return Builder.CreateCall(F, Ops, "");
90}
91
92static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVIndexedStoreBuiltin(
93 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
94 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
95 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
96 auto &Builder = CGF->Builder;
97 auto &CGM = CGF->CGM;
98 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
99 if (IsMasked) {
100 // Builtin: (mask, ptr, index, value, vl).
101 // Intrinsic: (value, ptr, index, mask, vl)
102 std::swap(Ops[0], Ops[3]);
103 } else {
104 // Builtin: (ptr, index, value, vl).
105 // Intrinsic: (value, ptr, index, vl)
106 std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
107 }
108 if (IsMasked)
109 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
110 Ops[4]->getType()};
111 else
112 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
113 Ops[3]->getType()};
114 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
115 return Builder.CreateCall(F, Ops, "");
116}
117
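// Pseudo-unary builtins lowered to a vector-scalar binary intrinsic whose
// scalar operand is a constant zero of the element type (for instance, a
// negate expressed as a reverse-subtract from zero).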
118static LLVM_ATTRIBUTE_NOINLINE Value *
119emitRVVPseudoUnaryBuiltin(CodeGenFunction *CGF, const CallExpr *E,
120 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
121 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
122 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
123 auto &Builder = CGF->Builder;
124 auto &CGM = CGF->CGM;
125 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
126 if (IsMasked) {
127 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
128 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
129 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
130 } else {
131 if (PolicyAttrs & RVV_VTA)
132 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
133 }
134 auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
135 Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
136 if (IsMasked) {
137 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
138 // maskedoff, op1, op2, mask, vl, policy
139 IntrinsicTypes = {ResultType, ElemTy, Ops[4]->getType()};
140 } else {
141 // passthru, op1, op2, vl
142 IntrinsicTypes = {ResultType, ElemTy, Ops[3]->getType()};
143 }
144 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
145 return Builder.CreateCall(F, Ops, "");
146}
147
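// vnot-style builtins: lowered to a binary intrinsic whose scalar operand is
// all-ones, i.e. the operation is expressed as an xor with -1.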
148static LLVM_ATTRIBUTE_NOINLINE Value *
149emitRVVPseudoVNotBuiltin(CodeGenFunction *CGF, const CallExpr *E,
150 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
151 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
152 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
153 auto &Builder = CGF->Builder;
154 auto &CGM = CGF->CGM;
155 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
156 if (IsMasked) {
157 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
158 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
159 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
160 } else {
161 if (PolicyAttrs & RVV_VTA)
162 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
163 }
164 auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
165 Ops.insert(Ops.begin() + 2, llvm::Constant::getAllOnesValue(ElemTy));
166 if (IsMasked) {
167 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
168 // maskedoff, op1, op2, mask, vl, policy
169 IntrinsicTypes = {ResultType, ElemTy, Ops[4]->getType()};
170 } else {
171 // passthru, op1, op2, vl
172 IntrinsicTypes = {ResultType, ElemTy, Ops[3]->getType()};
173 }
174 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
175 return Builder.CreateCall(F, Ops, "");
176}
177
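// Mask-register pseudo-unary builtins: the single mask source is passed twice
// so that a two-operand mask intrinsic (e.g. a nand of an operand with itself
// for vmnot) implements the unary operation.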
178static LLVM_ATTRIBUTE_NOINLINE Value *
179emitRVVPseudoMaskBuiltin(CodeGenFunction *CGF, const CallExpr *E,
180 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
181 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
182 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
183 auto &Builder = CGF->Builder;
184 auto &CGM = CGF->CGM;
185 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
186 // op1, vl
187 IntrinsicTypes = {ResultType, Ops[1]->getType()};
188 Ops.insert(Ops.begin() + 1, Ops[0]);
189 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
190 return Builder.CreateCall(F, Ops, "");
191}
192
193static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVPseudoVFUnaryBuiltin(
194 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
195 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
196 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
197 auto &Builder = CGF->Builder;
198 auto &CGM = CGF->CGM;
199 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
200 if (IsMasked) {
201 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
202 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
203 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
204 Ops.insert(Ops.begin() + 2, Ops[1]);
205 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
206 // maskedoff, op1, op2, mask, vl, policy
207 IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
208 } else {
209 if (PolicyAttrs & RVV_VTA)
210 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
211 // op1, op2, vl
212 IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType()};
213 Ops.insert(Ops.begin() + 2, Ops[1]);
214 }
215 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
216 return Builder.CreateCall(F, Ops, "");
217}
218
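// Widening-convert pseudo builtins (vwcvt*): lowered to a widening binary
// intrinsic with a zero scalar of the source element type as second operand
// (e.g. a widening add with 0).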
219static LLVM_ATTRIBUTE_NOINLINE Value *
220emitRVVPseudoVWCVTBuiltin(CodeGenFunction *CGF, const CallExpr *E,
221 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
222 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
223 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
224 auto &Builder = CGF->Builder;
225 auto &CGM = CGF->CGM;
226 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
227 if (IsMasked) {
228 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
229 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
230 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
231 } else {
232 if (PolicyAttrs & RVV_VTA)
233 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
234 }
235 auto ElemTy = cast<llvm::VectorType>(Ops[1]->getType())->getElementType();
236 Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
237 if (IsMasked) {
238 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
239 // maskedoff, op1, op2, mask, vl, policy
240 IntrinsicTypes = {ResultType, Ops[1]->getType(), ElemTy, Ops[4]->getType()};
241 } else {
242 // passthru, op1, op2, vl
243 IntrinsicTypes = {ResultType, Ops[1]->getType(), ElemTy, Ops[3]->getType()};
244 }
245 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
246 return Builder.CreateCall(F, Ops, "");
247}
248
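// Narrowing-convert pseudo builtins (vncvt*): lowered to a narrowing shift
// intrinsic whose XLEN-typed shift amount is the constant zero (e.g. vnsrl
// by 0).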
249static LLVM_ATTRIBUTE_NOINLINE Value *
250emitRVVPseudoVNCVTBuiltin(CodeGenFunction *CGF, const CallExpr *E,
251 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
252 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
253 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
254 auto &Builder = CGF->Builder;
255 auto &CGM = CGF->CGM;
256 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
257 if (IsMasked) {
258 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
259 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
260 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
261 } else {
262 if (PolicyAttrs & RVV_VTA)
263 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
264 }
265 Ops.insert(Ops.begin() + 2,
266 llvm::Constant::getNullValue(Ops.back()->getType()));
267 if (IsMasked) {
268 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
269 // maskedoff, op1, xlen, mask, vl, policy
270 IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[4]->getType(),
271 Ops[4]->getType()};
272 } else {
273 // passthru, op1, xlen, vl
274 IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType(),
275 Ops[3]->getType()};
276 }
277 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
278 return Builder.CreateCall(F, Ops, "");
279}
280
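// vlenb does not need a dedicated RISC-V intrinsic: it is read through
// llvm.read_register with the "vlenb" register name and yields an XLEN-sized
// (SizeTy) value.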
281static LLVM_ATTRIBUTE_NOINLINE Value *
282emitRVVVlenbBuiltin(CodeGenFunction *CGF, const CallExpr *E,
283 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
284 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
285 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
286 auto &Builder = CGF->Builder;
287 auto &CGM = CGF->CGM;
288 LLVMContext &Context = CGM.getLLVMContext();
289 llvm::MDBuilder MDHelper(Context);
290 llvm::Metadata *OpsMD[] = {llvm::MDString::get(Context, "vlenb")};
291 llvm::MDNode *RegName = llvm::MDNode::get(Context, OpsMD);
292 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
293 llvm::Function *F =
294 CGM.getIntrinsic(llvm::Intrinsic::read_register, {CGF->SizeTy});
295 return Builder.CreateCall(F, Metadata);
296}
297
298static LLVM_ATTRIBUTE_NOINLINE Value *
299emitRVVVsetvliBuiltin(CodeGenFunction *CGF, const CallExpr *E,
300 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
301 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
302 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
303 auto &Builder = CGF->Builder;
304 auto &CGM = CGF->CGM;
305 llvm::Function *F = CGM.getIntrinsic(ID, {ResultType});
306 return Builder.CreateCall(F, Ops, "");
307}
308
309static LLVM_ATTRIBUTE_NOINLINE Value *
310emitRVVVSEMaskBuiltin(CodeGenFunction *CGF, const CallExpr *E,
311 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
312 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
313 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
314 auto &Builder = CGF->Builder;
315 auto &CGM = CGF->CGM;
316 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
317 if (IsMasked) {
318 // Builtin: (mask, ptr, value, vl).
319 // Intrinsic: (value, ptr, mask, vl)
320 std::swap(Ops[0], Ops[2]);
321 } else {
322 // Builtin: (ptr, value, vl).
323 // Intrinsic: (value, ptr, vl)
324 std::swap(Ops[0], Ops[1]);
325 }
326 if (IsMasked)
327 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[3]->getType()};
328 else
329 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType()};
330 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
331 return Builder.CreateCall(F, Ops, "");
332}
333
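// Segment loads that return a tuple value. NoPassthru is true when the policy
// bits make the passthru operand irrelevant; Offset then indexes the pointer
// operand of the builtin, whose type is used as one of the overloaded
// intrinsic types. If a return slot was provided, the loaded tuple is stored
// there instead of being returned directly.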
334static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVUnitStridedSegLoadTupleBuiltin(
335 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
336 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
337 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
338 auto &Builder = CGF->Builder;
339 auto &CGM = CGF->CGM;
340 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
341 bool NoPassthru =
342 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
343 (!IsMasked && (PolicyAttrs & RVV_VTA));
344 unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
345 if (IsMasked)
346 IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[0]->getType(),
347 Ops.back()->getType()};
348 else
349 IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
350 Ops.back()->getType()};
351 if (IsMasked)
352 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
353 if (NoPassthru)
354 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
355 if (IsMasked)
356 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
357 Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
358 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
359 llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
360 if (ReturnValue.isNull())
361 return LoadValue;
362 return Builder.CreateStore(LoadValue, ReturnValue.getValue());
363}
364
365static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVUnitStridedSegStoreTupleBuiltin(
366 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
367 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
368 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
369 auto &Builder = CGF->Builder;
370 auto &CGM = CGF->CGM;
371 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
372 // Masked
373 // Builtin: (mask, ptr, v_tuple, vl)
374 // Intrinsic: (tuple, ptr, mask, vl, SegInstSEW)
375 // Unmasked
376 // Builtin: (ptr, v_tuple, vl)
377 // Intrinsic: (tuple, ptr, vl, SegInstSEW)
378 if (IsMasked)
379 std::swap(Ops[0], Ops[2]);
380 else
381 std::swap(Ops[0], Ops[1]);
382 Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
383 if (IsMasked)
384 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
385 Ops[3]->getType()};
386 else
387 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType()};
388 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
389 return Builder.CreateCall(F, Ops, "");
390}
391
392static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVUnitStridedSegLoadFFTupleBuiltin(
393 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
394 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
395 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
396 auto &Builder = CGF->Builder;
397 auto &CGM = CGF->CGM;
398 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
399 bool NoPassthru =
400 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
401 (!IsMasked && (PolicyAttrs & RVV_VTA));
402 unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
403 if (IsMasked)
404 IntrinsicTypes = {ResultType, Ops.back()->getType(), Ops[Offset]->getType(),
405 Ops[0]->getType()};
406 else
407 IntrinsicTypes = {ResultType, Ops.back()->getType(),
408 Ops[Offset]->getType()};
409 if (IsMasked)
410 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
411 if (NoPassthru)
412 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
413 if (IsMasked)
414 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
415 Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
416 Value *NewVL = Ops[2];
417 Ops.erase(Ops.begin() + 2);
418 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
419 llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
420 // Get alignment from the new vl operand
421 clang::CharUnits Align =
422 CGM.getNaturalPointeeTypeAlignment(E->getArg(Offset + 1)->getType());
423 llvm::Value *ReturnTuple = Builder.CreateExtractValue(LoadValue, 0);
424 // Store new_vl
425 llvm::Value *V = Builder.CreateExtractValue(LoadValue, 1);
426 Builder.CreateStore(V, Address(NewVL, V->getType(), Align));
427 if (ReturnValue.isNull())
428 return ReturnTuple;
429 return Builder.CreateStore(ReturnTuple, ReturnValue.getValue());
430}
431
432static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVStridedSegLoadTupleBuiltin(
433 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
434 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
435 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
436 auto &Builder = CGF->Builder;
437 auto &CGM = CGF->CGM;
438 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
439 bool NoPassthru =
440 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
441 (!IsMasked && (PolicyAttrs & RVV_VTA));
442 unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
443 if (IsMasked)
444 IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops.back()->getType(),
445 Ops[0]->getType()};
446 else
447 IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
448 Ops.back()->getType()};
449 if (IsMasked)
450 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
451 if (NoPassthru)
452 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
453 if (IsMasked)
454 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
455 Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
456 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
457 llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
458 if (ReturnValue.isNull())
459 return LoadValue;
460 return Builder.CreateStore(LoadValue, ReturnValue.getValue());
461}
462
463static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVStridedSegStoreTupleBuiltin(
464 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
465 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
466 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
467 auto &Builder = CGF->Builder;
468 auto &CGM = CGF->CGM;
469 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
470 // Masked
471 // Builtin: (mask, ptr, stride, v_tuple, vl)
472 // Intrinsic: (tuple, ptr, stride, mask, vl, SegInstSEW)
473 // Unmasked
474 // Builtin: (ptr, stride, v_tuple, vl)
475 // Intrinsic: (tuple, ptr, stride, vl, SegInstSEW)
476 if (IsMasked)
477 std::swap(Ops[0], Ops[3]);
478 else
479 std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
480 Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
481 if (IsMasked)
482 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[4]->getType(),
483 Ops[3]->getType()};
484 else
485 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[3]->getType()};
486 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
487 return Builder.CreateCall(F, Ops, "");
488}
489
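// Averaging/fixed-point builtins (e.g. vaadd/vasub): the source-level builtin
// already carries the vxrm rounding-mode operand, so unlike the floating-point
// helpers below no default rounding mode is inserted here; the helper only
// reorders the mask and adds the optional poison passthru and policy operands.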
490static LLVM_ATTRIBUTE_NOINLINE Value *
491emitRVVAveragingBuiltin(CodeGenFunction *CGF, const CallExpr *E,
492 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
493 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
494 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
495 auto &Builder = CGF->Builder;
496 auto &CGM = CGF->CGM;
497 // LLVM intrinsic
498 // Unmasked: (passthru, op0, op1, round_mode, vl)
499 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl,
500 // policy)
501
502 bool HasMaskedOff =
503 !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
504 (!IsMasked && PolicyAttrs & RVV_VTA));
505
506 if (IsMasked)
507 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
508
509 if (!HasMaskedOff)
510 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
511
512 if (IsMasked)
513 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
514
515 llvm::Function *F = CGM.getIntrinsic(
516 ID, {ResultType, Ops[2]->getType(), Ops.back()->getType()});
517 return Builder.CreateCall(F, Ops, "");
518}
519
520static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVNarrowingClipBuiltin(
521 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
522 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
523 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
524 auto &Builder = CGF->Builder;
525 auto &CGM = CGF->CGM;
526 // LLVM intrinsic
527 // Unmasked: (passthru, op0, op1, round_mode, vl)
528 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl,
529 // policy)
530
531 bool HasMaskedOff =
532 !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
533 (!IsMasked && PolicyAttrs & RVV_VTA));
534
535 if (IsMasked)
536 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
537
538 if (!HasMaskedOff)
539 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
540
541 if (IsMasked)
542 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
543
544 llvm::Function *F =
545 CGM.getIntrinsic(ID, {ResultType, Ops[1]->getType(), Ops[2]->getType(),
546 Ops.back()->getType()});
547 return Builder.CreateCall(F, Ops, "");
548}
549
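// Floating-point builtins that carry a rounding-mode operand. If the call did
// not supply an explicit rounding mode, an frm operand of 7 (DYN, i.e. use the
// current dynamic rounding mode) is inserted immediately before vl.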
550static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVFloatingPointBuiltin(
551 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
552 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
553 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
554 auto &Builder = CGF->Builder;
555 auto &CGM = CGF->CGM;
556 // LLVM intrinsic
557 // Unmasked: (passthru, op0, op1, round_mode, vl)
558 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
559
560 bool HasMaskedOff =
561 !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
562 (!IsMasked && PolicyAttrs & RVV_VTA));
563 bool HasRoundModeOp =
564 IsMasked ? (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5)
565 : (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
566
567 if (!HasRoundModeOp)
568 Ops.insert(Ops.end() - 1,
569 ConstantInt::get(Ops.back()->getType(), 7)); // frm
570
571 if (IsMasked)
572 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
573
574 if (!HasMaskedOff)
575 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
576
577 if (IsMasked)
578 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
579
580 llvm::Function *F = CGM.getIntrinsic(
581 ID, {ResultType, Ops[2]->getType(), Ops.back()->getType()});
582 return Builder.CreateCall(F, Ops, "");
583}
584
585static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVWideningFloatingPointBuiltin(
586 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
587 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
588 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
589 auto &Builder = CGF->Builder;
590 auto &CGM = CGF->CGM;
591 // LLVM intrinsic
592 // Unmasked: (passthru, op0, op1, round_mode, vl)
593 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
594
595 bool HasMaskedOff =
596 !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
597 (!IsMasked && PolicyAttrs & RVV_VTA));
598 bool HasRoundModeOp =
599 IsMasked ? (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5)
600 : (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
601
602 if (!HasRoundModeOp)
603 Ops.insert(Ops.end() - 1,
604 ConstantInt::get(Ops.back()->getType(), 7)); // frm
605
606 if (IsMasked)
607 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
608
609 if (!HasMaskedOff)
610 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
611
612 if (IsMasked)
613 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
614
615 llvm::Function *F =
616 CGM.getIntrinsic(ID, {ResultType, Ops[1]->getType(), Ops[2]->getType(),
617 Ops.back()->getType()});
618 return Builder.CreateCall(F, Ops, "");
619}
620
621static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVIndexedSegLoadTupleBuiltin(
622 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
623 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
624 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
625 auto &Builder = CGF->Builder;
626 auto &CGM = CGF->CGM;
627 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
628
629 bool NoPassthru =
630 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
631 (!IsMasked && (PolicyAttrs & RVV_VTA));
632
633 if (IsMasked)
634 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
635 if (NoPassthru)
636 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
637
638 if (IsMasked)
639 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
640 Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
641
642 if (IsMasked)
643 IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType(),
644 Ops[3]->getType(), Ops[4]->getType()};
645 else
646 IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType(),
647 Ops[3]->getType()};
648 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
649 llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
650
651 if (ReturnValue.isNull())
652 return LoadValue;
653 return Builder.CreateStore(LoadValue, ReturnValue.getValue());
654}
655
656static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVIndexedSegStoreTupleBuiltin(
657 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
658 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
659 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
660 auto &Builder = CGF->Builder;
661 auto &CGM = CGF->CGM;
662 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
663 // Masked
664 // Builtin: (mask, ptr, index, v_tuple, vl)
665 // Intrinsic: (tuple, ptr, index, mask, vl, SegInstSEW)
666 // Unmasked
667 // Builtin: (ptr, index, v_tuple, vl)
668 // Intrinsic: (tuple, ptr, index, vl, SegInstSEW)
669
670 if (IsMasked)
671 std::swap(Ops[0], Ops[3]);
672 else
673 std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
674
675 Ops.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
676
677 if (IsMasked)
678 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
679 Ops[3]->getType(), Ops[4]->getType()};
680 else
681 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
682 Ops[3]->getType()};
683 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
684 return Builder.CreateCall(F, Ops, "");
685}
686
687static LLVM_ATTRIBUTE_NOINLINE Value *
688emitRVVFMABuiltin(CodeGenFunction *CGF, const CallExpr *E,
689 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
690 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
691 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
692 auto &Builder = CGF->Builder;
693 auto &CGM = CGF->CGM;
694 // LLVM intrinsic
695 // Unmasked: (vector_in, vector_in/scalar_in, vector_in, round_mode,
696 // vl, policy)
697 // Masked: (vector_in, vector_in/scalar_in, vector_in, mask, frm,
698 // vl, policy)
699
700 bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5;
701
702 if (!HasRoundModeOp)
703 Ops.insert(Ops.end() - 1,
704 ConstantInt::get(Ops.back()->getType(), 7)); // frm
705
706 if (IsMasked)
707 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
708
709 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
710
711 llvm::Function *F = CGM.getIntrinsic(
712 ID, {ResultType, Ops[1]->getType(), Ops.back()->getType()});
713 return Builder.CreateCall(F, Ops, "");
714}
715
716static LLVM_ATTRIBUTE_NOINLINE Value *
717emitRVVWideningFMABuiltin(CodeGenFunction *CGF, const CallExpr *E,
718 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
719 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
720 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
721 auto &Builder = CGF->Builder;
722 auto &CGM = CGF->CGM;
723 // LLVM intrinsic
724 // Unmasked: (vector_in, vector_in/scalar_in, vector_in, round_mode, vl,
725 // policy) Masked: (vector_in, vector_in/scalar_in, vector_in, mask, frm,
726 // vl, policy)
727
728 bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5;
729
730 if (!HasRoundModeOp)
731 Ops.insert(Ops.end() - 1,
732 ConstantInt::get(Ops.back()->getType(), 7)); // frm
733
734 if (IsMasked)
735 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 4);
736
737 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
738
739 llvm::Function *F =
740 CGM.getIntrinsic(ID, {ResultType, Ops[1]->getType(), Ops[2]->getType(),
741 Ops.back()->getType()});
742 return Builder.CreateCall(F, Ops, "");
743}
744
745static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVFloatingUnaryBuiltin(
746 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
747 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
748 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
749 auto &Builder = CGF->Builder;
750 auto &CGM = CGF->CGM;
751 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
752 // LLVM intrinsic
753 // Unmasked: (passthru, op0, round_mode, vl)
754 // Masked: (passthru, op0, mask, frm, vl, policy)
755
756 bool HasMaskedOff =
757 !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
758 (!IsMasked && PolicyAttrs & RVV_VTA));
759 bool HasRoundModeOp =
760 IsMasked ? (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4)
761 : (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);
762
763 if (!HasRoundModeOp)
764 Ops.insert(Ops.end() - 1,
765 ConstantInt::get(Ops.back()->getType(), 7)); // frm
766
767 if (IsMasked)
768 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
769
770 if (!HasMaskedOff)
771 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
772
773 if (IsMasked)
774 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
775
776 IntrinsicTypes = {ResultType, Ops.back()->getType()};
777 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
778 return Builder.CreateCall(F, Ops, "");
779}
780
781static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVFloatingConvBuiltin(
782 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
783 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
784 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
785 auto &Builder = CGF->Builder;
786 auto &CGM = CGF->CGM;
787 // LLVM intrinsic
788 // Unmasked: (passthru, op0, frm, vl)
789 // Masked: (passthru, op0, mask, frm, vl, policy)
790 bool HasMaskedOff =
791 !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
792 (!IsMasked && PolicyAttrs & RVV_VTA));
793 bool HasRoundModeOp =
794 IsMasked ? (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4)
795 : (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);
796
797 if (!HasRoundModeOp)
798 Ops.insert(Ops.end() - 1,
799 ConstantInt::get(Ops.back()->getType(), 7)); // frm
800
801 if (IsMasked)
802 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
803
804 if (!HasMaskedOff)
805 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
806
807 if (IsMasked)
808 Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
809
810 llvm::Function *F = CGM.getIntrinsic(
811 ID, {ResultType, Ops[1]->getType(), Ops.back()->getType()});
812 return Builder.CreateCall(F, Ops, "");
813}
814
815static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVFloatingReductionBuiltin(
816 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
817 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
818 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
819 auto &Builder = CGF->Builder;
820 auto &CGM = CGF->CGM;
821 // LLVM intrinsic
822 // Unmasked: (passthru, op0, op1, round_mode, vl)
823 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
824
825 bool HasMaskedOff =
826 !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
827 (!IsMasked && PolicyAttrs & RVV_VTA));
828 bool HasRoundModeOp =
829 IsMasked ? (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5)
830 : (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
831
832 if (!HasRoundModeOp)
833 Ops.insert(Ops.end() - 1,
834 ConstantInt::get(Ops.back()->getType(), 7)); // frm
835
836 if (IsMasked)
837 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
838
839 if (!HasMaskedOff)
840 Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
841
842 llvm::Function *F = CGM.getIntrinsic(
843 ID, {ResultType, Ops[1]->getType(), Ops.back()->getType()});
844 return Builder.CreateCall(F, Ops, "");
845}
846
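// Reinterpret casts between RVV types. Vector-to-vector reinterprets are a
// plain bitcast; casts involving mask (i1) vectors go through a fixed
// <vscale x 64 x i1> container (an LMUL=1 register's worth of mask bits) using
// vector insert/extract, since a direct bitcast between vectors of different
// bit widths is not valid IR.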
847static LLVM_ATTRIBUTE_NOINLINE Value *
848emitRVVReinterpretBuiltin(CodeGenFunction *CGF, const CallExpr *E,
849 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
850 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
851 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
852 auto &Builder = CGF->Builder;
853 auto &CGM = CGF->CGM;
854
855 if (ResultType->isIntOrIntVectorTy(1) ||
856 Ops[0]->getType()->isIntOrIntVectorTy(1)) {
857 assert(isa<ScalableVectorType>(ResultType) &&
858 isa<ScalableVectorType>(Ops[0]->getType()));
859
860 LLVMContext &Context = CGM.getLLVMContext();
861 ScalableVectorType *Boolean64Ty =
862 ScalableVectorType::get(llvm::Type::getInt1Ty(Context), 64);
863
864 if (ResultType->isIntOrIntVectorTy(1)) {
865 // Casting from m1 vector integer -> vector boolean
866 // Ex: <vscale x 8 x i8>
867 // --(bitcast)--------> <vscale x 64 x i1>
868 // --(vector_extract)-> <vscale x 8 x i1>
869 llvm::Value *BitCast = Builder.CreateBitCast(Ops[0], Boolean64Ty);
870 return Builder.CreateExtractVector(ResultType, BitCast,
871 ConstantInt::get(CGF->Int64Ty, 0));
872 } else {
873 // Casting from vector boolean -> m1 vector integer
874 // Ex: <vscale x 1 x i1>
875 // --(vector_insert)-> <vscale x 64 x i1>
876 // --(bitcast)-------> <vscale x 8 x i8>
877 llvm::Value *Boolean64Val = Builder.CreateInsertVector(
878 Boolean64Ty, llvm::PoisonValue::get(Boolean64Ty), Ops[0],
879 ConstantInt::get(CGF->Int64Ty, 0));
880 return Builder.CreateBitCast(Boolean64Val, ResultType);
881 }
882 }
883 return Builder.CreateBitCast(Ops[0], ResultType);
884}
885
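// vget: extracts one part of a larger vector or of a tuple. For whole scalable
// vectors the index is masked to the valid range and scaled by the result's
// minimum element count before extracting the subvector; tuple operands use
// the riscv_tuple_extract intrinsic with the (truncated) index instead.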
886static LLVM_ATTRIBUTE_NOINLINE Value *
887emitRVVGetBuiltin(CodeGenFunction *CGF, const CallExpr *E,
888 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
889 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
890 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
891 auto &Builder = CGF->Builder;
892 auto *VecTy = cast<ScalableVectorType>(ResultType);
893 if (auto *OpVecTy = dyn_cast<ScalableVectorType>(Ops[0]->getType())) {
894 unsigned MaxIndex =
895 OpVecTy->getMinNumElements() / VecTy->getMinNumElements();
896 assert(isPowerOf2_32(MaxIndex));
897 // Mask to only valid indices.
898 Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
899 Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
900 Ops[1] =
901 Builder.CreateMul(Ops[1], ConstantInt::get(Ops[1]->getType(),
902 VecTy->getMinNumElements()));
903 return Builder.CreateExtractVector(ResultType, Ops[0], Ops[1]);
904 }
905
906 return Builder.CreateIntrinsic(
907 Intrinsic::riscv_tuple_extract, {ResultType, Ops[0]->getType()},
908 {Ops[0], Builder.CreateTrunc(Ops[1], Builder.getInt32Ty())});
909}
910
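// vset: the counterpart of vget; inserts a subvector (or tuple element) at the
// masked-and-scaled index, using riscv_tuple_insert for tuple destinations.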
911static LLVM_ATTRIBUTE_NOINLINE Value *
912emitRVVSetBuiltin(CodeGenFunction *CGF, const CallExpr *E,
913 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
914 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
915 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
916 auto &Builder = CGF->Builder;
917 if (auto *ResVecTy = dyn_cast<ScalableVectorType>(ResultType)) {
918 auto *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
919 unsigned MaxIndex =
920 ResVecTy->getMinNumElements() / VecTy->getMinNumElements();
921 assert(isPowerOf2_32(MaxIndex));
922 // Mask to only valid indices.
923 Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
924 Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
925 Ops[1] =
926 Builder.CreateMul(Ops[1], ConstantInt::get(Ops[1]->getType(),
927 VecTy->getMinNumElements()));
928 return Builder.CreateInsertVector(ResultType, Ops[0], Ops[2], Ops[1]);
929 }
930
931 return Builder.CreateIntrinsic(
932 Intrinsic::riscv_tuple_insert, {ResultType, Ops[2]->getType()},
933 {Ops[0], Ops[2], Builder.CreateTrunc(Ops[1], Builder.getInt32Ty())});
934}
935
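// vcreate: builds a wider vector or a tuple from the individual operands, by
// inserting each one either at its scaled subvector offset or at its tuple
// index via riscv_tuple_insert.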
936static LLVM_ATTRIBUTE_NOINLINE Value *
937emitRVVCreateBuiltin(CodeGenFunction *CGF, const CallExpr *E,
938 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
939 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
940 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
941 auto &Builder = CGF->Builder;
942 llvm::Value *ReturnVector = llvm::PoisonValue::get(ResultType);
943 auto *VecTy = cast<ScalableVectorType>(Ops[0]->getType());
944 for (unsigned I = 0, N = Ops.size(); I < N; ++I) {
945 if (isa<ScalableVectorType>(ResultType)) {
946 llvm::Value *Idx = ConstantInt::get(Builder.getInt64Ty(),
947 VecTy->getMinNumElements() * I);
948 ReturnVector =
949 Builder.CreateInsertVector(ResultType, ReturnVector, Ops[I], Idx);
950 } else {
951 llvm::Value *Idx = ConstantInt::get(Builder.getInt32Ty(), I);
952 ReturnVector = Builder.CreateIntrinsic(Intrinsic::riscv_tuple_insert,
953 {ResultType, Ops[I]->getType()},
954 {ReturnVector, Ops[I], Idx});
955 }
956 }
957 return ReturnVector;
958}
959
960Value *CodeGenFunction::EmitRISCVCpuInit() {
961 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {VoidPtrTy}, false);
962 llvm::FunctionCallee Func =
963 CGM.CreateRuntimeFunction(FTy, "__init_riscv_feature_bits");
964 auto *CalleeGV = cast<llvm::GlobalValue>(Func.getCallee());
965 CalleeGV->setDSOLocal(true);
966 CalleeGV->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
967 return Builder.CreateCall(Func, {llvm::ConstantPointerNull::get(VoidPtrTy)});
968}
969
970Value *CodeGenFunction::EmitRISCVCpuSupports(const CallExpr *E) {
971
972 const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
973 StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
974 if (!getContext().getTargetInfo().validateCpuSupports(FeatureStr))
975 return Builder.getFalse();
976
977 return EmitRISCVCpuSupports(ArrayRef<StringRef>(FeatureStr));
978}
979
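// Loads the Index-th 64-bit word of the feature-bit array in the runtime's
// __riscv_feature_bits structure (initialized by the __init_riscv_feature_bits
// call emitted in EmitRISCVCpuInit above).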
980static Value *loadRISCVFeatureBits(unsigned Index, CGBuilderTy &Builder,
981 CodeGenModule &CGM) {
982 llvm::Type *Int32Ty = Builder.getInt32Ty();
983 llvm::Type *Int64Ty = Builder.getInt64Ty();
984 llvm::ArrayType *ArrayOfInt64Ty =
985 llvm::ArrayType::get(Int64Ty, llvm::RISCVISAInfo::FeatureBitSize);
986 llvm::Type *StructTy = llvm::StructType::get(Int32Ty, ArrayOfInt64Ty);
987 llvm::Constant *RISCVFeaturesBits =
988 CGM.CreateRuntimeVariable(StructTy, "__riscv_feature_bits");
989 cast<llvm::GlobalValue>(RISCVFeaturesBits)->setDSOLocal(true);
990 Value *IndexVal = llvm::ConstantInt::get(Int32Ty, Index);
991 llvm::Value *GEPIndices[] = {Builder.getInt32(0), Builder.getInt32(1),
992 IndexVal};
993 Value *Ptr =
994 Builder.CreateInBoundsGEP(StructTy, RISCVFeaturesBits, GEPIndices);
995 Value *FeaturesBit =
996 Builder.CreateAlignedLoad(Int64Ty, Ptr, CharUnits::fromQuantity(8));
997 return FeaturesBit;
998}
999
1000Value *CodeGenFunction::EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs) {
1001 const unsigned RISCVFeatureLength = llvm::RISCVISAInfo::FeatureBitSize;
1002 uint64_t RequireBitMasks[RISCVFeatureLength] = {0};
1003
1004 for (auto Feat : FeaturesStrs) {
1005 auto [GroupID, BitPos] = RISCVISAInfo::getRISCVFeaturesBitsInfo(Feat);
1006
1007 // If there is no BitPos for this feature, skip this version.
1008 // A warning is also reported to the user during compilation.
1009 if (BitPos == -1)
1010 return Builder.getFalse();
1011
1012 RequireBitMasks[GroupID] |= (1ULL << BitPos);
1013 }
1014
1015 Value *Result = nullptr;
1016 for (unsigned Idx = 0; Idx < RISCVFeatureLength; Idx++) {
1017 if (RequireBitMasks[Idx] == 0)
1018 continue;
1019
1020 Value *Mask = Builder.getInt64(RequireBitMasks[Idx]);
1021 Value *Bitset =
1022 Builder.CreateAnd(loadRISCVFeatureBits(Idx, Builder, CGM), Mask);
1023 Value *CmpV = Builder.CreateICmpEQ(Bitset, Mask);
1024 Result = (!Result) ? CmpV : Builder.CreateAnd(Result, CmpV);
1025 }
1026
1027 assert(Result && "Should have value here.");
1028
1029 return Result;
1030}
1031
1032Value *CodeGenFunction::EmitRISCVCpuIs(const CallExpr *E) {
1033 const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
1034 StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
1035 return EmitRISCVCpuIs(CPUStr);
1036}
1037
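// __builtin_cpu_is: compares the (mvendorid, marchid, mimpid) triple stored in
// the runtime's __riscv_cpu_model structure against the values the target
// parser records for the named CPU; all three must match.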
1038Value *CodeGenFunction::EmitRISCVCpuIs(StringRef CPUStr) {
1039 llvm::Type *Int32Ty = Builder.getInt32Ty();
1040 llvm::Type *Int64Ty = Builder.getInt64Ty();
1041 llvm::StructType *StructTy = llvm::StructType::get(Int32Ty, Int64Ty, Int64Ty);
1042 llvm::Constant *RISCVCPUModel =
1043 CGM.CreateRuntimeVariable(StructTy, "__riscv_cpu_model");
1044 cast<llvm::GlobalValue>(RISCVCPUModel)->setDSOLocal(true);
1045
1046 auto loadRISCVCPUID = [&](unsigned Index) {
1047 Value *Ptr = Builder.CreateStructGEP(StructTy, RISCVCPUModel, Index);
1048 Value *CPUID = Builder.CreateAlignedLoad(StructTy->getTypeAtIndex(Index),
1049 Ptr, llvm::MaybeAlign());
1050 return CPUID;
1051 };
1052
1053 const llvm::RISCV::CPUModel Model = llvm::RISCV::getCPUModel(CPUStr);
1054
1055 // Compare mvendorid.
1056 Value *VendorID = loadRISCVCPUID(0);
1057 Value *Result =
1058 Builder.CreateICmpEQ(VendorID, Builder.getInt32(Model.MVendorID));
1059
1060 // Compare marchid.
1061 Value *ArchID = loadRISCVCPUID(1);
1062 Result = Builder.CreateAnd(
1063 Result, Builder.CreateICmpEQ(ArchID, Builder.getInt64(Model.MArchID)));
1064
1065 // Compare mimpid.
1066 Value *ImpID = loadRISCVCPUID(2);
1067 Result = Builder.CreateAnd(
1068 Result, Builder.CreateICmpEQ(ImpID, Builder.getInt64(Model.MImpID)));
1069
1070 return Result;
1071}
1072
1073Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
1074 const CallExpr *E,
1075 ReturnValueSlot ReturnValue) {
1076
1077 if (BuiltinID == Builtin::BI__builtin_cpu_supports)
1078 return EmitRISCVCpuSupports(E);
1079 if (BuiltinID == Builtin::BI__builtin_cpu_init)
1080 return EmitRISCVCpuInit();
1081 if (BuiltinID == Builtin::BI__builtin_cpu_is)
1082 return EmitRISCVCpuIs(E);
1083
1084 SmallVector<Value *, 4> Ops;
1085 llvm::Type *ResultType = ConvertType(E->getType());
1086
1087 // Find out if any arguments are required to be integer constant expressions.
1088 unsigned ICEArguments = 0;
1089 ASTContext::GetBuiltinTypeError Error;
1090 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
1091 if (Error == ASTContext::GE_Missing_type) {
1092 // Vector intrinsics don't have a type string.
1093 assert(BuiltinID >= clang::RISCV::FirstRVVBuiltin &&
1094 BuiltinID <= clang::RISCV::LastRVVBuiltin);
1095 ICEArguments = 0;
1096 if (BuiltinID == RISCVVector::BI__builtin_rvv_vget_v ||
1097 BuiltinID == RISCVVector::BI__builtin_rvv_vset_v)
1098 ICEArguments = 1 << 1;
1099 } else {
1100 assert(Error == ASTContext::GE_None && "Unexpected error");
1101 }
1102
1103 if (BuiltinID == RISCV::BI__builtin_riscv_ntl_load)
1104 ICEArguments |= (1 << 1);
1105 if (BuiltinID == RISCV::BI__builtin_riscv_ntl_store)
1106 ICEArguments |= (1 << 2);
1107
1108 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
1109 // Handle aggregate argument, namely RVV tuple types in segment load/store
1110 if (hasAggregateEvaluationKind(E->getArg(i)->getType())) {
1111 LValue L = EmitAggExprToLValue(E->getArg(i));
1112 llvm::Value *AggValue = Builder.CreateLoad(L.getAddress());
1113 Ops.push_back(AggValue);
1114 continue;
1115 }
1116 Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
1117 }
1118
1119 Intrinsic::ID ID = Intrinsic::not_intrinsic;
1120 int PolicyAttrs = 0;
1121 bool IsMasked = false;
1122 // This is used by segment load/store to determine its LLVM type.
1123 unsigned SegInstSEW = 8;
1124
1125 // Required for overloaded intrinsics.
1126 llvm::SmallVector<llvm::Type *> IntrinsicTypes;
1127 switch (BuiltinID) {
1128 default: llvm_unreachable("unexpected builtin ID");
1129 case RISCV::BI__builtin_riscv_orc_b_32:
1130 case RISCV::BI__builtin_riscv_orc_b_64:
1131 case RISCV::BI__builtin_riscv_clmul_32:
1132 case RISCV::BI__builtin_riscv_clmul_64:
1133 case RISCV::BI__builtin_riscv_clmulh_32:
1134 case RISCV::BI__builtin_riscv_clmulh_64:
1135 case RISCV::BI__builtin_riscv_clmulr_32:
1136 case RISCV::BI__builtin_riscv_clmulr_64:
1137 case RISCV::BI__builtin_riscv_xperm4_32:
1138 case RISCV::BI__builtin_riscv_xperm4_64:
1139 case RISCV::BI__builtin_riscv_xperm8_32:
1140 case RISCV::BI__builtin_riscv_xperm8_64:
1141 case RISCV::BI__builtin_riscv_brev8_32:
1142 case RISCV::BI__builtin_riscv_brev8_64:
1143 case RISCV::BI__builtin_riscv_zip_32:
1144 case RISCV::BI__builtin_riscv_unzip_32: {
1145 switch (BuiltinID) {
1146 default: llvm_unreachable("unexpected builtin ID");
1147 // Zbb
1148 case RISCV::BI__builtin_riscv_orc_b_32:
1149 case RISCV::BI__builtin_riscv_orc_b_64:
1150 ID = Intrinsic::riscv_orc_b;
1151 break;
1152
1153 // Zbc
1154 case RISCV::BI__builtin_riscv_clmul_32:
1155 case RISCV::BI__builtin_riscv_clmul_64:
1156 ID = Intrinsic::riscv_clmul;
1157 break;
1158 case RISCV::BI__builtin_riscv_clmulh_32:
1159 case RISCV::BI__builtin_riscv_clmulh_64:
1160 ID = Intrinsic::riscv_clmulh;
1161 break;
1162 case RISCV::BI__builtin_riscv_clmulr_32:
1163 case RISCV::BI__builtin_riscv_clmulr_64:
1164 ID = Intrinsic::riscv_clmulr;
1165 break;
1166
1167 // Zbkx
1168 case RISCV::BI__builtin_riscv_xperm8_32:
1169 case RISCV::BI__builtin_riscv_xperm8_64:
1170 ID = Intrinsic::riscv_xperm8;
1171 break;
1172 case RISCV::BI__builtin_riscv_xperm4_32:
1173 case RISCV::BI__builtin_riscv_xperm4_64:
1174 ID = Intrinsic::riscv_xperm4;
1175 break;
1176
1177 // Zbkb
1178 case RISCV::BI__builtin_riscv_brev8_32:
1179 case RISCV::BI__builtin_riscv_brev8_64:
1180 ID = Intrinsic::riscv_brev8;
1181 break;
1182 case RISCV::BI__builtin_riscv_zip_32:
1183 ID = Intrinsic::riscv_zip;
1184 break;
1185 case RISCV::BI__builtin_riscv_unzip_32:
1186 ID = Intrinsic::riscv_unzip;
1187 break;
1188 }
1189
1190 IntrinsicTypes = {ResultType};
1191 break;
1192 }
1193
1194 // Zk builtins
1195
1196 // Zknh
1197 case RISCV::BI__builtin_riscv_sha256sig0:
1198 ID = Intrinsic::riscv_sha256sig0;
1199 break;
1200 case RISCV::BI__builtin_riscv_sha256sig1:
1201 ID = Intrinsic::riscv_sha256sig1;
1202 break;
1203 case RISCV::BI__builtin_riscv_sha256sum0:
1204 ID = Intrinsic::riscv_sha256sum0;
1205 break;
1206 case RISCV::BI__builtin_riscv_sha256sum1:
1207 ID = Intrinsic::riscv_sha256sum1;
1208 break;
1209
1210 // Zksed
1211 case RISCV::BI__builtin_riscv_sm4ks:
1212 ID = Intrinsic::riscv_sm4ks;
1213 break;
1214 case RISCV::BI__builtin_riscv_sm4ed:
1215 ID = Intrinsic::riscv_sm4ed;
1216 break;
1217
1218 // Zksh
1219 case RISCV::BI__builtin_riscv_sm3p0:
1220 ID = Intrinsic::riscv_sm3p0;
1221 break;
1222 case RISCV::BI__builtin_riscv_sm3p1:
1223 ID = Intrinsic::riscv_sm3p1;
1224 break;
1225
1226 case RISCV::BI__builtin_riscv_clz_32:
1227 case RISCV::BI__builtin_riscv_clz_64: {
1228 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
1229 Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
1230 if (Result->getType() != ResultType)
1231 Result =
1232 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
1233 return Result;
1234 }
1235 case RISCV::BI__builtin_riscv_ctz_32:
1236 case RISCV::BI__builtin_riscv_ctz_64: {
1237 Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
1238 Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
1239 if (Result->getType() != ResultType)
1240 Result =
1241 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
1242 return Result;
1243 }
1244
1245 // Zihintntl
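  // Non-temporal accesses are emitted as ordinary loads/stores tagged with
  // !nontemporal metadata plus a "riscv-nontemporal-domain" metadata node
  // carrying the locality hint (the __RISCV_NTLH_* domain, default 5 = ALL).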
1246 case RISCV::BI__builtin_riscv_ntl_load: {
1247 llvm::Type *ResTy = ConvertType(E->getType());
1248 unsigned DomainVal = 5; // Default __RISCV_NTLH_ALL
1249 if (Ops.size() == 2)
1250 DomainVal = cast<ConstantInt>(Ops[1])->getZExtValue();
1251
1252 llvm::MDNode *RISCVDomainNode = llvm::MDNode::get(
1253 getLLVMContext(),
1254 llvm::ConstantAsMetadata::get(Builder.getInt32(DomainVal)));
1255 llvm::MDNode *NontemporalNode = llvm::MDNode::get(
1256 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
1257
1258 int Width;
1259 if (ResTy->isScalableTy()) {
1260 const ScalableVectorType *SVTy = cast<ScalableVectorType>(ResTy);
1261 llvm::Type *ScalarTy = ResTy->getScalarType();
1262 Width = ScalarTy->getPrimitiveSizeInBits() *
1263 SVTy->getElementCount().getKnownMinValue();
1264 } else
1265 Width = ResTy->getPrimitiveSizeInBits();
1266 LoadInst *Load = Builder.CreateLoad(
1267 Address(Ops[0], ResTy, CharUnits::fromQuantity(Width / 8)));
1268
1269 Load->setMetadata(llvm::LLVMContext::MD_nontemporal, NontemporalNode);
1270 Load->setMetadata(CGM.getModule().getMDKindID("riscv-nontemporal-domain"),
1271 RISCVDomainNode);
1272
1273 return Load;
1274 }
1275 case RISCV::BI__builtin_riscv_ntl_store: {
1276 unsigned DomainVal = 5; // Default __RISCV_NTLH_ALL
1277 if (Ops.size() == 3)
1278 DomainVal = cast<ConstantInt>(Ops[2])->getZExtValue();
1279
1280 llvm::MDNode *RISCVDomainNode = llvm::MDNode::get(
1281 getLLVMContext(),
1282 llvm::ConstantAsMetadata::get(Builder.getInt32(DomainVal)));
1283 llvm::MDNode *NontemporalNode = llvm::MDNode::get(
1284 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
1285
1286 StoreInst *Store = Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
1287 Store->setMetadata(llvm::LLVMContext::MD_nontemporal, NontemporalNode);
1288 Store->setMetadata(CGM.getModule().getMDKindID("riscv-nontemporal-domain"),
1289 RISCVDomainNode);
1290
1291 return Store;
1292 }
1293 // Zihintpause
1294 case RISCV::BI__builtin_riscv_pause: {
1295 llvm::Function *Fn = CGM.getIntrinsic(llvm::Intrinsic::riscv_pause);
1296 return Builder.CreateCall(Fn, {});
1297 }
1298
1299 // XCValu
1300 case RISCV::BI__builtin_riscv_cv_alu_addN:
1301 ID = Intrinsic::riscv_cv_alu_addN;
1302 break;
1303 case RISCV::BI__builtin_riscv_cv_alu_addRN:
1304 ID = Intrinsic::riscv_cv_alu_addRN;
1305 break;
1306 case RISCV::BI__builtin_riscv_cv_alu_adduN:
1307 ID = Intrinsic::riscv_cv_alu_adduN;
1308 break;
1309 case RISCV::BI__builtin_riscv_cv_alu_adduRN:
1310 ID = Intrinsic::riscv_cv_alu_adduRN;
1311 break;
1312 case RISCV::BI__builtin_riscv_cv_alu_clip:
1313 ID = Intrinsic::riscv_cv_alu_clip;
1314 break;
1315 case RISCV::BI__builtin_riscv_cv_alu_clipu:
1316 ID = Intrinsic::riscv_cv_alu_clipu;
1317 break;
1318 case RISCV::BI__builtin_riscv_cv_alu_extbs:
1319 return Builder.CreateSExt(Builder.CreateTrunc(Ops[0], Int8Ty), Int32Ty,
1320 "extbs");
1321 case RISCV::BI__builtin_riscv_cv_alu_extbz:
1322 return Builder.CreateZExt(Builder.CreateTrunc(Ops[0], Int8Ty), Int32Ty,
1323 "extbz");
1324 case RISCV::BI__builtin_riscv_cv_alu_exths:
1325 return Builder.CreateSExt(Builder.CreateTrunc(Ops[0], Int16Ty), Int32Ty,
1326 "exths");
1327 case RISCV::BI__builtin_riscv_cv_alu_exthz:
1328 return Builder.CreateZExt(Builder.CreateTrunc(Ops[0], Int16Ty), Int32Ty,
1329 "exthz");
1330 case RISCV::BI__builtin_riscv_cv_alu_sle:
1331 return Builder.CreateZExt(Builder.CreateICmpSLE(Ops[0], Ops[1]), Int32Ty,
1332 "sle");
1333 case RISCV::BI__builtin_riscv_cv_alu_sleu:
1334 return Builder.CreateZExt(Builder.CreateICmpULE(Ops[0], Ops[1]), Int32Ty,
1335 "sleu");
1336 case RISCV::BI__builtin_riscv_cv_alu_subN:
1337 ID = Intrinsic::riscv_cv_alu_subN;
1338 break;
1339 case RISCV::BI__builtin_riscv_cv_alu_subRN:
1340 ID = Intrinsic::riscv_cv_alu_subRN;
1341 break;
1342 case RISCV::BI__builtin_riscv_cv_alu_subuN:
1343 ID = Intrinsic::riscv_cv_alu_subuN;
1344 break;
1345 case RISCV::BI__builtin_riscv_cv_alu_subuRN:
1346 ID = Intrinsic::riscv_cv_alu_subuRN;
1347 break;
1348
1349 // XAndesBFHCvt
1350 case RISCV::BI__builtin_riscv_nds_fcvt_s_bf16:
1351 return Builder.CreateFPExt(Ops[0], FloatTy);
1352 case RISCV::BI__builtin_riscv_nds_fcvt_bf16_s:
1353 return Builder.CreateFPTrunc(Ops[0], BFloatTy);
1354
1355 // Vector builtins are handled from here.
1356#include "clang/Basic/riscv_vector_builtin_cg.inc"
1357
1358 // SiFive Vector builtins are handled from here.
1359#include "clang/Basic/riscv_sifive_vector_builtin_cg.inc"
1360
1361 // Andes Vector builtins are handled from here.
1362#include "clang/Basic/riscv_andes_vector_builtin_cg.inc"
1363 }
1364
1365 assert(ID != Intrinsic::not_intrinsic);
1366
1367 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
1368 return Builder.CreateCall(F, Ops, "");
1369}
Definition RISCV.cpp:937
static LLVM_ATTRIBUTE_NOINLINE Value * emitRVVUnitStridedSegLoadTupleBuiltin(CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl< Value * > &Ops, int PolicyAttrs, bool IsMasked, unsigned SegInstSEW)
Definition RISCV.cpp:334
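All of the emitter helpers listed above share one signature: a CodeGenFunction, the originating CallExpr, a ReturnValueSlot, the result type, the resolved intrinsic ID, the operand list, and the policy/mask/SEW flags. As a hedged illustration (purely illustrative; this alias does not appear in RISCV.cpp), that common shape can be captured in a single function-pointer type:

    using RVVEmitterFn = llvm::Value *(*)(
        clang::CodeGen::CodeGenFunction *CGF, const clang::CallExpr *E,
        clang::CodeGen::ReturnValueSlot ReturnValue, llvm::Type *ResultType,
        llvm::Intrinsic::ID ID, llvm::SmallVectorImpl<llvm::Value *> &Ops,
        int PolicyAttrs, bool IsMasked, unsigned SegInstSEW);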
TokenType getType() const
Returns the token's type, e.g.
Enumerates target-specific builtins in their own namespaces within namespace clang.
QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error, unsigned *IntegerConstantArgs=nullptr) const
Return the type for the specified builtin.
@ GE_None
No error.
@ GE_Missing_type
Missing a type.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2879
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3083
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3070
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this class is possibly signed.
Definition Address.h:128
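CharUnits and Address usually travel together in these helpers: an alignment in character units plus a raw pointer value and an element type make an aligned Address that the builder can store through. A minimal sketch under that assumption (the helper name is hypothetical, and it presumes compilation inside clang's CodeGen library where Address.h and CodeGenFunction.h are visible):

    // Store Val through Ptr with an explicit byte alignment.
    static void storeWithAlign(clang::CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Ptr, llvm::Value *Val,
                               int64_t AlignInBytes) {
      clang::CharUnits Align = clang::CharUnits::fromQuantity(AlignInBytes);
      clang::CodeGen::Address Addr(Ptr, Val->getType(), Align);
      CGF.Builder.CreateStore(Val, Addr);
    }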
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code.
llvm::Value * EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, const CallExpr *E)
Definition AMDGPU.cpp:258
llvm::Value * EmitRISCVCpuSupports(const CallExpr *E)
Definition RISCV.cpp:970
llvm::Value * EmitRISCVCpuInit()
Definition RISCV.cpp:960
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Definition RISCV.cpp:1073
llvm::Value * EmitRISCVCpuIs(const CallExpr *E)
Definition RISCV.cpp:1032
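EmitRISCVCpuInit, EmitRISCVCpuSupports, and EmitRISCVCpuIs lower the generic CPU-detection builtins for RISC-V targets. A hedged source-level sketch of the calls they service (the feature and CPU name strings below are illustrative and must be spellings clang actually recognizes):

    #include <cstdio>

    int main() {
      __builtin_cpu_init();                // handled by EmitRISCVCpuInit
      if (__builtin_cpu_supports("v"))     // handled by EmitRISCVCpuSupports
        std::puts("V extension available");
      if (__builtin_cpu_is("sifive-u74"))  // handled by EmitRISCVCpuIs; CPU name is illustrative
        std::puts("running on a SiFive U74");
      return 0;
    }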
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a temporary LValue.
static bool hasAggregateEvaluationKind(QualType T)
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
llvm::LLVMContext & getLLVMContext()
This class organizes the cross-function state that is used while generating LLVM code.
llvm::Constant * CreateRuntimeVariable(llvm::Type *Ty, StringRef Name)
Create a new runtime global variable with the specified type and name.
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
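getIntrinsic is the pivot of every helper in this file: an intrinsic ID plus the overloaded type list yields a declared llvm::Function, which the builder then calls with the assembled operands. A hedged sketch of that pattern (the wrapper itself is hypothetical; the ID, types, and operands come from the caller):

    static llvm::Value *
    callOverloadedIntrinsic(clang::CodeGen::CodeGenFunction &CGF,
                            llvm::Intrinsic::ID ID,
                            llvm::ArrayRef<llvm::Type *> OverloadTys,
                            llvm::ArrayRef<llvm::Value *> Ops) {
      // Declare (or look up) the intrinsic specialized on OverloadTys.
      llvm::Function *F = CGF.CGM.getIntrinsic(ID, OverloadTys);
      return CGF.Builder.CreateCall(F, Ops);
    }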
LValue - This represents an lvalue reference.
Definition CGValue.h:182
Address getAddress() const
Definition CGValue.h:361
ReturnValueSlot - Contains the address where the return value of a function can be stored.
Definition CGCall.h:379
This represents one expression.
Definition Expr.h:112
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3078
QualType getType() const
Definition Expr.h:144
QualType getType() const
Definition Value.cpp:237
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ Result
The result type of a method or function.
Definition TypeBase.h:905
U cast(CodeGen::Address addr)
Definition Address.h:327
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64