1 //===--- SPIRVUtils.h ---- SPIR-V Utility Functions -------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains miscellaneous utility functions.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #ifndef LLVM_LIB_TARGET_SPIRV_SPIRVUTILS_H
14 #define LLVM_LIB_TARGET_SPIRV_SPIRVUTILS_H
15
16 #include "MCTargetDesc/SPIRVBaseInfo.h"
17 #include "llvm/Analysis/LoopInfo.h"
18 #include "llvm/CodeGen/MachineBasicBlock.h"
19 #include "llvm/IR/Dominators.h"
20 #include "llvm/IR/GlobalVariable.h"
21 #include "llvm/IR/IRBuilder.h"
22 #include "llvm/IR/TypedPointerType.h"
23 #include <queue>
24 #include <string>
25 #include <unordered_map>
26 #include <unordered_set>
27
28 namespace llvm {
29 class MCInst;
30 class MachineFunction;
31 class MachineInstr;
32 class MachineInstrBuilder;
33 class MachineIRBuilder;
34 class MachineRegisterInfo;
35 class Register;
36 class StringRef;
37 class SPIRVInstrInfo;
38 class SPIRVSubtarget;
39 class SPIRVGlobalRegistry;
40
41 // This class implements a partial ordering visitor, which visits a cyclic graph
42 // in natural topological-like ordering. Topological ordering is not defined for
43 // directed graphs with cycles, so this assumes cycles are a single node, and
44 // ignores back-edges. The cycle is visited from the entry in the same
45 // topological-like ordering.
46 //
47 // Note: this visitor REQUIRES a reducible graph.
48 //
49 // This means once we visit a node, we know all the possible ancestors have been
50 // visited.
51 //
52 // clang-format off
53 //
54 // Given this graph:
55 //
56 // ,-> B -\
57 // A -+ +---> D ----> E -> F -> G -> H
58 // `-> C -/ ^ |
59 // +-----------------+
60 //
61 // Visit order is:
62 // A, [B, C in any order], D, E, F, G, H
63 //
64 // clang-format on
65 //
66 // Changing the function CFG between the construction of the visitor and
67 // visiting is undefined. The visitor can be reused, but if the CFG is updated,
68 // the visitor must be rebuilt.
class PartialOrderingVisitor {
  // Dominator tree and loop info built for the function passed to the
  // constructor.
  DomTreeBuilder::BBDomTree DT;
  LoopInfo LI;

  // Bookkeeping for the internal rank-computation worklist.
  std::unordered_set<BasicBlock *> Queued = {};
  std::queue<BasicBlock *> ToVisit = {};

  // Ordering data computed for each block.
  struct OrderInfo {
    // Topological-like rank (see class comment).
    size_t Rank;
    // Position in the traversal; used to break ties between equal ranks.
    size_t TraversalIndex;
  };

  using BlockToOrderInfoMap = std::unordered_map<BasicBlock *, OrderInfo>;
  BlockToOrderInfoMap BlockToOrder;
  // Blocks in their final visit order.
  std::vector<BasicBlock *> Order = {};

  // Get all basic-blocks reachable from Start.
  std::unordered_set<BasicBlock *> getReachableFrom(BasicBlock *Start);

  // Internal function used to determine the partial ordering.
  // Visits |BB| with the current rank being |Rank|.
  size_t visit(BasicBlock *BB, size_t Rank);

  // True if |BB| can be visited at this point of the ordering computation;
  // internal helper.
  bool CanBeVisited(BasicBlock *BB) const;

public:
  // Returns the rank assigned to |BB| by the partial ordering.
  size_t GetNodeRank(BasicBlock *BB) const;

  // Build the visitor to operate on the function F.
  PartialOrderingVisitor(Function &F);

  // Returns true if |LHS| comes before |RHS| in the partial ordering.
  // If |LHS| and |RHS| have the same rank, the traversal order determines the
  // order (order is stable).
  bool compare(const BasicBlock *LHS, const BasicBlock *RHS) const;

  // Visit the function starting from the basic block |Start|, and calling |Op|
  // on each visited BB. This traversal ignores back-edges, meaning this won't
  // visit a node to which |Start| is not an ancestor.
  // If Op returns |true|, the visitor continues. If |Op| returns false, the
  // visitor will stop at that rank. This means if 2 nodes share the same rank,
  // and Op returns false when visiting the first, the second will be visited
  // afterwards. But none of their successors will.
  void partialOrderVisit(BasicBlock &Start,
                         std::function<bool(BasicBlock *)> Op);
};
115
// Add the given string as a series of integer operands, inserting null
// terminators and padding so that the operands form whole 32-bit
// little-endian words.
119 void addStringImm(const StringRef &Str, MCInst &Inst);
120 void addStringImm(const StringRef &Str, MachineInstrBuilder &MIB);
121 void addStringImm(const StringRef &Str, IRBuilder<> &B,
122 std::vector<Value *> &Args);
123
124 // Read the series of integer operands back as a null-terminated string using
125 // the reverse of the logic in addStringImm.
126 std::string getStringImm(const MachineInstr &MI, unsigned StartIndex);
127
128 // Returns the string constant that the register refers to. It is assumed that
129 // Reg is a global value that contains a string.
130 std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI);
131
132 // Add the given numerical immediate to MIB.
133 void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB);
134
135 // Add an OpName instruction for the given target register.
136 void buildOpName(Register Target, const StringRef &Name,
137 MachineIRBuilder &MIRBuilder);
138 void buildOpName(Register Target, const StringRef &Name, MachineInstr &I,
139 const SPIRVInstrInfo &TII);
140
141 // Add an OpDecorate instruction for the given Reg.
142 void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder,
143 SPIRV::Decoration::Decoration Dec,
144 const std::vector<uint32_t> &DecArgs,
145 StringRef StrImm = "");
146 void buildOpDecorate(Register Reg, MachineInstr &I, const SPIRVInstrInfo &TII,
147 SPIRV::Decoration::Decoration Dec,
148 const std::vector<uint32_t> &DecArgs,
149 StringRef StrImm = "");
150
// Add an OpMemberDecorate instruction for the given Reg.
152 void buildOpMemberDecorate(Register Reg, MachineIRBuilder &MIRBuilder,
153 SPIRV::Decoration::Decoration Dec, uint32_t Member,
154 const std::vector<uint32_t> &DecArgs,
155 StringRef StrImm = "");
156 void buildOpMemberDecorate(Register Reg, MachineInstr &I,
157 const SPIRVInstrInfo &TII,
158 SPIRV::Decoration::Decoration Dec, uint32_t Member,
159 const std::vector<uint32_t> &DecArgs,
160 StringRef StrImm = "");
161
162 // Add an OpDecorate instruction by "spirv.Decorations" metadata node.
163 void buildOpSpirvDecorations(Register Reg, MachineIRBuilder &MIRBuilder,
164 const MDNode *GVarMD);
165
166 // Return a valid position for the OpVariable instruction inside a function,
167 // i.e., at the beginning of the first block of the function.
168 MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I);
169
170 // Return a valid position for the instruction at the end of the block before
171 // terminators and debug instructions.
172 MachineBasicBlock::iterator getInsertPtValidEnd(MachineBasicBlock *MBB);
173
174 // Returns true if a pointer to the storage class can be casted to/from a
175 // pointer to the Generic storage class.
isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)176 constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
177 switch (SC) {
178 case SPIRV::StorageClass::Workgroup:
179 case SPIRV::StorageClass::CrossWorkgroup:
180 case SPIRV::StorageClass::Function:
181 return true;
182 default:
183 return false;
184 }
185 }
186
// Convert a SPIR-V storage class to the corresponding LLVM IR address space.
// TODO: maybe the following two functions should be handled in the subtarget
// to allow for different OpenCL vs Vulkan handling.
constexpr unsigned
storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC) {
  // NOTE: this is the target's storage-class -> address-space numbering;
  // addressSpaceToStorageClass (declared below) is expected to be its
  // inverse, so changes here must be mirrored there.
  switch (SC) {
  case SPIRV::StorageClass::Function:
    return 0;
  case SPIRV::StorageClass::CrossWorkgroup:
    return 1;
  case SPIRV::StorageClass::UniformConstant:
    return 2;
  case SPIRV::StorageClass::Workgroup:
    return 3;
  case SPIRV::StorageClass::Generic:
    return 4;
  case SPIRV::StorageClass::DeviceOnlyINTEL:
    return 5;
  case SPIRV::StorageClass::HostOnlyINTEL:
    return 6;
  case SPIRV::StorageClass::Input:
    return 7;
  case SPIRV::StorageClass::Output:
    return 8;
  case SPIRV::StorageClass::CodeSectionINTEL:
    return 9;
  case SPIRV::StorageClass::Private:
    return 10;
  case SPIRV::StorageClass::StorageBuffer:
    return 11;
  case SPIRV::StorageClass::Uniform:
    return 12;
  default:
    // Remaining storage classes have no assigned address space.
    report_fatal_error("Unable to get address space id");
  }
}
223
224 // Convert an LLVM IR address space to a SPIR-V storage class.
225 SPIRV::StorageClass::StorageClass
226 addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI);
227
228 SPIRV::MemorySemantics::MemorySemantics
229 getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC);
230
231 SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord);
232
233 SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id);
234
235 // Find def instruction for the given ConstReg, walking through
236 // spv_track_constant and ASSIGN_TYPE instructions. Updates ConstReg by def
237 // of OpConstant instruction.
238 MachineInstr *getDefInstrMaybeConstant(Register &ConstReg,
239 const MachineRegisterInfo *MRI);
240
241 // Get constant integer value of the given ConstReg.
242 uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI);
243
244 // Check if MI is a SPIR-V specific intrinsic call.
245 bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID);
246 // Check if it's a SPIR-V specific intrinsic call.
247 bool isSpvIntrinsic(const Value *Arg);
248
249 // Get type of i-th operand of the metadata node.
250 Type *getMDOperandAsType(const MDNode *N, unsigned I);
251
252 // If OpenCL or SPIR-V builtin function name is recognized, return a demangled
253 // name, otherwise return an empty string.
254 std::string getOclOrSpirvBuiltinDemangledName(StringRef Name);
255
256 // Check if a string contains a builtin prefix.
257 bool hasBuiltinTypePrefix(StringRef Name);
258
259 // Check if given LLVM type is a special opaque builtin type.
260 bool isSpecialOpaqueType(const Type *Ty);
261
// Check if the function is a SPIR-V entry point.
263 bool isEntryPoint(const Function &F);
264
265 // Parse basic scalar type name, substring TypeName, and return LLVM type.
266 Type *parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx);
267
268 // Sort blocks in a partial ordering, so each block is after all its
269 // dominators. This should match both the SPIR-V and the MIR requirements.
270 // Returns true if the function was changed.
271 bool sortBlocks(Function &F);
272
hasInitializer(const GlobalVariable * GV)273 inline bool hasInitializer(const GlobalVariable *GV) {
274 return GV->hasInitializer() && !isa<UndefValue>(GV->getInitializer());
275 }
276
277 // True if this is an instance of TypedPointerType.
isTypedPointerTy(const Type * T)278 inline bool isTypedPointerTy(const Type *T) {
279 return T && T->getTypeID() == Type::TypedPointerTyID;
280 }
281
282 // True if this is an instance of PointerType.
isUntypedPointerTy(const Type * T)283 inline bool isUntypedPointerTy(const Type *T) {
284 return T && T->getTypeID() == Type::PointerTyID;
285 }
286
287 // True if this is an instance of PointerType or TypedPointerType.
isPointerTy(const Type * T)288 inline bool isPointerTy(const Type *T) {
289 return isUntypedPointerTy(T) || isTypedPointerTy(T);
290 }
291
292 // Get the address space of this pointer or pointer vector type for instances of
293 // PointerType or TypedPointerType.
getPointerAddressSpace(const Type * T)294 inline unsigned getPointerAddressSpace(const Type *T) {
295 Type *SubT = T->getScalarType();
296 return SubT->getTypeID() == Type::PointerTyID
297 ? cast<PointerType>(SubT)->getAddressSpace()
298 : cast<TypedPointerType>(SubT)->getAddressSpace();
299 }
300
301 // Return true if the Argument is decorated with a pointee type
hasPointeeTypeAttr(Argument * Arg)302 inline bool hasPointeeTypeAttr(Argument *Arg) {
303 return Arg->hasByValAttr() || Arg->hasByRefAttr() || Arg->hasStructRetAttr();
304 }
305
306 // Return the pointee type of the argument or nullptr otherwise
getPointeeTypeByAttr(Argument * Arg)307 inline Type *getPointeeTypeByAttr(Argument *Arg) {
308 if (Arg->hasByValAttr())
309 return Arg->getParamByValType();
310 if (Arg->hasStructRetAttr())
311 return Arg->getParamStructRetType();
312 if (Arg->hasByRefAttr())
313 return Arg->getParamByRefType();
314 return nullptr;
315 }
316
reconstructFunctionType(Function * F)317 inline Type *reconstructFunctionType(Function *F) {
318 SmallVector<Type *> ArgTys;
319 for (unsigned i = 0; i < F->arg_size(); ++i)
320 ArgTys.push_back(F->getArg(i)->getType());
321 return FunctionType::get(F->getReturnType(), ArgTys, F->isVarArg());
322 }
323
324 #define TYPED_PTR_TARGET_EXT_NAME "spirv.$TypedPointerType"
getTypedPointerWrapper(Type * ElemTy,unsigned AS)325 inline Type *getTypedPointerWrapper(Type *ElemTy, unsigned AS) {
326 return TargetExtType::get(ElemTy->getContext(), TYPED_PTR_TARGET_EXT_NAME,
327 {ElemTy}, {AS});
328 }
329
isTypedPointerWrapper(const TargetExtType * ExtTy)330 inline bool isTypedPointerWrapper(const TargetExtType *ExtTy) {
331 return ExtTy->getName() == TYPED_PTR_TARGET_EXT_NAME &&
332 ExtTy->getNumIntParameters() == 1 &&
333 ExtTy->getNumTypeParameters() == 1;
334 }
335
336 // True if this is an instance of PointerType or TypedPointerType.
isPointerTyOrWrapper(const Type * Ty)337 inline bool isPointerTyOrWrapper(const Type *Ty) {
338 if (auto *ExtTy = dyn_cast<TargetExtType>(Ty))
339 return isTypedPointerWrapper(ExtTy);
340 return isPointerTy(Ty);
341 }
342
applyWrappers(Type * Ty)343 inline Type *applyWrappers(Type *Ty) {
344 if (auto *ExtTy = dyn_cast<TargetExtType>(Ty)) {
345 if (isTypedPointerWrapper(ExtTy))
346 return TypedPointerType::get(applyWrappers(ExtTy->getTypeParameter(0)),
347 ExtTy->getIntParameter(0));
348 } else if (auto *VecTy = dyn_cast<VectorType>(Ty)) {
349 Type *ElemTy = VecTy->getElementType();
350 Type *NewElemTy = ElemTy->isTargetExtTy() ? applyWrappers(ElemTy) : ElemTy;
351 if (NewElemTy != ElemTy)
352 return VectorType::get(NewElemTy, VecTy->getElementCount());
353 }
354 return Ty;
355 }
356
// Return the pointee type of |Ty| when it is a TypedPointerType or the
// typed-pointer wrapper; nullptr otherwise (including null input).
inline Type *getPointeeType(const Type *Ty) {
  if (!Ty)
    return nullptr;
  if (const auto *PtrTy = dyn_cast<TypedPointerType>(Ty))
    return PtrTy->getElementType();
  const auto *ExtTy = dyn_cast<TargetExtType>(Ty);
  if (ExtTy && isTypedPointerWrapper(ExtTy))
    return ExtTy->getTypeParameter(0);
  return nullptr;
}
367
isUntypedEquivalentToTyExt(Type * Ty1,Type * Ty2)368 inline bool isUntypedEquivalentToTyExt(Type *Ty1, Type *Ty2) {
369 if (!isUntypedPointerTy(Ty1) || !Ty2)
370 return false;
371 if (auto *ExtTy = dyn_cast<TargetExtType>(Ty2))
372 if (isTypedPointerWrapper(ExtTy) &&
373 ExtTy->getTypeParameter(0) ==
374 IntegerType::getInt8Ty(Ty1->getContext()) &&
375 ExtTy->getIntParameter(0) == cast<PointerType>(Ty1)->getAddressSpace())
376 return true;
377 return false;
378 }
379
isEquivalentTypes(Type * Ty1,Type * Ty2)380 inline bool isEquivalentTypes(Type *Ty1, Type *Ty2) {
381 return isUntypedEquivalentToTyExt(Ty1, Ty2) ||
382 isUntypedEquivalentToTyExt(Ty2, Ty1);
383 }
384
toTypedPointer(Type * Ty)385 inline Type *toTypedPointer(Type *Ty) {
386 if (Type *NewTy = applyWrappers(Ty); NewTy != Ty)
387 return NewTy;
388 return isUntypedPointerTy(Ty)
389 ? TypedPointerType::get(IntegerType::getInt8Ty(Ty->getContext()),
390 getPointerAddressSpace(Ty))
391 : Ty;
392 }
393
toTypedFunPointer(FunctionType * FTy)394 inline Type *toTypedFunPointer(FunctionType *FTy) {
395 Type *OrigRetTy = FTy->getReturnType();
396 Type *RetTy = toTypedPointer(OrigRetTy);
397 bool IsUntypedPtr = false;
398 for (Type *PTy : FTy->params()) {
399 if (isUntypedPointerTy(PTy)) {
400 IsUntypedPtr = true;
401 break;
402 }
403 }
404 if (!IsUntypedPtr && RetTy == OrigRetTy)
405 return FTy;
406 SmallVector<Type *> ParamTys;
407 for (Type *PTy : FTy->params())
408 ParamTys.push_back(toTypedPointer(PTy));
409 return FunctionType::get(RetTy, ParamTys, FTy->isVarArg());
410 }
411
// Return the typed-pointer form of |Ty|, dispatching between function types
// and all other types.
inline const Type *unifyPtrType(const Type *Ty) {
  if (const auto *FTy = dyn_cast<FunctionType>(Ty))
    return toTypedFunPointer(const_cast<FunctionType *>(FTy));
  return toTypedPointer(const_cast<Type *>(Ty));
}
417
isVector1(Type * Ty)418 inline bool isVector1(Type *Ty) {
419 auto *FVTy = dyn_cast<FixedVectorType>(Ty);
420 return FVTy && FVTy->getNumElements() == 1;
421 }
422
423 // Modify an LLVM type to conform with future transformations in IRTranslator.
424 // At the moment use cases comprise only a <1 x Type> vector. To extend when/if
425 // needed.
normalizeType(Type * Ty)426 inline Type *normalizeType(Type *Ty) {
427 auto *FVTy = dyn_cast<FixedVectorType>(Ty);
428 if (!FVTy || FVTy->getNumElements() != 1)
429 return Ty;
430 // If it's a <1 x Type> vector type, replace it by the element type, because
431 // it's not a legal vector type in LLT and IRTranslator will represent it as
432 // the scalar eventually.
433 return normalizeType(FVTy->getElementType());
434 }
435
getNormalizedPoisonValue(Type * Ty)436 inline PoisonValue *getNormalizedPoisonValue(Type *Ty) {
437 return PoisonValue::get(normalizeType(Ty));
438 }
439
buildMD(Value * Arg)440 inline MetadataAsValue *buildMD(Value *Arg) {
441 LLVMContext &Ctx = Arg->getContext();
442 return MetadataAsValue::get(
443 Ctx, MDNode::get(Ctx, ValueAsMetadata::getConstant(Arg)));
444 }
445
446 CallInst *buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef<Type *> Types,
447 Value *Arg, Value *Arg2, ArrayRef<Constant *> Imms,
448 IRBuilder<> &B);
449
450 MachineInstr *getVRegDef(MachineRegisterInfo &MRI, Register Reg);
451
452 #define SPIRV_BACKEND_SERVICE_FUN_NAME "__spirv_backend_service_fun"
453 bool getVacantFunctionName(Module &M, std::string &Name);
454
455 void setRegClassType(Register Reg, const Type *Ty, SPIRVGlobalRegistry *GR,
456 MachineIRBuilder &MIRBuilder,
457 SPIRV::AccessQualifier::AccessQualifier AccessQual,
458 bool EmitIR, bool Force = false);
459 void setRegClassType(Register Reg, const MachineInstr *SpvType,
460 SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI,
461 const MachineFunction &MF, bool Force = false);
462 Register createVirtualRegister(const MachineInstr *SpvType,
463 SPIRVGlobalRegistry *GR,
464 MachineRegisterInfo *MRI,
465 const MachineFunction &MF);
466 Register createVirtualRegister(const MachineInstr *SpvType,
467 SPIRVGlobalRegistry *GR,
468 MachineIRBuilder &MIRBuilder);
469 Register createVirtualRegister(
470 const Type *Ty, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIRBuilder,
471 SPIRV::AccessQualifier::AccessQualifier AccessQual, bool EmitIR);
472
473 // Return true if there is an opaque pointer type nested in the argument.
474 bool isNestedPointer(const Type *Ty);
475
enum FPDecorationId { NONE, RTE, RTZ, RTP, RTN, SAT };

// Map a demangled builtin-name postfix to its FP decoration id:
// "rte" -> RTE, "rtz" -> RTZ, "rtp" -> RTP, "rtn" -> RTN, "sat" -> SAT,
// anything else -> NONE.
//
// A plain comparison chain replaces the previous function-local static
// std::unordered_map: for a five-entry table the map cost a thread-safe
// init guard on every call plus heap-allocated std::string keys.
inline FPDecorationId demangledPostfixToDecorationId(const std::string &S) {
  if (S == "rte")
    return FPDecorationId::RTE;
  if (S == "rtz")
    return FPDecorationId::RTZ;
  if (S == "rtp")
    return FPDecorationId::RTP;
  if (S == "rtn")
    return FPDecorationId::RTN;
  if (S == "sat")
    return FPDecorationId::SAT;
  return FPDecorationId::NONE;
}
488
489 SmallVector<MachineInstr *, 4>
490 createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode,
491 unsigned MinWC, unsigned ContinuedOpcode,
492 ArrayRef<Register> Args, Register ReturnRegister,
493 Register TypeID);
494
495 // Instruction selection directed by type folding.
496 const std::set<unsigned> &getTypeFoldingSupportedOpcodes();
497 bool isTypeFoldingSupported(unsigned Opcode);
498
499 // Get loop controls from llvm.loop. metadata.
500 SmallVector<unsigned, 1> getSpirvLoopControlOperandsFromLoopMetadata(Loop *L);
501
502 // Traversing [g]MIR accounting for pseudo-instructions.
503 MachineInstr *passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI);
504 MachineInstr *getDef(const MachineOperand &MO, const MachineRegisterInfo *MRI);
505 MachineInstr *getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI);
506 int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI);
507 unsigned getArrayComponentCount(const MachineRegisterInfo *MRI,
508 const MachineInstr *ResType);
509
510 } // namespace llvm
511 #endif // LLVM_LIB_TARGET_SPIRV_SPIRVUTILS_H
512