//===-- AMDGPULowerKernelArguments.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This pass replaces accesses to kernel arguments with loads from
/// offsets from the kernarg base pointer.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-lower-kernel-arguments"

using namespace llvm;

namespace {

class PreloadKernelArgInfo {
private:
  Function &F;
  const GCNSubtarget &ST;
  unsigned NumFreeUserSGPRs;

public:
  SmallVector<llvm::Metadata *, 8> KernelArgMetadata;

  PreloadKernelArgInfo(Function &F, const GCNSubtarget &ST) : F(F), ST(ST) {
    setInitialFreeUserSGPRsCount();
  }

  // Sets the initial number of user SGPRs that remain free for preloading
  // kernel arguments.
  void setInitialFreeUserSGPRsCount() {
    const unsigned MaxUserSGPRs = ST.getMaxNumUserSGPRs();
    GCNUserSGPRUsageInfo UserSGPRInfo(F, ST);

    NumFreeUserSGPRs = MaxUserSGPRs - UserSGPRInfo.getNumUsedUserSGPRs();
  }

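  // Try to reserve user SGPRs for preloading an argument of AllocSize bytes
  // located at kernarg offset ArgOffset. Each user SGPR covers four bytes;
  // any gap after the previous explicit argument (LastExplicitArgOffset) is
  // charged as padding SGPRs. Returns true if the argument fits in the
  // remaining free user SGPRs, or if it packs into the SGPR already reserved
  // for the previous argument.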
  bool tryAllocPreloadSGPRs(unsigned AllocSize, uint64_t ArgOffset,
                            uint64_t LastExplicitArgOffset) {
    // Check if this argument may be loaded into the same register as the
    // previous argument.
    if (!isAligned(Align(4), ArgOffset) && AllocSize < 4)
      return true;

    // Pad SGPRs for kernarg alignment.
    unsigned Padding = ArgOffset - LastExplicitArgOffset;
    unsigned PaddingSGPRs = alignTo(Padding, 4) / 4;
    unsigned NumPreloadSGPRs = alignTo(AllocSize, 4) / 4;
    if (NumPreloadSGPRs + PaddingSGPRs > NumFreeUserSGPRs)
      return false;

    NumFreeUserSGPRs -= (NumPreloadSGPRs + PaddingSGPRs);
    return true;
  }
};

class AMDGPULowerKernelArguments : public FunctionPass {
public:
  static char ID;

  AMDGPULowerKernelArguments() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
    AU.setPreservesAll();
  }
};

} // end anonymous namespace

// Skip over leading static allocas when choosing the insertion point for the
// kernarg loads.
static BasicBlock::iterator getInsertPt(BasicBlock &BB) {
  BasicBlock::iterator InsPt = BB.getFirstInsertionPt();
  for (BasicBlock::iterator E = BB.end(); InsPt != E; ++InsPt) {
    AllocaInst *AI = dyn_cast<AllocaInst>(&*InsPt);

    // If this is a dynamic alloca, the value may depend on the loaded kernargs,
    // so loads will need to be inserted before it.
    if (!AI || !AI->isStaticAlloca())
      break;
  }

  return InsPt;
}

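// Lower the arguments of an AMDGPU kernel: arguments that cannot be preloaded
// into user SGPRs are rewritten as loads from the kernarg segment at their
// ABI offsets. Returns true if the function was modified.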
static bool lowerKernelArguments(Function &F, const TargetMachine &TM) {
  CallingConv::ID CC = F.getCallingConv();
  if (CC != CallingConv::AMDGPU_KERNEL || F.arg_empty())
    return false;

  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  LLVMContext &Ctx = F.getParent()->getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  BasicBlock &EntryBlock = *F.begin();
  IRBuilder<> Builder(&*getInsertPt(EntryBlock));

  const Align KernArgBaseAlign(16); // FIXME: Increase if necessary
  const uint64_t BaseOffset = ST.getExplicitKernelArgOffset();

  Align MaxAlign;
  // FIXME: Alignment is broken with explicit arg offset.
  const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F, MaxAlign);
  if (TotalKernArgSize == 0)
    return false;

  CallInst *KernArgSegment =
      Builder.CreateIntrinsic(Intrinsic::amdgcn_kernarg_segment_ptr, {}, {},
                              nullptr, F.getName() + ".kernarg.segment");

  KernArgSegment->addRetAttr(Attribute::NonNull);
  KernArgSegment->addRetAttr(
      Attribute::getWithDereferenceableBytes(Ctx, TotalKernArgSize));

  uint64_t ExplicitArgOffset = 0;
  // Preloaded kernel arguments must be sequential.
  bool InPreloadSequence = true;
  PreloadKernelArgInfo PreloadInfo(F, ST);

  for (Argument &Arg : F.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    MaybeAlign ParamAlign = IsByRef ? Arg.getParamAlign() : std::nullopt;
    Align ABITypeAlign = DL.getValueOrABITypeAlignment(ParamAlign, ArgTy);

    uint64_t Size = DL.getTypeSizeInBits(ArgTy);
    uint64_t AllocSize = DL.getTypeAllocSize(ArgTy);

    uint64_t EltOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + BaseOffset;
    uint64_t LastExplicitArgOffset = ExplicitArgOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + AllocSize;
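    // EltOffset is where this argument lives relative to the kernarg segment
    // base (BaseOffset included); ExplicitArgOffset now points just past this
    // argument, ready for the next iteration.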

    // Try to preload this argument into user SGPRs.
    if (Arg.hasInRegAttr() && InPreloadSequence && ST.hasKernargPreload() &&
        !ST.needsKernargPreloadBackwardsCompatibility() &&
        !Arg.getType()->isAggregateType())
      if (PreloadInfo.tryAllocPreloadSGPRs(AllocSize, EltOffset,
                                           LastExplicitArgOffset))
        continue;

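    // Preloading stops at the first argument that cannot be placed in user
    // SGPRs; it and every later argument fall through to the load-based
    // lowering below.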
    InPreloadSequence = false;

    if (Arg.use_empty())
      continue;

    // If this is a byref argument, the loads are already explicit in the
    // function. We just need to rewrite the pointer values.
    if (IsByRef) {
      Value *ArgOffsetPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".byval.kernarg.offset");

      Value *CastOffsetPtr =
          Builder.CreateAddrSpaceCast(ArgOffsetPtr, Arg.getType());
      Arg.replaceAllUsesWith(CastOffsetPtr);
      continue;
    }

    if (PointerType *PT = dyn_cast<PointerType>(ArgTy)) {
      // FIXME: Hack. We rely on AssertZext to be able to fold DS addressing
      // modes on SI to know the high bits are 0 so pointer adds don't wrap. We
      // can't represent this with range metadata because it's only allowed for
      // integer types.
      if ((PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
           PT->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) &&
          !ST.hasUsableDSOffset())
        continue;

      // FIXME: We can replace this with equivalent alias.scope/noalias
      // metadata, but this appears to be a lot of work.
      if (Arg.hasNoAliasAttr())
        continue;
    }

    auto *VT = dyn_cast<FixedVectorType>(ArgTy);
    bool IsV3 = VT && VT->getNumElements() == 3;
    bool DoShiftOpt = Size < 32 && !ArgTy->isAggregateType();

    VectorType *V4Ty = nullptr;

    int64_t AlignDownOffset = alignDown(EltOffset, 4);
    int64_t OffsetDiff = EltOffset - AlignDownOffset;
    Align AdjustedAlign = commonAlignment(
        KernArgBaseAlign, DoShiftOpt ? AlignDownOffset : EltOffset);

    Value *ArgPtr;
    Type *AdjustedArgTy;
    if (DoShiftOpt) { // FIXME: Handle aggregate types
      // Since we don't have sub-dword scalar loads, avoid doing an extload by
      // loading earlier than the argument address, and extracting the relevant
      // bits.
      //
      // Additionally widen any sub-dword load to i32 even if suitably aligned,
      // so that CSE between different argument loads works easily.
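      //
      // For example, with a hypothetical i16 argument at EltOffset == 2, the
      // i32 at offset 0 is loaded and bits [31:16] are extracted by the
      // lshr/trunc emitted further below.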
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, AlignDownOffset,
          Arg.getName() + ".kernarg.offset.align.down");
      AdjustedArgTy = Builder.getInt32Ty();
    } else {
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".kernarg.offset");
      AdjustedArgTy = ArgTy;
    }

    if (IsV3 && Size >= 32) {
      V4Ty = FixedVectorType::get(VT->getElementType(), 4);
      // Use the hack that clang uses to avoid SelectionDAG ruining v3 loads
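      // (e.g. a <3 x i32> argument is loaded as <4 x i32> here and narrowed
      // back to three elements by the shufflevector emitted below).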
      AdjustedArgTy = V4Ty;
    }

    LoadInst *Load =
        Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign);
    Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));

    MDBuilder MDB(Ctx);

    if (isa<PointerType>(ArgTy)) {
      if (Arg.hasNonNullAttr())
        Load->setMetadata(LLVMContext::MD_nonnull, MDNode::get(Ctx, {}));

      uint64_t DerefBytes = Arg.getDereferenceableBytes();
      if (DerefBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable,
          MDNode::get(Ctx,
                      MDB.createConstant(
                        ConstantInt::get(Builder.getInt64Ty(), DerefBytes))));
      }

      uint64_t DerefOrNullBytes = Arg.getDereferenceableOrNullBytes();
      if (DerefOrNullBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable_or_null,
          MDNode::get(Ctx,
                      MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
                                                          DerefOrNullBytes))));
      }

      if (MaybeAlign ParamAlign = Arg.getParamAlign()) {
        Load->setMetadata(
            LLVMContext::MD_align,
            MDNode::get(Ctx, MDB.createConstant(ConstantInt::get(
                                 Builder.getInt64Ty(), ParamAlign->value()))));
      }
    }

    // TODO: Convert noalias arg to !noalias

    if (DoShiftOpt) {
      Value *ExtractBits = OffsetDiff == 0 ?
        Load : Builder.CreateLShr(Load, OffsetDiff * 8);

      IntegerType *ArgIntTy = Builder.getIntNTy(Size);
      Value *Trunc = Builder.CreateTrunc(ExtractBits, ArgIntTy);
      Value *NewVal = Builder.CreateBitCast(Trunc, ArgTy,
                                            Arg.getName() + ".load");
      Arg.replaceAllUsesWith(NewVal);
    } else if (IsV3) {
      Value *Shuf = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 2},
                                                Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Shuf);
    } else {
      Load->setName(Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Load);
    }
  }

  KernArgSegment->addRetAttr(
      Attribute::getWithAlignment(Ctx, std::max(KernArgBaseAlign, MaxAlign)));

  return true;
}

bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
  auto &TPC = getAnalysis<TargetPassConfig>();
  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  return lowerKernelArguments(F, TM);
}

INITIALIZE_PASS_BEGIN(AMDGPULowerKernelArguments, DEBUG_TYPE,
                      "AMDGPU Lower Kernel Arguments", false, false)
INITIALIZE_PASS_END(AMDGPULowerKernelArguments, DEBUG_TYPE,
                    "AMDGPU Lower Kernel Arguments", false, false)

char AMDGPULowerKernelArguments::ID = 0;

FunctionPass *llvm::createAMDGPULowerKernelArgumentsPass() {
  return new AMDGPULowerKernelArguments();
}

PreservedAnalyses
AMDGPULowerKernelArgumentsPass::run(Function &F, FunctionAnalysisManager &AM) {
  bool Changed = lowerKernelArguments(F, TM);
  if (Changed) {
    // TODO: Preserve many more analyses.
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }

  return PreservedAnalyses::all();
}