//===-- AMDGPUMachineFunction.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUMachineFunction.h"
#include "AMDGPUPerfHintAnalysis.h"
#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

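// Cache per-function state derived from the IR function and the target:
// whether this is an entry / module entry function, the floating-point mode,
// the "amdgpu-memory-bound" and "amdgpu-wave-limiter" performance hints
// (set by AMDGPUPerfHintAnalysis), and, for kernel calling conventions, the
// explicit kernel argument size and alignment reported by the subtarget.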
AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF)
    : MachineFunctionInfo(), Mode(MF.getFunction()),
      IsEntryFunction(
          AMDGPU::isEntryFunctionCC(MF.getFunction().getCallingConv())),
      IsModuleEntryFunction(
          AMDGPU::isModuleEntryFunctionCC(MF.getFunction().getCallingConv())),
      NoSignedZerosFPMath(MF.getTarget().Options.NoSignedZerosFPMath) {
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);

  // FIXME: Should initialize KernArgSize based on ExplicitKernelArgOffset,
  // except reserved size is not correctly aligned.
  const Function &F = MF.getFunction();

  Attribute MemBoundAttr = F.getFnAttribute("amdgpu-memory-bound");
  MemoryBound = MemBoundAttr.getValueAsBool();

  Attribute WaveLimitAttr = F.getFnAttribute("amdgpu-wave-limiter");
  WaveLimiter = WaveLimitAttr.getValueAsBool();

  CallingConv::ID CC = F.getCallingConv();
  if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL)
    ExplicitKernArgSize = ST.getExplicitKernArgSize(F, MaxKernArgAlign);
}

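// Assign (or return the previously assigned) LDS offset for GV. Offsets are
// handed out in the order globals are first encountered during lowering:
// StaticLDSSize is rounded up to GV's alignment, the resulting offset is
// memoized in LocalMemoryObjects, and LDSSize is then re-aligned so any
// dynamic LDS placed after the static allocations keeps its required
// alignment.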
unsigned AMDGPUMachineFunction::allocateLDSGlobal(const DataLayout &DL,
                                                  const GlobalVariable &GV) {
  auto Entry = LocalMemoryObjects.insert(std::make_pair(&GV, 0));
  if (!Entry.second)
    return Entry.first->second;

  Align Alignment =
      DL.getValueOrABITypeAlignment(GV.getAlign(), GV.getValueType());

  // TODO: We should sort these to minimize wasted space due to alignment
  // padding. Currently the padding is decided by the first encountered use
  // during lowering.
  unsigned Offset = StaticLDSSize = alignTo(StaticLDSSize, Alignment);

  Entry.first->second = Offset;
  StaticLDSSize += DL.getTypeAllocSize(GV.getValueType());

  // Update the LDS size considering the padding to align the dynamic shared
  // memory.
  LDSSize = alignTo(StaticLDSSize, DynLDSAlign);

  return Offset;
}

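// For module entry functions, reserve space for the combined module LDS
// variable (llvm.amdgcn.module.lds), if present, before any other LDS global
// so that it always lands at offset 0.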
void AMDGPUMachineFunction::allocateModuleLDSGlobal(const Module *M) {
  if (isModuleEntryFunction()) {
    const GlobalVariable *GV = M->getNamedGlobal("llvm.amdgcn.module.lds");
    if (GV) {
      unsigned Offset = allocateLDSGlobal(M->getDataLayout(), *GV);
      (void)Offset;
      assert(Offset == 0 &&
             "Module LDS expected to be allocated before other LDS");
    }
  }
}

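// Record the alignment required by a dynamically sized (zero-size) LDS
// global. If it exceeds the current DynLDSAlign, pad LDSSize so the dynamic
// region starts suitably aligned after the static allocations.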
void AMDGPUMachineFunction::setDynLDSAlign(const DataLayout &DL,
                                           const GlobalVariable &GV) {
  assert(DL.getTypeAllocSize(GV.getValueType()).isZero());

  Align Alignment =
      DL.getValueOrABITypeAlignment(GV.getAlign(), GV.getValueType());
  if (Alignment <= DynLDSAlign)
    return;

  LDSSize = alignTo(StaticLDSSize, Alignment);
  DynLDSAlign = Alignment;
}
89