//===-- AMDGPUMachineFunction.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUMachineFunction.h"
#include "AMDGPUPerfHintAnalysis.h"
#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

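// Cache per-function state that is queried throughout code generation: the
// entry-point kind derived from the calling convention, the no-signed-zeros
// FP option, the "amdgpu-memory-bound" / "amdgpu-wave-limiter" hints set by
// AMDGPUPerfHintAnalysis, and the explicit kernel argument size for kernel
// calling conventions.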
AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF)
    : Mode(MF.getFunction()), IsEntryFunction(AMDGPU::isEntryFunctionCC(
                                  MF.getFunction().getCallingConv())),
      IsModuleEntryFunction(
          AMDGPU::isModuleEntryFunctionCC(MF.getFunction().getCallingConv())),
      NoSignedZerosFPMath(MF.getTarget().Options.NoSignedZerosFPMath) {
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);

  // FIXME: Should initialize KernArgSize based on ExplicitKernelArgOffset,
  // except reserved size is not correctly aligned.
  const Function &F = MF.getFunction();

  Attribute MemBoundAttr = F.getFnAttribute("amdgpu-memory-bound");
  MemoryBound = MemBoundAttr.getValueAsBool();

  Attribute WaveLimitAttr = F.getFnAttribute("amdgpu-wave-limiter");
  WaveLimiter = WaveLimitAttr.getValueAsBool();

  CallingConv::ID CC = F.getCallingConv();
  if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL)
    ExplicitKernArgSize = ST.getExplicitKernArgSize(F, MaxKernArgAlign);
}

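// Assign GV a fixed offset in this function's static LDS region with a
// simple bump allocator: align the running StaticLDSSize up to the global's
// alignment, hand out that offset, then advance by the global's allocation
// size. Results are memoized in LocalMemoryObjects, so repeated queries for
// the same global return the same offset. For example, an i32 followed by a
// <4 x float> aligned to 16 bytes is placed at offsets 0 and 16, leaving
// StaticLDSSize == 32.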
unsigned AMDGPUMachineFunction::allocateLDSGlobal(const DataLayout &DL,
                                                  const GlobalVariable &GV) {
  auto Entry = LocalMemoryObjects.insert(std::make_pair(&GV, 0));
  if (!Entry.second)
    return Entry.first->second;

  Align Alignment =
      DL.getValueOrABITypeAlignment(GV.getAlign(), GV.getValueType());

  // TODO: We should sort these to minimize wasted space due to alignment
  // padding. Currently the padding is decided by the first encountered use
  // during lowering.
  unsigned Offset = StaticLDSSize = alignTo(StaticLDSSize, Alignment);

  Entry.first->second = Offset;
  StaticLDSSize += DL.getTypeAllocSize(GV.getValueType());

  // Update the LDS size considering the padding to align the dynamic shared
  // memory.
  LDSSize = alignTo(StaticLDSSize, DynLDSAlign);

  return Offset;
}

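// If the module-scope LDS variable llvm.amdgcn.module.lds is present,
// allocate it in module entry functions before any other LDS so that it
// always lands at offset 0; functions that cannot see the per-kernel LDS
// layout can then address it at a known offset.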
void AMDGPUMachineFunction::allocateModuleLDSGlobal(const Module *M) {
  if (isModuleEntryFunction()) {
    const GlobalVariable *GV = M->getNamedGlobal("llvm.amdgcn.module.lds");
    if (GV) {
      unsigned Offset = allocateLDSGlobal(M->getDataLayout(), *GV);
      (void)Offset;
      assert(Offset == 0 &&
             "Module LDS expected to be allocated before other LDS");
    }
  }
}

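// Record the alignment requested by a dynamic (zero-sized) LDS global. Only
// the maximum alignment seen so far matters: dynamic LDS is appended after
// the static region at dispatch time, so this just re-pads LDSSize to the
// new, larger alignment.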
void AMDGPUMachineFunction::setDynLDSAlign(const DataLayout &DL,
                                           const GlobalVariable &GV) {
  assert(DL.getTypeAllocSize(GV.getValueType()).isZero());

  Align Alignment =
      DL.getValueOrABITypeAlignment(GV.getAlign(), GV.getValueType());
  if (Alignment <= DynLDSAlign)
    return;

  LDSSize = alignTo(StaticLDSSize, Alignment);
  DynLDSAlign = Alignment;
}