//===----- RISCVMergeBaseOffset.cpp - Optimise address calculations  ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Merge the offset of address calculation into the offset field
// of instructions in a global address lowering sequence.
//
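// An illustrative example of the transformation (medlow code model; register
// and symbol names are made up):
//
//   lui  a0, %hi(sym)                  lui  a0, %hi(sym+8)
//   addi a0, a0, %lo(sym)      --->    lw   a1, %lo(sym+8)(a0)
//   lw   a1, 8(a0)
//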
//===----------------------------------------------------------------------===//

#include "RISCV.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include <optional>
#include <set>
using namespace llvm;

#define DEBUG_TYPE "riscv-merge-base-offset"
#define RISCV_MERGE_BASE_OFFSET_NAME "RISC-V Merge Base Offset"
namespace {

class RISCVMergeBaseOffsetOpt : public MachineFunctionPass {
  const RISCVSubtarget *ST = nullptr;
  MachineRegisterInfo *MRI;

public:
  static char ID;
  bool runOnMachineFunction(MachineFunction &Fn) override;
  bool detectFoldable(MachineInstr &Hi, MachineInstr *&Lo);

  bool detectAndFoldOffset(MachineInstr &Hi, MachineInstr &Lo);
  void foldOffset(MachineInstr &Hi, MachineInstr &Lo, MachineInstr &Tail,
                  int64_t Offset);
  bool foldLargeOffset(MachineInstr &Hi, MachineInstr &Lo,
                       MachineInstr &TailAdd, Register GAReg);
  bool foldShiftedOffset(MachineInstr &Hi, MachineInstr &Lo,
                         MachineInstr &TailShXAdd, Register GAReg);

  bool foldIntoMemoryOps(MachineInstr &Hi, MachineInstr &Lo);

  RISCVMergeBaseOffsetOpt() : MachineFunctionPass(ID) {}

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::IsSSA);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override {
    return RISCV_MERGE_BASE_OFFSET_NAME;
  }
};
} // end anonymous namespace

char RISCVMergeBaseOffsetOpt::ID = 0;
INITIALIZE_PASS(RISCVMergeBaseOffsetOpt, DEBUG_TYPE,
                RISCV_MERGE_BASE_OFFSET_NAME, false, false)

// Detect either of the patterns:
//
// 1. (medlow pattern):
//   lui   vreg1, %hi(s)
//   addi  vreg2, vreg1, %lo(s)
//
// 2. (medany pattern):
// .Lpcrel_hi1:
//   auipc vreg1, %pcrel_hi(s)
//   addi  vreg2, vreg1, %pcrel_lo(.Lpcrel_hi1)
//
// The pattern is only accepted if:
//    1) The first instruction has only one use, which is the ADDI.
//    2) The address operands have the appropriate type, reflecting the
//       lowering of a global address or constant pool using medlow or medany.
//    3) The offset value in the Global Address or Constant Pool is 0.
bool RISCVMergeBaseOffsetOpt::detectFoldable(MachineInstr &Hi,
                                             MachineInstr *&Lo) {
  if (Hi.getOpcode() != RISCV::LUI && Hi.getOpcode() != RISCV::AUIPC)
    return false;

  const MachineOperand &HiOp1 = Hi.getOperand(1);
  unsigned ExpectedFlags =
      Hi.getOpcode() == RISCV::AUIPC ? RISCVII::MO_PCREL_HI : RISCVII::MO_HI;
  if (HiOp1.getTargetFlags() != ExpectedFlags)
    return false;

  if (!(HiOp1.isGlobal() || HiOp1.isCPI()) || HiOp1.getOffset() != 0)
    return false;

  Register HiDestReg = Hi.getOperand(0).getReg();
  if (!MRI->hasOneUse(HiDestReg))
    return false;

  Lo = &*MRI->use_instr_begin(HiDestReg);
  if (Lo->getOpcode() != RISCV::ADDI)
    return false;

  const MachineOperand &LoOp2 = Lo->getOperand(2);
  if (Hi.getOpcode() == RISCV::LUI) {
    if (LoOp2.getTargetFlags() != RISCVII::MO_LO ||
        !(LoOp2.isGlobal() || LoOp2.isCPI()) || LoOp2.getOffset() != 0)
      return false;
  } else {
    assert(Hi.getOpcode() == RISCV::AUIPC);
    if (LoOp2.getTargetFlags() != RISCVII::MO_PCREL_LO ||
        LoOp2.getType() != MachineOperand::MO_MCSymbol)
      return false;
  }

  if (HiOp1.isGlobal()) {
    LLVM_DEBUG(dbgs() << "  Found lowered global address: "
                      << *HiOp1.getGlobal() << "\n");
  } else {
    assert(HiOp1.isCPI());
    LLVM_DEBUG(dbgs() << "  Found lowered constant pool: " << HiOp1.getIndex()
                      << "\n");
  }

  return true;
}

// Update the offset in Hi and Lo instructions.
// Delete the tail instruction and update all the uses to use the
// output from Lo.
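// For example (illustrative, medlow case; virtual register names arbitrary):
//   Hi:   lui   vreg1, %hi(s)         --->  lui   vreg1, %hi(s+8)
//   Lo:   addi  vreg2, vreg1, %lo(s)  --->  addi  vreg2, vreg1, %lo(s+8)
//   Tail: addi  vreg3, vreg2, 8       --->  deleted; uses of vreg3 use vreg2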
void RISCVMergeBaseOffsetOpt::foldOffset(MachineInstr &Hi, MachineInstr &Lo,
                                         MachineInstr &Tail, int64_t Offset) {
  assert(isInt<32>(Offset) && "Unexpected offset");
  // Put the offset back into the Hi and Lo instructions.
  Hi.getOperand(1).setOffset(Offset);
  if (Hi.getOpcode() != RISCV::AUIPC)
    Lo.getOperand(2).setOffset(Offset);
  // Delete the tail instruction.
  MRI->constrainRegClass(Lo.getOperand(0).getReg(),
                         MRI->getRegClass(Tail.getOperand(0).getReg()));
  MRI->replaceRegWith(Tail.getOperand(0).getReg(), Lo.getOperand(0).getReg());
  Tail.eraseFromParent();
  LLVM_DEBUG(dbgs() << "  Merged offset " << Offset << " into base.\n"
                    << "     " << Hi << "     " << Lo;);
}

// Detect patterns for large offsets that are passed into an ADD instruction.
// If the pattern is found, updates the offset in Hi and Lo instructions
// and deletes TailAdd and the instructions that produced the offset.
//
//                     Base address lowering is of the form:
//                       Hi:  lui   vreg1, %hi(s)
//                       Lo:  addi  vreg2, vreg1, %lo(s)
//                       /                                  \
//                      /                                    \
//                     /                                      \
//                    /  The large offset can be of two forms: \
//  1) Offset that has non zero bits in lower      2) Offset that has non zero
//     12 bits and upper 20 bits                      bits in upper 20 bits only
//  OffsetLui: lui   vreg3, 4
// OffsetTail: addi  voff, vreg3, 188                OffsetTail: lui  voff, 128
//                    \                                        /
//                     \                                      /
//                      \                                    /
//                       \                                  /
//                         TailAdd: add  vreg4, vreg2, voff
bool RISCVMergeBaseOffsetOpt::foldLargeOffset(MachineInstr &Hi,
                                              MachineInstr &Lo,
                                              MachineInstr &TailAdd,
                                              Register GAReg) {
  assert((TailAdd.getOpcode() == RISCV::ADD) && "Expected ADD instruction!");
  Register Rs = TailAdd.getOperand(1).getReg();
  Register Rt = TailAdd.getOperand(2).getReg();
  Register Reg = Rs == GAReg ? Rt : Rs;

  // Can't fold if the register has more than one use.
  if (!MRI->hasOneUse(Reg))
    return false;
  // This can point to an ADDI(W) or a LUI:
  MachineInstr &OffsetTail = *MRI->getVRegDef(Reg);
  if (OffsetTail.getOpcode() == RISCV::ADDI ||
      OffsetTail.getOpcode() == RISCV::ADDIW) {
    // The offset value has non zero bits in both %hi and %lo parts.
    // Detect an ADDI that feeds from a LUI instruction.
    MachineOperand &AddiImmOp = OffsetTail.getOperand(2);
    if (AddiImmOp.getTargetFlags() != RISCVII::MO_None)
      return false;
    int64_t OffLo = AddiImmOp.getImm();
    MachineInstr &OffsetLui =
        *MRI->getVRegDef(OffsetTail.getOperand(1).getReg());
    MachineOperand &LuiImmOp = OffsetLui.getOperand(1);
    if (OffsetLui.getOpcode() != RISCV::LUI ||
        LuiImmOp.getTargetFlags() != RISCVII::MO_None ||
        !MRI->hasOneUse(OffsetLui.getOperand(0).getReg()))
      return false;
    int64_t Offset = SignExtend64<32>(LuiImmOp.getImm() << 12);
    Offset += OffLo;
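    // e.g. with the diagram above: (4 << 12) + 188 == 16572.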
    // RV32 ignores the upper 32 bits. ADDIW sign extends the result.
    if (!ST->is64Bit() || OffsetTail.getOpcode() == RISCV::ADDIW)
      Offset = SignExtend64<32>(Offset);
    // We can only fold simm32 offsets.
    if (!isInt<32>(Offset))
      return false;
    LLVM_DEBUG(dbgs() << "  Offset Instrs: " << OffsetTail
                      << "                 " << OffsetLui);
    foldOffset(Hi, Lo, TailAdd, Offset);
    OffsetTail.eraseFromParent();
    OffsetLui.eraseFromParent();
    return true;
  } else if (OffsetTail.getOpcode() == RISCV::LUI) {
    // The offset value has all zero bits in the lower 12 bits. Only LUI
    // exists.
    LLVM_DEBUG(dbgs() << "  Offset Instr: " << OffsetTail);
    int64_t Offset = SignExtend64<32>(OffsetTail.getOperand(1).getImm() << 12);
    foldOffset(Hi, Lo, TailAdd, Offset);
    OffsetTail.eraseFromParent();
    return true;
  }
  return false;
}

// Detect patterns for offsets that are passed into a SHXADD instruction.
// The offset has 1, 2, or 3 trailing zeros and fits in simm13, simm14, or
// simm15 respectively. The constant is created with addi voff, x0, C, and
// shXadd is used to fill in the trailing zeros and do the addition.
// If the pattern is found, updates the offset in Hi and Lo instructions
// and deletes TailShXAdd and the instructions that produced the offset.
//
// Hi:         lui     vreg1, %hi(s)
// Lo:         addi    vreg2, vreg1, %lo(s)
// OffsetTail: addi    voff, x0, C
// TailShXAdd: shXadd  vreg4, voff, vreg2
bool RISCVMergeBaseOffsetOpt::foldShiftedOffset(MachineInstr &Hi,
                                                MachineInstr &Lo,
                                                MachineInstr &TailShXAdd,
                                                Register GAReg) {
  assert((TailShXAdd.getOpcode() == RISCV::SH1ADD ||
          TailShXAdd.getOpcode() == RISCV::SH2ADD ||
          TailShXAdd.getOpcode() == RISCV::SH3ADD) &&
         "Expected SHXADD instruction!");

  // The first source is the shifted operand.
  Register Rs1 = TailShXAdd.getOperand(1).getReg();

  if (GAReg != TailShXAdd.getOperand(2).getReg())
    return false;

  // Can't fold if the register has more than one use.
  if (!MRI->hasOneUse(Rs1))
    return false;
  // This can point to an ADDI X0, C.
  MachineInstr &OffsetTail = *MRI->getVRegDef(Rs1);
  if (OffsetTail.getOpcode() != RISCV::ADDI)
    return false;
  if (!OffsetTail.getOperand(1).isReg() ||
      OffsetTail.getOperand(1).getReg() != RISCV::X0 ||
      !OffsetTail.getOperand(2).isImm())
    return false;

  int64_t Offset = OffsetTail.getOperand(2).getImm();
  assert(isInt<12>(Offset) && "Unexpected offset");

  unsigned ShAmt;
  switch (TailShXAdd.getOpcode()) {
  default: llvm_unreachable("Unexpected opcode");
  case RISCV::SH1ADD: ShAmt = 1; break;
  case RISCV::SH2ADD: ShAmt = 2; break;
  case RISCV::SH3ADD: ShAmt = 3; break;
  }

  Offset = (uint64_t)Offset << ShAmt;

  LLVM_DEBUG(dbgs() << "  Offset Instr: " << OffsetTail);
  foldOffset(Hi, Lo, TailShXAdd, Offset);
  OffsetTail.eraseFromParent();
  return true;
}

bool RISCVMergeBaseOffsetOpt::detectAndFoldOffset(MachineInstr &Hi,
                                                  MachineInstr &Lo) {
  Register DestReg = Lo.getOperand(0).getReg();

  // Look for arithmetic instructions we can get an offset from.
  // We might be able to remove the arithmetic instructions by folding the
  // offset into the LUI+ADDI.
  if (!MRI->hasOneUse(DestReg))
    return false;

  // Lo has only one use.
  MachineInstr &Tail = *MRI->use_instr_begin(DestReg);
  switch (Tail.getOpcode()) {
  default:
    LLVM_DEBUG(dbgs() << "Don't know how to get offset from this instr:"
                      << Tail);
    break;
  case RISCV::ADDI: {
    // Offset is simply an immediate operand.
    int64_t Offset = Tail.getOperand(2).getImm();

    // We might have two ADDIs in a row.
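    // Illustrative example (immediates chosen arbitrarily):
    //   Tail:     addi vreg3, vreg2, 2047
    //   TailTail: addi vreg4, vreg3, 961
    // folds as a single offset of 2047 + 961 = 3008.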
    Register TailDestReg = Tail.getOperand(0).getReg();
    if (MRI->hasOneUse(TailDestReg)) {
      MachineInstr &TailTail = *MRI->use_instr_begin(TailDestReg);
      if (TailTail.getOpcode() == RISCV::ADDI) {
        Offset += TailTail.getOperand(2).getImm();
        LLVM_DEBUG(dbgs() << "  Offset Instrs: " << Tail << TailTail);
        foldOffset(Hi, Lo, TailTail, Offset);
        Tail.eraseFromParent();
        return true;
      }
    }

    LLVM_DEBUG(dbgs() << "  Offset Instr: " << Tail);
    foldOffset(Hi, Lo, Tail, Offset);
    return true;
  }
  case RISCV::ADD:
    // The offset is too large to fit in the immediate field of ADDI.
    // This can take two forms:
    // 1) LUI hi_offset followed by:
    //    ADDI lo_offset
    //    This happens when the offset has non-zero bits in
    //    both the hi 20 and lo 12 bits.
    // 2) LUI (offset20)
    //    This happens when the lower 12 bits of the offset are zero.
    return foldLargeOffset(Hi, Lo, Tail, DestReg);
  case RISCV::SH1ADD:
  case RISCV::SH2ADD:
  case RISCV::SH3ADD:
    // The offset is too large to fit in the immediate field of ADDI.
    // It may be encoded as (SH1ADD/SH2ADD/SH3ADD (ADDI X0, C), DestReg),
    // i.e. a small constant shifted left by 1, 2, or 3 and added to DestReg.
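    // Illustrative example (values arbitrary): an offset of 4100 may appear as
    //   OffsetTail: addi   voff, x0, 1025
    //   TailShXAdd: sh2add vreg4, voff, vreg2
    // since 1025 << 2 == 4100.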
    return foldShiftedOffset(Hi, Lo, Tail, DestReg);
  }

  return false;
}

bool RISCVMergeBaseOffsetOpt::foldIntoMemoryOps(MachineInstr &Hi,
                                                MachineInstr &Lo) {
  Register DestReg = Lo.getOperand(0).getReg();

  // If all the uses are memory ops with the same offset, we can transform:
  //
  // 1. (medlow pattern):
  // Hi:   lui vreg1, %hi(foo)          --->  lui vreg1, %hi(foo+8)
  // Lo:   addi vreg2, vreg1, %lo(foo)  --->  lw vreg3, %lo(foo+8)(vreg1)
  // Tail: lw vreg3, 8(vreg2)
  //
  // 2. (medany pattern):
  // Hi: 1:auipc vreg1, %pcrel_hi(foo)       ---> auipc vreg1, %pcrel_hi(foo+8)
  // Lo:   addi  vreg2, vreg1, %pcrel_lo(1b) ---> lw vreg3, %pcrel_lo(1b)(vreg1)
  // Tail: lw vreg3, 8(vreg2)

  std::optional<int64_t> CommonOffset;
  for (const MachineInstr &UseMI : MRI->use_instructions(DestReg)) {
    switch (UseMI.getOpcode()) {
    default:
      LLVM_DEBUG(dbgs() << "Not a load or store instruction: " << UseMI);
      return false;
    case RISCV::LB:
    case RISCV::LH:
    case RISCV::LW:
    case RISCV::LBU:
    case RISCV::LHU:
    case RISCV::LWU:
    case RISCV::LD:
    case RISCV::FLH:
    case RISCV::FLW:
    case RISCV::FLD:
    case RISCV::SB:
    case RISCV::SH:
    case RISCV::SW:
    case RISCV::SD:
    case RISCV::FSH:
    case RISCV::FSW:
    case RISCV::FSD: {
      if (UseMI.getOperand(1).isFI())
        return false;
      // Register defined by Lo should not be the value register.
      if (DestReg == UseMI.getOperand(0).getReg())
        return false;
      assert(DestReg == UseMI.getOperand(1).getReg() &&
             "Expected base address use");
      // All load/store instructions must use the same offset.
      int64_t Offset = UseMI.getOperand(2).getImm();
      if (CommonOffset && Offset != CommonOffset)
        return false;
      CommonOffset = Offset;
    }
    }
  }

  // We found a common offset.
  // Update the offsets in global address lowering.
  // We may have already folded some arithmetic so we need to add to any
  // existing offset.
  int64_t NewOffset = Hi.getOperand(1).getOffset() + *CommonOffset;
  // RV32 ignores the upper 32 bits.
  if (!ST->is64Bit())
    NewOffset = SignExtend64<32>(NewOffset);
  // We can only fold simm32 offsets.
  if (!isInt<32>(NewOffset))
    return false;

  Hi.getOperand(1).setOffset(NewOffset);
  MachineOperand &ImmOp = Lo.getOperand(2);
  if (Hi.getOpcode() != RISCV::AUIPC)
    ImmOp.setOffset(NewOffset);

  // Update the immediate in the load/store instructions to add the offset.
  for (MachineInstr &UseMI :
       llvm::make_early_inc_range(MRI->use_instructions(DestReg))) {
    UseMI.removeOperand(2);
    UseMI.addOperand(ImmOp);
    // Update the base register in the Tail instruction to feed directly from
    // Hi. The output of Hi is only used by Lo, so there is no need for
    // MRI->replaceRegWith().
    UseMI.getOperand(1).setReg(Hi.getOperand(0).getReg());
  }

  Lo.eraseFromParent();
  return true;
}

bool RISCVMergeBaseOffsetOpt::runOnMachineFunction(MachineFunction &Fn) {
  if (skipFunction(Fn.getFunction()))
    return false;

  ST = &Fn.getSubtarget<RISCVSubtarget>();

  bool MadeChange = false;
  MRI = &Fn.getRegInfo();
  for (MachineBasicBlock &MBB : Fn) {
    LLVM_DEBUG(dbgs() << "MBB: " << MBB.getName() << "\n");
    for (MachineInstr &Hi : MBB) {
      MachineInstr *Lo = nullptr;
      if (!detectFoldable(Hi, Lo))
        continue;
      MadeChange |= detectAndFoldOffset(Hi, *Lo);
      MadeChange |= foldIntoMemoryOps(Hi, *Lo);
    }
  }

  return MadeChange;
}

/// Returns an instance of the Merge Base Offset Optimization pass.
FunctionPass *llvm::createRISCVMergeBaseOffsetOptPass() {
  return new RISCVMergeBaseOffsetOpt();
}