//===----- RISCVMergeBaseOffset.cpp - Optimise address calculations ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Merge the offset of address calculation into the offset field
// of instructions in a global address lowering sequence.
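//
// As an illustrative sketch (register and symbol names here are arbitrary):
//
//   lui  a0, %hi(g)        --->  lui a0, %hi(g+8)
//   addi a0, a0, %lo(g)    --->  lw  a1, %lo(g+8)(a0)
//   lw   a1, 8(a0)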
//
//===----------------------------------------------------------------------===//

#include "RISCV.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include <optional>
using namespace llvm;

#define DEBUG_TYPE "riscv-merge-base-offset"
#define RISCV_MERGE_BASE_OFFSET_NAME "RISC-V Merge Base Offset"
namespace {

class RISCVMergeBaseOffsetOpt : public MachineFunctionPass {
  const RISCVSubtarget *ST = nullptr;
  MachineRegisterInfo *MRI;

public:
  static char ID;
  bool runOnMachineFunction(MachineFunction &Fn) override;
  bool detectFoldable(MachineInstr &Hi, MachineInstr *&Lo);

  bool detectAndFoldOffset(MachineInstr &Hi, MachineInstr &Lo);
  void foldOffset(MachineInstr &Hi, MachineInstr &Lo, MachineInstr &Tail,
                  int64_t Offset);
  bool foldLargeOffset(MachineInstr &Hi, MachineInstr &Lo,
                       MachineInstr &TailAdd, Register GSReg);
  bool foldShiftedOffset(MachineInstr &Hi, MachineInstr &Lo,
                         MachineInstr &TailShXAdd, Register GSReg);

  bool foldIntoMemoryOps(MachineInstr &Hi, MachineInstr &Lo);

  RISCVMergeBaseOffsetOpt() : MachineFunctionPass(ID) {}

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::IsSSA);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override {
    return RISCV_MERGE_BASE_OFFSET_NAME;
  }
};
} // end anonymous namespace

char RISCVMergeBaseOffsetOpt::ID = 0;
INITIALIZE_PASS(RISCVMergeBaseOffsetOpt, DEBUG_TYPE,
                RISCV_MERGE_BASE_OFFSET_NAME, false, false)

// Detect either of the patterns:
//
// 1. (medlow pattern):
//   lui   vreg1, %hi(s)
//   addi  vreg2, vreg1, %lo(s)
//
// 2. (medany pattern):
// .Lpcrel_hi1:
//   auipc vreg1, %pcrel_hi(s)
//   addi  vreg2, vreg1, %pcrel_lo(.Lpcrel_hi1)
//
// The pattern is only accepted if:
//   1) The first instruction has only one use, which is the ADDI.
//   2) The address operands have the appropriate type, reflecting the
//      lowering of a global address or constant pool using medlow or medany.
//   3) The offset value in the Global Address or Constant Pool is 0.
bool RISCVMergeBaseOffsetOpt::detectFoldable(MachineInstr &Hi,
                                             MachineInstr *&Lo) {
  if (Hi.getOpcode() != RISCV::LUI && Hi.getOpcode() != RISCV::AUIPC &&
      Hi.getOpcode() != RISCV::PseudoMovAddr)
    return false;

  const MachineOperand &HiOp1 = Hi.getOperand(1);
  unsigned ExpectedFlags =
      Hi.getOpcode() == RISCV::AUIPC ? RISCVII::MO_PCREL_HI : RISCVII::MO_HI;
  if (HiOp1.getTargetFlags() != ExpectedFlags)
    return false;

  if (!(HiOp1.isGlobal() || HiOp1.isCPI() || HiOp1.isBlockAddress()) ||
      HiOp1.getOffset() != 0)
    return false;

  if (Hi.getOpcode() == RISCV::PseudoMovAddr) {
    // Most of the code handles this correctly without modification as long as
    // both Lo and Hi point at the PseudoMovAddr.
    Lo = &Hi;
  } else {
    Register HiDestReg = Hi.getOperand(0).getReg();
    if (!MRI->hasOneUse(HiDestReg))
      return false;

    Lo = &*MRI->use_instr_begin(HiDestReg);
    if (Lo->getOpcode() != RISCV::ADDI)
      return false;
  }

  const MachineOperand &LoOp2 = Lo->getOperand(2);
  if (Hi.getOpcode() == RISCV::LUI || Hi.getOpcode() == RISCV::PseudoMovAddr) {
    if (LoOp2.getTargetFlags() != RISCVII::MO_LO ||
        !(LoOp2.isGlobal() || LoOp2.isCPI() || LoOp2.isBlockAddress()) ||
        LoOp2.getOffset() != 0)
      return false;
  } else {
    assert(Hi.getOpcode() == RISCV::AUIPC);
    if (LoOp2.getTargetFlags() != RISCVII::MO_PCREL_LO ||
        LoOp2.getType() != MachineOperand::MO_MCSymbol)
      return false;
  }

  if (HiOp1.isGlobal()) {
    LLVM_DEBUG(dbgs() << "  Found lowered global address: "
                      << *HiOp1.getGlobal() << "\n");
  } else if (HiOp1.isBlockAddress()) {
    LLVM_DEBUG(dbgs() << "  Found lowered block address: "
                      << *HiOp1.getBlockAddress() << "\n");
  } else if (HiOp1.isCPI()) {
    LLVM_DEBUG(dbgs() << "  Found lowered constant pool: " << HiOp1.getIndex()
                      << "\n");
  }

  return true;
}

// Update the offset in Hi and Lo instructions.
// Delete the tail instruction and update all the uses to use the
// output from Lo.
void RISCVMergeBaseOffsetOpt::foldOffset(MachineInstr &Hi, MachineInstr &Lo,
                                         MachineInstr &Tail, int64_t Offset) {
  assert(isInt<32>(Offset) && "Unexpected offset");
  // Put the offset back in Hi and, for non-AUIPC sequences, in Lo as well.
  Hi.getOperand(1).setOffset(Offset);
  if (Hi.getOpcode() != RISCV::AUIPC)
    Lo.getOperand(2).setOffset(Offset);
  // Delete the tail instruction.
  MRI->constrainRegClass(Lo.getOperand(0).getReg(),
                         MRI->getRegClass(Tail.getOperand(0).getReg()));
  MRI->replaceRegWith(Tail.getOperand(0).getReg(), Lo.getOperand(0).getReg());
  Tail.eraseFromParent();
  LLVM_DEBUG(dbgs() << "  Merged offset " << Offset << " into base.\n"
                    << "  " << Hi << "  " << Lo;);
}

// Detect patterns for large offsets that are passed into an ADD instruction.
// If the pattern is found, updates the offset in Hi and Lo instructions
// and deletes TailAdd and the instructions that produced the offset.
//
// Base address lowering is of the form:
//   Hi: lui  vreg1, %hi(s)
//   Lo: addi vreg2, vreg1, %lo(s)
//
// The large offset can take one of two forms:
//
// 1) Offset with non-zero bits in both      2) Offset with non-zero bits in
//    the lower 12 and upper 20 bits:           the upper 20 bits only:
//      OffsetLui:  lui  vreg3, 4                OffsetTail: lui voff, 128
//      OffsetTail: addi voff, vreg3, 188
//
// In both cases the offset feeds:
//   TailAdd: add vreg4, vreg2, voff
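//
// As a worked example (values are illustrative, not from a real test): form 1
// with "lui vreg3, 4; addi voff, vreg3, 188" materializes the offset
// (4 << 12) + 188 = 16572, while form 2 with "lui voff, 128" materializes
// 128 << 12 = 524288.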
bool RISCVMergeBaseOffsetOpt::foldLargeOffset(MachineInstr &Hi,
                                              MachineInstr &Lo,
                                              MachineInstr &TailAdd,
                                              Register GAReg) {
  assert((TailAdd.getOpcode() == RISCV::ADD) && "Expected ADD instruction!");
  Register Rs = TailAdd.getOperand(1).getReg();
  Register Rt = TailAdd.getOperand(2).getReg();
  Register Reg = Rs == GAReg ? Rt : Rs;

  // Can't fold if the register has more than one use.
  if (!Reg.isVirtual() || !MRI->hasOneUse(Reg))
    return false;
  // This can point to an ADDI(W) or a LUI:
  MachineInstr &OffsetTail = *MRI->getVRegDef(Reg);
  if (OffsetTail.getOpcode() == RISCV::ADDI ||
      OffsetTail.getOpcode() == RISCV::ADDIW) {
    // The offset value has non-zero bits in both the %hi and %lo parts.
    // Detect an ADDI that feeds from a LUI instruction.
    MachineOperand &AddiImmOp = OffsetTail.getOperand(2);
    if (AddiImmOp.getTargetFlags() != RISCVII::MO_None)
      return false;
    Register AddiReg = OffsetTail.getOperand(1).getReg();
    int64_t OffLo = AddiImmOp.getImm();

    // Handle the case where rs1 of the ADDI is X0, i.e. the offset is a bare
    // simm12 constant.
    if (AddiReg == RISCV::X0) {
      LLVM_DEBUG(dbgs() << "  Offset Instrs: " << OffsetTail);
      foldOffset(Hi, Lo, TailAdd, OffLo);
      OffsetTail.eraseFromParent();
      return true;
    }

    MachineInstr &OffsetLui = *MRI->getVRegDef(AddiReg);
    MachineOperand &LuiImmOp = OffsetLui.getOperand(1);
    if (OffsetLui.getOpcode() != RISCV::LUI ||
        LuiImmOp.getTargetFlags() != RISCVII::MO_None ||
        !MRI->hasOneUse(OffsetLui.getOperand(0).getReg()))
      return false;
    int64_t Offset = SignExtend64<32>(LuiImmOp.getImm() << 12);
    Offset += OffLo;
    // RV32 ignores the upper 32 bits. ADDIW sign extends the result.
    if (!ST->is64Bit() || OffsetTail.getOpcode() == RISCV::ADDIW)
      Offset = SignExtend64<32>(Offset);
    // We can only fold simm32 offsets.
    if (!isInt<32>(Offset))
      return false;
    LLVM_DEBUG(dbgs() << "  Offset Instrs: " << OffsetTail
                      << "                 " << OffsetLui);
    foldOffset(Hi, Lo, TailAdd, Offset);
    OffsetTail.eraseFromParent();
    OffsetLui.eraseFromParent();
    return true;
  } else if (OffsetTail.getOpcode() == RISCV::LUI) {
    // The offset value has all zero bits in the lower 12 bits. Only LUI
    // exists.
    LLVM_DEBUG(dbgs() << "  Offset Instr: " << OffsetTail);
    int64_t Offset = SignExtend64<32>(OffsetTail.getOperand(1).getImm() << 12);
    foldOffset(Hi, Lo, TailAdd, Offset);
    OffsetTail.eraseFromParent();
    return true;
  }
  return false;
}

// Detect patterns for offsets that are passed into a SHXADD instruction.
// The offset has 1, 2, or 3 trailing zeros and fits in simm13, simm14, or
// simm15, respectively. The constant is created with "addi voff, x0, C", and
// the shXadd inserts the trailing zeros and performs the addition.
// If the pattern is found, updates the offset in Hi and Lo instructions
// and deletes TailShXAdd and the instructions that produced the offset.
//
//   Hi:         lui vreg1, %hi(s)
//   Lo:         addi vreg2, vreg1, %lo(s)
//   OffsetTail: addi voff, x0, C
//   TailShXAdd: shXadd vreg4, voff, vreg2
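//
// For example (illustrative values): C = 1024 with SH3ADD folds the offset
// 1024 << 3 = 8192 into the base address.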
bool RISCVMergeBaseOffsetOpt::foldShiftedOffset(MachineInstr &Hi,
                                                MachineInstr &Lo,
                                                MachineInstr &TailShXAdd,
                                                Register GAReg) {
  assert((TailShXAdd.getOpcode() == RISCV::SH1ADD ||
          TailShXAdd.getOpcode() == RISCV::SH2ADD ||
          TailShXAdd.getOpcode() == RISCV::SH3ADD) &&
         "Expected SHXADD instruction!");

  if (GAReg != TailShXAdd.getOperand(2).getReg())
    return false;

  // The first source is the shifted operand.
  Register Rs1 = TailShXAdd.getOperand(1).getReg();

  // Can't fold if the register has more than one use.
  if (!Rs1.isVirtual() || !MRI->hasOneUse(Rs1))
    return false;
  // This can point to an ADDI X0, C.
  MachineInstr &OffsetTail = *MRI->getVRegDef(Rs1);
  if (OffsetTail.getOpcode() != RISCV::ADDI)
    return false;
  if (!OffsetTail.getOperand(1).isReg() ||
      OffsetTail.getOperand(1).getReg() != RISCV::X0 ||
      !OffsetTail.getOperand(2).isImm())
    return false;

  int64_t Offset = OffsetTail.getOperand(2).getImm();
  assert(isInt<12>(Offset) && "Unexpected offset");

  unsigned ShAmt;
  switch (TailShXAdd.getOpcode()) {
  default: llvm_unreachable("Unexpected opcode");
  case RISCV::SH1ADD: ShAmt = 1; break;
  case RISCV::SH2ADD: ShAmt = 2; break;
  case RISCV::SH3ADD: ShAmt = 3; break;
  }

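  // C is a simm12 and ShAmt is at most 3, so the shifted value fits in simm15
  // (well within the simm32 that foldOffset accepts); shift as unsigned to
  // avoid left-shifting a negative value.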
  Offset = (uint64_t)Offset << ShAmt;

  LLVM_DEBUG(dbgs() << "  Offset Instr: " << OffsetTail);
  foldOffset(Hi, Lo, TailShXAdd, Offset);
  OffsetTail.eraseFromParent();
  return true;
}

bool RISCVMergeBaseOffsetOpt::detectAndFoldOffset(MachineInstr &Hi,
                                                  MachineInstr &Lo) {
  Register DestReg = Lo.getOperand(0).getReg();

  // Look for arithmetic instructions we can get an offset from.
  // We might be able to remove the arithmetic instructions by folding the
  // offset into the LUI+ADDI.
  if (!MRI->hasOneUse(DestReg))
    return false;

  // Lo has only one use.
  MachineInstr &Tail = *MRI->use_instr_begin(DestReg);
  switch (Tail.getOpcode()) {
  default:
    LLVM_DEBUG(dbgs() << "Don't know how to get offset from this instr:"
                      << Tail);
    break;
  case RISCV::ADDI: {
    // Offset is simply an immediate operand.
    int64_t Offset = Tail.getOperand(2).getImm();

    // We might have two ADDIs in a row.
    Register TailDestReg = Tail.getOperand(0).getReg();
    if (MRI->hasOneUse(TailDestReg)) {
      MachineInstr &TailTail = *MRI->use_instr_begin(TailDestReg);
      if (TailTail.getOpcode() == RISCV::ADDI) {
        Offset += TailTail.getOperand(2).getImm();
        LLVM_DEBUG(dbgs() << "  Offset Instrs: " << Tail << TailTail);
        foldOffset(Hi, Lo, TailTail, Offset);
        Tail.eraseFromParent();
        return true;
      }
    }

    LLVM_DEBUG(dbgs() << "  Offset Instr: " << Tail);
    foldOffset(Hi, Lo, Tail, Offset);
    return true;
  }
  case RISCV::ADD:
    // The offset is too large to fit in the immediate field of ADDI.
    // This can be in two forms:
    // 1) LUI hi_offset followed by:
    //      ADDI lo_offset
    //    This happens when the offset has non-zero bits in
    //    both the hi 20 and lo 12 bits.
    // 2) LUI (offset20)
    //    This happens when the lower 12 bits of the offset are zeros.
    return foldLargeOffset(Hi, Lo, Tail, DestReg);
  case RISCV::SH1ADD:
  case RISCV::SH2ADD:
  case RISCV::SH3ADD:
    // The offset is too large to fit in the immediate field of ADDI.
    // It may be encoded as (SH1ADD/SH2ADD/SH3ADD (ADDI X0, C), DestReg).
    return foldShiftedOffset(Hi, Lo, Tail, DestReg);
  }

  return false;
}

bool RISCVMergeBaseOffsetOpt::foldIntoMemoryOps(MachineInstr &Hi,
                                                MachineInstr &Lo) {
  Register DestReg = Lo.getOperand(0).getReg();

  // If all the uses are memory ops with the same offset, we can transform:
  //
  // 1. (medlow pattern):
  //   Hi:   lui vreg1, %hi(foo)          --->  lui vreg1, %hi(foo+8)
  //   Lo:   addi vreg2, vreg1, %lo(foo)  --->  lw vreg3, %lo(foo+8)(vreg1)
  //   Tail: lw vreg3, 8(vreg2)
  //
  // 2. (medany pattern):
  //   Hi:   1:auipc vreg1, %pcrel_hi(foo)    ---> auipc vreg1, %pcrel_hi(foo+8)
  //   Lo:   addi vreg2, vreg1, %pcrel_lo(1b) ---> lw vreg3, %pcrel_lo(1b)(vreg1)
  //   Tail: lw vreg3, 8(vreg2)
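  //
  // Inline asm with a memory constraint whose address operand is vreg2 is
  // rewritten similarly: the immediate offset operand of the memory
  // constraint is replaced by the relocated symbol carrying the folded
  // offset.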

  std::optional<int64_t> CommonOffset;
  DenseMap<const MachineInstr *, SmallVector<unsigned>>
      InlineAsmMemoryOpIndexesMap;
  for (const MachineInstr &UseMI : MRI->use_instructions(DestReg)) {
    switch (UseMI.getOpcode()) {
    default:
      LLVM_DEBUG(dbgs() << "Not a load or store instruction: " << UseMI);
      return false;
    case RISCV::LB:
    case RISCV::LH:
    case RISCV::LW:
    case RISCV::LBU:
    case RISCV::LHU:
    case RISCV::LWU:
    case RISCV::LD:
    case RISCV::FLH:
    case RISCV::FLW:
    case RISCV::FLD:
    case RISCV::SB:
    case RISCV::SH:
    case RISCV::SW:
    case RISCV::SD:
    case RISCV::FSH:
    case RISCV::FSW:
    case RISCV::FSD: {
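      // Don't fold if the memory op addresses a frame index rather than the
      // register defined by Lo.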
      if (UseMI.getOperand(1).isFI())
        return false;
      // Register defined by Lo should not be the value register.
      if (DestReg == UseMI.getOperand(0).getReg())
        return false;
      assert(DestReg == UseMI.getOperand(1).getReg() &&
             "Expected base address use");
      // All load/store instructions must use the same offset.
      int64_t Offset = UseMI.getOperand(2).getImm();
      if (CommonOffset && Offset != CommonOffset)
        return false;
      CommonOffset = Offset;
      break;
    }
    case RISCV::INLINEASM:
    case RISCV::INLINEASM_BR: {
      SmallVector<unsigned> InlineAsmMemoryOpIndexes;
      unsigned NumOps = 0;
      for (unsigned I = InlineAsm::MIOp_FirstOperand;
           I < UseMI.getNumOperands(); I += 1 + NumOps) {
        const MachineOperand &FlagsMO = UseMI.getOperand(I);
        // Should be an imm.
        if (!FlagsMO.isImm())
          continue;

        const InlineAsm::Flag Flags(FlagsMO.getImm());
        NumOps = Flags.getNumOperandRegisters();

        // Memory constraints have two operands.
        if (NumOps != 2 || !Flags.isMemKind()) {
          // If the register is used by something other than a memory
          // constraint, we should not fold.
          for (unsigned J = 0; J < NumOps; ++J) {
            const MachineOperand &MO = UseMI.getOperand(I + 1 + J);
            if (MO.isReg() && MO.getReg() == DestReg)
              return false;
          }
          continue;
        }

        // We can't do this for constraint A because AMO instructions don't
        // have an immediate offset field.
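        // (For example, "amoswap.w rd, rs2, (rs1)" takes a bare register
        // address with no displacement.)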
        if (Flags.getMemoryConstraintID() == InlineAsm::ConstraintCode::A)
          return false;

        const MachineOperand &AddrMO = UseMI.getOperand(I + 1);
        if (!AddrMO.isReg() || AddrMO.getReg() != DestReg)
          continue;

        const MachineOperand &OffsetMO = UseMI.getOperand(I + 2);
        if (!OffsetMO.isImm())
          continue;

        // All inline asm memory operands must use the same offset.
        int64_t Offset = OffsetMO.getImm();
        if (CommonOffset && Offset != CommonOffset)
          return false;
        CommonOffset = Offset;
        InlineAsmMemoryOpIndexes.push_back(I + 1);
      }
      InlineAsmMemoryOpIndexesMap.insert(
          std::make_pair(&UseMI, InlineAsmMemoryOpIndexes));
      break;
    }
    }
  }

  // Bail if we did not find any memory operand to take an offset from
  // (e.g. DestReg turned out to have no qualifying uses); otherwise
  // *CommonOffset below would dereference an empty optional.
  if (!CommonOffset)
    return false;

  // We found a common offset.
  // Update the offsets in global address lowering.
  // We may have already folded some arithmetic so we need to add to any
  // existing offset.
  int64_t NewOffset = Hi.getOperand(1).getOffset() + *CommonOffset;
  // RV32 ignores the upper 32 bits.
  if (!ST->is64Bit())
    NewOffset = SignExtend64<32>(NewOffset);
  // We can only fold simm32 offsets.
  if (!isInt<32>(NewOffset))
    return false;

  Hi.getOperand(1).setOffset(NewOffset);
  MachineOperand &ImmOp = Lo.getOperand(2);
  // Expand PseudoMovAddr into LUI.
  if (Hi.getOpcode() == RISCV::PseudoMovAddr) {
    auto *TII = ST->getInstrInfo();
    Hi.setDesc(TII->get(RISCV::LUI));
    Hi.removeOperand(2);
  }

  if (Hi.getOpcode() != RISCV::AUIPC)
    ImmOp.setOffset(NewOffset);

  // Update the immediate in the load/store instructions to add the offset.
  for (MachineInstr &UseMI :
       llvm::make_early_inc_range(MRI->use_instructions(DestReg))) {
    if (UseMI.getOpcode() == RISCV::INLINEASM ||
        UseMI.getOpcode() == RISCV::INLINEASM_BR) {
      auto &InlineAsmMemoryOpIndexes = InlineAsmMemoryOpIndexesMap[&UseMI];
      for (unsigned I : InlineAsmMemoryOpIndexes) {
        MachineOperand &MO = UseMI.getOperand(I + 1);
        switch (ImmOp.getType()) {
        case MachineOperand::MO_GlobalAddress:
          MO.ChangeToGA(ImmOp.getGlobal(), ImmOp.getOffset(),
                        ImmOp.getTargetFlags());
          break;
        case MachineOperand::MO_MCSymbol:
          MO.ChangeToMCSymbol(ImmOp.getMCSymbol(), ImmOp.getTargetFlags());
          MO.setOffset(ImmOp.getOffset());
          break;
        case MachineOperand::MO_BlockAddress:
          MO.ChangeToBA(ImmOp.getBlockAddress(), ImmOp.getOffset(),
                        ImmOp.getTargetFlags());
          break;
        default:
          report_fatal_error("unsupported machine operand type");
          break;
        }
      }
    } else {
      UseMI.removeOperand(2);
      UseMI.addOperand(ImmOp);
    }
  }

  // When Hi and Lo alias (the PseudoMovAddr case), there is no separate Lo
  // instruction to erase.
  if (&Lo == &Hi)
    return true;

  MRI->replaceRegWith(Lo.getOperand(0).getReg(), Hi.getOperand(0).getReg());
  Lo.eraseFromParent();
  return true;
}

bool RISCVMergeBaseOffsetOpt::runOnMachineFunction(MachineFunction &Fn) {
  if (skipFunction(Fn.getFunction()))
    return false;

  ST = &Fn.getSubtarget<RISCVSubtarget>();

  bool MadeChange = false;
  MRI = &Fn.getRegInfo();
  for (MachineBasicBlock &MBB : Fn) {
    LLVM_DEBUG(dbgs() << "MBB: " << MBB.getName() << "\n");
    for (MachineInstr &Hi : MBB) {
      MachineInstr *Lo = nullptr;
      if (!detectFoldable(Hi, Lo))
        continue;
      MadeChange |= detectAndFoldOffset(Hi, *Lo);
      MadeChange |= foldIntoMemoryOps(Hi, *Lo);
    }
  }

  return MadeChange;
}

/// Returns an instance of the Merge Base Offset Optimization pass.
FunctionPass *llvm::createRISCVMergeBaseOffsetOptPass() {
  return new RISCVMergeBaseOffsetOpt();
}