xref: /freebsd/contrib/llvm-project/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp (revision 700637cbb5e582861067a11aaca4d053546871d2)
1 //===- ARMConstantIslandPass.cpp - ARM constant islands -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains a pass that splits the constant pool up into 'islands'
10 // which are scattered throughout the function.  This is required due to the
11 // limited pc-relative displacements that ARM has.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "ARM.h"
16 #include "ARMBaseInstrInfo.h"
17 #include "ARMBasicBlockInfo.h"
18 #include "ARMMachineFunctionInfo.h"
19 #include "ARMSubtarget.h"
20 #include "MCTargetDesc/ARMBaseInfo.h"
21 #include "MVETailPredUtils.h"
22 #include "Thumb2InstrInfo.h"
23 #include "Utils/ARMBaseInfo.h"
24 #include "llvm/ADT/DenseMap.h"
25 #include "llvm/ADT/STLExtras.h"
26 #include "llvm/ADT/SmallSet.h"
27 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/ADT/Statistic.h"
29 #include "llvm/ADT/StringRef.h"
30 #include "llvm/CodeGen/LivePhysRegs.h"
31 #include "llvm/CodeGen/MachineBasicBlock.h"
32 #include "llvm/CodeGen/MachineConstantPool.h"
33 #include "llvm/CodeGen/MachineDominators.h"
34 #include "llvm/CodeGen/MachineFunction.h"
35 #include "llvm/CodeGen/MachineFunctionPass.h"
36 #include "llvm/CodeGen/MachineInstr.h"
37 #include "llvm/CodeGen/MachineJumpTableInfo.h"
38 #include "llvm/CodeGen/MachineOperand.h"
39 #include "llvm/CodeGen/MachineRegisterInfo.h"
40 #include "llvm/Config/llvm-config.h"
41 #include "llvm/IR/DataLayout.h"
42 #include "llvm/IR/DebugLoc.h"
43 #include "llvm/MC/MCInstrDesc.h"
44 #include "llvm/Pass.h"
45 #include "llvm/Support/CommandLine.h"
46 #include "llvm/Support/Compiler.h"
47 #include "llvm/Support/Debug.h"
48 #include "llvm/Support/ErrorHandling.h"
49 #include "llvm/Support/Format.h"
50 #include "llvm/Support/raw_ostream.h"
51 #include <algorithm>
52 #include <cassert>
53 #include <cstdint>
54 #include <iterator>
55 #include <utility>
56 #include <vector>
57 
58 using namespace llvm;
59 
60 #define DEBUG_TYPE "arm-cp-islands"
61 
62 #define ARM_CP_ISLANDS_OPT_NAME \
63   "ARM constant island placement and branch shortening pass"
64 STATISTIC(NumCPEs,       "Number of constpool entries");
65 STATISTIC(NumSplit,      "Number of uncond branches inserted");
66 STATISTIC(NumCBrFixed,   "Number of cond branches fixed");
67 STATISTIC(NumUBrFixed,   "Number of uncond branches fixed");
68 STATISTIC(NumTBs,        "Number of table branches generated");
69 STATISTIC(NumT2CPShrunk, "Number of Thumb2 constantpool instructions shrunk");
70 STATISTIC(NumT2BrShrunk, "Number of Thumb2 immediate branches shrunk");
71 STATISTIC(NumCBZ,        "Number of CBZ / CBNZ formed");
72 STATISTIC(NumJTMoved,    "Number of jump table destination blocks moved");
73 STATISTIC(NumJTInserted, "Number of jump table intermediate blocks inserted");
74 STATISTIC(NumLEInserted, "Number of LE backwards branches inserted");
75 
76 static cl::opt<bool>
77 AdjustJumpTableBlocks("arm-adjust-jump-tables", cl::Hidden, cl::init(true),
78           cl::desc("Adjust basic block layout to better use TB[BH]"));
79 
80 static cl::opt<unsigned>
81 CPMaxIteration("arm-constant-island-max-iteration", cl::Hidden, cl::init(30),
82           cl::desc("The maximum number of iterations to converge"));
83 
84 static cl::opt<bool> SynthesizeThumb1TBB(
85     "arm-synthesize-thumb-1-tbb", cl::Hidden, cl::init(true),
86     cl::desc("Use compressed jump tables in Thumb-1 by synthesizing an "
87              "equivalent to the TBB/TBH instructions"));
88 
89 namespace {
90 
91   /// ARMConstantIslands - Due to limited PC-relative displacements, ARM
92   /// requires constant pool entries to be scattered among the instructions
93   /// inside a function.  To do this, it completely ignores the normal LLVM
94   /// constant pool; instead, it places constants wherever it feels like with
95   /// special instructions.
96   ///
97   /// The terminology used in this pass includes:
98   ///   Islands - Clumps of constants placed in the function.
99   ///   Water   - Potential places where an island could be formed.
100   ///   CPE     - A constant pool entry that has been placed somewhere, which
101   ///             tracks a list of users.
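  ///
  /// Illustrative sketch (an addition, not from the original comments): after
  /// this pass a function might be laid out as
  ///
  ///     BB0:    ldr  r0, [pc, #off]   ; CPUser referencing CPE#0
  ///             ...
  ///             b    BB1              ; no fallthrough => water after BB0
  ///     island: CONSTPOOL_ENTRY 0     ; CPE placed in that water
  ///     BB1:    ...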
102   class ARMConstantIslands : public MachineFunctionPass {
103     std::unique_ptr<ARMBasicBlockUtils> BBUtils = nullptr;
104 
105     /// WaterList - A sorted list of basic blocks where islands could be placed
106     /// (i.e. blocks that don't fall through to the following block, due
107     /// to a return, unreachable, or unconditional branch).
108     std::vector<MachineBasicBlock*> WaterList;
109 
110     /// NewWaterList - The subset of WaterList that was created since the
111     /// previous iteration by inserting unconditional branches.
112     SmallSet<MachineBasicBlock*, 4> NewWaterList;
113 
114     using water_iterator = std::vector<MachineBasicBlock *>::iterator;
115 
116     /// CPUser - One user of a constant pool, keeping the machine instruction
117     /// pointer, the constant pool being referenced, and the max displacement
118     /// allowed from the instruction to the CP.  The HighWaterMark records the
119     /// highest basic block where a new CPEntry can be placed.  To ensure this
120     /// pass terminates, the CP entries are initially placed at the end of the
121     /// function and then move monotonically to lower addresses.  The
122     /// exception to this rule is when the current CP entry for a particular
123     /// CPUser is out of range, but there is another CP entry for the same
124     /// constant value in range.  We want to use the existing in-range CP
125     /// entry, but if it later moves out of range, the search for new water
126     /// should resume where it left off.  The HighWaterMark is used to record
127     /// that point.
128     struct CPUser {
129       MachineInstr *MI;
130       MachineInstr *CPEMI;
131       MachineBasicBlock *HighWaterMark;
132       unsigned MaxDisp;
133       bool NegOk;
134       bool IsSoImm;
135       bool KnownAlignment = false;
136 
137       CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp,
138              bool neg, bool soimm)
139         : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp), NegOk(neg), IsSoImm(soimm) {
140         HighWaterMark = CPEMI->getParent();
141       }
142 
143       /// getMaxDisp - Returns the maximum displacement supported by MI.
144       /// Correct for unknown alignment.
145       /// Conservatively subtract 2 bytes to handle weird alignment effects.
146       unsigned getMaxDisp() const {
147         return (KnownAlignment ? MaxDisp : MaxDisp - 2) - 2;
148       }
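      // Worked example for getMaxDisp() (illustrative, not in the original
      // source): a Thumb1 tLDRpci user starts with MaxDisp = 1020. With
      // KnownAlignment the usable range is 1020 - 2 = 1018 bytes; with
      // unknown alignment it shrinks further to 1020 - 2 - 2 = 1016 bytes.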
149     };
150 
151     /// CPUsers - Keep track of all of the machine instructions that use various
152     /// constant pools and their max displacement.
153     std::vector<CPUser> CPUsers;
154 
155     /// CPEntry - One per constant pool entry, keeping the machine instruction
156     /// pointer, the constpool index, and the number of CPUser's which
157     /// reference this entry.
158     struct CPEntry {
159       MachineInstr *CPEMI;
160       unsigned CPI;
161       unsigned RefCount;
162 
163       CPEntry(MachineInstr *cpemi, unsigned cpi, unsigned rc = 0)
164         : CPEMI(cpemi), CPI(cpi), RefCount(rc) {}
165     };
166 
167     /// CPEntries - Keep track of all of the constant pool entry machine
168     /// instructions. For each original constpool index (i.e. those that existed
169     /// upon entry to this pass), it keeps a vector of entries.  Original
170     /// elements are cloned as we go along; the clones are put in the vector of
171     /// the original element, but have distinct CPIs.
172     ///
173     /// The first half of CPEntries contains generic constants, the second half
174     /// contains jump tables. Use getCombinedIndex on a generic CPEMI to look up
175     /// which vector it will be in here.
176     std::vector<std::vector<CPEntry>> CPEntries;
177 
178     /// Maps a JT index to the offset in CPEntries containing copies of that
179     /// table. The equivalent map for a CONSTPOOL_ENTRY is the identity.
180     DenseMap<int, int> JumpTableEntryIndices;
181 
182     /// Maps a JT index to the LEA that actually uses the index to calculate its
183     /// base address.
184     DenseMap<int, int> JumpTableUserIndices;
185 
186     /// ImmBranch - One per immediate branch, keeping the machine instruction
187     /// pointer, conditional or unconditional, the max displacement,
188     /// and (if isCond is true) the corresponding unconditional branch
189     /// opcode.
190     struct ImmBranch {
191       MachineInstr *MI;
192       unsigned MaxDisp : 31;
193       LLVM_PREFERRED_TYPE(bool)
194       unsigned isCond : 1;
195       unsigned UncondBr;
196 
197       ImmBranch(MachineInstr *mi, unsigned maxdisp, bool cond, unsigned ubr)
198         : MI(mi), MaxDisp(maxdisp), isCond(cond), UncondBr(ubr) {}
199     };
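    // Example (illustrative): a Thumb1 tBcc is recorded with MaxDisp = 254,
    // isCond = true and UncondBr = ARM::tB, so the fixup code knows which
    // unconditional opcode to fall back on if the conditional branch ends up
    // out of range.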
200 
201     /// ImmBranches - Keep track of all the immediate branch instructions.
202     std::vector<ImmBranch> ImmBranches;
203 
204     /// PushPopMIs - Keep track of all the Thumb push / pop instructions.
205     SmallVector<MachineInstr*, 4> PushPopMIs;
206 
207     /// T2JumpTables - Keep track of all the Thumb2 jumptable instructions.
208     SmallVector<MachineInstr*, 4> T2JumpTables;
209 
210     MachineFunction *MF;
211     MachineConstantPool *MCP;
212     const ARMBaseInstrInfo *TII;
213     const ARMSubtarget *STI;
214     ARMFunctionInfo *AFI;
215     MachineDominatorTree *DT = nullptr;
216     bool isThumb;
217     bool isThumb1;
218     bool isThumb2;
219     bool isPositionIndependentOrROPI;
220 
221   public:
222     static char ID;
223 
224     ARMConstantIslands() : MachineFunctionPass(ID) {}
225 
226     bool runOnMachineFunction(MachineFunction &MF) override;
227 
228     void getAnalysisUsage(AnalysisUsage &AU) const override {
229       AU.addRequired<MachineDominatorTreeWrapperPass>();
230       MachineFunctionPass::getAnalysisUsage(AU);
231     }
232 
233     MachineFunctionProperties getRequiredProperties() const override {
234       return MachineFunctionProperties().setNoVRegs();
235     }
236 
237     StringRef getPassName() const override {
238       return ARM_CP_ISLANDS_OPT_NAME;
239     }
240 
241   private:
242     void doInitialConstPlacement(std::vector<MachineInstr *> &CPEMIs);
243     void doInitialJumpTablePlacement(std::vector<MachineInstr *> &CPEMIs);
244     bool BBHasFallthrough(MachineBasicBlock *MBB);
245     CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
246     Align getCPEAlign(const MachineInstr *CPEMI);
247     void scanFunctionJumpTables();
248     void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
249     MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
250     void updateForInsertedWaterBlock(MachineBasicBlock *NewBB);
251     bool decrementCPEReferenceCount(unsigned CPI, MachineInstr* CPEMI);
252     unsigned getCombinedIndex(const MachineInstr *CPEMI);
253     int findInRangeCPEntry(CPUser& U, unsigned UserOffset);
254     bool findAvailableWater(CPUser&U, unsigned UserOffset,
255                             water_iterator &WaterIter, bool CloserWater);
256     void createNewWater(unsigned CPUserIndex, unsigned UserOffset,
257                         MachineBasicBlock *&NewMBB);
258     bool handleConstantPoolUser(unsigned CPUserIndex, bool CloserWater);
259     void removeDeadCPEMI(MachineInstr *CPEMI);
260     bool removeUnusedCPEntries();
261     bool isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
262                           MachineInstr *CPEMI, unsigned Disp, bool NegOk,
263                           bool DoDump = false);
264     bool isWaterInRange(unsigned UserOffset, MachineBasicBlock *Water,
265                         CPUser &U, unsigned &Growth);
266     bool fixupImmediateBr(ImmBranch &Br);
267     bool fixupConditionalBr(ImmBranch &Br);
268     bool fixupUnconditionalBr(ImmBranch &Br);
269     bool optimizeThumb2Instructions();
270     bool optimizeThumb2Branches();
271     bool reorderThumb2JumpTables();
272     bool preserveBaseRegister(MachineInstr *JumpMI, MachineInstr *LEAMI,
273                               unsigned &DeadSize, bool &CanDeleteLEA,
274                               bool &BaseRegKill);
275     bool optimizeThumb2JumpTables();
276     MachineBasicBlock *adjustJTTargetBlockForward(unsigned JTI,
277                                                   MachineBasicBlock *BB,
278                                                   MachineBasicBlock *JTBB);
279 
280     unsigned getUserOffset(CPUser&) const;
281     void dumpBBs();
282     void verify();
283 
284     bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
285                          unsigned Disp, bool NegativeOK, bool IsSoImm = false);
286     bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
287                          const CPUser &U) {
288       return isOffsetInRange(UserOffset, TrialOffset,
289                              U.getMaxDisp(), U.NegOk, U.IsSoImm);
290     }
291   };
292 
293 } // end anonymous namespace
294 
295 char ARMConstantIslands::ID = 0;
296 
297 /// verify - check BBOffsets, BBSizes, alignment of islands
298 void ARMConstantIslands::verify() {
299 #ifndef NDEBUG
300   BBInfoVector &BBInfo = BBUtils->getBBInfo();
301   assert(is_sorted(*MF, [&BBInfo](const MachineBasicBlock &LHS,
302                                   const MachineBasicBlock &RHS) {
303     return BBInfo[LHS.getNumber()].postOffset() <
304            BBInfo[RHS.getNumber()].postOffset();
305   }));
306   LLVM_DEBUG(dbgs() << "Verifying " << CPUsers.size() << " CP users.\n");
307   for (CPUser &U : CPUsers) {
308     unsigned UserOffset = getUserOffset(U);
309     // Verify offset using the real max displacement without the safety
310     // adjustment.
311     if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, U.getMaxDisp()+2, U.NegOk,
312                          /* DoDump = */ true)) {
313       LLVM_DEBUG(dbgs() << "OK\n");
314       continue;
315     }
316     LLVM_DEBUG(dbgs() << "Out of range.\n");
317     dumpBBs();
318     LLVM_DEBUG(MF->dump());
319     llvm_unreachable("Constant pool entry out of range!");
320   }
321 #endif
322 }
323 
324 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
325 /// print block size and offset information - debugging
326 LLVM_DUMP_METHOD void ARMConstantIslands::dumpBBs() {
327   LLVM_DEBUG({
328     BBInfoVector &BBInfo = BBUtils->getBBInfo();
329     for (unsigned J = 0, E = BBInfo.size(); J != E; ++J) {
330       const BasicBlockInfo &BBI = BBInfo[J];
331       dbgs() << format("%08x %bb.%u\t", BBI.Offset, J)
332              << " kb=" << unsigned(BBI.KnownBits)
333              << " ua=" << unsigned(BBI.Unalign) << " pa=" << Log2(BBI.PostAlign)
334              << format(" size=%#x\n", BBInfo[J].Size);
335     }
336   });
337 }
338 #endif
339 
340 // Align blocks where the previous block does not fall through. This may add
341 // extra NOPs, but they will not be executed. It uses the PrefLoopAlignment as a
342 // measure of how much to align, and only runs at CodeGenOptLevel::Aggressive.
343 static bool AlignBlocks(MachineFunction *MF, const ARMSubtarget *STI) {
344   if (MF->getTarget().getOptLevel() != CodeGenOptLevel::Aggressive ||
345       MF->getFunction().hasOptSize())
346     return false;
347 
348   auto *TLI = STI->getTargetLowering();
349   const Align Alignment = TLI->getPrefLoopAlignment();
350   if (Alignment < 4)
351     return false;
352 
353   bool Changed = false;
354   bool PrevCanFallthough = true;
355   for (auto &MBB : *MF) {
356     if (!PrevCanFallthough) {
357       Changed = true;
358       MBB.setAlignment(Alignment);
359     }
360 
361     PrevCanFallthough = MBB.canFallThrough();
362 
363     // For LOBs, the ARMLowOverheadLoops pass may remove the unconditional
364     // branch later in the pipeline.
365     if (STI->hasLOB()) {
366       for (const auto &MI : reverse(MBB.terminators())) {
367         if (MI.getOpcode() == ARM::t2B &&
368             MI.getOperand(0).getMBB() == MBB.getNextNode())
369           continue;
370         if (isLoopStart(MI) || MI.getOpcode() == ARM::t2LoopEnd ||
371             MI.getOpcode() == ARM::t2LoopEndDec) {
372           PrevCanFallthough = true;
373           break;
374         }
375         // Any other terminator - nothing to do
376         break;
377       }
378     }
379   }
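    // Illustrative note (an addition): marking these blocks as potential
    // fallthroughs keeps the next block unaligned, since ARMLowOverheadLoops
    // may later delete the trailing branch and any alignment NOPs would then
    // sit on the executed fallthrough path.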
380 
381   return Changed;
382 }
383 
384 bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
385   MF = &mf;
386   MCP = mf.getConstantPool();
387   BBUtils = std::make_unique<ARMBasicBlockUtils>(mf);
388 
389   LLVM_DEBUG(dbgs() << "***** ARMConstantIslands: "
390                     << MCP->getConstants().size() << " CP entries, aligned to "
391                     << MCP->getConstantPoolAlign().value() << " bytes *****\n");
392 
393   STI = &MF->getSubtarget<ARMSubtarget>();
394   TII = STI->getInstrInfo();
395   isPositionIndependentOrROPI =
396       STI->getTargetLowering()->isPositionIndependent() || STI->isROPI();
397   AFI = MF->getInfo<ARMFunctionInfo>();
398   DT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
399 
400   isThumb = AFI->isThumbFunction();
401   isThumb1 = AFI->isThumb1OnlyFunction();
402   isThumb2 = AFI->isThumb2Function();
403 
404   bool GenerateTBB = isThumb2 || (isThumb1 && SynthesizeThumb1TBB);
405   // TBB generation code in this constant island pass has not been adapted to
406   // deal with speculation barriers.
407   if (STI->hardenSlsRetBr())
408     GenerateTBB = false;
409 
410   // Renumber all of the machine basic blocks in the function, guaranteeing that
411   // the numbers agree with the position of the block in the function.
412   MF->RenumberBlocks();
413   DT->updateBlockNumbers();
414 
415   // Try to reorder and otherwise adjust the block layout to make good use
416   // of the TB[BH] instructions.
417   bool MadeChange = false;
418   if (GenerateTBB && AdjustJumpTableBlocks) {
419     scanFunctionJumpTables();
420     MadeChange |= reorderThumb2JumpTables();
421     // Data is out of date, so clear it. It'll be re-computed later.
422     T2JumpTables.clear();
423     // Blocks may have shifted around. Keep the numbering up to date.
424     MF->RenumberBlocks();
425     DT->updateBlockNumbers();
426   }
427 
428   // Align any non-fallthrough blocks
429   MadeChange |= AlignBlocks(MF, STI);
430 
431   // Perform the initial placement of the constant pool entries.  To start with,
432   // we put them all at the end of the function.
433   std::vector<MachineInstr*> CPEMIs;
434   if (!MCP->isEmpty())
435     doInitialConstPlacement(CPEMIs);
436 
437   if (MF->getJumpTableInfo())
438     doInitialJumpTablePlacement(CPEMIs);
439 
440   /// The next UID to take is the first unused one.
441   AFI->initPICLabelUId(CPEMIs.size());
442 
443   // Do the initial scan of the function, building up information about the
444   // sizes of each block, the location of all the water, and finding all of the
445   // constant pool users.
446   initializeFunctionInfo(CPEMIs);
447   CPEMIs.clear();
448   LLVM_DEBUG(dumpBBs());
449 
450   // Functions with jump tables need an alignment of 4 because they use the ADR
451   // instruction, which aligns the PC to 4 bytes before adding an offset.
452   if (!T2JumpTables.empty())
453     MF->ensureAlignment(Align(4));
454 
455   /// Remove dead constant pool entries.
456   MadeChange |= removeUnusedCPEntries();
457 
458   // Iteratively place constant pool entries and fix up branches until there
459   // is no change.
460   unsigned NoCPIters = 0, NoBRIters = 0;
461   while (true) {
462     LLVM_DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
463     bool CPChange = false;
464     for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
465       // For most inputs, it converges in no more than 5 iterations.
466       // If it doesn't converge within 10, the input may have a huge BB or many CPEs.
467       // In this case, we will try different heuristics.
468       CPChange |= handleConstantPoolUser(i, NoCPIters >= CPMaxIteration / 2);
469     if (CPChange && ++NoCPIters > CPMaxIteration)
470       report_fatal_error("Constant Island pass failed to converge!");
471     LLVM_DEBUG(dumpBBs());
472 
473     // Clear NewWaterList now.  If we split a block for branches, it should
474     // appear as "new water" for the next iteration of constant pool placement.
475     NewWaterList.clear();
476 
477     LLVM_DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n');
478     bool BRChange = false;
479     for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i) {
480       // Note: fixupImmediateBr can append to ImmBranches.
481       BRChange |= fixupImmediateBr(ImmBranches[i]);
482     }
483     if (BRChange && ++NoBRIters > 30)
484       report_fatal_error("Branch Fix Up pass failed to converge!");
485     LLVM_DEBUG(dumpBBs());
486 
487     if (!CPChange && !BRChange)
488       break;
489     MadeChange = true;
490   }
491 
492   // Shrink 32-bit Thumb2 load and store instructions.
493   if (isThumb2 && !STI->prefers32BitThumb())
494     MadeChange |= optimizeThumb2Instructions();
495 
496   // Shrink 32-bit branch instructions.
497   if (isThumb && STI->hasV8MBaselineOps())
498     MadeChange |= optimizeThumb2Branches();
499 
500   // Optimize jump tables using TBB / TBH.
501   if (GenerateTBB && !STI->genExecuteOnly())
502     MadeChange |= optimizeThumb2JumpTables();
503 
504   // After a while, this might be made debug-only, but it is not expensive.
505   verify();
506 
507   // Save the mapping between original and cloned constpool entries.
508   for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
509     for (unsigned j = 0, je = CPEntries[i].size(); j != je; ++j) {
510       const CPEntry & CPE = CPEntries[i][j];
511       if (CPE.CPEMI && CPE.CPEMI->getOperand(1).isCPI())
512         AFI->recordCPEClone(i, CPE.CPI);
513     }
514   }
515 
516   LLVM_DEBUG(dbgs() << '\n'; dumpBBs());
517 
518   BBUtils->clear();
519   WaterList.clear();
520   CPUsers.clear();
521   CPEntries.clear();
522   JumpTableEntryIndices.clear();
523   JumpTableUserIndices.clear();
524   ImmBranches.clear();
525   PushPopMIs.clear();
526   T2JumpTables.clear();
527 
528   return MadeChange;
529 }
530 
531 /// Perform the initial placement of the regular constant pool entries.
532 /// To start with, we put them all at the end of the function.
533 void
534 ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs) {
535   // Create the basic block to hold the CPE's.
536   MachineBasicBlock *BB = MF->CreateMachineBasicBlock();
537   MF->push_back(BB);
538 
539   // MachineConstantPool measures alignment in bytes.
540   const Align MaxAlign = MCP->getConstantPoolAlign();
541   const unsigned MaxLogAlign = Log2(MaxAlign);
542 
543   // Mark the basic block as required by the const-pool.
544   BB->setAlignment(MaxAlign);
545 
546   // The function needs to be as aligned as the basic blocks. The linker may
547   // move functions around based on their alignment.
548   // Special case: halfword literals still need word alignment on the function.
549   Align FuncAlign = MaxAlign;
550   if (MaxAlign == 2)
551     FuncAlign = Align(4);
552   MF->ensureAlignment(FuncAlign);
553 
554   // Order the entries in BB by descending alignment.  That ensures correct
555   // alignment of all entries as long as BB is sufficiently aligned.  Keep
556   // track of the insertion point for each alignment.  We are going to bucket
557   // sort the entries as they are created.
558   SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxLogAlign + 1,
559                                                        BB->end());
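  // Illustrative example of the bucket sort (not in the original comments):
  // with MaxLogAlign = 3 every InsPoint[a] starts at BB->end(). Once a
  // 4-byte-aligned entry (LogAlign = 2) is inserted, InsPoint[3] is redirected
  // to point before it, so a later 8-byte-aligned entry lands in front of it,
  // preserving the descending-alignment order.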
560 
561   // Add all of the constants from the constant pool to the end block, use an
562   // identity mapping of CPI's to CPE's.
563   const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();
564 
565   const DataLayout &TD = MF->getDataLayout();
566   for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
567     unsigned Size = CPs[i].getSizeInBytes(TD);
568     Align Alignment = CPs[i].getAlign();
569     // Verify that all constant pool entries are a multiple of their alignment.
570     // If not, we would have to pad them out so that instructions stay aligned.
571     assert(isAligned(Alignment, Size) && "CP Entry size not a multiple of its alignment!");
572 
573     // Insert CONSTPOOL_ENTRY before entries with a smaller alignment.
574     unsigned LogAlign = Log2(Alignment);
575     MachineBasicBlock::iterator InsAt = InsPoint[LogAlign];
576     MachineInstr *CPEMI =
577       BuildMI(*BB, InsAt, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
578         .addImm(i).addConstantPoolIndex(i).addImm(Size);
579     CPEMIs.push_back(CPEMI);
580 
581     // Ensure that future entries with higher alignment get inserted before
582     // CPEMI. This is bucket sort with iterators.
583     for (unsigned a = LogAlign + 1; a <= MaxLogAlign; ++a)
584       if (InsPoint[a] == InsAt)
585         InsPoint[a] = CPEMI;
586 
587     // Add a new CPEntry, but no corresponding CPUser yet.
588     CPEntries.emplace_back(1, CPEntry(CPEMI, i));
589     ++NumCPEs;
590     LLVM_DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = "
591                       << Size << ", align = " << Alignment.value() << '\n');
592   }
593   LLVM_DEBUG(BB->dump());
594 }
595 
596 /// Do initial placement of the jump tables. Because Thumb2's TBB and TBH
597 /// instructions can be made more efficient if the jump table immediately
598 /// follows the instruction, it's best to place them immediately next to their
599 /// jumps to begin with. In almost all cases they'll never be moved from that
600 /// position.
601 void ARMConstantIslands::doInitialJumpTablePlacement(
602     std::vector<MachineInstr *> &CPEMIs) {
603   unsigned i = CPEntries.size();
604   auto MJTI = MF->getJumpTableInfo();
605   const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
606 
607   // Only inline jump tables are placed in the function.
608   if (MJTI->getEntryKind() != MachineJumpTableInfo::EK_Inline)
609     return;
610 
611   MachineBasicBlock *LastCorrectlyNumberedBB = nullptr;
612   for (MachineBasicBlock &MBB : *MF) {
613     auto MI = MBB.getLastNonDebugInstr();
614     // Look past potential SpeculationBarriers at end of BB.
615     while (MI != MBB.end() &&
616            (isSpeculationBarrierEndBBOpcode(MI->getOpcode()) ||
617             MI->isDebugInstr()))
618       --MI;
619 
620     if (MI == MBB.end())
621       continue;
622 
623     unsigned JTOpcode;
624     switch (MI->getOpcode()) {
625     default:
626       continue;
627     case ARM::BR_JTadd:
628     case ARM::BR_JTr:
629     case ARM::tBR_JTr:
630     case ARM::BR_JTm_i12:
631     case ARM::BR_JTm_rs:
632       // These instructions are emitted only in ARM or Thumb1 modes which do not
633       // support PACBTI. Hence we don't add BTI instructions in the destination
634       // blocks.
635       assert(!MF->getInfo<ARMFunctionInfo>()->branchTargetEnforcement() &&
636              "Branch protection must not be enabled for Arm or Thumb1 modes");
637       JTOpcode = ARM::JUMPTABLE_ADDRS;
638       break;
639     case ARM::t2BR_JT:
640       JTOpcode = ARM::JUMPTABLE_INSTS;
641       break;
642     case ARM::tTBB_JT:
643     case ARM::t2TBB_JT:
644       JTOpcode = ARM::JUMPTABLE_TBB;
645       break;
646     case ARM::tTBH_JT:
647     case ARM::t2TBH_JT:
648       JTOpcode = ARM::JUMPTABLE_TBH;
649       break;
650     }
651 
652     unsigned NumOps = MI->getDesc().getNumOperands();
653     MachineOperand JTOp =
654       MI->getOperand(NumOps - (MI->isPredicable() ? 2 : 1));
655     unsigned JTI = JTOp.getIndex();
656     unsigned Size = JT[JTI].MBBs.size() * sizeof(uint32_t);
657     MachineBasicBlock *JumpTableBB = MF->CreateMachineBasicBlock();
658     MF->insert(std::next(MachineFunction::iterator(MBB)), JumpTableBB);
659     MachineInstr *CPEMI = BuildMI(*JumpTableBB, JumpTableBB->begin(),
660                                   DebugLoc(), TII->get(JTOpcode))
661                               .addImm(i++)
662                               .addJumpTableIndex(JTI)
663                               .addImm(Size);
664     CPEMIs.push_back(CPEMI);
665     CPEntries.emplace_back(1, CPEntry(CPEMI, JTI));
666     JumpTableEntryIndices.insert(std::make_pair(JTI, CPEntries.size() - 1));
667     if (!LastCorrectlyNumberedBB)
668       LastCorrectlyNumberedBB = &MBB;
669   }
670 
671   // If we did anything then we need to renumber the subsequent blocks.
672   if (LastCorrectlyNumberedBB) {
673     MF->RenumberBlocks(LastCorrectlyNumberedBB);
674     DT->updateBlockNumbers();
675   }
676 }
677 
678 /// BBHasFallthrough - Return true if the specified basic block can fall through
679 /// into the block immediately after it.
680 bool ARMConstantIslands::BBHasFallthrough(MachineBasicBlock *MBB) {
681   // Get the next machine basic block in the function.
682   MachineFunction::iterator MBBI = MBB->getIterator();
683   // Can't fall off end of function.
684   if (std::next(MBBI) == MBB->getParent()->end())
685     return false;
686 
687   MachineBasicBlock *NextBB = &*std::next(MBBI);
688   if (!MBB->isSuccessor(NextBB))
689     return false;
690 
691   // Try to analyze the end of the block. A potential fallthrough may already
692   // have an unconditional branch for whatever reason.
693   MachineBasicBlock *TBB, *FBB;
694   SmallVector<MachineOperand, 4> Cond;
695   bool TooDifficult = TII->analyzeBranch(*MBB, TBB, FBB, Cond);
696   return TooDifficult || FBB == nullptr;
697 }
698 
699 /// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI,
700 /// look up the corresponding CPEntry.
701 ARMConstantIslands::CPEntry *
702 ARMConstantIslands::findConstPoolEntry(unsigned CPI,
703                                        const MachineInstr *CPEMI) {
704   std::vector<CPEntry> &CPEs = CPEntries[CPI];
705   // Number of entries per constpool index should be small, just do a
706   // linear search.
707   for (CPEntry &CPE : CPEs)
708     if (CPE.CPEMI == CPEMI)
709       return &CPE;
710   return nullptr;
711 }
712 
713 /// getCPEAlign - Returns the required alignment of the constant pool entry
714 /// represented by CPEMI.
715 Align ARMConstantIslands::getCPEAlign(const MachineInstr *CPEMI) {
716   switch (CPEMI->getOpcode()) {
717   case ARM::CONSTPOOL_ENTRY:
718     break;
719   case ARM::JUMPTABLE_TBB:
720     return isThumb1 ? Align(4) : Align(1);
721   case ARM::JUMPTABLE_TBH:
722     return isThumb1 ? Align(4) : Align(2);
723   case ARM::JUMPTABLE_INSTS:
724     return Align(2);
725   case ARM::JUMPTABLE_ADDRS:
726     return Align(4);
727   default:
728     llvm_unreachable("unknown constpool entry kind");
729   }
730 
731   unsigned CPI = getCombinedIndex(CPEMI);
732   assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
733   return MCP->getConstants()[CPI].getAlign();
734 }
735 
736 /// scanFunctionJumpTables - Do a scan of the function, building up
737 /// information about the sizes of each block and the locations of all
738 /// the jump tables.
739 void ARMConstantIslands::scanFunctionJumpTables() {
740   for (MachineBasicBlock &MBB : *MF) {
741     for (MachineInstr &I : MBB)
742       if (I.isBranch() &&
743           (I.getOpcode() == ARM::t2BR_JT || I.getOpcode() == ARM::tBR_JTr))
744         T2JumpTables.push_back(&I);
745   }
746 }
747 
748 /// initializeFunctionInfo - Do the initial scan of the function, building up
749 /// information about the sizes of each block, the location of all the water,
750 /// and finding all of the constant pool users.
751 void ARMConstantIslands::
752 initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
753 
754   BBUtils->computeAllBlockSizes();
755   BBInfoVector &BBInfo = BBUtils->getBBInfo();
756   // The known bits of the entry block offset are determined by the function
757   // alignment.
758   BBInfo.front().KnownBits = Log2(MF->getAlignment());
759 
760   // Compute block offsets and known bits.
761   BBUtils->adjustBBOffsetsAfter(&MF->front());
762 
763   // We only care about jump table instructions when jump tables are inline.
764   MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
765   bool InlineJumpTables =
766       MJTI && MJTI->getEntryKind() == MachineJumpTableInfo::EK_Inline;
767 
768   // Now go back through the instructions and build up our data structures.
769   for (MachineBasicBlock &MBB : *MF) {
770     // If this block doesn't fall through into the next MBB, then this is
771     // 'water' where a constant pool island could be placed.
772     if (!BBHasFallthrough(&MBB))
773       WaterList.push_back(&MBB);
774 
775     for (MachineInstr &I : MBB) {
776       if (I.isDebugInstr())
777         continue;
778 
779       unsigned Opc = I.getOpcode();
780       if (I.isBranch()) {
781         bool isCond = false;
782         unsigned Bits = 0;
783         unsigned Scale = 1;
784         int UOpc = Opc;
785         switch (Opc) {
786         default:
787           continue;  // Ignore other JT branches
788         case ARM::t2BR_JT:
789         case ARM::tBR_JTr:
790           if (InlineJumpTables)
791             T2JumpTables.push_back(&I);
792           continue;   // Does not get an entry in ImmBranches
793         case ARM::Bcc:
794           isCond = true;
795           UOpc = ARM::B;
796           [[fallthrough]];
797         case ARM::B:
798           Bits = 24;
799           Scale = 4;
800           break;
801         case ARM::tBcc:
802           isCond = true;
803           UOpc = ARM::tB;
804           Bits = 8;
805           Scale = 2;
806           break;
807         case ARM::tB:
808           Bits = 11;
809           Scale = 2;
810           break;
811         case ARM::t2Bcc:
812           isCond = true;
813           UOpc = ARM::t2B;
814           Bits = 20;
815           Scale = 2;
816           break;
817         case ARM::t2B:
818           Bits = 24;
819           Scale = 2;
820           break;
821         }
822 
823         // Record this immediate branch.
824         unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
825         ImmBranches.push_back(ImmBranch(&I, MaxOffs, isCond, UOpc));
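        // E.g. (illustrative) for tBcc: Bits = 8 and Scale = 2 give
        // MaxOffs = ((1 << 7) - 1) * 2 = 254 bytes; the shift by Bits-1
        // reflects that the offset field is a signed two's-complement value.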
826       }
827 
828       if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
829         PushPopMIs.push_back(&I);
830 
831       if (Opc == ARM::CONSTPOOL_ENTRY || Opc == ARM::JUMPTABLE_ADDRS ||
832           Opc == ARM::JUMPTABLE_INSTS || Opc == ARM::JUMPTABLE_TBB ||
833           Opc == ARM::JUMPTABLE_TBH)
834         continue;
835 
836       // Scan the instructions for constant pool operands.
837       for (unsigned op = 0, e = I.getNumOperands(); op != e; ++op)
838         if (I.getOperand(op).isCPI() ||
839             (I.getOperand(op).isJTI() && InlineJumpTables)) {
840           // We found one.  The addressing mode tells us the max displacement
841           // from the PC that this instruction permits.
842 
843           // Basic size info comes from the TSFlags field.
844           unsigned Bits = 0;
845           unsigned Scale = 1;
846           bool NegOk = false;
847           bool IsSoImm = false;
848 
849           switch (Opc) {
850           default:
851             llvm_unreachable("Unknown addressing mode for CP reference!");
852 
853           // Taking the address of a CP entry.
854           case ARM::LEApcrel:
855           case ARM::LEApcrelJT: {
856               // This takes a SoImm, which is an 8-bit immediate rotated. We'll
857               // pretend the maximum offset is 255 * 4. Since each instruction is
858               // 4 bytes wide, this is always correct. We'll check for other
859               // displacements that fit in a SoImm as well.
860               Bits = 8;
861               NegOk = true;
862               IsSoImm = true;
863               unsigned CPI = I.getOperand(op).getIndex();
864               assert(CPI < CPEMIs.size());
865               MachineInstr *CPEMI = CPEMIs[CPI];
866               const Align CPEAlign = getCPEAlign(CPEMI);
867               const unsigned LogCPEAlign = Log2(CPEAlign);
868               if (LogCPEAlign >= 2)
869                 Scale = 4;
870               else
871                 // For constants with less than 4-byte alignment,
872                 // we'll pretend the maximum offset is 255 * 1.
873                 Scale = 1;
874             }
875             break;
876           case ARM::t2LEApcrel:
877           case ARM::t2LEApcrelJT:
878             Bits = 12;
879             NegOk = true;
880             break;
881           case ARM::tLEApcrel:
882           case ARM::tLEApcrelJT:
883             Bits = 8;
884             Scale = 4;
885             break;
886 
887           case ARM::LDRBi12:
888           case ARM::LDRi12:
889           case ARM::LDRcp:
890           case ARM::t2LDRpci:
891           case ARM::t2LDRHpci:
892           case ARM::t2LDRSHpci:
893           case ARM::t2LDRBpci:
894           case ARM::t2LDRSBpci:
895             Bits = 12;  // +-offset_12
896             NegOk = true;
897             break;
898 
899           case ARM::tLDRpci:
900             Bits = 8;
901             Scale = 4;  // +(offset_8*4)
902             break;
903 
904           case ARM::VLDRD:
905           case ARM::VLDRS:
906             Bits = 8;
907             Scale = 4;  // +-(offset_8*4)
908             NegOk = true;
909             break;
910           case ARM::VLDRH:
911             Bits = 8;
912             Scale = 2;  // +-(offset_8*2)
913             NegOk = true;
914             break;
915           }
916 
917           // Remember that this is a user of a CP entry.
918           unsigned CPI = I.getOperand(op).getIndex();
919           if (I.getOperand(op).isJTI()) {
920             JumpTableUserIndices.insert(std::make_pair(CPI, CPUsers.size()));
921             CPI = JumpTableEntryIndices[CPI];
922           }
923 
924           MachineInstr *CPEMI = CPEMIs[CPI];
925           unsigned MaxOffs = ((1 << Bits)-1) * Scale;
926           CPUsers.push_back(CPUser(&I, CPEMI, MaxOffs, NegOk, IsSoImm));
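          // E.g. (illustrative) for tLDRpci: Bits = 8 and Scale = 4 give
          // MaxOffs = 255 * 4 = 1020 bytes. Unlike branch offsets this is an
          // unsigned magnitude; negative displacements are permitted only
          // when NegOk is set (e.g. t2LDRpci or VLDRD/VLDRS).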
927 
928           // Increment corresponding CPEntry reference count.
929           CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
930           assert(CPE && "Cannot find a corresponding CPEntry!");
931           CPE->RefCount++;
932 
933           // Instructions can only use one CP entry, don't bother scanning the
934           // rest of the operands.
935           break;
936         }
937     }
938   }
939 }
940 
941 /// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB
942 /// ID.
943 static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
944                               const MachineBasicBlock *RHS) {
945   return LHS->getNumber() < RHS->getNumber();
946 }
947 
948 /// updateForInsertedWaterBlock - When a block is newly inserted into the
949 /// machine function, it upsets all of the block numbers.  Renumber the blocks
950 /// and update the arrays that parallel this numbering.
951 void ARMConstantIslands::updateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
952   // Renumber the MBB's to keep them consecutive.
953   NewBB->getParent()->RenumberBlocks(NewBB);
954   DT->updateBlockNumbers();
955 
956   // Insert an entry into BBInfo to align it properly with the (newly
957   // renumbered) block numbers.
958   BBUtils->insert(NewBB->getNumber(), BasicBlockInfo());
959 
960   // Next, update WaterList.  Specifically, we need to add NewMBB as having
961   // available water after it.
962   water_iterator IP = llvm::lower_bound(WaterList, NewBB, CompareMBBNumbers);
963   WaterList.insert(IP, NewBB);
964 }
965 
966 /// Split the basic block containing MI into two blocks, which are joined by
967 /// an unconditional branch.  Update data structures and renumber blocks to
968 /// account for this change, and return the newly created block.
969 MachineBasicBlock *ARMConstantIslands::splitBlockBeforeInstr(MachineInstr *MI) {
970   MachineBasicBlock *OrigBB = MI->getParent();
971 
972   // Collect liveness information at MI.
973   LivePhysRegs LRs(*MF->getSubtarget().getRegisterInfo());
974   LRs.addLiveOuts(*OrigBB);
975   auto LivenessEnd = ++MachineBasicBlock::iterator(MI).getReverse();
976   for (MachineInstr &LiveMI : make_range(OrigBB->rbegin(), LivenessEnd))
977     LRs.stepBackward(LiveMI);
978 
979   // Create a new MBB for the code after the OrigBB.
980   MachineBasicBlock *NewBB =
981     MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
982   MachineFunction::iterator MBBI = ++OrigBB->getIterator();
983   MF->insert(MBBI, NewBB);
984 
985   // Splice the instructions starting with MI over to NewBB.
986   NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());
987 
988   // Add an unconditional branch from OrigBB to NewBB.
989   // Note the new unconditional branch is not being recorded.
990   // There doesn't seem to be meaningful DebugInfo available; this doesn't
991   // correspond to anything in the source.
992   unsigned Opc = isThumb ? (isThumb2 ? ARM::t2B : ARM::tB) : ARM::B;
993   if (!isThumb)
994     BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB);
995   else
996     BuildMI(OrigBB, DebugLoc(), TII->get(Opc))
997         .addMBB(NewBB)
998         .add(predOps(ARMCC::AL));
999   ++NumSplit;
1000 
1001   // Update the CFG.  All succs of OrigBB are now succs of NewBB.
1002   NewBB->transferSuccessors(OrigBB);
1003 
1004   // OrigBB branches to NewBB.
1005   OrigBB->addSuccessor(NewBB);
1006 
1007   // Update live-in information in the new block.
1008   MachineRegisterInfo &MRI = MF->getRegInfo();
1009   for (MCPhysReg L : LRs)
1010     if (!MRI.isReserved(L))
1011       NewBB->addLiveIn(L);
1012 
1013   // Update internal data structures to account for the newly inserted MBB.
1014   // This is almost the same as updateForInsertedWaterBlock, except that
1015   // the Water goes after OrigBB, not NewBB.
1016   MF->RenumberBlocks(NewBB);
1017   DT->updateBlockNumbers();
1018 
1019   // Insert an entry into BBInfo to align it properly with the (newly
1020   // renumbered) block numbers.
1021   BBUtils->insert(NewBB->getNumber(), BasicBlockInfo());
1022 
1023   // Next, update WaterList.  Specifically, we need to add OrigMBB as having
1024   // available water after it (but not if it's already there, which happens
1025   // when splitting before a conditional branch that is followed by an
1026   // unconditional branch - in that case we want to insert NewBB).
1027   water_iterator IP = llvm::lower_bound(WaterList, OrigBB, CompareMBBNumbers);
1028   MachineBasicBlock* WaterBB = *IP;
1029   if (WaterBB == OrigBB)
1030     WaterList.insert(std::next(IP), NewBB);
1031   else
1032     WaterList.insert(IP, OrigBB);
1033   NewWaterList.insert(OrigBB);
1034 
1035   // Figure out how large the OrigBB is.  As the first half of the original
1036   // block, it cannot contain a tablejump.  The size includes
1037   // the new jump we added.  (It should be possible to do this without
1038   // recounting everything, but it's very confusing, and this is rarely
1039   // executed.)
1040   BBUtils->computeBlockSize(OrigBB);
1041 
1042   // Figure out how large the NewMBB is.  As the second half of the original
1043   // block, it may contain a tablejump.
1044   BBUtils->computeBlockSize(NewBB);
1045 
1046   // All BBOffsets following these blocks must be modified.
1047   BBUtils->adjustBBOffsetsAfter(OrigBB);
1048 
1049   return NewBB;
1050 }
1051 
1052 /// getUserOffset - Compute the offset of U.MI as seen by the hardware
1053 /// displacement computation.  Update U.KnownAlignment to match its current
1054 /// basic block location.
1055 unsigned ARMConstantIslands::getUserOffset(CPUser &U) const {
1056   unsigned UserOffset = BBUtils->getOffsetOf(U.MI);
1057 
1058   SmallVectorImpl<BasicBlockInfo> &BBInfo = BBUtils->getBBInfo();
1059   const BasicBlockInfo &BBI = BBInfo[U.MI->getParent()->getNumber()];
1060   unsigned KnownBits = BBI.internalKnownBits();
1061 
1062   // The value read from PC is offset from the actual instruction address.
1063   UserOffset += (isThumb ? 4 : 8);
1064 
1065   // Because of inline assembly, we may not know the alignment (mod 4) of U.MI.
1066   // Make sure U.getMaxDisp() returns a constrained range.
1067   U.KnownAlignment = (KnownBits >= 2);
1068 
1069   // On Thumb, offsets==2 mod 4 are rounded down by the hardware for
1070   // purposes of the displacement computation; compensate for that here.
1071   // For unknown alignments, getMaxDisp() constrains the range instead.
1072   if (isThumb && U.KnownAlignment)
1073     UserOffset &= ~3u;
1074 
1075   return UserOffset;
1076 }
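// Worked example for getUserOffset() above (illustrative): an ARM-mode user at
// offset 0x1000 reads PC as 0x1008, so a CPE at 0x1400 appears as +0x3F8 to
// the hardware. In Thumb mode with known alignment, a user at 0x1002 yields
// 0x1002 + 4 = 0x1006, rounded down to 0x1004 for the displacement.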
1077 
1078 /// isOffsetInRange - Checks whether UserOffset (the location of a constant pool
1079 /// reference) is within MaxDisp of TrialOffset (a proposed location of a
1080 /// constant pool entry).
1081 /// UserOffset is computed by getUserOffset above to include PC adjustments. If
1082 /// the mod 4 alignment of UserOffset is not known, the uncertainty must be
1083 /// subtracted from MaxDisp instead. CPUser::getMaxDisp() does that.
1084 bool ARMConstantIslands::isOffsetInRange(unsigned UserOffset,
1085                                          unsigned TrialOffset, unsigned MaxDisp,
1086                                          bool NegativeOK, bool IsSoImm) {
1087   if (UserOffset <= TrialOffset) {
1088     // User before the Trial.
1089     if (TrialOffset - UserOffset <= MaxDisp)
1090       return true;
1091     // FIXME: Make use full range of soimm values.
1092   } else if (NegativeOK) {
1093     if (UserOffset - TrialOffset <= MaxDisp)
1094       return true;
1095     // FIXME: Make use full range of soimm values.
1096   }
1097   return false;
1098 }
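// Worked example for isOffsetInRange() above (illustrative): with
// UserOffset = 0x104, TrialOffset = 0x500 and MaxDisp = 0x3FC (a tLDRpci),
// 0x500 - 0x104 == 0x3FC, so the trial is exactly in range. A TrialOffset of
// 0x100 would place the entry behind the user, acceptable only if NegativeOK.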
1099 
1100 /// isWaterInRange - Returns true if a CPE placed after the specified
1101 /// Water (a basic block) will be in range for the specific MI.
1102 ///
1103 /// Compute how much the function will grow by inserting a CPE after Water.
1104 bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
1105                                         MachineBasicBlock* Water, CPUser &U,
1106                                         unsigned &Growth) {
1107   BBInfoVector &BBInfo = BBUtils->getBBInfo();
1108   const Align CPEAlign = getCPEAlign(U.CPEMI);
1109   const unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPEAlign);
1110   unsigned NextBlockOffset;
1111   Align NextBlockAlignment;
1112   MachineFunction::const_iterator NextBlock = Water->getIterator();
1113   if (++NextBlock == MF->end()) {
1114     NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
1115   } else {
1116     NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
1117     NextBlockAlignment = NextBlock->getAlignment();
1118   }
1119   unsigned Size = U.CPEMI->getOperand(2).getImm();
1120   unsigned CPEEnd = CPEOffset + Size;
1121 
1122   // The CPE may be able to hide in the alignment padding before the next
1123   // block. It may also cause more padding to be required if it is more aligned
1124   // than the next block.
1125   if (CPEEnd > NextBlockOffset) {
1126     Growth = CPEEnd - NextBlockOffset;
1127     // Compute the padding that would go at the end of the CPE to align the next
1128     // block.
1129     Growth += offsetToAlignment(CPEEnd, NextBlockAlignment);
1130 
1131     // If the CPE is to be inserted before the instruction, that will raise
1132     // the offset of the instruction. Also account for unknown alignment padding
1133     // in blocks between CPE and the user.
1134     if (CPEOffset < UserOffset)
1135       UserOffset += Growth + UnknownPadding(MF->getAlignment(), Log2(CPEAlign));
1136   } else
1137     // CPE fits in existing padding.
1138     Growth = 0;
1139 
1140   return isOffsetInRange(UserOffset, CPEOffset, U);
1141 }
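// Worked example for isWaterInRange() above (illustrative): a CPE of Size = 8
// at CPEOffset = 0x100 with the next block at 0x104 overruns it by 4 bytes,
// so Growth = 4 plus whatever padding realigns the next block; if the next
// block started at 0x108 or later, the CPE would hide in the padding and
// Growth would be 0.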
1142 
1143 /// isCPEntryInRange - Returns true if the distance between the specified MI and
1144 /// the specified ConstPool entry instruction can fit in MI's displacement field.
1145 bool ARMConstantIslands::isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
1146                                       MachineInstr *CPEMI, unsigned MaxDisp,
1147                                       bool NegOk, bool DoDump) {
1148   unsigned CPEOffset = BBUtils->getOffsetOf(CPEMI);
1149 
1150   if (DoDump) {
1151     LLVM_DEBUG({
1152       BBInfoVector &BBInfo = BBUtils->getBBInfo();
1153       unsigned Block = MI->getParent()->getNumber();
1154       const BasicBlockInfo &BBI = BBInfo[Block];
1155       dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
1156              << " max delta=" << MaxDisp
1157              << format(" insn address=%#x", UserOffset) << " in "
1158              << printMBBReference(*MI->getParent()) << ": "
1159              << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI
1160              << format("CPE address=%#x offset=%+d: ", CPEOffset,
1161                        int(CPEOffset - UserOffset));
1162     });
1163   }
1164 
1165   return isOffsetInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
1166 }
1167 
1168 #ifndef NDEBUG
1169 /// BBIsJumpedOver - Return true if the specified basic block's only predecessor
1170 /// unconditionally branches to its only successor.
1171 static bool BBIsJumpedOver(MachineBasicBlock *MBB) {
1172   if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
1173     return false;
1174 
1175   MachineBasicBlock *Succ = *MBB->succ_begin();
1176   MachineBasicBlock *Pred = *MBB->pred_begin();
1177   MachineInstr *PredMI = &Pred->back();
1178   if (PredMI->getOpcode() == ARM::B || PredMI->getOpcode() == ARM::tB
1179       || PredMI->getOpcode() == ARM::t2B)
1180     return PredMI->getOperand(0).getMBB() == Succ;
1181   return false;
1182 }
1183 #endif // NDEBUG
1184 
1185 /// decrementCPEReferenceCount - find the constant pool entry with index CPI
1186 /// and instruction CPEMI, and decrement its refcount.  If the refcount
1187 /// becomes 0 remove the entry and instruction.  Returns true if we removed
1188 /// the entry, false if we didn't.
1189 bool ARMConstantIslands::decrementCPEReferenceCount(unsigned CPI,
1190                                                     MachineInstr *CPEMI) {
1191   // Find the old entry. Eliminate it if it is no longer used.
1192   CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
1193   assert(CPE && "Unexpected!");
1194   if (--CPE->RefCount == 0) {
1195     removeDeadCPEMI(CPEMI);
1196     CPE->CPEMI = nullptr;
1197     --NumCPEs;
1198     return true;
1199   }
1200   return false;
1201 }
1202 
1203 unsigned ARMConstantIslands::getCombinedIndex(const MachineInstr *CPEMI) {
1204   if (CPEMI->getOperand(1).isCPI())
1205     return CPEMI->getOperand(1).getIndex();
1206 
1207   return JumpTableEntryIndices[CPEMI->getOperand(1).getIndex()];
1208 }
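// E.g. (illustrative): for a CONSTPOOL_ENTRY, operand 1 is already a constant
// pool index, so the mapping is the identity. For a JUMPTABLE_* entry it is a
// jump table index, translated through JumpTableEntryIndices (populated by
// doInitialJumpTablePlacement) into its slot in CPEntries.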
1209 
1210 /// findInRangeCPEntry - see if the currently referenced CPE is in range;
1211 /// if not, see if an in-range clone of the CPE is in range, and if so,
1212 /// change the data structures so the user references the clone.  Returns:
1213 /// 0 = no existing entry found
1214 /// 1 = entry found, and there were no code insertions or deletions
1215 /// 2 = entry found, and there were code insertions or deletions
1216 int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset) {
1217   MachineInstr *UserMI = U.MI;
1218   MachineInstr *CPEMI  = U.CPEMI;
1219 
1220   // Check to see if the CPE is already in-range.
1221   if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp(), U.NegOk,
1222                        true)) {
1223     LLVM_DEBUG(dbgs() << "In range\n");
1224     return 1;
1225   }
1226 
1227   // No.  Look for previously created clones of the CPE that are in range.
1228   unsigned CPI = getCombinedIndex(CPEMI);
1229   std::vector<CPEntry> &CPEs = CPEntries[CPI];
1230   for (CPEntry &CPE : CPEs) {
1231     // We already tried this one
1232     if (CPE.CPEMI == CPEMI)
1233       continue;
1234     // Removing CPEs can leave empty entries, skip
1235     if (CPE.CPEMI == nullptr)
1236       continue;
1237     if (isCPEntryInRange(UserMI, UserOffset, CPE.CPEMI, U.getMaxDisp(),
1238                          U.NegOk)) {
1239       LLVM_DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#" << CPE.CPI
1240                         << "\n");
1241       // Point the CPUser node to the replacement
1242       U.CPEMI = CPE.CPEMI;
1243       // Change the CPI in the instruction operand to refer to the clone.
1244       for (MachineOperand &MO : UserMI->operands())
1245         if (MO.isCPI()) {
1246           MO.setIndex(CPE.CPI);
1247           break;
1248         }
1249       // Adjust the refcount of the clone...
1250       CPE.RefCount++;
1251       // ...and the original.  If we didn't remove the old entry, none of the
1252       // addresses changed, so we don't need another pass.
1253       return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
1254     }
1255   }
1256   return 0;
1257 }
1258 
1259 /// getUnconditionalBrDisp - Returns the maximum displacement that can fit in
1260 /// the specific unconditional branch instruction.
1261 static inline unsigned getUnconditionalBrDisp(int Opc) {
1262   switch (Opc) {
1263   case ARM::tB:
1264     return ((1<<10)-1)*2;
1265   case ARM::t2B:
1266     return ((1<<23)-1)*2;
1267   default:
1268     break;
1269   }
1270 
1271   return ((1<<23)-1)*4;
1272 }
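// E.g. (illustrative): tB encodes an 11-bit signed offset scaled by 2, hence
// ((1 << 10) - 1) * 2 = 2046 bytes; the default case covers ARM::B, whose
// 24-bit signed offset scaled by 4 allows ((1 << 23) - 1) * 4 bytes.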
1273 
1274 /// findAvailableWater - Look for an existing entry in the WaterList in which
1275 /// we can place the CPE referenced from U so it's within range of U's MI.
1276 /// Returns true if found, false if not.  If it returns true, WaterIter
1277 /// is set to the WaterList entry.  For Thumb, prefer water that will not
1278 /// introduce padding to water that will.  To ensure that this pass
1279 /// terminates, the CPE location for a particular CPUser is only allowed to
1280 /// move to a lower address, so search backward from the end of the list and
1281 /// prefer the first water that is in range.
1282 bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
1283                                             water_iterator &WaterIter,
1284                                             bool CloserWater) {
1285   if (WaterList.empty())
1286     return false;
1287 
1288   unsigned BestGrowth = ~0u;
1289   // The nearest water without splitting the UserBB is right after it.
1290   // If the distance is still large (we have a big BB), then we need to split it
1291   // if we don't converge after a certain number of iterations. This helps the following
1292   // situation to converge:
1293   //   BB0:
1294   //      Big BB
1295   //   BB1:
1296   //      Constant Pool
1297   // When a CP access is out of range, BB0 may be used as water. However,
1298   // inserting islands between BB0 and BB1 makes other accesses out of range.
1299   MachineBasicBlock *UserBB = U.MI->getParent();
1300   BBInfoVector &BBInfo = BBUtils->getBBInfo();
1301   const Align CPEAlign = getCPEAlign(U.CPEMI);
1302   unsigned MinNoSplitDisp = BBInfo[UserBB->getNumber()].postOffset(CPEAlign);
1303   if (CloserWater && MinNoSplitDisp > U.getMaxDisp() / 2)
1304     return false;
1305   for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();;
1306        --IP) {
1307     MachineBasicBlock* WaterBB = *IP;
1308     // Check if water is in range and is either at a lower address than the
1309     // current "high water mark" or a new water block that was created since
1310     // the previous iteration by inserting an unconditional branch.  In the
1311     // latter case, we want to allow resetting the high water mark back to
1312     // this new water since we haven't seen it before.  Inserting branches
1313     // should be relatively uncommon and when it does happen, we want to be
1314     // sure to take advantage of it for all the CPEs near that block, so that
1315     // we don't insert more branches than necessary.
1316     // When CloserWater is true, we try to find the lowest address after (or
1317     // equal to) the user MI's BB, regardless of padding growth.
1318     unsigned Growth;
1319     if (isWaterInRange(UserOffset, WaterBB, U, Growth) &&
1320         (WaterBB->getNumber() < U.HighWaterMark->getNumber() ||
1321          NewWaterList.count(WaterBB) || WaterBB == U.MI->getParent()) &&
1322         Growth < BestGrowth) {
1323       // This is the least amount of required padding seen so far.
1324       BestGrowth = Growth;
1325       WaterIter = IP;
1326       LLVM_DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB)
1327                         << " Growth=" << Growth << '\n');
1328 
1329       if (CloserWater && WaterBB == U.MI->getParent())
1330         return true;
1331       // Keep looking unless it is perfect and we're not looking for the lowest
1332       // possible address.
1333       if (!CloserWater && BestGrowth == 0)
1334         return true;
1335     }
1336     if (IP == B)
1337       break;
1338   }
1339   return BestGrowth != ~0u;
1340 }
1341 
1342 /// createNewWater - No existing WaterList entry will work for
1343 /// CPUsers[CPUserIndex], so create a place to put the CPE.  The end of the
1344 /// block is used if in range, and the conditional branch munged so control
1345 /// flow is correct.  Otherwise the block is split to create a hole with an
1346 /// unconditional branch around it.  In either case NewMBB is set to a
1347 /// block following which the new island can be inserted (the WaterList
1348 /// is not adjusted).
1349 void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
1350                                         unsigned UserOffset,
1351                                         MachineBasicBlock *&NewMBB) {
1352   CPUser &U = CPUsers[CPUserIndex];
1353   MachineInstr *UserMI = U.MI;
1354   MachineInstr *CPEMI  = U.CPEMI;
1355   const Align CPEAlign = getCPEAlign(CPEMI);
1356   MachineBasicBlock *UserMBB = UserMI->getParent();
1357   BBInfoVector &BBInfo = BBUtils->getBBInfo();
1358   const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
1359 
1360   // If the block does not end in an unconditional branch already, and if the
1361   // end of the block is within range, make new water there.  (The addition
1362   // below is for the unconditional branch we will be adding: 4 bytes on ARM +
1363   // Thumb2, 2 on Thumb1.)
1364   if (BBHasFallthrough(UserMBB)) {
1365     // Size of branch to insert.
1366     unsigned Delta = isThumb1 ? 2 : 4;
1367     // Compute the offset where the CPE will begin.
1368     unsigned CPEOffset = UserBBI.postOffset(CPEAlign) + Delta;
1369 
1370     if (isOffsetInRange(UserOffset, CPEOffset, U)) {
1371       LLVM_DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
1372                         << format(", expected CPE offset %#x\n", CPEOffset));
1373       NewMBB = &*++UserMBB->getIterator();
1374       // Add an unconditional branch from UserMBB to fallthrough block.  Record
1375       // it for branch lengthening; this new branch will not get out of range,
1376       // but if the preceding conditional branch is out of range, the targets
1377       // will be exchanged, and the altered branch may be out of range, so the
1378       // machinery has to know about it.
1379       int UncondBr = isThumb ? ((isThumb2) ? ARM::t2B : ARM::tB) : ARM::B;
1380       if (!isThumb)
1381         BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB);
1382       else
1383         BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr))
1384             .addMBB(NewMBB)
1385             .add(predOps(ARMCC::AL));
1386       unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
1387       ImmBranches.push_back(ImmBranch(&UserMBB->back(),
1388                                       MaxDisp, false, UncondBr));
1389       BBUtils->computeBlockSize(UserMBB);
1390       BBUtils->adjustBBOffsetsAfter(UserMBB);
1391       return;
1392     }
1393   }
1394 
1395   // What a big block.  Find a place within the block to split it.  This is a
1396   // little tricky on Thumb1 since instructions are 2 bytes and constant pool
1397   // entries are 4 bytes: if instruction I references island CPE, and
1398   // instruction I+1 references CPE', it will not work well to put CPE as far
1399   // forward as possible, since then CPE' cannot immediately follow it (that
1400   // location is 2 bytes farther away from I+1 than CPE was from I) and we'd
1401   // need to create a new island.  So, we make a first guess, then walk through
1402   // the instructions between the one currently being looked at and the
1403   // possible insertion point, and make sure any other instructions that
1404   // reference CPEs will be able to use the same island area; if not, we back
1405   // up the insertion point.
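  // A hypothetical illustration of the hazard above: if 2-byte instruction I
  // references CPE at I's maximum displacement, a CPE' needed by the 2-byte
  // instruction I+1 can start only after CPE's 4 bytes of data, i.e. 2 bytes
  // farther from I+1 than CPE was from I, so if I+1 was already at its limit
  // the entry would land out of range and force a separate island.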
1406 
1407   // Try to split the block so it's fully aligned.  Compute the latest split
1408   // point where we can add a 4-byte branch instruction, and then align to
1409   // Align, which is the largest possible alignment in the function.
1410   const Align Align = MF->getAlignment();
1411   assert(Align >= CPEAlign && "Over-aligned constant pool entry");
1412   unsigned KnownBits = UserBBI.internalKnownBits();
1413   unsigned UPad = UnknownPadding(Align, KnownBits);
1414   unsigned BaseInsertOffset = UserOffset + U.getMaxDisp() - UPad;
1415   LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
1416                               BaseInsertOffset));
1417 
1418   // The 4 in the following is for the unconditional branch we'll be inserting
1419   // (allowing for a long branch on Thumb1).  Alignment of the island is handled
1420   // inside isOffsetInRange.
1421   BaseInsertOffset -= 4;
1422 
1423   LLVM_DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
1424                     << " la=" << Log2(Align) << " kb=" << KnownBits
1425                     << " up=" << UPad << '\n');
1426 
1427   // This could point off the end of the block if we've already got constant
1428   // pool entries following this block; only the last one is in the water list.
1429   // Back past any possible branches (allow for a conditional and a maximally
1430   // long unconditional).
1431   if (BaseInsertOffset + 8 >= UserBBI.postOffset()) {
1432     // Ensure BaseInsertOffset is larger than the offset of the instruction
1433     // following UserMI so that the loop which searches for the split point
1434     // iterates at least once.
1435     BaseInsertOffset =
1436         std::max(UserBBI.postOffset() - UPad - 8,
1437                  UserOffset + TII->getInstSizeInBytes(*UserMI) + 1);
1438     // If the CP reference (ie, UserOffset) is within the first four
1439     // instructions after an IT, this recalculated BaseInsertOffset could be
1440     // in the middle of an IT block. If it is, move BaseInsertOffset to just
1441     // after the IT block. The CP entry remains in range for the
1442     // following reasons.
1443     //   1. The initial BaseInsertOffset calculated is (UserOffset +
1444     //   U.getMaxDisp() - UPad).
1445     //   2. An IT block is only at most 4 instructions plus the "it" itself (18
1446     //   bytes).
1447     //   3. All the relevant instructions support much larger maximum
1448     //   displacements.
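    //   As a rough worked bound (a sketch, not exact encoding limits): a
    //   Thumb2 user such as t2LDRpci can reach about 4095 bytes, so skipping
    //   at most 18 bytes of IT block cannot push the entry out of range.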
1449     MachineBasicBlock::iterator I = UserMI;
1450     ++I;
1451     Register PredReg;
1452     for (unsigned Offset = UserOffset + TII->getInstSizeInBytes(*UserMI);
1453          I->getOpcode() != ARM::t2IT &&
1454          getITInstrPredicate(*I, PredReg) != ARMCC::AL;
1455          Offset += TII->getInstSizeInBytes(*I), I = std::next(I)) {
1456       BaseInsertOffset =
1457           std::max(BaseInsertOffset, Offset + TII->getInstSizeInBytes(*I) + 1);
1458       assert(I != UserMBB->end() && "Fell off end of block");
1459     }
1460     LLVM_DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
1461   }
1462   unsigned EndInsertOffset = BaseInsertOffset + 4 + UPad +
1463     CPEMI->getOperand(2).getImm();
1464   MachineBasicBlock::iterator MI = UserMI;
1465   ++MI;
1466   unsigned CPUIndex = CPUserIndex+1;
1467   unsigned NumCPUsers = CPUsers.size();
1468   MachineInstr *LastIT = nullptr;
1469   for (unsigned Offset = UserOffset + TII->getInstSizeInBytes(*UserMI);
1470        Offset < BaseInsertOffset;
1471        Offset += TII->getInstSizeInBytes(*MI), MI = std::next(MI)) {
1472     assert(MI != UserMBB->end() && "Fell off end of block");
1473     if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == &*MI) {
1474       CPUser &U = CPUsers[CPUIndex];
1475       if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
1476         // Shift insertion point by one unit of alignment so it is within reach.
1477         BaseInsertOffset -= Align.value();
1478         EndInsertOffset -= Align.value();
1479       }
1480       // This is overly conservative, as we don't account for CPEMIs being
1481       // reused within the block, but it doesn't matter much.  Also assume CPEs
1482       // are added in order with alignment padding.  We may eventually be able
1483       // to pack the aligned CPEs better.
1484       EndInsertOffset += U.CPEMI->getOperand(2).getImm();
1485       CPUIndex++;
1486     }
1487 
1488     // Remember the last IT instruction.
1489     if (MI->getOpcode() == ARM::t2IT)
1490       LastIT = &*MI;
1491   }
1492 
1493   --MI;
1494 
1495   // Avoid splitting an IT block.
1496   if (LastIT) {
1497     Register PredReg;
1498     ARMCC::CondCodes CC = getITInstrPredicate(*MI, PredReg);
1499     if (CC != ARMCC::AL)
1500       MI = LastIT;
1501   }
1502 
1503   // Avoid splitting a MOVW+MOVT pair with a relocation on Windows.
1504   // On Windows, this instruction pair is covered by a single
1505   // IMAGE_REL_ARM_MOV32T relocation which covers both instructions. If a
1506   // constant island is injected in between them, the relocation will clobber
1507   // the instruction and fail to update the MOVT instruction.
1508   // (These instructions are bundled up until right before the ConstantIslands
1509   // pass.)
1510   if (STI->isTargetWindows() && isThumb && MI->getOpcode() == ARM::t2MOVTi16 &&
1511       (MI->getOperand(2).getTargetFlags() & ARMII::MO_OPTION_MASK) ==
1512           ARMII::MO_HI16) {
1513     --MI;
1514     assert(MI->getOpcode() == ARM::t2MOVi16 &&
1515            (MI->getOperand(1).getTargetFlags() & ARMII::MO_OPTION_MASK) ==
1516                ARMII::MO_LO16);
1517   }
1518 
1519   // We really must not split an IT block.
1520 #ifndef NDEBUG
1521   Register PredReg;
1522   assert(!isThumb || getITInstrPredicate(*MI, PredReg) == ARMCC::AL);
1523 #endif
1524   NewMBB = splitBlockBeforeInstr(&*MI);
1525 }
1526 
1527 /// handleConstantPoolUser - Analyze the specified user, checking to see if it
1528 /// is out-of-range.  If so, pick up the constant pool value and move it some
1529 /// place in-range.  Return true if we changed any addresses (thus must run
1530 /// another pass of branch lengthening), false otherwise.
1531 bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
1532                                                 bool CloserWater) {
1533   CPUser &U = CPUsers[CPUserIndex];
1534   MachineInstr *UserMI = U.MI;
1535   MachineInstr *CPEMI  = U.CPEMI;
1536   unsigned CPI = getCombinedIndex(CPEMI);
1537   unsigned Size = CPEMI->getOperand(2).getImm();
1538   // Compute this only once, it's expensive.
1539   unsigned UserOffset = getUserOffset(U);
1540 
1541   // See if the current entry is within range, or there is a clone of it
1542   // in range.
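  // (For reference: findInRangeCPEntry above returns 0 when nothing is in
  // range, 1 when the user is already served by an in-range entry and no
  // addresses changed, and 2 when it was redirected to a clone and the
  // original entry was removed, so another pass is needed.)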
1543   int result = findInRangeCPEntry(U, UserOffset);
1544   if (result==1) return false;
1545   else if (result==2) return true;
1546 
1547   // No existing clone of this CPE is within range.
1548   // We will be generating a new clone.  Get a UID for it.
1549   unsigned ID = AFI->createPICLabelUId();
1550 
1551   // Look for water where we can place this CPE.
1552   MachineBasicBlock *NewIsland = MF->CreateMachineBasicBlock();
1553   MachineBasicBlock *NewMBB;
1554   water_iterator IP;
1555   if (findAvailableWater(U, UserOffset, IP, CloserWater)) {
1556     LLVM_DEBUG(dbgs() << "Found water in range\n");
1557     MachineBasicBlock *WaterBB = *IP;
1558 
1559     // If the original WaterList entry was "new water" on this iteration,
1560     // propagate that to the new island.  This is just keeping NewWaterList
1561     // updated to match the WaterList, which will be updated below.
1562     if (NewWaterList.erase(WaterBB))
1563       NewWaterList.insert(NewIsland);
1564 
1565     // The new CPE goes before the following block (NewMBB).
1566     NewMBB = &*++WaterBB->getIterator();
1567   } else {
1568     // No water found.
1569     LLVM_DEBUG(dbgs() << "No water found\n");
1570     createNewWater(CPUserIndex, UserOffset, NewMBB);
1571 
1572     // splitBlockBeforeInstr adds to WaterList, which is important when it is
1573     // called while handling branches so that the water will be seen on the
1574     // next iteration for constant pools, but in this context, we don't want
1575     // it.  Check for this so it will be removed from the WaterList.
1576     // Also remove any entry from NewWaterList.
1577     MachineBasicBlock *WaterBB = &*--NewMBB->getIterator();
1578     IP = find(WaterList, WaterBB);
1579     if (IP != WaterList.end())
1580       NewWaterList.erase(WaterBB);
1581 
1582     // We are adding new water.  Update NewWaterList.
1583     NewWaterList.insert(NewIsland);
1584   }
1585   // Always align the new block because CP entries can be smaller than 4
1586   // bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may
1587   // be an already aligned constant pool block.
1588   const Align Alignment = isThumb ? Align(2) : Align(4);
1589   if (NewMBB->getAlignment() < Alignment)
1590     NewMBB->setAlignment(Alignment);
1591 
1592   // Remove the original WaterList entry; we want subsequent insertions in
1593   // this vicinity to go after the one we're about to insert.  This
1594   // considerably reduces the number of times we have to move the same CPE
1595   // more than once and is also important to ensure the algorithm terminates.
1596   if (IP != WaterList.end())
1597     WaterList.erase(IP);
1598 
1599   // Okay, we know we can put an island before NewMBB now, do it!
1600   MF->insert(NewMBB->getIterator(), NewIsland);
1601 
1602   // Update internal data structures to account for the newly inserted MBB.
1603   updateForInsertedWaterBlock(NewIsland);
1604 
1605   // Now that we have an island to add the CPE to, clone the original CPE and
1606   // add it to the island.
1607   U.HighWaterMark = NewIsland;
1608   U.CPEMI = BuildMI(NewIsland, DebugLoc(), CPEMI->getDesc())
1609                 .addImm(ID)
1610                 .add(CPEMI->getOperand(1))
1611                 .addImm(Size);
1612   CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
1613   ++NumCPEs;
1614 
1615   // Decrement the old entry, and remove it if refcount becomes 0.
1616   decrementCPEReferenceCount(CPI, CPEMI);
1617 
1618   // Mark the basic block as aligned as required by the const-pool entry.
1619   NewIsland->setAlignment(getCPEAlign(U.CPEMI));
1620 
1621   // Increase the size of the island block to account for the new entry.
1622   BBUtils->adjustBBSize(NewIsland, Size);
1623   BBUtils->adjustBBOffsetsAfter(&*--NewIsland->getIterator());
1624 
1625   // Finally, change the CPI in the instruction operand to be ID.
1626   for (MachineOperand &MO : UserMI->operands())
1627     if (MO.isCPI()) {
1628       MO.setIndex(ID);
1629       break;
1630     }
1631 
1632   LLVM_DEBUG(
1633       dbgs() << "  Moved CPE to #" << ID << " CPI=" << CPI
1634              << format(" offset=%#x\n",
1635                        BBUtils->getBBInfo()[NewIsland->getNumber()].Offset));
1636 
1637   return true;
1638 }
1639 
1640 /// removeDeadCPEMI - Remove a dead constant pool entry instruction. Update
1641 /// sizes and offsets of impacted basic blocks.
1642 void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
1643   MachineBasicBlock *CPEBB = CPEMI->getParent();
1644   unsigned Size = CPEMI->getOperand(2).getImm();
1645   CPEMI->eraseFromParent();
1646   BBInfoVector &BBInfo = BBUtils->getBBInfo();
1647   BBUtils->adjustBBSize(CPEBB, -Size);
1648   // All succeeding offsets have the current size value added in, fix this.
1649   if (CPEBB->empty()) {
1650     BBInfo[CPEBB->getNumber()].Size = 0;
1651 
1652     // This block no longer needs to be aligned.
1653     CPEBB->setAlignment(Align(1));
1654   } else {
1655     // Entries are sorted by descending alignment, so realign from the front.
1656     CPEBB->setAlignment(getCPEAlign(&*CPEBB->begin()));
1657   }
1658 
1659   BBUtils->adjustBBOffsetsAfter(CPEBB);
1660   // An island has only one predecessor BB and one successor BB. Check if
1661   // this BB's predecessor jumps directly to this BB's successor. This
1662   // shouldn't happen currently.
1663   assert(!BBIsJumpedOver(CPEBB) && "How did this happen?");
1664   // FIXME: remove the empty blocks after all the work is done?
1665 }
1666 
1667 /// removeUnusedCPEntries - Remove constant pool entries whose refcounts
1668 /// are zero.
1669 bool ARMConstantIslands::removeUnusedCPEntries() {
1670   bool MadeChange = false;
1671   for (std::vector<CPEntry> &CPEs : CPEntries) {
1672     for (CPEntry &CPE : CPEs) {
1673       if (CPE.RefCount == 0 && CPE.CPEMI) {
1674         removeDeadCPEMI(CPE.CPEMI);
1675         CPE.CPEMI = nullptr;
1676         MadeChange = true;
1677       }
1678     }
1679   }
1680   return MadeChange;
1681 }
1682 
1683 
1684 /// fixupImmediateBr - Fix up an immediate branch whose destination is too far
1685 /// away to fit in its displacement field.
1686 bool ARMConstantIslands::fixupImmediateBr(ImmBranch &Br) {
1687   MachineInstr *MI = Br.MI;
1688   MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
1689 
1690   // Check to see if the DestBB is already in-range.
1691   if (BBUtils->isBBInRange(MI, DestBB, Br.MaxDisp))
1692     return false;
1693 
1694   if (!Br.isCond)
1695     return fixupUnconditionalBr(Br);
1696   return fixupConditionalBr(Br);
1697 }
1698 
1699 /// fixupUnconditionalBr - Fix up an unconditional branch whose destination is
1700 /// too far away to fit in its displacement field. If the LR register has been
1701 /// spilled in the epilogue, then we can use BL to implement a far jump.
1702 /// Otherwise the function size was underestimated and a fatal error is raised.
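/// As a sketch of the range math used below: tBfar is later expanded to a
/// Thumb1 BL-style branch whose 22-bit signed halfword offset yields the
/// (1 << 21) * 2 byte maximum displacement assigned to Br.MaxDisp.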
1703 bool
1704 ARMConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
1705   MachineInstr *MI = Br.MI;
1706   MachineBasicBlock *MBB = MI->getParent();
1707   if (!isThumb1)
1708     llvm_unreachable("fixupUnconditionalBr is Thumb1 only!");
1709 
1710   if (!AFI->isLRSpilled())
1711     report_fatal_error("underestimated function size");
1712 
1713   // Use BL to implement far jump.
1714   Br.MaxDisp = (1 << 21) * 2;
1715   MI->setDesc(TII->get(ARM::tBfar));
1716   BBInfoVector &BBInfo = BBUtils->getBBInfo();
1717   BBInfo[MBB->getNumber()].Size += 2;
1718   BBUtils->adjustBBOffsetsAfter(MBB);
1719   ++NumUBrFixed;
1720 
1721   LLVM_DEBUG(dbgs() << "  Changed B to long jump " << *MI);
1722 
1723   return true;
1724 }
1725 
1726 /// fixupConditionalBr - Fix up a conditional branch whose destination is too
1727 /// far away to fit in its displacement field. It is converted to an inverse
1728 /// conditional branch + an unconditional branch to the destination.
1729 bool
1730 ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
1731   MachineInstr *MI = Br.MI;
1732   MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
1733 
1734   // Add an unconditional branch to the destination and invert the branch
1735   // condition to jump over it:
1736   // blt L1
1737   // =>
1738   // bge L2
1739   // b   L1
1740   // L2:
1741   ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(1).getImm();
1742   CC = ARMCC::getOppositeCondition(CC);
1743   Register CCReg = MI->getOperand(2).getReg();
1744 
1745   // If the branch is at the end of its MBB and that has a fall-through block,
1746   // direct the updated conditional branch to the fall-through block. Otherwise,
1747   // split the MBB before the next instruction.
1748   MachineBasicBlock *MBB = MI->getParent();
1749   MachineInstr *BMI = &MBB->back();
1750   bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
1751 
1752   ++NumCBrFixed;
1753   if (BMI != MI) {
1754     if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
1755         BMI->getOpcode() == Br.UncondBr) {
1756       // Last MI in the BB is an unconditional branch. Can we simply invert the
1757       // condition and swap destinations:
1758       // beq L1
1759       // b   L2
1760       // =>
1761       // bne L2
1762       // b   L1
1763       MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
1764       if (BBUtils->isBBInRange(MI, NewDest, Br.MaxDisp)) {
1765         LLVM_DEBUG(
1766             dbgs() << "  Invert Bcc condition and swap its destination with "
1767                    << *BMI);
1768         BMI->getOperand(0).setMBB(DestBB);
1769         MI->getOperand(0).setMBB(NewDest);
1770         MI->getOperand(1).setImm(CC);
1771         return true;
1772       }
1773     }
1774   }
1775 
1776   if (NeedSplit) {
1777     splitBlockBeforeInstr(MI);
1778     // No need for the branch to the next block. We're adding an unconditional
1779     // branch to the destination.
1780     int delta = TII->getInstSizeInBytes(MBB->back());
1781     BBUtils->adjustBBSize(MBB, -delta);
1782     MBB->back().eraseFromParent();
1783 
1784     // The conditional successor will be swapped between the BBs after this, so
1785     // update CFG.
1786     MBB->addSuccessor(DestBB);
1787     std::next(MBB->getIterator())->removeSuccessor(DestBB);
1788 
1789     // BBInfo[SplitBB].Offset is wrong temporarily, fixed below
1790   }
1791   MachineBasicBlock *NextBB = &*++MBB->getIterator();
1792 
1793   LLVM_DEBUG(dbgs() << "  Insert B to " << printMBBReference(*DestBB)
1794                     << " also invert condition and change dest. to "
1795                     << printMBBReference(*NextBB) << "\n");
1796 
1797   // Insert a new conditional branch and a new unconditional branch.
1798   // Also update the ImmBranch as well as adding a new entry for the new branch.
1799   BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
1800     .addMBB(NextBB).addImm(CC).addReg(CCReg);
1801   Br.MI = &MBB->back();
1802   BBUtils->adjustBBSize(MBB, TII->getInstSizeInBytes(MBB->back()));
1803   if (isThumb)
1804     BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr))
1805         .addMBB(DestBB)
1806         .add(predOps(ARMCC::AL));
1807   else
1808     BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
1809   BBUtils->adjustBBSize(MBB, TII->getInstSizeInBytes(MBB->back()));
1810   unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
1811   ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));
1812 
1813   // Remove the old conditional branch.  It may or may not still be in MBB.
1814   BBUtils->adjustBBSize(MI->getParent(), -TII->getInstSizeInBytes(*MI));
1815   MI->eraseFromParent();
1816   BBUtils->adjustBBOffsetsAfter(MBB);
1817   return true;
1818 }
1819 
1820 bool ARMConstantIslands::optimizeThumb2Instructions() {
1821   bool MadeChange = false;
1822 
1823   // Shrink ADR and LDR from constantpool.
1824   for (CPUser &U : CPUsers) {
1825     unsigned Opcode = U.MI->getOpcode();
1826     unsigned NewOpc = 0;
1827     unsigned Scale = 1;
1828     unsigned Bits = 0;
1829     switch (Opcode) {
1830     default: break;
1831     case ARM::t2LEApcrel:
1832       if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
1833         NewOpc = ARM::tLEApcrel;
1834         Bits = 8;
1835         Scale = 4;
1836       }
1837       break;
1838     case ARM::t2LDRpci:
1839       if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
1840         NewOpc = ARM::tLDRpci;
1841         Bits = 8;
1842         Scale = 4;
1843       }
1844       break;
1845     }
1846 
1847     if (!NewOpc)
1848       continue;
1849 
1850     unsigned UserOffset = getUserOffset(U);
1851     unsigned MaxOffs = ((1 << Bits) - 1) * Scale;
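    // For example, in both cases above Bits = 8 and Scale = 4, so MaxOffs is
    // 255 * 4 = 1020 bytes, the reach of the Thumb1 pc-relative forms.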
1852 
1853     // Be conservative with inline asm.
1854     if (!U.KnownAlignment)
1855       MaxOffs -= 2;
1856 
1857     // FIXME: Check if offset is multiple of scale if scale is not 4.
1858     if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, MaxOffs, false, true)) {
1859       LLVM_DEBUG(dbgs() << "Shrink: " << *U.MI);
1860       U.MI->setDesc(TII->get(NewOpc));
1861       MachineBasicBlock *MBB = U.MI->getParent();
1862       BBUtils->adjustBBSize(MBB, -2);
1863       BBUtils->adjustBBOffsetsAfter(MBB);
1864       ++NumT2CPShrunk;
1865       MadeChange = true;
1866     }
1867   }
1868 
1869   return MadeChange;
1870 }
1871 
1872 
1873 bool ARMConstantIslands::optimizeThumb2Branches() {
1874 
1875   auto TryShrinkBranch = [this](ImmBranch &Br) {
1876     unsigned Opcode = Br.MI->getOpcode();
1877     unsigned NewOpc = 0;
1878     unsigned Scale = 1;
1879     unsigned Bits = 0;
1880     switch (Opcode) {
1881     default: break;
1882     case ARM::t2B:
1883       NewOpc = ARM::tB;
1884       Bits = 11;
1885       Scale = 2;
1886       break;
1887     case ARM::t2Bcc:
1888       NewOpc = ARM::tBcc;
1889       Bits = 8;
1890       Scale = 2;
1891       break;
1892     }
1893     if (NewOpc) {
1894       unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
1895       MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
1896       if (BBUtils->isBBInRange(Br.MI, DestBB, MaxOffs)) {
1897         LLVM_DEBUG(dbgs() << "Shrink branch: " << *Br.MI);
1898         Br.MI->setDesc(TII->get(NewOpc));
1899         MachineBasicBlock *MBB = Br.MI->getParent();
1900         BBUtils->adjustBBSize(MBB, -2);
1901         BBUtils->adjustBBOffsetsAfter(MBB);
1902         ++NumT2BrShrunk;
1903         return true;
1904       }
1905     }
1906     return false;
1907   };
1908 
1909   struct ImmCompare {
1910     MachineInstr* MI = nullptr;
1911     unsigned NewOpc = 0;
1912   };
1913 
1914   auto FindCmpForCBZ = [this](ImmBranch &Br, ImmCompare &ImmCmp,
1915                               MachineBasicBlock *DestBB) {
1916     ImmCmp.MI = nullptr;
1917     ImmCmp.NewOpc = 0;
1918 
1919     // If the conditional branch doesn't kill CPSR, then CPSR can be liveout
1920     // so this transformation is not safe.
1921     if (!Br.MI->killsRegister(ARM::CPSR, /*TRI=*/nullptr))
1922       return false;
1923 
1924     Register PredReg;
1925     unsigned NewOpc = 0;
1926     ARMCC::CondCodes Pred = getInstrPredicate(*Br.MI, PredReg);
1927     if (Pred == ARMCC::EQ)
1928       NewOpc = ARM::tCBZ;
1929     else if (Pred == ARMCC::NE)
1930       NewOpc = ARM::tCBNZ;
1931     else
1932       return false;
1933 
1934     // Check if the distance is within 126. Subtract 2 from the starting
1935     // offset because the cmp will be eliminated.
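    // (CBZ/CBNZ encode an unsigned 6-bit immediate scaled by 2, so they can
    // only branch forward, by at most 126 bytes.)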
1936     unsigned BrOffset = BBUtils->getOffsetOf(Br.MI) + 4 - 2;
1937     BBInfoVector &BBInfo = BBUtils->getBBInfo();
1938     unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
1939     if (BrOffset >= DestOffset || (DestOffset - BrOffset) > 126)
1940       return false;
1941 
1942     // Search backwards to find a tCMPi8
1943     auto *TRI = STI->getRegisterInfo();
1944     MachineInstr *CmpMI = findCMPToFoldIntoCBZ(Br.MI, TRI);
1945     if (!CmpMI || CmpMI->getOpcode() != ARM::tCMPi8)
1946       return false;
1947 
1948     ImmCmp.MI = CmpMI;
1949     ImmCmp.NewOpc = NewOpc;
1950     return true;
1951   };
1952 
1953   auto TryConvertToLE = [this](ImmBranch &Br, ImmCompare &Cmp) {
1954     if (Br.MI->getOpcode() != ARM::t2Bcc || !STI->hasLOB() ||
1955         STI->hasMinSize())
1956       return false;
1957 
1958     MachineBasicBlock *MBB = Br.MI->getParent();
1959     MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
1960     if (BBUtils->getOffsetOf(MBB) < BBUtils->getOffsetOf(DestBB) ||
1961         !BBUtils->isBBInRange(Br.MI, DestBB, 4094))
1962       return false;
1963 
1964     if (!DT->dominates(DestBB, MBB))
1965       return false;
1966 
1967     // We queried for the CBN?Z opcode based upon the 'ExitBB', the opposite
1968     // target of Br. So now we need to reverse the condition.
1969     Cmp.NewOpc = Cmp.NewOpc == ARM::tCBZ ? ARM::tCBNZ : ARM::tCBZ;
1970 
1971     MachineInstrBuilder MIB = BuildMI(*MBB, Br.MI, Br.MI->getDebugLoc(),
1972                                       TII->get(ARM::t2LE));
1973     // Swapped a t2Bcc for a t2LE, so no need to update the size of the block.
1974     MIB.add(Br.MI->getOperand(0));
1975     Br.MI->eraseFromParent();
1976     Br.MI = MIB;
1977     ++NumLEInserted;
1978     return true;
1979   };
1980 
1981   bool MadeChange = false;
1982 
1983   // The order in which branches appear in ImmBranches is approximately their
1984   // order within the function body. By visiting later branches first, we reduce
1985   // the distance between earlier forward branches and their targets, making it
1986   // more likely that the cbn?z optimization, which can only apply to forward
1987   // branches, will succeed.
1988   for (ImmBranch &Br : reverse(ImmBranches)) {
1989     MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
1990     MachineBasicBlock *MBB = Br.MI->getParent();
1991     MachineBasicBlock *ExitBB = &MBB->back() == Br.MI ?
1992       MBB->getFallThrough() :
1993       MBB->back().getOperand(0).getMBB();
1994 
1995     ImmCompare Cmp;
1996     if (FindCmpForCBZ(Br, Cmp, ExitBB) && TryConvertToLE(Br, Cmp)) {
1997       DestBB = ExitBB;
1998       MadeChange = true;
1999     } else {
2000       FindCmpForCBZ(Br, Cmp, DestBB);
2001       MadeChange |= TryShrinkBranch(Br);
2002     }
2003 
2004     unsigned Opcode = Br.MI->getOpcode();
2005     if ((Opcode != ARM::tBcc && Opcode != ARM::t2LE) || !Cmp.NewOpc)
2006       continue;
2007 
2008     Register Reg = Cmp.MI->getOperand(0).getReg();
2009 
2010     // Check for Kill flags on Reg. If they are present remove them and set kill
2011     // on the new CBZ.
2012     auto *TRI = STI->getRegisterInfo();
2013     MachineBasicBlock::iterator KillMI = Br.MI;
2014     bool RegKilled = false;
2015     do {
2016       --KillMI;
2017       if (KillMI->killsRegister(Reg, TRI)) {
2018         KillMI->clearRegisterKills(Reg, TRI);
2019         RegKilled = true;
2020         break;
2021       }
2022     } while (KillMI != Cmp.MI);
2023 
2024     // Create the new CBZ/CBNZ
2025     LLVM_DEBUG(dbgs() << "Fold: " << *Cmp.MI << " and: " << *Br.MI);
2026     MachineInstr *NewBR =
2027         BuildMI(*MBB, Br.MI, Br.MI->getDebugLoc(), TII->get(Cmp.NewOpc))
2028             .addReg(Reg, getKillRegState(RegKilled) |
2029                              getRegState(Cmp.MI->getOperand(0)))
2030             .addMBB(DestBB, Br.MI->getOperand(0).getTargetFlags());
2031 
2032     Cmp.MI->eraseFromParent();
2033 
2034     if (Br.MI->getOpcode() == ARM::tBcc) {
2035       Br.MI->eraseFromParent();
2036       Br.MI = NewBR;
2037       BBUtils->adjustBBSize(MBB, -2);
2038     } else if (MBB->back().getOpcode() != ARM::t2LE) {
2039       // An LE has been generated, but it's not the terminator - that is an
2040       // unconditional branch. However, the logic has now been reversed with the
2041       // CBN?Z being the conditional branch and the LE being the unconditional
2042       // branch. So this means we can remove the redundant unconditional branch
2043       // at the end of the block.
2044       MachineInstr *LastMI = &MBB->back();
2045       BBUtils->adjustBBSize(MBB, -LastMI->getDesc().getSize());
2046       LastMI->eraseFromParent();
2047     }
2048     BBUtils->adjustBBOffsetsAfter(MBB);
2049     ++NumCBZ;
2050     MadeChange = true;
2051   }
2052 
2053   return MadeChange;
2054 }
2055 
2056 static bool isSimpleIndexCalc(MachineInstr &I, unsigned EntryReg,
2057                               unsigned BaseReg) {
2058   if (I.getOpcode() != ARM::t2ADDrs)
2059     return false;
2060 
2061   if (I.getOperand(0).getReg() != EntryReg)
2062     return false;
2063 
2064   if (I.getOperand(1).getReg() != BaseReg)
2065     return false;
2066 
2067   // FIXME: what about CC and IdxReg?
2068   return true;
2069 }
2070 
2071 /// While trying to form a TBB/TBH instruction, we may (if the table
2072 /// doesn't immediately follow the BR_JT) need access to the start of the
2073 /// jump-table. We know one instruction that produces such a register; this
2074 /// function works out whether that definition can be preserved to the BR_JT,
2075 /// possibly by removing an intervening addition (which is usually needed to
2076 /// calculate the actual entry to jump to).
2077 bool ARMConstantIslands::preserveBaseRegister(MachineInstr *JumpMI,
2078                                               MachineInstr *LEAMI,
2079                                               unsigned &DeadSize,
2080                                               bool &CanDeleteLEA,
2081                                               bool &BaseRegKill) {
2082   if (JumpMI->getParent() != LEAMI->getParent())
2083     return false;
2084 
2085   // Now we hope that we have at least these instructions in the basic block:
2086   //     BaseReg = t2LEA ...
2087   //     [...]
2088   //     EntryReg = t2ADDrs BaseReg, ...
2089   //     [...]
2090   //     t2BR_JT EntryReg
2091   //
2092   // We have to be very conservative about what we recognise here though. The
2093   // main perturbing factors to watch out for are:
2094   //    + Spills at any point in the chain: not direct problems but we would
2095   //      expect a blocking Def of the spilled register so in practice what we
2096   //      can do is limited.
2097   //    + EntryReg == BaseReg: this is the one situation we should allow a Def
2098   //      of BaseReg, but only if the t2ADDrs can be removed.
2099   //    + Some instruction other than t2ADDrs computing the entry. Not seen in
2100   //      the wild, but we should be careful.
2101   Register EntryReg = JumpMI->getOperand(0).getReg();
2102   Register BaseReg = LEAMI->getOperand(0).getReg();
2103 
2104   CanDeleteLEA = true;
2105   BaseRegKill = false;
2106   MachineInstr *RemovableAdd = nullptr;
2107   MachineBasicBlock::iterator I(LEAMI);
2108   for (++I; &*I != JumpMI; ++I) {
2109     if (isSimpleIndexCalc(*I, EntryReg, BaseReg)) {
2110       RemovableAdd = &*I;
2111       break;
2112     }
2113 
2114     for (const MachineOperand &MO : I->operands()) {
2115       if (!MO.isReg() || !MO.getReg())
2116         continue;
2117       if (MO.isDef() && MO.getReg() == BaseReg)
2118         return false;
2119       if (MO.isUse() && MO.getReg() == BaseReg) {
2120         BaseRegKill = BaseRegKill || MO.isKill();
2121         CanDeleteLEA = false;
2122       }
2123     }
2124   }
2125 
2126   if (!RemovableAdd)
2127     return true;
2128 
2129   // Check the add really is removable, and that nothing else in the block
2130   // clobbers BaseReg.
2131   for (++I; &*I != JumpMI; ++I) {
2132     for (const MachineOperand &MO : I->operands()) {
2133       if (!MO.isReg() || !MO.getReg())
2134         continue;
2135       if (MO.isDef() && MO.getReg() == BaseReg)
2136         return false;
2137       if (MO.isUse() && MO.getReg() == EntryReg)
2138         RemovableAdd = nullptr;
2139     }
2140   }
2141 
2142   if (RemovableAdd) {
2143     RemovableAdd->eraseFromParent();
2144     DeadSize += isThumb2 ? 4 : 2;
2145   } else if (BaseReg == EntryReg) {
2146     // The add wasn't removable, but clobbered the base for the TBB. So we can't
2147     // preserve it.
2148     return false;
2149   }
2150 
2151   // We reached the end of the block without seeing another definition of
2152   // BaseReg (except, possibly the t2ADDrs, which was removed). BaseReg can be
2153   // used in the TBB/TBH if necessary.
2154   return true;
2155 }
2156 
2157 /// Returns whether CPEMI is the first instruction in the block
2158 /// immediately following JTMI (assumed to be a TBB or TBH terminator). If so,
2159 /// we can switch the first register to PC and usually remove the address
2160 /// calculation that preceded it.
2161 static bool jumpTableFollowsTB(MachineInstr *JTMI, MachineInstr *CPEMI) {
2162   MachineFunction::iterator MBB = JTMI->getParent()->getIterator();
2163   MachineFunction *MF = MBB->getParent();
2164   ++MBB;
2165 
2166   return MBB != MF->end() && !MBB->empty() && &*MBB->begin() == CPEMI;
2167 }
2168 
2169 static void RemoveDeadAddBetweenLEAAndJT(MachineInstr *LEAMI,
2170                                          MachineInstr *JumpMI,
2171                                          unsigned &DeadSize) {
2172   // Remove a dead add between the LEA and JT, which used to compute EntryReg,
2173   // but the JT now uses PC. Finds the last ADD (if any) that def's EntryReg
2174   // and is not clobbered / used.
2175   MachineInstr *RemovableAdd = nullptr;
2176   Register EntryReg = JumpMI->getOperand(0).getReg();
2177 
2178   // Find the last ADD to set EntryReg
2179   MachineBasicBlock::iterator I(LEAMI);
2180   for (++I; &*I != JumpMI; ++I) {
2181     if (I->getOpcode() == ARM::t2ADDrs && I->getOperand(0).getReg() == EntryReg)
2182       RemovableAdd = &*I;
2183   }
2184 
2185   if (!RemovableAdd)
2186     return;
2187 
2188   // Ensure EntryReg is not clobbered or used.
2189   MachineBasicBlock::iterator J(RemovableAdd);
2190   for (++J; &*J != JumpMI; ++J) {
2191     for (const MachineOperand &MO : J->operands()) {
2192       if (!MO.isReg() || !MO.getReg())
2193         continue;
2194       if (MO.isDef() && MO.getReg() == EntryReg)
2195         return;
2196       if (MO.isUse() && MO.getReg() == EntryReg)
2197         return;
2198     }
2199   }
2200 
2201   LLVM_DEBUG(dbgs() << "Removing Dead Add: " << *RemovableAdd);
2202   RemovableAdd->eraseFromParent();
2203   DeadSize += 4;
2204 }
2205 
2206 /// optimizeThumb2JumpTables - Use tbb / tbh instructions to generate smaller
2207 /// jumptables when it's possible.
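/// As a hedged sketch of the encodings involved: TBB loads an unsigned byte
/// from the table and branches forward by twice its value, reaching up to
/// ((1<<8)-1)*2 bytes; TBH does the same with an unsigned halfword, reaching
/// ((1<<16)-1)*2 bytes. This is the ByteOk / HalfWordOk test below, and also
/// why table targets must not precede the jump.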
2208 bool ARMConstantIslands::optimizeThumb2JumpTables() {
2209   bool MadeChange = false;
2210 
2211   // FIXME: After the tables are shrunk, can we get rid of some of the
2212   // constantpool tables?
2213   MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
2214   if (!MJTI) return false;
2215 
2216   const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
2217   for (MachineInstr *MI : T2JumpTables) {
2218     const MCInstrDesc &MCID = MI->getDesc();
2219     unsigned NumOps = MCID.getNumOperands();
2220     unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
2221     MachineOperand JTOP = MI->getOperand(JTOpIdx);
2222     unsigned JTI = JTOP.getIndex();
2223     assert(JTI < JT.size());
2224 
2225     bool ByteOk = true;
2226     bool HalfWordOk = true;
2227     unsigned JTOffset = BBUtils->getOffsetOf(MI) + 4;
2228     const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
2229     BBInfoVector &BBInfo = BBUtils->getBBInfo();
2230     for (MachineBasicBlock *MBB : JTBBs) {
2231       unsigned DstOffset = BBInfo[MBB->getNumber()].Offset;
2232       // Negative offset is not ok. FIXME: We should change BB layout to make
2233       // sure all the branches are forward.
2234       if (ByteOk && (DstOffset - JTOffset) > ((1<<8)-1)*2)
2235         ByteOk = false;
2236       unsigned TBHLimit = ((1<<16)-1)*2;
2237       if (HalfWordOk && (DstOffset - JTOffset) > TBHLimit)
2238         HalfWordOk = false;
2239       if (!ByteOk && !HalfWordOk)
2240         break;
2241     }
2242 
2243     if (!ByteOk && !HalfWordOk)
2244       continue;
2245 
2246     CPUser &User = CPUsers[JumpTableUserIndices[JTI]];
2247     MachineBasicBlock *MBB = MI->getParent();
2248     if (!MI->getOperand(0).isKill()) // FIXME: needed now?
2249       continue;
2250 
2251     unsigned DeadSize = 0;
2252     bool CanDeleteLEA = false;
2253     bool BaseRegKill = false;
2254 
2255     unsigned IdxReg = ~0U;
2256     bool IdxRegKill = true;
2257     if (isThumb2) {
2258       IdxReg = MI->getOperand(1).getReg();
2259       IdxRegKill = MI->getOperand(1).isKill();
2260 
2261       bool PreservedBaseReg =
2262         preserveBaseRegister(MI, User.MI, DeadSize, CanDeleteLEA, BaseRegKill);
2263       if (!jumpTableFollowsTB(MI, User.CPEMI) && !PreservedBaseReg)
2264         continue;
2265     } else {
2266       // We're in thumb-1 mode, so we must have something like:
2267       //   %idx = tLSLri %idx, 2
2268       //   %base = tLEApcrelJT
2269       //   %t = tLDRr %base, %idx
2270       Register BaseReg = User.MI->getOperand(0).getReg();
2271 
2272       MachineBasicBlock *UserMBB = User.MI->getParent();
2273       MachineBasicBlock::iterator Shift = User.MI->getIterator();
2274       if (Shift == UserMBB->begin())
2275         continue;
2276 
2277       Shift = prev_nodbg(Shift, UserMBB->begin());
2278       if (Shift->getOpcode() != ARM::tLSLri ||
2279           Shift->getOperand(3).getImm() != 2 ||
2280           !Shift->getOperand(2).isKill())
2281         continue;
2282       IdxReg = Shift->getOperand(2).getReg();
2283       Register ShiftedIdxReg = Shift->getOperand(0).getReg();
2284 
2285       // It's important that IdxReg is live until the actual TBB/TBH. Most of
2286       // the range is checked later, but the LEA might still clobber it and not
2287       // actually get removed.
2288       if (BaseReg == IdxReg && !jumpTableFollowsTB(MI, User.CPEMI))
2289         continue;
2290 
2291       MachineInstr *Load = User.MI->getNextNode();
2292       if (Load->getOpcode() != ARM::tLDRr)
2293         continue;
2294       if (Load->getOperand(1).getReg() != BaseReg ||
2295           Load->getOperand(2).getReg() != ShiftedIdxReg ||
2296           !Load->getOperand(2).isKill())
2297         continue;
2298 
2299       // If we're in PIC mode, there should be another ADD following.
2300       auto *TRI = STI->getRegisterInfo();
2301 
2302       // %base cannot be redefined after the load as it will appear before
2303       // TBB/TBH like:
2304       //      %base =
2305       //      %base =
2306       //      tBB %base, %idx
2307       if (registerDefinedBetween(BaseReg, Load->getNextNode(), MBB->end(), TRI))
2308         continue;
2309 
2310       if (isPositionIndependentOrROPI) {
2311         MachineInstr *Add = Load->getNextNode();
2312         if (Add->getOpcode() != ARM::tADDrr ||
2313             Add->getOperand(2).getReg() != BaseReg ||
2314             Add->getOperand(3).getReg() != Load->getOperand(0).getReg() ||
2315             !Add->getOperand(3).isKill())
2316           continue;
2317         if (Add->getOperand(0).getReg() != MI->getOperand(0).getReg())
2318           continue;
2319         if (registerDefinedBetween(IdxReg, Add->getNextNode(), MI, TRI))
2320           // IdxReg gets redefined in the middle of the sequence.
2321           continue;
2322         Add->eraseFromParent();
2323         DeadSize += 2;
2324       } else {
2325         if (Load->getOperand(0).getReg() != MI->getOperand(0).getReg())
2326           continue;
2327         if (registerDefinedBetween(IdxReg, Load->getNextNode(), MI, TRI))
2328           // IdxReg gets redefined in the middle of the sequence.
2329           continue;
2330       }
2331 
2332       // Now safe to delete the load and lsl. The LEA will be removed later.
2333       CanDeleteLEA = true;
2334       Shift->eraseFromParent();
2335       Load->eraseFromParent();
2336       DeadSize += 4;
2337     }
2338 
2339     LLVM_DEBUG(dbgs() << "Shrink JT: " << *MI);
2340     MachineInstr *CPEMI = User.CPEMI;
2341     unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
2342     if (!isThumb2)
2343       Opc = ByteOk ? ARM::tTBB_JT : ARM::tTBH_JT;
2344 
2345     MachineBasicBlock::iterator MI_JT = MI;
2346     MachineInstr *NewJTMI =
2347         BuildMI(*MBB, MI_JT, MI->getDebugLoc(), TII->get(Opc))
2348             .addReg(User.MI->getOperand(0).getReg(),
2349                     getKillRegState(BaseRegKill))
2350             .addReg(IdxReg, getKillRegState(IdxRegKill))
2351             .addJumpTableIndex(JTI, JTOP.getTargetFlags())
2352             .addImm(CPEMI->getOperand(0).getImm());
2353     LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << ": " << *NewJTMI);
2354 
2355     unsigned JTOpc = ByteOk ? ARM::JUMPTABLE_TBB : ARM::JUMPTABLE_TBH;
2356     CPEMI->setDesc(TII->get(JTOpc));
2357 
2358     if (jumpTableFollowsTB(MI, User.CPEMI)) {
2359       NewJTMI->getOperand(0).setReg(ARM::PC);
2360       NewJTMI->getOperand(0).setIsKill(false);
2361 
2362       if (CanDeleteLEA) {
2363         if (isThumb2)
2364           RemoveDeadAddBetweenLEAAndJT(User.MI, MI, DeadSize);
2365 
2366         User.MI->eraseFromParent();
2367         DeadSize += isThumb2 ? 4 : 2;
2368 
2369         // The LEA was eliminated, the TBB instruction becomes the only new user
2370         // of the jump table.
2371         User.MI = NewJTMI;
2372         User.MaxDisp = 4;
2373         User.NegOk = false;
2374         User.IsSoImm = false;
2375         User.KnownAlignment = false;
2376       } else {
2377         // The LEA couldn't be eliminated, so we must add another CPUser to
2378         // record the TBB or TBH use.
2379         int CPEntryIdx = JumpTableEntryIndices[JTI];
2380         auto &CPEs = CPEntries[CPEntryIdx];
2381         auto Entry =
2382             find_if(CPEs, [&](CPEntry &E) { return E.CPEMI == User.CPEMI; });
2383         ++Entry->RefCount;
2384         CPUsers.emplace_back(CPUser(NewJTMI, User.CPEMI, 4, false, false));
2385       }
2386     }
2387 
2388     unsigned NewSize = TII->getInstSizeInBytes(*NewJTMI);
2389     unsigned OrigSize = TII->getInstSizeInBytes(*MI);
2390     MI->eraseFromParent();
2391 
2392     int Delta = OrigSize - NewSize + DeadSize;
2393     BBInfo[MBB->getNumber()].Size -= Delta;
2394     BBUtils->adjustBBOffsetsAfter(MBB);
2395 
2396     ++NumTBs;
2397     MadeChange = true;
2398   }
2399 
2400   return MadeChange;
2401 }
2402 
2403 /// reorderThumb2JumpTables - Adjust the function's block layout to ensure that
2404 /// jump tables always branch forwards, since that's what tbb and tbh need.
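/// (TB[BH] entries are unsigned offsets added to the PC, so a target that
/// precedes the jump cannot be encoded; such blocks are moved after the jump
/// or reached via a trampoline block inserted by adjustJTTargetBlockForward.)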
2405 bool ARMConstantIslands::reorderThumb2JumpTables() {
2406   bool MadeChange = false;
2407 
2408   MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
2409   if (!MJTI) return false;
2410 
2411   const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
2412   for (MachineInstr *MI : T2JumpTables) {
2413     const MCInstrDesc &MCID = MI->getDesc();
2414     unsigned NumOps = MCID.getNumOperands();
2415     unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
2416     MachineOperand JTOP = MI->getOperand(JTOpIdx);
2417     unsigned JTI = JTOP.getIndex();
2418     assert(JTI < JT.size());
2419 
2420     // We prefer that the target blocks for the jump table come after the
2421     // jump instruction so we can use TB[BH]. Loop through the target blocks
2422     // and try to adjust them so that this holds.
2423     int JTNumber = MI->getParent()->getNumber();
2424     const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
2425     for (MachineBasicBlock *MBB : JTBBs) {
2426       int DTNumber = MBB->getNumber();
2427 
2428       if (DTNumber < JTNumber) {
2429         // The destination precedes the switch. Try to move the block forward
2430         // so we have a positive offset.
2431         MachineBasicBlock *NewBB =
2432             adjustJTTargetBlockForward(JTI, MBB, MI->getParent());
2433         if (NewBB)
2434           MJTI->ReplaceMBBInJumpTable(JTI, MBB, NewBB);
2435         MadeChange = true;
2436       }
2437     }
2438   }
2439 
2440   return MadeChange;
2441 }
2442 
2443 MachineBasicBlock *ARMConstantIslands::adjustJTTargetBlockForward(
2444     unsigned JTI, MachineBasicBlock *BB, MachineBasicBlock *JTBB) {
2445   // If the destination block is terminated by an unconditional branch,
2446   // try to move it; otherwise, create a new block following the jump
2447   // table that branches back to the actual target. This is a very simple
2448   // heuristic. FIXME: We can definitely improve it.
2449   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
2450   SmallVector<MachineOperand, 4> Cond;
2451   SmallVector<MachineOperand, 4> CondPrior;
2452   MachineFunction::iterator BBi = BB->getIterator();
2453   MachineFunction::iterator OldPrior = std::prev(BBi);
2454   MachineFunction::iterator OldNext = std::next(BBi);
2455 
2456   // If the block terminator isn't analyzable, don't try to move the block
2457   bool B = TII->analyzeBranch(*BB, TBB, FBB, Cond);
2458 
2459   // If the block ends in an unconditional branch, move it. The prior block
2460   // has to have an analyzable terminator for us to move this one. Be paranoid
2461   // and make sure we're not trying to move the entry block of the function.
2462   if (!B && Cond.empty() && BB != &MF->front() &&
2463       !TII->analyzeBranch(*OldPrior, TBB, FBB, CondPrior)) {
2464     BB->moveAfter(JTBB);
2465     OldPrior->updateTerminator(BB);
2466     BB->updateTerminator(OldNext != MF->end() ? &*OldNext : nullptr);
2467     // Update numbering to account for the block being moved.
2468     MF->RenumberBlocks();
2469     DT->updateBlockNumbers();
2470     ++NumJTMoved;
2471     return nullptr;
2472   }
2473 
2474   // Create a new MBB for the code after the jump BB.
2475   MachineBasicBlock *NewBB =
2476     MF->CreateMachineBasicBlock(JTBB->getBasicBlock());
2477   MachineFunction::iterator MBBI = ++JTBB->getIterator();
2478   MF->insert(MBBI, NewBB);
2479 
2480   // Copy live-in information to new block.
2481   for (const MachineBasicBlock::RegisterMaskPair &RegMaskPair : BB->liveins())
2482     NewBB->addLiveIn(RegMaskPair);
2483 
2484   // Add an unconditional branch from NewBB to BB.
2485   // There doesn't seem to be meaningful DebugInfo available; this doesn't
2486   // correspond directly to anything in the source.
2487   if (isThumb2)
2488     BuildMI(NewBB, DebugLoc(), TII->get(ARM::t2B))
2489         .addMBB(BB)
2490         .add(predOps(ARMCC::AL));
2491   else
2492     BuildMI(NewBB, DebugLoc(), TII->get(ARM::tB))
2493         .addMBB(BB)
2494         .add(predOps(ARMCC::AL));
2495 
2496   // Update internal data structures to account for the newly inserted MBB.
2497   MF->RenumberBlocks(NewBB);
2498   DT->updateBlockNumbers();
2499 
2500   // Update the CFG.
2501   NewBB->addSuccessor(BB);
2502   JTBB->replaceSuccessor(BB, NewBB);
2503 
2504   ++NumJTInserted;
2505   return NewBB;
2506 }
2507 
2508 /// createARMConstantIslandPass - returns an instance of the constpool
2509 /// island pass.
2510 FunctionPass *llvm::createARMConstantIslandPass() {
2511   return new ARMConstantIslands();
2512 }
2513 
2514 INITIALIZE_PASS(ARMConstantIslands, "arm-cp-islands", ARM_CP_ISLANDS_OPT_NAME,
2515                 false, false)
2516