1 //===- HexagonConstExtenders.cpp ------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "HexagonInstrInfo.h"
10 #include "HexagonRegisterInfo.h"
11 #include "HexagonSubtarget.h"
12 #include "llvm/ADT/SetVector.h"
13 #include "llvm/ADT/SmallVector.h"
14 #include "llvm/CodeGen/MachineDominators.h"
15 #include "llvm/CodeGen/MachineFunctionPass.h"
16 #include "llvm/CodeGen/MachineInstrBuilder.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/CodeGen/Register.h"
19 #include "llvm/InitializePasses.h"
20 #include "llvm/Pass.h"
21 #include "llvm/Support/CommandLine.h"
22 #include "llvm/Support/raw_ostream.h"
23 #include <map>
24 #include <set>
25 #include <utility>
26 #include <vector>
27 
28 #define DEBUG_TYPE "hexagon-cext-opt"
29 
30 using namespace llvm;
31 
32 static cl::opt<unsigned> CountThreshold(
33     "hexagon-cext-threshold", cl::init(3), cl::Hidden,
34     cl::desc("Minimum number of extenders to trigger replacement"));
35 
36 static cl::opt<unsigned>
37     ReplaceLimit("hexagon-cext-limit", cl::init(0), cl::Hidden,
38                  cl::desc("Maximum number of replacements"));
39 
40 namespace llvm {
41   void initializeHexagonConstExtendersPass(PassRegistry&);
42   FunctionPass *createHexagonConstExtenders();
43 }
44 
45 static int32_t adjustUp(int32_t V, uint8_t A, uint8_t O) {
46   assert(isPowerOf2_32(A));
47   int32_t U = (V & -A) + O;
48   return U >= V ? U : U+A;
49 }
50 
51 static int32_t adjustDown(int32_t V, uint8_t A, uint8_t O) {
52   assert(isPowerOf2_32(A));
53   int32_t U = (V & -A) + O;
54   return U <= V ? U : U-A;
55 }
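// adjustUp snaps V up to the nearest value of the form A*N + O, and
// adjustDown snaps it down (assuming 0 <= O < A). For example, with A = 4
// and O = 2, adjustUp(13, 4, 2) == 14 and adjustDown(13, 4, 2) == 10.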
56 
57 namespace {
58   struct OffsetRange {
59     // The range of values between Min and Max that are of form Align*N+Offset,
60     // for some integer N. Min and Max are required to be of that form as well,
61     // except in the case of an empty range.
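    // For example, {Min = 2, Max = 14, Align = 4, Offset = 2} describes the
    // set {2, 6, 10, 14}.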
62     int32_t Min = INT_MIN, Max = INT_MAX;
63     uint8_t Align = 1;
64     uint8_t Offset = 0;
65 
66     OffsetRange() = default;
67     OffsetRange(int32_t L, int32_t H, uint8_t A, uint8_t O = 0)
68       : Min(L), Max(H), Align(A), Offset(O) {}
69     OffsetRange &intersect(OffsetRange A) {
70       if (Align < A.Align)
71         std::swap(*this, A);
72 
73       // Align >= A.Align.
74       if (Offset >= A.Offset && (Offset - A.Offset) % A.Align == 0) {
75         Min = adjustUp(std::max(Min, A.Min), Align, Offset);
76         Max = adjustDown(std::min(Max, A.Max), Align, Offset);
77       } else {
78         // Make an empty range.
79         Min = 0;
80         Max = -1;
81       }
82       // Canonicalize empty ranges.
83       if (Min > Max)
84         std::tie(Min, Max, Align) = std::make_tuple(0, -1, 1);
85       return *this;
86     }
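    // For illustration: intersecting [0,20]a4+0 with [2,30]a2+0 gives
    // [4,20]a4+0, i.e. {4, 8, 12, 16, 20}. (The alignments of interest are
    // powers of two, so the coarser one divides the finer one.)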
87     OffsetRange &shift(int32_t S) {
88       Min += S;
89       Max += S;
90       Offset = (Offset+S) % Align;
91       return *this;
92     }
93     OffsetRange &extendBy(int32_t D) {
94       // If D < 0, extend Min, otherwise extend Max.
95       assert(D % Align == 0);
96       if (D < 0)
97         Min = (INT_MIN-D < Min) ? Min+D : INT_MIN;
98       else
99         Max = (INT_MAX-D > Max) ? Max+D : INT_MAX;
100       return *this;
101     }
102     bool empty() const {
103       return Min > Max;
104     }
105     bool contains(int32_t V) const {
106       return Min <= V && V <= Max && (V-Offset) % Align == 0;
107     }
108     bool operator==(const OffsetRange &R) const {
109       return Min == R.Min && Max == R.Max && Align == R.Align;
110     }
111     bool operator!=(const OffsetRange &R) const {
112       return !operator==(R);
113     }
114     bool operator<(const OffsetRange &R) const {
115       if (Min != R.Min)
116         return Min < R.Min;
117       if (Max != R.Max)
118         return Max < R.Max;
119       return Align < R.Align;
120     }
121     static OffsetRange zero() { return {0, 0, 1}; }
122   };
123 
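  // An AVL-style balanced tree of OffsetRanges. Each node is augmented with
  // MaxEnd, the maximum Range.Max in its subtree, so that nodesWith(P) can
  // prune subtrees that cannot contain P (as in an interval tree). Count
  // records how many times an identical range was added.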
124   struct RangeTree {
125     struct Node {
126       Node(const OffsetRange &R) : MaxEnd(R.Max), Range(R) {}
127       unsigned Height = 1;
128       unsigned Count = 1;
129       int32_t MaxEnd;
130       const OffsetRange &Range;
131       Node *Left = nullptr, *Right = nullptr;
132     };
133 
134     Node *Root = nullptr;
135 
136     void add(const OffsetRange &R) {
137       Root = add(Root, R);
138     }
139     void erase(const Node *N) {
140       Root = remove(Root, N);
141       delete N;
142     }
143     void order(SmallVectorImpl<Node*> &Seq) const {
144       order(Root, Seq);
145     }
146     SmallVector<Node*,8> nodesWith(int32_t P, bool CheckAlign = true) {
147       SmallVector<Node*,8> Nodes;
148       nodesWith(Root, P, CheckAlign, Nodes);
149       return Nodes;
150     }
151     void dump() const;
152     ~RangeTree() {
153       SmallVector<Node*,8> Nodes;
154       order(Nodes);
155       for (Node *N : Nodes)
156         delete N;
157     }
158 
159   private:
160     void dump(const Node *N) const;
161     void order(Node *N, SmallVectorImpl<Node*> &Seq) const;
162     void nodesWith(Node *N, int32_t P, bool CheckA,
163                    SmallVectorImpl<Node*> &Seq) const;
164 
165     Node *add(Node *N, const OffsetRange &R);
166     Node *remove(Node *N, const Node *D);
167     Node *rotateLeft(Node *Lower, Node *Higher);
168     Node *rotateRight(Node *Lower, Node *Higher);
169     unsigned height(Node *N) {
170       return N != nullptr ? N->Height : 0;
171     }
172     Node *update(Node *N) {
173       assert(N != nullptr);
174       N->Height = 1 + std::max(height(N->Left), height(N->Right));
175       if (N->Left)
176         N->MaxEnd = std::max(N->MaxEnd, N->Left->MaxEnd);
177       if (N->Right)
178         N->MaxEnd = std::max(N->MaxEnd, N->Right->MaxEnd);
179       return N;
180     }
181     Node *rebalance(Node *N) {
182       assert(N != nullptr);
183       int32_t Balance = height(N->Right) - height(N->Left);
184       if (Balance < -1)
185         return rotateRight(N->Left, N);
186       if (Balance > 1)
187         return rotateLeft(N->Right, N);
188       return N;
189     }
190   };
191 
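  // A location in the function: a basic block and a position within it.
  // Pos == -1 denotes the block's end() and compares greater than any
  // instruction position in the same block.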
192   struct Loc {
193     MachineBasicBlock *Block = nullptr;
194     MachineBasicBlock::iterator At;
195 
196     Loc(MachineBasicBlock *B, MachineBasicBlock::iterator It)
197       : Block(B), At(It) {
198       if (B->end() == It) {
199         Pos = -1;
200       } else {
201         assert(It->getParent() == B);
202         Pos = std::distance(B->begin(), It);
203       }
204     }
205     bool operator<(Loc A) const {
206       if (Block != A.Block)
207         return Block->getNumber() < A.Block->getNumber();
208       if (A.Pos == -1)
209         return Pos != A.Pos;
210       return Pos != -1 && Pos < A.Pos;
211     }
212   private:
213     int Pos = 0;
214   };
215 
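  // The pass works in stages: collect() gathers the constant-extended
  // operands, assignInits() groups extenders that share a root value and
  // have compatible offset ranges and assigns a common initializer to each
  // group, calculatePlacement() picks a location that dominates all of a
  // group's uses, and replaceExtenders() inserts the initializers and
  // rewrites the users to take the new register (plus a small offset where
  // permitted).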
216   struct HexagonConstExtenders : public MachineFunctionPass {
217     static char ID;
218     HexagonConstExtenders() : MachineFunctionPass(ID) {}
219 
220     void getAnalysisUsage(AnalysisUsage &AU) const override {
221       AU.addRequired<MachineDominatorTree>();
222       AU.addPreserved<MachineDominatorTree>();
223       MachineFunctionPass::getAnalysisUsage(AU);
224     }
225 
226     StringRef getPassName() const override {
227       return "Hexagon constant-extender optimization";
228     }
229     bool runOnMachineFunction(MachineFunction &MF) override;
230 
231   private:
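    // A register (with an optional subregister) or a stack slot (a frame
    // index encoded via Register::index2StackSlot).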
232     struct Register {
233       Register() = default;
234       Register(llvm::Register R, unsigned S) : Reg(R), Sub(S) {}
235       Register(const MachineOperand &Op)
236         : Reg(Op.getReg()), Sub(Op.getSubReg()) {}
237       Register &operator=(const MachineOperand &Op) {
238         if (Op.isReg()) {
239           Reg = Op.getReg();
240           Sub = Op.getSubReg();
241         } else if (Op.isFI()) {
242           Reg = llvm::Register::index2StackSlot(Op.getIndex());
243         }
244         return *this;
245       }
246       bool isVReg() const {
247         return Reg != 0 && !Reg.isStack() && Reg.isVirtual();
248       }
249       bool isSlot() const { return Reg != 0 && Reg.isStack(); }
250       operator MachineOperand() const {
251         if (isVReg())
252           return MachineOperand::CreateReg(Reg, /*Def*/false, /*Imp*/false,
253                           /*Kill*/false, /*Dead*/false, /*Undef*/false,
254                           /*EarlyClobber*/false, Sub);
255         if (Reg.isStack()) {
256           int FI = llvm::Register::stackSlot2Index(Reg);
257           return MachineOperand::CreateFI(FI);
258         }
259         llvm_unreachable("Cannot create MachineOperand");
260       }
261       bool operator==(Register R) const { return Reg == R.Reg && Sub == R.Sub; }
262       bool operator!=(Register R) const { return !operator==(R); }
263       bool operator<(Register R) const {
264         // For std::map.
265         return Reg < R.Reg || (Reg == R.Reg && Sub < R.Sub);
266       }
267       llvm::Register Reg;
268       unsigned Sub = 0;
269     };
270 
271     struct ExtExpr {
272       // A subexpression in which the extender is used. In general, this
273       // represents an expression where adding D to the extender will be
274       // equivalent to adding D to the expression as a whole. In other
275       // words, expr(add(##V,D)) = add(expr(##V),D).
276 
277       // The original motivation for this is the io/ur addressing modes,
278       // where the offset is extended. Consider the io example:
279       // In memw(Rs+##V), the ##V could be replaced by a register Rt to
280       // form the rr mode: memw(Rt+Rs<<0). In such case, however, the
281       // register Rt must have exactly the value of ##V. If there was
282       // another instruction memw(Rs+##V+4), it would need a different Rt.
283       // Now, if Rt was initialized as "##V+Rs<<0", both of these
284       // instructions could use the same Rt, just with different offsets.
285       // Here it's clear that "initializer+4" should be the same as if
286       // the offset 4 was added to the ##V in the initializer.
287 
288       // The only kinds of expressions that support the requirement of
289       // commuting with addition are addition and subtraction from ##V.
290       // Include shifting the Rs to account for the ur addressing mode:
291       //   ##Val + Rs << S
292       //   ##Val - Rs
293       Register Rs;
294       unsigned S = 0;
295       bool Neg = false;
296 
297       ExtExpr() = default;
298       ExtExpr(Register RS, bool NG, unsigned SH) : Rs(RS), S(SH), Neg(NG) {}
299       // Expression is trivial if it does not modify the extender.
300       bool trivial() const {
301         return Rs.Reg == 0;
302       }
303       bool operator==(const ExtExpr &Ex) const {
304         return Rs == Ex.Rs && S == Ex.S && Neg == Ex.Neg;
305       }
306       bool operator!=(const ExtExpr &Ex) const {
307         return !operator==(Ex);
308       }
309       bool operator<(const ExtExpr &Ex) const {
310         if (Rs != Ex.Rs)
311           return Rs < Ex.Rs;
312         if (S != Ex.S)
313           return S < Ex.S;
314         return !Neg && Ex.Neg;
315       }
316     };
317 
318     struct ExtDesc {
319       MachineInstr *UseMI = nullptr;
320       unsigned OpNum = -1u;
321       // The subexpression in which the extender is used (e.g. address
322       // computation).
323       ExtExpr Expr;
324       // Optional register that is assigned the value of Expr.
325       Register Rd;
326       // Def means that the output of the instruction may differ from the
327       // original by a constant c, and that the difference can be corrected
328       // by adding/subtracting c in all users of the defined register.
329       bool IsDef = false;
330 
331       MachineOperand &getOp() {
332         return UseMI->getOperand(OpNum);
333       }
334       const MachineOperand &getOp() const {
335         return UseMI->getOperand(OpNum);
336       }
337     };
338 
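    // The root of an extended value: the constant, symbol, or index part of
    // the operand, without any offset. Extenders are grouped by their root
    // when choosing common initializers.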
339     struct ExtRoot {
340       union {
341         const ConstantFP *CFP;  // MO_FPImmediate
342         const char *SymbolName; // MO_ExternalSymbol
343         const GlobalValue *GV;  // MO_GlobalAddress
344         const BlockAddress *BA; // MO_BlockAddress
345         int64_t ImmVal;         // MO_Immediate, MO_TargetIndex,
346                                 // and MO_ConstantPoolIndex
347       } V;
348       unsigned Kind;            // Same as in MachineOperand.
349       unsigned char TF;         // TargetFlags.
350 
351       ExtRoot(const MachineOperand &Op);
352       bool operator==(const ExtRoot &ER) const {
353         return Kind == ER.Kind && V.ImmVal == ER.V.ImmVal;
354       }
355       bool operator!=(const ExtRoot &ER) const {
356         return !operator==(ER);
357       }
358       bool operator<(const ExtRoot &ER) const;
359     };
360 
361     struct ExtValue : public ExtRoot {
362       int32_t Offset;
363 
364       ExtValue(const MachineOperand &Op);
365       ExtValue(const ExtDesc &ED) : ExtValue(ED.getOp()) {}
366       ExtValue(const ExtRoot &ER, int32_t Off) : ExtRoot(ER), Offset(Off) {}
367       bool operator<(const ExtValue &EV) const;
368       bool operator==(const ExtValue &EV) const {
369         return ExtRoot(*this) == ExtRoot(EV) && Offset == EV.Offset;
370       }
371       bool operator!=(const ExtValue &EV) const {
372         return !operator==(EV);
373       }
374       explicit operator MachineOperand() const;
375     };
376 
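    // An ExtenderInit describes an initializer: the value to materialize and
    // the (possibly trivial) expression in which it is used. AssignmentMap
    // maps each initializer to the list of extender indices it will replace.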
377     using IndexList = SetVector<unsigned>;
378     using ExtenderInit = std::pair<ExtValue, ExtExpr>;
379     using AssignmentMap = std::map<ExtenderInit, IndexList>;
380     using LocDefList = std::vector<std::pair<Loc, IndexList>>;
381 
382     const HexagonSubtarget *HST = nullptr;
383     const HexagonInstrInfo *HII = nullptr;
384     const HexagonRegisterInfo *HRI = nullptr;
385     MachineDominatorTree *MDT = nullptr;
386     MachineRegisterInfo *MRI = nullptr;
387     std::vector<ExtDesc> Extenders;
388     std::vector<unsigned> NewRegs;
389 
390     bool isStoreImmediate(unsigned Opc) const;
391     bool isRegOffOpcode(unsigned ExtOpc) const;
392     unsigned getRegOffOpcode(unsigned ExtOpc) const;
393     unsigned getDirectRegReplacement(unsigned ExtOpc) const;
394     OffsetRange getOffsetRange(Register R, const MachineInstr &MI) const;
395     OffsetRange getOffsetRange(const ExtDesc &ED) const;
396     OffsetRange getOffsetRange(Register Rd) const;
397 
398     void recordExtender(MachineInstr &MI, unsigned OpNum);
399     void collectInstr(MachineInstr &MI);
400     void collect(MachineFunction &MF);
401     void assignInits(const ExtRoot &ER, unsigned Begin, unsigned End,
402                      AssignmentMap &IMap);
403     void calculatePlacement(const ExtenderInit &ExtI, const IndexList &Refs,
404                             LocDefList &Defs);
405     Register insertInitializer(Loc DefL, const ExtenderInit &ExtI);
406     bool replaceInstrExact(const ExtDesc &ED, Register ExtR);
407     bool replaceInstrExpr(const ExtDesc &ED, const ExtenderInit &ExtI,
408                           Register ExtR, int32_t &Diff);
409     bool replaceInstr(unsigned Idx, Register ExtR, const ExtenderInit &ExtI);
410     bool replaceExtenders(const AssignmentMap &IMap);
411 
412     unsigned getOperandIndex(const MachineInstr &MI,
413                              const MachineOperand &Op) const;
414     const MachineOperand &getPredicateOp(const MachineInstr &MI) const;
415     const MachineOperand &getLoadResultOp(const MachineInstr &MI) const;
416     const MachineOperand &getStoredValueOp(const MachineInstr &MI) const;
417 
418     friend struct PrintRegister;
419     friend struct PrintExpr;
420     friend struct PrintInit;
421     friend struct PrintIMap;
422     friend raw_ostream &operator<< (raw_ostream &OS,
423                                     const struct PrintRegister &P);
424     friend raw_ostream &operator<< (raw_ostream &OS, const struct PrintExpr &P);
425     friend raw_ostream &operator<< (raw_ostream &OS, const struct PrintInit &P);
426     friend raw_ostream &operator<< (raw_ostream &OS, const ExtDesc &ED);
427     friend raw_ostream &operator<< (raw_ostream &OS, const ExtRoot &ER);
428     friend raw_ostream &operator<< (raw_ostream &OS, const ExtValue &EV);
429     friend raw_ostream &operator<< (raw_ostream &OS, const OffsetRange &OR);
430     friend raw_ostream &operator<< (raw_ostream &OS, const struct PrintIMap &P);
431   };
432 
433   using HCE = HexagonConstExtenders;
434 
435   LLVM_ATTRIBUTE_UNUSED
436   raw_ostream &operator<< (raw_ostream &OS, const OffsetRange &OR) {
437     if (OR.Min > OR.Max)
438       OS << '!';
439     OS << '[' << OR.Min << ',' << OR.Max << "]a" << unsigned(OR.Align)
440        << '+' << unsigned(OR.Offset);
441     return OS;
442   }
443 
444   struct PrintRegister {
445     PrintRegister(HCE::Register R, const HexagonRegisterInfo &I)
446       : Rs(R), HRI(I) {}
447     HCE::Register Rs;
448     const HexagonRegisterInfo &HRI;
449   };
450 
451   LLVM_ATTRIBUTE_UNUSED
452   raw_ostream &operator<< (raw_ostream &OS, const PrintRegister &P) {
453     if (P.Rs.Reg != 0)
454       OS << printReg(P.Rs.Reg, &P.HRI, P.Rs.Sub);
455     else
456       OS << "noreg";
457     return OS;
458   }
459 
460   struct PrintExpr {
461     PrintExpr(const HCE::ExtExpr &E, const HexagonRegisterInfo &I)
462       : Ex(E), HRI(I) {}
463     const HCE::ExtExpr &Ex;
464     const HexagonRegisterInfo &HRI;
465   };
466 
467   LLVM_ATTRIBUTE_UNUSED
468   raw_ostream &operator<< (raw_ostream &OS, const PrintExpr &P) {
469     OS << "## " << (P.Ex.Neg ? "- " : "+ ");
470     if (P.Ex.Rs.Reg != 0)
471       OS << printReg(P.Ex.Rs.Reg, &P.HRI, P.Ex.Rs.Sub);
472     else
473       OS << "__";
474     OS << " << " << P.Ex.S;
475     return OS;
476   }
477 
478   struct PrintInit {
479     PrintInit(const HCE::ExtenderInit &EI, const HexagonRegisterInfo &I)
480       : ExtI(EI), HRI(I) {}
481     const HCE::ExtenderInit &ExtI;
482     const HexagonRegisterInfo &HRI;
483   };
484 
485   LLVM_ATTRIBUTE_UNUSED
486   raw_ostream &operator<< (raw_ostream &OS, const PrintInit &P) {
487     OS << '[' << P.ExtI.first << ", "
488        << PrintExpr(P.ExtI.second, P.HRI) << ']';
489     return OS;
490   }
491 
492   LLVM_ATTRIBUTE_UNUSED
493   raw_ostream &operator<< (raw_ostream &OS, const HCE::ExtDesc &ED) {
494     assert(ED.OpNum != -1u);
495     const MachineBasicBlock &MBB = *ED.getOp().getParent()->getParent();
496     const MachineFunction &MF = *MBB.getParent();
497     const auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
498     OS << "bb#" << MBB.getNumber() << ": ";
499     if (ED.Rd.Reg != 0)
500       OS << printReg(ED.Rd.Reg, &HRI, ED.Rd.Sub);
501     else
502       OS << "__";
503     OS << " = " << PrintExpr(ED.Expr, HRI);
504     if (ED.IsDef)
505       OS << ", def";
506     return OS;
507   }
508 
509   LLVM_ATTRIBUTE_UNUSED
510   raw_ostream &operator<< (raw_ostream &OS, const HCE::ExtRoot &ER) {
511     switch (ER.Kind) {
512       case MachineOperand::MO_Immediate:
513         OS << "imm:" << ER.V.ImmVal;
514         break;
515       case MachineOperand::MO_FPImmediate:
516         OS << "fpi:" << *ER.V.CFP;
517         break;
518       case MachineOperand::MO_ExternalSymbol:
519         OS << "sym:" << ER.V.SymbolName;
520         break;
521       case MachineOperand::MO_GlobalAddress:
522         OS << "gad:" << ER.V.GV->getName();
523         break;
524       case MachineOperand::MO_BlockAddress:
525         OS << "blk:" << *ER.V.BA;
526         break;
527       case MachineOperand::MO_TargetIndex:
528         OS << "tgi:" << ER.V.ImmVal;
529         break;
530       case MachineOperand::MO_ConstantPoolIndex:
531         OS << "cpi:" << ER.V.ImmVal;
532         break;
533       case MachineOperand::MO_JumpTableIndex:
534         OS << "jti:" << ER.V.ImmVal;
535         break;
536       default:
537         OS << "???:" << ER.V.ImmVal;
538         break;
539     }
540     return OS;
541   }
542 
543   LLVM_ATTRIBUTE_UNUSED
544   raw_ostream &operator<< (raw_ostream &OS, const HCE::ExtValue &EV) {
545     OS << HCE::ExtRoot(EV) << "  off:" << EV.Offset;
546     return OS;
547   }
548 
549   struct PrintIMap {
550     PrintIMap(const HCE::AssignmentMap &M, const HexagonRegisterInfo &I)
551       : IMap(M), HRI(I) {}
552     const HCE::AssignmentMap &IMap;
553     const HexagonRegisterInfo &HRI;
554   };
555 
556   LLVM_ATTRIBUTE_UNUSED
557   raw_ostream &operator<< (raw_ostream &OS, const PrintIMap &P) {
558     OS << "{\n";
559     for (const std::pair<const HCE::ExtenderInit, HCE::IndexList> &Q : P.IMap) {
560       OS << "  " << PrintInit(Q.first, P.HRI) << " -> {";
561       for (unsigned I : Q.second)
562         OS << ' ' << I;
563       OS << " }\n";
564     }
565     OS << "}\n";
566     return OS;
567   }
568 }
569 
570 INITIALIZE_PASS_BEGIN(HexagonConstExtenders, "hexagon-cext-opt",
571       "Hexagon constant-extender optimization", false, false)
572 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
573 INITIALIZE_PASS_END(HexagonConstExtenders, "hexagon-cext-opt",
574       "Hexagon constant-extender optimization", false, false)
575 
576 static unsigned ReplaceCounter = 0;
577 
578 char HCE::ID = 0;
579 
580 #ifndef NDEBUG
581 LLVM_DUMP_METHOD void RangeTree::dump() const {
582   dbgs() << "Root: " << Root << '\n';
583   if (Root)
584     dump(Root);
585 }
586 
587 LLVM_DUMP_METHOD void RangeTree::dump(const Node *N) const {
588   dbgs() << "Node: " << N << '\n';
589   dbgs() << "  Height: " << N->Height << '\n';
590   dbgs() << "  Count: " << N->Count << '\n';
591   dbgs() << "  MaxEnd: " << N->MaxEnd << '\n';
592   dbgs() << "  Range: " << N->Range << '\n';
593   dbgs() << "  Left: " << N->Left << '\n';
594   dbgs() << "  Right: " << N->Right << "\n\n";
595 
596   if (N->Left)
597     dump(N->Left);
598   if (N->Right)
599     dump(N->Right);
600 }
601 #endif
602 
603 void RangeTree::order(Node *N, SmallVectorImpl<Node*> &Seq) const {
604   if (N == nullptr)
605     return;
606   order(N->Left, Seq);
607   Seq.push_back(N);
608   order(N->Right, Seq);
609 }
610 
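// Collect the nodes whose range covers P, pruning subtrees via MaxEnd. When
// CheckA is set, the range must also contain P with respect to its alignment
// pattern; otherwise Min <= P <= Max suffices.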
611 void RangeTree::nodesWith(Node *N, int32_t P, bool CheckA,
612       SmallVectorImpl<Node*> &Seq) const {
613   if (N == nullptr || N->MaxEnd < P)
614     return;
615   nodesWith(N->Left, P, CheckA, Seq);
616   if (N->Range.Min <= P) {
617     if ((CheckA && N->Range.contains(P)) || (!CheckA && P <= N->Range.Max))
618       Seq.push_back(N);
619     nodesWith(N->Right, P, CheckA, Seq);
620   }
621 }
622 
623 RangeTree::Node *RangeTree::add(Node *N, const OffsetRange &R) {
624   if (N == nullptr)
625     return new Node(R);
626 
627   if (N->Range == R) {
628     N->Count++;
629     return N;
630   }
631 
632   if (R < N->Range)
633     N->Left = add(N->Left, R);
634   else
635     N->Right = add(N->Right, R);
636   return rebalance(update(N));
637 }
638 
639 RangeTree::Node *RangeTree::remove(Node *N, const Node *D) {
640   assert(N != nullptr);
641 
642   if (N != D) {
643     assert(N->Range != D->Range && "N and D should not be equal");
644     if (D->Range < N->Range)
645       N->Left = remove(N->Left, D);
646     else
647       N->Right = remove(N->Right, D);
648     return rebalance(update(N));
649   }
650 
651   // We got to the node we need to remove. If any of its children are
652   // missing, simply replace it with the other child.
653   if (N->Left == nullptr || N->Right == nullptr)
654     return (N->Left == nullptr) ? N->Right : N->Left;
655 
656   // Find the rightmost child of N->Left, remove it and plug it in place
657   // of N.
658   Node *M = N->Left;
659   while (M->Right)
660     M = M->Right;
661   M->Left = remove(N->Left, M);
662   M->Right = N->Right;
663   return rebalance(update(M));
664 }
665 
666 RangeTree::Node *RangeTree::rotateLeft(Node *Lower, Node *Higher) {
667   assert(Higher->Right == Lower);
668   // The Lower node is Higher's right child. Make sure that Lower's right
669   // subtree is at least as tall as its left one; otherwise the rotation
670   // would create an unbalanced tree again.
671   if (height(Lower->Left) > height(Lower->Right))
672     Lower = rotateRight(Lower->Left, Lower);
673   assert(height(Lower->Left) <= height(Lower->Right));
674   Higher->Right = Lower->Left;
675   update(Higher);
676   Lower->Left = Higher;
677   update(Lower);
678   return Lower;
679 }
680 
681 RangeTree::Node *RangeTree::rotateRight(Node *Lower, Node *Higher) {
682   assert(Higher->Left == Lower);
683   // The Lower node is Higher's left child. Make sure that Lower's left
684   // subtree is at least as tall as its right one; otherwise the rotation
685   // would create an unbalanced tree again.
686   if (height(Lower->Left) < height(Lower->Right))
687     Lower = rotateLeft(Lower->Right, Lower);
688   assert(height(Lower->Left) >= height(Lower->Right));
689   Higher->Left = Lower->Right;
690   update(Higher);
691   Lower->Right = Higher;
692   update(Lower);
693   return Lower;
694 }
695 
696 
697 HCE::ExtRoot::ExtRoot(const MachineOperand &Op) {
698   // Always store ImmVal, since it's the field used for comparisons.
699   V.ImmVal = 0;
700   if (Op.isImm())
701     ; // Keep 0. Do not use Op.getImm() for value here (treat 0 as the root).
702   else if (Op.isFPImm())
703     V.CFP = Op.getFPImm();
704   else if (Op.isSymbol())
705     V.SymbolName = Op.getSymbolName();
706   else if (Op.isGlobal())
707     V.GV = Op.getGlobal();
708   else if (Op.isBlockAddress())
709     V.BA = Op.getBlockAddress();
710   else if (Op.isCPI() || Op.isTargetIndex() || Op.isJTI())
711     V.ImmVal = Op.getIndex();
712   else
713     llvm_unreachable("Unexpected operand type");
714 
715   Kind = Op.getType();
716   TF = Op.getTargetFlags();
717 }
718 
719 bool HCE::ExtRoot::operator< (const HCE::ExtRoot &ER) const {
720   if (Kind != ER.Kind)
721     return Kind < ER.Kind;
722   switch (Kind) {
723     case MachineOperand::MO_Immediate:
724     case MachineOperand::MO_TargetIndex:
725     case MachineOperand::MO_ConstantPoolIndex:
726     case MachineOperand::MO_JumpTableIndex:
727       return V.ImmVal < ER.V.ImmVal;
728     case MachineOperand::MO_FPImmediate: {
729       const APFloat &ThisF = V.CFP->getValueAPF();
730       const APFloat &OtherF = ER.V.CFP->getValueAPF();
731       return ThisF.bitcastToAPInt().ult(OtherF.bitcastToAPInt());
732     }
733     case MachineOperand::MO_ExternalSymbol:
734       return StringRef(V.SymbolName) < StringRef(ER.V.SymbolName);
735     case MachineOperand::MO_GlobalAddress:
736       // Do not use GUIDs, since they depend on the source path. Moving the
737       // source file to a different directory could cause different GUID
738       // values for a pair of given symbols. These symbols could then compare
739       // "less" in one directory, but "greater" in another.
740       assert(!V.GV->getName().empty() && !ER.V.GV->getName().empty());
741       return V.GV->getName() < ER.V.GV->getName();
742     case MachineOperand::MO_BlockAddress: {
743       const BasicBlock *ThisB = V.BA->getBasicBlock();
744       const BasicBlock *OtherB = ER.V.BA->getBasicBlock();
745       assert(ThisB->getParent() == OtherB->getParent());
746       const Function &F = *ThisB->getParent();
747       return std::distance(F.begin(), ThisB->getIterator()) <
748              std::distance(F.begin(), OtherB->getIterator());
749     }
750   }
751   return V.ImmVal < ER.V.ImmVal;
752 }
753 
754 HCE::ExtValue::ExtValue(const MachineOperand &Op) : ExtRoot(Op) {
755   if (Op.isImm())
756     Offset = Op.getImm();
757   else if (Op.isFPImm() || Op.isJTI())
758     Offset = 0;
759   else if (Op.isSymbol() || Op.isGlobal() || Op.isBlockAddress() ||
760            Op.isCPI() || Op.isTargetIndex())
761     Offset = Op.getOffset();
762   else
763     llvm_unreachable("Unexpected operand type");
764 }
765 
766 bool HCE::ExtValue::operator< (const HCE::ExtValue &EV) const {
767   const ExtRoot &ER = *this;
768   if (!(ER == ExtRoot(EV)))
769     return ER < EV;
770   return Offset < EV.Offset;
771 }
772 
773 HCE::ExtValue::operator MachineOperand() const {
774   switch (Kind) {
775     case MachineOperand::MO_Immediate:
776       return MachineOperand::CreateImm(V.ImmVal + Offset);
777     case MachineOperand::MO_FPImmediate:
778       assert(Offset == 0);
779       return MachineOperand::CreateFPImm(V.CFP);
780     case MachineOperand::MO_ExternalSymbol:
781       assert(Offset == 0);
782       return MachineOperand::CreateES(V.SymbolName, TF);
783     case MachineOperand::MO_GlobalAddress:
784       return MachineOperand::CreateGA(V.GV, Offset, TF);
785     case MachineOperand::MO_BlockAddress:
786       return MachineOperand::CreateBA(V.BA, Offset, TF);
787     case MachineOperand::MO_TargetIndex:
788       return MachineOperand::CreateTargetIndex(V.ImmVal, Offset, TF);
789     case MachineOperand::MO_ConstantPoolIndex:
790       return MachineOperand::CreateCPI(V.ImmVal, Offset, TF);
791     case MachineOperand::MO_JumpTableIndex:
792       assert(Offset == 0);
793       return MachineOperand::CreateJTI(V.ImmVal, TF);
794     default:
795       llvm_unreachable("Unhandled kind");
796  }
797 }
798 
799 bool HCE::isStoreImmediate(unsigned Opc) const {
800   switch (Opc) {
801     case Hexagon::S4_storeirbt_io:
802     case Hexagon::S4_storeirbf_io:
803     case Hexagon::S4_storeirht_io:
804     case Hexagon::S4_storeirhf_io:
805     case Hexagon::S4_storeirit_io:
806     case Hexagon::S4_storeirif_io:
807     case Hexagon::S4_storeirb_io:
808     case Hexagon::S4_storeirh_io:
809     case Hexagon::S4_storeiri_io:
810       return true;
811     default:
812       break;
813   }
814   return false;
815 }
816 
817 bool HCE::isRegOffOpcode(unsigned Opc) const {
818   switch (Opc) {
819     case Hexagon::L2_loadrub_io:
820     case Hexagon::L2_loadrb_io:
821     case Hexagon::L2_loadruh_io:
822     case Hexagon::L2_loadrh_io:
823     case Hexagon::L2_loadri_io:
824     case Hexagon::L2_loadrd_io:
825     case Hexagon::L2_loadbzw2_io:
826     case Hexagon::L2_loadbzw4_io:
827     case Hexagon::L2_loadbsw2_io:
828     case Hexagon::L2_loadbsw4_io:
829     case Hexagon::L2_loadalignh_io:
830     case Hexagon::L2_loadalignb_io:
831     case Hexagon::L2_ploadrubt_io:
832     case Hexagon::L2_ploadrubf_io:
833     case Hexagon::L2_ploadrbt_io:
834     case Hexagon::L2_ploadrbf_io:
835     case Hexagon::L2_ploadruht_io:
836     case Hexagon::L2_ploadruhf_io:
837     case Hexagon::L2_ploadrht_io:
838     case Hexagon::L2_ploadrhf_io:
839     case Hexagon::L2_ploadrit_io:
840     case Hexagon::L2_ploadrif_io:
841     case Hexagon::L2_ploadrdt_io:
842     case Hexagon::L2_ploadrdf_io:
843     case Hexagon::S2_storerb_io:
844     case Hexagon::S2_storerh_io:
845     case Hexagon::S2_storerf_io:
846     case Hexagon::S2_storeri_io:
847     case Hexagon::S2_storerd_io:
848     case Hexagon::S2_pstorerbt_io:
849     case Hexagon::S2_pstorerbf_io:
850     case Hexagon::S2_pstorerht_io:
851     case Hexagon::S2_pstorerhf_io:
852     case Hexagon::S2_pstorerft_io:
853     case Hexagon::S2_pstorerff_io:
854     case Hexagon::S2_pstorerit_io:
855     case Hexagon::S2_pstorerif_io:
856     case Hexagon::S2_pstorerdt_io:
857     case Hexagon::S2_pstorerdf_io:
858     case Hexagon::A2_addi:
859       return true;
860     default:
861       break;
862   }
863   return false;
864 }
865 
866 unsigned HCE::getRegOffOpcode(unsigned ExtOpc) const {
867   // If there exists a register+offset instruction that corresponds to
868   // ExtOpc, return it; otherwise return 0.
869   using namespace Hexagon;
870   switch (ExtOpc) {
871     case A2_tfrsi:    return A2_addi;
872     default:
873       break;
874   }
875   const MCInstrDesc &D = HII->get(ExtOpc);
876   if (D.mayLoad() || D.mayStore()) {
877     uint64_t F = D.TSFlags;
878     unsigned AM = (F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask;
879     switch (AM) {
880       case HexagonII::Absolute:
881       case HexagonII::AbsoluteSet:
882       case HexagonII::BaseLongOffset:
883         switch (ExtOpc) {
884           case PS_loadrubabs:
885           case L4_loadrub_ap:
886           case L4_loadrub_ur:     return L2_loadrub_io;
887           case PS_loadrbabs:
888           case L4_loadrb_ap:
889           case L4_loadrb_ur:      return L2_loadrb_io;
890           case PS_loadruhabs:
891           case L4_loadruh_ap:
892           case L4_loadruh_ur:     return L2_loadruh_io;
893           case PS_loadrhabs:
894           case L4_loadrh_ap:
895           case L4_loadrh_ur:      return L2_loadrh_io;
896           case PS_loadriabs:
897           case L4_loadri_ap:
898           case L4_loadri_ur:      return L2_loadri_io;
899           case PS_loadrdabs:
900           case L4_loadrd_ap:
901           case L4_loadrd_ur:      return L2_loadrd_io;
902           case L4_loadbzw2_ap:
903           case L4_loadbzw2_ur:    return L2_loadbzw2_io;
904           case L4_loadbzw4_ap:
905           case L4_loadbzw4_ur:    return L2_loadbzw4_io;
906           case L4_loadbsw2_ap:
907           case L4_loadbsw2_ur:    return L2_loadbsw2_io;
908           case L4_loadbsw4_ap:
909           case L4_loadbsw4_ur:    return L2_loadbsw4_io;
910           case L4_loadalignh_ap:
911           case L4_loadalignh_ur:  return L2_loadalignh_io;
912           case L4_loadalignb_ap:
913           case L4_loadalignb_ur:  return L2_loadalignb_io;
914           case L4_ploadrubt_abs:  return L2_ploadrubt_io;
915           case L4_ploadrubf_abs:  return L2_ploadrubf_io;
916           case L4_ploadrbt_abs:   return L2_ploadrbt_io;
917           case L4_ploadrbf_abs:   return L2_ploadrbf_io;
918           case L4_ploadruht_abs:  return L2_ploadruht_io;
919           case L4_ploadruhf_abs:  return L2_ploadruhf_io;
920           case L4_ploadrht_abs:   return L2_ploadrht_io;
921           case L4_ploadrhf_abs:   return L2_ploadrhf_io;
922           case L4_ploadrit_abs:   return L2_ploadrit_io;
923           case L4_ploadrif_abs:   return L2_ploadrif_io;
924           case L4_ploadrdt_abs:   return L2_ploadrdt_io;
925           case L4_ploadrdf_abs:   return L2_ploadrdf_io;
926           case PS_storerbabs:
927           case S4_storerb_ap:
928           case S4_storerb_ur:     return S2_storerb_io;
929           case PS_storerhabs:
930           case S4_storerh_ap:
931           case S4_storerh_ur:     return S2_storerh_io;
932           case PS_storerfabs:
933           case S4_storerf_ap:
934           case S4_storerf_ur:     return S2_storerf_io;
935           case PS_storeriabs:
936           case S4_storeri_ap:
937           case S4_storeri_ur:     return S2_storeri_io;
938           case PS_storerdabs:
939           case S4_storerd_ap:
940           case S4_storerd_ur:     return S2_storerd_io;
941           case S4_pstorerbt_abs:  return S2_pstorerbt_io;
942           case S4_pstorerbf_abs:  return S2_pstorerbf_io;
943           case S4_pstorerht_abs:  return S2_pstorerht_io;
944           case S4_pstorerhf_abs:  return S2_pstorerhf_io;
945           case S4_pstorerft_abs:  return S2_pstorerft_io;
946           case S4_pstorerff_abs:  return S2_pstorerff_io;
947           case S4_pstorerit_abs:  return S2_pstorerit_io;
948           case S4_pstorerif_abs:  return S2_pstorerif_io;
949           case S4_pstorerdt_abs:  return S2_pstorerdt_io;
950           case S4_pstorerdf_abs:  return S2_pstorerdf_io;
951           default:
952             break;
953         }
954         break;
955       case HexagonII::BaseImmOffset:
956         if (!isStoreImmediate(ExtOpc))
957           return ExtOpc;
958         break;
959       default:
960         break;
961     }
962   }
963   return 0;
964 }
965 
966 unsigned HCE::getDirectRegReplacement(unsigned ExtOpc) const {
967   switch (ExtOpc) {
968     case Hexagon::A2_addi:          return Hexagon::A2_add;
969     case Hexagon::A2_andir:         return Hexagon::A2_and;
970     case Hexagon::A2_combineii:     return Hexagon::A4_combineri;
971     case Hexagon::A2_orir:          return Hexagon::A2_or;
972     case Hexagon::A2_paddif:        return Hexagon::A2_paddf;
973     case Hexagon::A2_paddit:        return Hexagon::A2_paddt;
974     case Hexagon::A2_subri:         return Hexagon::A2_sub;
975     case Hexagon::A2_tfrsi:         return TargetOpcode::COPY;
976     case Hexagon::A4_cmpbeqi:       return Hexagon::A4_cmpbeq;
977     case Hexagon::A4_cmpbgti:       return Hexagon::A4_cmpbgt;
978     case Hexagon::A4_cmpbgtui:      return Hexagon::A4_cmpbgtu;
979     case Hexagon::A4_cmpheqi:       return Hexagon::A4_cmpheq;
980     case Hexagon::A4_cmphgti:       return Hexagon::A4_cmphgt;
981     case Hexagon::A4_cmphgtui:      return Hexagon::A4_cmphgtu;
982     case Hexagon::A4_combineii:     return Hexagon::A4_combineir;
983     case Hexagon::A4_combineir:     return TargetOpcode::REG_SEQUENCE;
984     case Hexagon::A4_combineri:     return TargetOpcode::REG_SEQUENCE;
985     case Hexagon::A4_rcmpeqi:       return Hexagon::A4_rcmpeq;
986     case Hexagon::A4_rcmpneqi:      return Hexagon::A4_rcmpneq;
987     case Hexagon::C2_cmoveif:       return Hexagon::A2_tfrpf;
988     case Hexagon::C2_cmoveit:       return Hexagon::A2_tfrpt;
989     case Hexagon::C2_cmpeqi:        return Hexagon::C2_cmpeq;
990     case Hexagon::C2_cmpgti:        return Hexagon::C2_cmpgt;
991     case Hexagon::C2_cmpgtui:       return Hexagon::C2_cmpgtu;
992     case Hexagon::C2_muxii:         return Hexagon::C2_muxir;
993     case Hexagon::C2_muxir:         return Hexagon::C2_mux;
994     case Hexagon::C2_muxri:         return Hexagon::C2_mux;
995     case Hexagon::C4_cmpltei:       return Hexagon::C4_cmplte;
996     case Hexagon::C4_cmplteui:      return Hexagon::C4_cmplteu;
997     case Hexagon::C4_cmpneqi:       return Hexagon::C4_cmpneq;
998     case Hexagon::M2_accii:         return Hexagon::M2_acci;        // T -> T
999     /* No M2_macsin */
1000     case Hexagon::M2_macsip:        return Hexagon::M2_maci;        // T -> T
1001     case Hexagon::M2_mpysin:        return Hexagon::M2_mpyi;
1002     case Hexagon::M2_mpysip:        return Hexagon::M2_mpyi;
1003     case Hexagon::M2_mpysmi:        return Hexagon::M2_mpyi;
1004     case Hexagon::M2_naccii:        return Hexagon::M2_nacci;       // T -> T
1005     case Hexagon::M4_mpyri_addi:    return Hexagon::M4_mpyri_addr;
1006     case Hexagon::M4_mpyri_addr:    return Hexagon::M4_mpyrr_addr;  // _ -> T
1007     case Hexagon::M4_mpyrr_addi:    return Hexagon::M4_mpyrr_addr;  // _ -> T
1008     case Hexagon::S4_addaddi:       return Hexagon::M2_acci;        // _ -> T
1009     case Hexagon::S4_addi_asl_ri:   return Hexagon::S2_asl_i_r_acc; // T -> T
1010     case Hexagon::S4_addi_lsr_ri:   return Hexagon::S2_lsr_i_r_acc; // T -> T
1011     case Hexagon::S4_andi_asl_ri:   return Hexagon::S2_asl_i_r_and; // T -> T
1012     case Hexagon::S4_andi_lsr_ri:   return Hexagon::S2_lsr_i_r_and; // T -> T
1013     case Hexagon::S4_ori_asl_ri:    return Hexagon::S2_asl_i_r_or;  // T -> T
1014     case Hexagon::S4_ori_lsr_ri:    return Hexagon::S2_lsr_i_r_or;  // T -> T
1015     case Hexagon::S4_subaddi:       return Hexagon::M2_subacc;      // _ -> T
1016     case Hexagon::S4_subi_asl_ri:   return Hexagon::S2_asl_i_r_nac; // T -> T
1017     case Hexagon::S4_subi_lsr_ri:   return Hexagon::S2_lsr_i_r_nac; // T -> T
1018 
1019     // Store-immediates:
1020     case Hexagon::S4_storeirbf_io:  return Hexagon::S2_pstorerbf_io;
1021     case Hexagon::S4_storeirb_io:   return Hexagon::S2_storerb_io;
1022     case Hexagon::S4_storeirbt_io:  return Hexagon::S2_pstorerbt_io;
1023     case Hexagon::S4_storeirhf_io:  return Hexagon::S2_pstorerhf_io;
1024     case Hexagon::S4_storeirh_io:   return Hexagon::S2_storerh_io;
1025     case Hexagon::S4_storeirht_io:  return Hexagon::S2_pstorerht_io;
1026     case Hexagon::S4_storeirif_io:  return Hexagon::S2_pstorerif_io;
1027     case Hexagon::S4_storeiri_io:   return Hexagon::S2_storeri_io;
1028     case Hexagon::S4_storeirit_io:  return Hexagon::S2_pstorerit_io;
1029 
1030     default:
1031       break;
1032   }
1033   return 0;
1034 }
1035 
1036 // Return the allowable deviation from the current value of Rb (i.e. the
1037 // range of values that can be added to the current value) which the
1038 // instruction MI can accommodate.
1039 // The instruction MI is a user of register Rb, which is defined via an
1040 // extender. It may be possible for MI to be tweaked to work for a register
1041 // defined with a slightly different value. For example
1042 //   ... = L2_loadrub_io Rb, 1
1043 // can be modified to be
1044 //   ... = L2_loadrub_io Rb', 0
1045 // if Rb' = Rb+1.
1046 // The range for Rb would be [Min+1, Max+1], where [Min, Max] is a range
1047 // for L2_loadrub with offset 0. That means that Rb could be replaced with
1048 // Rc, where Rc-Rb belongs to [Min+1, Max+1].
1049 OffsetRange HCE::getOffsetRange(Register Rb, const MachineInstr &MI) const {
1050   unsigned Opc = MI.getOpcode();
1051   // Instructions that are constant-extended may be replaced with something
1052   // else that no longer offers the same range as the original.
1053   if (!isRegOffOpcode(Opc) || HII->isConstExtended(MI))
1054     return OffsetRange::zero();
1055 
1056   if (Opc == Hexagon::A2_addi) {
1057     const MachineOperand &Op1 = MI.getOperand(1), &Op2 = MI.getOperand(2);
1058     if (Rb != Register(Op1) || !Op2.isImm())
1059       return OffsetRange::zero();
1060     OffsetRange R = { -(1<<15)+1, (1<<15)-1, 1 };
1061     return R.shift(Op2.getImm());
1062   }
1063 
1064   // HII::getBaseAndOffsetPosition returns the increment position as "offset".
1065   if (HII->isPostIncrement(MI))
1066     return OffsetRange::zero();
1067 
1068   const MCInstrDesc &D = HII->get(Opc);
1069   assert(D.mayLoad() || D.mayStore());
1070 
1071   unsigned BaseP, OffP;
1072   if (!HII->getBaseAndOffsetPosition(MI, BaseP, OffP) ||
1073       Rb != Register(MI.getOperand(BaseP)) ||
1074       !MI.getOperand(OffP).isImm())
1075     return OffsetRange::zero();
1076 
1077   uint64_t F = (D.TSFlags >> HexagonII::MemAccessSizePos) &
1078                   HexagonII::MemAccesSizeMask;
1079   uint8_t A = HexagonII::getMemAccessSizeInBytes(HexagonII::MemAccessSize(F));
1080   unsigned L = Log2_32(A);
1081   unsigned S = 10+L;  // sint11_L
1082   int32_t Min = -alignDown((1<<S)-1, A);
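  // For example, a word access has A = 4, so L = 2, S = 12, and
  // Min = -alignDown(4095, 4) = -4092.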
1083 
1084   // The range will be shifted by Off. To prefer non-negative offsets,
1085   // adjust Max accordingly.
1086   int32_t Off = MI.getOperand(OffP).getImm();
1087   int32_t Max = Off >= 0 ? 0 : -Off;
1088 
1089   OffsetRange R = { Min, Max, A };
1090   return R.shift(Off);
1091 }
1092 
1093 // Return the allowable deviation from the current value of the extender ED,
1094 // for which the instruction corresponding to ED can be modified without
1095 // using an extender.
1096 // The instruction uses the extender directly. It will be replaced with
1097 // another instruction, say MJ, where the extender will be replaced with a
1098 // register. MJ can allow some variability with respect to the value of
1099 // that register, as is the case with indexed memory instructions.
1100 OffsetRange HCE::getOffsetRange(const ExtDesc &ED) const {
1101   // The only way that there can be a non-zero range available is if
1102   // the instruction using ED will be converted to an indexed memory
1103   // instruction.
1104   unsigned IdxOpc = getRegOffOpcode(ED.UseMI->getOpcode());
1105   switch (IdxOpc) {
1106     case 0:
1107       return OffsetRange::zero();
1108     case Hexagon::A2_addi:    // s16
1109       return { -32767, 32767, 1 };
1110     case Hexagon::A2_subri:   // s10
1111       return { -511, 511, 1 };
1112   }
1113 
1114   if (!ED.UseMI->mayLoad() && !ED.UseMI->mayStore())
1115     return OffsetRange::zero();
1116   const MCInstrDesc &D = HII->get(IdxOpc);
1117   uint64_t F = (D.TSFlags >> HexagonII::MemAccessSizePos) &
1118                   HexagonII::MemAccesSizeMask;
1119   uint8_t A = HexagonII::getMemAccessSizeInBytes(HexagonII::MemAccessSize(F));
1120   unsigned L = Log2_32(A);
1121   unsigned S = 10+L;  // sint11_L
1122   int32_t Min = -alignDown((1<<S)-1, A);
1123   int32_t Max = 0;  // Force non-negative offsets.
1124   return { Min, Max, A };
1125 }
1126 
1127 // Get the allowable deviation from the current value of Rd by checking
1128 // all uses of Rd.
1129 OffsetRange HCE::getOffsetRange(Register Rd) const {
1130   OffsetRange Range;
1131   for (const MachineOperand &Op : MRI->use_operands(Rd.Reg)) {
1132     // Make sure that the register being used by this operand is identical
1133     // to the register that was defined: using a different subregister
1134     // precludes any non-trivial range.
1135     if (Rd != Register(Op))
1136       return OffsetRange::zero();
1137     Range.intersect(getOffsetRange(Rd, *Op.getParent()));
1138   }
1139   return Range;
1140 }
1141 
1142 void HCE::recordExtender(MachineInstr &MI, unsigned OpNum) {
1143   unsigned Opc = MI.getOpcode();
1144   ExtDesc ED;
1145   ED.OpNum = OpNum;
1146 
1147   bool IsLoad = MI.mayLoad();
1148   bool IsStore = MI.mayStore();
1149 
1150   // Fixed stack slots have negative indexes, and they cannot be used
1151   // with Register::stackSlot2Index and Register::index2StackSlot. This is
1152   // somewhat unfortunate, but should not be a frequent thing.
1153   for (MachineOperand &Op : MI.operands())
1154     if (Op.isFI() && Op.getIndex() < 0)
1155       return;
1156 
1157   if (IsLoad || IsStore) {
1158     unsigned AM = HII->getAddrMode(MI);
1159     switch (AM) {
1160       // (Re: ##Off + Rb<<S) = Rd: ##Val
1161       case HexagonII::Absolute:       // (__: ## + __<<_)
1162         break;
1163       case HexagonII::AbsoluteSet:    // (Rd: ## + __<<_)
1164         ED.Rd = MI.getOperand(OpNum-1);
1165         ED.IsDef = true;
1166         break;
1167       case HexagonII::BaseImmOffset:  // (__: ## + Rs<<0)
1168         // Store-immediates are treated as non-memory operations, since
1169         // it's the value being stored that is extended (as opposed to
1170         // a part of the address).
1171         if (!isStoreImmediate(Opc))
1172           ED.Expr.Rs = MI.getOperand(OpNum-1);
1173         break;
1174       case HexagonII::BaseLongOffset: // (__: ## + Rs<<S)
1175         ED.Expr.Rs = MI.getOperand(OpNum-2);
1176         ED.Expr.S = MI.getOperand(OpNum-1).getImm();
1177         break;
1178       default:
1179         llvm_unreachable("Unhandled memory instruction");
1180     }
1181   } else {
1182     switch (Opc) {
1183       case Hexagon::A2_tfrsi:         // (Rd: ## + __<<_)
1184         ED.Rd = MI.getOperand(0);
1185         ED.IsDef = true;
1186         break;
1187       case Hexagon::A2_combineii:     // (Rd: ## + __<<_)
1188       case Hexagon::A4_combineir:
1189         ED.Rd = { MI.getOperand(0).getReg(), Hexagon::isub_hi };
1190         ED.IsDef = true;
1191         break;
1192       case Hexagon::A4_combineri:     // (Rd: ## + __<<_)
1193         ED.Rd = { MI.getOperand(0).getReg(), Hexagon::isub_lo };
1194         ED.IsDef = true;
1195         break;
1196       case Hexagon::A2_addi:          // (Rd: ## + Rs<<0)
1197         ED.Rd = MI.getOperand(0);
1198         ED.Expr.Rs = MI.getOperand(OpNum-1);
1199         break;
1200       case Hexagon::M2_accii:         // (__: ## + Rs<<0)
1201       case Hexagon::M2_naccii:
1202       case Hexagon::S4_addaddi:
1203         ED.Expr.Rs = MI.getOperand(OpNum-1);
1204         break;
1205       case Hexagon::A2_subri:         // (Rd: ## - Rs<<0)
1206         ED.Rd = MI.getOperand(0);
1207         ED.Expr.Rs = MI.getOperand(OpNum+1);
1208         ED.Expr.Neg = true;
1209         break;
1210       case Hexagon::S4_subaddi:       // (__: ## - Rs<<0)
1211         ED.Expr.Rs = MI.getOperand(OpNum+1);
1212         ED.Expr.Neg = true;
1213         break;
1214       default:                        // (__: ## + __<<_)
1215         break;
1216     }
1217   }
1218 
1219   ED.UseMI = &MI;
1220 
1221   // Ignore unnamed globals.
1222   ExtRoot ER(ED.getOp());
1223   if (ER.Kind == MachineOperand::MO_GlobalAddress)
1224     if (ER.V.GV->getName().empty())
1225       return;
1226   Extenders.push_back(ED);
1227 }
1228 
1229 void HCE::collectInstr(MachineInstr &MI) {
1230   if (!HII->isConstExtended(MI))
1231     return;
1232 
1233   // Skip some non-convertible instructions.
1234   unsigned Opc = MI.getOpcode();
1235   switch (Opc) {
1236     case Hexagon::M2_macsin:  // There is no Rx -= mpyi(Rs,Rt).
1237     case Hexagon::C4_addipc:
1238     case Hexagon::S4_or_andi:
1239     case Hexagon::S4_or_andix:
1240     case Hexagon::S4_or_ori:
1241       return;
1242   }
1243   recordExtender(MI, HII->getCExtOpNum(MI));
1244 }
1245 
1246 void HCE::collect(MachineFunction &MF) {
1247   Extenders.clear();
1248   for (MachineBasicBlock &MBB : MF) {
1249     // Skip unreachable blocks.
1250     if (MBB.getNumber() == -1)
1251       continue;
1252     for (MachineInstr &MI : MBB)
1253       collectInstr(MI);
1254   }
1255 }
1256 
1257 void HCE::assignInits(const ExtRoot &ER, unsigned Begin, unsigned End,
1258       AssignmentMap &IMap) {
1259   // Basic correctness: make sure that all extenders in the range [Begin..End)
1260   // share the same root ER.
1261   for (unsigned I = Begin; I != End; ++I)
1262     assert(ER == ExtRoot(Extenders[I].getOp()));
1263 
1264   // Construct the list of ranges, such that for each P in Ranges[I],
1265   // a register Reg = ER+P can be used in place of Extenders[I]. If the
1266   // instruction allows, uses in the form of Reg+Off are considered
1267   // (here, Off = required_value - P).
1268   std::vector<OffsetRange> Ranges(End-Begin);
1269 
1270   // For each extender that is a def, visit all uses of the defined register,
1271   // and produce an offset range that works for all uses. The def doesn't
1272   // have to be checked, because it can become dead if all uses can be updated
1273   // to use a different reg/offset.
1274   for (unsigned I = Begin; I != End; ++I) {
1275     const ExtDesc &ED = Extenders[I];
1276     if (!ED.IsDef)
1277       continue;
1278     ExtValue EV(ED);
1279     LLVM_DEBUG(dbgs() << " =" << I << ". " << EV << "  " << ED << '\n');
1280     assert(ED.Rd.Reg != 0);
1281     Ranges[I-Begin] = getOffsetRange(ED.Rd).shift(EV.Offset);
1282     // A2_tfrsi is a special case: it will be replaced with A2_addi, which
1283     // has a 16-bit signed offset. This means that the range for A2_tfrsi
1284     // comes not only from its uses, but also from the range of its
1285     // replacement instruction.
1286     if (ED.UseMI->getOpcode() == Hexagon::A2_tfrsi) {
1287       int32_t D = alignDown(32767, Ranges[I-Begin].Align); // XXX hardcoded
1288       Ranges[I-Begin].extendBy(-D).extendBy(D);
1289     }
1290   }
1291 
1292   // Visit all non-def extenders. For each one, determine the offset range
1293   // available for it.
1294   for (unsigned I = Begin; I != End; ++I) {
1295     const ExtDesc &ED = Extenders[I];
1296     if (ED.IsDef)
1297       continue;
1298     ExtValue EV(ED);
1299     LLVM_DEBUG(dbgs() << "  " << I << ". " << EV << "  " << ED << '\n');
1300     OffsetRange Dev = getOffsetRange(ED);
1301     Ranges[I-Begin].intersect(Dev.shift(EV.Offset));
1302   }
1303 
1304   // Here for each I there is a corresponding Range[I]. Construct the
1305   // inverse map, that to each range will assign the set of indexes in
1306   // [Begin..End) that this range corresponds to.
1307   std::map<OffsetRange, IndexList> RangeMap;
1308   for (unsigned I = Begin; I != End; ++I)
1309     RangeMap[Ranges[I-Begin]].insert(I);
1310 
1311   LLVM_DEBUG({
1312     dbgs() << "Ranges\n";
1313     for (unsigned I = Begin; I != End; ++I)
1314       dbgs() << "  " << I << ". " << Ranges[I-Begin] << '\n';
1315     dbgs() << "RangeMap\n";
1316     for (auto &P : RangeMap) {
1317       dbgs() << "  " << P.first << " ->";
1318       for (unsigned I : P.second)
1319         dbgs() << ' ' << I;
1320       dbgs() << '\n';
1321     }
1322   });
1323 
1324   // Select the definition points, and generate the assignment between
1325   // these points and the uses.
1326 
1327   // For each candidate offset, keep a pair CandData consisting of
1328   // the total number of ranges containing that candidate, and the
1329   // vector of corresponding RangeTree nodes.
1330   using CandData = std::pair<unsigned, SmallVector<RangeTree::Node*,8>>;
1331   std::map<int32_t, CandData> CandMap;
1332 
1333   RangeTree Tree;
1334   for (const OffsetRange &R : Ranges)
1335     Tree.add(R);
1336   SmallVector<RangeTree::Node*,8> Nodes;
1337   Tree.order(Nodes);
1338 
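  // Among the nodes whose ranges cover a given point, find the largest
  // alignment (and its offset) that is compatible with the given Align and
  // Offset, i.e. that stays within the same arithmetic progression.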
1339   auto MaxAlign = [](const SmallVectorImpl<RangeTree::Node*> &Nodes,
1340                      uint8_t Align, uint8_t Offset) {
1341     for (RangeTree::Node *N : Nodes) {
1342       if (N->Range.Align <= Align || N->Range.Offset < Offset)
1343         continue;
1344       if ((N->Range.Offset - Offset) % Align != 0)
1345         continue;
1346       Align = N->Range.Align;
1347       Offset = N->Range.Offset;
1348     }
1349     return std::make_pair(Align, Offset);
1350   };
1351 
1352   // Construct the set of all potential definition points from the endpoints
1353   // of the ranges. If a given endpoint also belongs to a different range,
1354   // but with a higher alignment, also consider the more-highly-aligned
1355   // value of this endpoint.
1356   std::set<int32_t> CandSet;
1357   for (RangeTree::Node *N : Nodes) {
1358     const OffsetRange &R = N->Range;
1359     auto P0 = MaxAlign(Tree.nodesWith(R.Min, false), R.Align, R.Offset);
1360     CandSet.insert(R.Min);
1361     if (R.Align < P0.first)
1362       CandSet.insert(adjustUp(R.Min, P0.first, P0.second));
1363     auto P1 = MaxAlign(Tree.nodesWith(R.Max, false), R.Align, R.Offset);
1364     CandSet.insert(R.Max);
1365     if (R.Align < P1.first)
1366       CandSet.insert(adjustDown(R.Max, P1.first, P1.second));
1367   }
1368 
1369   // Build the assignment map: candidate C -> { list of extender indexes }.
1370   // This has to be done iteratively:
1371   // - pick the candidate that covers the maximum number of extenders,
1372   // - add the candidate to the map,
1373   // - remove the extenders from the pool.
1374   while (true) {
1375     using CMap = std::map<int32_t,unsigned>;
1376     CMap Counts;
1377     for (auto It = CandSet.begin(), Et = CandSet.end(); It != Et; ) {
1378       auto &&V = Tree.nodesWith(*It);
1379       unsigned N = std::accumulate(V.begin(), V.end(), 0u,
1380                     [](unsigned Acc, const RangeTree::Node *N) {
1381                       return Acc + N->Count;
1382                     });
1383       if (N != 0)
1384         Counts.insert({*It, N});
1385       It = (N != 0) ? std::next(It) : CandSet.erase(It);
1386     }
1387     if (Counts.empty())
1388       break;
1389 
1390     // Find the best candidate with respect to the number of extenders covered.
1391     auto BestIt = std::max_element(Counts.begin(), Counts.end(),
1392                     [](const CMap::value_type &A, const CMap::value_type &B) {
1393                       return A.second < B.second ||
1394                              (A.second == B.second && A < B);
1395                     });
1396     int32_t Best = BestIt->first;
1397     ExtValue BestV(ER, Best);
1398     for (RangeTree::Node *N : Tree.nodesWith(Best)) {
1399       for (unsigned I : RangeMap[N->Range])
1400         IMap[{BestV,Extenders[I].Expr}].insert(I);
1401       Tree.erase(N);
1402     }
1403   }
1404 
1405   LLVM_DEBUG(dbgs() << "IMap (before fixup) = " << PrintIMap(IMap, *HRI));
1406 
1407   // There is some ambiguity in what initializer should be used, if the
1408   // descriptor's subexpression is non-trivial: it can be the entire
1409   // subexpression (which is what has been done so far), or it can be
1410   // the extender's value itself, if all corresponding extenders have the
1411   // exact value of the initializer (i.e. require offset of 0).
1412 
1413   // To reduce the number of initializers, merge such special cases.
1414   for (std::pair<const ExtenderInit,IndexList> &P : IMap) {
1415     // Skip trivial initializers.
1416     if (P.first.second.trivial())
1417       continue;
1418     // If the corresponding trivial initializer does not exist, skip this
1419     // entry.
1420     const ExtValue &EV = P.first.first;
1421     AssignmentMap::iterator F = IMap.find({EV, ExtExpr()});
1422     if (F == IMap.end())
1423       continue;
1424 
1425     // Finally, check if all extenders have the same value as the initializer.
1426     // Make sure that extenders that are a part of a stack address are not
1427     // merged with those that aren't. Stack addresses need an offset field
1428     // (to be used by frame index elimination), while non-stack expressions
1429     // can be replaced with forms (such as rr) that do not have such a field.
1430     // Example:
1431     //
1432     // Collected 3 extenders
1433     //  =2. imm:0  off:32968  bb#2: %7 = ## + __ << 0, def
1434     //   0. imm:0  off:267  bb#0: __ = ## + SS#1 << 0
1435     //   1. imm:0  off:267  bb#1: __ = ## + SS#1 << 0
1436     // Ranges
1437     //   0. [-756,267]a1+0
1438     //   1. [-756,267]a1+0
1439     //   2. [201,65735]a1+0
1440     // RangeMap
1441     //   [-756,267]a1+0 -> 0 1
1442     //   [201,65735]a1+0 -> 2
1443     // IMap (before fixup) = {
1444     //   [imm:0  off:267, ## + __ << 0] -> { 2 }
1445     //   [imm:0  off:267, ## + SS#1 << 0] -> { 0 1 }
1446     // }
1447     // IMap (after fixup) = {
1448     //   [imm:0  off:267, ## + __ << 0] -> { 2 0 1 }
1449     //   [imm:0  off:267, ## + SS#1 << 0] -> { }
1450     // }
1451     // Inserted def in bb#0 for initializer: [imm:0  off:267, ## + __ << 0]
1452     //   %12:intregs = A2_tfrsi 267
1453     //
1454     // The result was
1455     //   %12:intregs = A2_tfrsi 267
1456     //   S4_pstorerbt_rr %3, %12, %stack.1, 0, killed %4
1457     // Which became
1458     //   r0 = #267
1459     //   if (p0.new) memb(r0+r29<<#4) = r2
1460 
1461     bool IsStack = any_of(F->second, [this](unsigned I) {
1462                       return Extenders[I].Expr.Rs.isSlot();
1463                    });
1464     auto SameValue = [&EV,this,IsStack](unsigned I) {
1465       const ExtDesc &ED = Extenders[I];
1466       return ED.Expr.Rs.isSlot() == IsStack &&
1467              ExtValue(ED).Offset == EV.Offset;
1468     };
1469     if (all_of(P.second, SameValue)) {
1470       F->second.insert(P.second.begin(), P.second.end());
1471       P.second.clear();
1472     }
1473   }
1474 
1475   LLVM_DEBUG(dbgs() << "IMap (after fixup) = " << PrintIMap(IMap, *HRI));
1476 }
1477 
1478 void HCE::calculatePlacement(const ExtenderInit &ExtI, const IndexList &Refs,
1479       LocDefList &Defs) {
1480   if (Refs.empty())
1481     return;
1482 
1483   // The placement calculation is somewhat simple right now: it finds a
1484   // single location for the def that dominates all refs. Since this may
1485   // place the def far from the uses, producing several locations for
1486   // defs that collectively dominate all refs could be better.
1487   // For now only do the single one.
1488   DenseSet<MachineBasicBlock*> Blocks;
1489   DenseSet<MachineInstr*> RefMIs;
1490   const ExtDesc &ED0 = Extenders[Refs[0]];
1491   MachineBasicBlock *DomB = ED0.UseMI->getParent();
1492   RefMIs.insert(ED0.UseMI);
1493   Blocks.insert(DomB);
1494   for (unsigned i = 1, e = Refs.size(); i != e; ++i) {
1495     const ExtDesc &ED = Extenders[Refs[i]];
1496     MachineBasicBlock *MBB = ED.UseMI->getParent();
1497     RefMIs.insert(ED.UseMI);
1498     DomB = MDT->findNearestCommonDominator(DomB, MBB);
1499     Blocks.insert(MBB);
1500   }
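       // DomB is now the nearest common dominator of all blocks that contain
       // refs (it may or may not contain a ref itself).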
1501 
1502 #ifndef NDEBUG
1503   // The block DomB should be dominated by the def of each register used
1504   // in the initializer.
1505   Register Rs = ExtI.second.Rs;  // Only one reg allowed now.
1506   const MachineInstr *DefI = Rs.isVReg() ? MRI->getVRegDef(Rs.Reg) : nullptr;
1507 
1508   // This should be guaranteed given that the entire expression is used
1509   // at each instruction in Refs. Add an assertion just in case.
1510   assert(!DefI || MDT->dominates(DefI->getParent(), DomB));
1511 #endif
1512 
1513   MachineBasicBlock::iterator It;
1514   if (Blocks.count(DomB)) {
1515     // Try to find the latest possible location for the def.
1516     MachineBasicBlock::iterator End = DomB->end();
1517     for (It = DomB->begin(); It != End; ++It)
1518       if (RefMIs.count(&*It))
1519         break;
1520     assert(It != End && "Should have found a ref in DomB");
1521   } else {
1522     // DomB does not contain any refs.
1523     It = DomB->getFirstTerminator();
1524   }
1525   Loc DefLoc(DomB, It);
1526   Defs.emplace_back(DefLoc, Refs);
1527 }
1528 
1529 HCE::Register HCE::insertInitializer(Loc DefL, const ExtenderInit &ExtI) {
1530   llvm::Register DefR = MRI->createVirtualRegister(&Hexagon::IntRegsRegClass);
1531   MachineBasicBlock &MBB = *DefL.Block;
1532   MachineBasicBlock::iterator At = DefL.At;
1533   DebugLoc dl = DefL.Block->findDebugLoc(DefL.At);
1534   const ExtValue &EV = ExtI.first;
1535   MachineOperand ExtOp(EV);
1536 
1537   const ExtExpr &Ex = ExtI.second;
1538   const MachineInstr *InitI = nullptr;
1539 
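       // Emit the initializer according to the shape of Ex: a frame-index add,
       // a plain immediate transfer, an add/sub with a register, or a shifted
       // form.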
1540   if (Ex.Rs.isSlot()) {
1541     assert(Ex.S == 0 && "Cannot have a shift of a stack slot");
1542     assert(!Ex.Neg && "Cannot subtract a stack slot");
1543     // DefR = PS_fi Rb,##EV
1544     InitI = BuildMI(MBB, At, dl, HII->get(Hexagon::PS_fi), DefR)
1545               .add(MachineOperand(Ex.Rs))
1546               .add(ExtOp);
1547   } else {
1548     assert((Ex.Rs.Reg == 0 || Ex.Rs.isVReg()) && "Expecting virtual register");
1549     if (Ex.trivial()) {
1550       // DefR = ##EV
1551       InitI = BuildMI(MBB, At, dl, HII->get(Hexagon::A2_tfrsi), DefR)
1552                 .add(ExtOp);
1553     } else if (Ex.S == 0) {
1554       if (Ex.Neg) {
1555         // DefR = sub(##EV,Rb)
1556         InitI = BuildMI(MBB, At, dl, HII->get(Hexagon::A2_subri), DefR)
1557                   .add(ExtOp)
1558                   .add(MachineOperand(Ex.Rs));
1559       } else {
1560         // DefR = add(Rb,##EV)
1561         InitI = BuildMI(MBB, At, dl, HII->get(Hexagon::A2_addi), DefR)
1562                   .add(MachineOperand(Ex.Rs))
1563                   .add(ExtOp);
1564       }
1565     } else {
1566       if (HST->useCompound()) {
1567         unsigned NewOpc = Ex.Neg ? Hexagon::S4_subi_asl_ri
1568                                  : Hexagon::S4_addi_asl_ri;
1569         // DefR = add(##EV,asl(Rb,S))
1570         InitI = BuildMI(MBB, At, dl, HII->get(NewOpc), DefR)
1571                   .add(ExtOp)
1572                   .add(MachineOperand(Ex.Rs))
1573                   .addImm(Ex.S);
1574       } else {
1575         // No compounds are available. It is not clear whether we should
1576         // even process such extenders where the initializer cannot be
1577         // a single instruction, but do it for now.
1578         llvm::Register TmpR = MRI->createVirtualRegister(&Hexagon::IntRegsRegClass);
1579         BuildMI(MBB, At, dl, HII->get(Hexagon::S2_asl_i_r), TmpR)
1580           .add(MachineOperand(Ex.Rs))
1581           .addImm(Ex.S);
1582         if (Ex.Neg)
1583           InitI = BuildMI(MBB, At, dl, HII->get(Hexagon::A2_subri), DefR)
1584                     .add(ExtOp)
1585                     .add(MachineOperand(Register(TmpR, 0)));
1586         else
1587           InitI = BuildMI(MBB, At, dl, HII->get(Hexagon::A2_addi), DefR)
1588                     .add(MachineOperand(Register(TmpR, 0)))
1589                     .add(ExtOp);
1590       }
1591     }
1592   }
1593 
1594   assert(InitI);
1595   (void)InitI;
1596   LLVM_DEBUG(dbgs() << "Inserted def in bb#" << MBB.getNumber()
1597                     << " for initializer: " << PrintInit(ExtI, *HRI) << "\n  "
1598                     << *InitI);
1599   return { DefR, 0 };
1600 }
1601 
1602 // Replace the extender described by ED with the register ExtR.
1603 bool HCE::replaceInstrExact(const ExtDesc &ED, Register ExtR) {
1604   MachineInstr &MI = *ED.UseMI;
1605   MachineBasicBlock &MBB = *MI.getParent();
1606   MachineBasicBlock::iterator At = MI.getIterator();
1607   DebugLoc dl = MI.getDebugLoc();
1608   unsigned ExtOpc = MI.getOpcode();
1609 
1610   // With a few exceptions, direct replacement amounts to creating an
1611   // instruction with a corresponding register opcode, with all operands
1612   // the same, except for the register used in place of the extender.
1613   unsigned RegOpc = getDirectRegReplacement(ExtOpc);
1614 
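       // A4_combineri / A4_combineir have no direct register form; they become
       // a REG_SEQUENCE with the initializer register supplying the immediate
       // half of the pair.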
1615   if (RegOpc == TargetOpcode::REG_SEQUENCE) {
1616     if (ExtOpc == Hexagon::A4_combineri)
1617       BuildMI(MBB, At, dl, HII->get(RegOpc))
1618         .add(MI.getOperand(0))
1619         .add(MI.getOperand(1))
1620         .addImm(Hexagon::isub_hi)
1621         .add(MachineOperand(ExtR))
1622         .addImm(Hexagon::isub_lo);
1623     else if (ExtOpc == Hexagon::A4_combineir)
1624       BuildMI(MBB, At, dl, HII->get(RegOpc))
1625         .add(MI.getOperand(0))
1626         .add(MachineOperand(ExtR))
1627         .addImm(Hexagon::isub_hi)
1628         .add(MI.getOperand(2))
1629         .addImm(Hexagon::isub_lo);
1630     else
1631       llvm_unreachable("Unexpected opcode became REG_SEQUENCE");
1632     MBB.erase(MI);
1633     return true;
1634   }
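       // cmp.ge/cmp.geu with an extended immediate are rewritten using
       // C2_cmplt/C2_cmpltu, with the initializer register as the first source
       // operand.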
1635   if (ExtOpc == Hexagon::C2_cmpgei || ExtOpc == Hexagon::C2_cmpgeui) {
1636     unsigned NewOpc = ExtOpc == Hexagon::C2_cmpgei ? Hexagon::C2_cmplt
1637                                                    : Hexagon::C2_cmpltu;
1638     BuildMI(MBB, At, dl, HII->get(NewOpc))
1639       .add(MI.getOperand(0))
1640       .add(MachineOperand(ExtR))
1641       .add(MI.getOperand(1));
1642     MBB.erase(MI);
1643     return true;
1644   }
1645 
1646   if (RegOpc != 0) {
1647     MachineInstrBuilder MIB = BuildMI(MBB, At, dl, HII->get(RegOpc));
1648     unsigned RegN = ED.OpNum;
1649     // Copy all operands except the one that has the extender.
1650     for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1651       if (i != RegN)
1652         MIB.add(MI.getOperand(i));
1653       else
1654         MIB.add(MachineOperand(ExtR));
1655     }
1656     MIB.cloneMemRefs(MI);
1657     MBB.erase(MI);
1658     return true;
1659   }
1660 
1661   if (MI.mayLoadOrStore() && !isStoreImmediate(ExtOpc)) {
1662     // For memory instructions, there is an asymmetry in the addressing
1663     // modes. Addressing modes allowing extenders can be replaced with
1664     // addressing modes that use registers, but the order of operands
1665     // (or even their number) may be different.
1666     // Replacements:
1667     //   BaseImmOffset (io)  -> BaseRegOffset (rr)
1668     //   BaseLongOffset (ur) -> BaseRegOffset (rr)
1669     unsigned RegOpc, Shift;
1670     unsigned AM = HII->getAddrMode(MI);
1671     if (AM == HexagonII::BaseImmOffset) {
1672       RegOpc = HII->changeAddrMode_io_rr(ExtOpc);
1673       Shift = 0;
1674     } else if (AM == HexagonII::BaseLongOffset) {
1675       // Loads:  Rd = L4_loadri_ur Rs, S, ##
1676       // Stores: S4_storeri_ur Rs, S, ##, Rt
1677       RegOpc = HII->changeAddrMode_ur_rr(ExtOpc);
1678       Shift = MI.getOperand(MI.mayLoad() ? 2 : 1).getImm();
1679     } else {
1680       llvm_unreachable("Unexpected addressing mode");
1681     }
1682 #ifndef NDEBUG
1683     if (RegOpc == -1u) {
1684       dbgs() << "\nExtOpc: " << HII->getName(ExtOpc) << " has no rr version\n";
1685       llvm_unreachable("No corresponding rr instruction");
1686     }
1687 #endif
1688 
1689     unsigned BaseP, OffP;
1690     HII->getBaseAndOffsetPosition(MI, BaseP, OffP);
1691 
1692     // Build an rr instruction: (RegOff + RegBase<<0)
1693     MachineInstrBuilder MIB = BuildMI(MBB, At, dl, HII->get(RegOpc));
1694     // First, add the def for loads.
1695     if (MI.mayLoad())
1696       MIB.add(getLoadResultOp(MI));
1697     // Handle possible predication.
1698     if (HII->isPredicated(MI))
1699       MIB.add(getPredicateOp(MI));
1700     // Build the address.
1701     MIB.add(MachineOperand(ExtR));      // RegOff
1702     MIB.add(MI.getOperand(BaseP));      // RegBase
1703     MIB.addImm(Shift);                  // << Shift
1704     // Add the stored value for stores.
1705     if (MI.mayStore())
1706       MIB.add(getStoredValueOp(MI));
1707     MIB.cloneMemRefs(MI);
1708     MBB.erase(MI);
1709     return true;
1710   }
1711 
1712 #ifndef NDEBUG
1713   dbgs() << '\n' << MI;
1714 #endif
1715   llvm_unreachable("Unhandled exact replacement");
1716   return false;
1717 }
1718 
1719 // Replace the extender ED with a form corresponding to the initializer ExtI.
1720 bool HCE::replaceInstrExpr(const ExtDesc &ED, const ExtenderInit &ExtI,
1721       Register ExtR, int32_t &Diff) {
1722   MachineInstr &MI = *ED.UseMI;
1723   MachineBasicBlock &MBB = *MI.getParent();
1724   MachineBasicBlock::iterator At = MI.getIterator();
1725   DebugLoc dl = MI.getDebugLoc();
1726   unsigned ExtOpc = MI.getOpcode();
1727 
1728   if (ExtOpc == Hexagon::A2_tfrsi) {
1729     // A2_tfrsi is a special case: it's replaced with A2_addi, which introduces
1730     // another range: besides the range that is common to all of the tfrsi's
1731     // uses, there is the range of A2_addi's immediate. When calculating ranges,
1732     // the addi's 16-bit argument was included, so now we need to make it such
1733     // that the produced value is in the range for the uses alone.
1734     // Most of the time, simply adding Diff will make the addi produce the exact
1735     // result, but if Diff is outside of the 16-bit range, some adjustment
1736     // will be needed.
1737     unsigned IdxOpc = getRegOffOpcode(ExtOpc);
1738     assert(IdxOpc == Hexagon::A2_addi);
1739 
1740     // Clamp Diff to the 16 bit range.
1741     int32_t D = isInt<16>(Diff) ? Diff : (Diff > 0 ? 32767 : -32768);
1742     if (Diff > 32767) {
1743       // Split Diff into two values: one that is close to min/max int16,
1744       // and the other covering the rest, such that both have the same
1745       // "alignment" as Diff.
1746       uint32_t UD = Diff;
1747       OffsetRange R = getOffsetRange(MI.getOperand(0));
1748       uint32_t A = std::min<uint32_t>(R.Align, 1u << llvm::countr_zero(UD));
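           // Clear the low bits of D so that both D and the remaining Diff-D
           // stay multiples of A.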
1749       D &= ~(A-1);
1750     }
1751     BuildMI(MBB, At, dl, HII->get(IdxOpc))
1752       .add(MI.getOperand(0))
1753       .add(MachineOperand(ExtR))
1754       .addImm(D);
1755     Diff -= D;
1756 #ifndef NDEBUG
1757     // Make sure the output is within allowable range for uses.
1758     // "Diff" is a difference in the "opposite direction", i.e. Ext - DefV,
1759     // not DefV - Ext, as the getOffsetRange would calculate.
1760     OffsetRange Uses = getOffsetRange(MI.getOperand(0));
1761     if (!Uses.contains(-Diff))
1762       dbgs() << "Diff: " << -Diff << " out of range " << Uses
1763              << " for " << MI;
1764     assert(Uses.contains(-Diff));
1765 #endif
1766     MBB.erase(MI);
1767     return true;
1768   }
1769 
1770   const ExtValue &EV = ExtI.first; (void)EV;
1771   const ExtExpr &Ex = ExtI.second; (void)Ex;
1772 
1773   if (ExtOpc == Hexagon::A2_addi || ExtOpc == Hexagon::A2_subri) {
1774     // If addi/subri are replaced with the exactly matching initializer,
1775     // they amount to COPY.
1776     // Check that the initializer is an exact match (for simplicity).
1777 #ifndef NDEBUG
1778     bool IsAddi = ExtOpc == Hexagon::A2_addi;
1779     const MachineOperand &RegOp = MI.getOperand(IsAddi ? 1 : 2);
1780     const MachineOperand &ImmOp = MI.getOperand(IsAddi ? 2 : 1);
1781     assert(Ex.Rs == RegOp && EV == ImmOp && Ex.Neg != IsAddi &&
1782            "Initializer mismatch");
1783 #endif
1784     BuildMI(MBB, At, dl, HII->get(TargetOpcode::COPY))
1785       .add(MI.getOperand(0))
1786       .add(MachineOperand(ExtR));
1787     Diff = 0;
1788     MBB.erase(MI);
1789     return true;
1790   }
1791   if (ExtOpc == Hexagon::M2_accii || ExtOpc == Hexagon::M2_naccii ||
1792       ExtOpc == Hexagon::S4_addaddi || ExtOpc == Hexagon::S4_subaddi) {
1793     // M2_accii:    add(Rt,add(Rs,V)) (tied)
1794     // M2_naccii:   sub(Rt,add(Rs,V))
1795     // S4_addaddi:  add(Rt,add(Rs,V))
1796     // S4_subaddi:  add(Rt,sub(V,Rs))
1797     // Check that Rs and V match the initializer expression. The Rs+V is the
1798     // combination that is considered "subexpression" for V, although Rx+V
1799     // would also be valid.
1800 #ifndef NDEBUG
1801     bool IsSub = ExtOpc == Hexagon::S4_subaddi;
1802     Register Rs = MI.getOperand(IsSub ? 3 : 2);
1803     ExtValue V = MI.getOperand(IsSub ? 2 : 3);
1804     assert(EV == V && Rs == Ex.Rs && IsSub == Ex.Neg && "Initializer mismatch");
1805 #endif
1806     unsigned NewOpc = ExtOpc == Hexagon::M2_naccii ? Hexagon::A2_sub
1807                                                    : Hexagon::A2_add;
1808     BuildMI(MBB, At, dl, HII->get(NewOpc))
1809       .add(MI.getOperand(0))
1810       .add(MI.getOperand(1))
1811       .add(MachineOperand(ExtR));
1812     MBB.erase(MI);
1813     return true;
1814   }
1815 
1816   if (MI.mayLoadOrStore()) {
1817     unsigned IdxOpc = getRegOffOpcode(ExtOpc);
1818     assert(IdxOpc && "Expecting indexed opcode");
1819     MachineInstrBuilder MIB = BuildMI(MBB, At, dl, HII->get(IdxOpc));
1820     // Construct the new indexed instruction.
1821     // First, add the def for loads.
1822     if (MI.mayLoad())
1823       MIB.add(getLoadResultOp(MI));
1824     // Handle possible predication.
1825     if (HII->isPredicated(MI))
1826       MIB.add(getPredicateOp(MI));
1827     // Build the address.
1828     MIB.add(MachineOperand(ExtR));
1829     MIB.addImm(Diff);
1830     // Add the stored value for stores.
1831     if (MI.mayStore())
1832       MIB.add(getStoredValueOp(MI));
1833     MIB.cloneMemRefs(MI);
1834     MBB.erase(MI);
1835     return true;
1836   }
1837 
1838 #ifndef NDEBUG
1839   dbgs() << '\n' << PrintInit(ExtI, *HRI) << "  " << MI;
1840 #endif
1841   llvm_unreachable("Unhandled expr replacement");
1842   return false;
1843 }
1844 
1845 bool HCE::replaceInstr(unsigned Idx, Register ExtR, const ExtenderInit &ExtI) {
1846   if (ReplaceLimit.getNumOccurrences()) {
1847     if (ReplaceLimit <= ReplaceCounter)
1848       return false;
1849     ++ReplaceCounter;
1850   }
1851   const ExtDesc &ED = Extenders[Idx];
1852   assert((!ED.IsDef || ED.Rd.Reg != 0) && "Missing Rd for def");
1853   const ExtValue &DefV = ExtI.first;
1854   assert(ExtRoot(ExtValue(ED)) == ExtRoot(DefV) && "Extender root mismatch");
1855   const ExtExpr &DefEx = ExtI.second;
1856 
1857   ExtValue EV(ED);
1858   int32_t Diff = EV.Offset - DefV.Offset;
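       // Diff is the amount by which the extender's value exceeds the value
       // produced by the initializer; a zero Diff with a trivial initializer
       // allows an exact replacement below.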
1859   const MachineInstr &MI = *ED.UseMI;
1860   LLVM_DEBUG(dbgs() << __func__ << " Idx:" << Idx << " ExtR:"
1861                     << PrintRegister(ExtR, *HRI) << " Diff:" << Diff << '\n');
1862 
1863   // These two addressing modes must be converted into indexed forms
1864   // regardless of what the initializer looks like.
1865   bool IsAbs = false, IsAbsSet = false;
1866   if (MI.mayLoadOrStore()) {
1867     unsigned AM = HII->getAddrMode(MI);
1868     IsAbs = AM == HexagonII::Absolute;
1869     IsAbsSet = AM == HexagonII::AbsoluteSet;
1870   }
1871 
1872   // If it's a def, remember all operands that need to be updated.
1873   // If ED is a def, and Diff is not 0, then all uses of the register Rd
1874   // defined by ED must be in the form (Rd, imm), i.e. the immediate offset
1875   // must follow the Rd in the operand list.
1876   std::vector<std::pair<MachineInstr*,unsigned>> RegOps;
1877   if (ED.IsDef && Diff != 0) {
1878     for (MachineOperand &Op : MRI->use_operands(ED.Rd.Reg)) {
1879       MachineInstr &UI = *Op.getParent();
1880       RegOps.push_back({&UI, getOperandIndex(UI, Op)});
1881     }
1882   }
1883 
1884   // Replace the instruction.
1885   bool Replaced = false;
1886   if (Diff == 0 && DefEx.trivial() && !IsAbs && !IsAbsSet)
1887     Replaced = replaceInstrExact(ED, ExtR);
1888   else
1889     Replaced = replaceInstrExpr(ED, ExtI, ExtR, Diff);
1890 
1891   if (Diff != 0 && Replaced && ED.IsDef) {
1892     // Update offsets of the def's uses.
1893     for (std::pair<MachineInstr*,unsigned> P : RegOps) {
1894       unsigned J = P.second;
1895       assert(P.first->getNumOperands() > J+1 &&
1896              P.first->getOperand(J+1).isImm());
1897       MachineOperand &ImmOp = P.first->getOperand(J+1);
1898       ImmOp.setImm(ImmOp.getImm() + Diff);
1899     }
1900     // If it was an absolute-set instruction, the "set" part has been removed.
1901     // ExtR will now be the register with the extended value, and since all
1902     // users of Rd have been updated, all that needs to be done is to replace
1903     // Rd with ExtR.
1904     if (IsAbsSet) {
1905       assert(ED.Rd.Sub == 0 && ExtR.Sub == 0);
1906       MRI->replaceRegWith(ED.Rd.Reg, ExtR.Reg);
1907     }
1908   }
1909 
1910   return Replaced;
1911 }
1912 
1913 bool HCE::replaceExtenders(const AssignmentMap &IMap) {
1914   LocDefList Defs;
1915   bool Changed = false;
1916 
1917   for (const std::pair<const ExtenderInit, IndexList> &P : IMap) {
1918     const IndexList &Idxs = P.second;
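         // Only replace when enough extenders share this initializer to justify
         // inserting the extra def.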
1919     if (Idxs.size() < CountThreshold)
1920       continue;
1921 
1922     Defs.clear();
1923     calculatePlacement(P.first, Idxs, Defs);
1924     for (const std::pair<Loc,IndexList> &Q : Defs) {
1925       Register DefR = insertInitializer(Q.first, P.first);
1926       NewRegs.push_back(DefR.Reg);
1927       for (unsigned I : Q.second)
1928         Changed |= replaceInstr(I, DefR, P.first);
1929     }
1930   }
1931   return Changed;
1932 }
1933 
1934 unsigned HCE::getOperandIndex(const MachineInstr &MI,
1935       const MachineOperand &Op) const {
1936   for (unsigned i = 0, n = MI.getNumOperands(); i != n; ++i)
1937     if (&MI.getOperand(i) == &Op)
1938       return i;
1939   llvm_unreachable("Not an operand of MI");
1940 }
1941 
1942 const MachineOperand &HCE::getPredicateOp(const MachineInstr &MI) const {
1943   assert(HII->isPredicated(MI));
1944   for (const MachineOperand &Op : MI.operands()) {
1945     if (!Op.isReg() || !Op.isUse() ||
1946         MRI->getRegClass(Op.getReg()) != &Hexagon::PredRegsRegClass)
1947       continue;
1948     assert(Op.getSubReg() == 0 && "Predicate register with a subregister");
1949     return Op;
1950   }
1951   llvm_unreachable("Predicate operand not found");
1952 }
1953 
1954 const MachineOperand &HCE::getLoadResultOp(const MachineInstr &MI) const {
1955   assert(MI.mayLoad());
1956   return MI.getOperand(0);
1957 }
1958 
1959 const MachineOperand &HCE::getStoredValueOp(const MachineInstr &MI) const {
1960   assert(MI.mayStore());
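       // The stored value is taken to be the last explicit operand.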
1961   return MI.getOperand(MI.getNumExplicitOperands()-1);
1962 }
1963 
1964 bool HCE::runOnMachineFunction(MachineFunction &MF) {
1965   if (skipFunction(MF.getFunction()))
1966     return false;
1967   if (MF.getFunction().hasPersonalityFn()) {
1968     LLVM_DEBUG(dbgs() << getPassName() << ": skipping " << MF.getName()
1969                       << " due to exception handling\n");
1970     return false;
1971   }
1972   LLVM_DEBUG(MF.print(dbgs() << "Before " << getPassName() << '\n', nullptr));
1973 
1974   HST = &MF.getSubtarget<HexagonSubtarget>();
1975   HII = HST->getInstrInfo();
1976   HRI = HST->getRegisterInfo();
1977   MDT = &getAnalysis<MachineDominatorTree>();
1978   MRI = &MF.getRegInfo();
1979   AssignmentMap IMap;
1980 
1981   collect(MF);
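       // Sort the collected extenders by value and then by program order, so
       // that extenders sharing the same root end up adjacent in the list.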
1982   llvm::sort(Extenders, [this](const ExtDesc &A, const ExtDesc &B) {
1983     ExtValue VA(A), VB(B);
1984     if (VA != VB)
1985       return VA < VB;
1986     const MachineInstr *MA = A.UseMI;
1987     const MachineInstr *MB = B.UseMI;
1988     if (MA == MB) {
1989       // If it's the same instruction, compare operand numbers.
1990       return A.OpNum < B.OpNum;
1991     }
1992 
1993     const MachineBasicBlock *BA = MA->getParent();
1994     const MachineBasicBlock *BB = MB->getParent();
1995     assert(BA->getNumber() != -1 && BB->getNumber() != -1);
1996     if (BA != BB)
1997       return BA->getNumber() < BB->getNumber();
1998     return MDT->dominates(MA, MB);
1999   });
2000 
2001   bool Changed = false;
2002   LLVM_DEBUG(dbgs() << "Collected " << Extenders.size() << " extenders\n");
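       // Process the extenders in maximal groups that share the same root
       // value, assigning initializers and doing replacements per group.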
2003   for (unsigned I = 0, E = Extenders.size(); I != E; ) {
2004     unsigned B = I;
2005     const ExtRoot &T = Extenders[B].getOp();
2006     while (I != E && ExtRoot(Extenders[I].getOp()) == T)
2007       ++I;
2008 
2009     IMap.clear();
2010     assignInits(T, B, I, IMap);
2011     Changed |= replaceExtenders(IMap);
2012   }
2013 
2014   LLVM_DEBUG({
2015     if (Changed)
2016       MF.print(dbgs() << "After " << getPassName() << '\n', nullptr);
2017     else
2018       dbgs() << "No changes\n";
2019   });
2020   return Changed;
2021 }
2022 
2023 FunctionPass *llvm::createHexagonConstExtenders() {
2024   return new HexagonConstExtenders();
2025 }
2026