//===- GuardWidening.cpp - ---- Guard widening ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the guard widening pass.  The semantics of the
// @llvm.experimental.guard intrinsic lets LLVM transform it so that it fails
// more often than it did before the transform.  This optimization is called
// "widening" and can be used to hoist and common runtime checks in situations
// like these:
//
//    %cmp0 = 7 u< Length
//    call @llvm.experimental.guard(i1 %cmp0) [ "deopt"(...) ]
//    call @unknown_side_effects()
//    %cmp1 = 9 u< Length
//    call @llvm.experimental.guard(i1 %cmp1) [ "deopt"(...) ]
//    ...
//
// =>
//
//    %cmp0 = 9 u< Length
//    call @llvm.experimental.guard(i1 %cmp0) [ "deopt"(...) ]
//    call @unknown_side_effects()
//    ...
//
// If %cmp0 is false, @llvm.experimental.guard will "deoptimize" back to a
// generic implementation of the same function, which will have the correct
// semantics from that point onward.  It is always _legal_ to deoptimize (so
// replacing %cmp0 with false is "correct"), though it may not always be
// profitable to do so.
//
// NB! This pass is a work in progress.  It hasn't been tuned to be "production
// ready" yet.  It is known to have quadratic running time and will not scale
// to large numbers of guards.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/GuardWidening.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/GuardUtils.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <functional>

using namespace llvm;

#define DEBUG_TYPE "guard-widening"

STATISTIC(GuardsEliminated, "Number of eliminated guards");
STATISTIC(CondBranchEliminated, "Number of eliminated conditional branches");
STATISTIC(FreezeAdded, "Number of freeze instructions introduced");

static cl::opt<bool>
    WidenBranchGuards("guard-widening-widen-branch-guards", cl::Hidden,
                      cl::desc("Whether or not we should widen guards "
                               "expressed as branches by widenable conditions"),
                      cl::init(true));

namespace {

// Get the condition of \p I. It can either be a guard or a conditional branch.
static Value *getCondition(Instruction *I) {
  if (IntrinsicInst *GI = dyn_cast<IntrinsicInst>(I)) {
    assert(GI->getIntrinsicID() == Intrinsic::experimental_guard &&
           "Bad guard intrinsic?");
    return GI->getArgOperand(0);
  }
  Value *Cond, *WC;
  BasicBlock *IfTrueBB, *IfFalseBB;
  if (parseWidenableBranch(I, Cond, WC, IfTrueBB, IfFalseBB))
    return Cond;

  return cast<BranchInst>(I)->getCondition();
}

// Set the condition for \p I to \p NewCond. \p I can either be a guard or a
// conditional branch.
static void setCondition(Instruction *I, Value *NewCond) {
  if (IntrinsicInst *GI = dyn_cast<IntrinsicInst>(I)) {
    assert(GI->getIntrinsicID() == Intrinsic::experimental_guard &&
           "Bad guard intrinsic?");
    GI->setArgOperand(0, NewCond);
    return;
  }
  cast<BranchInst>(I)->setCondition(NewCond);
}

// Eliminates the guard instruction properly.
static void eliminateGuard(Instruction *GuardInst, MemorySSAUpdater *MSSAU) {
  // Update MemorySSA before erasing the instruction it refers to.
  if (MSSAU)
    MSSAU->removeMemoryAccess(GuardInst);
  GuardInst->eraseFromParent();
  ++GuardsEliminated;
}

/// Find a point at which the widened condition of \p Guard should be inserted.
/// When it is represented as an intrinsic call, we can do it right before the
/// call instruction. However, when we are dealing with a widenable branch, we
/// must account for the following situation: widening should not turn a
/// loop-invariant condition into a loop-variant one. It means that if the
/// widenable.condition() call is invariant (w.r.t. any loop), the new wide
/// condition should stay invariant. Otherwise there can be a miscompile, like
/// the one described at https://github.com/llvm/llvm-project/issues/60234. The
/// safest way to do it is to expand the new condition at WC's block.
static Instruction *findInsertionPointForWideCondition(Instruction *Guard) {
  Value *Condition, *WC;
  BasicBlock *IfTrue, *IfFalse;
  if (parseWidenableBranch(Guard, Condition, WC, IfTrue, IfFalse))
    return cast<Instruction>(WC);
  return Guard;
}

class GuardWideningImpl {
  DominatorTree &DT;
  PostDominatorTree *PDT;
  LoopInfo &LI;
  AssumptionCache &AC;
  MemorySSAUpdater *MSSAU;

  /// Together, these describe the region of interest.  This might be all of
  /// the blocks within a function, or only a given loop's blocks and preheader.
  DomTreeNode *Root;
  std::function<bool(BasicBlock*)> BlockFilter;

  /// The set of guards and conditional branches whose conditions have been
  /// widened into dominating guards.
  SmallVector<Instruction *, 16> EliminatedGuardsAndBranches;

  /// The set of guards which have been widened to include conditions to other
  /// guards.
  DenseSet<Instruction *> WidenedGuards;

  /// Try to eliminate instruction \p Instr by widening it into an earlier
  /// dominating guard.  \p DFSI is the DFS iterator on the dominator tree that
  /// is currently visiting the block containing \p Instr, and \p GuardsPerBlock
  /// maps BasicBlocks to the set of guards seen in that block.
  bool eliminateInstrViaWidening(
      Instruction *Instr, const df_iterator<DomTreeNode *> &DFSI,
      const DenseMap<BasicBlock *, SmallVector<Instruction *, 8>> &
          GuardsPerBlock, bool InvertCondition = false);

  /// Used to keep track of which widening potential is more effective.
  enum WideningScore {
    /// Don't widen.
    WS_IllegalOrNegative,

    /// Widening is performance neutral as far as the cycles spent in check
    /// conditions go (but can still help, e.g., code layout, having less
    /// deopt state).
    WS_Neutral,

    /// Widening is profitable.
    WS_Positive,

    /// Widening is very profitable.  Not significantly different from \c
    /// WS_Positive, except it compares greater in the score ordering.
    WS_VeryPositive
  };

  static StringRef scoreTypeToString(WideningScore WS);

  /// Compute the score for widening the condition in \p DominatedInstr
  /// into \p DominatingGuard. If \p InvertCond is set, then we widen the
  /// inverted condition of the dominating guard.
  WideningScore computeWideningScore(Instruction *DominatedInstr,
                                     Instruction *DominatingGuard,
                                     bool InvertCond);

  /// Helper to check if \p V can be hoisted to \p InsertPos.
  bool canBeHoistedTo(const Value *V, const Instruction *InsertPos) const {
    SmallPtrSet<const Instruction *, 8> Visited;
    return canBeHoistedTo(V, InsertPos, Visited);
  }

  bool canBeHoistedTo(const Value *V, const Instruction *InsertPos,
                      SmallPtrSetImpl<const Instruction *> &Visited) const;

  /// Helper to hoist \p V to \p InsertPos.  Guaranteed to succeed if \c
  /// canBeHoistedTo returned true.
  void makeAvailableAt(Value *V, Instruction *InsertPos) const;

  /// Common helper used by \c widenGuard and \c isWideningCondProfitable.  Try
  /// to generate an expression computing the logical AND of \p Cond0 and (\p
  /// Cond1 XOR \p InvertCondition).
  /// Return true if the expression computing the AND is only as
  /// expensive as computing one of the two. If \p InsertPt is non-null then
  /// actually generate the resulting expression, make it available at \p
  /// InsertPt and return it in \p Result (else no change to the IR is made).
  bool widenCondCommon(Value *Cond0, Value *Cond1, Instruction *InsertPt,
                       Value *&Result, bool InvertCondition);

  /// Adds a freeze to \p Orig and pushes it as far as possible, very
  /// aggressively. Also replaces all uses of the frozen instruction with its
  /// frozen version.
  Value *freezeAndPush(Value *Orig, Instruction *InsertPt);

  /// Represents a range check of the form \c Base + \c Offset u< \c Length,
  /// with the constraint that \c Length is not negative.  \c CheckInst is the
  /// pre-existing instruction in the IR that computes the result of this range
  /// check.
  class RangeCheck {
    const Value *Base;
    const ConstantInt *Offset;
    const Value *Length;
    ICmpInst *CheckInst;

  public:
    explicit RangeCheck(const Value *Base, const ConstantInt *Offset,
                        const Value *Length, ICmpInst *CheckInst)
        : Base(Base), Offset(Offset), Length(Length), CheckInst(CheckInst) {}

    void setBase(const Value *NewBase) { Base = NewBase; }
    void setOffset(const ConstantInt *NewOffset) { Offset = NewOffset; }

    const Value *getBase() const { return Base; }
    const ConstantInt *getOffset() const { return Offset; }
    const APInt &getOffsetValue() const { return getOffset()->getValue(); }
    const Value *getLength() const { return Length; }
    ICmpInst *getCheckInst() const { return CheckInst; }

    void print(raw_ostream &OS, bool PrintTypes = false) {
      OS << "Base: ";
      Base->printAsOperand(OS, PrintTypes);
      OS << " Offset: ";
      Offset->printAsOperand(OS, PrintTypes);
      OS << " Length: ";
      Length->printAsOperand(OS, PrintTypes);
    }

    LLVM_DUMP_METHOD void dump() {
      print(dbgs());
      dbgs() << "\n";
    }
  };

  /// Parse \p CheckCond into a conjunction (logical-and) of range checks; and
  /// append them to \p Checks.  Returns true on success, may clobber \c Checks
  /// on failure.
  bool parseRangeChecks(Value *CheckCond, SmallVectorImpl<RangeCheck> &Checks) {
    SmallPtrSet<const Value *, 8> Visited;
    return parseRangeChecks(CheckCond, Checks, Visited);
  }

  bool parseRangeChecks(Value *CheckCond, SmallVectorImpl<RangeCheck> &Checks,
                        SmallPtrSetImpl<const Value *> &Visited);

  /// Combine the checks in \p Checks into a smaller set of checks and append
  /// them into \p CombinedChecks.  Return true on success (i.e. all of the
  /// checks in \p Checks were combined into \p CombinedChecks).  Clobbers \p
  /// Checks and \p CombinedChecks on success and on failure.
  bool combineRangeChecks(SmallVectorImpl<RangeCheck> &Checks,
                          SmallVectorImpl<RangeCheck> &CombinedChecks) const;

  /// Can we compute the logical AND of \p Cond0 and \p Cond1 for the price of
  /// computing only one of the two expressions?
  bool isWideningCondProfitable(Value *Cond0, Value *Cond1, bool InvertCond) {
    Value *ResultUnused;
    return widenCondCommon(Cond0, Cond1, /*InsertPt=*/nullptr, ResultUnused,
                           InvertCond);
  }

  /// If \p InvertCondition is false, widen \p ToWiden to fail if
  /// \p NewCondition is false, otherwise make it fail if \p NewCondition is
  /// true (in addition to whatever it is already checking).
  void widenGuard(Instruction *ToWiden, Value *NewCondition,
                  bool InvertCondition) {
    Value *Result;
    Instruction *InsertPt = findInsertionPointForWideCondition(ToWiden);
    widenCondCommon(getCondition(ToWiden), NewCondition, InsertPt, Result,
                    InvertCondition);
    if (isGuardAsWidenableBranch(ToWiden)) {
      setWidenableBranchCond(cast<BranchInst>(ToWiden), Result);
      return;
    }
    setCondition(ToWiden, Result);
  }

public:
  explicit GuardWideningImpl(DominatorTree &DT, PostDominatorTree *PDT,
                             LoopInfo &LI, AssumptionCache &AC,
                             MemorySSAUpdater *MSSAU, DomTreeNode *Root,
                             std::function<bool(BasicBlock *)> BlockFilter)
      : DT(DT), PDT(PDT), LI(LI), AC(AC), MSSAU(MSSAU), Root(Root),
        BlockFilter(BlockFilter) {}

  /// The entry point for this pass.
  bool run();
};
} // namespace

static bool isSupportedGuardInstruction(const Instruction *Insn) {
  if (isGuard(Insn))
    return true;
  if (WidenBranchGuards && isGuardAsWidenableBranch(Insn))
    return true;
  return false;
}

bool GuardWideningImpl::run() {
  DenseMap<BasicBlock *, SmallVector<Instruction *, 8>> GuardsInBlock;
  bool Changed = false;
  for (auto DFI = df_begin(Root), DFE = df_end(Root);
       DFI != DFE; ++DFI) {
    auto *BB = (*DFI)->getBlock();
    if (!BlockFilter(BB))
      continue;

    auto &CurrentList = GuardsInBlock[BB];

    for (auto &I : *BB)
      if (isSupportedGuardInstruction(&I))
        CurrentList.push_back(cast<Instruction>(&I));

    for (auto *II : CurrentList)
      Changed |= eliminateInstrViaWidening(II, DFI, GuardsInBlock);
  }

  assert(EliminatedGuardsAndBranches.empty() || Changed);
  for (auto *I : EliminatedGuardsAndBranches)
    if (!WidenedGuards.count(I)) {
      assert(isa<ConstantInt>(getCondition(I)) && "Should be!");
      if (isSupportedGuardInstruction(I))
        eliminateGuard(I, MSSAU);
      else {
        assert(isa<BranchInst>(I) &&
               "Eliminated something other than guard or branch?");
        ++CondBranchEliminated;
      }
    }

  return Changed;
}

bool GuardWideningImpl::eliminateInstrViaWidening(
    Instruction *Instr, const df_iterator<DomTreeNode *> &DFSI,
    const DenseMap<BasicBlock *, SmallVector<Instruction *, 8>> &
        GuardsInBlock, bool InvertCondition) {
  // Ignore trivial true or false conditions. These instructions will be
  // trivially eliminated by any cleanup pass. Do not erase them because other
  // guards can possibly be widened into them.
  if (isa<ConstantInt>(getCondition(Instr)))
    return false;

  Instruction *BestSoFar = nullptr;
  auto BestScoreSoFar = WS_IllegalOrNegative;

  // In the set of dominating guards, find the one we can merge Instr with
  // for the most profit.
  for (unsigned i = 0, e = DFSI.getPathLength(); i != e; ++i) {
    auto *CurBB = DFSI.getPath(i)->getBlock();
    if (!BlockFilter(CurBB))
      break;
    assert(GuardsInBlock.count(CurBB) && "Must have been populated by now!");
    const auto &GuardsInCurBB = GuardsInBlock.find(CurBB)->second;

    auto I = GuardsInCurBB.begin();
    auto E = Instr->getParent() == CurBB ? find(GuardsInCurBB, Instr)
                                         : GuardsInCurBB.end();

#ifndef NDEBUG
    {
      unsigned Index = 0;
      for (auto &I : *CurBB) {
        if (Index == GuardsInCurBB.size())
          break;
        if (GuardsInCurBB[Index] == &I)
          Index++;
      }
      assert(Index == GuardsInCurBB.size() &&
             "Guards expected to be in order!");
    }
#endif

    assert((i == (e - 1)) == (Instr->getParent() == CurBB) && "Bad DFS?");

    for (auto *Candidate : make_range(I, E)) {
      auto Score = computeWideningScore(Instr, Candidate, InvertCondition);
      LLVM_DEBUG(dbgs() << "Score between " << *getCondition(Instr)
                        << " and " << *getCondition(Candidate) << " is "
                        << scoreTypeToString(Score) << "\n");
      if (Score > BestScoreSoFar) {
        BestScoreSoFar = Score;
        BestSoFar = Candidate;
      }
    }
  }

  if (BestScoreSoFar == WS_IllegalOrNegative) {
    LLVM_DEBUG(dbgs() << "Did not eliminate guard " << *Instr << "\n");
    return false;
  }

  assert(BestSoFar != Instr && "Should have never visited same guard!");
  assert(DT.dominates(BestSoFar, Instr) && "Should be!");

  LLVM_DEBUG(dbgs() << "Widening " << *Instr << " into " << *BestSoFar
                    << " with score " << scoreTypeToString(BestScoreSoFar)
                    << "\n");
  widenGuard(BestSoFar, getCondition(Instr), InvertCondition);
  auto NewGuardCondition = InvertCondition
                               ? ConstantInt::getFalse(Instr->getContext())
                               : ConstantInt::getTrue(Instr->getContext());
  setCondition(Instr, NewGuardCondition);
  EliminatedGuardsAndBranches.push_back(Instr);
  WidenedGuards.insert(BestSoFar);
  return true;
}

GuardWideningImpl::WideningScore
GuardWideningImpl::computeWideningScore(Instruction *DominatedInstr,
                                        Instruction *DominatingGuard,
                                        bool InvertCond) {
  Loop *DominatedInstrLoop = LI.getLoopFor(DominatedInstr->getParent());
  Loop *DominatingGuardLoop = LI.getLoopFor(DominatingGuard->getParent());
  bool HoistingOutOfLoop = false;

  if (DominatingGuardLoop != DominatedInstrLoop) {
    // Be conservative and don't widen into a sibling loop.  TODO: If the
    // sibling is colder, we should consider allowing this.
    if (DominatingGuardLoop &&
        !DominatingGuardLoop->contains(DominatedInstrLoop))
      return WS_IllegalOrNegative;

    HoistingOutOfLoop = true;
  }

  auto *WideningPoint = findInsertionPointForWideCondition(DominatingGuard);
  if (!canBeHoistedTo(getCondition(DominatedInstr), WideningPoint))
    return WS_IllegalOrNegative;
  if (!canBeHoistedTo(getCondition(DominatingGuard), WideningPoint))
    return WS_IllegalOrNegative;

  // If the guard was conditionally executed, it may never be reached
  // dynamically.  There are two potential downsides to hoisting it out of the
  // conditionally executed region: 1) we may spuriously deopt without need and
  // 2) we have the extra cost of computing the guard condition in the common
  // case.  At the moment, we really only consider the second in our heuristic
  // here.  TODO: evaluate cost model for spurious deopt
  // NOTE: As written, this also lets us hoist right over another guard which
  // is essentially just another spelling for control flow.
  if (isWideningCondProfitable(getCondition(DominatedInstr),
                               getCondition(DominatingGuard), InvertCond))
    return HoistingOutOfLoop ? WS_VeryPositive : WS_Positive;

  if (HoistingOutOfLoop)
    return WS_Positive;

  // For a given basic block \p BB, return the successor that is guaranteed or
  // highly likely to be taken.
  auto GetLikelySuccessor = [](const BasicBlock *BB) -> const BasicBlock * {
    if (auto *UniqueSucc = BB->getUniqueSuccessor())
      return UniqueSucc;
    auto *Term = BB->getTerminator();
    Value *Cond = nullptr;
    const BasicBlock *IfTrue = nullptr, *IfFalse = nullptr;
    using namespace PatternMatch;
    if (!match(Term, m_Br(m_Value(Cond), m_BasicBlock(IfTrue),
                          m_BasicBlock(IfFalse))))
      return nullptr;
    // For constant conditions, only one dynamic successor is possible.
    if (auto *ConstCond = dyn_cast<ConstantInt>(Cond))
      return ConstCond->isAllOnesValue() ? IfTrue : IfFalse;
    // If one of the successors ends with deopt, the other one is likely.
    if (IfFalse->getPostdominatingDeoptimizeCall())
      return IfTrue;
    if (IfTrue->getPostdominatingDeoptimizeCall())
      return IfFalse;
    // TODO: Use branch frequency metadata to allow hoisting through non-deopt
    // branches?
    return nullptr;
  };

  // Returns true if we might be hoisting above explicit control flow into a
  // considerably hotter block.  Note that this completely ignores implicit
  // control flow (guards, calls which throw, etc...).  That choice appears
  // arbitrary (we assume that implicit control flow exits are all rare).
  auto MaybeHoistingToHotterBlock = [&]() {
    const auto *DominatingBlock = DominatingGuard->getParent();
    const auto *DominatedBlock = DominatedInstr->getParent();

    // Descend as low as we can, always taking the likely successor.
    assert(DT.isReachableFromEntry(DominatingBlock) && "Unreached code");
    assert(DT.isReachableFromEntry(DominatedBlock) && "Unreached code");
    assert(DT.dominates(DominatingBlock, DominatedBlock) && "No dominance");
    while (DominatedBlock != DominatingBlock) {
      auto *LikelySucc = GetLikelySuccessor(DominatingBlock);
      // No likely successor?
      if (!LikelySucc)
        break;
      // Only go down the dominator tree.
      if (!DT.properlyDominates(DominatingBlock, LikelySucc))
        break;
      DominatingBlock = LikelySucc;
    }

    // Found?
    if (DominatedBlock == DominatingBlock)
      return false;
    // We followed the likely successor chain and went past the dominated
    // block. This means that the dominated guard is in dead/very cold code.
    if (!DT.dominates(DominatingBlock, DominatedBlock))
      return true;
    // TODO: diamond, triangle cases
    if (!PDT) return true;
    return !PDT->dominates(DominatedBlock, DominatingBlock);
  };

  return MaybeHoistingToHotterBlock() ? WS_IllegalOrNegative : WS_Neutral;
}

bool GuardWideningImpl::canBeHoistedTo(
    const Value *V, const Instruction *Loc,
    SmallPtrSetImpl<const Instruction *> &Visited) const {
  auto *Inst = dyn_cast<Instruction>(V);
  if (!Inst || DT.dominates(Inst, Loc) || Visited.count(Inst))
    return true;

  if (!isSafeToSpeculativelyExecute(Inst, Loc, &AC, &DT) ||
      Inst->mayReadFromMemory())
    return false;

  Visited.insert(Inst);

  // We only want to go _up_ the dominance chain when recursing.
  assert(!isa<PHINode>(Loc) &&
         "PHIs should return false for isSafeToSpeculativelyExecute");
  assert(DT.isReachableFromEntry(Inst->getParent()) &&
         "We did a DFS from the block entry!");
  return all_of(Inst->operands(),
                [&](Value *Op) { return canBeHoistedTo(Op, Loc, Visited); });
}

void GuardWideningImpl::makeAvailableAt(Value *V, Instruction *Loc) const {
  auto *Inst = dyn_cast<Instruction>(V);
  if (!Inst || DT.dominates(Inst, Loc))
    return;

  assert(isSafeToSpeculativelyExecute(Inst, Loc, &AC, &DT) &&
         !Inst->mayReadFromMemory() &&
         "Should've checked with canBeHoistedTo!");

  for (Value *Op : Inst->operands())
    makeAvailableAt(Op, Loc);

  Inst->moveBefore(Loc);
}
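
// For example (illustrative only): if V is "%x = add i64 %a, 1" defined below
// Loc and %a already dominates Loc, makeAvailableAt simply moves the add up to
// Loc; if %a were itself a hoistable instruction, it would be moved first.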

// Return the instruction before which we can insert a freeze for the value V,
// as close to the definition of V as possible. If there is no place to add the
// freeze, return nullptr.
static Instruction *getFreezeInsertPt(Value *V, const DominatorTree &DT) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return &*DT.getRoot()->getFirstNonPHIOrDbgOrAlloca();

  auto *Res = I->getInsertionPointAfterDef();
  // If there is no place to add freeze - return nullptr.
  if (!Res || !DT.dominates(I, Res))
    return nullptr;

  // If there is a User dominated by the original I, then it should be
  // dominated by the Freeze instruction as well.
  if (any_of(I->users(), [&](User *U) {
        Instruction *User = cast<Instruction>(U);
        return Res != User && DT.dominates(I, User) && !DT.dominates(Res, User);
      }))
    return nullptr;
  return Res;
}

Value *GuardWideningImpl::freezeAndPush(Value *Orig, Instruction *InsertPt) {
  if (isGuaranteedNotToBePoison(Orig, nullptr, InsertPt, &DT))
    return Orig;
  Instruction *InsertPtAtDef = getFreezeInsertPt(Orig, DT);
  if (!InsertPtAtDef)
    return new FreezeInst(Orig, "gw.freeze", InsertPt);
  if (isa<Constant>(Orig) || isa<GlobalValue>(Orig))
    return new FreezeInst(Orig, "gw.freeze", InsertPtAtDef);

  SmallSet<Value *, 16> Visited;
  SmallVector<Value *, 16> Worklist;
  SmallSet<Instruction *, 16> DropPoisonFlags;
  SmallVector<Value *, 16> NeedFreeze;
  DenseMap<Value *, FreezeInst *> CacheOfFreezes;

  // These data structures are a bit overloaded: Visited also contains a
  // constant/GV if we have already met it. In that case CacheOfFreezes has a
  // freeze for it if one is required.
  auto handleConstantOrGlobal = [&](Use &U) {
    Value *Def = U.get();
    if (!isa<Constant>(Def) && !isa<GlobalValue>(Def))
      return false;

    if (Visited.insert(Def).second) {
      if (isGuaranteedNotToBePoison(Def, nullptr, InsertPt, &DT))
        return true;
      CacheOfFreezes[Def] = new FreezeInst(Def, Def->getName() + ".gw.fr",
                                           getFreezeInsertPt(Def, DT));
    }

    if (CacheOfFreezes.count(Def))
      U.set(CacheOfFreezes[Def]);
    return true;
  };

  Worklist.push_back(Orig);
  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    if (isGuaranteedNotToBePoison(V, nullptr, InsertPt, &DT))
      continue;

    Instruction *I = dyn_cast<Instruction>(V);
    if (!I || canCreateUndefOrPoison(cast<Operator>(I),
                                     /*ConsiderFlagsAndMetadata*/ false)) {
      NeedFreeze.push_back(V);
      continue;
    }
    // Check all operands. If we cannot insert a freeze for any of them,
    // stop here. Otherwise, iterate.
    if (any_of(I->operands(), [&](Value *Op) {
          return isa<Instruction>(Op) && !getFreezeInsertPt(Op, DT);
        })) {
      NeedFreeze.push_back(I);
      continue;
    }
    DropPoisonFlags.insert(I);
    for (Use &U : I->operands())
      if (!handleConstantOrGlobal(U))
        Worklist.push_back(U.get());
  }
  for (Instruction *I : DropPoisonFlags)
    I->dropPoisonGeneratingFlagsAndMetadata();

  Value *Result = Orig;
  for (Value *V : NeedFreeze) {
    auto *FreezeInsertPt = getFreezeInsertPt(V, DT);
    FreezeInst *FI = new FreezeInst(V, V->getName() + ".gw.fr", FreezeInsertPt);
    ++FreezeAdded;
    if (V == Orig)
      Result = FI;
    V->replaceUsesWithIf(
        FI, [&](const Use &U) -> bool { return U.getUser() != FI; });
  }

  return Result;
}
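
// A sketch of the effect (illustrative only): given
//
//    %v = add nuw i32 %a, %b     ; %a may be poison
//
// rather than freezing %v itself, the freeze is pushed up to the operand that
// may be poison, and the poison-generating flags on %v are dropped:
//
//    %a.gw.fr = freeze i32 %a
//    %v = add i32 %a.gw.fr, %b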

bool GuardWideningImpl::widenCondCommon(Value *Cond0, Value *Cond1,
                                        Instruction *InsertPt, Value *&Result,
                                        bool InvertCondition) {
  using namespace llvm::PatternMatch;

  {
    // L >u C0 && L >u C1  ->  L >u max(C0, C1)
    ConstantInt *RHS0, *RHS1;
    Value *LHS;
    ICmpInst::Predicate Pred0, Pred1;
    if (match(Cond0, m_ICmp(Pred0, m_Value(LHS), m_ConstantInt(RHS0))) &&
        match(Cond1, m_ICmp(Pred1, m_Specific(LHS), m_ConstantInt(RHS1)))) {
      if (InvertCondition)
        Pred1 = ICmpInst::getInversePredicate(Pred1);

      ConstantRange CR0 =
          ConstantRange::makeExactICmpRegion(Pred0, RHS0->getValue());
      ConstantRange CR1 =
          ConstantRange::makeExactICmpRegion(Pred1, RHS1->getValue());

      // Given what we're doing here and the semantics of guards, it would
      // be correct to use a subset intersection, but that may be too
      // aggressive in cases we care about.
      if (std::optional<ConstantRange> Intersect =
              CR0.exactIntersectWith(CR1)) {
        APInt NewRHSAP;
        CmpInst::Predicate Pred;
        if (Intersect->getEquivalentICmp(Pred, NewRHSAP)) {
          if (InsertPt) {
            ConstantInt *NewRHS =
                ConstantInt::get(Cond0->getContext(), NewRHSAP);
            assert(canBeHoistedTo(LHS, InsertPt) && "must be");
            makeAvailableAt(LHS, InsertPt);
            Result = new ICmpInst(InsertPt, Pred, LHS, NewRHS, "wide.chk");
          }
          return true;
        }
      }
    }
  }
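
  // For instance (illustrative only): (x u< 10) && (x u< 20) has the exact
  // intersection [0, 10), so the widened condition is just (x u< 10) -- no
  // more expensive than either original check.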

  {
    SmallVector<GuardWideningImpl::RangeCheck, 4> Checks, CombinedChecks;
    // TODO: Support InvertCondition case?
    if (!InvertCondition &&
        parseRangeChecks(Cond0, Checks) && parseRangeChecks(Cond1, Checks) &&
        combineRangeChecks(Checks, CombinedChecks)) {
      if (InsertPt) {
        Result = nullptr;
        for (auto &RC : CombinedChecks) {
          makeAvailableAt(RC.getCheckInst(), InsertPt);
          if (Result)
            Result = BinaryOperator::CreateAnd(RC.getCheckInst(), Result, "",
                                               InsertPt);
          else
            Result = RC.getCheckInst();
        }
        assert(Result && "Failed to find result value");
        Result->setName("wide.chk");
        Result = freezeAndPush(Result, InsertPt);
      }
      return true;
    }
  }

  // Base case -- just logical-and the two conditions together.

  if (InsertPt) {
    makeAvailableAt(Cond0, InsertPt);
    makeAvailableAt(Cond1, InsertPt);
    if (InvertCondition)
      Cond1 = BinaryOperator::CreateNot(Cond1, "inverted", InsertPt);
    Cond1 = freezeAndPush(Cond1, InsertPt);
    Result = BinaryOperator::CreateAnd(Cond0, Cond1, "wide.chk", InsertPt);
  }

  // We were not able to compute Cond0 AND Cond1 for the price of one.
  return false;
}

bool GuardWideningImpl::parseRangeChecks(
    Value *CheckCond, SmallVectorImpl<GuardWideningImpl::RangeCheck> &Checks,
    SmallPtrSetImpl<const Value *> &Visited) {
  if (!Visited.insert(CheckCond).second)
    return true;

  using namespace llvm::PatternMatch;

  {
    Value *AndLHS, *AndRHS;
    if (match(CheckCond, m_And(m_Value(AndLHS), m_Value(AndRHS))))
      return parseRangeChecks(AndLHS, Checks) &&
             parseRangeChecks(AndRHS, Checks);
  }

  auto *IC = dyn_cast<ICmpInst>(CheckCond);
  if (!IC || !IC->getOperand(0)->getType()->isIntegerTy() ||
      (IC->getPredicate() != ICmpInst::ICMP_ULT &&
       IC->getPredicate() != ICmpInst::ICMP_UGT))
    return false;

  const Value *CmpLHS = IC->getOperand(0), *CmpRHS = IC->getOperand(1);
  if (IC->getPredicate() == ICmpInst::ICMP_UGT)
    std::swap(CmpLHS, CmpRHS);

  auto &DL = IC->getModule()->getDataLayout();

  GuardWideningImpl::RangeCheck Check(
      CmpLHS, cast<ConstantInt>(ConstantInt::getNullValue(CmpRHS->getType())),
      CmpRHS, IC);

  if (!isKnownNonNegative(Check.getLength(), DL))
    return false;

  // What we have in \c Check now is a correct interpretation of \p CheckCond.
  // Try to see if we can move some constant offsets into the \c Offset field.

  bool Changed;
  auto &Ctx = CheckCond->getContext();

  do {
    Value *OpLHS;
    ConstantInt *OpRHS;
    Changed = false;

#ifndef NDEBUG
    auto *BaseInst = dyn_cast<Instruction>(Check.getBase());
    assert((!BaseInst || DT.isReachableFromEntry(BaseInst->getParent())) &&
           "Unreachable instruction?");
#endif

    if (match(Check.getBase(), m_Add(m_Value(OpLHS), m_ConstantInt(OpRHS)))) {
      Check.setBase(OpLHS);
      APInt NewOffset = Check.getOffsetValue() + OpRHS->getValue();
      Check.setOffset(ConstantInt::get(Ctx, NewOffset));
      Changed = true;
    } else if (match(Check.getBase(),
                     m_Or(m_Value(OpLHS), m_ConstantInt(OpRHS)))) {
      KnownBits Known = computeKnownBits(OpLHS, DL);
      if ((OpRHS->getValue() & Known.Zero) == OpRHS->getValue()) {
        Check.setBase(OpLHS);
        APInt NewOffset = Check.getOffsetValue() + OpRHS->getValue();
        Check.setOffset(ConstantInt::get(Ctx, NewOffset));
        Changed = true;
      }
    }
  } while (Changed);

  Checks.push_back(Check);
  return true;
}
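
// For example (illustrative only): a check of the form ((%a + 2) u< %len),
// with %len known non-negative, parses into Base = %a, Offset = 2,
// Length = %len. The m_Or case above handles "or" used as "add" when the
// constant's set bits are known to be zero in the base.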

bool GuardWideningImpl::combineRangeChecks(
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &Checks,
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &RangeChecksOut) const {
  unsigned OldCount = Checks.size();
  while (!Checks.empty()) {
    // Pick all of the range checks with a specific base and length, and try to
    // merge them.
    const Value *CurrentBase = Checks.front().getBase();
    const Value *CurrentLength = Checks.front().getLength();

    SmallVector<GuardWideningImpl::RangeCheck, 3> CurrentChecks;

    auto IsCurrentCheck = [&](GuardWideningImpl::RangeCheck &RC) {
      return RC.getBase() == CurrentBase && RC.getLength() == CurrentLength;
    };

    copy_if(Checks, std::back_inserter(CurrentChecks), IsCurrentCheck);
    erase_if(Checks, IsCurrentCheck);

    assert(CurrentChecks.size() != 0 && "We know we have at least one!");

    if (CurrentChecks.size() < 3) {
      llvm::append_range(RangeChecksOut, CurrentChecks);
      continue;
    }

    // CurrentChecks.size() will typically be 3 here, but so far there has been
    // no need to hard-code that fact.

    llvm::sort(CurrentChecks, [&](const GuardWideningImpl::RangeCheck &LHS,
                                  const GuardWideningImpl::RangeCheck &RHS) {
      return LHS.getOffsetValue().slt(RHS.getOffsetValue());
    });

    const ConstantInt *MinOffset = CurrentChecks.front().getOffset();
    const ConstantInt *MaxOffset = CurrentChecks.back().getOffset();

    unsigned BitWidth = MaxOffset->getValue().getBitWidth();
    if ((MaxOffset->getValue() - MinOffset->getValue())
            .ugt(APInt::getSignedMinValue(BitWidth)))
      return false;

    APInt MaxDiff = MaxOffset->getValue() - MinOffset->getValue();
    const APInt &HighOffset = MaxOffset->getValue();
    auto OffsetOK = [&](const GuardWideningImpl::RangeCheck &RC) {
      return (HighOffset - RC.getOffsetValue()).ult(MaxDiff);
    };

    if (MaxDiff.isMinValue() || !all_of(drop_begin(CurrentChecks), OffsetOK))
      return false;

    // We have a series of f+1 checks as:
    //
    //   I+k_0 u< L   ... Chk_0
    //   I+k_1 u< L   ... Chk_1
    //   ...
    //   I+k_f u< L   ... Chk_f
    //
    //     with forall i in [1,f]: k_f-k_i u< k_f-k_0  ... Precond_0
    //          k_f-k_0 u< INT_MIN+k_f                 ... Precond_1
    //          k_f != k_0                             ... Precond_2
    //
    // Claim:
    //   Chk_0 AND Chk_f  implies all the other checks
    //
    // Informal proof sketch:
    //
    // We will show that the integer range [I+k_0,I+k_f] does not unsigned-wrap
    // (i.e. going from I+k_0 to I+k_f does not cross the -1,0 boundary) and
    // thus I+k_f is the greatest unsigned value in that range.
    //
    // This combined with Chk_f shows that everything in that range is u< L.
    // Via Precond_0 we know that all of the indices in Chk_0 through Chk_f
    // lie in [I+k_0,I+k_f], thus proving our claim.
    //
    // To see that [I+k_0,I+k_f] is not a wrapping range, note that there are
    // two possibilities: I+k_0 u< I+k_f or I+k_0 >u I+k_f (they can't be equal
    // since k_0 != k_f).  In the former case, [I+k_0,I+k_f] is not a wrapping
    // range by definition, and the latter case is impossible:
    //
    //   0-----I+k_f---I+k_0----L---INT_MAX,INT_MIN------------------(-1)
    //   xxxxxx             xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    //
    // For Chk_0 to succeed, we'd have to have k_f-k_0 (the range highlighted
    // with 'x' above) to be at least >u INT_MIN.
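    //
    // Concrete instance (illustrative only): for checks I+1 u< L, I+2 u< L,
    // and I+3 u< L, the preconditions hold, so keeping only Chk_0 (I+1 u< L)
    // and Chk_f (I+3 u< L) is enough to imply the middle check.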

    RangeChecksOut.emplace_back(CurrentChecks.front());
    RangeChecksOut.emplace_back(CurrentChecks.back());
  }

  assert(RangeChecksOut.size() <= OldCount && "We pessimized!");
  return RangeChecksOut.size() != OldCount;
}

#ifndef NDEBUG
StringRef GuardWideningImpl::scoreTypeToString(WideningScore WS) {
  switch (WS) {
  case WS_IllegalOrNegative:
    return "IllegalOrNegative";
  case WS_Neutral:
    return "Neutral";
  case WS_Positive:
    return "Positive";
  case WS_VeryPositive:
    return "VeryPositive";
  }

  llvm_unreachable("Fully covered switch above!");
}
#endif

PreservedAnalyses GuardWideningPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *MSSAA = AM.getCachedResult<MemorySSAAnalysis>(F);
  std::unique_ptr<MemorySSAUpdater> MSSAU;
  if (MSSAA)
    MSSAU = std::make_unique<MemorySSAUpdater>(&MSSAA->getMSSA());
  if (!GuardWideningImpl(DT, &PDT, LI, AC, MSSAU ? MSSAU.get() : nullptr,
                         DT.getRootNode(), [](BasicBlock *) { return true; })
           .run())
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<MemorySSAAnalysis>();
  return PA;
}

PreservedAnalyses GuardWideningPass::run(Loop &L, LoopAnalysisManager &AM,
                                         LoopStandardAnalysisResults &AR,
                                         LPMUpdater &U) {
  BasicBlock *RootBB = L.getLoopPredecessor();
  if (!RootBB)
    RootBB = L.getHeader();
  auto BlockFilter = [&](BasicBlock *BB) {
    return BB == RootBB || L.contains(BB);
  };
  std::unique_ptr<MemorySSAUpdater> MSSAU;
  if (AR.MSSA)
    MSSAU = std::make_unique<MemorySSAUpdater>(AR.MSSA);
  if (!GuardWideningImpl(AR.DT, nullptr, AR.LI, AR.AC,
                         MSSAU ? MSSAU.get() : nullptr, AR.DT.getNode(RootBB),
                         BlockFilter)
           .run())
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

namespace {
struct GuardWideningLegacyPass : public FunctionPass {
  static char ID;

  GuardWideningLegacyPass() : FunctionPass(ID) {
    initializeGuardWideningLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto &PDT = getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
    auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
    std::unique_ptr<MemorySSAUpdater> MSSAU;
    if (MSSAWP)
      MSSAU = std::make_unique<MemorySSAUpdater>(&MSSAWP->getMSSA());
    return GuardWideningImpl(DT, &PDT, LI, AC, MSSAU ? MSSAU.get() : nullptr,
                             DT.getRootNode(),
                             [](BasicBlock *) { return true; })
        .run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<PostDominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
  }
};

/// Same as above, but restricted to a single loop at a time.  Can be
/// scheduled with other loop passes without breaking out of LPM.
struct LoopGuardWideningLegacyPass : public LoopPass {
  static char ID;

  LoopGuardWideningLegacyPass() : LoopPass(ID) {
    initializeLoopGuardWideningLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
        *L->getHeader()->getParent());
    auto *PDTWP = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>();
    auto *PDT = PDTWP ? &PDTWP->getPostDomTree() : nullptr;
    auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
    std::unique_ptr<MemorySSAUpdater> MSSAU;
    if (MSSAWP)
      MSSAU = std::make_unique<MemorySSAUpdater>(&MSSAWP->getMSSA());

    BasicBlock *RootBB = L->getLoopPredecessor();
    if (!RootBB)
      RootBB = L->getHeader();
    auto BlockFilter = [&](BasicBlock *BB) {
      return BB == RootBB || L->contains(BB);
    };
    return GuardWideningImpl(DT, PDT, LI, AC, MSSAU ? MSSAU.get() : nullptr,
                             DT.getNode(RootBB), BlockFilter)
        .run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    getLoopAnalysisUsage(AU);
    AU.addPreserved<PostDominatorTreeWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
  }
};
} // namespace

char GuardWideningLegacyPass::ID = 0;
char LoopGuardWideningLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(GuardWideningLegacyPass, "guard-widening", "Widen guards",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(GuardWideningLegacyPass, "guard-widening", "Widen guards",
                    false, false)

INITIALIZE_PASS_BEGIN(LoopGuardWideningLegacyPass, "loop-guard-widening",
                      "Widen guards (within a single loop, as a loop pass)",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopGuardWideningLegacyPass, "loop-guard-widening",
                    "Widen guards (within a single loop, as a loop pass)",
                    false, false)

FunctionPass *llvm::createGuardWideningPass() {
  return new GuardWideningLegacyPass();
}

Pass *llvm::createLoopGuardWideningPass() {
  return new LoopGuardWideningLegacyPass();
}