//===--- ExprConstant.cpp - Expression Constant Evaluator ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Expr constant evaluator.
//
// Constant expression evaluation produces four main results:
//
//  * A success/failure flag indicating whether constant folding was successful.
//    This is the 'bool' return value used by most of the code in this file. A
//    'false' return value indicates that constant folding has failed, and any
//    appropriate diagnostic has already been produced.
//
//  * An evaluated result, valid only if constant folding has not failed.
//
//  * A flag indicating if evaluation encountered (unevaluated) side-effects.
//    These arise in cases such as (sideEffect(), 0) and (sideEffect() || 1),
//    where it is possible to determine the evaluated result regardless.
//
//  * A set of notes indicating why the evaluation was not a constant expression
//    (under the C++11 / C++1y rules only, at the moment), or, if folding failed
//    too, why the expression could not be folded.
//
// If we are checking for a potential constant expression, failure to constant
// fold a potential constant sub-expression will be indicated by a 'false'
// return value (the expression could not be folded) and no diagnostic (the
// expression is not necessarily non-constant).
//
//===----------------------------------------------------------------------===//

#include "Interp/Context.h"
#include "Interp/Frame.h"
#include "Interp/State.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/CurrentSourceLocExprScope.h"
#include "clang/AST/Expr.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/OptionalDiagnostic.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/FixedPoint.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>
#include <functional>

#define DEBUG_TYPE "exprconstant"

using namespace clang;
using llvm::APInt;
using llvm::APSInt;
using llvm::APFloat;
using llvm::Optional;

namespace {
struct LValue;
class CallStackFrame;
class EvalInfo;

using SourceLocExprScopeGuard =
    CurrentSourceLocExprScope::SourceLocExprScopeGuard;

static QualType getType(APValue::LValueBase B) {
  if (!B) return QualType();
  if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
    // FIXME: It's unclear where we're supposed to take the type from, and
    // this actually matters for arrays of unknown bound. Eg:
    //
    //   extern int arr[]; void f() { extern int arr[3]; }
    //   constexpr int *p = &arr[1]; // valid?
    //
    // For now, we take the array bound from the most recent declaration.
    for (auto *Redecl = cast<ValueDecl>(D->getMostRecentDecl()); Redecl;
         Redecl = cast_or_null<ValueDecl>(Redecl->getPreviousDecl())) {
      QualType T = Redecl->getType();
      if (!T->isIncompleteArrayType())
        return T;
    }
    return D->getType();
  }

  if (B.is<TypeInfoLValue>())
    return B.getTypeInfoType();

  if (B.is<DynamicAllocLValue>())
    return B.getDynamicAllocType();

  const Expr *Base = B.get<const Expr*>();

  // For a materialized temporary, the type of the temporary we materialized
  // may not be the type of the expression.
  if (const MaterializeTemporaryExpr *MTE =
          dyn_cast<MaterializeTemporaryExpr>(Base)) {
    SmallVector<const Expr *, 2> CommaLHSs;
    SmallVector<SubobjectAdjustment, 2> Adjustments;
    const Expr *Temp = MTE->getSubExpr();
    const Expr *Inner = Temp->skipRValueSubobjectAdjustments(CommaLHSs,
                                                             Adjustments);
    // Keep any cv-qualifiers from the reference if we generated a temporary
    // for it directly. Otherwise use the type after adjustment.
    if (!Adjustments.empty())
      return Inner->getType();
  }

  return Base->getType();
}

/// Get an LValue path entry, which is known to not be an array index, as a
/// field declaration.
static const FieldDecl *getAsField(APValue::LValuePathEntry E) {
  return dyn_cast_or_null<FieldDecl>(E.getAsBaseOrMember().getPointer());
}
/// Get an LValue path entry, which is known to not be an array index, as a
/// base class declaration.
static const CXXRecordDecl *getAsBaseClass(APValue::LValuePathEntry E) {
  return dyn_cast_or_null<CXXRecordDecl>(E.getAsBaseOrMember().getPointer());
}
/// Determine whether this LValue path entry for a base class names a virtual
/// base class.
static bool isVirtualBaseClass(APValue::LValuePathEntry E) {
  return E.getAsBaseOrMember().getInt();
}

/// Given an expression, determine the type used to store the result of
/// evaluating that expression.
static QualType getStorageType(const ASTContext &Ctx, const Expr *E) {
  if (E->isRValue())
    return E->getType();
  return Ctx.getLValueReferenceType(E->getType());
}

/// Given a CallExpr, try to get the alloc_size attribute. May return null.
static const AllocSizeAttr *getAllocSizeAttr(const CallExpr *CE) {
  const FunctionDecl *Callee = CE->getDirectCallee();
  return Callee ? Callee->getAttr<AllocSizeAttr>() : nullptr;
}

/// Attempts to unwrap a CallExpr (with an alloc_size attribute) from an Expr.
/// This will look through a single cast.
///
/// Returns null if we couldn't unwrap a function with alloc_size.
static const CallExpr *tryUnwrapAllocSizeCall(const Expr *E) {
  if (!E->getType()->isPointerType())
    return nullptr;

  E = E->IgnoreParens();
  // If we're doing a variable assignment from e.g. malloc(N), there will
  // probably be a cast of some kind. In exotic cases, we might also see a
  // top-level ExprWithCleanups. Ignore them either way.
  if (const auto *FE = dyn_cast<FullExpr>(E))
    E = FE->getSubExpr()->IgnoreParens();

  if (const auto *Cast = dyn_cast<CastExpr>(E))
    E = Cast->getSubExpr()->IgnoreParens();

  if (const auto *CE = dyn_cast<CallExpr>(E))
    return getAllocSizeAttr(CE) ? CE : nullptr;
  return nullptr;
}

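// Illustrative example (an editorial addition, not part of the original
// comments; 'my_alloc' is hypothetical): given roughly
//   void *my_alloc(size_t n) __attribute__((alloc_size(1)));
//   void *p = (char *)my_alloc(42);
// tryUnwrapAllocSizeCall looks through the top-level full-expression and the
// single cast to return the CallExpr for my_alloc(42).
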
/// Determines whether or not the given Base contains a call to a function
/// with the alloc_size attribute.
static bool isBaseAnAllocSizeCall(APValue::LValueBase Base) {
  const auto *E = Base.dyn_cast<const Expr *>();
  return E && E->getType()->isPointerType() && tryUnwrapAllocSizeCall(E);
}

/// The bound to claim that an array of unknown bound has.
/// The value in MostDerivedArraySize is undefined in this case. So, set it
/// to an arbitrary value that's likely to loudly break things if it's used.
static const uint64_t AssumedSizeForUnsizedArray =
    std::numeric_limits<uint64_t>::max() / 2;

/// Determines if an LValue with the given LValueBase will have an unsized
/// array in its designator.
/// Find the path length and type of the most-derived subobject in the given
/// path, and find the size of the containing array, if any.
static unsigned
findMostDerivedSubobject(ASTContext &Ctx, APValue::LValueBase Base,
                         ArrayRef<APValue::LValuePathEntry> Path,
                         uint64_t &ArraySize, QualType &Type, bool &IsArray,
                         bool &FirstEntryIsUnsizedArray) {
  // This only accepts LValueBases from APValues, and APValues don't support
  // arrays that lack size info.
  assert(!isBaseAnAllocSizeCall(Base) &&
         "Unsized arrays shouldn't appear here");
  unsigned MostDerivedLength = 0;
  Type = getType(Base);

  for (unsigned I = 0, N = Path.size(); I != N; ++I) {
    if (Type->isArrayType()) {
      const ArrayType *AT = Ctx.getAsArrayType(Type);
      Type = AT->getElementType();
      MostDerivedLength = I + 1;
      IsArray = true;

      if (auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
        ArraySize = CAT->getSize().getZExtValue();
      } else {
        assert(I == 0 && "unexpected unsized array designator");
        FirstEntryIsUnsizedArray = true;
        ArraySize = AssumedSizeForUnsizedArray;
      }
    } else if (Type->isAnyComplexType()) {
      const ComplexType *CT = Type->castAs<ComplexType>();
      Type = CT->getElementType();
      ArraySize = 2;
      MostDerivedLength = I + 1;
      IsArray = true;
    } else if (const FieldDecl *FD = getAsField(Path[I])) {
      Type = FD->getType();
      ArraySize = 0;
      MostDerivedLength = I + 1;
      IsArray = false;
    } else {
      // Path[I] describes a base class.
      ArraySize = 0;
      IsArray = false;
    }
  }
  return MostDerivedLength;
}

/// A path from a glvalue to a subobject of that glvalue.
struct SubobjectDesignator {
  /// True if the subobject was named in a manner not supported by C++11. Such
  /// lvalues can still be folded, but they are not core constant expressions
  /// and we cannot perform lvalue-to-rvalue conversions on them.
  unsigned Invalid : 1;

  /// Is this a pointer one past the end of an object?
  unsigned IsOnePastTheEnd : 1;

  /// Indicator of whether the first entry is an unsized array.
  unsigned FirstEntryIsAnUnsizedArray : 1;

  /// Indicator of whether the most-derived object is an array element.
  unsigned MostDerivedIsArrayElement : 1;

  /// The length of the path to the most-derived object of which this is a
  /// subobject.
  unsigned MostDerivedPathLength : 28;

  /// The size of the array of which the most-derived object is an element.
  /// This will always be 0 if the most-derived object is not an array
  /// element. 0 is not an indicator of whether or not the most-derived object
  /// is an array, however, because 0-length arrays are allowed.
  ///
  /// If the current array is an unsized array, the value of this is
  /// undefined.
  uint64_t MostDerivedArraySize;

  /// The type of the most derived object referred to by this address.
  QualType MostDerivedType;

  typedef APValue::LValuePathEntry PathEntry;

  /// The entries on the path from the glvalue to the designated subobject.
  SmallVector<PathEntry, 8> Entries;

  SubobjectDesignator() : Invalid(true) {}

  explicit SubobjectDesignator(QualType T)
      : Invalid(false), IsOnePastTheEnd(false),
        FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
        MostDerivedPathLength(0), MostDerivedArraySize(0),
        MostDerivedType(T) {}

  SubobjectDesignator(ASTContext &Ctx, const APValue &V)
      : Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
        FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
        MostDerivedPathLength(0), MostDerivedArraySize(0) {
    assert(V.isLValue() && "Non-LValue used to make an LValue designator?");
    if (!Invalid) {
      IsOnePastTheEnd = V.isLValueOnePastTheEnd();
      ArrayRef<PathEntry> VEntries = V.getLValuePath();
      Entries.insert(Entries.end(), VEntries.begin(), VEntries.end());
      if (V.getLValueBase()) {
        bool IsArray = false;
        bool FirstIsUnsizedArray = false;
        MostDerivedPathLength = findMostDerivedSubobject(
            Ctx, V.getLValueBase(), V.getLValuePath(), MostDerivedArraySize,
            MostDerivedType, IsArray, FirstIsUnsizedArray);
        MostDerivedIsArrayElement = IsArray;
        FirstEntryIsAnUnsizedArray = FirstIsUnsizedArray;
      }
    }
  }

  void truncate(ASTContext &Ctx, APValue::LValueBase Base,
                unsigned NewLength) {
    if (Invalid)
      return;

    assert(Base && "cannot truncate path for null pointer");
    assert(NewLength <= Entries.size() && "not a truncation");

    if (NewLength == Entries.size())
      return;
    Entries.resize(NewLength);

    bool IsArray = false;
    bool FirstIsUnsizedArray = false;
    MostDerivedPathLength = findMostDerivedSubobject(
        Ctx, Base, Entries, MostDerivedArraySize, MostDerivedType, IsArray,
        FirstIsUnsizedArray);
    MostDerivedIsArrayElement = IsArray;
    FirstEntryIsAnUnsizedArray = FirstIsUnsizedArray;
  }

  void setInvalid() {
    Invalid = true;
    Entries.clear();
  }

  /// Determine whether the most derived subobject is an array without a
  /// known bound.
  bool isMostDerivedAnUnsizedArray() const {
    assert(!Invalid && "Calling this makes no sense on invalid designators");
    return Entries.size() == 1 && FirstEntryIsAnUnsizedArray;
  }

  /// Determine what the most derived array's size is. Results in an assertion
  /// failure if the most derived array lacks a size.
  uint64_t getMostDerivedArraySize() const {
    assert(!isMostDerivedAnUnsizedArray() && "Unsized array has no size");
    return MostDerivedArraySize;
  }

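  // Illustrative example (an editorial addition, not part of the original
  // comments): for
  //   constexpr int arr[3] = {};
  //   constexpr const int *p = &arr[3];
  // the designator for 'p' holds a single array-index entry of 3 with
  // MostDerivedArraySize == 3, so isOnePastTheEnd() below is true and
  // validIndexAdjustments() is {3, 0}: 3 may be subtracted, nothing added.
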
  /// Determine whether this is a one-past-the-end pointer.
  bool isOnePastTheEnd() const {
    assert(!Invalid);
    if (IsOnePastTheEnd)
      return true;
    if (!isMostDerivedAnUnsizedArray() && MostDerivedIsArrayElement &&
        Entries[MostDerivedPathLength - 1].getAsArrayIndex() ==
            MostDerivedArraySize)
      return true;
    return false;
  }

  /// Get the range of valid index adjustments in the form
  ///   {maximum value that can be subtracted from this pointer,
  ///    maximum value that can be added to this pointer}
  std::pair<uint64_t, uint64_t> validIndexAdjustments() {
    if (Invalid || isMostDerivedAnUnsizedArray())
      return {0, 0};

    // [expr.add]p4: For the purposes of these operators, a pointer to a
    // nonarray object behaves the same as a pointer to the first element of
    // an array of length one with the type of the object as its element type.
    bool IsArray = MostDerivedPathLength == Entries.size() &&
                   MostDerivedIsArrayElement;
    uint64_t ArrayIndex = IsArray ? Entries.back().getAsArrayIndex()
                                  : (uint64_t)IsOnePastTheEnd;
    uint64_t ArraySize =
        IsArray ? getMostDerivedArraySize() : (uint64_t)1;
    return {ArrayIndex, ArraySize - ArrayIndex};
  }

  /// Check that this refers to a valid subobject.
  bool isValidSubobject() const {
    if (Invalid)
      return false;
    return !isOnePastTheEnd();
  }
  /// Check that this refers to a valid subobject, and if not, produce a
  /// relevant diagnostic and set the designator as invalid.
  bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK);

  /// Get the type of the designated object.
  QualType getType(ASTContext &Ctx) const {
    assert(!Invalid && "invalid designator has no subobject type");
    return MostDerivedPathLength == Entries.size()
               ? MostDerivedType
               : Ctx.getRecordType(getAsBaseClass(Entries.back()));
  }

  /// Update this designator to refer to the first element within this array.
  void addArrayUnchecked(const ConstantArrayType *CAT) {
    Entries.push_back(PathEntry::ArrayIndex(0));

    // This is a most-derived object.
    MostDerivedType = CAT->getElementType();
    MostDerivedIsArrayElement = true;
    MostDerivedArraySize = CAT->getSize().getZExtValue();
    MostDerivedPathLength = Entries.size();
  }
  /// Update this designator to refer to the first element within the array of
  /// elements of type T. This is an array of unknown size.
  void addUnsizedArrayUnchecked(QualType ElemTy) {
    Entries.push_back(PathEntry::ArrayIndex(0));

    MostDerivedType = ElemTy;
    MostDerivedIsArrayElement = true;
    // The value in MostDerivedArraySize is undefined in this case. So, set it
    // to an arbitrary value that's likely to loudly break things if it's
    // used.
    MostDerivedArraySize = AssumedSizeForUnsizedArray;
    MostDerivedPathLength = Entries.size();
  }
  /// Update this designator to refer to the given base or member of this
  /// object.
  void addDeclUnchecked(const Decl *D, bool Virtual = false) {
    Entries.push_back(APValue::BaseOrMemberType(D, Virtual));

    // If this isn't a base class, it's a new most-derived object.
    if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
      MostDerivedType = FD->getType();
      MostDerivedIsArrayElement = false;
      MostDerivedArraySize = 0;
      MostDerivedPathLength = Entries.size();
    }
  }
  /// Update this designator to refer to the given complex component.
  void addComplexUnchecked(QualType EltTy, bool Imag) {
    Entries.push_back(PathEntry::ArrayIndex(Imag));

    // This is technically a most-derived object, though in practice this
    // is unlikely to matter.
    MostDerivedType = EltTy;
    MostDerivedIsArrayElement = true;
    MostDerivedArraySize = 2;
    MostDerivedPathLength = Entries.size();
  }
  void diagnoseUnsizedArrayPointerArithmetic(EvalInfo &Info, const Expr *E);
  void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E,
                                 const APSInt &N);
  /// Add N to the address of this subobject.
  void adjustIndex(EvalInfo &Info, const Expr *E, APSInt N) {
    if (Invalid || !N) return;
    uint64_t TruncatedN = N.extOrTrunc(64).getZExtValue();
    if (isMostDerivedAnUnsizedArray()) {
      diagnoseUnsizedArrayPointerArithmetic(Info, E);
      // Can't verify -- trust that the user is doing the right thing (or if
      // not, trust that the caller will catch the bad behavior).
      // FIXME: Should we reject if this overflows, at least?
      Entries.back() = PathEntry::ArrayIndex(
          Entries.back().getAsArrayIndex() + TruncatedN);
      return;
    }

    // [expr.add]p4: For the purposes of these operators, a pointer to a
    // nonarray object behaves the same as a pointer to the first element of
    // an array of length one with the type of the object as its element type.
    bool IsArray = MostDerivedPathLength == Entries.size() &&
                   MostDerivedIsArrayElement;
    uint64_t ArrayIndex = IsArray ? Entries.back().getAsArrayIndex()
                                  : (uint64_t)IsOnePastTheEnd;
    uint64_t ArraySize =
        IsArray ? getMostDerivedArraySize() : (uint64_t)1;

    if (N < -(int64_t)ArrayIndex || N > ArraySize - ArrayIndex) {
      // Calculate the actual index in a wide enough type, so we can include
      // it in the note.
      N = N.extend(std::max<unsigned>(N.getBitWidth() + 1, 65));
      (llvm::APInt&)N += ArrayIndex;
      assert(N.ugt(ArraySize) && "bounds check failed for in-bounds index");
      diagnosePointerArithmetic(Info, E, N);
      setInvalid();
      return;
    }

    ArrayIndex += TruncatedN;
    assert(ArrayIndex <= ArraySize &&
           "bounds check succeeded for out-of-bounds index");

    if (IsArray)
      Entries.back() = PathEntry::ArrayIndex(ArrayIndex);
    else
      IsOnePastTheEnd = (ArrayIndex != 0);
  }
};

/// A stack frame in the constexpr call stack.
class CallStackFrame : public interp::Frame {
public:
  EvalInfo &Info;

  /// Parent - The caller of this stack frame.
  CallStackFrame *Caller;

  /// Callee - The function which was called.
  const FunctionDecl *Callee;

  /// This - The binding for the this pointer in this call, if any.
  const LValue *This;

  /// Arguments - Parameter bindings for this function call, indexed by
  /// parameters' function scope indices.
  APValue *Arguments;

  /// Source location information about the default argument or default
  /// initializer expression we're evaluating, if any.
  CurrentSourceLocExprScope CurSourceLocExprScope;

  // Note that we intentionally use std::map here so that references to
  // values are stable.
  typedef std::pair<const void *, unsigned> MapKeyTy;
  typedef std::map<MapKeyTy, APValue> MapTy;
  /// Temporaries - Temporary lvalues materialized within this stack frame.
  MapTy Temporaries;

  /// CallLoc - The location of the call expression for this call.
  SourceLocation CallLoc;

  /// Index - The call index of this call.
  unsigned Index;

  /// The stack of integers for tracking version numbers for temporaries.
  SmallVector<unsigned, 2> TempVersionStack = {1};
  unsigned CurTempVersion = TempVersionStack.back();

  unsigned getTempVersion() const { return TempVersionStack.back(); }

  void pushTempVersion() {
    TempVersionStack.push_back(++CurTempVersion);
  }

  void popTempVersion() {
    TempVersionStack.pop_back();
  }

  // FIXME: Adding this to every 'CallStackFrame' may have a nontrivial impact
  // on the overall stack usage of deeply-recursing constexpr evaluations.
  // (We should cache this map rather than recomputing it repeatedly.)
  // But let's try this and see how it goes; we can look into caching the map
  // as a later change.

  /// LambdaCaptureFields - Mapping from captured variables/this to
  /// corresponding data members in the closure class.
  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
  FieldDecl *LambdaThisCaptureField;

  CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
                 const FunctionDecl *Callee, const LValue *This,
                 APValue *Arguments);
  ~CallStackFrame();

  // Return the temporary for Key whose version number is Version.
  APValue *getTemporary(const void *Key, unsigned Version) {
    MapKeyTy KV(Key, Version);
    auto LB = Temporaries.lower_bound(KV);
    if (LB != Temporaries.end() && LB->first == KV)
      return &LB->second;
    // Pair (Key,Version) wasn't found in the map. Check that no elements
    // in the map have 'Key' as their key.
    assert((LB == Temporaries.end() || LB->first.first != Key) &&
           (LB == Temporaries.begin() || std::prev(LB)->first.first != Key) &&
           "Element with key 'Key' found in map");
    return nullptr;
  }

  // Return the current temporary for Key in the map.
  APValue *getCurrentTemporary(const void *Key) {
    auto UB = Temporaries.upper_bound(MapKeyTy(Key, UINT_MAX));
    if (UB != Temporaries.begin() && std::prev(UB)->first.first == Key)
      return &std::prev(UB)->second;
    return nullptr;
  }

  // Return the version number of the current temporary for Key.
  unsigned getCurrentTemporaryVersion(const void *Key) const {
    auto UB = Temporaries.upper_bound(MapKeyTy(Key, UINT_MAX));
    if (UB != Temporaries.begin() && std::prev(UB)->first.first == Key)
      return std::prev(UB)->first.second;
    return 0;
  }

  /// Allocate storage for an object of type T in this stack frame.
  /// Populates LV with a handle to the created object. Key identifies
  /// the temporary within the stack frame, and must not be reused without
  /// bumping the temporary version number.
  template<typename KeyT>
  APValue &createTemporary(const KeyT *Key, QualType T,
                           bool IsLifetimeExtended, LValue &LV);

  void describe(llvm::raw_ostream &OS) override;

  Frame *getCaller() const override { return Caller; }
  SourceLocation getCallLocation() const override { return CallLoc; }
  const FunctionDecl *getCallee() const override { return Callee; }

  bool isStdFunction() const {
    for (const DeclContext *DC = Callee; DC; DC = DC->getParent())
      if (DC->isStdNamespace())
        return true;
    return false;
  }
};

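// Illustrative note (an editorial addition, not part of the original
// comments): the (Key, Version) scheme above lets a loop body such as
//   for (int i = 0; i != n; ++i) { const int &r = make(i); /* ... */ }
// materialize a distinct temporary for 'r' on each iteration; every
// iteration pushes a new temporary version, so the same Key maps to fresh
// storage rather than clobbering the previous iteration's value.
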
/// Temporarily override 'this'.
class ThisOverrideRAII {
public:
  ThisOverrideRAII(CallStackFrame &Frame, const LValue *NewThis, bool Enable)
      : Frame(Frame), OldThis(Frame.This) {
    if (Enable)
      Frame.This = NewThis;
  }
  ~ThisOverrideRAII() {
    Frame.This = OldThis;
  }
private:
  CallStackFrame &Frame;
  const LValue *OldThis;
};
}

static bool HandleDestruction(EvalInfo &Info, const Expr *E,
                              const LValue &This, QualType ThisType);
static bool HandleDestruction(EvalInfo &Info, SourceLocation Loc,
                              APValue::LValueBase LVBase, APValue &Value,
                              QualType T);

namespace {
/// A cleanup, and a flag indicating whether it is lifetime-extended.
class Cleanup {
  llvm::PointerIntPair<APValue*, 1, bool> Value;
  APValue::LValueBase Base;
  QualType T;

public:
  Cleanup(APValue *Val, APValue::LValueBase Base, QualType T,
          bool IsLifetimeExtended)
      : Value(Val, IsLifetimeExtended), Base(Base), T(T) {}

  bool isLifetimeExtended() const { return Value.getInt(); }
  bool endLifetime(EvalInfo &Info, bool RunDestructors) {
    if (RunDestructors) {
      SourceLocation Loc;
      if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>())
        Loc = VD->getLocation();
      else if (const Expr *E = Base.dyn_cast<const Expr*>())
        Loc = E->getExprLoc();
      return HandleDestruction(Info, Loc, Base, *Value.getPointer(), T);
    }
    *Value.getPointer() = APValue();
    return true;
  }

  bool hasSideEffect() {
    return T.isDestructedType();
  }
};

/// A reference to an object whose construction we are currently evaluating.
struct ObjectUnderConstruction {
  APValue::LValueBase Base;
  ArrayRef<APValue::LValuePathEntry> Path;
  friend bool operator==(const ObjectUnderConstruction &LHS,
                         const ObjectUnderConstruction &RHS) {
    return LHS.Base == RHS.Base && LHS.Path == RHS.Path;
  }
  friend llvm::hash_code hash_value(const ObjectUnderConstruction &Obj) {
    return llvm::hash_combine(Obj.Base, Obj.Path);
  }
};
enum class ConstructionPhase {
  None,
  Bases,
  AfterBases,
  Destroying,
  DestroyingBases
};
}

namespace llvm {
template<> struct DenseMapInfo<ObjectUnderConstruction> {
  using Base = DenseMapInfo<APValue::LValueBase>;
  static ObjectUnderConstruction getEmptyKey() {
    return {Base::getEmptyKey(), {}};
  }
  static ObjectUnderConstruction getTombstoneKey() {
    return {Base::getTombstoneKey(), {}};
  }
  static unsigned getHashValue(const ObjectUnderConstruction &Object) {
    return hash_value(Object);
  }
  static bool isEqual(const ObjectUnderConstruction &LHS,
                      const ObjectUnderConstruction &RHS) {
    return LHS == RHS;
  }
};
}

namespace {
/// A dynamically-allocated heap object.
struct DynAlloc {
  /// The value of this heap-allocated object.
  APValue Value;
  /// The allocating expression; used for diagnostics. Either a CXXNewExpr
  /// or a CallExpr (the latter is for direct calls to operator new inside
  /// std::allocator<T>::allocate).
  const Expr *AllocExpr = nullptr;

  enum Kind {
    New,
    ArrayNew,
    StdAllocator
  };

  /// Get the kind of the allocation. This must match between allocation
  /// and deallocation.
  Kind getKind() const {
    if (auto *NE = dyn_cast<CXXNewExpr>(AllocExpr))
      return NE->isArray() ? ArrayNew : New;
    assert(isa<CallExpr>(AllocExpr));
    return StdAllocator;
  }
};

struct DynAllocOrder {
  bool operator()(DynamicAllocLValue L, DynamicAllocLValue R) const {
    return L.getIndex() < R.getIndex();
  }
};

/// EvalInfo - This is a private struct used by the evaluator to capture
/// information about a subexpression as it is folded. It retains information
/// about the AST context, but also maintains information about the folded
/// expression.
///
/// If an expression could be evaluated, it is still possible it is not a C
/// "integer constant expression" or constant expression. If not, this struct
/// captures information about how and why not.
///
/// One bit of information passed *into* the request for constant folding
/// indicates whether the subexpression is "evaluated" or not according to C
/// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
/// evaluate the expression regardless of what the RHS is, but C only allows
/// certain things in certain situations.
class EvalInfo : public interp::State {
public:
  ASTContext &Ctx;

  /// EvalStatus - Contains information about the evaluation.
  Expr::EvalStatus &EvalStatus;

  /// CurrentCall - The top of the constexpr call stack.
  CallStackFrame *CurrentCall;

  /// CallStackDepth - The number of calls in the call stack right now.
  unsigned CallStackDepth;

  /// NextCallIndex - The next call index to assign.
  unsigned NextCallIndex;

  /// StepsLeft - The remaining number of evaluation steps we're permitted
  /// to perform. This is essentially a limit for the number of statements
  /// we will evaluate.
  unsigned StepsLeft;

  /// Enable the experimental new constant interpreter. If an expression is
  /// not supported by the interpreter, an error is triggered.
  bool EnableNewConstInterp;

  /// BottomFrame - The frame in which evaluation started. This must be
  /// initialized after CurrentCall and CallStackDepth.
  CallStackFrame BottomFrame;

  /// A stack of values whose lifetimes end at the end of some surrounding
  /// evaluation frame.
  llvm::SmallVector<Cleanup, 16> CleanupStack;

  /// EvaluatingDecl - This is the declaration whose initializer is being
  /// evaluated, if any.
  APValue::LValueBase EvaluatingDecl;

  enum class EvaluatingDeclKind {
    None,
    /// We're evaluating the construction of EvaluatingDecl.
    Ctor,
    /// We're evaluating the destruction of EvaluatingDecl.
    Dtor,
  };
  EvaluatingDeclKind IsEvaluatingDecl = EvaluatingDeclKind::None;

  /// EvaluatingDeclValue - This is the value being constructed for the
  /// declaration whose initializer is being evaluated, if any.
  APValue *EvaluatingDeclValue;

  /// Set of objects that are currently being constructed.
  llvm::DenseMap<ObjectUnderConstruction, ConstructionPhase>
      ObjectsUnderConstruction;

  /// Current heap allocations, along with the location where each was
  /// allocated. We use std::map here because we need stable addresses
  /// for the stored APValues.
  std::map<DynamicAllocLValue, DynAlloc, DynAllocOrder> HeapAllocs;

  /// The number of heap allocations performed so far in this evaluation.
  unsigned NumHeapAllocs = 0;

  struct EvaluatingConstructorRAII {
    EvalInfo &EI;
    ObjectUnderConstruction Object;
    bool DidInsert;
    EvaluatingConstructorRAII(EvalInfo &EI, ObjectUnderConstruction Object,
                              bool HasBases)
        : EI(EI), Object(Object) {
      DidInsert =
          EI.ObjectsUnderConstruction
              .insert({Object, HasBases ? ConstructionPhase::Bases
                                        : ConstructionPhase::AfterBases})
              .second;
    }
    void finishedConstructingBases() {
      EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterBases;
    }
    ~EvaluatingConstructorRAII() {
      if (DidInsert) EI.ObjectsUnderConstruction.erase(Object);
    }
  };

  struct EvaluatingDestructorRAII {
    EvalInfo &EI;
    ObjectUnderConstruction Object;
    bool DidInsert;
    EvaluatingDestructorRAII(EvalInfo &EI, ObjectUnderConstruction Object)
        : EI(EI), Object(Object) {
      DidInsert = EI.ObjectsUnderConstruction
                      .insert({Object, ConstructionPhase::Destroying})
                      .second;
    }
    void startedDestroyingBases() {
      EI.ObjectsUnderConstruction[Object] =
          ConstructionPhase::DestroyingBases;
    }
    ~EvaluatingDestructorRAII() {
      if (DidInsert)
        EI.ObjectsUnderConstruction.erase(Object);
    }
  };

  ConstructionPhase
  isEvaluatingCtorDtor(APValue::LValueBase Base,
                       ArrayRef<APValue::LValuePathEntry> Path) {
    return ObjectsUnderConstruction.lookup({Base, Path});
  }

  /// If we're currently speculatively evaluating, the outermost call stack
  /// depth at which we can mutate state, otherwise 0.
  unsigned SpeculativeEvaluationDepth = 0;

  /// The current array initialization index, if we're performing array
  /// initialization.
  uint64_t ArrayInitIndex = -1;

  /// HasActiveDiagnostic - Was the previous diagnostic stored? If so, further
  /// notes attached to it will also be stored, otherwise they will not be.
  bool HasActiveDiagnostic;

  /// Have we emitted a diagnostic explaining why we couldn't constant
  /// fold (not just why it's not strictly a constant expression)?
  bool HasFoldFailureDiagnostic;

  /// Whether or not we're in a context where the front end requires a
  /// constant value.
  bool InConstantContext;

  /// Whether we're checking that an expression is a potential constant
  /// expression. If so, do not fail on constructs that could become constant
  /// later on (such as a use of an undefined global).
  bool CheckingPotentialConstantExpression = false;

  /// Whether we're checking for an expression that has undefined behavior.
  /// If so, we will produce warnings if we encounter an operation that is
  /// always undefined.
  bool CheckingForUndefinedBehavior = false;

  enum EvaluationMode {
    /// Evaluate as a constant expression. Stop if we find that the expression
    /// is not a constant expression.
    EM_ConstantExpression,

    /// Evaluate as a constant expression. Stop if we find that the expression
    /// is not a constant expression. Some expressions can be retried in the
    /// optimizer if we don't constant fold them here, but in an unevaluated
    /// context we try to fold them immediately since the optimizer never
    /// gets a chance to look at it.
    EM_ConstantExpressionUnevaluated,

    /// Fold the expression to a constant. Stop if we hit a side-effect that
    /// we can't model.
    EM_ConstantFold,

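    // Illustrative example (an editorial addition, not part of the original
    // comments): for an initializer like 'int x = (f(), 3);' where the call
    // to f() has side-effects we cannot model, the mode below can still fold
    // 'x' to 3 while recording an unmodeled side-effect, whereas
    // EM_ConstantExpression must give up.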
    /// Evaluate in any way we know how. Don't worry about side-effects that
    /// can't be modeled.
    EM_IgnoreSideEffects,
  } EvalMode;

  /// Are we checking whether the expression is a potential constant
  /// expression?
  bool checkingPotentialConstantExpression() const override {
    return CheckingPotentialConstantExpression;
  }

  /// Are we checking an expression for overflow?
  // FIXME: We should check for any kind of undefined or suspicious behavior
  // in such constructs, not just overflow.
  bool checkingForUndefinedBehavior() const override {
    return CheckingForUndefinedBehavior;
  }

  EvalInfo(const ASTContext &C, Expr::EvalStatus &S, EvaluationMode Mode)
      : Ctx(const_cast<ASTContext &>(C)), EvalStatus(S), CurrentCall(nullptr),
        CallStackDepth(0), NextCallIndex(1),
        StepsLeft(C.getLangOpts().ConstexprStepLimit),
        EnableNewConstInterp(C.getLangOpts().EnableNewConstInterp),
        BottomFrame(*this, SourceLocation(), nullptr, nullptr, nullptr),
        EvaluatingDecl((const ValueDecl *)nullptr),
        EvaluatingDeclValue(nullptr), HasActiveDiagnostic(false),
        HasFoldFailureDiagnostic(false), InConstantContext(false),
        EvalMode(Mode) {}

  ~EvalInfo() {
    discardCleanups();
  }

  void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value,
                         EvaluatingDeclKind EDK = EvaluatingDeclKind::Ctor) {
    EvaluatingDecl = Base;
    IsEvaluatingDecl = EDK;
    EvaluatingDeclValue = &Value;
  }

  bool CheckCallLimit(SourceLocation Loc) {
    // Don't perform any constexpr calls (other than the call we're checking)
    // when checking a potential constant expression.
    if (checkingPotentialConstantExpression() && CallStackDepth > 1)
      return false;
    if (NextCallIndex == 0) {
      // NextCallIndex has wrapped around.
      FFDiag(Loc, diag::note_constexpr_call_limit_exceeded);
      return false;
    }
    if (CallStackDepth <= getLangOpts().ConstexprCallDepth)
      return true;
    FFDiag(Loc, diag::note_constexpr_depth_limit_exceeded)
        << getLangOpts().ConstexprCallDepth;
    return false;
  }

  std::pair<CallStackFrame *, unsigned>
  getCallFrameAndDepth(unsigned CallIndex) {
    assert(CallIndex && "no call index in getCallFrameAndDepth");
    // We will eventually hit BottomFrame, which has Index 1, so Frame can't
    // be null in this loop.
    unsigned Depth = CallStackDepth;
    CallStackFrame *Frame = CurrentCall;
    while (Frame->Index > CallIndex) {
      Frame = Frame->Caller;
      --Depth;
    }
    if (Frame->Index == CallIndex)
      return {Frame, Depth};
    return {nullptr, 0};
  }

  bool nextStep(const Stmt *S) {
    if (!StepsLeft) {
      FFDiag(S->getBeginLoc(), diag::note_constexpr_step_limit_exceeded);
      return false;
    }
    --StepsLeft;
    return true;
  }

  APValue *createHeapAlloc(const Expr *E, QualType T, LValue &LV);

  Optional<DynAlloc*> lookupDynamicAlloc(DynamicAllocLValue DA) {
    Optional<DynAlloc*> Result;
    auto It = HeapAllocs.find(DA);
    if (It != HeapAllocs.end())
      Result = &It->second;
    return Result;
  }

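  // Illustrative example (an editorial addition, not part of the original
  // comments): when evaluating something like
  //   constexpr int f() { int *p = new int(7); int v = *p; delete p; return v; }
  // each 'new' is recorded in HeapAllocs under a DynamicAllocLValue, and
  // lookupDynamicAlloc() finds that allocation again when '*p' and
  // 'delete p' are evaluated.
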
  /// Information about a stack frame for std::allocator<T>::[de]allocate.
  struct StdAllocatorCaller {
    unsigned FrameIndex;
    QualType ElemType;
    explicit operator bool() const { return FrameIndex != 0; };
  };

  StdAllocatorCaller getStdAllocatorCaller(StringRef FnName) const {
    for (const CallStackFrame *Call = CurrentCall; Call != &BottomFrame;
         Call = Call->Caller) {
      const auto *MD = dyn_cast_or_null<CXXMethodDecl>(Call->Callee);
      if (!MD)
        continue;
      const IdentifierInfo *FnII = MD->getIdentifier();
      if (!FnII || !FnII->isStr(FnName))
        continue;

      const auto *CTSD =
          dyn_cast<ClassTemplateSpecializationDecl>(MD->getParent());
      if (!CTSD)
        continue;

      const IdentifierInfo *ClassII = CTSD->getIdentifier();
      const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
      if (CTSD->isInStdNamespace() && ClassII &&
          ClassII->isStr("allocator") && TAL.size() >= 1 &&
          TAL[0].getKind() == TemplateArgument::Type)
        return {Call->Index, TAL[0].getAsType()};
    }

    return {};
  }

  void performLifetimeExtension() {
    // Disable the cleanups for lifetime-extended temporaries.
    CleanupStack.erase(
        std::remove_if(CleanupStack.begin(), CleanupStack.end(),
                       [](Cleanup &C) { return C.isLifetimeExtended(); }),
        CleanupStack.end());
  }

  /// Throw away any remaining cleanups at the end of evaluation. If any
  /// cleanups would have had a side-effect, note that as an unmodeled
  /// side-effect and return false. Otherwise, return true.
  bool discardCleanups() {
    for (Cleanup &C : CleanupStack) {
      if (C.hasSideEffect() && !noteSideEffect()) {
        CleanupStack.clear();
        return false;
      }
    }
    CleanupStack.clear();
    return true;
  }

private:
  interp::Frame *getCurrentFrame() override { return CurrentCall; }
  const interp::Frame *getBottomFrame() const override { return &BottomFrame; }

  bool hasActiveDiagnostic() override { return HasActiveDiagnostic; }
  void setActiveDiagnostic(bool Flag) override { HasActiveDiagnostic = Flag; }

  void setFoldFailureDiagnostic(bool Flag) override {
    HasFoldFailureDiagnostic = Flag;
  }

  Expr::EvalStatus &getEvalStatus() const override { return EvalStatus; }

  ASTContext &getCtx() const override { return Ctx; }

  // If we have a prior diagnostic, it will be noting that the expression
  // isn't a constant expression. This diagnostic is more important,
  // unless we require this evaluation to produce a constant expression.
  //
  // FIXME: We might want to show both diagnostics to the user in
  // EM_ConstantFold mode.
  bool hasPriorDiagnostic() override {
    if (!EvalStatus.Diag->empty()) {
      switch (EvalMode) {
      case EM_ConstantFold:
      case EM_IgnoreSideEffects:
        if (!HasFoldFailureDiagnostic)
          break;
        // We've already failed to fold something. Keep that diagnostic.
        LLVM_FALLTHROUGH;
      case EM_ConstantExpression:
      case EM_ConstantExpressionUnevaluated:
        setActiveDiagnostic(false);
        return true;
      }
    }
    return false;
  }

  unsigned getCallStackDepth() override { return CallStackDepth; }

public:
  /// Should we continue evaluation after encountering a side-effect that we
  /// couldn't model?
  bool keepEvaluatingAfterSideEffect() {
    switch (EvalMode) {
    case EM_IgnoreSideEffects:
      return true;

    case EM_ConstantExpression:
    case EM_ConstantExpressionUnevaluated:
    case EM_ConstantFold:
      // By default, assume any side effect might be valid in some other
      // evaluation of this expression from a different context.
      return checkingPotentialConstantExpression() ||
             checkingForUndefinedBehavior();
    }
    llvm_unreachable("Missed EvalMode case");
  }

  /// Note that we have had a side-effect, and determine whether we should
  /// keep evaluating.
  bool noteSideEffect() {
    EvalStatus.HasSideEffects = true;
    return keepEvaluatingAfterSideEffect();
  }

  /// Should we continue evaluation after encountering undefined behavior?
  bool keepEvaluatingAfterUndefinedBehavior() {
    switch (EvalMode) {
    case EM_IgnoreSideEffects:
    case EM_ConstantFold:
      return true;

    case EM_ConstantExpression:
    case EM_ConstantExpressionUnevaluated:
      return checkingForUndefinedBehavior();
    }
    llvm_unreachable("Missed EvalMode case");
  }

  /// Note that we hit something that was technically undefined behavior, but
  /// that we can evaluate past it (such as signed overflow or floating-point
  /// division by zero.)
  bool noteUndefinedBehavior() override {
    EvalStatus.HasUndefinedBehavior = true;
    return keepEvaluatingAfterUndefinedBehavior();
  }

  /// Should we continue evaluation as much as possible after encountering a
  /// construct which can't be reduced to a value?
  bool keepEvaluatingAfterFailure() const override {
    if (!StepsLeft)
      return false;

    switch (EvalMode) {
    case EM_ConstantExpression:
    case EM_ConstantExpressionUnevaluated:
    case EM_ConstantFold:
    case EM_IgnoreSideEffects:
      return checkingPotentialConstantExpression() ||
             checkingForUndefinedBehavior();
    }
    llvm_unreachable("Missed EvalMode case");
  }

  /// Notes that we failed to evaluate an expression that other expressions
  /// directly depend on, and determine if we should keep evaluating. This
  /// should only be called if we actually intend to keep evaluating.
  ///
  /// Call noteSideEffect() instead if we may be able to ignore the value that
  /// we failed to evaluate, e.g. if we failed to evaluate Foo() in:
  ///
  ///   (Foo(), 1)      // use noteSideEffect
  ///   (Foo() || true) // use noteSideEffect
  ///   Foo() + 1       // use noteFailure
  LLVM_NODISCARD bool noteFailure() {
    // Failure when evaluating some expression often means there is some
    // subexpression whose evaluation was skipped. Therefore, (because we
    // don't track whether we skipped an expression when unwinding after an
    // evaluation failure) every evaluation failure that bubbles up from a
    // subexpression implies that a side-effect has potentially happened. We
    // skip setting the HasSideEffects flag to true until we decide to
    // continue evaluating after that point, which happens here.
    bool KeepGoing = keepEvaluatingAfterFailure();
    EvalStatus.HasSideEffects |= KeepGoing;
    return KeepGoing;
  }

  class ArrayInitLoopIndex {
    EvalInfo &Info;
    uint64_t OuterIndex;

  public:
    ArrayInitLoopIndex(EvalInfo &Info)
        : Info(Info), OuterIndex(Info.ArrayInitIndex) {
      Info.ArrayInitIndex = 0;
    }
    ~ArrayInitLoopIndex() { Info.ArrayInitIndex = OuterIndex; }

    operator uint64_t&() { return Info.ArrayInitIndex; }
  };
};

/// Object used to treat all foldable expressions as constant expressions.
struct FoldConstant {
  EvalInfo &Info;
  bool Enabled;
  bool HadNoPriorDiags;
  EvalInfo::EvaluationMode OldMode;

  explicit FoldConstant(EvalInfo &Info, bool Enabled)
      : Info(Info),
        Enabled(Enabled),
        HadNoPriorDiags(Info.EvalStatus.Diag &&
                        Info.EvalStatus.Diag->empty() &&
                        !Info.EvalStatus.HasSideEffects),
        OldMode(Info.EvalMode) {
    if (Enabled)
      Info.EvalMode = EvalInfo::EM_ConstantFold;
  }
  void keepDiagnostics() { Enabled = false; }
  ~FoldConstant() {
    if (Enabled && HadNoPriorDiags && !Info.EvalStatus.Diag->empty() &&
        !Info.EvalStatus.HasSideEffects)
      Info.EvalStatus.Diag->clear();
    Info.EvalMode = OldMode;
  }
};

/// RAII object used to set the current evaluation mode to ignore
/// side-effects.
struct IgnoreSideEffectsRAII {
  EvalInfo &Info;
  EvalInfo::EvaluationMode OldMode;
  explicit IgnoreSideEffectsRAII(EvalInfo &Info)
      : Info(Info), OldMode(Info.EvalMode) {
    Info.EvalMode = EvalInfo::EM_IgnoreSideEffects;
  }

  ~IgnoreSideEffectsRAII() { Info.EvalMode = OldMode; }
};

/// RAII object used to optionally suppress diagnostics and side-effects from
/// a speculative evaluation.
class SpeculativeEvaluationRAII {
  EvalInfo *Info = nullptr;
  Expr::EvalStatus OldStatus;
  unsigned OldSpeculativeEvaluationDepth;

  void moveFromAndCancel(SpeculativeEvaluationRAII &&Other) {
    Info = Other.Info;
    OldStatus = Other.OldStatus;
    OldSpeculativeEvaluationDepth = Other.OldSpeculativeEvaluationDepth;
    Other.Info = nullptr;
  }

  void maybeRestoreState() {
    if (!Info)
      return;

    Info->EvalStatus = OldStatus;
    Info->SpeculativeEvaluationDepth = OldSpeculativeEvaluationDepth;
  }

public:
  SpeculativeEvaluationRAII() = default;

  SpeculativeEvaluationRAII(
      EvalInfo &Info, SmallVectorImpl<PartialDiagnosticAt> *NewDiag = nullptr)
      : Info(&Info), OldStatus(Info.EvalStatus),
        OldSpeculativeEvaluationDepth(Info.SpeculativeEvaluationDepth) {
    Info.EvalStatus.Diag = NewDiag;
    Info.SpeculativeEvaluationDepth = Info.CallStackDepth + 1;
  }

  SpeculativeEvaluationRAII(const SpeculativeEvaluationRAII &Other) = delete;
  SpeculativeEvaluationRAII(SpeculativeEvaluationRAII &&Other) {
    moveFromAndCancel(std::move(Other));
  }

  SpeculativeEvaluationRAII &operator=(SpeculativeEvaluationRAII &&Other) {
    maybeRestoreState();
    moveFromAndCancel(std::move(Other));
    return *this;
  }

  ~SpeculativeEvaluationRAII() { maybeRestoreState(); }
};

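// Illustrative usage sketch (an editorial addition; an assumption about a
// typical caller, not original text):
//   SmallVector<PartialDiagnosticAt, 4> ScratchDiags;
//   SpeculativeEvaluationRAII Speculate(Info, &ScratchDiags);
//   (void)Evaluate(Scratch, Info, SubExpr); // notes go to ScratchDiags only
// When 'Speculate' is destroyed, the previous EvalStatus and speculation
// depth are restored, so the speculative attempt leaves no trace behind.
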
/// RAII object wrapping a full-expression or block scope, and handling
/// the ending of the lifetime of temporaries created within it.
template<bool IsFullExpression>
class ScopeRAII {
  EvalInfo &Info;
  unsigned OldStackSize;
public:
  ScopeRAII(EvalInfo &Info)
      : Info(Info), OldStackSize(Info.CleanupStack.size()) {
    // Push a new temporary version. This is needed to distinguish between
    // temporaries created in different iterations of a loop.
    Info.CurrentCall->pushTempVersion();
  }
  bool destroy(bool RunDestructors = true) {
    bool OK = cleanup(Info, RunDestructors, OldStackSize);
    OldStackSize = -1U;
    return OK;
  }
  ~ScopeRAII() {
    if (OldStackSize != -1U)
      destroy(false);
    // Body moved to a static method to encourage the compiler to inline away
    // instances of this class.
    Info.CurrentCall->popTempVersion();
  }
private:
  static bool cleanup(EvalInfo &Info, bool RunDestructors,
                      unsigned OldStackSize) {
    assert(OldStackSize <= Info.CleanupStack.size() &&
           "running cleanups out of order?");

    // Run all cleanups for a block scope, and non-lifetime-extended cleanups
    // for a full-expression scope.
    bool Success = true;
    for (unsigned I = Info.CleanupStack.size(); I > OldStackSize; --I) {
      if (!(IsFullExpression &&
            Info.CleanupStack[I - 1].isLifetimeExtended())) {
        if (!Info.CleanupStack[I - 1].endLifetime(Info, RunDestructors)) {
          Success = false;
          break;
        }
      }
    }

    // Compact lifetime-extended cleanups.
    auto NewEnd = Info.CleanupStack.begin() + OldStackSize;
    if (IsFullExpression)
      NewEnd =
          std::remove_if(NewEnd, Info.CleanupStack.end(),
                         [](Cleanup &C) { return !C.isLifetimeExtended(); });
    Info.CleanupStack.erase(NewEnd, Info.CleanupStack.end());
    return Success;
  }
};
typedef ScopeRAII<false> BlockScopeRAII;
typedef ScopeRAII<true> FullExpressionRAII;
}

bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
                                         CheckSubobjectKind CSK) {
  if (Invalid)
    return false;
  if (isOnePastTheEnd()) {
    Info.CCEDiag(E, diag::note_constexpr_past_end_subobject)
        << CSK;
    setInvalid();
    return false;
  }
  // Note, we do not diagnose if isMostDerivedAnUnsizedArray(), because there
  // must actually be at least one array element; even a VLA cannot have a
  // bound of zero. And if our index is nonzero, we already had a CCEDiag.
  return true;
}

void SubobjectDesignator::diagnoseUnsizedArrayPointerArithmetic(EvalInfo &Info,
                                                                const Expr *E) {
  Info.CCEDiag(E, diag::note_constexpr_unsized_array_indexed);
  // Do not set the designator as invalid: we can represent this situation,
  // and correct handling of __builtin_object_size requires us to do so.
}

void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
                                                    const Expr *E,
                                                    const APSInt &N) {
  // If we're complaining, we must be able to statically determine the size of
  // the most derived array.
1364 if (MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement) 1365 Info.CCEDiag(E, diag::note_constexpr_array_index) 1366 << N << /*array*/ 0 1367 << static_cast<unsigned>(getMostDerivedArraySize()); 1368 else 1369 Info.CCEDiag(E, diag::note_constexpr_array_index) 1370 << N << /*non-array*/ 1; 1371 setInvalid(); 1372 } 1373 1374 CallStackFrame::CallStackFrame(EvalInfo &Info, SourceLocation CallLoc, 1375 const FunctionDecl *Callee, const LValue *This, 1376 APValue *Arguments) 1377 : Info(Info), Caller(Info.CurrentCall), Callee(Callee), This(This), 1378 Arguments(Arguments), CallLoc(CallLoc), Index(Info.NextCallIndex++) { 1379 Info.CurrentCall = this; 1380 ++Info.CallStackDepth; 1381 } 1382 1383 CallStackFrame::~CallStackFrame() { 1384 assert(Info.CurrentCall == this && "calls retired out of order"); 1385 --Info.CallStackDepth; 1386 Info.CurrentCall = Caller; 1387 } 1388 1389 static bool isRead(AccessKinds AK) { 1390 return AK == AK_Read || AK == AK_ReadObjectRepresentation; 1391 } 1392 1393 static bool isModification(AccessKinds AK) { 1394 switch (AK) { 1395 case AK_Read: 1396 case AK_ReadObjectRepresentation: 1397 case AK_MemberCall: 1398 case AK_DynamicCast: 1399 case AK_TypeId: 1400 return false; 1401 case AK_Assign: 1402 case AK_Increment: 1403 case AK_Decrement: 1404 case AK_Construct: 1405 case AK_Destroy: 1406 return true; 1407 } 1408 llvm_unreachable("unknown access kind"); 1409 } 1410 1411 static bool isAnyAccess(AccessKinds AK) { 1412 return isRead(AK) || isModification(AK); 1413 } 1414 1415 /// Is this an access per the C++ definition? 1416 static bool isFormalAccess(AccessKinds AK) { 1417 return isAnyAccess(AK) && AK != AK_Construct && AK != AK_Destroy; 1418 } 1419 1420 namespace { 1421 struct ComplexValue { 1422 private: 1423 bool IsInt; 1424 1425 public: 1426 APSInt IntReal, IntImag; 1427 APFloat FloatReal, FloatImag; 1428 1429 ComplexValue() : FloatReal(APFloat::Bogus()), FloatImag(APFloat::Bogus()) {} 1430 1431 void makeComplexFloat() { IsInt = false; } 1432 bool isComplexFloat() const { return !IsInt; } 1433 APFloat &getComplexFloatReal() { return FloatReal; } 1434 APFloat &getComplexFloatImag() { return FloatImag; } 1435 1436 void makeComplexInt() { IsInt = true; } 1437 bool isComplexInt() const { return IsInt; } 1438 APSInt &getComplexIntReal() { return IntReal; } 1439 APSInt &getComplexIntImag() { return IntImag; } 1440 1441 void moveInto(APValue &v) const { 1442 if (isComplexFloat()) 1443 v = APValue(FloatReal, FloatImag); 1444 else 1445 v = APValue(IntReal, IntImag); 1446 } 1447 void setFrom(const APValue &v) { 1448 assert(v.isComplexFloat() || v.isComplexInt()); 1449 if (v.isComplexFloat()) { 1450 makeComplexFloat(); 1451 FloatReal = v.getComplexFloatReal(); 1452 FloatImag = v.getComplexFloatImag(); 1453 } else { 1454 makeComplexInt(); 1455 IntReal = v.getComplexIntReal(); 1456 IntImag = v.getComplexIntImag(); 1457 } 1458 } 1459 }; 1460 1461 struct LValue { 1462 APValue::LValueBase Base; 1463 CharUnits Offset; 1464 SubobjectDesignator Designator; 1465 bool IsNullPtr : 1; 1466 bool InvalidBase : 1; 1467 1468 const APValue::LValueBase getLValueBase() const { return Base; } 1469 CharUnits &getLValueOffset() { return Offset; } 1470 const CharUnits &getLValueOffset() const { return Offset; } 1471 SubobjectDesignator &getLValueDesignator() { return Designator; } 1472 const SubobjectDesignator &getLValueDesignator() const { return Designator;} 1473 bool isNullPointer() const { return IsNullPtr;} 1474 1475 unsigned getLValueCallIndex() const { return 
Base.getCallIndex(); } 1476 unsigned getLValueVersion() const { return Base.getVersion(); } 1477 1478 void moveInto(APValue &V) const { 1479 if (Designator.Invalid) 1480 V = APValue(Base, Offset, APValue::NoLValuePath(), IsNullPtr); 1481 else { 1482 assert(!InvalidBase && "APValues can't handle invalid LValue bases"); 1483 V = APValue(Base, Offset, Designator.Entries, 1484 Designator.IsOnePastTheEnd, IsNullPtr); 1485 } 1486 } 1487 void setFrom(ASTContext &Ctx, const APValue &V) { 1488 assert(V.isLValue() && "Setting LValue from a non-LValue?"); 1489 Base = V.getLValueBase(); 1490 Offset = V.getLValueOffset(); 1491 InvalidBase = false; 1492 Designator = SubobjectDesignator(Ctx, V); 1493 IsNullPtr = V.isNullPointer(); 1494 } 1495 1496 void set(APValue::LValueBase B, bool BInvalid = false) { 1497 #ifndef NDEBUG 1498 // We only allow a few types of invalid bases. Enforce that here. 1499 if (BInvalid) { 1500 const auto *E = B.get<const Expr *>(); 1501 assert((isa<MemberExpr>(E) || tryUnwrapAllocSizeCall(E)) && 1502 "Unexpected type of invalid base"); 1503 } 1504 #endif 1505 1506 Base = B; 1507 Offset = CharUnits::fromQuantity(0); 1508 InvalidBase = BInvalid; 1509 Designator = SubobjectDesignator(getType(B)); 1510 IsNullPtr = false; 1511 } 1512 1513 void setNull(ASTContext &Ctx, QualType PointerTy) { 1514 Base = (Expr *)nullptr; 1515 Offset = 1516 CharUnits::fromQuantity(Ctx.getTargetNullPointerValue(PointerTy)); 1517 InvalidBase = false; 1518 Designator = SubobjectDesignator(PointerTy->getPointeeType()); 1519 IsNullPtr = true; 1520 } 1521 1522 void setInvalid(APValue::LValueBase B, unsigned I = 0) { 1523 set(B, true); 1524 } 1525 1526 std::string toString(ASTContext &Ctx, QualType T) const { 1527 APValue Printable; 1528 moveInto(Printable); 1529 return Printable.getAsString(Ctx, T); 1530 } 1531 1532 private: 1533 // Check that this LValue is not based on a null pointer. If it is, produce 1534 // a diagnostic and mark the designator as invalid. 1535 template <typename GenDiagType> 1536 bool checkNullPointerDiagnosingWith(const GenDiagType &GenDiag) { 1537 if (Designator.Invalid) 1538 return false; 1539 if (IsNullPtr) { 1540 GenDiag(); 1541 Designator.setInvalid(); 1542 return false; 1543 } 1544 return true; 1545 } 1546 1547 public: 1548 bool checkNullPointer(EvalInfo &Info, const Expr *E, 1549 CheckSubobjectKind CSK) { 1550 return checkNullPointerDiagnosingWith([&Info, E, CSK] { 1551 Info.CCEDiag(E, diag::note_constexpr_null_subobject) << CSK; 1552 }); 1553 } 1554 1555 bool checkNullPointerForFoldAccess(EvalInfo &Info, const Expr *E, 1556 AccessKinds AK) { 1557 return checkNullPointerDiagnosingWith([&Info, E, AK] { 1558 Info.FFDiag(E, diag::note_constexpr_access_null) << AK; 1559 }); 1560 } 1561 1562 // Check this LValue refers to an object. If not, set the designator to be 1563 // invalid and emit a diagnostic. 1564 bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) { 1565 return (CSK == CSK_ArrayToPointer || checkNullPointer(Info, E, CSK)) && 1566 Designator.checkSubobject(Info, E, CSK); 1567 } 1568 1569 void addDecl(EvalInfo &Info, const Expr *E, 1570 const Decl *D, bool Virtual = false) { 1571 if (checkSubobject(Info, E, isa<FieldDecl>(D) ? 
CSK_Field : CSK_Base)) 1572 Designator.addDeclUnchecked(D, Virtual); 1573 } 1574 void addUnsizedArray(EvalInfo &Info, const Expr *E, QualType ElemTy) { 1575 if (!Designator.Entries.empty()) { 1576 Info.CCEDiag(E, diag::note_constexpr_unsupported_unsized_array); 1577 Designator.setInvalid(); 1578 return; 1579 } 1580 if (checkSubobject(Info, E, CSK_ArrayToPointer)) { 1581 assert(getType(Base)->isPointerType() || getType(Base)->isArrayType()); 1582 Designator.FirstEntryIsAnUnsizedArray = true; 1583 Designator.addUnsizedArrayUnchecked(ElemTy); 1584 } 1585 } 1586 void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) { 1587 if (checkSubobject(Info, E, CSK_ArrayToPointer)) 1588 Designator.addArrayUnchecked(CAT); 1589 } 1590 void addComplex(EvalInfo &Info, const Expr *E, QualType EltTy, bool Imag) { 1591 if (checkSubobject(Info, E, Imag ? CSK_Imag : CSK_Real)) 1592 Designator.addComplexUnchecked(EltTy, Imag); 1593 } 1594 void clearIsNullPointer() { 1595 IsNullPtr = false; 1596 } 1597 void adjustOffsetAndIndex(EvalInfo &Info, const Expr *E, 1598 const APSInt &Index, CharUnits ElementSize) { 1599 // An index of 0 has no effect. (In C, adding 0 to a null pointer is UB, 1600 // but we're not required to diagnose it and it's valid in C++.) 1601 if (!Index) 1602 return; 1603 1604 // Compute the new offset in the appropriate width, wrapping at 64 bits. 1605 // FIXME: When compiling for a 32-bit target, we should use 32-bit 1606 // offsets. 1607 uint64_t Offset64 = Offset.getQuantity(); 1608 uint64_t ElemSize64 = ElementSize.getQuantity(); 1609 uint64_t Index64 = Index.extOrTrunc(64).getZExtValue(); 1610 Offset = CharUnits::fromQuantity(Offset64 + ElemSize64 * Index64); 1611 1612 if (checkNullPointer(Info, E, CSK_ArrayIndex)) 1613 Designator.adjustIndex(Info, E, Index); 1614 clearIsNullPointer(); 1615 } 1616 void adjustOffset(CharUnits N) { 1617 Offset += N; 1618 if (N.getQuantity()) 1619 clearIsNullPointer(); 1620 } 1621 }; 1622 1623 struct MemberPtr { 1624 MemberPtr() {} 1625 explicit MemberPtr(const ValueDecl *Decl) : 1626 DeclAndIsDerivedMember(Decl, false), Path() {} 1627 1628 /// The member or (direct or indirect) field referred to by this member 1629 /// pointer, or 0 if this is a null member pointer. 1630 const ValueDecl *getDecl() const { 1631 return DeclAndIsDerivedMember.getPointer(); 1632 } 1633 /// Is this actually a member of some type derived from the relevant class? 1634 bool isDerivedMember() const { 1635 return DeclAndIsDerivedMember.getInt(); 1636 } 1637 /// Get the class which the declaration actually lives in. 1638 const CXXRecordDecl *getContainingRecord() const { 1639 return cast<CXXRecordDecl>( 1640 DeclAndIsDerivedMember.getPointer()->getDeclContext()); 1641 } 1642 1643 void moveInto(APValue &V) const { 1644 V = APValue(getDecl(), isDerivedMember(), Path); 1645 } 1646 void setFrom(const APValue &V) { 1647 assert(V.isMemberPointer()); 1648 DeclAndIsDerivedMember.setPointer(V.getMemberPointerDecl()); 1649 DeclAndIsDerivedMember.setInt(V.isMemberPointerToDerivedMember()); 1650 Path.clear(); 1651 ArrayRef<const CXXRecordDecl*> P = V.getMemberPointerPath(); 1652 Path.insert(Path.end(), P.begin(), P.end()); 1653 } 1654 1655 /// DeclAndIsDerivedMember - The member declaration, and a flag indicating 1656 /// whether the member is a member of some class derived from the class type 1657 /// of the member pointer. 
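  /// For example (an illustrative case, not taken from the surrounding code):
  /// given
  ///   struct B {}; struct D : B { int n; };
  /// the value 'static_cast<int B::*>(&D::n)' has class type B but designates
  /// a member of the derived class D, so the flag is set for it.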
1658 llvm::PointerIntPair<const ValueDecl*, 1, bool> DeclAndIsDerivedMember; 1659 /// Path - The path of base/derived classes from the member declaration's 1660 /// class (exclusive) to the class type of the member pointer (inclusive). 1661 SmallVector<const CXXRecordDecl*, 4> Path; 1662 1663 /// Perform a cast towards the class of the Decl (either up or down the 1664 /// hierarchy). 1665 bool castBack(const CXXRecordDecl *Class) { 1666 assert(!Path.empty()); 1667 const CXXRecordDecl *Expected; 1668 if (Path.size() >= 2) 1669 Expected = Path[Path.size() - 2]; 1670 else 1671 Expected = getContainingRecord(); 1672 if (Expected->getCanonicalDecl() != Class->getCanonicalDecl()) { 1673 // C++11 [expr.static.cast]p12: In a conversion from (D::*) to (B::*), 1674 // if B does not contain the original member and is not a base or 1675 // derived class of the class containing the original member, the result 1676 // of the cast is undefined. 1677 // C++11 [conv.mem]p2 does not cover this case for a cast from (B::*) to 1678 // (D::*). We consider that to be a language defect. 1679 return false; 1680 } 1681 Path.pop_back(); 1682 return true; 1683 } 1684 /// Perform a base-to-derived member pointer cast. 1685 bool castToDerived(const CXXRecordDecl *Derived) { 1686 if (!getDecl()) 1687 return true; 1688 if (!isDerivedMember()) { 1689 Path.push_back(Derived); 1690 return true; 1691 } 1692 if (!castBack(Derived)) 1693 return false; 1694 if (Path.empty()) 1695 DeclAndIsDerivedMember.setInt(false); 1696 return true; 1697 } 1698 /// Perform a derived-to-base member pointer cast. 1699 bool castToBase(const CXXRecordDecl *Base) { 1700 if (!getDecl()) 1701 return true; 1702 if (Path.empty()) 1703 DeclAndIsDerivedMember.setInt(true); 1704 if (isDerivedMember()) { 1705 Path.push_back(Base); 1706 return true; 1707 } 1708 return castBack(Base); 1709 } 1710 }; 1711 1712 /// Compare two member pointers, which are assumed to be of the same type. 1713 static bool operator==(const MemberPtr &LHS, const MemberPtr &RHS) { 1714 if (!LHS.getDecl() || !RHS.getDecl()) 1715 return !LHS.getDecl() && !RHS.getDecl(); 1716 if (LHS.getDecl()->getCanonicalDecl() != RHS.getDecl()->getCanonicalDecl()) 1717 return false; 1718 return LHS.Path == RHS.Path; 1719 } 1720 } 1721 1722 static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E); 1723 static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, 1724 const LValue &This, const Expr *E, 1725 bool AllowNonLiteralTypes = false); 1726 static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info, 1727 bool InvalidBaseOK = false); 1728 static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info, 1729 bool InvalidBaseOK = false); 1730 static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result, 1731 EvalInfo &Info); 1732 static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info); 1733 static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info); 1734 static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result, 1735 EvalInfo &Info); 1736 static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info); 1737 static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info); 1738 static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result, 1739 EvalInfo &Info); 1740 static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result); 1741 1742 /// Evaluate an integer or fixed point expression into an APResult. 
1743 static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result, 1744 EvalInfo &Info); 1745 1746 /// Evaluate only a fixed point expression into an APResult. 1747 static bool EvaluateFixedPoint(const Expr *E, APFixedPoint &Result, 1748 EvalInfo &Info); 1749 1750 //===----------------------------------------------------------------------===// 1751 // Misc utilities 1752 //===----------------------------------------------------------------------===// 1753 1754 /// Negate an APSInt in place, converting it to a signed form if necessary, and 1755 /// preserving its value (by extending by up to one bit as needed). 1756 static void negateAsSigned(APSInt &Int) { 1757 if (Int.isUnsigned() || Int.isMinSignedValue()) { 1758 Int = Int.extend(Int.getBitWidth() + 1); 1759 Int.setIsSigned(true); 1760 } 1761 Int = -Int; 1762 } 1763 1764 template<typename KeyT> 1765 APValue &CallStackFrame::createTemporary(const KeyT *Key, QualType T, 1766 bool IsLifetimeExtended, LValue &LV) { 1767 unsigned Version = getTempVersion(); 1768 APValue::LValueBase Base(Key, Index, Version); 1769 LV.set(Base); 1770 APValue &Result = Temporaries[MapKeyTy(Key, Version)]; 1771 assert(Result.isAbsent() && "temporary created multiple times"); 1772 1773 // If we're creating a temporary immediately in the operand of a speculative 1774 // evaluation, don't register a cleanup to be run outside the speculative 1775 // evaluation context, since we won't actually be able to initialize this 1776 // object. 1777 if (Index <= Info.SpeculativeEvaluationDepth) { 1778 if (T.isDestructedType()) 1779 Info.noteSideEffect(); 1780 } else { 1781 Info.CleanupStack.push_back(Cleanup(&Result, Base, T, IsLifetimeExtended)); 1782 } 1783 return Result; 1784 } 1785 1786 APValue *EvalInfo::createHeapAlloc(const Expr *E, QualType T, LValue &LV) { 1787 if (NumHeapAllocs > DynamicAllocLValue::getMaxIndex()) { 1788 FFDiag(E, diag::note_constexpr_heap_alloc_limit_exceeded); 1789 return nullptr; 1790 } 1791 1792 DynamicAllocLValue DA(NumHeapAllocs++); 1793 LV.set(APValue::LValueBase::getDynamicAlloc(DA, T)); 1794 auto Result = HeapAllocs.emplace(std::piecewise_construct, 1795 std::forward_as_tuple(DA), std::tuple<>()); 1796 assert(Result.second && "reused a heap alloc index?"); 1797 Result.first->second.AllocExpr = E; 1798 return &Result.first->second.Value; 1799 } 1800 1801 /// Produce a string describing the given constexpr call. 1802 void CallStackFrame::describe(raw_ostream &Out) { 1803 unsigned ArgIndex = 0; 1804 bool IsMemberCall = isa<CXXMethodDecl>(Callee) && 1805 !isa<CXXConstructorDecl>(Callee) && 1806 cast<CXXMethodDecl>(Callee)->isInstance(); 1807 1808 if (!IsMemberCall) 1809 Out << *Callee << '('; 1810 1811 if (This && IsMemberCall) { 1812 APValue Val; 1813 This->moveInto(Val); 1814 Val.printPretty(Out, Info.Ctx, 1815 This->Designator.MostDerivedType); 1816 // FIXME: Add parens around Val if needed. 1817 Out << "->" << *Callee << '('; 1818 IsMemberCall = false; 1819 } 1820 1821 for (FunctionDecl::param_const_iterator I = Callee->param_begin(), 1822 E = Callee->param_end(); I != E; ++I, ++ArgIndex) { 1823 if (ArgIndex > (unsigned)IsMemberCall) 1824 Out << ", "; 1825 1826 const ParmVarDecl *Param = *I; 1827 const APValue &Arg = Arguments[ArgIndex]; 1828 Arg.printPretty(Out, Info.Ctx, Param->getType()); 1829 1830 if (ArgIndex == 0 && IsMemberCall) 1831 Out << "->" << *Callee << '('; 1832 } 1833 1834 Out << ')'; 1835 } 1836 1837 /// Evaluate an expression to see if it had side-effects, and discard its 1838 /// result. 
1839 /// \return \c true if the caller should keep evaluating. 1840 static bool EvaluateIgnoredValue(EvalInfo &Info, const Expr *E) { 1841 APValue Scratch; 1842 if (!Evaluate(Scratch, Info, E)) 1843 // We don't need the value, but we might have skipped a side effect here. 1844 return Info.noteSideEffect(); 1845 return true; 1846 } 1847 1848 /// Should this call expression be treated as a string literal? 1849 static bool IsStringLiteralCall(const CallExpr *E) { 1850 unsigned Builtin = E->getBuiltinCallee(); 1851 return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString || 1852 Builtin == Builtin::BI__builtin___NSStringMakeConstantString); 1853 } 1854 1855 static bool IsGlobalLValue(APValue::LValueBase B) { 1856 // C++11 [expr.const]p3 An address constant expression is a prvalue core 1857 // constant expression of pointer type that evaluates to... 1858 1859 // ... a null pointer value, or a prvalue core constant expression of type 1860 // std::nullptr_t. 1861 if (!B) return true; 1862 1863 if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) { 1864 // ... the address of an object with static storage duration, 1865 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) 1866 return VD->hasGlobalStorage(); 1867 // ... the address of a function, 1868 return isa<FunctionDecl>(D); 1869 } 1870 1871 if (B.is<TypeInfoLValue>() || B.is<DynamicAllocLValue>()) 1872 return true; 1873 1874 const Expr *E = B.get<const Expr*>(); 1875 switch (E->getStmtClass()) { 1876 default: 1877 return false; 1878 case Expr::CompoundLiteralExprClass: { 1879 const CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E); 1880 return CLE->isFileScope() && CLE->isLValue(); 1881 } 1882 case Expr::MaterializeTemporaryExprClass: 1883 // A materialized temporary might have been lifetime-extended to static 1884 // storage duration. 1885 return cast<MaterializeTemporaryExpr>(E)->getStorageDuration() == SD_Static; 1886 // A string literal has static storage duration. 1887 case Expr::StringLiteralClass: 1888 case Expr::PredefinedExprClass: 1889 case Expr::ObjCStringLiteralClass: 1890 case Expr::ObjCEncodeExprClass: 1891 case Expr::CXXUuidofExprClass: 1892 return true; 1893 case Expr::ObjCBoxedExprClass: 1894 return cast<ObjCBoxedExpr>(E)->isExpressibleAsConstantInitializer(); 1895 case Expr::CallExprClass: 1896 return IsStringLiteralCall(cast<CallExpr>(E)); 1897 // For GCC compatibility, &&label has static storage duration. 1898 case Expr::AddrLabelExprClass: 1899 return true; 1900 // A Block literal expression may be used as the initialization value for 1901 // Block variables at global or local static scope. 1902 case Expr::BlockExprClass: 1903 return !cast<BlockExpr>(E)->getBlockDecl()->hasCaptures(); 1904 case Expr::ImplicitValueInitExprClass: 1905 // FIXME: 1906 // We can never form an lvalue with an implicit value initialization as its 1907 // base through expression evaluation, so these only appear in one case: the 1908 // implicit variable declaration we invent when checking whether a constexpr 1909 // constructor can produce a constant expression. We must assume that such 1910 // an expression might be a global lvalue. 
1911     return true;
1912   }
1913 }
1914 
1915 static const ValueDecl *GetLValueBaseDecl(const LValue &LVal) {
1916   return LVal.Base.dyn_cast<const ValueDecl*>();
1917 }
1918 
1919 static bool IsLiteralLValue(const LValue &Value) {
1920   if (Value.getLValueCallIndex())
1921     return false;
1922   const Expr *E = Value.Base.dyn_cast<const Expr*>();
1923   return E && !isa<MaterializeTemporaryExpr>(E);
1924 }
1925 
1926 static bool IsWeakLValue(const LValue &Value) {
1927   const ValueDecl *Decl = GetLValueBaseDecl(Value);
1928   return Decl && Decl->isWeak();
1929 }
1930 
1931 static bool isZeroSized(const LValue &Value) {
1932   const ValueDecl *Decl = GetLValueBaseDecl(Value);
1933   if (Decl && isa<VarDecl>(Decl)) {
1934     QualType Ty = Decl->getType();
1935     if (Ty->isArrayType())
1936       return Ty->isIncompleteType() ||
1937              Decl->getASTContext().getTypeSize(Ty) == 0;
1938   }
1939   return false;
1940 }
1941 
1942 static bool HasSameBase(const LValue &A, const LValue &B) {
1943   if (!A.getLValueBase())
1944     return !B.getLValueBase();
1945   if (!B.getLValueBase())
1946     return false;
1947 
1948   if (A.getLValueBase().getOpaqueValue() !=
1949       B.getLValueBase().getOpaqueValue()) {
1950     const Decl *ADecl = GetLValueBaseDecl(A);
1951     if (!ADecl)
1952       return false;
1953     const Decl *BDecl = GetLValueBaseDecl(B);
1954     if (!BDecl || ADecl->getCanonicalDecl() != BDecl->getCanonicalDecl())
1955       return false;
1956   }
1957 
1958   return IsGlobalLValue(A.getLValueBase()) ||
1959          (A.getLValueCallIndex() == B.getLValueCallIndex() &&
1960           A.getLValueVersion() == B.getLValueVersion());
1961 }
1962 
1963 static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
1964   assert(Base && "no location for a null lvalue");
1965   const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
1966   if (VD)
1967     Info.Note(VD->getLocation(), diag::note_declared_at);
1968   else if (const Expr *E = Base.dyn_cast<const Expr*>())
1969     Info.Note(E->getExprLoc(), diag::note_constexpr_temporary_here);
1970   else if (DynamicAllocLValue DA = Base.dyn_cast<DynamicAllocLValue>()) {
1971     // FIXME: Produce a note for dangling pointers too.
1972     if (Optional<DynAlloc*> Alloc = Info.lookupDynamicAlloc(DA))
1973       Info.Note((*Alloc)->AllocExpr->getExprLoc(),
1974                 diag::note_constexpr_dynamic_alloc_here);
1975   }
1976   // We have no information to show for a typeid(T) object.
1977 }
1978 
1979 enum class CheckEvaluationResultKind {
1980   ConstantExpression,
1981   FullyInitialized,
1982 };
1983 
1984 /// Materialized temporaries that we've already checked to determine if they're
1985 /// initialized by a constant expression.
1986 using CheckedTemporaries =
1987     llvm::SmallPtrSet<const MaterializeTemporaryExpr *, 8>;
1988 
1989 static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
1990                                   EvalInfo &Info, SourceLocation DiagLoc,
1991                                   QualType Type, const APValue &Value,
1992                                   Expr::ConstExprUsage Usage,
1993                                   SourceLocation SubobjectLoc,
1994                                   CheckedTemporaries &CheckedTemps);
1995 
1996 /// Check that this reference or pointer core constant expression is a valid
1997 /// value for an address or reference constant expression. Return true if we
1998 /// can fold this expression, whether or not it's a constant expression.
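/// For example (a sketch of the rule being enforced): for
///   constexpr const int *p = &n;
/// the check below succeeds only if 'n' designates an object with static
/// storage duration; taking the address of a local variable is rejected by
/// the IsGlobalLValue check.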
1999 static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
2000                                           QualType Type, const LValue &LVal,
2001                                           Expr::ConstExprUsage Usage,
2002                                           CheckedTemporaries &CheckedTemps) {
2003   bool IsReferenceType = Type->isReferenceType();
2004 
2005   APValue::LValueBase Base = LVal.getLValueBase();
2006   const SubobjectDesignator &Designator = LVal.getLValueDesignator();
2007 
2008   // Check that the object is a global. Note that the fake 'this' object we
2009   // manufacture when checking potential constant expressions is conservatively
2010   // assumed to be global here.
2011   if (!IsGlobalLValue(Base)) {
2012     if (Info.getLangOpts().CPlusPlus11) {
2013       const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
2014       Info.FFDiag(Loc, diag::note_constexpr_non_global, 1)
2015           << IsReferenceType << !Designator.Entries.empty()
2016           << !!VD << VD;
2017       NoteLValueLocation(Info, Base);
2018     } else {
2019       Info.FFDiag(Loc);
2020     }
2021     // Don't allow references to temporaries to escape.
2022     return false;
2023   }
2024   assert((Info.checkingPotentialConstantExpression() ||
2025           LVal.getLValueCallIndex() == 0) &&
2026          "have call index for global lvalue");
2027 
2028   if (Base.is<DynamicAllocLValue>()) {
2029     Info.FFDiag(Loc, diag::note_constexpr_dynamic_alloc)
2030         << IsReferenceType << !Designator.Entries.empty();
2031     NoteLValueLocation(Info, Base);
2032     return false;
2033   }
2034 
2035   if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) {
2036     if (const VarDecl *Var = dyn_cast<const VarDecl>(VD)) {
2037       // Check if this is a thread-local variable.
2038       if (Var->getTLSKind())
2039         // FIXME: Diagnostic!
2040         return false;
2041 
2042       // A dllimport variable never acts like a constant.
2043       if (Usage == Expr::EvaluateForCodeGen && Var->hasAttr<DLLImportAttr>())
2044         // FIXME: Diagnostic!
2045         return false;
2046     }
2047     if (const auto *FD = dyn_cast<const FunctionDecl>(VD)) {
2048       // __declspec(dllimport) must be handled very carefully:
2049       // We must never initialize an expression with the thunk in C++.
2050       // Doing otherwise would allow the same id-expression to yield
2051       // different addresses for the same function in different translation
2052       // units. However, this means that we must dynamically initialize the
2053       // expression with the contents of the import address table at runtime.
2054       //
2055       // The C language has no notion of ODR; furthermore, it has no notion of
2056       // dynamic initialization. This means that we are permitted to
2057       // perform initialization with the address of the thunk.
2058       if (Info.getLangOpts().CPlusPlus && Usage == Expr::EvaluateForCodeGen &&
2059           FD->hasAttr<DLLImportAttr>())
2060         // FIXME: Diagnostic!
2061         return false;
2062     }
2063   } else if (const auto *MTE = dyn_cast_or_null<MaterializeTemporaryExpr>(
2064                  Base.dyn_cast<const Expr *>())) {
2065     if (CheckedTemps.insert(MTE).second) {
2066       QualType TempType = getType(Base);
2067       if (TempType.isDestructedType()) {
2068         Info.FFDiag(MTE->getExprLoc(),
2069                     diag::note_constexpr_unsupported_tempoarary_nontrivial_dtor)
2070             << TempType;
2071         return false;
2072       }
2073 
2074       APValue *V = MTE->getOrCreateValue(false);
2075       assert(V && "evaluation result refers to uninitialised temporary");
2076       if (!CheckEvaluationResult(CheckEvaluationResultKind::ConstantExpression,
2077                                  Info, MTE->getExprLoc(), TempType, *V,
2078                                  Usage, SourceLocation(), CheckedTemps))
2079         return false;
2080     }
2081   }
2082 
2083   // Allow address constant expressions to be past-the-end pointers.
This is 2084 // an extension: the standard requires them to point to an object. 2085 if (!IsReferenceType) 2086 return true; 2087 2088 // A reference constant expression must refer to an object. 2089 if (!Base) { 2090 // FIXME: diagnostic 2091 Info.CCEDiag(Loc); 2092 return true; 2093 } 2094 2095 // Does this refer one past the end of some object? 2096 if (!Designator.Invalid && Designator.isOnePastTheEnd()) { 2097 const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>(); 2098 Info.FFDiag(Loc, diag::note_constexpr_past_end, 1) 2099 << !Designator.Entries.empty() << !!VD << VD; 2100 NoteLValueLocation(Info, Base); 2101 } 2102 2103 return true; 2104 } 2105 2106 /// Member pointers are constant expressions unless they point to a 2107 /// non-virtual dllimport member function. 2108 static bool CheckMemberPointerConstantExpression(EvalInfo &Info, 2109 SourceLocation Loc, 2110 QualType Type, 2111 const APValue &Value, 2112 Expr::ConstExprUsage Usage) { 2113 const ValueDecl *Member = Value.getMemberPointerDecl(); 2114 const auto *FD = dyn_cast_or_null<CXXMethodDecl>(Member); 2115 if (!FD) 2116 return true; 2117 return Usage == Expr::EvaluateForMangling || FD->isVirtual() || 2118 !FD->hasAttr<DLLImportAttr>(); 2119 } 2120 2121 /// Check that this core constant expression is of literal type, and if not, 2122 /// produce an appropriate diagnostic. 2123 static bool CheckLiteralType(EvalInfo &Info, const Expr *E, 2124 const LValue *This = nullptr) { 2125 if (!E->isRValue() || E->getType()->isLiteralType(Info.Ctx)) 2126 return true; 2127 2128 // C++1y: A constant initializer for an object o [...] may also invoke 2129 // constexpr constructors for o and its subobjects even if those objects 2130 // are of non-literal class types. 2131 // 2132 // C++11 missed this detail for aggregates, so classes like this: 2133 // struct foo_t { union { int i; volatile int j; } u; }; 2134 // are not (obviously) initializable like so: 2135 // __attribute__((__require_constant_initialization__)) 2136 // static const foo_t x = {{0}}; 2137 // because "i" is a subobject with non-literal initialization (due to the 2138 // volatile member of the union). See: 2139 // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#1677 2140 // Therefore, we use the C++1y behavior. 2141 if (This && Info.EvaluatingDecl == This->getLValueBase()) 2142 return true; 2143 2144 // Prvalue constant expressions must be of literal types. 2145 if (Info.getLangOpts().CPlusPlus11) 2146 Info.FFDiag(E, diag::note_constexpr_nonliteral) 2147 << E->getType(); 2148 else 2149 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr); 2150 return false; 2151 } 2152 2153 static bool CheckEvaluationResult(CheckEvaluationResultKind CERK, 2154 EvalInfo &Info, SourceLocation DiagLoc, 2155 QualType Type, const APValue &Value, 2156 Expr::ConstExprUsage Usage, 2157 SourceLocation SubobjectLoc, 2158 CheckedTemporaries &CheckedTemps) { 2159 if (!Value.hasValue()) { 2160 Info.FFDiag(DiagLoc, diag::note_constexpr_uninitialized) 2161 << true << Type; 2162 if (SubobjectLoc.isValid()) 2163 Info.Note(SubobjectLoc, diag::note_constexpr_subobject_declared_here); 2164 return false; 2165 } 2166 2167 // We allow _Atomic(T) to be initialized from anything that T can be 2168 // initialized from. 2169 if (const AtomicType *AT = Type->getAs<AtomicType>()) 2170 Type = AT->getValueType(); 2171 2172 // Core issue 1454: For a literal constant expression of array or class type, 2173 // each subobject of its value shall have been initialized by a constant 2174 // expression. 
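  // The code below therefore recurses through the value: each initialized
  // array element (and the array filler, if any), the active member of a
  // union, and every base class subobject and named field of a struct
  // (unnamed bit-fields are skipped).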
2175 if (Value.isArray()) { 2176 QualType EltTy = Type->castAsArrayTypeUnsafe()->getElementType(); 2177 for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) { 2178 if (!CheckEvaluationResult(CERK, Info, DiagLoc, EltTy, 2179 Value.getArrayInitializedElt(I), Usage, 2180 SubobjectLoc, CheckedTemps)) 2181 return false; 2182 } 2183 if (!Value.hasArrayFiller()) 2184 return true; 2185 return CheckEvaluationResult(CERK, Info, DiagLoc, EltTy, 2186 Value.getArrayFiller(), Usage, SubobjectLoc, 2187 CheckedTemps); 2188 } 2189 if (Value.isUnion() && Value.getUnionField()) { 2190 return CheckEvaluationResult( 2191 CERK, Info, DiagLoc, Value.getUnionField()->getType(), 2192 Value.getUnionValue(), Usage, Value.getUnionField()->getLocation(), 2193 CheckedTemps); 2194 } 2195 if (Value.isStruct()) { 2196 RecordDecl *RD = Type->castAs<RecordType>()->getDecl(); 2197 if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) { 2198 unsigned BaseIndex = 0; 2199 for (const CXXBaseSpecifier &BS : CD->bases()) { 2200 if (!CheckEvaluationResult(CERK, Info, DiagLoc, BS.getType(), 2201 Value.getStructBase(BaseIndex), Usage, 2202 BS.getBeginLoc(), CheckedTemps)) 2203 return false; 2204 ++BaseIndex; 2205 } 2206 } 2207 for (const auto *I : RD->fields()) { 2208 if (I->isUnnamedBitfield()) 2209 continue; 2210 2211 if (!CheckEvaluationResult(CERK, Info, DiagLoc, I->getType(), 2212 Value.getStructField(I->getFieldIndex()), 2213 Usage, I->getLocation(), CheckedTemps)) 2214 return false; 2215 } 2216 } 2217 2218 if (Value.isLValue() && 2219 CERK == CheckEvaluationResultKind::ConstantExpression) { 2220 LValue LVal; 2221 LVal.setFrom(Info.Ctx, Value); 2222 return CheckLValueConstantExpression(Info, DiagLoc, Type, LVal, Usage, 2223 CheckedTemps); 2224 } 2225 2226 if (Value.isMemberPointer() && 2227 CERK == CheckEvaluationResultKind::ConstantExpression) 2228 return CheckMemberPointerConstantExpression(Info, DiagLoc, Type, Value, Usage); 2229 2230 // Everything else is fine. 2231 return true; 2232 } 2233 2234 /// Check that this core constant expression value is a valid value for a 2235 /// constant expression. If not, report an appropriate diagnostic. Does not 2236 /// check that the expression is of literal type. 2237 static bool 2238 CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc, QualType Type, 2239 const APValue &Value, 2240 Expr::ConstExprUsage Usage = Expr::EvaluateForCodeGen) { 2241 CheckedTemporaries CheckedTemps; 2242 return CheckEvaluationResult(CheckEvaluationResultKind::ConstantExpression, 2243 Info, DiagLoc, Type, Value, Usage, 2244 SourceLocation(), CheckedTemps); 2245 } 2246 2247 /// Check that this evaluated value is fully-initialized and can be loaded by 2248 /// an lvalue-to-rvalue conversion. 2249 static bool CheckFullyInitialized(EvalInfo &Info, SourceLocation DiagLoc, 2250 QualType Type, const APValue &Value) { 2251 CheckedTemporaries CheckedTemps; 2252 return CheckEvaluationResult( 2253 CheckEvaluationResultKind::FullyInitialized, Info, DiagLoc, Type, Value, 2254 Expr::EvaluateForCodeGen, SourceLocation(), CheckedTemps); 2255 } 2256 2257 /// Enforce C++2a [expr.const]/4.17, which disallows new-expressions unless 2258 /// "the allocated storage is deallocated within the evaluation". 2259 static bool CheckMemoryLeaks(EvalInfo &Info) { 2260 if (!Info.HeapAllocs.empty()) { 2261 // We can still fold to a constant despite a compile-time memory leak, 2262 // so long as the heap allocation isn't referenced in the result (we check 2263 // that in CheckConstantExpression). 
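    // For example (illustrative only), a constexpr evaluation that performs
    //   int *p = new int(42);
    // and finishes without a matching 'delete p;' reaches this point with a
    // non-empty HeapAllocs map and is noted as a non-constant expression.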
2264     Info.CCEDiag(Info.HeapAllocs.begin()->second.AllocExpr,
2265                  diag::note_constexpr_memory_leak)
2266         << unsigned(Info.HeapAllocs.size() - 1);
2267   }
2268   return true;
2269 }
2270 
2271 static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) {
2272   // A null base expression indicates a null pointer. These are always
2273   // evaluatable, and they are false only when the offset is zero.
2274   if (!Value.getLValueBase()) {
2275     Result = !Value.getLValueOffset().isZero();
2276     return true;
2277   }
2278 
2279   // We have a non-null base. These are generally known to be true, but if it's
2280   // a weak declaration it can be null at runtime.
2281   Result = true;
2282   const ValueDecl *Decl = Value.getLValueBase().dyn_cast<const ValueDecl*>();
2283   return !Decl || !Decl->isWeak();
2284 }
2285 
2286 static bool HandleConversionToBool(const APValue &Val, bool &Result) {
2287   switch (Val.getKind()) {
2288   case APValue::None:
2289   case APValue::Indeterminate:
2290     return false;
2291   case APValue::Int:
2292     Result = Val.getInt().getBoolValue();
2293     return true;
2294   case APValue::FixedPoint:
2295     Result = Val.getFixedPoint().getBoolValue();
2296     return true;
2297   case APValue::Float:
2298     Result = !Val.getFloat().isZero();
2299     return true;
2300   case APValue::ComplexInt:
2301     Result = Val.getComplexIntReal().getBoolValue() ||
2302              Val.getComplexIntImag().getBoolValue();
2303     return true;
2304   case APValue::ComplexFloat:
2305     Result = !Val.getComplexFloatReal().isZero() ||
2306              !Val.getComplexFloatImag().isZero();
2307     return true;
2308   case APValue::LValue:
2309     return EvalPointerValueAsBool(Val, Result);
2310   case APValue::MemberPointer:
2311     Result = Val.getMemberPointerDecl();
2312     return true;
2313   case APValue::Vector:
2314   case APValue::Array:
2315   case APValue::Struct:
2316   case APValue::Union:
2317   case APValue::AddrLabelDiff:
2318     return false;
2319   }
2320 
2321   llvm_unreachable("unknown APValue kind");
2322 }
2323 
2324 static bool EvaluateAsBooleanCondition(const Expr *E, bool &Result,
2325                                        EvalInfo &Info) {
2326   assert(E->isRValue() && "missing lvalue-to-rvalue conv in bool condition");
2327   APValue Val;
2328   if (!Evaluate(Val, Info, E))
2329     return false;
2330   return HandleConversionToBool(Val, Result);
2331 }
2332 
2333 template<typename T>
2334 static bool HandleOverflow(EvalInfo &Info, const Expr *E,
2335                            const T &SrcValue, QualType DestType) {
2336   Info.CCEDiag(E, diag::note_constexpr_overflow)
2337       << SrcValue << DestType;
2338   return Info.noteUndefinedBehavior();
2339 }
2340 
2341 static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
2342                                  QualType SrcType, const APFloat &Value,
2343                                  QualType DestType, APSInt &Result) {
2344   unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
2345   // Determine whether we are converting to unsigned or signed.
2346 bool DestSigned = DestType->isSignedIntegerOrEnumerationType(); 2347 2348 Result = APSInt(DestWidth, !DestSigned); 2349 bool ignored; 2350 if (Value.convertToInteger(Result, llvm::APFloat::rmTowardZero, &ignored) 2351 & APFloat::opInvalidOp) 2352 return HandleOverflow(Info, E, Value, DestType); 2353 return true; 2354 } 2355 2356 static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E, 2357 QualType SrcType, QualType DestType, 2358 APFloat &Result) { 2359 APFloat Value = Result; 2360 bool ignored; 2361 Result.convert(Info.Ctx.getFloatTypeSemantics(DestType), 2362 APFloat::rmNearestTiesToEven, &ignored); 2363 return true; 2364 } 2365 2366 static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E, 2367 QualType DestType, QualType SrcType, 2368 const APSInt &Value) { 2369 unsigned DestWidth = Info.Ctx.getIntWidth(DestType); 2370 // Figure out if this is a truncate, extend or noop cast. 2371 // If the input is signed, do a sign extend, noop, or truncate. 2372 APSInt Result = Value.extOrTrunc(DestWidth); 2373 Result.setIsUnsigned(DestType->isUnsignedIntegerOrEnumerationType()); 2374 if (DestType->isBooleanType()) 2375 Result = Value.getBoolValue(); 2376 return Result; 2377 } 2378 2379 static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E, 2380 QualType SrcType, const APSInt &Value, 2381 QualType DestType, APFloat &Result) { 2382 Result = APFloat(Info.Ctx.getFloatTypeSemantics(DestType), 1); 2383 Result.convertFromAPInt(Value, Value.isSigned(), 2384 APFloat::rmNearestTiesToEven); 2385 return true; 2386 } 2387 2388 static bool truncateBitfieldValue(EvalInfo &Info, const Expr *E, 2389 APValue &Value, const FieldDecl *FD) { 2390 assert(FD->isBitField() && "truncateBitfieldValue on non-bitfield"); 2391 2392 if (!Value.isInt()) { 2393 // Trying to store a pointer-cast-to-integer into a bitfield. 2394 // FIXME: In this case, we should provide the diagnostic for casting 2395 // a pointer to an integer. 2396 assert(Value.isLValue() && "integral value neither int nor lvalue?"); 2397 Info.FFDiag(E); 2398 return false; 2399 } 2400 2401 APSInt &Int = Value.getInt(); 2402 unsigned OldBitWidth = Int.getBitWidth(); 2403 unsigned NewBitWidth = FD->getBitWidthValue(Info.Ctx); 2404 if (NewBitWidth < OldBitWidth) 2405 Int = Int.trunc(NewBitWidth).extend(OldBitWidth); 2406 return true; 2407 } 2408 2409 static bool EvalAndBitcastToAPInt(EvalInfo &Info, const Expr *E, 2410 llvm::APInt &Res) { 2411 APValue SVal; 2412 if (!Evaluate(SVal, Info, E)) 2413 return false; 2414 if (SVal.isInt()) { 2415 Res = SVal.getInt(); 2416 return true; 2417 } 2418 if (SVal.isFloat()) { 2419 Res = SVal.getFloat().bitcastToAPInt(); 2420 return true; 2421 } 2422 if (SVal.isVector()) { 2423 QualType VecTy = E->getType(); 2424 unsigned VecSize = Info.Ctx.getTypeSize(VecTy); 2425 QualType EltTy = VecTy->castAs<VectorType>()->getElementType(); 2426 unsigned EltSize = Info.Ctx.getTypeSize(EltTy); 2427 bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian(); 2428 Res = llvm::APInt::getNullValue(VecSize); 2429 for (unsigned i = 0; i < SVal.getVectorLength(); i++) { 2430 APValue &Elt = SVal.getVectorElt(i); 2431 llvm::APInt EltAsInt; 2432 if (Elt.isInt()) { 2433 EltAsInt = Elt.getInt(); 2434 } else if (Elt.isFloat()) { 2435 EltAsInt = Elt.getFloat().bitcastToAPInt(); 2436 } else { 2437 // Don't try to handle vectors of anything other than int or float 2438 // (not sure if it's possible to hit this case). 
2439 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr); 2440 return false; 2441 } 2442 unsigned BaseEltSize = EltAsInt.getBitWidth(); 2443 if (BigEndian) 2444 Res |= EltAsInt.zextOrTrunc(VecSize).rotr(i*EltSize+BaseEltSize); 2445 else 2446 Res |= EltAsInt.zextOrTrunc(VecSize).rotl(i*EltSize); 2447 } 2448 return true; 2449 } 2450 // Give up if the input isn't an int, float, or vector. For example, we 2451 // reject "(v4i16)(intptr_t)&a". 2452 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr); 2453 return false; 2454 } 2455 2456 /// Perform the given integer operation, which is known to need at most BitWidth 2457 /// bits, and check for overflow in the original type (if that type was not an 2458 /// unsigned type). 2459 template<typename Operation> 2460 static bool CheckedIntArithmetic(EvalInfo &Info, const Expr *E, 2461 const APSInt &LHS, const APSInt &RHS, 2462 unsigned BitWidth, Operation Op, 2463 APSInt &Result) { 2464 if (LHS.isUnsigned()) { 2465 Result = Op(LHS, RHS); 2466 return true; 2467 } 2468 2469 APSInt Value(Op(LHS.extend(BitWidth), RHS.extend(BitWidth)), false); 2470 Result = Value.trunc(LHS.getBitWidth()); 2471 if (Result.extend(BitWidth) != Value) { 2472 if (Info.checkingForUndefinedBehavior()) 2473 Info.Ctx.getDiagnostics().Report(E->getExprLoc(), 2474 diag::warn_integer_constant_overflow) 2475 << Result.toString(10) << E->getType(); 2476 else 2477 return HandleOverflow(Info, E, Value, E->getType()); 2478 } 2479 return true; 2480 } 2481 2482 /// Perform the given binary integer operation. 2483 static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS, 2484 BinaryOperatorKind Opcode, APSInt RHS, 2485 APSInt &Result) { 2486 switch (Opcode) { 2487 default: 2488 Info.FFDiag(E); 2489 return false; 2490 case BO_Mul: 2491 return CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() * 2, 2492 std::multiplies<APSInt>(), Result); 2493 case BO_Add: 2494 return CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() + 1, 2495 std::plus<APSInt>(), Result); 2496 case BO_Sub: 2497 return CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() + 1, 2498 std::minus<APSInt>(), Result); 2499 case BO_And: Result = LHS & RHS; return true; 2500 case BO_Xor: Result = LHS ^ RHS; return true; 2501 case BO_Or: Result = LHS | RHS; return true; 2502 case BO_Div: 2503 case BO_Rem: 2504 if (RHS == 0) { 2505 Info.FFDiag(E, diag::note_expr_divide_by_zero); 2506 return false; 2507 } 2508 Result = (Opcode == BO_Rem ? LHS % RHS : LHS / RHS); 2509 // Check for overflow case: INT_MIN / -1 or INT_MIN % -1. APSInt supports 2510 // this operation and gives the two's complement result. 2511 if (RHS.isNegative() && RHS.isAllOnesValue() && 2512 LHS.isSigned() && LHS.isMinSignedValue()) 2513 return HandleOverflow(Info, E, -LHS.extend(LHS.getBitWidth() + 1), 2514 E->getType()); 2515 return true; 2516 case BO_Shl: { 2517 if (Info.getLangOpts().OpenCL) 2518 // OpenCL 6.3j: shift values are effectively % word size of LHS. 2519 RHS &= APSInt(llvm::APInt(RHS.getBitWidth(), 2520 static_cast<uint64_t>(LHS.getBitWidth() - 1)), 2521 RHS.isUnsigned()); 2522 else if (RHS.isSigned() && RHS.isNegative()) { 2523 // During constant-folding, a negative shift is an opposite shift. Such 2524 // a shift is not a constant expression. 2525 Info.CCEDiag(E, diag::note_constexpr_negative_shift) << RHS; 2526 RHS = -RHS; 2527 goto shift_right; 2528 } 2529 shift_left: 2530 // C++11 [expr.shift]p1: Shift width must be less than the bit width of 2531 // the shifted type. 
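    // For example, shifting a 32-bit int by 32 is capped to a shift of 31 and
    // diagnosed as an over-wide shift below.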
2532     unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1);
2533     if (SA != RHS) {
2534       Info.CCEDiag(E, diag::note_constexpr_large_shift)
2535           << RHS << E->getType() << LHS.getBitWidth();
2536     } else if (LHS.isSigned() && !Info.getLangOpts().CPlusPlus2a) {
2537       // C++11 [expr.shift]p2: A signed left shift must have a non-negative
2538       // operand, and must not overflow the corresponding unsigned type.
2539       // C++2a [expr.shift]p2: E1 << E2 is the unique value congruent to
2540       // E1 x 2^E2 modulo 2^N.
2541       if (LHS.isNegative())
2542         Info.CCEDiag(E, diag::note_constexpr_lshift_of_negative) << LHS;
2543       else if (LHS.countLeadingZeros() < SA)
2544         Info.CCEDiag(E, diag::note_constexpr_lshift_discards);
2545     }
2546     Result = LHS << SA;
2547     return true;
2548   }
2549   case BO_Shr: {
2550     if (Info.getLangOpts().OpenCL)
2551       // OpenCL 6.3j: shift values are effectively % word size of LHS.
2552       RHS &= APSInt(llvm::APInt(RHS.getBitWidth(),
2553                                 static_cast<uint64_t>(LHS.getBitWidth() - 1)),
2554                     RHS.isUnsigned());
2555     else if (RHS.isSigned() && RHS.isNegative()) {
2556       // During constant-folding, a negative shift is an opposite shift. Such a
2557       // shift is not a constant expression.
2558       Info.CCEDiag(E, diag::note_constexpr_negative_shift) << RHS;
2559       RHS = -RHS;
2560       goto shift_left;
2561     }
2562   shift_right:
2563     // C++11 [expr.shift]p1: Shift width must be less than the bit width of the
2564     // shifted type.
2565     unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1);
2566     if (SA != RHS)
2567       Info.CCEDiag(E, diag::note_constexpr_large_shift)
2568           << RHS << E->getType() << LHS.getBitWidth();
2569     Result = LHS >> SA;
2570     return true;
2571   }
2572 
2573   case BO_LT: Result = LHS < RHS; return true;
2574   case BO_GT: Result = LHS > RHS; return true;
2575   case BO_LE: Result = LHS <= RHS; return true;
2576   case BO_GE: Result = LHS >= RHS; return true;
2577   case BO_EQ: Result = LHS == RHS; return true;
2578   case BO_NE: Result = LHS != RHS; return true;
2579   case BO_Cmp:
2580     llvm_unreachable("BO_Cmp should be handled elsewhere");
2581   }
2582 }
2583 
2584 /// Perform the given binary floating-point operation, in-place, on LHS.
2585 static bool handleFloatFloatBinOp(EvalInfo &Info, const Expr *E,
2586                                   APFloat &LHS, BinaryOperatorKind Opcode,
2587                                   const APFloat &RHS) {
2588   switch (Opcode) {
2589   default:
2590     Info.FFDiag(E);
2591     return false;
2592   case BO_Mul:
2593     LHS.multiply(RHS, APFloat::rmNearestTiesToEven);
2594     break;
2595   case BO_Add:
2596     LHS.add(RHS, APFloat::rmNearestTiesToEven);
2597     break;
2598   case BO_Sub:
2599     LHS.subtract(RHS, APFloat::rmNearestTiesToEven);
2600     break;
2601   case BO_Div:
2602     // [expr.mul]p4:
2603     //   If the second operand of / or % is zero the behavior is undefined.
2604     if (RHS.isZero())
2605       Info.CCEDiag(E, diag::note_expr_divide_by_zero);
2606     LHS.divide(RHS, APFloat::rmNearestTiesToEven);
2607     break;
2608   }
2609 
2610   // [expr.pre]p4:
2611   //   If during the evaluation of an expression, the result is not
2612   //   mathematically defined [...], the behavior is undefined.
2613   // FIXME: C++ rules require us to not conform to IEEE 754 here.
2614   if (LHS.isNaN()) {
2615     Info.CCEDiag(E, diag::note_constexpr_float_arithmetic) << LHS.isNaN();
2616     return Info.noteUndefinedBehavior();
2617   }
2618   return true;
2619 }
2620 
2621 /// Cast an lvalue referring to a base subobject to a derived class, by
2622 /// truncating the lvalue's path to the given length.
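/// For example, if the designator currently describes a B base class subobject
/// of an object of derived type D, dropping the trailing base-class entries
/// (and subtracting their offsets) leaves it describing the D object itself.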
2623 static bool CastToDerivedClass(EvalInfo &Info, const Expr *E, LValue &Result, 2624 const RecordDecl *TruncatedType, 2625 unsigned TruncatedElements) { 2626 SubobjectDesignator &D = Result.Designator; 2627 2628 // Check we actually point to a derived class object. 2629 if (TruncatedElements == D.Entries.size()) 2630 return true; 2631 assert(TruncatedElements >= D.MostDerivedPathLength && 2632 "not casting to a derived class"); 2633 if (!Result.checkSubobject(Info, E, CSK_Derived)) 2634 return false; 2635 2636 // Truncate the path to the subobject, and remove any derived-to-base offsets. 2637 const RecordDecl *RD = TruncatedType; 2638 for (unsigned I = TruncatedElements, N = D.Entries.size(); I != N; ++I) { 2639 if (RD->isInvalidDecl()) return false; 2640 const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD); 2641 const CXXRecordDecl *Base = getAsBaseClass(D.Entries[I]); 2642 if (isVirtualBaseClass(D.Entries[I])) 2643 Result.Offset -= Layout.getVBaseClassOffset(Base); 2644 else 2645 Result.Offset -= Layout.getBaseClassOffset(Base); 2646 RD = Base; 2647 } 2648 D.Entries.resize(TruncatedElements); 2649 return true; 2650 } 2651 2652 static bool HandleLValueDirectBase(EvalInfo &Info, const Expr *E, LValue &Obj, 2653 const CXXRecordDecl *Derived, 2654 const CXXRecordDecl *Base, 2655 const ASTRecordLayout *RL = nullptr) { 2656 if (!RL) { 2657 if (Derived->isInvalidDecl()) return false; 2658 RL = &Info.Ctx.getASTRecordLayout(Derived); 2659 } 2660 2661 Obj.getLValueOffset() += RL->getBaseClassOffset(Base); 2662 Obj.addDecl(Info, E, Base, /*Virtual*/ false); 2663 return true; 2664 } 2665 2666 static bool HandleLValueBase(EvalInfo &Info, const Expr *E, LValue &Obj, 2667 const CXXRecordDecl *DerivedDecl, 2668 const CXXBaseSpecifier *Base) { 2669 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 2670 2671 if (!Base->isVirtual()) 2672 return HandleLValueDirectBase(Info, E, Obj, DerivedDecl, BaseDecl); 2673 2674 SubobjectDesignator &D = Obj.Designator; 2675 if (D.Invalid) 2676 return false; 2677 2678 // Extract most-derived object and corresponding type. 2679 DerivedDecl = D.MostDerivedType->getAsCXXRecordDecl(); 2680 if (!CastToDerivedClass(Info, E, Obj, DerivedDecl, D.MostDerivedPathLength)) 2681 return false; 2682 2683 // Find the virtual base class. 2684 if (DerivedDecl->isInvalidDecl()) return false; 2685 const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(DerivedDecl); 2686 Obj.getLValueOffset() += Layout.getVBaseClassOffset(BaseDecl); 2687 Obj.addDecl(Info, E, BaseDecl, /*Virtual*/ true); 2688 return true; 2689 } 2690 2691 static bool HandleLValueBasePath(EvalInfo &Info, const CastExpr *E, 2692 QualType Type, LValue &Result) { 2693 for (CastExpr::path_const_iterator PathI = E->path_begin(), 2694 PathE = E->path_end(); 2695 PathI != PathE; ++PathI) { 2696 if (!HandleLValueBase(Info, E, Result, Type->getAsCXXRecordDecl(), 2697 *PathI)) 2698 return false; 2699 Type = (*PathI)->getType(); 2700 } 2701 return true; 2702 } 2703 2704 /// Cast an lvalue referring to a derived class to a known base subobject. 
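/// Each base specifier on the path from DerivedRD to BaseRD is applied with
/// HandleLValueBase, extending the designator and adjusting the offset one
/// base class at a time.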
2705 static bool CastToBaseClass(EvalInfo &Info, const Expr *E, LValue &Result, 2706 const CXXRecordDecl *DerivedRD, 2707 const CXXRecordDecl *BaseRD) { 2708 CXXBasePaths Paths(/*FindAmbiguities=*/false, 2709 /*RecordPaths=*/true, /*DetectVirtual=*/false); 2710 if (!DerivedRD->isDerivedFrom(BaseRD, Paths)) 2711 llvm_unreachable("Class must be derived from the passed in base class!"); 2712 2713 for (CXXBasePathElement &Elem : Paths.front()) 2714 if (!HandleLValueBase(Info, E, Result, Elem.Class, Elem.Base)) 2715 return false; 2716 return true; 2717 } 2718 2719 /// Update LVal to refer to the given field, which must be a member of the type 2720 /// currently described by LVal. 2721 static bool HandleLValueMember(EvalInfo &Info, const Expr *E, LValue &LVal, 2722 const FieldDecl *FD, 2723 const ASTRecordLayout *RL = nullptr) { 2724 if (!RL) { 2725 if (FD->getParent()->isInvalidDecl()) return false; 2726 RL = &Info.Ctx.getASTRecordLayout(FD->getParent()); 2727 } 2728 2729 unsigned I = FD->getFieldIndex(); 2730 LVal.adjustOffset(Info.Ctx.toCharUnitsFromBits(RL->getFieldOffset(I))); 2731 LVal.addDecl(Info, E, FD); 2732 return true; 2733 } 2734 2735 /// Update LVal to refer to the given indirect field. 2736 static bool HandleLValueIndirectMember(EvalInfo &Info, const Expr *E, 2737 LValue &LVal, 2738 const IndirectFieldDecl *IFD) { 2739 for (const auto *C : IFD->chain()) 2740 if (!HandleLValueMember(Info, E, LVal, cast<FieldDecl>(C))) 2741 return false; 2742 return true; 2743 } 2744 2745 /// Get the size of the given type in char units. 2746 static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc, 2747 QualType Type, CharUnits &Size) { 2748 // sizeof(void), __alignof__(void), sizeof(function) = 1 as a gcc 2749 // extension. 2750 if (Type->isVoidType() || Type->isFunctionType()) { 2751 Size = CharUnits::One(); 2752 return true; 2753 } 2754 2755 if (Type->isDependentType()) { 2756 Info.FFDiag(Loc); 2757 return false; 2758 } 2759 2760 if (!Type->isConstantSizeType()) { 2761 // sizeof(vla) is not a constantexpr: C99 6.5.3.4p2. 2762 // FIXME: Better diagnostic. 2763 Info.FFDiag(Loc); 2764 return false; 2765 } 2766 2767 Size = Info.Ctx.getTypeSizeInChars(Type); 2768 return true; 2769 } 2770 2771 /// Update a pointer value to model pointer arithmetic. 2772 /// \param Info - Information about the ongoing evaluation. 2773 /// \param E - The expression being evaluated, for diagnostic purposes. 2774 /// \param LVal - The pointer value to be updated. 2775 /// \param EltTy - The pointee type represented by LVal. 2776 /// \param Adjustment - The adjustment, in objects of type EltTy, to add. 2777 static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E, 2778 LValue &LVal, QualType EltTy, 2779 APSInt Adjustment) { 2780 CharUnits SizeOfPointee; 2781 if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfPointee)) 2782 return false; 2783 2784 LVal.adjustOffsetAndIndex(Info, E, Adjustment, SizeOfPointee); 2785 return true; 2786 } 2787 2788 static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E, 2789 LValue &LVal, QualType EltTy, 2790 int64_t Adjustment) { 2791 return HandleLValueArrayAdjustment(Info, E, LVal, EltTy, 2792 APSInt::get(Adjustment)); 2793 } 2794 2795 /// Update an lvalue to refer to a component of a complex number. 2796 /// \param Info - Information about the ongoing evaluation. 2797 /// \param LVal - The lvalue to be updated. 2798 /// \param EltTy - The complex number's component type. 2799 /// \param Imag - False for the real component, true for the imaginary. 
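/// A _Complex value is stored as its real component followed by its imaginary
/// component, so selecting the imaginary component adds one component-size to
/// the offset below.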
2800 static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E, 2801 LValue &LVal, QualType EltTy, 2802 bool Imag) { 2803 if (Imag) { 2804 CharUnits SizeOfComponent; 2805 if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfComponent)) 2806 return false; 2807 LVal.Offset += SizeOfComponent; 2808 } 2809 LVal.addComplex(Info, E, EltTy, Imag); 2810 return true; 2811 } 2812 2813 /// Try to evaluate the initializer for a variable declaration. 2814 /// 2815 /// \param Info Information about the ongoing evaluation. 2816 /// \param E An expression to be used when printing diagnostics. 2817 /// \param VD The variable whose initializer should be obtained. 2818 /// \param Frame The frame in which the variable was created. Must be null 2819 /// if this variable is not local to the evaluation. 2820 /// \param Result Filled in with a pointer to the value of the variable. 2821 static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E, 2822 const VarDecl *VD, CallStackFrame *Frame, 2823 APValue *&Result, const LValue *LVal) { 2824 2825 // If this is a parameter to an active constexpr function call, perform 2826 // argument substitution. 2827 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD)) { 2828 // Assume arguments of a potential constant expression are unknown 2829 // constant expressions. 2830 if (Info.checkingPotentialConstantExpression()) 2831 return false; 2832 if (!Frame || !Frame->Arguments) { 2833 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr); 2834 return false; 2835 } 2836 Result = &Frame->Arguments[PVD->getFunctionScopeIndex()]; 2837 return true; 2838 } 2839 2840 // If this is a local variable, dig out its value. 2841 if (Frame) { 2842 Result = LVal ? Frame->getTemporary(VD, LVal->getLValueVersion()) 2843 : Frame->getCurrentTemporary(VD); 2844 if (!Result) { 2845 // Assume variables referenced within a lambda's call operator that were 2846 // not declared within the call operator are captures and during checking 2847 // of a potential constant expression, assume they are unknown constant 2848 // expressions. 2849 assert(isLambdaCallOperator(Frame->Callee) && 2850 (VD->getDeclContext() != Frame->Callee || VD->isInitCapture()) && 2851 "missing value for local variable"); 2852 if (Info.checkingPotentialConstantExpression()) 2853 return false; 2854 // FIXME: implement capture evaluation during constant expr evaluation. 2855 Info.FFDiag(E->getBeginLoc(), 2856 diag::note_unimplemented_constexpr_lambda_feature_ast) 2857 << "captures not currently allowed"; 2858 return false; 2859 } 2860 return true; 2861 } 2862 2863 // Dig out the initializer, and use the declaration which it's attached to. 2864 const Expr *Init = VD->getAnyInitializer(VD); 2865 if (!Init || Init->isValueDependent()) { 2866 // If we're checking a potential constant expression, the variable could be 2867 // initialized later. 2868 if (!Info.checkingPotentialConstantExpression()) 2869 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr); 2870 return false; 2871 } 2872 2873 // If we're currently evaluating the initializer of this declaration, use that 2874 // in-flight value. 2875 if (Info.EvaluatingDecl.dyn_cast<const ValueDecl*>() == VD) { 2876 Result = Info.EvaluatingDeclValue; 2877 return true; 2878 } 2879 2880 // Never evaluate the initializer of a weak variable. We can't be sure that 2881 // this is the definition which will be used. 
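  // For example, a definition annotated with __attribute__((weak)) may be
  // replaced by a strong definition from another translation unit at link
  // time, so the initializer visible here is not necessarily the one that
  // will be used.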
2882 if (VD->isWeak()) { 2883 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr); 2884 return false; 2885 } 2886 2887 // Check that we can fold the initializer. In C++, we will have already done 2888 // this in the cases where it matters for conformance. 2889 SmallVector<PartialDiagnosticAt, 8> Notes; 2890 if (!VD->evaluateValue(Notes)) { 2891 Info.FFDiag(E, diag::note_constexpr_var_init_non_constant, 2892 Notes.size() + 1) << VD; 2893 Info.Note(VD->getLocation(), diag::note_declared_at); 2894 Info.addNotes(Notes); 2895 return false; 2896 } else if (!VD->checkInitIsICE()) { 2897 Info.CCEDiag(E, diag::note_constexpr_var_init_non_constant, 2898 Notes.size() + 1) << VD; 2899 Info.Note(VD->getLocation(), diag::note_declared_at); 2900 Info.addNotes(Notes); 2901 } 2902 2903 Result = VD->getEvaluatedValue(); 2904 return true; 2905 } 2906 2907 static bool IsConstNonVolatile(QualType T) { 2908 Qualifiers Quals = T.getQualifiers(); 2909 return Quals.hasConst() && !Quals.hasVolatile(); 2910 } 2911 2912 /// Get the base index of the given base class within an APValue representing 2913 /// the given derived class. 2914 static unsigned getBaseIndex(const CXXRecordDecl *Derived, 2915 const CXXRecordDecl *Base) { 2916 Base = Base->getCanonicalDecl(); 2917 unsigned Index = 0; 2918 for (CXXRecordDecl::base_class_const_iterator I = Derived->bases_begin(), 2919 E = Derived->bases_end(); I != E; ++I, ++Index) { 2920 if (I->getType()->getAsCXXRecordDecl()->getCanonicalDecl() == Base) 2921 return Index; 2922 } 2923 2924 llvm_unreachable("base class missing from derived class's bases list"); 2925 } 2926 2927 /// Extract the value of a character from a string literal. 2928 static APSInt extractStringLiteralCharacter(EvalInfo &Info, const Expr *Lit, 2929 uint64_t Index) { 2930 assert(!isa<SourceLocExpr>(Lit) && 2931 "SourceLocExpr should have already been converted to a StringLiteral"); 2932 2933 // FIXME: Support MakeStringConstant 2934 if (const auto *ObjCEnc = dyn_cast<ObjCEncodeExpr>(Lit)) { 2935 std::string Str; 2936 Info.Ctx.getObjCEncodingForType(ObjCEnc->getEncodedType(), Str); 2937 assert(Index <= Str.size() && "Index too large"); 2938 return APSInt::getUnsigned(Str.c_str()[Index]); 2939 } 2940 2941 if (auto PE = dyn_cast<PredefinedExpr>(Lit)) 2942 Lit = PE->getFunctionName(); 2943 const StringLiteral *S = cast<StringLiteral>(Lit); 2944 const ConstantArrayType *CAT = 2945 Info.Ctx.getAsConstantArrayType(S->getType()); 2946 assert(CAT && "string literal isn't an array"); 2947 QualType CharType = CAT->getElementType(); 2948 assert(CharType->isIntegerType() && "unexpected character type"); 2949 2950 APSInt Value(S->getCharByteWidth() * Info.Ctx.getCharWidth(), 2951 CharType->isUnsignedIntegerType()); 2952 if (Index < S->getLength()) 2953 Value = S->getCodeUnit(Index); 2954 return Value; 2955 } 2956 2957 // Expand a string literal into an array of characters. 2958 // 2959 // FIXME: This is inefficient; we should probably introduce something similar 2960 // to the LLVM ConstantDataArray to make this cheaper. 2961 static void expandStringLiteral(EvalInfo &Info, const StringLiteral *S, 2962 APValue &Result, 2963 QualType AllocType = QualType()) { 2964 const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType( 2965 AllocType.isNull() ? 
S->getType() : AllocType); 2966 assert(CAT && "string literal isn't an array"); 2967 QualType CharType = CAT->getElementType(); 2968 assert(CharType->isIntegerType() && "unexpected character type"); 2969 2970 unsigned Elts = CAT->getSize().getZExtValue(); 2971 Result = APValue(APValue::UninitArray(), 2972 std::min(S->getLength(), Elts), Elts); 2973 APSInt Value(S->getCharByteWidth() * Info.Ctx.getCharWidth(), 2974 CharType->isUnsignedIntegerType()); 2975 if (Result.hasArrayFiller()) 2976 Result.getArrayFiller() = APValue(Value); 2977 for (unsigned I = 0, N = Result.getArrayInitializedElts(); I != N; ++I) { 2978 Value = S->getCodeUnit(I); 2979 Result.getArrayInitializedElt(I) = APValue(Value); 2980 } 2981 } 2982 2983 // Expand an array so that it has more than Index filled elements. 2984 static void expandArray(APValue &Array, unsigned Index) { 2985 unsigned Size = Array.getArraySize(); 2986 assert(Index < Size); 2987 2988 // Always at least double the number of elements for which we store a value. 2989 unsigned OldElts = Array.getArrayInitializedElts(); 2990 unsigned NewElts = std::max(Index+1, OldElts * 2); 2991 NewElts = std::min(Size, std::max(NewElts, 8u)); 2992 2993 // Copy the data across. 2994 APValue NewValue(APValue::UninitArray(), NewElts, Size); 2995 for (unsigned I = 0; I != OldElts; ++I) 2996 NewValue.getArrayInitializedElt(I).swap(Array.getArrayInitializedElt(I)); 2997 for (unsigned I = OldElts; I != NewElts; ++I) 2998 NewValue.getArrayInitializedElt(I) = Array.getArrayFiller(); 2999 if (NewValue.hasArrayFiller()) 3000 NewValue.getArrayFiller() = Array.getArrayFiller(); 3001 Array.swap(NewValue); 3002 } 3003 3004 /// Determine whether a type would actually be read by an lvalue-to-rvalue 3005 /// conversion. If it's of class type, we may assume that the copy operation 3006 /// is trivial. Note that this is never true for a union type with fields 3007 /// (because the copy always "reads" the active member) and always true for 3008 /// a non-class type. 3009 static bool isReadByLvalueToRvalueConversion(QualType T) { 3010 CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl(); 3011 if (!RD || (RD->isUnion() && !RD->field_empty())) 3012 return true; 3013 if (RD->isEmpty()) 3014 return false; 3015 3016 for (auto *Field : RD->fields()) 3017 if (isReadByLvalueToRvalueConversion(Field->getType())) 3018 return true; 3019 3020 for (auto &BaseSpec : RD->bases()) 3021 if (isReadByLvalueToRvalueConversion(BaseSpec.getType())) 3022 return true; 3023 3024 return false; 3025 } 3026 3027 /// Diagnose an attempt to read from any unreadable field within the specified 3028 /// type, which might be a class type. 3029 static bool diagnoseMutableFields(EvalInfo &Info, const Expr *E, AccessKinds AK, 3030 QualType T) { 3031 CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl(); 3032 if (!RD) 3033 return false; 3034 3035 if (!RD->hasMutableFields()) 3036 return false; 3037 3038 for (auto *Field : RD->fields()) { 3039 // If we're actually going to read this field in some way, then it can't 3040 // be mutable. If we're in a union, then assigning to a mutable field 3041 // (even an empty one) can change the active member, so that's not OK. 3042 // FIXME: Add core issue number for the union case. 
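    // For example (illustrative only), reading 'm' from
    //   struct S { mutable int m; };
    // during constant evaluation is rejected here, since a mutable member's
    // value can change even through a const object.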
3043 if (Field->isMutable() && 3044 (RD->isUnion() || isReadByLvalueToRvalueConversion(Field->getType()))) { 3045 Info.FFDiag(E, diag::note_constexpr_access_mutable, 1) << AK << Field; 3046 Info.Note(Field->getLocation(), diag::note_declared_at); 3047 return true; 3048 } 3049 3050 if (diagnoseMutableFields(Info, E, AK, Field->getType())) 3051 return true; 3052 } 3053 3054 for (auto &BaseSpec : RD->bases()) 3055 if (diagnoseMutableFields(Info, E, AK, BaseSpec.getType())) 3056 return true; 3057 3058 // All mutable fields were empty, and thus not actually read. 3059 return false; 3060 } 3061 3062 static bool lifetimeStartedInEvaluation(EvalInfo &Info, 3063 APValue::LValueBase Base, 3064 bool MutableSubobject = false) { 3065 // A temporary we created. 3066 if (Base.getCallIndex()) 3067 return true; 3068 3069 auto *Evaluating = Info.EvaluatingDecl.dyn_cast<const ValueDecl*>(); 3070 if (!Evaluating) 3071 return false; 3072 3073 auto *BaseD = Base.dyn_cast<const ValueDecl*>(); 3074 3075 switch (Info.IsEvaluatingDecl) { 3076 case EvalInfo::EvaluatingDeclKind::None: 3077 return false; 3078 3079 case EvalInfo::EvaluatingDeclKind::Ctor: 3080 // The variable whose initializer we're evaluating. 3081 if (BaseD) 3082 return declaresSameEntity(Evaluating, BaseD); 3083 3084 // A temporary lifetime-extended by the variable whose initializer we're 3085 // evaluating. 3086 if (auto *BaseE = Base.dyn_cast<const Expr *>()) 3087 if (auto *BaseMTE = dyn_cast<MaterializeTemporaryExpr>(BaseE)) 3088 return declaresSameEntity(BaseMTE->getExtendingDecl(), Evaluating); 3089 return false; 3090 3091 case EvalInfo::EvaluatingDeclKind::Dtor: 3092 // C++2a [expr.const]p6: 3093 // [during constant destruction] the lifetime of a and its non-mutable 3094 // subobjects (but not its mutable subobjects) [are] considered to start 3095 // within e. 3096 // 3097 // FIXME: We can meaningfully extend this to cover non-const objects, but 3098 // we will need special handling: we should be able to access only 3099 // subobjects of such objects that are themselves declared const. 3100 if (!BaseD || 3101 !(BaseD->getType().isConstQualified() || 3102 BaseD->getType()->isReferenceType()) || 3103 MutableSubobject) 3104 return false; 3105 return declaresSameEntity(Evaluating, BaseD); 3106 } 3107 3108 llvm_unreachable("unknown evaluating decl kind"); 3109 } 3110 3111 namespace { 3112 /// A handle to a complete object (an object that is not a subobject of 3113 /// another object). 3114 struct CompleteObject { 3115 /// The identity of the object. 3116 APValue::LValueBase Base; 3117 /// The value of the complete object. 3118 APValue *Value; 3119 /// The type of the complete object. 3120 QualType Type; 3121 3122 CompleteObject() : Value(nullptr) {} 3123 CompleteObject(APValue::LValueBase Base, APValue *Value, QualType Type) 3124 : Base(Base), Value(Value), Type(Type) {} 3125 3126 bool mayAccessMutableMembers(EvalInfo &Info, AccessKinds AK) const { 3127 // In C++14 onwards, it is permitted to read a mutable member whose 3128 // lifetime began within the evaluation. 3129 // FIXME: Should we also allow this in C++11? 
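// [Editorial note, not in the original source: a sketch of the C++14
//  allowance mentioned above; names are invented.]
//
//   struct S { mutable int m = 1; };
//   constexpr int f() { S s; return s.m; }
//   static_assert(f() == 1, ""); // OK in C++14: s's lifetime began within
//                                // the evaluation of f()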
3130 if (!Info.getLangOpts().CPlusPlus14)
3131 return false;
3132 return lifetimeStartedInEvaluation(Info, Base, /*MutableSubobject*/true);
3133 }
3134
3135 explicit operator bool() const { return !Type.isNull(); }
3136 };
3137 } // end anonymous namespace
3138
3139 static QualType getSubobjectType(QualType ObjType, QualType SubobjType,
3140 bool IsMutable = false) {
3141 // C++ [basic.type.qualifier]p1:
3142 // - A const object is an object of type const T or a non-mutable subobject
3143 // of a const object.
3144 if (ObjType.isConstQualified() && !IsMutable)
3145 SubobjType.addConst();
3146 // - A volatile object is an object of type volatile T or a subobject of a
3147 // volatile object.
3148 if (ObjType.isVolatileQualified())
3149 SubobjType.addVolatile();
3150 return SubobjType;
3151 }
3152
3153 /// Find the designated sub-object of an rvalue.
3154 template<typename SubobjectHandler>
3155 typename SubobjectHandler::result_type
3156 findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
3157 const SubobjectDesignator &Sub, SubobjectHandler &handler) {
3158 if (Sub.Invalid)
3159 // A diagnostic will have already been produced.
3160 return handler.failed();
3161 if (Sub.isOnePastTheEnd() || Sub.isMostDerivedAnUnsizedArray()) {
3162 if (Info.getLangOpts().CPlusPlus11)
3163 Info.FFDiag(E, Sub.isOnePastTheEnd()
3164 ? diag::note_constexpr_access_past_end
3165 : diag::note_constexpr_access_unsized_array)
3166 << handler.AccessKind;
3167 else
3168 Info.FFDiag(E);
3169 return handler.failed();
3170 }
3171
3172 APValue *O = Obj.Value;
3173 QualType ObjType = Obj.Type;
3174 const FieldDecl *LastField = nullptr;
3175 const FieldDecl *VolatileField = nullptr;
3176
3177 // Walk the designator's path to find the subobject.
3178 for (unsigned I = 0, N = Sub.Entries.size(); /**/; ++I) {
3179 // Reading an indeterminate value is undefined, but assigning over one is OK.
3180 if ((O->isAbsent() && !(handler.AccessKind == AK_Construct && I == N)) ||
3181 (O->isIndeterminate() && handler.AccessKind != AK_Construct &&
3182 handler.AccessKind != AK_Assign &&
3183 handler.AccessKind != AK_ReadObjectRepresentation)) {
3184 if (!Info.checkingPotentialConstantExpression())
3185 Info.FFDiag(E, diag::note_constexpr_access_uninit)
3186 << handler.AccessKind << O->isIndeterminate();
3187 return handler.failed();
3188 }
3189
3190 // C++ [class.ctor]p5, C++ [class.dtor]p5:
3191 // const and volatile semantics are not applied on an object under
3192 // {con,de}struction.
3193 if ((ObjType.isConstQualified() || ObjType.isVolatileQualified()) &&
3194 ObjType->isRecordType() &&
3195 Info.isEvaluatingCtorDtor(
3196 Obj.Base, llvm::makeArrayRef(Sub.Entries.begin(),
3197 Sub.Entries.begin() + I)) !=
3198 ConstructionPhase::None) {
3199 ObjType = Info.Ctx.getCanonicalType(ObjType);
3200 ObjType.removeLocalConst();
3201 ObjType.removeLocalVolatile();
3202 }
3203
3204 // If this is our last pass, check that the final object type is OK.
3205 if (I == N || (I == N - 1 && ObjType->isAnyComplexType())) {
3206 // Accesses to volatile objects are prohibited.
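// [Editorial note, not in the original source: an illustrative example; the
//  variable names are invented.]
//
//   constexpr volatile int v = 3;
//   constexpr int k = const_cast<const int &>(v); // rejected: 'v' is a
//                                                 // volatile object even
//                                                 // though the access path
//                                                 // is not volatile-qualified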
3207 if (ObjType.isVolatileQualified() && isFormalAccess(handler.AccessKind)) { 3208 if (Info.getLangOpts().CPlusPlus) { 3209 int DiagKind; 3210 SourceLocation Loc; 3211 const NamedDecl *Decl = nullptr; 3212 if (VolatileField) { 3213 DiagKind = 2; 3214 Loc = VolatileField->getLocation(); 3215 Decl = VolatileField; 3216 } else if (auto *VD = Obj.Base.dyn_cast<const ValueDecl*>()) { 3217 DiagKind = 1; 3218 Loc = VD->getLocation(); 3219 Decl = VD; 3220 } else { 3221 DiagKind = 0; 3222 if (auto *E = Obj.Base.dyn_cast<const Expr *>()) 3223 Loc = E->getExprLoc(); 3224 } 3225 Info.FFDiag(E, diag::note_constexpr_access_volatile_obj, 1) 3226 << handler.AccessKind << DiagKind << Decl; 3227 Info.Note(Loc, diag::note_constexpr_volatile_here) << DiagKind; 3228 } else { 3229 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr); 3230 } 3231 return handler.failed(); 3232 } 3233 3234 // If we are reading an object of class type, there may still be more 3235 // things we need to check: if there are any mutable subobjects, we 3236 // cannot perform this read. (This only happens when performing a trivial 3237 // copy or assignment.) 3238 if (ObjType->isRecordType() && 3239 !Obj.mayAccessMutableMembers(Info, handler.AccessKind) && 3240 diagnoseMutableFields(Info, E, handler.AccessKind, ObjType)) 3241 return handler.failed(); 3242 } 3243 3244 if (I == N) { 3245 if (!handler.found(*O, ObjType)) 3246 return false; 3247 3248 // If we modified a bit-field, truncate it to the right width. 3249 if (isModification(handler.AccessKind) && 3250 LastField && LastField->isBitField() && 3251 !truncateBitfieldValue(Info, E, *O, LastField)) 3252 return false; 3253 3254 return true; 3255 } 3256 3257 LastField = nullptr; 3258 if (ObjType->isArrayType()) { 3259 // Next subobject is an array element. 3260 const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(ObjType); 3261 assert(CAT && "vla in literal type?"); 3262 uint64_t Index = Sub.Entries[I].getAsArrayIndex(); 3263 if (CAT->getSize().ule(Index)) { 3264 // Note, it should not be possible to form a pointer with a valid 3265 // designator which points more than one past the end of the array. 3266 if (Info.getLangOpts().CPlusPlus11) 3267 Info.FFDiag(E, diag::note_constexpr_access_past_end) 3268 << handler.AccessKind; 3269 else 3270 Info.FFDiag(E); 3271 return handler.failed(); 3272 } 3273 3274 ObjType = CAT->getElementType(); 3275 3276 if (O->getArrayInitializedElts() > Index) 3277 O = &O->getArrayInitializedElt(Index); 3278 else if (!isRead(handler.AccessKind)) { 3279 expandArray(*O, Index); 3280 O = &O->getArrayInitializedElt(Index); 3281 } else 3282 O = &O->getArrayFiller(); 3283 } else if (ObjType->isAnyComplexType()) { 3284 // Next subobject is a complex number. 3285 uint64_t Index = Sub.Entries[I].getAsArrayIndex(); 3286 if (Index > 1) { 3287 if (Info.getLangOpts().CPlusPlus11) 3288 Info.FFDiag(E, diag::note_constexpr_access_past_end) 3289 << handler.AccessKind; 3290 else 3291 Info.FFDiag(E); 3292 return handler.failed(); 3293 } 3294 3295 ObjType = getSubobjectType( 3296 ObjType, ObjType->castAs<ComplexType>()->getElementType()); 3297 3298 assert(I == N - 1 && "extracting subobject of scalar?"); 3299 if (O->isComplexInt()) { 3300 return handler.found(Index ? O->getComplexIntImag() 3301 : O->getComplexIntReal(), ObjType); 3302 } else { 3303 assert(O->isComplexFloat()); 3304 return handler.found(Index ? 
O->getComplexFloatImag() 3305 : O->getComplexFloatReal(), ObjType); 3306 } 3307 } else if (const FieldDecl *Field = getAsField(Sub.Entries[I])) { 3308 if (Field->isMutable() && 3309 !Obj.mayAccessMutableMembers(Info, handler.AccessKind)) { 3310 Info.FFDiag(E, diag::note_constexpr_access_mutable, 1) 3311 << handler.AccessKind << Field; 3312 Info.Note(Field->getLocation(), diag::note_declared_at); 3313 return handler.failed(); 3314 } 3315 3316 // Next subobject is a class, struct or union field. 3317 RecordDecl *RD = ObjType->castAs<RecordType>()->getDecl(); 3318 if (RD->isUnion()) { 3319 const FieldDecl *UnionField = O->getUnionField(); 3320 if (!UnionField || 3321 UnionField->getCanonicalDecl() != Field->getCanonicalDecl()) { 3322 if (I == N - 1 && handler.AccessKind == AK_Construct) { 3323 // Placement new onto an inactive union member makes it active. 3324 O->setUnion(Field, APValue()); 3325 } else { 3326 // FIXME: If O->getUnionValue() is absent, report that there's no 3327 // active union member rather than reporting the prior active union 3328 // member. We'll need to fix nullptr_t to not use APValue() as its 3329 // representation first. 3330 Info.FFDiag(E, diag::note_constexpr_access_inactive_union_member) 3331 << handler.AccessKind << Field << !UnionField << UnionField; 3332 return handler.failed(); 3333 } 3334 } 3335 O = &O->getUnionValue(); 3336 } else 3337 O = &O->getStructField(Field->getFieldIndex()); 3338 3339 ObjType = getSubobjectType(ObjType, Field->getType(), Field->isMutable()); 3340 LastField = Field; 3341 if (Field->getType().isVolatileQualified()) 3342 VolatileField = Field; 3343 } else { 3344 // Next subobject is a base class. 3345 const CXXRecordDecl *Derived = ObjType->getAsCXXRecordDecl(); 3346 const CXXRecordDecl *Base = getAsBaseClass(Sub.Entries[I]); 3347 O = &O->getStructBase(getBaseIndex(Derived, Base)); 3348 3349 ObjType = getSubobjectType(ObjType, Info.Ctx.getRecordType(Base)); 3350 } 3351 } 3352 } 3353 3354 namespace { 3355 struct ExtractSubobjectHandler { 3356 EvalInfo &Info; 3357 const Expr *E; 3358 APValue &Result; 3359 const AccessKinds AccessKind; 3360 3361 typedef bool result_type; 3362 bool failed() { return false; } 3363 bool found(APValue &Subobj, QualType SubobjType) { 3364 Result = Subobj; 3365 if (AccessKind == AK_ReadObjectRepresentation) 3366 return true; 3367 return CheckFullyInitialized(Info, E->getExprLoc(), SubobjType, Result); 3368 } 3369 bool found(APSInt &Value, QualType SubobjType) { 3370 Result = APValue(Value); 3371 return true; 3372 } 3373 bool found(APFloat &Value, QualType SubobjType) { 3374 Result = APValue(Value); 3375 return true; 3376 } 3377 }; 3378 } // end anonymous namespace 3379 3380 /// Extract the designated sub-object of an rvalue. 3381 static bool extractSubobject(EvalInfo &Info, const Expr *E, 3382 const CompleteObject &Obj, 3383 const SubobjectDesignator &Sub, APValue &Result, 3384 AccessKinds AK = AK_Read) { 3385 assert(AK == AK_Read || AK == AK_ReadObjectRepresentation); 3386 ExtractSubobjectHandler Handler = {Info, E, Result, AK}; 3387 return findSubobject(Info, E, Obj, Sub, Handler); 3388 } 3389 3390 namespace { 3391 struct ModifySubobjectHandler { 3392 EvalInfo &Info; 3393 APValue &NewVal; 3394 const Expr *E; 3395 3396 typedef bool result_type; 3397 static const AccessKinds AccessKind = AK_Assign; 3398 3399 bool checkConst(QualType QT) { 3400 // Assigning to a const object has undefined behavior. 
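// [Editorial note, not in the original source: an illustrative example of what
//  this check rejects; names are invented.]
//
//   constexpr int f() {          // C++14
//     const int n = 0;
//     const_cast<int &>(n) = 1;  // UB: modifies a const object
//     return n;
//   }
//   constexpr int k = f();       // rejected via note_constexpr_modify_const_type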
3401 if (QT.isConstQualified()) { 3402 Info.FFDiag(E, diag::note_constexpr_modify_const_type) << QT; 3403 return false; 3404 } 3405 return true; 3406 } 3407 3408 bool failed() { return false; } 3409 bool found(APValue &Subobj, QualType SubobjType) { 3410 if (!checkConst(SubobjType)) 3411 return false; 3412 // We've been given ownership of NewVal, so just swap it in. 3413 Subobj.swap(NewVal); 3414 return true; 3415 } 3416 bool found(APSInt &Value, QualType SubobjType) { 3417 if (!checkConst(SubobjType)) 3418 return false; 3419 if (!NewVal.isInt()) { 3420 // Maybe trying to write a cast pointer value into a complex? 3421 Info.FFDiag(E); 3422 return false; 3423 } 3424 Value = NewVal.getInt(); 3425 return true; 3426 } 3427 bool found(APFloat &Value, QualType SubobjType) { 3428 if (!checkConst(SubobjType)) 3429 return false; 3430 Value = NewVal.getFloat(); 3431 return true; 3432 } 3433 }; 3434 } // end anonymous namespace 3435 3436 const AccessKinds ModifySubobjectHandler::AccessKind; 3437 3438 /// Update the designated sub-object of an rvalue to the given value. 3439 static bool modifySubobject(EvalInfo &Info, const Expr *E, 3440 const CompleteObject &Obj, 3441 const SubobjectDesignator &Sub, 3442 APValue &NewVal) { 3443 ModifySubobjectHandler Handler = { Info, NewVal, E }; 3444 return findSubobject(Info, E, Obj, Sub, Handler); 3445 } 3446 3447 /// Find the position where two subobject designators diverge, or equivalently 3448 /// the length of the common initial subsequence. 3449 static unsigned FindDesignatorMismatch(QualType ObjType, 3450 const SubobjectDesignator &A, 3451 const SubobjectDesignator &B, 3452 bool &WasArrayIndex) { 3453 unsigned I = 0, N = std::min(A.Entries.size(), B.Entries.size()); 3454 for (/**/; I != N; ++I) { 3455 if (!ObjType.isNull() && 3456 (ObjType->isArrayType() || ObjType->isAnyComplexType())) { 3457 // Next subobject is an array element. 3458 if (A.Entries[I].getAsArrayIndex() != B.Entries[I].getAsArrayIndex()) { 3459 WasArrayIndex = true; 3460 return I; 3461 } 3462 if (ObjType->isAnyComplexType()) 3463 ObjType = ObjType->castAs<ComplexType>()->getElementType(); 3464 else 3465 ObjType = ObjType->castAsArrayTypeUnsafe()->getElementType(); 3466 } else { 3467 if (A.Entries[I].getAsBaseOrMember() != 3468 B.Entries[I].getAsBaseOrMember()) { 3469 WasArrayIndex = false; 3470 return I; 3471 } 3472 if (const FieldDecl *FD = getAsField(A.Entries[I])) 3473 // Next subobject is a field. 3474 ObjType = FD->getType(); 3475 else 3476 // Next subobject is a base class. 3477 ObjType = QualType(); 3478 } 3479 } 3480 WasArrayIndex = false; 3481 return I; 3482 } 3483 3484 /// Determine whether the given subobject designators refer to elements of the 3485 /// same array object. 3486 static bool AreElementsOfSameArray(QualType ObjType, 3487 const SubobjectDesignator &A, 3488 const SubobjectDesignator &B) { 3489 if (A.Entries.size() != B.Entries.size()) 3490 return false; 3491 3492 bool IsArray = A.MostDerivedIsArrayElement; 3493 if (IsArray && A.MostDerivedPathLength != A.Entries.size()) 3494 // A is a subobject of the array element. 3495 return false; 3496 3497 // If A (and B) designates an array element, the last entry will be the array 3498 // index. That doesn't have to match. Otherwise, we're in the 'implicit array 3499 // of length 1' case, and the entire path must match. 
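// [Editorial note, not in the original source: an illustrative example; the
//  array names are invented.]
//
//   constexpr int a[3] = {}, b[3] = {};
//   static_assert(&a[2] - &a[0] == 2, ""); // same array: constant
//   constexpr auto d = &a[0] - &b[0];      // rejected: unrelated arrays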
3500 bool WasArrayIndex; 3501 unsigned CommonLength = FindDesignatorMismatch(ObjType, A, B, WasArrayIndex); 3502 return CommonLength >= A.Entries.size() - IsArray; 3503 } 3504 3505 /// Find the complete object to which an LValue refers. 3506 static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E, 3507 AccessKinds AK, const LValue &LVal, 3508 QualType LValType) { 3509 if (LVal.InvalidBase) { 3510 Info.FFDiag(E); 3511 return CompleteObject(); 3512 } 3513 3514 if (!LVal.Base) { 3515 Info.FFDiag(E, diag::note_constexpr_access_null) << AK; 3516 return CompleteObject(); 3517 } 3518 3519 CallStackFrame *Frame = nullptr; 3520 unsigned Depth = 0; 3521 if (LVal.getLValueCallIndex()) { 3522 std::tie(Frame, Depth) = 3523 Info.getCallFrameAndDepth(LVal.getLValueCallIndex()); 3524 if (!Frame) { 3525 Info.FFDiag(E, diag::note_constexpr_lifetime_ended, 1) 3526 << AK << LVal.Base.is<const ValueDecl*>(); 3527 NoteLValueLocation(Info, LVal.Base); 3528 return CompleteObject(); 3529 } 3530 } 3531 3532 bool IsAccess = isAnyAccess(AK); 3533 3534 // C++11 DR1311: An lvalue-to-rvalue conversion on a volatile-qualified type 3535 // is not a constant expression (even if the object is non-volatile). We also 3536 // apply this rule to C++98, in order to conform to the expected 'volatile' 3537 // semantics. 3538 if (isFormalAccess(AK) && LValType.isVolatileQualified()) { 3539 if (Info.getLangOpts().CPlusPlus) 3540 Info.FFDiag(E, diag::note_constexpr_access_volatile_type) 3541 << AK << LValType; 3542 else 3543 Info.FFDiag(E); 3544 return CompleteObject(); 3545 } 3546 3547 // Compute value storage location and type of base object. 3548 APValue *BaseVal = nullptr; 3549 QualType BaseType = getType(LVal.Base); 3550 3551 if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl*>()) { 3552 // In C++98, const, non-volatile integers initialized with ICEs are ICEs. 3553 // In C++11, constexpr, non-volatile variables initialized with constant 3554 // expressions are constant expressions too. Inside constexpr functions, 3555 // parameters are constant expressions even if they're non-const. 3556 // In C++1y, objects local to a constant expression (those with a Frame) are 3557 // both readable and writable inside constant expressions. 3558 // In C, such things can also be folded, although they are not ICEs. 3559 const VarDecl *VD = dyn_cast<VarDecl>(D); 3560 if (VD) { 3561 if (const VarDecl *VDef = VD->getDefinition(Info.Ctx)) 3562 VD = VDef; 3563 } 3564 if (!VD || VD->isInvalidDecl()) { 3565 Info.FFDiag(E); 3566 return CompleteObject(); 3567 } 3568 3569 // Unless we're looking at a local variable or argument in a constexpr call, 3570 // the variable we're reading must be const. 3571 if (!Frame) { 3572 if (Info.getLangOpts().CPlusPlus14 && 3573 lifetimeStartedInEvaluation(Info, LVal.Base)) { 3574 // OK, we can read and modify an object if we're in the process of 3575 // evaluating its initializer, because its lifetime began in this 3576 // evaluation. 3577 } else if (isModification(AK)) { 3578 // All the remaining cases do not permit modification of the object. 3579 Info.FFDiag(E, diag::note_constexpr_modify_global); 3580 return CompleteObject(); 3581 } else if (VD->isConstexpr()) { 3582 // OK, we can read this variable. 3583 } else if (BaseType->isIntegralOrEnumerationType()) { 3584 // In OpenCL if a variable is in constant address space it is a const 3585 // value. 
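// [Editorial note, not in the original source: an illustrative example of the
//  integral-or-enumeration rule applied below; names are invented.]
//
//   const int c = 42;
//   int m = 42;
//   constexpr int x = c; // OK: const, integral, constant initializer
//   constexpr int y = m; // rejected: read of the non-const variable 'm'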
3586 if (!(BaseType.isConstQualified() || 3587 (Info.getLangOpts().OpenCL && 3588 BaseType.getAddressSpace() == LangAS::opencl_constant))) { 3589 if (!IsAccess) 3590 return CompleteObject(LVal.getLValueBase(), nullptr, BaseType); 3591 if (Info.getLangOpts().CPlusPlus) { 3592 Info.FFDiag(E, diag::note_constexpr_ltor_non_const_int, 1) << VD; 3593 Info.Note(VD->getLocation(), diag::note_declared_at); 3594 } else { 3595 Info.FFDiag(E); 3596 } 3597 return CompleteObject(); 3598 } 3599 } else if (!IsAccess) { 3600 return CompleteObject(LVal.getLValueBase(), nullptr, BaseType); 3601 } else if (BaseType->isFloatingType() && BaseType.isConstQualified()) { 3602 // We support folding of const floating-point types, in order to make 3603 // static const data members of such types (supported as an extension) 3604 // more useful. 3605 if (Info.getLangOpts().CPlusPlus11) { 3606 Info.CCEDiag(E, diag::note_constexpr_ltor_non_constexpr, 1) << VD; 3607 Info.Note(VD->getLocation(), diag::note_declared_at); 3608 } else { 3609 Info.CCEDiag(E); 3610 } 3611 } else if (BaseType.isConstQualified() && VD->hasDefinition(Info.Ctx)) { 3612 Info.CCEDiag(E, diag::note_constexpr_ltor_non_constexpr) << VD; 3613 // Keep evaluating to see what we can do. 3614 } else { 3615 // FIXME: Allow folding of values of any literal type in all languages. 3616 if (Info.checkingPotentialConstantExpression() && 3617 VD->getType().isConstQualified() && !VD->hasDefinition(Info.Ctx)) { 3618 // The definition of this variable could be constexpr. We can't 3619 // access it right now, but may be able to in future. 3620 } else if (Info.getLangOpts().CPlusPlus11) { 3621 Info.FFDiag(E, diag::note_constexpr_ltor_non_constexpr, 1) << VD; 3622 Info.Note(VD->getLocation(), diag::note_declared_at); 3623 } else { 3624 Info.FFDiag(E); 3625 } 3626 return CompleteObject(); 3627 } 3628 } 3629 3630 if (!evaluateVarDeclInit(Info, E, VD, Frame, BaseVal, &LVal)) 3631 return CompleteObject(); 3632 } else if (DynamicAllocLValue DA = LVal.Base.dyn_cast<DynamicAllocLValue>()) { 3633 Optional<DynAlloc*> Alloc = Info.lookupDynamicAlloc(DA); 3634 if (!Alloc) { 3635 Info.FFDiag(E, diag::note_constexpr_access_deleted_object) << AK; 3636 return CompleteObject(); 3637 } 3638 return CompleteObject(LVal.Base, &(*Alloc)->Value, 3639 LVal.Base.getDynamicAllocType()); 3640 } else { 3641 const Expr *Base = LVal.Base.dyn_cast<const Expr*>(); 3642 3643 if (!Frame) { 3644 if (const MaterializeTemporaryExpr *MTE = 3645 dyn_cast_or_null<MaterializeTemporaryExpr>(Base)) { 3646 assert(MTE->getStorageDuration() == SD_Static && 3647 "should have a frame for a non-global materialized temporary"); 3648 3649 // Per C++1y [expr.const]p2: 3650 // an lvalue-to-rvalue conversion [is not allowed unless it applies to] 3651 // - a [...] glvalue of integral or enumeration type that refers to 3652 // a non-volatile const object [...] 3653 // [...] 3654 // - a [...] glvalue of literal type that refers to a non-volatile 3655 // object whose lifetime began within the evaluation of e. 3656 // 3657 // C++11 misses the 'began within the evaluation of e' check and 3658 // instead allows all temporaries, including things like: 3659 // int &&r = 1; 3660 // int x = ++r; 3661 // constexpr int k = r; 3662 // Therefore we use the C++14 rules in C++11 too. 3663 // 3664 // Note that temporaries whose lifetimes began while evaluating a 3665 // variable's constructor are not usable while evaluating the 3666 // corresponding destructor, not even if they're of const-qualified 3667 // types. 
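// [Editorial note, not in the original source: an illustrative sketch of the
//  rule above; names are invented.]
//
//   constexpr const int &r = 42;    // lifetime-extended, static storage
//   constexpr int i = r;            // OK: const integral temporary
//   constexpr const double &d = 1.0;
//   constexpr double x = d;         // rejected: not integral, and the
//                                   // temporary's lifetime did not begin
//                                   // within this evaluation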
3668 if (!(BaseType.isConstQualified() && 3669 BaseType->isIntegralOrEnumerationType()) && 3670 !lifetimeStartedInEvaluation(Info, LVal.Base)) { 3671 if (!IsAccess) 3672 return CompleteObject(LVal.getLValueBase(), nullptr, BaseType); 3673 Info.FFDiag(E, diag::note_constexpr_access_static_temporary, 1) << AK; 3674 Info.Note(MTE->getExprLoc(), diag::note_constexpr_temporary_here); 3675 return CompleteObject(); 3676 } 3677 3678 BaseVal = MTE->getOrCreateValue(false); 3679 assert(BaseVal && "got reference to unevaluated temporary"); 3680 } else { 3681 if (!IsAccess) 3682 return CompleteObject(LVal.getLValueBase(), nullptr, BaseType); 3683 APValue Val; 3684 LVal.moveInto(Val); 3685 Info.FFDiag(E, diag::note_constexpr_access_unreadable_object) 3686 << AK 3687 << Val.getAsString(Info.Ctx, 3688 Info.Ctx.getLValueReferenceType(LValType)); 3689 NoteLValueLocation(Info, LVal.Base); 3690 return CompleteObject(); 3691 } 3692 } else { 3693 BaseVal = Frame->getTemporary(Base, LVal.Base.getVersion()); 3694 assert(BaseVal && "missing value for temporary"); 3695 } 3696 } 3697 3698 // In C++14, we can't safely access any mutable state when we might be 3699 // evaluating after an unmodeled side effect. 3700 // 3701 // FIXME: Not all local state is mutable. Allow local constant subobjects 3702 // to be read here (but take care with 'mutable' fields). 3703 if ((Frame && Info.getLangOpts().CPlusPlus14 && 3704 Info.EvalStatus.HasSideEffects) || 3705 (isModification(AK) && Depth < Info.SpeculativeEvaluationDepth)) 3706 return CompleteObject(); 3707 3708 return CompleteObject(LVal.getLValueBase(), BaseVal, BaseType); 3709 } 3710 3711 /// Perform an lvalue-to-rvalue conversion on the given glvalue. This 3712 /// can also be used for 'lvalue-to-lvalue' conversions for looking up the 3713 /// glvalue referred to by an entity of reference type. 3714 /// 3715 /// \param Info - Information about the ongoing evaluation. 3716 /// \param Conv - The expression for which we are performing the conversion. 3717 /// Used for diagnostics. 3718 /// \param Type - The type of the glvalue (before stripping cv-qualifiers in the 3719 /// case of a non-class type). 3720 /// \param LVal - The glvalue on which we are attempting to perform this action. 3721 /// \param RVal - The produced value will be placed here. 3722 /// \param WantObjectRepresentation - If true, we're looking for the object 3723 /// representation rather than the value, and in particular, 3724 /// there is no requirement that the result be fully initialized. 3725 static bool 3726 handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv, QualType Type, 3727 const LValue &LVal, APValue &RVal, 3728 bool WantObjectRepresentation = false) { 3729 if (LVal.Designator.Invalid) 3730 return false; 3731 3732 // Check for special cases where there is no existing APValue to look at. 3733 const Expr *Base = LVal.Base.dyn_cast<const Expr*>(); 3734 3735 AccessKinds AK = 3736 WantObjectRepresentation ? AK_ReadObjectRepresentation : AK_Read; 3737 3738 if (Base && !LVal.getLValueCallIndex() && !Type.isVolatileQualified()) { 3739 if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(Base)) { 3740 // In C99, a CompoundLiteralExpr is an lvalue, and we defer evaluating the 3741 // initializer until now for such expressions. Such an expression can't be 3742 // an ICE in C, so this only matters for fold. 
3743 if (Type.isVolatileQualified()) { 3744 Info.FFDiag(Conv); 3745 return false; 3746 } 3747 APValue Lit; 3748 if (!Evaluate(Lit, Info, CLE->getInitializer())) 3749 return false; 3750 CompleteObject LitObj(LVal.Base, &Lit, Base->getType()); 3751 return extractSubobject(Info, Conv, LitObj, LVal.Designator, RVal, AK); 3752 } else if (isa<StringLiteral>(Base) || isa<PredefinedExpr>(Base)) { 3753 // Special-case character extraction so we don't have to construct an 3754 // APValue for the whole string. 3755 assert(LVal.Designator.Entries.size() <= 1 && 3756 "Can only read characters from string literals"); 3757 if (LVal.Designator.Entries.empty()) { 3758 // Fail for now for LValue to RValue conversion of an array. 3759 // (This shouldn't show up in C/C++, but it could be triggered by a 3760 // weird EvaluateAsRValue call from a tool.) 3761 Info.FFDiag(Conv); 3762 return false; 3763 } 3764 if (LVal.Designator.isOnePastTheEnd()) { 3765 if (Info.getLangOpts().CPlusPlus11) 3766 Info.FFDiag(Conv, diag::note_constexpr_access_past_end) << AK; 3767 else 3768 Info.FFDiag(Conv); 3769 return false; 3770 } 3771 uint64_t CharIndex = LVal.Designator.Entries[0].getAsArrayIndex(); 3772 RVal = APValue(extractStringLiteralCharacter(Info, Base, CharIndex)); 3773 return true; 3774 } 3775 } 3776 3777 CompleteObject Obj = findCompleteObject(Info, Conv, AK, LVal, Type); 3778 return Obj && extractSubobject(Info, Conv, Obj, LVal.Designator, RVal, AK); 3779 } 3780 3781 /// Perform an assignment of Val to LVal. Takes ownership of Val. 3782 static bool handleAssignment(EvalInfo &Info, const Expr *E, const LValue &LVal, 3783 QualType LValType, APValue &Val) { 3784 if (LVal.Designator.Invalid) 3785 return false; 3786 3787 if (!Info.getLangOpts().CPlusPlus14) { 3788 Info.FFDiag(E); 3789 return false; 3790 } 3791 3792 CompleteObject Obj = findCompleteObject(Info, E, AK_Assign, LVal, LValType); 3793 return Obj && modifySubobject(Info, E, Obj, LVal.Designator, Val); 3794 } 3795 3796 namespace { 3797 struct CompoundAssignSubobjectHandler { 3798 EvalInfo &Info; 3799 const Expr *E; 3800 QualType PromotedLHSType; 3801 BinaryOperatorKind Opcode; 3802 const APValue &RHS; 3803 3804 static const AccessKinds AccessKind = AK_Assign; 3805 3806 typedef bool result_type; 3807 3808 bool checkConst(QualType QT) { 3809 // Assigning to a const object has undefined behavior. 3810 if (QT.isConstQualified()) { 3811 Info.FFDiag(E, diag::note_constexpr_modify_const_type) << QT; 3812 return false; 3813 } 3814 return true; 3815 } 3816 3817 bool failed() { return false; } 3818 bool found(APValue &Subobj, QualType SubobjType) { 3819 switch (Subobj.getKind()) { 3820 case APValue::Int: 3821 return found(Subobj.getInt(), SubobjType); 3822 case APValue::Float: 3823 return found(Subobj.getFloat(), SubobjType); 3824 case APValue::ComplexInt: 3825 case APValue::ComplexFloat: 3826 // FIXME: Implement complex compound assignment. 3827 Info.FFDiag(E); 3828 return false; 3829 case APValue::LValue: 3830 return foundPointer(Subobj, SubobjType); 3831 default: 3832 // FIXME: can this happen? 3833 Info.FFDiag(E); 3834 return false; 3835 } 3836 } 3837 bool found(APSInt &Value, QualType SubobjType) { 3838 if (!checkConst(SubobjType)) 3839 return false; 3840 3841 if (!SubobjType->isIntegerType()) { 3842 // We don't support compound assignment on integer-cast-to-pointer 3843 // values. 
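// [Editorial note, not in the original source: the ordinary integer case is
//  handled further below; for instance, in C++14
//    constexpr int f() { int n = 3; n *= 7; return n; }
//    static_assert(f() == 21, "");
//  is folded through this handler. Names are invented.]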
3844 Info.FFDiag(E); 3845 return false; 3846 } 3847 3848 if (RHS.isInt()) { 3849 APSInt LHS = 3850 HandleIntToIntCast(Info, E, PromotedLHSType, SubobjType, Value); 3851 if (!handleIntIntBinOp(Info, E, LHS, Opcode, RHS.getInt(), LHS)) 3852 return false; 3853 Value = HandleIntToIntCast(Info, E, SubobjType, PromotedLHSType, LHS); 3854 return true; 3855 } else if (RHS.isFloat()) { 3856 APFloat FValue(0.0); 3857 return HandleIntToFloatCast(Info, E, SubobjType, Value, PromotedLHSType, 3858 FValue) && 3859 handleFloatFloatBinOp(Info, E, FValue, Opcode, RHS.getFloat()) && 3860 HandleFloatToIntCast(Info, E, PromotedLHSType, FValue, SubobjType, 3861 Value); 3862 } 3863 3864 Info.FFDiag(E); 3865 return false; 3866 } 3867 bool found(APFloat &Value, QualType SubobjType) { 3868 return checkConst(SubobjType) && 3869 HandleFloatToFloatCast(Info, E, SubobjType, PromotedLHSType, 3870 Value) && 3871 handleFloatFloatBinOp(Info, E, Value, Opcode, RHS.getFloat()) && 3872 HandleFloatToFloatCast(Info, E, PromotedLHSType, SubobjType, Value); 3873 } 3874 bool foundPointer(APValue &Subobj, QualType SubobjType) { 3875 if (!checkConst(SubobjType)) 3876 return false; 3877 3878 QualType PointeeType; 3879 if (const PointerType *PT = SubobjType->getAs<PointerType>()) 3880 PointeeType = PT->getPointeeType(); 3881 3882 if (PointeeType.isNull() || !RHS.isInt() || 3883 (Opcode != BO_Add && Opcode != BO_Sub)) { 3884 Info.FFDiag(E); 3885 return false; 3886 } 3887 3888 APSInt Offset = RHS.getInt(); 3889 if (Opcode == BO_Sub) 3890 negateAsSigned(Offset); 3891 3892 LValue LVal; 3893 LVal.setFrom(Info.Ctx, Subobj); 3894 if (!HandleLValueArrayAdjustment(Info, E, LVal, PointeeType, Offset)) 3895 return false; 3896 LVal.moveInto(Subobj); 3897 return true; 3898 } 3899 }; 3900 } // end anonymous namespace 3901 3902 const AccessKinds CompoundAssignSubobjectHandler::AccessKind; 3903 3904 /// Perform a compound assignment of LVal <op>= RVal. 3905 static bool handleCompoundAssignment( 3906 EvalInfo &Info, const Expr *E, 3907 const LValue &LVal, QualType LValType, QualType PromotedLValType, 3908 BinaryOperatorKind Opcode, const APValue &RVal) { 3909 if (LVal.Designator.Invalid) 3910 return false; 3911 3912 if (!Info.getLangOpts().CPlusPlus14) { 3913 Info.FFDiag(E); 3914 return false; 3915 } 3916 3917 CompleteObject Obj = findCompleteObject(Info, E, AK_Assign, LVal, LValType); 3918 CompoundAssignSubobjectHandler Handler = { Info, E, PromotedLValType, Opcode, 3919 RVal }; 3920 return Obj && findSubobject(Info, E, Obj, LVal.Designator, Handler); 3921 } 3922 3923 namespace { 3924 struct IncDecSubobjectHandler { 3925 EvalInfo &Info; 3926 const UnaryOperator *E; 3927 AccessKinds AccessKind; 3928 APValue *Old; 3929 3930 typedef bool result_type; 3931 3932 bool checkConst(QualType QT) { 3933 // Assigning to a const object has undefined behavior. 3934 if (QT.isConstQualified()) { 3935 Info.FFDiag(E, diag::note_constexpr_modify_const_type) << QT; 3936 return false; 3937 } 3938 return true; 3939 } 3940 3941 bool failed() { return false; } 3942 bool found(APValue &Subobj, QualType SubobjType) { 3943 // Stash the old value. Also clear Old, so we don't clobber it later 3944 // if we're post-incrementing a complex. 
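// [Editorial note, not in the original source: for instance, in C++14
//    constexpr int f() { int n = 3; ++n; n--; return n; }
//    static_assert(f() == 3, "");
//  both operators are evaluated through this handler. Names are invented.]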
3945 if (Old) { 3946 *Old = Subobj; 3947 Old = nullptr; 3948 } 3949 3950 switch (Subobj.getKind()) { 3951 case APValue::Int: 3952 return found(Subobj.getInt(), SubobjType); 3953 case APValue::Float: 3954 return found(Subobj.getFloat(), SubobjType); 3955 case APValue::ComplexInt: 3956 return found(Subobj.getComplexIntReal(), 3957 SubobjType->castAs<ComplexType>()->getElementType() 3958 .withCVRQualifiers(SubobjType.getCVRQualifiers())); 3959 case APValue::ComplexFloat: 3960 return found(Subobj.getComplexFloatReal(), 3961 SubobjType->castAs<ComplexType>()->getElementType() 3962 .withCVRQualifiers(SubobjType.getCVRQualifiers())); 3963 case APValue::LValue: 3964 return foundPointer(Subobj, SubobjType); 3965 default: 3966 // FIXME: can this happen? 3967 Info.FFDiag(E); 3968 return false; 3969 } 3970 } 3971 bool found(APSInt &Value, QualType SubobjType) { 3972 if (!checkConst(SubobjType)) 3973 return false; 3974 3975 if (!SubobjType->isIntegerType()) { 3976 // We don't support increment / decrement on integer-cast-to-pointer 3977 // values. 3978 Info.FFDiag(E); 3979 return false; 3980 } 3981 3982 if (Old) *Old = APValue(Value); 3983 3984 // bool arithmetic promotes to int, and the conversion back to bool 3985 // doesn't reduce mod 2^n, so special-case it. 3986 if (SubobjType->isBooleanType()) { 3987 if (AccessKind == AK_Increment) 3988 Value = 1; 3989 else 3990 Value = !Value; 3991 return true; 3992 } 3993 3994 bool WasNegative = Value.isNegative(); 3995 if (AccessKind == AK_Increment) { 3996 ++Value; 3997 3998 if (!WasNegative && Value.isNegative() && E->canOverflow()) { 3999 APSInt ActualValue(Value, /*IsUnsigned*/true); 4000 return HandleOverflow(Info, E, ActualValue, SubobjType); 4001 } 4002 } else { 4003 --Value; 4004 4005 if (WasNegative && !Value.isNegative() && E->canOverflow()) { 4006 unsigned BitWidth = Value.getBitWidth(); 4007 APSInt ActualValue(Value.sext(BitWidth + 1), /*IsUnsigned*/false); 4008 ActualValue.setBit(BitWidth); 4009 return HandleOverflow(Info, E, ActualValue, SubobjType); 4010 } 4011 } 4012 return true; 4013 } 4014 bool found(APFloat &Value, QualType SubobjType) { 4015 if (!checkConst(SubobjType)) 4016 return false; 4017 4018 if (Old) *Old = APValue(Value); 4019 4020 APFloat One(Value.getSemantics(), 1); 4021 if (AccessKind == AK_Increment) 4022 Value.add(One, APFloat::rmNearestTiesToEven); 4023 else 4024 Value.subtract(One, APFloat::rmNearestTiesToEven); 4025 return true; 4026 } 4027 bool foundPointer(APValue &Subobj, QualType SubobjType) { 4028 if (!checkConst(SubobjType)) 4029 return false; 4030 4031 QualType PointeeType; 4032 if (const PointerType *PT = SubobjType->getAs<PointerType>()) 4033 PointeeType = PT->getPointeeType(); 4034 else { 4035 Info.FFDiag(E); 4036 return false; 4037 } 4038 4039 LValue LVal; 4040 LVal.setFrom(Info.Ctx, Subobj); 4041 if (!HandleLValueArrayAdjustment(Info, E, LVal, PointeeType, 4042 AccessKind == AK_Increment ? 1 : -1)) 4043 return false; 4044 LVal.moveInto(Subobj); 4045 return true; 4046 } 4047 }; 4048 } // end anonymous namespace 4049 4050 /// Perform an increment or decrement on LVal. 4051 static bool handleIncDec(EvalInfo &Info, const Expr *E, const LValue &LVal, 4052 QualType LValType, bool IsIncrement, APValue *Old) { 4053 if (LVal.Designator.Invalid) 4054 return false; 4055 4056 if (!Info.getLangOpts().CPlusPlus14) { 4057 Info.FFDiag(E); 4058 return false; 4059 } 4060 4061 AccessKinds AK = IsIncrement ? 
AK_Increment : AK_Decrement; 4062 CompleteObject Obj = findCompleteObject(Info, E, AK, LVal, LValType); 4063 IncDecSubobjectHandler Handler = {Info, cast<UnaryOperator>(E), AK, Old}; 4064 return Obj && findSubobject(Info, E, Obj, LVal.Designator, Handler); 4065 } 4066 4067 /// Build an lvalue for the object argument of a member function call. 4068 static bool EvaluateObjectArgument(EvalInfo &Info, const Expr *Object, 4069 LValue &This) { 4070 if (Object->getType()->isPointerType() && Object->isRValue()) 4071 return EvaluatePointer(Object, This, Info); 4072 4073 if (Object->isGLValue()) 4074 return EvaluateLValue(Object, This, Info); 4075 4076 if (Object->getType()->isLiteralType(Info.Ctx)) 4077 return EvaluateTemporary(Object, This, Info); 4078 4079 Info.FFDiag(Object, diag::note_constexpr_nonliteral) << Object->getType(); 4080 return false; 4081 } 4082 4083 /// HandleMemberPointerAccess - Evaluate a member access operation and build an 4084 /// lvalue referring to the result. 4085 /// 4086 /// \param Info - Information about the ongoing evaluation. 4087 /// \param LV - An lvalue referring to the base of the member pointer. 4088 /// \param RHS - The member pointer expression. 4089 /// \param IncludeMember - Specifies whether the member itself is included in 4090 /// the resulting LValue subobject designator. This is not possible when 4091 /// creating a bound member function. 4092 /// \return The field or method declaration to which the member pointer refers, 4093 /// or 0 if evaluation fails. 4094 static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info, 4095 QualType LVType, 4096 LValue &LV, 4097 const Expr *RHS, 4098 bool IncludeMember = true) { 4099 MemberPtr MemPtr; 4100 if (!EvaluateMemberPointer(RHS, MemPtr, Info)) 4101 return nullptr; 4102 4103 // C++11 [expr.mptr.oper]p6: If the second operand is the null pointer to 4104 // member value, the behavior is undefined. 4105 if (!MemPtr.getDecl()) { 4106 // FIXME: Specific diagnostic. 4107 Info.FFDiag(RHS); 4108 return nullptr; 4109 } 4110 4111 if (MemPtr.isDerivedMember()) { 4112 // This is a member of some derived class. Truncate LV appropriately. 4113 // The end of the derived-to-base path for the base object must match the 4114 // derived-to-base path for the member pointer. 4115 if (LV.Designator.MostDerivedPathLength + MemPtr.Path.size() > 4116 LV.Designator.Entries.size()) { 4117 Info.FFDiag(RHS); 4118 return nullptr; 4119 } 4120 unsigned PathLengthToMember = 4121 LV.Designator.Entries.size() - MemPtr.Path.size(); 4122 for (unsigned I = 0, N = MemPtr.Path.size(); I != N; ++I) { 4123 const CXXRecordDecl *LVDecl = getAsBaseClass( 4124 LV.Designator.Entries[PathLengthToMember + I]); 4125 const CXXRecordDecl *MPDecl = MemPtr.Path[I]; 4126 if (LVDecl->getCanonicalDecl() != MPDecl->getCanonicalDecl()) { 4127 Info.FFDiag(RHS); 4128 return nullptr; 4129 } 4130 } 4131 4132 // Truncate the lvalue to the appropriate derived class. 4133 if (!CastToDerivedClass(Info, RHS, LV, MemPtr.getContainingRecord(), 4134 PathLengthToMember)) 4135 return nullptr; 4136 } else if (!MemPtr.Path.empty()) { 4137 // Extend the LValue path with the member pointer's path. 4138 LV.Designator.Entries.reserve(LV.Designator.Entries.size() + 4139 MemPtr.Path.size() + IncludeMember); 4140 4141 // Walk down to the appropriate base class. 
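// [Editorial note, not in the original source: an illustrative example; names
//  are invented, and the aggregate initialization shown requires C++17.]
//
//   struct A { int x; };
//   struct B : A {};
//   constexpr int B::*pm = &A::x;   // int A::* converts to int B::*
//   constexpr B b{{42}};
//   static_assert(b.*pm == 42, ""); // walks from B down to the A base class,
//                                   // then to the member x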
4142 if (const PointerType *PT = LVType->getAs<PointerType>()) 4143 LVType = PT->getPointeeType(); 4144 const CXXRecordDecl *RD = LVType->getAsCXXRecordDecl(); 4145 assert(RD && "member pointer access on non-class-type expression"); 4146 // The first class in the path is that of the lvalue. 4147 for (unsigned I = 1, N = MemPtr.Path.size(); I != N; ++I) { 4148 const CXXRecordDecl *Base = MemPtr.Path[N - I - 1]; 4149 if (!HandleLValueDirectBase(Info, RHS, LV, RD, Base)) 4150 return nullptr; 4151 RD = Base; 4152 } 4153 // Finally cast to the class containing the member. 4154 if (!HandleLValueDirectBase(Info, RHS, LV, RD, 4155 MemPtr.getContainingRecord())) 4156 return nullptr; 4157 } 4158 4159 // Add the member. Note that we cannot build bound member functions here. 4160 if (IncludeMember) { 4161 if (const FieldDecl *FD = dyn_cast<FieldDecl>(MemPtr.getDecl())) { 4162 if (!HandleLValueMember(Info, RHS, LV, FD)) 4163 return nullptr; 4164 } else if (const IndirectFieldDecl *IFD = 4165 dyn_cast<IndirectFieldDecl>(MemPtr.getDecl())) { 4166 if (!HandleLValueIndirectMember(Info, RHS, LV, IFD)) 4167 return nullptr; 4168 } else { 4169 llvm_unreachable("can't construct reference to bound member function"); 4170 } 4171 } 4172 4173 return MemPtr.getDecl(); 4174 } 4175 4176 static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info, 4177 const BinaryOperator *BO, 4178 LValue &LV, 4179 bool IncludeMember = true) { 4180 assert(BO->getOpcode() == BO_PtrMemD || BO->getOpcode() == BO_PtrMemI); 4181 4182 if (!EvaluateObjectArgument(Info, BO->getLHS(), LV)) { 4183 if (Info.noteFailure()) { 4184 MemberPtr MemPtr; 4185 EvaluateMemberPointer(BO->getRHS(), MemPtr, Info); 4186 } 4187 return nullptr; 4188 } 4189 4190 return HandleMemberPointerAccess(Info, BO->getLHS()->getType(), LV, 4191 BO->getRHS(), IncludeMember); 4192 } 4193 4194 /// HandleBaseToDerivedCast - Apply the given base-to-derived cast operation on 4195 /// the provided lvalue, which currently refers to the base object. 4196 static bool HandleBaseToDerivedCast(EvalInfo &Info, const CastExpr *E, 4197 LValue &Result) { 4198 SubobjectDesignator &D = Result.Designator; 4199 if (D.Invalid || !Result.checkNullPointer(Info, E, CSK_Derived)) 4200 return false; 4201 4202 QualType TargetQT = E->getType(); 4203 if (const PointerType *PT = TargetQT->getAs<PointerType>()) 4204 TargetQT = PT->getPointeeType(); 4205 4206 // Check this cast lands within the final derived-to-base subobject path. 4207 if (D.MostDerivedPathLength + E->path_size() > D.Entries.size()) { 4208 Info.CCEDiag(E, diag::note_constexpr_invalid_downcast) 4209 << D.MostDerivedType << TargetQT; 4210 return false; 4211 } 4212 4213 // Check the type of the final cast. We don't need to check the path, 4214 // since a cast can only be formed if the path is unique. 4215 unsigned NewEntriesSize = D.Entries.size() - E->path_size(); 4216 const CXXRecordDecl *TargetType = TargetQT->getAsCXXRecordDecl(); 4217 const CXXRecordDecl *FinalType; 4218 if (NewEntriesSize == D.MostDerivedPathLength) 4219 FinalType = D.MostDerivedType->getAsCXXRecordDecl(); 4220 else 4221 FinalType = getAsBaseClass(D.Entries[NewEntriesSize - 1]); 4222 if (FinalType->getCanonicalDecl() != TargetType->getCanonicalDecl()) { 4223 Info.CCEDiag(E, diag::note_constexpr_invalid_downcast) 4224 << D.MostDerivedType << TargetQT; 4225 return false; 4226 } 4227 4228 // Truncate the lvalue to the appropriate derived class. 
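// [Editorial note, not in the original source: an illustrative example; names
//  are invented.]
//
//   struct A { int x = 1; };
//   struct B : A {};
//   constexpr B b;
//   constexpr const A &a = b;
//   constexpr const B &b2 = static_cast<const B &>(a); // OK: 'a' designates
//                                                      // the A subobject of a B
//
//  Had 'a' referred to a standalone A object, the downcast would be rejected
//  with note_constexpr_invalid_downcast above.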
4229 return CastToDerivedClass(Info, E, Result, TargetType, NewEntriesSize); 4230 } 4231 4232 /// Get the value to use for a default-initialized object of type T. 4233 static APValue getDefaultInitValue(QualType T) { 4234 if (auto *RD = T->getAsCXXRecordDecl()) { 4235 if (RD->isUnion()) 4236 return APValue((const FieldDecl*)nullptr); 4237 4238 APValue Struct(APValue::UninitStruct(), RD->getNumBases(), 4239 std::distance(RD->field_begin(), RD->field_end())); 4240 4241 unsigned Index = 0; 4242 for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), 4243 End = RD->bases_end(); I != End; ++I, ++Index) 4244 Struct.getStructBase(Index) = getDefaultInitValue(I->getType()); 4245 4246 for (const auto *I : RD->fields()) { 4247 if (I->isUnnamedBitfield()) 4248 continue; 4249 Struct.getStructField(I->getFieldIndex()) = 4250 getDefaultInitValue(I->getType()); 4251 } 4252 return Struct; 4253 } 4254 4255 if (auto *AT = 4256 dyn_cast_or_null<ConstantArrayType>(T->getAsArrayTypeUnsafe())) { 4257 APValue Array(APValue::UninitArray(), 0, AT->getSize().getZExtValue()); 4258 if (Array.hasArrayFiller()) 4259 Array.getArrayFiller() = getDefaultInitValue(AT->getElementType()); 4260 return Array; 4261 } 4262 4263 return APValue::IndeterminateValue(); 4264 } 4265 4266 namespace { 4267 enum EvalStmtResult { 4268 /// Evaluation failed. 4269 ESR_Failed, 4270 /// Hit a 'return' statement. 4271 ESR_Returned, 4272 /// Evaluation succeeded. 4273 ESR_Succeeded, 4274 /// Hit a 'continue' statement. 4275 ESR_Continue, 4276 /// Hit a 'break' statement. 4277 ESR_Break, 4278 /// Still scanning for 'case' or 'default' statement. 4279 ESR_CaseNotFound 4280 }; 4281 } 4282 4283 static bool EvaluateVarDecl(EvalInfo &Info, const VarDecl *VD) { 4284 // We don't need to evaluate the initializer for a static local. 4285 if (!VD->hasLocalStorage()) 4286 return true; 4287 4288 LValue Result; 4289 APValue &Val = 4290 Info.CurrentCall->createTemporary(VD, VD->getType(), true, Result); 4291 4292 const Expr *InitE = VD->getInit(); 4293 if (!InitE) { 4294 Val = getDefaultInitValue(VD->getType()); 4295 return true; 4296 } 4297 4298 if (InitE->isValueDependent()) 4299 return false; 4300 4301 if (!EvaluateInPlace(Val, Info, Result, InitE)) { 4302 // Wipe out any partially-computed value, to allow tracking that this 4303 // evaluation failed. 4304 Val = APValue(); 4305 return false; 4306 } 4307 4308 return true; 4309 } 4310 4311 static bool EvaluateDecl(EvalInfo &Info, const Decl *D) { 4312 bool OK = true; 4313 4314 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) 4315 OK &= EvaluateVarDecl(Info, VD); 4316 4317 if (const DecompositionDecl *DD = dyn_cast<DecompositionDecl>(D)) 4318 for (auto *BD : DD->bindings()) 4319 if (auto *VD = BD->getHoldingVar()) 4320 OK &= EvaluateDecl(Info, VD); 4321 4322 return OK; 4323 } 4324 4325 4326 /// Evaluate a condition (either a variable declaration or an expression). 4327 static bool EvaluateCond(EvalInfo &Info, const VarDecl *CondDecl, 4328 const Expr *Cond, bool &Result) { 4329 FullExpressionRAII Scope(Info); 4330 if (CondDecl && !EvaluateDecl(Info, CondDecl)) 4331 return false; 4332 if (!EvaluateAsBooleanCondition(Cond, Result, Info)) 4333 return false; 4334 return Scope.destroy(); 4335 } 4336 4337 namespace { 4338 /// A location where the result (returned value) of evaluating a 4339 /// statement should be stored. 4340 struct StmtResult { 4341 /// The APValue that should be filled in with the returned value. 
4342 APValue &Value; 4343 /// The location containing the result, if any (used to support RVO). 4344 const LValue *Slot; 4345 }; 4346 4347 struct TempVersionRAII { 4348 CallStackFrame &Frame; 4349 4350 TempVersionRAII(CallStackFrame &Frame) : Frame(Frame) { 4351 Frame.pushTempVersion(); 4352 } 4353 4354 ~TempVersionRAII() { 4355 Frame.popTempVersion(); 4356 } 4357 }; 4358 4359 } 4360 4361 static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info, 4362 const Stmt *S, 4363 const SwitchCase *SC = nullptr); 4364 4365 /// Evaluate the body of a loop, and translate the result as appropriate. 4366 static EvalStmtResult EvaluateLoopBody(StmtResult &Result, EvalInfo &Info, 4367 const Stmt *Body, 4368 const SwitchCase *Case = nullptr) { 4369 BlockScopeRAII Scope(Info); 4370 4371 EvalStmtResult ESR = EvaluateStmt(Result, Info, Body, Case); 4372 if (ESR != ESR_Failed && ESR != ESR_CaseNotFound && !Scope.destroy()) 4373 ESR = ESR_Failed; 4374 4375 switch (ESR) { 4376 case ESR_Break: 4377 return ESR_Succeeded; 4378 case ESR_Succeeded: 4379 case ESR_Continue: 4380 return ESR_Continue; 4381 case ESR_Failed: 4382 case ESR_Returned: 4383 case ESR_CaseNotFound: 4384 return ESR; 4385 } 4386 llvm_unreachable("Invalid EvalStmtResult!"); 4387 } 4388 4389 /// Evaluate a switch statement. 4390 static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info, 4391 const SwitchStmt *SS) { 4392 BlockScopeRAII Scope(Info); 4393 4394 // Evaluate the switch condition. 4395 APSInt Value; 4396 { 4397 if (const Stmt *Init = SS->getInit()) { 4398 EvalStmtResult ESR = EvaluateStmt(Result, Info, Init); 4399 if (ESR != ESR_Succeeded) { 4400 if (ESR != ESR_Failed && !Scope.destroy()) 4401 ESR = ESR_Failed; 4402 return ESR; 4403 } 4404 } 4405 4406 FullExpressionRAII CondScope(Info); 4407 if (SS->getConditionVariable() && 4408 !EvaluateDecl(Info, SS->getConditionVariable())) 4409 return ESR_Failed; 4410 if (!EvaluateInteger(SS->getCond(), Value, Info)) 4411 return ESR_Failed; 4412 if (!CondScope.destroy()) 4413 return ESR_Failed; 4414 } 4415 4416 // Find the switch case corresponding to the value of the condition. 4417 // FIXME: Cache this lookup. 4418 const SwitchCase *Found = nullptr; 4419 for (const SwitchCase *SC = SS->getSwitchCaseList(); SC; 4420 SC = SC->getNextSwitchCase()) { 4421 if (isa<DefaultStmt>(SC)) { 4422 Found = SC; 4423 continue; 4424 } 4425 4426 const CaseStmt *CS = cast<CaseStmt>(SC); 4427 APSInt LHS = CS->getLHS()->EvaluateKnownConstInt(Info.Ctx); 4428 APSInt RHS = CS->getRHS() ? CS->getRHS()->EvaluateKnownConstInt(Info.Ctx) 4429 : LHS; 4430 if (LHS <= Value && Value <= RHS) { 4431 Found = SC; 4432 break; 4433 } 4434 } 4435 4436 if (!Found) 4437 return Scope.destroy() ? ESR_Succeeded : ESR_Failed; 4438 4439 // Search the switch body for the switch case and evaluate it from there. 4440 EvalStmtResult ESR = EvaluateStmt(Result, Info, SS->getBody(), Found); 4441 if (ESR != ESR_Failed && ESR != ESR_CaseNotFound && !Scope.destroy()) 4442 return ESR_Failed; 4443 4444 switch (ESR) { 4445 case ESR_Break: 4446 return ESR_Succeeded; 4447 case ESR_Succeeded: 4448 case ESR_Continue: 4449 case ESR_Failed: 4450 case ESR_Returned: 4451 return ESR; 4452 case ESR_CaseNotFound: 4453 // This can only happen if the switch case is nested within a statement 4454 // expression. We have no intention of supporting that. 
4455 Info.FFDiag(Found->getBeginLoc(), 4456 diag::note_constexpr_stmt_expr_unsupported); 4457 return ESR_Failed; 4458 } 4459 llvm_unreachable("Invalid EvalStmtResult!"); 4460 } 4461 4462 // Evaluate a statement. 4463 static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info, 4464 const Stmt *S, const SwitchCase *Case) { 4465 if (!Info.nextStep(S)) 4466 return ESR_Failed; 4467 4468 // If we're hunting down a 'case' or 'default' label, recurse through 4469 // substatements until we hit the label. 4470 if (Case) { 4471 switch (S->getStmtClass()) { 4472 case Stmt::CompoundStmtClass: 4473 // FIXME: Precompute which substatement of a compound statement we 4474 // would jump to, and go straight there rather than performing a 4475 // linear scan each time. 4476 case Stmt::LabelStmtClass: 4477 case Stmt::AttributedStmtClass: 4478 case Stmt::DoStmtClass: 4479 break; 4480 4481 case Stmt::CaseStmtClass: 4482 case Stmt::DefaultStmtClass: 4483 if (Case == S) 4484 Case = nullptr; 4485 break; 4486 4487 case Stmt::IfStmtClass: { 4488 // FIXME: Precompute which side of an 'if' we would jump to, and go 4489 // straight there rather than scanning both sides. 4490 const IfStmt *IS = cast<IfStmt>(S); 4491 4492 // Wrap the evaluation in a block scope, in case it's a DeclStmt 4493 // preceded by our switch label. 4494 BlockScopeRAII Scope(Info); 4495 4496 // Step into the init statement in case it brings an (uninitialized) 4497 // variable into scope. 4498 if (const Stmt *Init = IS->getInit()) { 4499 EvalStmtResult ESR = EvaluateStmt(Result, Info, Init, Case); 4500 if (ESR != ESR_CaseNotFound) { 4501 assert(ESR != ESR_Succeeded); 4502 return ESR; 4503 } 4504 } 4505 4506 // Condition variable must be initialized if it exists. 4507 // FIXME: We can skip evaluating the body if there's a condition 4508 // variable, as there can't be any case labels within it. 4509 // (The same is true for 'for' statements.) 4510 4511 EvalStmtResult ESR = EvaluateStmt(Result, Info, IS->getThen(), Case); 4512 if (ESR == ESR_Failed) 4513 return ESR; 4514 if (ESR != ESR_CaseNotFound) 4515 return Scope.destroy() ? ESR : ESR_Failed; 4516 if (!IS->getElse()) 4517 return ESR_CaseNotFound; 4518 4519 ESR = EvaluateStmt(Result, Info, IS->getElse(), Case); 4520 if (ESR == ESR_Failed) 4521 return ESR; 4522 if (ESR != ESR_CaseNotFound) 4523 return Scope.destroy() ? ESR : ESR_Failed; 4524 return ESR_CaseNotFound; 4525 } 4526 4527 case Stmt::WhileStmtClass: { 4528 EvalStmtResult ESR = 4529 EvaluateLoopBody(Result, Info, cast<WhileStmt>(S)->getBody(), Case); 4530 if (ESR != ESR_Continue) 4531 return ESR; 4532 break; 4533 } 4534 4535 case Stmt::ForStmtClass: { 4536 const ForStmt *FS = cast<ForStmt>(S); 4537 BlockScopeRAII Scope(Info); 4538 4539 // Step into the init statement in case it brings an (uninitialized) 4540 // variable into scope. 4541 if (const Stmt *Init = FS->getInit()) { 4542 EvalStmtResult ESR = EvaluateStmt(Result, Info, Init, Case); 4543 if (ESR != ESR_CaseNotFound) { 4544 assert(ESR != ESR_Succeeded); 4545 return ESR; 4546 } 4547 } 4548 4549 EvalStmtResult ESR = 4550 EvaluateLoopBody(Result, Info, FS->getBody(), Case); 4551 if (ESR != ESR_Continue) 4552 return ESR; 4553 if (FS->getInc()) { 4554 FullExpressionRAII IncScope(Info); 4555 if (!EvaluateIgnoredValue(Info, FS->getInc()) || !IncScope.destroy()) 4556 return ESR_Failed; 4557 } 4558 break; 4559 } 4560 4561 case Stmt::DeclStmtClass: { 4562 // Start the lifetime of any uninitialized variables we encounter. 
They 4563 // might be used by the selected branch of the switch. 4564 const DeclStmt *DS = cast<DeclStmt>(S); 4565 for (const auto *D : DS->decls()) { 4566 if (const auto *VD = dyn_cast<VarDecl>(D)) { 4567 if (VD->hasLocalStorage() && !VD->getInit()) 4568 if (!EvaluateVarDecl(Info, VD)) 4569 return ESR_Failed; 4570 // FIXME: If the variable has initialization that can't be jumped 4571 // over, bail out of any immediately-surrounding compound-statement 4572 // too. There can't be any case labels here. 4573 } 4574 } 4575 return ESR_CaseNotFound; 4576 } 4577 4578 default: 4579 return ESR_CaseNotFound; 4580 } 4581 } 4582 4583 switch (S->getStmtClass()) { 4584 default: 4585 if (const Expr *E = dyn_cast<Expr>(S)) { 4586 // Don't bother evaluating beyond an expression-statement which couldn't 4587 // be evaluated. 4588 // FIXME: Do we need the FullExpressionRAII object here? 4589 // VisitExprWithCleanups should create one when necessary. 4590 FullExpressionRAII Scope(Info); 4591 if (!EvaluateIgnoredValue(Info, E) || !Scope.destroy()) 4592 return ESR_Failed; 4593 return ESR_Succeeded; 4594 } 4595 4596 Info.FFDiag(S->getBeginLoc()); 4597 return ESR_Failed; 4598 4599 case Stmt::NullStmtClass: 4600 return ESR_Succeeded; 4601 4602 case Stmt::DeclStmtClass: { 4603 const DeclStmt *DS = cast<DeclStmt>(S); 4604 for (const auto *D : DS->decls()) { 4605 // Each declaration initialization is its own full-expression. 4606 FullExpressionRAII Scope(Info); 4607 if (!EvaluateDecl(Info, D) && !Info.noteFailure()) 4608 return ESR_Failed; 4609 if (!Scope.destroy()) 4610 return ESR_Failed; 4611 } 4612 return ESR_Succeeded; 4613 } 4614 4615 case Stmt::ReturnStmtClass: { 4616 const Expr *RetExpr = cast<ReturnStmt>(S)->getRetValue(); 4617 FullExpressionRAII Scope(Info); 4618 if (RetExpr && 4619 !(Result.Slot 4620 ? EvaluateInPlace(Result.Value, Info, *Result.Slot, RetExpr) 4621 : Evaluate(Result.Value, Info, RetExpr))) 4622 return ESR_Failed; 4623 return Scope.destroy() ? ESR_Returned : ESR_Failed; 4624 } 4625 4626 case Stmt::CompoundStmtClass: { 4627 BlockScopeRAII Scope(Info); 4628 4629 const CompoundStmt *CS = cast<CompoundStmt>(S); 4630 for (const auto *BI : CS->body()) { 4631 EvalStmtResult ESR = EvaluateStmt(Result, Info, BI, Case); 4632 if (ESR == ESR_Succeeded) 4633 Case = nullptr; 4634 else if (ESR != ESR_CaseNotFound) { 4635 if (ESR != ESR_Failed && !Scope.destroy()) 4636 return ESR_Failed; 4637 return ESR; 4638 } 4639 } 4640 if (Case) 4641 return ESR_CaseNotFound; 4642 return Scope.destroy() ? ESR_Succeeded : ESR_Failed; 4643 } 4644 4645 case Stmt::IfStmtClass: { 4646 const IfStmt *IS = cast<IfStmt>(S); 4647 4648 // Evaluate the condition, as either a var decl or as an expression. 4649 BlockScopeRAII Scope(Info); 4650 if (const Stmt *Init = IS->getInit()) { 4651 EvalStmtResult ESR = EvaluateStmt(Result, Info, Init); 4652 if (ESR != ESR_Succeeded) { 4653 if (ESR != ESR_Failed && !Scope.destroy()) 4654 return ESR_Failed; 4655 return ESR; 4656 } 4657 } 4658 bool Cond; 4659 if (!EvaluateCond(Info, IS->getConditionVariable(), IS->getCond(), Cond)) 4660 return ESR_Failed; 4661 4662 if (const Stmt *SubStmt = Cond ? IS->getThen() : IS->getElse()) { 4663 EvalStmtResult ESR = EvaluateStmt(Result, Info, SubStmt); 4664 if (ESR != ESR_Succeeded) { 4665 if (ESR != ESR_Failed && !Scope.destroy()) 4666 return ESR_Failed; 4667 return ESR; 4668 } 4669 } 4670 return Scope.destroy() ? 
ESR_Succeeded : ESR_Failed; 4671 } 4672 4673 case Stmt::WhileStmtClass: { 4674 const WhileStmt *WS = cast<WhileStmt>(S); 4675 while (true) { 4676 BlockScopeRAII Scope(Info); 4677 bool Continue; 4678 if (!EvaluateCond(Info, WS->getConditionVariable(), WS->getCond(), 4679 Continue)) 4680 return ESR_Failed; 4681 if (!Continue) 4682 break; 4683 4684 EvalStmtResult ESR = EvaluateLoopBody(Result, Info, WS->getBody()); 4685 if (ESR != ESR_Continue) { 4686 if (ESR != ESR_Failed && !Scope.destroy()) 4687 return ESR_Failed; 4688 return ESR; 4689 } 4690 if (!Scope.destroy()) 4691 return ESR_Failed; 4692 } 4693 return ESR_Succeeded; 4694 } 4695 4696 case Stmt::DoStmtClass: { 4697 const DoStmt *DS = cast<DoStmt>(S); 4698 bool Continue; 4699 do { 4700 EvalStmtResult ESR = EvaluateLoopBody(Result, Info, DS->getBody(), Case); 4701 if (ESR != ESR_Continue) 4702 return ESR; 4703 Case = nullptr; 4704 4705 FullExpressionRAII CondScope(Info); 4706 if (!EvaluateAsBooleanCondition(DS->getCond(), Continue, Info) || 4707 !CondScope.destroy()) 4708 return ESR_Failed; 4709 } while (Continue); 4710 return ESR_Succeeded; 4711 } 4712 4713 case Stmt::ForStmtClass: { 4714 const ForStmt *FS = cast<ForStmt>(S); 4715 BlockScopeRAII ForScope(Info); 4716 if (FS->getInit()) { 4717 EvalStmtResult ESR = EvaluateStmt(Result, Info, FS->getInit()); 4718 if (ESR != ESR_Succeeded) { 4719 if (ESR != ESR_Failed && !ForScope.destroy()) 4720 return ESR_Failed; 4721 return ESR; 4722 } 4723 } 4724 while (true) { 4725 BlockScopeRAII IterScope(Info); 4726 bool Continue = true; 4727 if (FS->getCond() && !EvaluateCond(Info, FS->getConditionVariable(), 4728 FS->getCond(), Continue)) 4729 return ESR_Failed; 4730 if (!Continue) 4731 break; 4732 4733 EvalStmtResult ESR = EvaluateLoopBody(Result, Info, FS->getBody()); 4734 if (ESR != ESR_Continue) { 4735 if (ESR != ESR_Failed && (!IterScope.destroy() || !ForScope.destroy())) 4736 return ESR_Failed; 4737 return ESR; 4738 } 4739 4740 if (FS->getInc()) { 4741 FullExpressionRAII IncScope(Info); 4742 if (!EvaluateIgnoredValue(Info, FS->getInc()) || !IncScope.destroy()) 4743 return ESR_Failed; 4744 } 4745 4746 if (!IterScope.destroy()) 4747 return ESR_Failed; 4748 } 4749 return ForScope.destroy() ? ESR_Succeeded : ESR_Failed; 4750 } 4751 4752 case Stmt::CXXForRangeStmtClass: { 4753 const CXXForRangeStmt *FS = cast<CXXForRangeStmt>(S); 4754 BlockScopeRAII Scope(Info); 4755 4756 // Evaluate the init-statement if present. 4757 if (FS->getInit()) { 4758 EvalStmtResult ESR = EvaluateStmt(Result, Info, FS->getInit()); 4759 if (ESR != ESR_Succeeded) { 4760 if (ESR != ESR_Failed && !Scope.destroy()) 4761 return ESR_Failed; 4762 return ESR; 4763 } 4764 } 4765 4766 // Initialize the __range variable. 4767 EvalStmtResult ESR = EvaluateStmt(Result, Info, FS->getRangeStmt()); 4768 if (ESR != ESR_Succeeded) { 4769 if (ESR != ESR_Failed && !Scope.destroy()) 4770 return ESR_Failed; 4771 return ESR; 4772 } 4773 4774 // Create the __begin and __end iterators. 4775 ESR = EvaluateStmt(Result, Info, FS->getBeginStmt()); 4776 if (ESR != ESR_Succeeded) { 4777 if (ESR != ESR_Failed && !Scope.destroy()) 4778 return ESR_Failed; 4779 return ESR; 4780 } 4781 ESR = EvaluateStmt(Result, Info, FS->getEndStmt()); 4782 if (ESR != ESR_Succeeded) { 4783 if (ESR != ESR_Failed && !Scope.destroy()) 4784 return ESR_Failed; 4785 return ESR; 4786 } 4787 4788 while (true) { 4789 // Condition: __begin != __end. 
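      // As an illustrative reminder (a sketch of the desugaring Sema builds,
      // not a verbatim quote of the standard), the statements evaluated above
      // and below correspond roughly to:
      //
      //   {
      //     auto &&__range = range-expr;
      //     auto __begin = begin-expr;
      //     auto __end = end-expr;
      //     for (; __begin != __end; ++__begin) {
      //       for-range-declaration = *__begin;
      //       statement
      //     }
      //   }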
4790 { 4791 bool Continue = true; 4792 FullExpressionRAII CondExpr(Info); 4793 if (!EvaluateAsBooleanCondition(FS->getCond(), Continue, Info)) 4794 return ESR_Failed; 4795 if (!Continue) 4796 break; 4797 } 4798 4799 // User's variable declaration, initialized by *__begin. 4800 BlockScopeRAII InnerScope(Info); 4801 ESR = EvaluateStmt(Result, Info, FS->getLoopVarStmt()); 4802 if (ESR != ESR_Succeeded) { 4803 if (ESR != ESR_Failed && (!InnerScope.destroy() || !Scope.destroy())) 4804 return ESR_Failed; 4805 return ESR; 4806 } 4807 4808 // Loop body. 4809 ESR = EvaluateLoopBody(Result, Info, FS->getBody()); 4810 if (ESR != ESR_Continue) { 4811 if (ESR != ESR_Failed && (!InnerScope.destroy() || !Scope.destroy())) 4812 return ESR_Failed; 4813 return ESR; 4814 } 4815 4816 // Increment: ++__begin 4817 if (!EvaluateIgnoredValue(Info, FS->getInc())) 4818 return ESR_Failed; 4819 4820 if (!InnerScope.destroy()) 4821 return ESR_Failed; 4822 } 4823 4824 return Scope.destroy() ? ESR_Succeeded : ESR_Failed; 4825 } 4826 4827 case Stmt::SwitchStmtClass: 4828 return EvaluateSwitch(Result, Info, cast<SwitchStmt>(S)); 4829 4830 case Stmt::ContinueStmtClass: 4831 return ESR_Continue; 4832 4833 case Stmt::BreakStmtClass: 4834 return ESR_Break; 4835 4836 case Stmt::LabelStmtClass: 4837 return EvaluateStmt(Result, Info, cast<LabelStmt>(S)->getSubStmt(), Case); 4838 4839 case Stmt::AttributedStmtClass: 4840 // As a general principle, C++11 attributes can be ignored without 4841 // any semantic impact. 4842 return EvaluateStmt(Result, Info, cast<AttributedStmt>(S)->getSubStmt(), 4843 Case); 4844 4845 case Stmt::CaseStmtClass: 4846 case Stmt::DefaultStmtClass: 4847 return EvaluateStmt(Result, Info, cast<SwitchCase>(S)->getSubStmt(), Case); 4848 case Stmt::CXXTryStmtClass: 4849 // Evaluate try blocks by evaluating all sub statements. 4850 return EvaluateStmt(Result, Info, cast<CXXTryStmt>(S)->getTryBlock(), Case); 4851 } 4852 } 4853 4854 /// CheckTrivialDefaultConstructor - Check whether a constructor is a trivial 4855 /// default constructor. If so, we'll fold it whether or not it's marked as 4856 /// constexpr. If it is marked as constexpr, we will never implicitly define it, 4857 /// so we need special handling. 4858 static bool CheckTrivialDefaultConstructor(EvalInfo &Info, SourceLocation Loc, 4859 const CXXConstructorDecl *CD, 4860 bool IsValueInitialization) { 4861 if (!CD->isTrivial() || !CD->isDefaultConstructor()) 4862 return false; 4863 4864 // Value-initialization does not call a trivial default constructor, so such a 4865 // call is a core constant expression whether or not the constructor is 4866 // constexpr. 4867 if (!CD->isConstexpr() && !IsValueInitialization) { 4868 if (Info.getLangOpts().CPlusPlus11) { 4869 // FIXME: If DiagDecl is an implicitly-declared special member function, 4870 // we should be much more explicit about why it's not constexpr. 4871 Info.CCEDiag(Loc, diag::note_constexpr_invalid_function, 1) 4872 << /*IsConstexpr*/0 << /*IsConstructor*/1 << CD; 4873 Info.Note(CD->getLocation(), diag::note_declared_at); 4874 } else { 4875 Info.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr); 4876 } 4877 } 4878 return true; 4879 } 4880 4881 /// CheckConstexprFunction - Check that a function can be called in a constant 4882 /// expression. 
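///
/// For instance (illustrative, not from the test suite): a constexpr function
/// may be called before it is defined while we are only checking a *potential*
/// constant expression, but actually evaluating such a call fails:
///
///   constexpr int f(int);                    // declared, not yet defined
///   constexpr int g(int n) { return f(n); }  // OK to check
///   constexpr int k = g(0);                  // error: 'f' is not defined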
4883 static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc, 4884 const FunctionDecl *Declaration, 4885 const FunctionDecl *Definition, 4886 const Stmt *Body) { 4887 // Potential constant expressions can contain calls to declared, but not yet 4888 // defined, constexpr functions. 4889 if (Info.checkingPotentialConstantExpression() && !Definition && 4890 Declaration->isConstexpr()) 4891 return false; 4892 4893 // Bail out if the function declaration itself is invalid. We will 4894 // have produced a relevant diagnostic while parsing it, so just 4895 // note the problematic sub-expression. 4896 if (Declaration->isInvalidDecl()) { 4897 Info.FFDiag(CallLoc, diag::note_invalid_subexpr_in_const_expr); 4898 return false; 4899 } 4900 4901 // DR1872: An instantiated virtual constexpr function can't be called in a 4902 // constant expression (prior to C++20). We can still constant-fold such a 4903 // call. 4904 if (!Info.Ctx.getLangOpts().CPlusPlus2a && isa<CXXMethodDecl>(Declaration) && 4905 cast<CXXMethodDecl>(Declaration)->isVirtual()) 4906 Info.CCEDiag(CallLoc, diag::note_constexpr_virtual_call); 4907 4908 if (Definition && Definition->isInvalidDecl()) { 4909 Info.FFDiag(CallLoc, diag::note_invalid_subexpr_in_const_expr); 4910 return false; 4911 } 4912 4913 // Can we evaluate this function call? 4914 if (Definition && Definition->isConstexpr() && Body) 4915 return true; 4916 4917 if (Info.getLangOpts().CPlusPlus11) { 4918 const FunctionDecl *DiagDecl = Definition ? Definition : Declaration; 4919 4920 // If this function is not constexpr because it is an inherited 4921 // non-constexpr constructor, diagnose that directly. 4922 auto *CD = dyn_cast<CXXConstructorDecl>(DiagDecl); 4923 if (CD && CD->isInheritingConstructor()) { 4924 auto *Inherited = CD->getInheritedConstructor().getConstructor(); 4925 if (!Inherited->isConstexpr()) 4926 DiagDecl = CD = Inherited; 4927 } 4928 4929 // FIXME: If DiagDecl is an implicitly-declared special member function 4930 // or an inheriting constructor, we should be much more explicit about why 4931 // it's not constexpr. 4932 if (CD && CD->isInheritingConstructor()) 4933 Info.FFDiag(CallLoc, diag::note_constexpr_invalid_inhctor, 1) 4934 << CD->getInheritedConstructor().getConstructor()->getParent(); 4935 else 4936 Info.FFDiag(CallLoc, diag::note_constexpr_invalid_function, 1) 4937 << DiagDecl->isConstexpr() << (bool)CD << DiagDecl; 4938 Info.Note(DiagDecl->getLocation(), diag::note_declared_at); 4939 } else { 4940 Info.FFDiag(CallLoc, diag::note_invalid_subexpr_in_const_expr); 4941 } 4942 return false; 4943 } 4944 4945 namespace { 4946 struct CheckDynamicTypeHandler { 4947 AccessKinds AccessKind; 4948 typedef bool result_type; 4949 bool failed() { return false; } 4950 bool found(APValue &Subobj, QualType SubobjType) { return true; } 4951 bool found(APSInt &Value, QualType SubobjType) { return true; } 4952 bool found(APFloat &Value, QualType SubobjType) { return true; } 4953 }; 4954 } // end anonymous namespace 4955 4956 /// Check that we can access the notional vptr of an object / determine its 4957 /// dynamic type. 
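///
/// For instance (illustrative): given 'struct S { virtual void f(); } s;', a
/// polymorphic operation on the one-past-the-end pointer '&s + 1' is rejected
/// here, since there is no object (and hence no notional vptr) to inspect.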
4958 static bool checkDynamicType(EvalInfo &Info, const Expr *E, const LValue &This, 4959 AccessKinds AK, bool Polymorphic) { 4960 if (This.Designator.Invalid) 4961 return false; 4962 4963 CompleteObject Obj = findCompleteObject(Info, E, AK, This, QualType()); 4964 4965 if (!Obj) 4966 return false; 4967 4968 if (!Obj.Value) { 4969 // The object is not usable in constant expressions, so we can't inspect 4970 // its value to see if it's in-lifetime or what the active union members 4971 // are. We can still check for a one-past-the-end lvalue. 4972 if (This.Designator.isOnePastTheEnd() || 4973 This.Designator.isMostDerivedAnUnsizedArray()) { 4974 Info.FFDiag(E, This.Designator.isOnePastTheEnd() 4975 ? diag::note_constexpr_access_past_end 4976 : diag::note_constexpr_access_unsized_array) 4977 << AK; 4978 return false; 4979 } else if (Polymorphic) { 4980 // Conservatively refuse to perform a polymorphic operation if we would 4981 // not be able to read a notional 'vptr' value. 4982 APValue Val; 4983 This.moveInto(Val); 4984 QualType StarThisType = 4985 Info.Ctx.getLValueReferenceType(This.Designator.getType(Info.Ctx)); 4986 Info.FFDiag(E, diag::note_constexpr_polymorphic_unknown_dynamic_type) 4987 << AK << Val.getAsString(Info.Ctx, StarThisType); 4988 return false; 4989 } 4990 return true; 4991 } 4992 4993 CheckDynamicTypeHandler Handler{AK}; 4994 return Obj && findSubobject(Info, E, Obj, This.Designator, Handler); 4995 } 4996 4997 /// Check that the pointee of the 'this' pointer in a member function call is 4998 /// either within its lifetime or in its period of construction or destruction. 4999 static bool 5000 checkNonVirtualMemberCallThisPointer(EvalInfo &Info, const Expr *E, 5001 const LValue &This, 5002 const CXXMethodDecl *NamedMember) { 5003 return checkDynamicType( 5004 Info, E, This, 5005 isa<CXXDestructorDecl>(NamedMember) ? AK_Destroy : AK_MemberCall, false); 5006 } 5007 5008 struct DynamicType { 5009 /// The dynamic class type of the object. 5010 const CXXRecordDecl *Type; 5011 /// The corresponding path length in the lvalue. 5012 unsigned PathLength; 5013 }; 5014 5015 static const CXXRecordDecl *getBaseClassType(SubobjectDesignator &Designator, 5016 unsigned PathLength) { 5017 assert(PathLength >= Designator.MostDerivedPathLength && PathLength <= 5018 Designator.Entries.size() && "invalid path length"); 5019 return (PathLength == Designator.MostDerivedPathLength) 5020 ? Designator.MostDerivedType->getAsCXXRecordDecl() 5021 : getAsBaseClass(Designator.Entries[PathLength - 1]); 5022 } 5023 5024 /// Determine the dynamic type of an object. 5025 static Optional<DynamicType> ComputeDynamicType(EvalInfo &Info, const Expr *E, 5026 LValue &This, AccessKinds AK) { 5027 // If we don't have an lvalue denoting an object of class type, there is no 5028 // meaningful dynamic type. (We consider objects of non-class type to have no 5029 // dynamic type.) 5030 if (!checkDynamicType(Info, E, This, AK, true)) 5031 return None; 5032 5033 // Refuse to compute a dynamic type in the presence of virtual bases. This 5034 // shouldn't happen other than in constant-folding situations, since literal 5035 // types can't have virtual bases. 5036 // 5037 // Note that consumers of DynamicType assume that the type has no virtual 5038 // bases, and will need modifications if this restriction is relaxed. 
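  // As an illustrative example of what the construction-phase check below is
  // for (C++20, not taken from the test suite): while a base subobject is
  // being constructed, the dynamic type is that base class, so for
  //
  //   struct B {
  //     constexpr B() : K(f()) {}
  //     constexpr virtual int f() const { return 1; }
  //     int K;
  //   };
  //   struct D : B {
  //     constexpr int f() const override { return 2; }
  //   };
  //
  // D().K is 1 in a constant expression: while B's constructor runs, the
  // dynamic type of *this is B, which is what the loop below determines.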
5039 const CXXRecordDecl *Class = 5040 This.Designator.MostDerivedType->getAsCXXRecordDecl(); 5041 if (!Class || Class->getNumVBases()) { 5042 Info.FFDiag(E); 5043 return None; 5044 } 5045 5046 // FIXME: For very deep class hierarchies, it might be beneficial to use a 5047 // binary search here instead. But the overwhelmingly common case is that 5048 // we're not in the middle of a constructor, so it probably doesn't matter 5049 // in practice. 5050 ArrayRef<APValue::LValuePathEntry> Path = This.Designator.Entries; 5051 for (unsigned PathLength = This.Designator.MostDerivedPathLength; 5052 PathLength <= Path.size(); ++PathLength) { 5053 switch (Info.isEvaluatingCtorDtor(This.getLValueBase(), 5054 Path.slice(0, PathLength))) { 5055 case ConstructionPhase::Bases: 5056 case ConstructionPhase::DestroyingBases: 5057 // We're constructing or destroying a base class. This is not the dynamic 5058 // type. 5059 break; 5060 5061 case ConstructionPhase::None: 5062 case ConstructionPhase::AfterBases: 5063 case ConstructionPhase::Destroying: 5064 // We've finished constructing the base classes and not yet started 5065 // destroying them again, so this is the dynamic type. 5066 return DynamicType{getBaseClassType(This.Designator, PathLength), 5067 PathLength}; 5068 } 5069 } 5070 5071 // CWG issue 1517: we're constructing a base class of the object described by 5072 // 'This', so that object has not yet begun its period of construction and 5073 // any polymorphic operation on it results in undefined behavior. 5074 Info.FFDiag(E); 5075 return None; 5076 } 5077 5078 /// Perform virtual dispatch. 5079 static const CXXMethodDecl *HandleVirtualDispatch( 5080 EvalInfo &Info, const Expr *E, LValue &This, const CXXMethodDecl *Found, 5081 llvm::SmallVectorImpl<QualType> &CovariantAdjustmentPath) { 5082 Optional<DynamicType> DynType = ComputeDynamicType( 5083 Info, E, This, 5084 isa<CXXDestructorDecl>(Found) ? AK_Destroy : AK_MemberCall); 5085 if (!DynType) 5086 return nullptr; 5087 5088 // Find the final overrider. It must be declared in one of the classes on the 5089 // path from the dynamic type to the static type. 5090 // FIXME: If we ever allow literal types to have virtual base classes, that 5091 // won't be true. 5092 const CXXMethodDecl *Callee = Found; 5093 unsigned PathLength = DynType->PathLength; 5094 for (/**/; PathLength <= This.Designator.Entries.size(); ++PathLength) { 5095 const CXXRecordDecl *Class = getBaseClassType(This.Designator, PathLength); 5096 const CXXMethodDecl *Overrider = 5097 Found->getCorrespondingMethodDeclaredInClass(Class, false); 5098 if (Overrider) { 5099 Callee = Overrider; 5100 break; 5101 } 5102 } 5103 5104 // C++2a [class.abstract]p6: 5105 // the effect of making a virtual call to a pure virtual function [...] is 5106 // undefined 5107 if (Callee->isPure()) { 5108 Info.FFDiag(E, diag::note_constexpr_pure_virtual_call, 1) << Callee; 5109 Info.Note(Callee->getLocation(), diag::note_declared_at); 5110 return nullptr; 5111 } 5112 5113 // If necessary, walk the rest of the path to determine the sequence of 5114 // covariant adjustment steps to apply. 
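  // For instance (illustrative): with
  //
  //   struct A { virtual A *clone(); };
  //   struct B : A { B *clone() override; };
  //   struct C : B { C *clone() override; };
  //
  // a virtual call to clone() through an A* may select C::clone, whose C*
  // result must then be adjusted back to B* and finally to A*; the path
  // collected below records those intermediate return types.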
5115 if (!Info.Ctx.hasSameUnqualifiedType(Callee->getReturnType(), 5116 Found->getReturnType())) { 5117 CovariantAdjustmentPath.push_back(Callee->getReturnType()); 5118 for (unsigned CovariantPathLength = PathLength + 1; 5119 CovariantPathLength != This.Designator.Entries.size(); 5120 ++CovariantPathLength) { 5121 const CXXRecordDecl *NextClass = 5122 getBaseClassType(This.Designator, CovariantPathLength); 5123 const CXXMethodDecl *Next = 5124 Found->getCorrespondingMethodDeclaredInClass(NextClass, false); 5125 if (Next && !Info.Ctx.hasSameUnqualifiedType( 5126 Next->getReturnType(), CovariantAdjustmentPath.back())) 5127 CovariantAdjustmentPath.push_back(Next->getReturnType()); 5128 } 5129 if (!Info.Ctx.hasSameUnqualifiedType(Found->getReturnType(), 5130 CovariantAdjustmentPath.back())) 5131 CovariantAdjustmentPath.push_back(Found->getReturnType()); 5132 } 5133 5134 // Perform 'this' adjustment. 5135 if (!CastToDerivedClass(Info, E, This, Callee->getParent(), PathLength)) 5136 return nullptr; 5137 5138 return Callee; 5139 } 5140 5141 /// Perform the adjustment from a value returned by a virtual function to 5142 /// a value of the statically expected type, which may be a pointer or 5143 /// reference to a base class of the returned type. 5144 static bool HandleCovariantReturnAdjustment(EvalInfo &Info, const Expr *E, 5145 APValue &Result, 5146 ArrayRef<QualType> Path) { 5147 assert(Result.isLValue() && 5148 "unexpected kind of APValue for covariant return"); 5149 if (Result.isNullPointer()) 5150 return true; 5151 5152 LValue LVal; 5153 LVal.setFrom(Info.Ctx, Result); 5154 5155 const CXXRecordDecl *OldClass = Path[0]->getPointeeCXXRecordDecl(); 5156 for (unsigned I = 1; I != Path.size(); ++I) { 5157 const CXXRecordDecl *NewClass = Path[I]->getPointeeCXXRecordDecl(); 5158 assert(OldClass && NewClass && "unexpected kind of covariant return"); 5159 if (OldClass != NewClass && 5160 !CastToBaseClass(Info, E, LVal, OldClass, NewClass)) 5161 return false; 5162 OldClass = NewClass; 5163 } 5164 5165 LVal.moveInto(Result); 5166 return true; 5167 } 5168 5169 /// Determine whether \p Base, which is known to be a direct base class of 5170 /// \p Derived, is a public base class. 5171 static bool isBaseClassPublic(const CXXRecordDecl *Derived, 5172 const CXXRecordDecl *Base) { 5173 for (const CXXBaseSpecifier &BaseSpec : Derived->bases()) { 5174 auto *BaseClass = BaseSpec.getType()->getAsCXXRecordDecl(); 5175 if (BaseClass && declaresSameEntity(BaseClass, Base)) 5176 return BaseSpec.getAccessSpecifier() == AS_public; 5177 } 5178 llvm_unreachable("Base is not a direct base of Derived"); 5179 } 5180 5181 /// Apply the given dynamic cast operation on the provided lvalue. 5182 /// 5183 /// This implements the hard case of dynamic_cast, requiring a "runtime check" 5184 /// to find a suitable target subobject. 5185 static bool HandleDynamicCast(EvalInfo &Info, const ExplicitCastExpr *E, 5186 LValue &Ptr) { 5187 // We can't do anything with a non-symbolic pointer value. 5188 SubobjectDesignator &D = Ptr.Designator; 5189 if (D.Invalid) 5190 return false; 5191 5192 // C++ [expr.dynamic.cast]p6: 5193 // If v is a null pointer value, the result is a null pointer value. 5194 if (Ptr.isNullPointer() && !E->isGLValue()) 5195 return true; 5196 5197 // For all the other cases, we need the pointer to point to an object within 5198 // its lifetime / period of construction / destruction, and we need to know 5199 // its dynamic type. 
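  // For instance (illustrative, C++20):
  //
  //   struct A { virtual ~A() = default; };
  //   struct B : A {};
  //   constexpr bool test() {
  //     B b;
  //     A *a = &b;
  //     return dynamic_cast<B *>(a) == &b;  // dynamic type of *a is B
  //   }
  //   static_assert(test());
  //
  // whereas a cast to an unrelated class would take the RuntimeCheckFailed
  // path below (a null result for a pointer cast, a diagnostic for a
  // reference cast).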
5200 Optional<DynamicType> DynType = 5201 ComputeDynamicType(Info, E, Ptr, AK_DynamicCast); 5202 if (!DynType) 5203 return false; 5204 5205 // C++ [expr.dynamic.cast]p7: 5206 // If T is "pointer to cv void", then the result is a pointer to the most 5207 // derived object 5208 if (E->getType()->isVoidPointerType()) 5209 return CastToDerivedClass(Info, E, Ptr, DynType->Type, DynType->PathLength); 5210 5211 const CXXRecordDecl *C = E->getTypeAsWritten()->getPointeeCXXRecordDecl(); 5212 assert(C && "dynamic_cast target is not void pointer nor class"); 5213 CanQualType CQT = Info.Ctx.getCanonicalType(Info.Ctx.getRecordType(C)); 5214 5215 auto RuntimeCheckFailed = [&] (CXXBasePaths *Paths) { 5216 // C++ [expr.dynamic.cast]p9: 5217 if (!E->isGLValue()) { 5218 // The value of a failed cast to pointer type is the null pointer value 5219 // of the required result type. 5220 Ptr.setNull(Info.Ctx, E->getType()); 5221 return true; 5222 } 5223 5224 // A failed cast to reference type throws [...] std::bad_cast. 5225 unsigned DiagKind; 5226 if (!Paths && (declaresSameEntity(DynType->Type, C) || 5227 DynType->Type->isDerivedFrom(C))) 5228 DiagKind = 0; 5229 else if (!Paths || Paths->begin() == Paths->end()) 5230 DiagKind = 1; 5231 else if (Paths->isAmbiguous(CQT)) 5232 DiagKind = 2; 5233 else { 5234 assert(Paths->front().Access != AS_public && "why did the cast fail?"); 5235 DiagKind = 3; 5236 } 5237 Info.FFDiag(E, diag::note_constexpr_dynamic_cast_to_reference_failed) 5238 << DiagKind << Ptr.Designator.getType(Info.Ctx) 5239 << Info.Ctx.getRecordType(DynType->Type) 5240 << E->getType().getUnqualifiedType(); 5241 return false; 5242 }; 5243 5244 // Runtime check, phase 1: 5245 // Walk from the base subobject towards the derived object looking for the 5246 // target type. 5247 for (int PathLength = Ptr.Designator.Entries.size(); 5248 PathLength >= (int)DynType->PathLength; --PathLength) { 5249 const CXXRecordDecl *Class = getBaseClassType(Ptr.Designator, PathLength); 5250 if (declaresSameEntity(Class, C)) 5251 return CastToDerivedClass(Info, E, Ptr, Class, PathLength); 5252 // We can only walk across public inheritance edges. 5253 if (PathLength > (int)DynType->PathLength && 5254 !isBaseClassPublic(getBaseClassType(Ptr.Designator, PathLength - 1), 5255 Class)) 5256 return RuntimeCheckFailed(nullptr); 5257 } 5258 5259 // Runtime check, phase 2: 5260 // Search the dynamic type for an unambiguous public base of type C. 5261 CXXBasePaths Paths(/*FindAmbiguities=*/true, 5262 /*RecordPaths=*/true, /*DetectVirtual=*/false); 5263 if (DynType->Type->isDerivedFrom(C, Paths) && !Paths.isAmbiguous(CQT) && 5264 Paths.front().Access == AS_public) { 5265 // Downcast to the dynamic type... 5266 if (!CastToDerivedClass(Info, E, Ptr, DynType->Type, DynType->PathLength)) 5267 return false; 5268 // ... then upcast to the chosen base class subobject. 5269 for (CXXBasePathElement &Elem : Paths.front()) 5270 if (!HandleLValueBase(Info, E, Ptr, Elem.Class, Elem.Base)) 5271 return false; 5272 return true; 5273 } 5274 5275 // Otherwise, the runtime check fails. 5276 return RuntimeCheckFailed(&Paths); 5277 } 5278 5279 namespace { 5280 struct StartLifetimeOfUnionMemberHandler { 5281 const FieldDecl *Field; 5282 5283 static const AccessKinds AccessKind = AK_Assign; 5284 5285 typedef bool result_type; 5286 bool failed() { return false; } 5287 bool found(APValue &Subobj, QualType SubobjType) { 5288 // We are supposed to perform no initialization but begin the lifetime of 5289 // the object. 
We interpret that as meaning to do what default 5290 // initialization of the object would do if all constructors involved were 5291 // trivial: 5292 // * All base, non-variant member, and array element subobjects' lifetimes 5293 // begin 5294 // * No variant members' lifetimes begin 5295 // * All scalar subobjects whose lifetimes begin have indeterminate values 5296 assert(SubobjType->isUnionType()); 5297 if (!declaresSameEntity(Subobj.getUnionField(), Field) || 5298 !Subobj.getUnionValue().hasValue()) 5299 Subobj.setUnion(Field, getDefaultInitValue(Field->getType())); 5300 return true; 5301 } 5302 bool found(APSInt &Value, QualType SubobjType) { 5303 llvm_unreachable("wrong value kind for union object"); 5304 } 5305 bool found(APFloat &Value, QualType SubobjType) { 5306 llvm_unreachable("wrong value kind for union object"); 5307 } 5308 }; 5309 } // end anonymous namespace 5310 5311 const AccessKinds StartLifetimeOfUnionMemberHandler::AccessKind; 5312 5313 /// Handle a builtin simple-assignment or a call to a trivial assignment 5314 /// operator whose left-hand side might involve a union member access. If it 5315 /// does, implicitly start the lifetime of any accessed union elements per 5316 /// C++20 [class.union]5. 5317 static bool HandleUnionActiveMemberChange(EvalInfo &Info, const Expr *LHSExpr, 5318 const LValue &LHS) { 5319 if (LHS.InvalidBase || LHS.Designator.Invalid) 5320 return false; 5321 5322 llvm::SmallVector<std::pair<unsigned, const FieldDecl*>, 4> UnionPathLengths; 5323 // C++ [class.union]p5: 5324 // define the set S(E) of subexpressions of E as follows: 5325 unsigned PathLength = LHS.Designator.Entries.size(); 5326 for (const Expr *E = LHSExpr; E != nullptr;) { 5327 // -- If E is of the form A.B, S(E) contains the elements of S(A)... 5328 if (auto *ME = dyn_cast<MemberExpr>(E)) { 5329 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 5330 // Note that we can't implicitly start the lifetime of a reference, 5331 // so we don't need to proceed any further if we reach one. 5332 if (!FD || FD->getType()->isReferenceType()) 5333 break; 5334 5335 // ... and also contains A.B if B names a union member ... 5336 if (FD->getParent()->isUnion()) { 5337 // ... of a non-class, non-array type, or of a class type with a 5338 // trivial default constructor that is not deleted, or an array of 5339 // such types. 5340 auto *RD = 5341 FD->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl(); 5342 if (!RD || RD->hasTrivialDefaultConstructor()) 5343 UnionPathLengths.push_back({PathLength - 1, FD}); 5344 } 5345 5346 E = ME->getBase(); 5347 --PathLength; 5348 assert(declaresSameEntity(FD, 5349 LHS.Designator.Entries[PathLength] 5350 .getAsBaseOrMember().getPointer())); 5351 5352 // -- If E is of the form A[B] and is interpreted as a built-in array 5353 // subscripting operator, S(E) is [S(the array operand, if any)]. 5354 } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(E)) { 5355 // Step over an ArrayToPointerDecay implicit cast. 5356 auto *Base = ASE->getBase()->IgnoreImplicit(); 5357 if (!Base->getType()->isArrayType()) 5358 break; 5359 5360 E = Base; 5361 --PathLength; 5362 5363 } else if (auto *ICE = dyn_cast<ImplicitCastExpr>(E)) { 5364 // Step over a derived-to-base conversion. 5365 E = ICE->getSubExpr(); 5366 if (ICE->getCastKind() == CK_NoOp) 5367 continue; 5368 if (ICE->getCastKind() != CK_DerivedToBase && 5369 ICE->getCastKind() != CK_UncheckedDerivedToBase) 5370 break; 5371 // Walk path backwards as we walk up from the base to the derived class. 
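            // For instance (illustrative): given
            //
            //   struct A { union { int x; int y; } u; };
            //   struct B : A {};
            //
            // the left-hand side of 'b.u.x = 1' (with 'b' of type B) was
            // recorded above as touching the union member 'x'; the loop below
            // then pops the implicit B -> A base-path entry so that PathLength
            // stays in sync with the designator of the left-hand side.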
5372 for (const CXXBaseSpecifier *Elt : llvm::reverse(ICE->path())) { 5373 --PathLength; 5374 (void)Elt; 5375 assert(declaresSameEntity(Elt->getType()->getAsCXXRecordDecl(), 5376 LHS.Designator.Entries[PathLength] 5377 .getAsBaseOrMember().getPointer())); 5378 } 5379 5380 // -- Otherwise, S(E) is empty. 5381 } else { 5382 break; 5383 } 5384 } 5385 5386 // Common case: no unions' lifetimes are started. 5387 if (UnionPathLengths.empty()) 5388 return true; 5389 5390 // if modification of X [would access an inactive union member], an object 5391 // of the type of X is implicitly created 5392 CompleteObject Obj = 5393 findCompleteObject(Info, LHSExpr, AK_Assign, LHS, LHSExpr->getType()); 5394 if (!Obj) 5395 return false; 5396 for (std::pair<unsigned, const FieldDecl *> LengthAndField : 5397 llvm::reverse(UnionPathLengths)) { 5398 // Form a designator for the union object. 5399 SubobjectDesignator D = LHS.Designator; 5400 D.truncate(Info.Ctx, LHS.Base, LengthAndField.first); 5401 5402 StartLifetimeOfUnionMemberHandler StartLifetime{LengthAndField.second}; 5403 if (!findSubobject(Info, LHSExpr, Obj, D, StartLifetime)) 5404 return false; 5405 } 5406 5407 return true; 5408 } 5409 5410 /// Determine if a class has any fields that might need to be copied by a 5411 /// trivial copy or move operation. 5412 static bool hasFields(const CXXRecordDecl *RD) { 5413 if (!RD || RD->isEmpty()) 5414 return false; 5415 for (auto *FD : RD->fields()) { 5416 if (FD->isUnnamedBitfield()) 5417 continue; 5418 return true; 5419 } 5420 for (auto &Base : RD->bases()) 5421 if (hasFields(Base.getType()->getAsCXXRecordDecl())) 5422 return true; 5423 return false; 5424 } 5425 5426 namespace { 5427 typedef SmallVector<APValue, 8> ArgVector; 5428 } 5429 5430 /// EvaluateArgs - Evaluate the arguments to a function call. 5431 static bool EvaluateArgs(ArrayRef<const Expr *> Args, ArgVector &ArgValues, 5432 EvalInfo &Info, const FunctionDecl *Callee) { 5433 bool Success = true; 5434 llvm::SmallBitVector ForbiddenNullArgs; 5435 if (Callee->hasAttr<NonNullAttr>()) { 5436 ForbiddenNullArgs.resize(Args.size()); 5437 for (const auto *Attr : Callee->specific_attrs<NonNullAttr>()) { 5438 if (!Attr->args_size()) { 5439 ForbiddenNullArgs.set(); 5440 break; 5441 } else 5442 for (auto Idx : Attr->args()) { 5443 unsigned ASTIdx = Idx.getASTIndex(); 5444 if (ASTIdx >= Args.size()) 5445 continue; 5446 ForbiddenNullArgs[ASTIdx] = 1; 5447 } 5448 } 5449 } 5450 for (unsigned Idx = 0; Idx < Args.size(); Idx++) { 5451 if (!Evaluate(ArgValues[Idx], Info, Args[Idx])) { 5452 // If we're checking for a potential constant expression, evaluate all 5453 // initializers even if some of them fail. 5454 if (!Info.noteFailure()) 5455 return false; 5456 Success = false; 5457 } else if (!ForbiddenNullArgs.empty() && 5458 ForbiddenNullArgs[Idx] && 5459 ArgValues[Idx].isLValue() && 5460 ArgValues[Idx].isNullPointer()) { 5461 Info.CCEDiag(Args[Idx], diag::note_non_null_attribute_failed); 5462 if (!Info.noteFailure()) 5463 return false; 5464 Success = false; 5465 } 5466 } 5467 return Success; 5468 } 5469 5470 /// Evaluate a function call. 
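///
/// For a defaulted copy/move assignment operator of a union (or a trivial one
/// of a class that has fields), the body is not executed statement by
/// statement; instead the object representation is copied wholesale as an
/// APValue. Illustrative example (not from the test suite):
///
///   union U { int i; float f; };
///   constexpr int g() { U a{1}; U b{2}; b = a; return b.i; }  // g() == 1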
5471 static bool HandleFunctionCall(SourceLocation CallLoc, 5472 const FunctionDecl *Callee, const LValue *This, 5473 ArrayRef<const Expr*> Args, const Stmt *Body, 5474 EvalInfo &Info, APValue &Result, 5475 const LValue *ResultSlot) { 5476 ArgVector ArgValues(Args.size()); 5477 if (!EvaluateArgs(Args, ArgValues, Info, Callee)) 5478 return false; 5479 5480 if (!Info.CheckCallLimit(CallLoc)) 5481 return false; 5482 5483 CallStackFrame Frame(Info, CallLoc, Callee, This, ArgValues.data()); 5484 5485 // For a trivial copy or move assignment, perform an APValue copy. This is 5486 // essential for unions, where the operations performed by the assignment 5487 // operator cannot be represented as statements. 5488 // 5489 // Skip this for non-union classes with no fields; in that case, the defaulted 5490 // copy/move does not actually read the object. 5491 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Callee); 5492 if (MD && MD->isDefaulted() && 5493 (MD->getParent()->isUnion() || 5494 (MD->isTrivial() && hasFields(MD->getParent())))) { 5495 assert(This && 5496 (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator())); 5497 LValue RHS; 5498 RHS.setFrom(Info.Ctx, ArgValues[0]); 5499 APValue RHSValue; 5500 if (!handleLValueToRValueConversion(Info, Args[0], Args[0]->getType(), RHS, 5501 RHSValue, MD->getParent()->isUnion())) 5502 return false; 5503 if (Info.getLangOpts().CPlusPlus2a && MD->isTrivial() && 5504 !HandleUnionActiveMemberChange(Info, Args[0], *This)) 5505 return false; 5506 if (!handleAssignment(Info, Args[0], *This, MD->getThisType(), 5507 RHSValue)) 5508 return false; 5509 This->moveInto(Result); 5510 return true; 5511 } else if (MD && isLambdaCallOperator(MD)) { 5512 // We're in a lambda; determine the lambda capture field maps unless we're 5513 // just constexpr checking a lambda's call operator. constexpr checking is 5514 // done before the captures have been added to the closure object (unless 5515 // we're inferring constexpr-ness), so we don't have access to them in this 5516 // case. But since we don't need the captures to constexpr check, we can 5517 // just ignore them. 5518 if (!Info.checkingPotentialConstantExpression()) 5519 MD->getParent()->getCaptureFields(Frame.LambdaCaptureFields, 5520 Frame.LambdaThisCaptureField); 5521 } 5522 5523 StmtResult Ret = {Result, ResultSlot}; 5524 EvalStmtResult ESR = EvaluateStmt(Ret, Info, Body); 5525 if (ESR == ESR_Succeeded) { 5526 if (Callee->getReturnType()->isVoidType()) 5527 return true; 5528 Info.FFDiag(Callee->getEndLoc(), diag::note_constexpr_no_return); 5529 } 5530 return ESR == ESR_Returned; 5531 } 5532 5533 /// Evaluate a constructor call. 5534 static bool HandleConstructorCall(const Expr *E, const LValue &This, 5535 APValue *ArgValues, 5536 const CXXConstructorDecl *Definition, 5537 EvalInfo &Info, APValue &Result) { 5538 SourceLocation CallLoc = E->getExprLoc(); 5539 if (!Info.CheckCallLimit(CallLoc)) 5540 return false; 5541 5542 const CXXRecordDecl *RD = Definition->getParent(); 5543 if (RD->getNumVBases()) { 5544 Info.FFDiag(CallLoc, diag::note_constexpr_virtual_base) << RD; 5545 return false; 5546 } 5547 5548 EvalInfo::EvaluatingConstructorRAII EvalObj( 5549 Info, 5550 ObjectUnderConstruction{This.getLValueBase(), This.Designator.Entries}, 5551 RD->getNumBases()); 5552 CallStackFrame Frame(Info, CallLoc, Definition, &This, ArgValues); 5553 5554 // FIXME: Creating an APValue just to hold a nonexistent return value is 5555 // wasteful. 
5556 APValue RetVal; 5557 StmtResult Ret = {RetVal, nullptr}; 5558 5559 // If it's a delegating constructor, delegate. 5560 if (Definition->isDelegatingConstructor()) { 5561 CXXConstructorDecl::init_const_iterator I = Definition->init_begin(); 5562 { 5563 FullExpressionRAII InitScope(Info); 5564 if (!EvaluateInPlace(Result, Info, This, (*I)->getInit()) || 5565 !InitScope.destroy()) 5566 return false; 5567 } 5568 return EvaluateStmt(Ret, Info, Definition->getBody()) != ESR_Failed; 5569 } 5570 5571 // For a trivial copy or move constructor, perform an APValue copy. This is 5572 // essential for unions (or classes with anonymous union members), where the 5573 // operations performed by the constructor cannot be represented by 5574 // ctor-initializers. 5575 // 5576 // Skip this for empty non-union classes; we should not perform an 5577 // lvalue-to-rvalue conversion on them because their copy constructor does not 5578 // actually read them. 5579 if (Definition->isDefaulted() && Definition->isCopyOrMoveConstructor() && 5580 (Definition->getParent()->isUnion() || 5581 (Definition->isTrivial() && hasFields(Definition->getParent())))) { 5582 LValue RHS; 5583 RHS.setFrom(Info.Ctx, ArgValues[0]); 5584 return handleLValueToRValueConversion( 5585 Info, E, Definition->getParamDecl(0)->getType().getNonReferenceType(), 5586 RHS, Result, Definition->getParent()->isUnion()); 5587 } 5588 5589 // Reserve space for the struct members. 5590 if (!RD->isUnion() && !Result.hasValue()) 5591 Result = APValue(APValue::UninitStruct(), RD->getNumBases(), 5592 std::distance(RD->field_begin(), RD->field_end())); 5593 5594 if (RD->isInvalidDecl()) return false; 5595 const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD); 5596 5597 // A scope for temporaries lifetime-extended by reference members. 5598 BlockScopeRAII LifetimeExtendedScope(Info); 5599 5600 bool Success = true; 5601 unsigned BasesSeen = 0; 5602 #ifndef NDEBUG 5603 CXXRecordDecl::base_class_const_iterator BaseIt = RD->bases_begin(); 5604 #endif 5605 CXXRecordDecl::field_iterator FieldIt = RD->field_begin(); 5606 auto SkipToField = [&](FieldDecl *FD, bool Indirect) { 5607 // We might be initializing the same field again if this is an indirect 5608 // field initialization. 5609 if (FieldIt == RD->field_end() || 5610 FieldIt->getFieldIndex() > FD->getFieldIndex()) { 5611 assert(Indirect && "fields out of order?"); 5612 return; 5613 } 5614 5615 // Default-initialize any fields with no explicit initializer. 5616 for (; !declaresSameEntity(*FieldIt, FD); ++FieldIt) { 5617 assert(FieldIt != RD->field_end() && "missing field?"); 5618 if (!FieldIt->isUnnamedBitfield()) 5619 Result.getStructField(FieldIt->getFieldIndex()) = 5620 getDefaultInitValue(FieldIt->getType()); 5621 } 5622 ++FieldIt; 5623 }; 5624 for (const auto *I : Definition->inits()) { 5625 LValue Subobject = This; 5626 LValue SubobjectParent = This; 5627 APValue *Value = &Result; 5628 5629 // Determine the subobject to initialize. 5630 FieldDecl *FD = nullptr; 5631 if (I->isBaseInitializer()) { 5632 QualType BaseType(I->getBaseClass(), 0); 5633 #ifndef NDEBUG 5634 // Non-virtual base classes are initialized in the order in the class 5635 // definition. We have already checked for virtual base classes. 
5636 assert(!BaseIt->isVirtual() && "virtual base for literal type"); 5637 assert(Info.Ctx.hasSameType(BaseIt->getType(), BaseType) && 5638 "base class initializers not in expected order"); 5639 ++BaseIt; 5640 #endif 5641 if (!HandleLValueDirectBase(Info, I->getInit(), Subobject, RD, 5642 BaseType->getAsCXXRecordDecl(), &Layout)) 5643 return false; 5644 Value = &Result.getStructBase(BasesSeen++); 5645 } else if ((FD = I->getMember())) { 5646 if (!HandleLValueMember(Info, I->getInit(), Subobject, FD, &Layout)) 5647 return false; 5648 if (RD->isUnion()) { 5649 Result = APValue(FD); 5650 Value = &Result.getUnionValue(); 5651 } else { 5652 SkipToField(FD, false); 5653 Value = &Result.getStructField(FD->getFieldIndex()); 5654 } 5655 } else if (IndirectFieldDecl *IFD = I->getIndirectMember()) { 5656 // Walk the indirect field decl's chain to find the object to initialize, 5657 // and make sure we've initialized every step along it. 5658 auto IndirectFieldChain = IFD->chain(); 5659 for (auto *C : IndirectFieldChain) { 5660 FD = cast<FieldDecl>(C); 5661 CXXRecordDecl *CD = cast<CXXRecordDecl>(FD->getParent()); 5662 // Switch the union field if it differs. This happens if we had 5663 // preceding zero-initialization, and we're now initializing a union 5664 // subobject other than the first. 5665 // FIXME: In this case, the values of the other subobjects are 5666 // specified, since zero-initialization sets all padding bits to zero. 5667 if (!Value->hasValue() || 5668 (Value->isUnion() && Value->getUnionField() != FD)) { 5669 if (CD->isUnion()) 5670 *Value = APValue(FD); 5671 else 5672 // FIXME: This immediately starts the lifetime of all members of an 5673 // anonymous struct. It would be preferable to strictly start member 5674 // lifetime in initialization order. 5675 *Value = getDefaultInitValue(Info.Ctx.getRecordType(CD)); 5676 } 5677 // Store Subobject as its parent before updating it for the last element 5678 // in the chain. 5679 if (C == IndirectFieldChain.back()) 5680 SubobjectParent = Subobject; 5681 if (!HandleLValueMember(Info, I->getInit(), Subobject, FD)) 5682 return false; 5683 if (CD->isUnion()) 5684 Value = &Value->getUnionValue(); 5685 else { 5686 if (C == IndirectFieldChain.front() && !RD->isUnion()) 5687 SkipToField(FD, true); 5688 Value = &Value->getStructField(FD->getFieldIndex()); 5689 } 5690 } 5691 } else { 5692 llvm_unreachable("unknown base initializer kind"); 5693 } 5694 5695 // Need to override This for implicit field initializers as in this case 5696 // This refers to innermost anonymous struct/union containing initializer, 5697 // not to currently constructed class. 5698 const Expr *Init = I->getInit(); 5699 ThisOverrideRAII ThisOverride(*Info.CurrentCall, &SubobjectParent, 5700 isa<CXXDefaultInitExpr>(Init)); 5701 FullExpressionRAII InitScope(Info); 5702 if (!EvaluateInPlace(*Value, Info, Subobject, Init) || 5703 (FD && FD->isBitField() && 5704 !truncateBitfieldValue(Info, Init, *Value, FD))) { 5705 // If we're checking for a potential constant expression, evaluate all 5706 // initializers even if some of them fail. 5707 if (!Info.noteFailure()) 5708 return false; 5709 Success = false; 5710 } 5711 5712 // This is the point at which the dynamic type of the object becomes this 5713 // class type. 5714 if (I->isBaseInitializer() && BasesSeen == RD->getNumBases()) 5715 EvalObj.finishedConstructingBases(); 5716 } 5717 5718 // Default-initialize any remaining fields. 
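  // For instance (illustrative, C++20): in
  //
  //   struct S { int a; int b; constexpr S() : a(1) {} };
  //
  // only 'a' appears in the constructor's initializer list, so 'b' receives
  // its default-initialized (indeterminate) value here; reading 'b' later in
  // a constant expression would then be diagnosed.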
5719 if (!RD->isUnion()) { 5720 for (; FieldIt != RD->field_end(); ++FieldIt) { 5721 if (!FieldIt->isUnnamedBitfield()) 5722 Result.getStructField(FieldIt->getFieldIndex()) = 5723 getDefaultInitValue(FieldIt->getType()); 5724 } 5725 } 5726 5727 return Success && 5728 EvaluateStmt(Ret, Info, Definition->getBody()) != ESR_Failed && 5729 LifetimeExtendedScope.destroy(); 5730 } 5731 5732 static bool HandleConstructorCall(const Expr *E, const LValue &This, 5733 ArrayRef<const Expr*> Args, 5734 const CXXConstructorDecl *Definition, 5735 EvalInfo &Info, APValue &Result) { 5736 ArgVector ArgValues(Args.size()); 5737 if (!EvaluateArgs(Args, ArgValues, Info, Definition)) 5738 return false; 5739 5740 return HandleConstructorCall(E, This, ArgValues.data(), Definition, 5741 Info, Result); 5742 } 5743 5744 static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc, 5745 const LValue &This, APValue &Value, 5746 QualType T) { 5747 // Objects can only be destroyed while they're within their lifetimes. 5748 // FIXME: We have no representation for whether an object of type nullptr_t 5749 // is in its lifetime; it usually doesn't matter. Perhaps we should model it 5750 // as indeterminate instead? 5751 if (Value.isAbsent() && !T->isNullPtrType()) { 5752 APValue Printable; 5753 This.moveInto(Printable); 5754 Info.FFDiag(CallLoc, diag::note_constexpr_destroy_out_of_lifetime) 5755 << Printable.getAsString(Info.Ctx, Info.Ctx.getLValueReferenceType(T)); 5756 return false; 5757 } 5758 5759 // Invent an expression for location purposes. 5760 // FIXME: We shouldn't need to do this. 5761 OpaqueValueExpr LocE(CallLoc, Info.Ctx.IntTy, VK_RValue); 5762 5763 // For arrays, destroy elements right-to-left. 5764 if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(T)) { 5765 uint64_t Size = CAT->getSize().getZExtValue(); 5766 QualType ElemT = CAT->getElementType(); 5767 5768 LValue ElemLV = This; 5769 ElemLV.addArray(Info, &LocE, CAT); 5770 if (!HandleLValueArrayAdjustment(Info, &LocE, ElemLV, ElemT, Size)) 5771 return false; 5772 5773 // Ensure that we have actual array elements available to destroy; the 5774 // destructors might mutate the value, so we can't run them on the array 5775 // filler. 5776 if (Size && Size > Value.getArrayInitializedElts()) 5777 expandArray(Value, Value.getArraySize() - 1); 5778 5779 for (; Size != 0; --Size) { 5780 APValue &Elem = Value.getArrayInitializedElt(Size - 1); 5781 if (!HandleLValueArrayAdjustment(Info, &LocE, ElemLV, ElemT, -1) || 5782 !HandleDestructionImpl(Info, CallLoc, ElemLV, Elem, ElemT)) 5783 return false; 5784 } 5785 5786 // End the lifetime of this array now. 5787 Value = APValue(); 5788 return true; 5789 } 5790 5791 const CXXRecordDecl *RD = T->getAsCXXRecordDecl(); 5792 if (!RD) { 5793 if (T.isDestructedType()) { 5794 Info.FFDiag(CallLoc, diag::note_constexpr_unsupported_destruction) << T; 5795 return false; 5796 } 5797 5798 Value = APValue(); 5799 return true; 5800 } 5801 5802 if (RD->getNumVBases()) { 5803 Info.FFDiag(CallLoc, diag::note_constexpr_virtual_base) << RD; 5804 return false; 5805 } 5806 5807 const CXXDestructorDecl *DD = RD->getDestructor(); 5808 if (!DD && !RD->hasTrivialDestructor()) { 5809 Info.FFDiag(CallLoc); 5810 return false; 5811 } 5812 5813 if (!DD || DD->isTrivial() || 5814 (RD->isAnonymousStructOrUnion() && RD->isUnion())) { 5815 // A trivial destructor just ends the lifetime of the object. 
Check for 5816 // this case before checking for a body, because we might not bother 5817 // building a body for a trivial destructor. Note that it doesn't matter 5818 // whether the destructor is constexpr in this case; all trivial 5819 // destructors are constexpr. 5820 // 5821 // If an anonymous union would be destroyed, some enclosing destructor must 5822 // have been explicitly defined, and the anonymous union destruction should 5823 // have no effect. 5824 Value = APValue(); 5825 return true; 5826 } 5827 5828 if (!Info.CheckCallLimit(CallLoc)) 5829 return false; 5830 5831 const FunctionDecl *Definition = nullptr; 5832 const Stmt *Body = DD->getBody(Definition); 5833 5834 if (!CheckConstexprFunction(Info, CallLoc, DD, Definition, Body)) 5835 return false; 5836 5837 CallStackFrame Frame(Info, CallLoc, Definition, &This, nullptr); 5838 5839 // We're now in the period of destruction of this object. 5840 unsigned BasesLeft = RD->getNumBases(); 5841 EvalInfo::EvaluatingDestructorRAII EvalObj( 5842 Info, 5843 ObjectUnderConstruction{This.getLValueBase(), This.Designator.Entries}); 5844 if (!EvalObj.DidInsert) { 5845 // C++2a [class.dtor]p19: 5846 // the behavior is undefined if the destructor is invoked for an object 5847 // whose lifetime has ended 5848 // (Note that formally the lifetime ends when the period of destruction 5849 // begins, even though certain uses of the object remain valid until the 5850 // period of destruction ends.) 5851 Info.FFDiag(CallLoc, diag::note_constexpr_double_destroy); 5852 return false; 5853 } 5854 5855 // FIXME: Creating an APValue just to hold a nonexistent return value is 5856 // wasteful. 5857 APValue RetVal; 5858 StmtResult Ret = {RetVal, nullptr}; 5859 if (EvaluateStmt(Ret, Info, Definition->getBody()) == ESR_Failed) 5860 return false; 5861 5862 // A union destructor does not implicitly destroy its members. 5863 if (RD->isUnion()) 5864 return true; 5865 5866 const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD); 5867 5868 // We don't have a good way to iterate fields in reverse, so collect all the 5869 // fields first and then walk them backwards. 5870 SmallVector<FieldDecl*, 16> Fields(RD->field_begin(), RD->field_end()); 5871 for (const FieldDecl *FD : llvm::reverse(Fields)) { 5872 if (FD->isUnnamedBitfield()) 5873 continue; 5874 5875 LValue Subobject = This; 5876 if (!HandleLValueMember(Info, &LocE, Subobject, FD, &Layout)) 5877 return false; 5878 5879 APValue *SubobjectValue = &Value.getStructField(FD->getFieldIndex()); 5880 if (!HandleDestructionImpl(Info, CallLoc, Subobject, *SubobjectValue, 5881 FD->getType())) 5882 return false; 5883 } 5884 5885 if (BasesLeft != 0) 5886 EvalObj.startedDestroyingBases(); 5887 5888 // Destroy base classes in reverse order. 5889 for (const CXXBaseSpecifier &Base : llvm::reverse(RD->bases())) { 5890 --BasesLeft; 5891 5892 QualType BaseType = Base.getType(); 5893 LValue Subobject = This; 5894 if (!HandleLValueDirectBase(Info, &LocE, Subobject, RD, 5895 BaseType->getAsCXXRecordDecl(), &Layout)) 5896 return false; 5897 5898 APValue *SubobjectValue = &Value.getStructBase(BasesLeft); 5899 if (!HandleDestructionImpl(Info, CallLoc, Subobject, *SubobjectValue, 5900 BaseType)) 5901 return false; 5902 } 5903 assert(BasesLeft == 0 && "NumBases was wrong?"); 5904 5905 // The period of destruction ends now. The object is gone. 
  Value = APValue();
  return true;
}

namespace {
struct DestroyObjectHandler {
  EvalInfo &Info;
  const Expr *E;
  const LValue &This;
  const AccessKinds AccessKind;

  typedef bool result_type;
  bool failed() { return false; }
  bool found(APValue &Subobj, QualType SubobjType) {
    return HandleDestructionImpl(Info, E->getExprLoc(), This, Subobj,
                                 SubobjType);
  }
  bool found(APSInt &Value, QualType SubobjType) {
    Info.FFDiag(E, diag::note_constexpr_destroy_complex_elem);
    return false;
  }
  bool found(APFloat &Value, QualType SubobjType) {
    Info.FFDiag(E, diag::note_constexpr_destroy_complex_elem);
    return false;
  }
};
}

/// Perform a destructor or pseudo-destructor call on the given object, which
/// might in general not be a complete object.
static bool HandleDestruction(EvalInfo &Info, const Expr *E,
                              const LValue &This, QualType ThisType) {
  CompleteObject Obj = findCompleteObject(Info, E, AK_Destroy, This, ThisType);
  DestroyObjectHandler Handler = {Info, E, This, AK_Destroy};
  return Obj && findSubobject(Info, E, Obj, This.Designator, Handler);
}

/// Destroy and end the lifetime of the given complete object.
static bool HandleDestruction(EvalInfo &Info, SourceLocation Loc,
                              APValue::LValueBase LVBase, APValue &Value,
                              QualType T) {
  // If we've had an unmodeled side-effect, we can't rely on mutable state
  // (such as the object we're about to destroy) being correct.
  if (Info.EvalStatus.HasSideEffects)
    return false;

  LValue LV;
  LV.set({LVBase});
  return HandleDestructionImpl(Info, Loc, LV, Value, T);
}

/// Perform a call to 'operator new' or to '__builtin_operator_new'.
static bool HandleOperatorNewCall(EvalInfo &Info, const CallExpr *E,
                                  LValue &Result) {
  if (Info.checkingPotentialConstantExpression() ||
      Info.SpeculativeEvaluationDepth)
    return false;

  // This is permitted only within a call to std::allocator<T>::allocate.
  auto Caller = Info.getStdAllocatorCaller("allocate");
  if (!Caller) {
    Info.FFDiag(E->getExprLoc(), Info.getLangOpts().CPlusPlus2a
                                     ? diag::note_constexpr_new_untyped
                                     : diag::note_constexpr_new);
    return false;
  }

  QualType ElemType = Caller.ElemType;
  if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
    Info.FFDiag(E->getExprLoc(),
                diag::note_constexpr_new_not_complete_object_type)
        << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
    return false;
  }

  APSInt ByteSize;
  if (!EvaluateInteger(E->getArg(0), ByteSize, Info))
    return false;
  bool IsNothrow = false;
  for (unsigned I = 1, N = E->getNumArgs(); I != N; ++I) {
    EvaluateIgnoredValue(Info, E->getArg(I));
    IsNothrow |= E->getType()->isNothrowT();
  }

  CharUnits ElemSize;
  if (!HandleSizeof(Info, E->getExprLoc(), ElemType, ElemSize))
    return false;
  APInt Size, Remainder;
  APInt ElemSizeAP(ByteSize.getBitWidth(), ElemSize.getQuantity());
  APInt::udivrem(ByteSize, ElemSizeAP, Size, Remainder);
  if (Remainder != 0) {
    // This likely indicates a bug in the implementation of 'std::allocator'.
5998 Info.FFDiag(E->getExprLoc(), diag::note_constexpr_operator_new_bad_size) 5999 << ByteSize << APSInt(ElemSizeAP, true) << ElemType; 6000 return false; 6001 } 6002 6003 if (ByteSize.getActiveBits() > ConstantArrayType::getMaxSizeBits(Info.Ctx)) { 6004 if (IsNothrow) { 6005 Result.setNull(Info.Ctx, E->getType()); 6006 return true; 6007 } 6008 6009 Info.FFDiag(E, diag::note_constexpr_new_too_large) << APSInt(Size, true); 6010 return false; 6011 } 6012 6013 QualType AllocType = Info.Ctx.getConstantArrayType(ElemType, Size, nullptr, 6014 ArrayType::Normal, 0); 6015 APValue *Val = Info.createHeapAlloc(E, AllocType, Result); 6016 *Val = APValue(APValue::UninitArray(), 0, Size.getZExtValue()); 6017 Result.addArray(Info, E, cast<ConstantArrayType>(AllocType)); 6018 return true; 6019 } 6020 6021 static bool hasVirtualDestructor(QualType T) { 6022 if (CXXRecordDecl *RD = T->getAsCXXRecordDecl()) 6023 if (CXXDestructorDecl *DD = RD->getDestructor()) 6024 return DD->isVirtual(); 6025 return false; 6026 } 6027 6028 static const FunctionDecl *getVirtualOperatorDelete(QualType T) { 6029 if (CXXRecordDecl *RD = T->getAsCXXRecordDecl()) 6030 if (CXXDestructorDecl *DD = RD->getDestructor()) 6031 return DD->isVirtual() ? DD->getOperatorDelete() : nullptr; 6032 return nullptr; 6033 } 6034 6035 /// Check that the given object is a suitable pointer to a heap allocation that 6036 /// still exists and is of the right kind for the purpose of a deletion. 6037 /// 6038 /// On success, returns the heap allocation to deallocate. On failure, produces 6039 /// a diagnostic and returns None. 6040 static Optional<DynAlloc *> CheckDeleteKind(EvalInfo &Info, const Expr *E, 6041 const LValue &Pointer, 6042 DynAlloc::Kind DeallocKind) { 6043 auto PointerAsString = [&] { 6044 return Pointer.toString(Info.Ctx, Info.Ctx.VoidPtrTy); 6045 }; 6046 6047 DynamicAllocLValue DA = Pointer.Base.dyn_cast<DynamicAllocLValue>(); 6048 if (!DA) { 6049 Info.FFDiag(E, diag::note_constexpr_delete_not_heap_alloc) 6050 << PointerAsString(); 6051 if (Pointer.Base) 6052 NoteLValueLocation(Info, Pointer.Base); 6053 return None; 6054 } 6055 6056 Optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA); 6057 if (!Alloc) { 6058 Info.FFDiag(E, diag::note_constexpr_double_delete); 6059 return None; 6060 } 6061 6062 QualType AllocType = Pointer.Base.getDynamicAllocType(); 6063 if (DeallocKind != (*Alloc)->getKind()) { 6064 Info.FFDiag(E, diag::note_constexpr_new_delete_mismatch) 6065 << DeallocKind << (*Alloc)->getKind() << AllocType; 6066 NoteLValueLocation(Info, Pointer.Base); 6067 return None; 6068 } 6069 6070 bool Subobject = false; 6071 if (DeallocKind == DynAlloc::New) { 6072 Subobject = Pointer.Designator.MostDerivedPathLength != 0 || 6073 Pointer.Designator.isOnePastTheEnd(); 6074 } else { 6075 Subobject = Pointer.Designator.Entries.size() != 1 || 6076 Pointer.Designator.Entries[0].getAsArrayIndex() != 0; 6077 } 6078 if (Subobject) { 6079 Info.FFDiag(E, diag::note_constexpr_delete_subobject) 6080 << PointerAsString() << Pointer.Designator.isOnePastTheEnd(); 6081 return None; 6082 } 6083 6084 return Alloc; 6085 } 6086 6087 // Perform a call to 'operator delete' or '__builtin_operator_delete'. 6088 bool HandleOperatorDeleteCall(EvalInfo &Info, const CallExpr *E) { 6089 if (Info.checkingPotentialConstantExpression() || 6090 Info.SpeculativeEvaluationDepth) 6091 return false; 6092 6093 // This is permitted only within a call to std::allocator<T>::deallocate. 
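  // For instance (illustrative, C++20): the evaluator accepts the pattern the
  // library itself uses, e.g.
  //
  //   constexpr int f() {
  //     std::allocator<int> a;
  //     int *p = a.allocate(1);  // reaches HandleOperatorNewCall above
  //     *p = 42;
  //     int r = *p;
  //     a.deallocate(p, 1);      // ends up here
  //     return r;
  //   }
  //
  // but a direct call to 'operator delete' outside std::allocator is rejected.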
  if (!Info.getStdAllocatorCaller("deallocate")) {
    Info.FFDiag(E->getExprLoc());
    return true;
  }

  LValue Pointer;
  if (!EvaluatePointer(E->getArg(0), Pointer, Info))
    return false;
  for (unsigned I = 1, N = E->getNumArgs(); I != N; ++I)
    EvaluateIgnoredValue(Info, E->getArg(I));

  if (Pointer.Designator.Invalid)
    return false;

  // Deleting a null pointer has no effect.
  if (Pointer.isNullPointer())
    return true;

  if (!CheckDeleteKind(Info, E, Pointer, DynAlloc::StdAllocator))
    return false;

  Info.HeapAllocs.erase(Pointer.Base.get<DynamicAllocLValue>());
  return true;
}

//===----------------------------------------------------------------------===//
// Generic Evaluation
//===----------------------------------------------------------------------===//
namespace {

class BitCastBuffer {
  // FIXME: We're going to need bit-level granularity when we support
  // bit-fields.
  // FIXME: It's possible under the C++ standard for 'char' to not be 8 bits,
  // but we don't support a host or target where that is the case. Still, we
  // should use a more generic type in case we ever do.
  SmallVector<Optional<unsigned char>, 32> Bytes;

  static_assert(std::numeric_limits<unsigned char>::digits >= 8,
                "Need at least 8 bit unsigned char");

  bool TargetIsLittleEndian;

public:
  BitCastBuffer(CharUnits Width, bool TargetIsLittleEndian)
      : Bytes(Width.getQuantity()),
        TargetIsLittleEndian(TargetIsLittleEndian) {}

  LLVM_NODISCARD
  bool readObject(CharUnits Offset, CharUnits Width,
                  SmallVectorImpl<unsigned char> &Output) const {
    for (CharUnits I = Offset, E = Offset + Width; I != E; ++I) {
      // If a byte of an integer is uninitialized, then the whole integer is
      // uninitialized.
      if (!Bytes[I.getQuantity()])
        return false;
      Output.push_back(*Bytes[I.getQuantity()]);
    }
    if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian)
      std::reverse(Output.begin(), Output.end());
    return true;
  }

  void writeObject(CharUnits Offset, SmallVectorImpl<unsigned char> &Input) {
    if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian)
      std::reverse(Input.begin(), Input.end());

    size_t Index = 0;
    for (unsigned char Byte : Input) {
      assert(!Bytes[Offset.getQuantity() + Index] && "overwriting a byte?");
      Bytes[Offset.getQuantity() + Index] = Byte;
      ++Index;
    }
  }

  size_t size() { return Bytes.size(); }
};

/// Traverse an APValue to produce a BitCastBuffer, emulating how the current
/// target would represent the value at runtime.
class APValueToBufferConverter {
  EvalInfo &Info;
  BitCastBuffer Buffer;
  const CastExpr *BCE;

  APValueToBufferConverter(EvalInfo &Info, CharUnits ObjectWidth,
                           const CastExpr *BCE)
      : Info(Info),
        Buffer(ObjectWidth, Info.Ctx.getTargetInfo().isLittleEndian()),
        BCE(BCE) {}

  bool visit(const APValue &Val, QualType Ty) {
    return visit(Val, Ty, CharUnits::fromQuantity(0));
  }

  // Write out Val with type Ty into Buffer starting at Offset.
  bool visit(const APValue &Val, QualType Ty, CharUnits Offset) {
    assert((size_t)Offset.getQuantity() <= Buffer.size());

    // As a special case, nullptr_t has an indeterminate value.
6194 if (Ty->isNullPtrType()) 6195 return true; 6196 6197 // Dig through Src to find the byte at SrcOffset. 6198 switch (Val.getKind()) { 6199 case APValue::Indeterminate: 6200 case APValue::None: 6201 return true; 6202 6203 case APValue::Int: 6204 return visitInt(Val.getInt(), Ty, Offset); 6205 case APValue::Float: 6206 return visitFloat(Val.getFloat(), Ty, Offset); 6207 case APValue::Array: 6208 return visitArray(Val, Ty, Offset); 6209 case APValue::Struct: 6210 return visitRecord(Val, Ty, Offset); 6211 6212 case APValue::ComplexInt: 6213 case APValue::ComplexFloat: 6214 case APValue::Vector: 6215 case APValue::FixedPoint: 6216 // FIXME: We should support these. 6217 6218 case APValue::Union: 6219 case APValue::MemberPointer: 6220 case APValue::AddrLabelDiff: { 6221 Info.FFDiag(BCE->getBeginLoc(), 6222 diag::note_constexpr_bit_cast_unsupported_type) 6223 << Ty; 6224 return false; 6225 } 6226 6227 case APValue::LValue: 6228 llvm_unreachable("LValue subobject in bit_cast?"); 6229 } 6230 llvm_unreachable("Unhandled APValue::ValueKind"); 6231 } 6232 6233 bool visitRecord(const APValue &Val, QualType Ty, CharUnits Offset) { 6234 const RecordDecl *RD = Ty->getAsRecordDecl(); 6235 const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD); 6236 6237 // Visit the base classes. 6238 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 6239 for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) { 6240 const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I]; 6241 CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl(); 6242 6243 if (!visitRecord(Val.getStructBase(I), BS.getType(), 6244 Layout.getBaseClassOffset(BaseDecl) + Offset)) 6245 return false; 6246 } 6247 } 6248 6249 // Visit the fields. 6250 unsigned FieldIdx = 0; 6251 for (FieldDecl *FD : RD->fields()) { 6252 if (FD->isBitField()) { 6253 Info.FFDiag(BCE->getBeginLoc(), 6254 diag::note_constexpr_bit_cast_unsupported_bitfield); 6255 return false; 6256 } 6257 6258 uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldIdx); 6259 6260 assert(FieldOffsetBits % Info.Ctx.getCharWidth() == 0 && 6261 "only bit-fields can have sub-char alignment"); 6262 CharUnits FieldOffset = 6263 Info.Ctx.toCharUnitsFromBits(FieldOffsetBits) + Offset; 6264 QualType FieldTy = FD->getType(); 6265 if (!visit(Val.getStructField(FieldIdx), FieldTy, FieldOffset)) 6266 return false; 6267 ++FieldIdx; 6268 } 6269 6270 return true; 6271 } 6272 6273 bool visitArray(const APValue &Val, QualType Ty, CharUnits Offset) { 6274 const auto *CAT = 6275 dyn_cast_or_null<ConstantArrayType>(Ty->getAsArrayTypeUnsafe()); 6276 if (!CAT) 6277 return false; 6278 6279 CharUnits ElemWidth = Info.Ctx.getTypeSizeInChars(CAT->getElementType()); 6280 unsigned NumInitializedElts = Val.getArrayInitializedElts(); 6281 unsigned ArraySize = Val.getArraySize(); 6282 // First, initialize the initialized elements. 6283 for (unsigned I = 0; I != NumInitializedElts; ++I) { 6284 const APValue &SubObj = Val.getArrayInitializedElt(I); 6285 if (!visit(SubObj, CAT->getElementType(), Offset + I * ElemWidth)) 6286 return false; 6287 } 6288 6289 // Next, initialize the rest of the array using the filler. 
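    // For example (illustrative), for 'int arr[4] = {1, 2}' the APValue holds
    // two initialized elements plus a single zero filler, and the filler is
    // written once for each of the remaining two elements here.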
6290 if (Val.hasArrayFiller()) { 6291 const APValue &Filler = Val.getArrayFiller(); 6292 for (unsigned I = NumInitializedElts; I != ArraySize; ++I) { 6293 if (!visit(Filler, CAT->getElementType(), Offset + I * ElemWidth)) 6294 return false; 6295 } 6296 } 6297 6298 return true; 6299 } 6300 6301 bool visitInt(const APSInt &Val, QualType Ty, CharUnits Offset) { 6302 CharUnits Width = Info.Ctx.getTypeSizeInChars(Ty); 6303 SmallVector<unsigned char, 8> Bytes(Width.getQuantity()); 6304 llvm::StoreIntToMemory(Val, &*Bytes.begin(), Width.getQuantity()); 6305 Buffer.writeObject(Offset, Bytes); 6306 return true; 6307 } 6308 6309 bool visitFloat(const APFloat &Val, QualType Ty, CharUnits Offset) { 6310 APSInt AsInt(Val.bitcastToAPInt()); 6311 return visitInt(AsInt, Ty, Offset); 6312 } 6313 6314 public: 6315 static Optional<BitCastBuffer> convert(EvalInfo &Info, const APValue &Src, 6316 const CastExpr *BCE) { 6317 CharUnits DstSize = Info.Ctx.getTypeSizeInChars(BCE->getType()); 6318 APValueToBufferConverter Converter(Info, DstSize, BCE); 6319 if (!Converter.visit(Src, BCE->getSubExpr()->getType())) 6320 return None; 6321 return Converter.Buffer; 6322 } 6323 }; 6324 6325 /// Write an BitCastBuffer into an APValue. 6326 class BufferToAPValueConverter { 6327 EvalInfo &Info; 6328 const BitCastBuffer &Buffer; 6329 const CastExpr *BCE; 6330 6331 BufferToAPValueConverter(EvalInfo &Info, const BitCastBuffer &Buffer, 6332 const CastExpr *BCE) 6333 : Info(Info), Buffer(Buffer), BCE(BCE) {} 6334 6335 // Emit an unsupported bit_cast type error. Sema refuses to build a bit_cast 6336 // with an invalid type, so anything left is a deficiency on our part (FIXME). 6337 // Ideally this will be unreachable. 6338 llvm::NoneType unsupportedType(QualType Ty) { 6339 Info.FFDiag(BCE->getBeginLoc(), 6340 diag::note_constexpr_bit_cast_unsupported_type) 6341 << Ty; 6342 return None; 6343 } 6344 6345 Optional<APValue> visit(const BuiltinType *T, CharUnits Offset, 6346 const EnumType *EnumSugar = nullptr) { 6347 if (T->isNullPtrType()) { 6348 uint64_t NullValue = Info.Ctx.getTargetNullPointerValue(QualType(T, 0)); 6349 return APValue((Expr *)nullptr, 6350 /*Offset=*/CharUnits::fromQuantity(NullValue), 6351 APValue::NoLValuePath{}, /*IsNullPtr=*/true); 6352 } 6353 6354 CharUnits SizeOf = Info.Ctx.getTypeSizeInChars(T); 6355 SmallVector<uint8_t, 8> Bytes; 6356 if (!Buffer.readObject(Offset, SizeOf, Bytes)) { 6357 // If this is std::byte or unsigned char, then its okay to store an 6358 // indeterminate value. 6359 bool IsStdByte = EnumSugar && EnumSugar->isStdByteType(); 6360 bool IsUChar = 6361 !EnumSugar && (T->isSpecificBuiltinType(BuiltinType::UChar) || 6362 T->isSpecificBuiltinType(BuiltinType::Char_U)); 6363 if (!IsStdByte && !IsUChar) { 6364 QualType DisplayType(EnumSugar ? 
(const Type *)EnumSugar : T, 0); 6365 Info.FFDiag(BCE->getExprLoc(), 6366 diag::note_constexpr_bit_cast_indet_dest) 6367 << DisplayType << Info.Ctx.getLangOpts().CharIsSigned; 6368 return None; 6369 } 6370 6371 return APValue::IndeterminateValue(); 6372 } 6373 6374 APSInt Val(SizeOf.getQuantity() * Info.Ctx.getCharWidth(), true); 6375 llvm::LoadIntFromMemory(Val, &*Bytes.begin(), Bytes.size()); 6376 6377 if (T->isIntegralOrEnumerationType()) { 6378 Val.setIsSigned(T->isSignedIntegerOrEnumerationType()); 6379 return APValue(Val); 6380 } 6381 6382 if (T->isRealFloatingType()) { 6383 const llvm::fltSemantics &Semantics = 6384 Info.Ctx.getFloatTypeSemantics(QualType(T, 0)); 6385 return APValue(APFloat(Semantics, Val)); 6386 } 6387 6388 return unsupportedType(QualType(T, 0)); 6389 } 6390 6391 Optional<APValue> visit(const RecordType *RTy, CharUnits Offset) { 6392 const RecordDecl *RD = RTy->getAsRecordDecl(); 6393 const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD); 6394 6395 unsigned NumBases = 0; 6396 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 6397 NumBases = CXXRD->getNumBases(); 6398 6399 APValue ResultVal(APValue::UninitStruct(), NumBases, 6400 std::distance(RD->field_begin(), RD->field_end())); 6401 6402 // Visit the base classes. 6403 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 6404 for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) { 6405 const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I]; 6406 CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl(); 6407 if (BaseDecl->isEmpty() || 6408 Info.Ctx.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero()) 6409 continue; 6410 6411 Optional<APValue> SubObj = visitType( 6412 BS.getType(), Layout.getBaseClassOffset(BaseDecl) + Offset); 6413 if (!SubObj) 6414 return None; 6415 ResultVal.getStructBase(I) = *SubObj; 6416 } 6417 } 6418 6419 // Visit the fields. 6420 unsigned FieldIdx = 0; 6421 for (FieldDecl *FD : RD->fields()) { 6422 // FIXME: We don't currently support bit-fields. A lot of the logic for 6423 // this is in CodeGen, so we need to factor it around. 6424 if (FD->isBitField()) { 6425 Info.FFDiag(BCE->getBeginLoc(), 6426 diag::note_constexpr_bit_cast_unsupported_bitfield); 6427 return None; 6428 } 6429 6430 uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldIdx); 6431 assert(FieldOffsetBits % Info.Ctx.getCharWidth() == 0); 6432 6433 CharUnits FieldOffset = 6434 CharUnits::fromQuantity(FieldOffsetBits / Info.Ctx.getCharWidth()) + 6435 Offset; 6436 QualType FieldTy = FD->getType(); 6437 Optional<APValue> SubObj = visitType(FieldTy, FieldOffset); 6438 if (!SubObj) 6439 return None; 6440 ResultVal.getStructField(FieldIdx) = *SubObj; 6441 ++FieldIdx; 6442 } 6443 6444 return ResultVal; 6445 } 6446 6447 Optional<APValue> visit(const EnumType *Ty, CharUnits Offset) { 6448 QualType RepresentationType = Ty->getDecl()->getIntegerType(); 6449 assert(!RepresentationType.isNull() && 6450 "enum forward decl should be caught by Sema"); 6451 const auto *AsBuiltin = 6452 RepresentationType.getCanonicalType()->castAs<BuiltinType>(); 6453 // Recurse into the underlying type. Treat std::byte transparently as 6454 // unsigned char. 
6455 return visit(AsBuiltin, Offset, /*EnumTy=*/Ty); 6456 } 6457 6458 Optional<APValue> visit(const ConstantArrayType *Ty, CharUnits Offset) { 6459 size_t Size = Ty->getSize().getLimitedValue(); 6460 CharUnits ElementWidth = Info.Ctx.getTypeSizeInChars(Ty->getElementType()); 6461 6462 APValue ArrayValue(APValue::UninitArray(), Size, Size); 6463 for (size_t I = 0; I != Size; ++I) { 6464 Optional<APValue> ElementValue = 6465 visitType(Ty->getElementType(), Offset + I * ElementWidth); 6466 if (!ElementValue) 6467 return None; 6468 ArrayValue.getArrayInitializedElt(I) = std::move(*ElementValue); 6469 } 6470 6471 return ArrayValue; 6472 } 6473 6474 Optional<APValue> visit(const Type *Ty, CharUnits Offset) { 6475 return unsupportedType(QualType(Ty, 0)); 6476 } 6477 6478 Optional<APValue> visitType(QualType Ty, CharUnits Offset) { 6479 QualType Can = Ty.getCanonicalType(); 6480 6481 switch (Can->getTypeClass()) { 6482 #define TYPE(Class, Base) \ 6483 case Type::Class: \ 6484 return visit(cast<Class##Type>(Can.getTypePtr()), Offset); 6485 #define ABSTRACT_TYPE(Class, Base) 6486 #define NON_CANONICAL_TYPE(Class, Base) \ 6487 case Type::Class: \ 6488 llvm_unreachable("non-canonical type should be impossible!"); 6489 #define DEPENDENT_TYPE(Class, Base) \ 6490 case Type::Class: \ 6491 llvm_unreachable( \ 6492 "dependent types aren't supported in the constant evaluator!"); 6493 #define NON_CANONICAL_UNLESS_DEPENDENT(Class, Base) \ 6494 case Type::Class: \ 6495 llvm_unreachable("either dependent or not canonical!"); 6496 #include "clang/AST/TypeNodes.inc" 6497 } 6498 llvm_unreachable("Unhandled Type::TypeClass"); 6499 } 6500 6501 public: 6502 // Pull out a full value of type DstType. 6503 static Optional<APValue> convert(EvalInfo &Info, BitCastBuffer &Buffer, 6504 const CastExpr *BCE) { 6505 BufferToAPValueConverter Converter(Info, Buffer, BCE); 6506 return Converter.visitType(BCE->getType(), CharUnits::fromQuantity(0)); 6507 } 6508 }; 6509 6510 static bool checkBitCastConstexprEligibilityType(SourceLocation Loc, 6511 QualType Ty, EvalInfo *Info, 6512 const ASTContext &Ctx, 6513 bool CheckingDest) { 6514 Ty = Ty.getCanonicalType(); 6515 6516 auto diag = [&](int Reason) { 6517 if (Info) 6518 Info->FFDiag(Loc, diag::note_constexpr_bit_cast_invalid_type) 6519 << CheckingDest << (Reason == 4) << Reason; 6520 return false; 6521 }; 6522 auto note = [&](int Construct, QualType NoteTy, SourceLocation NoteLoc) { 6523 if (Info) 6524 Info->Note(NoteLoc, diag::note_constexpr_bit_cast_invalid_subtype) 6525 << NoteTy << Construct << Ty; 6526 return false; 6527 }; 6528 6529 if (Ty->isUnionType()) 6530 return diag(0); 6531 if (Ty->isPointerType()) 6532 return diag(1); 6533 if (Ty->isMemberPointerType()) 6534 return diag(2); 6535 if (Ty.isVolatileQualified()) 6536 return diag(3); 6537 6538 if (RecordDecl *Record = Ty->getAsRecordDecl()) { 6539 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Record)) { 6540 for (CXXBaseSpecifier &BS : CXXRD->bases()) 6541 if (!checkBitCastConstexprEligibilityType(Loc, BS.getType(), Info, Ctx, 6542 CheckingDest)) 6543 return note(1, BS.getType(), BS.getBeginLoc()); 6544 } 6545 for (FieldDecl *FD : Record->fields()) { 6546 if (FD->getType()->isReferenceType()) 6547 return diag(4); 6548 if (!checkBitCastConstexprEligibilityType(Loc, FD->getType(), Info, Ctx, 6549 CheckingDest)) 6550 return note(0, FD->getType(), FD->getBeginLoc()); 6551 } 6552 } 6553 6554 if (Ty->isArrayType() && 6555 !checkBitCastConstexprEligibilityType(Loc, Ctx.getBaseElementType(Ty), 6556 Info, Ctx, CheckingDest)) 6557 return 
false; 6558 6559 return true; 6560 } 6561 6562 static bool checkBitCastConstexprEligibility(EvalInfo *Info, 6563 const ASTContext &Ctx, 6564 const CastExpr *BCE) { 6565 bool DestOK = checkBitCastConstexprEligibilityType( 6566 BCE->getBeginLoc(), BCE->getType(), Info, Ctx, true); 6567 bool SourceOK = DestOK && checkBitCastConstexprEligibilityType( 6568 BCE->getBeginLoc(), 6569 BCE->getSubExpr()->getType(), Info, Ctx, false); 6570 return SourceOK; 6571 } 6572 6573 static bool handleLValueToRValueBitCast(EvalInfo &Info, APValue &DestValue, 6574 APValue &SourceValue, 6575 const CastExpr *BCE) { 6576 assert(CHAR_BIT == 8 && Info.Ctx.getTargetInfo().getCharWidth() == 8 && 6577 "no host or target supports non 8-bit chars"); 6578 assert(SourceValue.isLValue() && 6579 "LValueToRValueBitcast requires an lvalue operand!"); 6580 6581 if (!checkBitCastConstexprEligibility(&Info, Info.Ctx, BCE)) 6582 return false; 6583 6584 LValue SourceLValue; 6585 APValue SourceRValue; 6586 SourceLValue.setFrom(Info.Ctx, SourceValue); 6587 if (!handleLValueToRValueConversion( 6588 Info, BCE, BCE->getSubExpr()->getType().withConst(), SourceLValue, 6589 SourceRValue, /*WantObjectRepresentation=*/true)) 6590 return false; 6591 6592 // Read out SourceValue into a char buffer. 6593 Optional<BitCastBuffer> Buffer = 6594 APValueToBufferConverter::convert(Info, SourceRValue, BCE); 6595 if (!Buffer) 6596 return false; 6597 6598 // Write out the buffer into a new APValue. 6599 Optional<APValue> MaybeDestValue = 6600 BufferToAPValueConverter::convert(Info, *Buffer, BCE); 6601 if (!MaybeDestValue) 6602 return false; 6603 6604 DestValue = std::move(*MaybeDestValue); 6605 return true; 6606 } 6607 6608 template <class Derived> 6609 class ExprEvaluatorBase 6610 : public ConstStmtVisitor<Derived, bool> { 6611 private: 6612 Derived &getDerived() { return static_cast<Derived&>(*this); } 6613 bool DerivedSuccess(const APValue &V, const Expr *E) { 6614 return getDerived().Success(V, E); 6615 } 6616 bool DerivedZeroInitialization(const Expr *E) { 6617 return getDerived().ZeroInitialization(E); 6618 } 6619 6620 // Check whether a conditional operator with a non-constant condition is a 6621 // potential constant expression. If neither arm is a potential constant 6622 // expression, then the conditional operator is not either. 6623 template<typename ConditionalOperator> 6624 void CheckPotentialConstantConditional(const ConditionalOperator *E) { 6625 assert(Info.checkingPotentialConstantExpression()); 6626 6627 // Speculatively evaluate both arms. 6628 SmallVector<PartialDiagnosticAt, 8> Diag; 6629 { 6630 SpeculativeEvaluationRAII Speculate(Info, &Diag); 6631 StmtVisitorTy::Visit(E->getFalseExpr()); 6632 if (Diag.empty()) 6633 return; 6634 } 6635 6636 { 6637 SpeculativeEvaluationRAII Speculate(Info, &Diag); 6638 Diag.clear(); 6639 StmtVisitorTy::Visit(E->getTrueExpr()); 6640 if (Diag.empty()) 6641 return; 6642 } 6643 6644 Error(E, diag::note_constexpr_conditional_never_const); 6645 } 6646 6647 6648 template<typename ConditionalOperator> 6649 bool HandleConditionalOperator(const ConditionalOperator *E) { 6650 bool BoolResult; 6651 if (!EvaluateAsBooleanCondition(E->getCond(), BoolResult, Info)) { 6652 if (Info.checkingPotentialConstantExpression() && Info.noteFailure()) { 6653 CheckPotentialConstantConditional(E); 6654 return false; 6655 } 6656 if (Info.noteFailure()) { 6657 StmtVisitorTy::Visit(E->getTrueExpr()); 6658 StmtVisitorTy::Visit(E->getFalseExpr()); 6659 } 6660 return false; 6661 } 6662 6663 Expr *EvalExpr = BoolResult ? 
E->getTrueExpr() : E->getFalseExpr(); 6664 return StmtVisitorTy::Visit(EvalExpr); 6665 } 6666 6667 protected: 6668 EvalInfo &Info; 6669 typedef ConstStmtVisitor<Derived, bool> StmtVisitorTy; 6670 typedef ExprEvaluatorBase ExprEvaluatorBaseTy; 6671 6672 OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) { 6673 return Info.CCEDiag(E, D); 6674 } 6675 6676 bool ZeroInitialization(const Expr *E) { return Error(E); } 6677 6678 public: 6679 ExprEvaluatorBase(EvalInfo &Info) : Info(Info) {} 6680 6681 EvalInfo &getEvalInfo() { return Info; } 6682 6683 /// Report an evaluation error. This should only be called when an error is 6684 /// first discovered. When propagating an error, just return false. 6685 bool Error(const Expr *E, diag::kind D) { 6686 Info.FFDiag(E, D); 6687 return false; 6688 } 6689 bool Error(const Expr *E) { 6690 return Error(E, diag::note_invalid_subexpr_in_const_expr); 6691 } 6692 6693 bool VisitStmt(const Stmt *) { 6694 llvm_unreachable("Expression evaluator should not be called on stmts"); 6695 } 6696 bool VisitExpr(const Expr *E) { 6697 return Error(E); 6698 } 6699 6700 bool VisitConstantExpr(const ConstantExpr *E) 6701 { return StmtVisitorTy::Visit(E->getSubExpr()); } 6702 bool VisitParenExpr(const ParenExpr *E) 6703 { return StmtVisitorTy::Visit(E->getSubExpr()); } 6704 bool VisitUnaryExtension(const UnaryOperator *E) 6705 { return StmtVisitorTy::Visit(E->getSubExpr()); } 6706 bool VisitUnaryPlus(const UnaryOperator *E) 6707 { return StmtVisitorTy::Visit(E->getSubExpr()); } 6708 bool VisitChooseExpr(const ChooseExpr *E) 6709 { return StmtVisitorTy::Visit(E->getChosenSubExpr()); } 6710 bool VisitGenericSelectionExpr(const GenericSelectionExpr *E) 6711 { return StmtVisitorTy::Visit(E->getResultExpr()); } 6712 bool VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E) 6713 { return StmtVisitorTy::Visit(E->getReplacement()); } 6714 bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E) { 6715 TempVersionRAII RAII(*Info.CurrentCall); 6716 SourceLocExprScopeGuard Guard(E, Info.CurrentCall->CurSourceLocExprScope); 6717 return StmtVisitorTy::Visit(E->getExpr()); 6718 } 6719 bool VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) { 6720 TempVersionRAII RAII(*Info.CurrentCall); 6721 // The initializer may not have been parsed yet, or might be erroneous. 6722 if (!E->getExpr()) 6723 return Error(E); 6724 SourceLocExprScopeGuard Guard(E, Info.CurrentCall->CurSourceLocExprScope); 6725 return StmtVisitorTy::Visit(E->getExpr()); 6726 } 6727 6728 bool VisitExprWithCleanups(const ExprWithCleanups *E) { 6729 FullExpressionRAII Scope(Info); 6730 return StmtVisitorTy::Visit(E->getSubExpr()) && Scope.destroy(); 6731 } 6732 6733 // Temporaries are registered when created, so we don't care about 6734 // CXXBindTemporaryExpr. 
6735 bool VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *E) { 6736 return StmtVisitorTy::Visit(E->getSubExpr()); 6737 } 6738 6739 bool VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E) { 6740 CCEDiag(E, diag::note_constexpr_invalid_cast) << 0; 6741 return static_cast<Derived*>(this)->VisitCastExpr(E); 6742 } 6743 bool VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) { 6744 if (!Info.Ctx.getLangOpts().CPlusPlus2a) 6745 CCEDiag(E, diag::note_constexpr_invalid_cast) << 1; 6746 return static_cast<Derived*>(this)->VisitCastExpr(E); 6747 } 6748 bool VisitBuiltinBitCastExpr(const BuiltinBitCastExpr *E) { 6749 return static_cast<Derived*>(this)->VisitCastExpr(E); 6750 } 6751 6752 bool VisitBinaryOperator(const BinaryOperator *E) { 6753 switch (E->getOpcode()) { 6754 default: 6755 return Error(E); 6756 6757 case BO_Comma: 6758 VisitIgnoredValue(E->getLHS()); 6759 return StmtVisitorTy::Visit(E->getRHS()); 6760 6761 case BO_PtrMemD: 6762 case BO_PtrMemI: { 6763 LValue Obj; 6764 if (!HandleMemberPointerAccess(Info, E, Obj)) 6765 return false; 6766 APValue Result; 6767 if (!handleLValueToRValueConversion(Info, E, E->getType(), Obj, Result)) 6768 return false; 6769 return DerivedSuccess(Result, E); 6770 } 6771 } 6772 } 6773 6774 bool VisitCXXRewrittenBinaryOperator(const CXXRewrittenBinaryOperator *E) { 6775 return StmtVisitorTy::Visit(E->getSemanticForm()); 6776 } 6777 6778 bool VisitBinaryConditionalOperator(const BinaryConditionalOperator *E) { 6779 // Evaluate and cache the common expression. We treat it as a temporary, 6780 // even though it's not quite the same thing. 6781 LValue CommonLV; 6782 if (!Evaluate(Info.CurrentCall->createTemporary( 6783 E->getOpaqueValue(), 6784 getStorageType(Info.Ctx, E->getOpaqueValue()), false, 6785 CommonLV), 6786 Info, E->getCommon())) 6787 return false; 6788 6789 return HandleConditionalOperator(E); 6790 } 6791 6792 bool VisitConditionalOperator(const ConditionalOperator *E) { 6793 bool IsBcpCall = false; 6794 // If the condition (ignoring parens) is a __builtin_constant_p call, 6795 // the result is a constant expression if it can be folded without 6796 // side-effects. This is an important GNU extension. See GCC PR38377 6797 // for discussion. 6798 if (const CallExpr *CallCE = 6799 dyn_cast<CallExpr>(E->getCond()->IgnoreParenCasts())) 6800 if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p) 6801 IsBcpCall = true; 6802 6803 // Always assume __builtin_constant_p(...) ? ... : ... is a potential 6804 // constant expression; we can't check whether it's potentially foldable. 6805 // FIXME: We should instead treat __builtin_constant_p as non-constant if 6806 // it would return 'false' in this mode. 6807 if (Info.checkingPotentialConstantExpression() && IsBcpCall) 6808 return false; 6809 6810 FoldConstant Fold(Info, IsBcpCall); 6811 if (!HandleConditionalOperator(E)) { 6812 Fold.keepDiagnostics(); 6813 return false; 6814 } 6815 6816 return true; 6817 } 6818 6819 bool VisitOpaqueValueExpr(const OpaqueValueExpr *E) { 6820 if (APValue *Value = Info.CurrentCall->getCurrentTemporary(E)) 6821 return DerivedSuccess(*Value, E); 6822 6823 const Expr *Source = E->getSourceExpr(); 6824 if (!Source) 6825 return Error(E); 6826 if (Source == E) { // sanity checking. 
6827 assert(0 && "OpaqueValueExpr recursively refers to itself"); 6828 return Error(E); 6829 } 6830 return StmtVisitorTy::Visit(Source); 6831 } 6832 6833 bool VisitPseudoObjectExpr(const PseudoObjectExpr *E) { 6834 for (const Expr *SemE : E->semantics()) { 6835 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SemE)) { 6836 // FIXME: We can't handle the case where an OpaqueValueExpr is also the 6837 // result expression: there could be two different LValues that would 6838 // refer to the same object in that case, and we can't model that. 6839 if (SemE == E->getResultExpr()) 6840 return Error(E); 6841 6842 // Unique OVEs get evaluated if and when we encounter them when 6843 // emitting the rest of the semantic form, rather than eagerly. 6844 if (OVE->isUnique()) 6845 continue; 6846 6847 LValue LV; 6848 if (!Evaluate(Info.CurrentCall->createTemporary( 6849 OVE, getStorageType(Info.Ctx, OVE), false, LV), 6850 Info, OVE->getSourceExpr())) 6851 return false; 6852 } else if (SemE == E->getResultExpr()) { 6853 if (!StmtVisitorTy::Visit(SemE)) 6854 return false; 6855 } else { 6856 if (!EvaluateIgnoredValue(Info, SemE)) 6857 return false; 6858 } 6859 } 6860 return true; 6861 } 6862 6863 bool VisitCallExpr(const CallExpr *E) { 6864 APValue Result; 6865 if (!handleCallExpr(E, Result, nullptr)) 6866 return false; 6867 return DerivedSuccess(Result, E); 6868 } 6869 6870 bool handleCallExpr(const CallExpr *E, APValue &Result, 6871 const LValue *ResultSlot) { 6872 const Expr *Callee = E->getCallee()->IgnoreParens(); 6873 QualType CalleeType = Callee->getType(); 6874 6875 const FunctionDecl *FD = nullptr; 6876 LValue *This = nullptr, ThisVal; 6877 auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs()); 6878 bool HasQualifier = false; 6879 6880 // Extract function decl and 'this' pointer from the callee. 6881 if (CalleeType->isSpecificBuiltinType(BuiltinType::BoundMember)) { 6882 const CXXMethodDecl *Member = nullptr; 6883 if (const MemberExpr *ME = dyn_cast<MemberExpr>(Callee)) { 6884 // Explicit bound member calls, such as x.f() or p->g(); 6885 if (!EvaluateObjectArgument(Info, ME->getBase(), ThisVal)) 6886 return false; 6887 Member = dyn_cast<CXXMethodDecl>(ME->getMemberDecl()); 6888 if (!Member) 6889 return Error(Callee); 6890 This = &ThisVal; 6891 HasQualifier = ME->hasQualifier(); 6892 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(Callee)) { 6893 // Indirect bound member calls ('.*' or '->*'). 6894 const ValueDecl *D = 6895 HandleMemberPointerAccess(Info, BE, ThisVal, false); 6896 if (!D) 6897 return false; 6898 Member = dyn_cast<CXXMethodDecl>(D); 6899 if (!Member) 6900 return Error(Callee); 6901 This = &ThisVal; 6902 } else if (const auto *PDE = dyn_cast<CXXPseudoDestructorExpr>(Callee)) { 6903 if (!Info.getLangOpts().CPlusPlus2a) 6904 Info.CCEDiag(PDE, diag::note_constexpr_pseudo_destructor); 6905 // FIXME: If pseudo-destructor calls ever start ending the lifetime of 6906 // their callee, we should start calling HandleDestruction here. 6907 // For now, we just evaluate the object argument and discard it. 
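        // For example (illustrative): after instantiating
        //   template<class T> constexpr void destroy(T *p) { p->~T(); }
        // with T = int, 'p->~T()' is a pseudo-destructor call; we simply
        // evaluate the base 'p' here and discard it (and diagnose the call
        // before C++2a, as above).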
6908 return EvaluateObjectArgument(Info, PDE->getBase(), ThisVal); 6909 } else 6910 return Error(Callee); 6911 FD = Member; 6912 } else if (CalleeType->isFunctionPointerType()) { 6913 LValue Call; 6914 if (!EvaluatePointer(Callee, Call, Info)) 6915 return false; 6916 6917 if (!Call.getLValueOffset().isZero()) 6918 return Error(Callee); 6919 FD = dyn_cast_or_null<FunctionDecl>( 6920 Call.getLValueBase().dyn_cast<const ValueDecl*>()); 6921 if (!FD) 6922 return Error(Callee); 6923 // Don't call function pointers which have been cast to some other type. 6924 // Per DR (no number yet), the caller and callee can differ in noexcept. 6925 if (!Info.Ctx.hasSameFunctionTypeIgnoringExceptionSpec( 6926 CalleeType->getPointeeType(), FD->getType())) { 6927 return Error(E); 6928 } 6929 6930 // Overloaded operator calls to member functions are represented as normal 6931 // calls with '*this' as the first argument. 6932 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD); 6933 if (MD && !MD->isStatic()) { 6934 // FIXME: When selecting an implicit conversion for an overloaded 6935 // operator delete, we sometimes try to evaluate calls to conversion 6936 // operators without a 'this' parameter! 6937 if (Args.empty()) 6938 return Error(E); 6939 6940 if (!EvaluateObjectArgument(Info, Args[0], ThisVal)) 6941 return false; 6942 This = &ThisVal; 6943 Args = Args.slice(1); 6944 } else if (MD && MD->isLambdaStaticInvoker()) { 6945 // Map the static invoker for the lambda back to the call operator. 6946 // Conveniently, we don't have to slice out the 'this' argument (as is 6947 // being done for the non-static case), since a static member function 6948 // doesn't have an implicit argument passed in. 6949 const CXXRecordDecl *ClosureClass = MD->getParent(); 6950 assert( 6951 ClosureClass->captures_begin() == ClosureClass->captures_end() && 6952 "Number of captures must be zero for conversion to function-ptr"); 6953 6954 const CXXMethodDecl *LambdaCallOp = 6955 ClosureClass->getLambdaCallOperator(); 6956 6957 // Set 'FD', the function that will be called below, to the call 6958 // operator. If the closure object represents a generic lambda, find 6959 // the corresponding specialization of the call operator. 
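      // For example (illustrative), in
      //   constexpr int (*fp)(int) = [](int x) { return x + 1; };
      //   static_assert(fp(1) == 2);
      // the call goes through the closure's static invoker, and we substitute
      // the lambda's operator() (or, for a generic lambda, the matching
      // specialization of it) as the function to evaluate.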
6960 6961 if (ClosureClass->isGenericLambda()) { 6962 assert(MD->isFunctionTemplateSpecialization() && 6963 "A generic lambda's static-invoker function must be a " 6964 "template specialization"); 6965 const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs(); 6966 FunctionTemplateDecl *CallOpTemplate = 6967 LambdaCallOp->getDescribedFunctionTemplate(); 6968 void *InsertPos = nullptr; 6969 FunctionDecl *CorrespondingCallOpSpecialization = 6970 CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos); 6971 assert(CorrespondingCallOpSpecialization && 6972 "We must always have a function call operator specialization " 6973 "that corresponds to our static invoker specialization"); 6974 FD = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization); 6975 } else 6976 FD = LambdaCallOp; 6977 } else if (FD->isReplaceableGlobalAllocationFunction()) { 6978 if (FD->getDeclName().getCXXOverloadedOperator() == OO_New || 6979 FD->getDeclName().getCXXOverloadedOperator() == OO_Array_New) { 6980 LValue Ptr; 6981 if (!HandleOperatorNewCall(Info, E, Ptr)) 6982 return false; 6983 Ptr.moveInto(Result); 6984 return true; 6985 } else { 6986 return HandleOperatorDeleteCall(Info, E); 6987 } 6988 } 6989 } else 6990 return Error(E); 6991 6992 SmallVector<QualType, 4> CovariantAdjustmentPath; 6993 if (This) { 6994 auto *NamedMember = dyn_cast<CXXMethodDecl>(FD); 6995 if (NamedMember && NamedMember->isVirtual() && !HasQualifier) { 6996 // Perform virtual dispatch, if necessary. 6997 FD = HandleVirtualDispatch(Info, E, *This, NamedMember, 6998 CovariantAdjustmentPath); 6999 if (!FD) 7000 return false; 7001 } else { 7002 // Check that the 'this' pointer points to an object of the right type. 7003 // FIXME: If this is an assignment operator call, we may need to change 7004 // the active union member before we check this. 7005 if (!checkNonVirtualMemberCallThisPointer(Info, E, *This, NamedMember)) 7006 return false; 7007 } 7008 } 7009 7010 // Destructor calls are different enough that they have their own codepath. 
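    // For example (illustrative), a C++2a explicit destructor call 'p->~S()'
    // on a heap-allocated S with a constexpr destructor is dispatched to
    // HandleDestruction here rather than through HandleFunctionCall below.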
7011 if (auto *DD = dyn_cast<CXXDestructorDecl>(FD)) { 7012 assert(This && "no 'this' pointer for destructor call"); 7013 return HandleDestruction(Info, E, *This, 7014 Info.Ctx.getRecordType(DD->getParent())); 7015 } 7016 7017 const FunctionDecl *Definition = nullptr; 7018 Stmt *Body = FD->getBody(Definition); 7019 7020 if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition, Body) || 7021 !HandleFunctionCall(E->getExprLoc(), Definition, This, Args, Body, Info, 7022 Result, ResultSlot)) 7023 return false; 7024 7025 if (!CovariantAdjustmentPath.empty() && 7026 !HandleCovariantReturnAdjustment(Info, E, Result, 7027 CovariantAdjustmentPath)) 7028 return false; 7029 7030 return true; 7031 } 7032 7033 bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) { 7034 return StmtVisitorTy::Visit(E->getInitializer()); 7035 } 7036 bool VisitInitListExpr(const InitListExpr *E) { 7037 if (E->getNumInits() == 0) 7038 return DerivedZeroInitialization(E); 7039 if (E->getNumInits() == 1) 7040 return StmtVisitorTy::Visit(E->getInit(0)); 7041 return Error(E); 7042 } 7043 bool VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) { 7044 return DerivedZeroInitialization(E); 7045 } 7046 bool VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) { 7047 return DerivedZeroInitialization(E); 7048 } 7049 bool VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) { 7050 return DerivedZeroInitialization(E); 7051 } 7052 7053 /// A member expression where the object is a prvalue is itself a prvalue. 7054 bool VisitMemberExpr(const MemberExpr *E) { 7055 assert(!Info.Ctx.getLangOpts().CPlusPlus11 && 7056 "missing temporary materialization conversion"); 7057 assert(!E->isArrow() && "missing call to bound member function?"); 7058 7059 APValue Val; 7060 if (!Evaluate(Val, Info, E->getBase())) 7061 return false; 7062 7063 QualType BaseTy = E->getBase()->getType(); 7064 7065 const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl()); 7066 if (!FD) return Error(E); 7067 assert(!FD->getType()->isReferenceType() && "prvalue reference?"); 7068 assert(BaseTy->castAs<RecordType>()->getDecl()->getCanonicalDecl() == 7069 FD->getParent()->getCanonicalDecl() && "record / field mismatch"); 7070 7071 // Note: there is no lvalue base here. But this case should only ever 7072 // happen in C or in C++98, where we cannot be evaluating a constexpr 7073 // constructor, which is the only case the base matters. 7074 CompleteObject Obj(APValue::LValueBase(), &Val, BaseTy); 7075 SubobjectDesignator Designator(BaseTy); 7076 Designator.addDeclUnchecked(FD); 7077 7078 APValue Result; 7079 return extractSubobject(Info, E, Obj, Designator, Result) && 7080 DerivedSuccess(Result, E); 7081 } 7082 7083 bool VisitExtVectorElementExpr(const ExtVectorElementExpr *E) { 7084 APValue Val; 7085 if (!Evaluate(Val, Info, E->getBase())) 7086 return false; 7087 7088 if (Val.isVector()) { 7089 SmallVector<uint32_t, 4> Indices; 7090 E->getEncodedElementAccess(Indices); 7091 if (Indices.size() == 1) { 7092 // Return scalar. 7093 return DerivedSuccess(Val.getVectorElt(Indices[0]), E); 7094 } else { 7095 // Construct new APValue vector. 
7096 SmallVector<APValue, 4> Elts; 7097 for (unsigned I = 0; I < Indices.size(); ++I) { 7098 Elts.push_back(Val.getVectorElt(Indices[I])); 7099 } 7100 APValue VecResult(Elts.data(), Indices.size()); 7101 return DerivedSuccess(VecResult, E); 7102 } 7103 } 7104 7105 return false; 7106 } 7107 7108 bool VisitCastExpr(const CastExpr *E) { 7109 switch (E->getCastKind()) { 7110 default: 7111 break; 7112 7113 case CK_AtomicToNonAtomic: { 7114 APValue AtomicVal; 7115 // This does not need to be done in place even for class/array types: 7116 // atomic-to-non-atomic conversion implies copying the object 7117 // representation. 7118 if (!Evaluate(AtomicVal, Info, E->getSubExpr())) 7119 return false; 7120 return DerivedSuccess(AtomicVal, E); 7121 } 7122 7123 case CK_NoOp: 7124 case CK_UserDefinedConversion: 7125 return StmtVisitorTy::Visit(E->getSubExpr()); 7126 7127 case CK_LValueToRValue: { 7128 LValue LVal; 7129 if (!EvaluateLValue(E->getSubExpr(), LVal, Info)) 7130 return false; 7131 APValue RVal; 7132 // Note, we use the subexpression's type in order to retain cv-qualifiers. 7133 if (!handleLValueToRValueConversion(Info, E, E->getSubExpr()->getType(), 7134 LVal, RVal)) 7135 return false; 7136 return DerivedSuccess(RVal, E); 7137 } 7138 case CK_LValueToRValueBitCast: { 7139 APValue DestValue, SourceValue; 7140 if (!Evaluate(SourceValue, Info, E->getSubExpr())) 7141 return false; 7142 if (!handleLValueToRValueBitCast(Info, DestValue, SourceValue, E)) 7143 return false; 7144 return DerivedSuccess(DestValue, E); 7145 } 7146 7147 case CK_AddressSpaceConversion: { 7148 APValue Value; 7149 if (!Evaluate(Value, Info, E->getSubExpr())) 7150 return false; 7151 return DerivedSuccess(Value, E); 7152 } 7153 } 7154 7155 return Error(E); 7156 } 7157 7158 bool VisitUnaryPostInc(const UnaryOperator *UO) { 7159 return VisitUnaryPostIncDec(UO); 7160 } 7161 bool VisitUnaryPostDec(const UnaryOperator *UO) { 7162 return VisitUnaryPostIncDec(UO); 7163 } 7164 bool VisitUnaryPostIncDec(const UnaryOperator *UO) { 7165 if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure()) 7166 return Error(UO); 7167 7168 LValue LVal; 7169 if (!EvaluateLValue(UO->getSubExpr(), LVal, Info)) 7170 return false; 7171 APValue RVal; 7172 if (!handleIncDec(this->Info, UO, LVal, UO->getSubExpr()->getType(), 7173 UO->isIncrementOp(), &RVal)) 7174 return false; 7175 return DerivedSuccess(RVal, UO); 7176 } 7177 7178 bool VisitStmtExpr(const StmtExpr *E) { 7179 // We will have checked the full-expressions inside the statement expression 7180 // when they were completed, and don't need to check them again now. 
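    // For example (illustrative), folding the GNU statement expression
    //   ({ int n = 2; n * 3; })
    // runs 'int n = 2;' through EvaluateStmt below and then visits 'n * 3' as
    // the final expression, yielding 6.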
7181 if (Info.checkingForUndefinedBehavior()) 7182 return Error(E); 7183 7184 const CompoundStmt *CS = E->getSubStmt(); 7185 if (CS->body_empty()) 7186 return true; 7187 7188 BlockScopeRAII Scope(Info); 7189 for (CompoundStmt::const_body_iterator BI = CS->body_begin(), 7190 BE = CS->body_end(); 7191 /**/; ++BI) { 7192 if (BI + 1 == BE) { 7193 const Expr *FinalExpr = dyn_cast<Expr>(*BI); 7194 if (!FinalExpr) { 7195 Info.FFDiag((*BI)->getBeginLoc(), 7196 diag::note_constexpr_stmt_expr_unsupported); 7197 return false; 7198 } 7199 return this->Visit(FinalExpr) && Scope.destroy(); 7200 } 7201 7202 APValue ReturnValue; 7203 StmtResult Result = { ReturnValue, nullptr }; 7204 EvalStmtResult ESR = EvaluateStmt(Result, Info, *BI); 7205 if (ESR != ESR_Succeeded) { 7206 // FIXME: If the statement-expression terminated due to 'return', 7207 // 'break', or 'continue', it would be nice to propagate that to 7208 // the outer statement evaluation rather than bailing out. 7209 if (ESR != ESR_Failed) 7210 Info.FFDiag((*BI)->getBeginLoc(), 7211 diag::note_constexpr_stmt_expr_unsupported); 7212 return false; 7213 } 7214 } 7215 7216 llvm_unreachable("Return from function from the loop above."); 7217 } 7218 7219 /// Visit a value which is evaluated, but whose value is ignored. 7220 void VisitIgnoredValue(const Expr *E) { 7221 EvaluateIgnoredValue(Info, E); 7222 } 7223 7224 /// Potentially visit a MemberExpr's base expression. 7225 void VisitIgnoredBaseExpression(const Expr *E) { 7226 // While MSVC doesn't evaluate the base expression, it does diagnose the 7227 // presence of side-effecting behavior. 7228 if (Info.getLangOpts().MSVCCompat && !E->HasSideEffects(Info.Ctx)) 7229 return; 7230 VisitIgnoredValue(E); 7231 } 7232 }; 7233 7234 } // namespace 7235 7236 //===----------------------------------------------------------------------===// 7237 // Common base class for lvalue and temporary evaluation. 7238 //===----------------------------------------------------------------------===// 7239 namespace { 7240 template<class Derived> 7241 class LValueExprEvaluatorBase 7242 : public ExprEvaluatorBase<Derived> { 7243 protected: 7244 LValue &Result; 7245 bool InvalidBaseOK; 7246 typedef LValueExprEvaluatorBase LValueExprEvaluatorBaseTy; 7247 typedef ExprEvaluatorBase<Derived> ExprEvaluatorBaseTy; 7248 7249 bool Success(APValue::LValueBase B) { 7250 Result.set(B); 7251 return true; 7252 } 7253 7254 bool evaluatePointer(const Expr *E, LValue &Result) { 7255 return EvaluatePointer(E, Result, this->Info, InvalidBaseOK); 7256 } 7257 7258 public: 7259 LValueExprEvaluatorBase(EvalInfo &Info, LValue &Result, bool InvalidBaseOK) 7260 : ExprEvaluatorBaseTy(Info), Result(Result), 7261 InvalidBaseOK(InvalidBaseOK) {} 7262 7263 bool Success(const APValue &V, const Expr *E) { 7264 Result.setFrom(this->Info.Ctx, V); 7265 return true; 7266 } 7267 7268 bool VisitMemberExpr(const MemberExpr *E) { 7269 // Handle non-static data members. 
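    // For example (illustrative), given 'struct S { int x; }; constexpr S s{1};',
    // evaluating the lvalue 's.x' visits the base 's' and then extends the
    // resulting designator with the field 'x' below.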
7270 QualType BaseTy; 7271 bool EvalOK; 7272 if (E->isArrow()) { 7273 EvalOK = evaluatePointer(E->getBase(), Result); 7274 BaseTy = E->getBase()->getType()->castAs<PointerType>()->getPointeeType(); 7275 } else if (E->getBase()->isRValue()) { 7276 assert(E->getBase()->getType()->isRecordType()); 7277 EvalOK = EvaluateTemporary(E->getBase(), Result, this->Info); 7278 BaseTy = E->getBase()->getType(); 7279 } else { 7280 EvalOK = this->Visit(E->getBase()); 7281 BaseTy = E->getBase()->getType(); 7282 } 7283 if (!EvalOK) { 7284 if (!InvalidBaseOK) 7285 return false; 7286 Result.setInvalid(E); 7287 return true; 7288 } 7289 7290 const ValueDecl *MD = E->getMemberDecl(); 7291 if (const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl())) { 7292 assert(BaseTy->castAs<RecordType>()->getDecl()->getCanonicalDecl() == 7293 FD->getParent()->getCanonicalDecl() && "record / field mismatch"); 7294 (void)BaseTy; 7295 if (!HandleLValueMember(this->Info, E, Result, FD)) 7296 return false; 7297 } else if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(MD)) { 7298 if (!HandleLValueIndirectMember(this->Info, E, Result, IFD)) 7299 return false; 7300 } else 7301 return this->Error(E); 7302 7303 if (MD->getType()->isReferenceType()) { 7304 APValue RefValue; 7305 if (!handleLValueToRValueConversion(this->Info, E, MD->getType(), Result, 7306 RefValue)) 7307 return false; 7308 return Success(RefValue, E); 7309 } 7310 return true; 7311 } 7312 7313 bool VisitBinaryOperator(const BinaryOperator *E) { 7314 switch (E->getOpcode()) { 7315 default: 7316 return ExprEvaluatorBaseTy::VisitBinaryOperator(E); 7317 7318 case BO_PtrMemD: 7319 case BO_PtrMemI: 7320 return HandleMemberPointerAccess(this->Info, E, Result); 7321 } 7322 } 7323 7324 bool VisitCastExpr(const CastExpr *E) { 7325 switch (E->getCastKind()) { 7326 default: 7327 return ExprEvaluatorBaseTy::VisitCastExpr(E); 7328 7329 case CK_DerivedToBase: 7330 case CK_UncheckedDerivedToBase: 7331 if (!this->Visit(E->getSubExpr())) 7332 return false; 7333 7334 // Now figure out the necessary offset to add to the base LV to get from 7335 // the derived class to the base class. 7336 return HandleLValueBasePath(this->Info, E, E->getSubExpr()->getType(), 7337 Result); 7338 } 7339 } 7340 }; 7341 } 7342 7343 //===----------------------------------------------------------------------===// 7344 // LValue Evaluation 7345 // 7346 // This is used for evaluating lvalues (in C and C++), xvalues (in C++11), 7347 // function designators (in C), decl references to void objects (in C), and 7348 // temporaries (if building with -Wno-address-of-temporary). 7349 // 7350 // LValue evaluation produces values comprising a base expression of one of the 7351 // following types: 7352 // - Declarations 7353 // * VarDecl 7354 // * FunctionDecl 7355 // - Literals 7356 // * CompoundLiteralExpr in C (and in global scope in C++) 7357 // * StringLiteral 7358 // * PredefinedExpr 7359 // * ObjCStringLiteralExpr 7360 // * ObjCEncodeExpr 7361 // * AddrLabelExpr 7362 // * BlockExpr 7363 // * CallExpr for a MakeStringConstant builtin 7364 // - typeid(T) expressions, as TypeInfoLValues 7365 // - Locals and temporaries 7366 // * MaterializeTemporaryExpr 7367 // * Any Expr, with a CallIndex indicating the function in which the temporary 7368 // was evaluated, for cases where the MaterializeTemporaryExpr is missing 7369 // from the AST (FIXME). 7370 // * A MaterializeTemporaryExpr that has static storage duration, with no 7371 // CallIndex, for a lifetime-extended temporary. 
7372 // plus an offset in bytes. 7373 //===----------------------------------------------------------------------===// 7374 namespace { 7375 class LValueExprEvaluator 7376 : public LValueExprEvaluatorBase<LValueExprEvaluator> { 7377 public: 7378 LValueExprEvaluator(EvalInfo &Info, LValue &Result, bool InvalidBaseOK) : 7379 LValueExprEvaluatorBaseTy(Info, Result, InvalidBaseOK) {} 7380 7381 bool VisitVarDecl(const Expr *E, const VarDecl *VD); 7382 bool VisitUnaryPreIncDec(const UnaryOperator *UO); 7383 7384 bool VisitDeclRefExpr(const DeclRefExpr *E); 7385 bool VisitPredefinedExpr(const PredefinedExpr *E) { return Success(E); } 7386 bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E); 7387 bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E); 7388 bool VisitMemberExpr(const MemberExpr *E); 7389 bool VisitStringLiteral(const StringLiteral *E) { return Success(E); } 7390 bool VisitObjCEncodeExpr(const ObjCEncodeExpr *E) { return Success(E); } 7391 bool VisitCXXTypeidExpr(const CXXTypeidExpr *E); 7392 bool VisitCXXUuidofExpr(const CXXUuidofExpr *E); 7393 bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E); 7394 bool VisitUnaryDeref(const UnaryOperator *E); 7395 bool VisitUnaryReal(const UnaryOperator *E); 7396 bool VisitUnaryImag(const UnaryOperator *E); 7397 bool VisitUnaryPreInc(const UnaryOperator *UO) { 7398 return VisitUnaryPreIncDec(UO); 7399 } 7400 bool VisitUnaryPreDec(const UnaryOperator *UO) { 7401 return VisitUnaryPreIncDec(UO); 7402 } 7403 bool VisitBinAssign(const BinaryOperator *BO); 7404 bool VisitCompoundAssignOperator(const CompoundAssignOperator *CAO); 7405 7406 bool VisitCastExpr(const CastExpr *E) { 7407 switch (E->getCastKind()) { 7408 default: 7409 return LValueExprEvaluatorBaseTy::VisitCastExpr(E); 7410 7411 case CK_LValueBitCast: 7412 this->CCEDiag(E, diag::note_constexpr_invalid_cast) << 2; 7413 if (!Visit(E->getSubExpr())) 7414 return false; 7415 Result.Designator.setInvalid(); 7416 return true; 7417 7418 case CK_BaseToDerived: 7419 if (!Visit(E->getSubExpr())) 7420 return false; 7421 return HandleBaseToDerivedCast(Info, E, Result); 7422 7423 case CK_Dynamic: 7424 if (!Visit(E->getSubExpr())) 7425 return false; 7426 return HandleDynamicCast(Info, cast<ExplicitCastExpr>(E), Result); 7427 } 7428 } 7429 }; 7430 } // end anonymous namespace 7431 7432 /// Evaluate an expression as an lvalue. 
This can be legitimately called on 7433 /// expressions which are not glvalues, in three cases: 7434 /// * function designators in C, and 7435 /// * "extern void" objects 7436 /// * @selector() expressions in Objective-C 7437 static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info, 7438 bool InvalidBaseOK) { 7439 assert(E->isGLValue() || E->getType()->isFunctionType() || 7440 E->getType()->isVoidType() || isa<ObjCSelectorExpr>(E)); 7441 return LValueExprEvaluator(Info, Result, InvalidBaseOK).Visit(E); 7442 } 7443 7444 bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) { 7445 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl())) 7446 return Success(FD); 7447 if (const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl())) 7448 return VisitVarDecl(E, VD); 7449 if (const BindingDecl *BD = dyn_cast<BindingDecl>(E->getDecl())) 7450 return Visit(BD->getBinding()); 7451 return Error(E); 7452 } 7453 7454 7455 bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) { 7456 7457 // If we are within a lambda's call operator, check whether the 'VD' referred 7458 // to within 'E' actually represents a lambda-capture that maps to a 7459 // data-member/field within the closure object, and if so, evaluate to the 7460 // field or what the field refers to. 7461 if (Info.CurrentCall && isLambdaCallOperator(Info.CurrentCall->Callee) && 7462 isa<DeclRefExpr>(E) && 7463 cast<DeclRefExpr>(E)->refersToEnclosingVariableOrCapture()) { 7464 // We don't always have a complete capture-map when checking or inferring if 7465 // the function call operator meets the requirements of a constexpr function 7466 // - but we don't need to evaluate the captures to determine constexprness 7467 // (dcl.constexpr C++17). 7468 if (Info.checkingPotentialConstantExpression()) 7469 return false; 7470 7471 if (auto *FD = Info.CurrentCall->LambdaCaptureFields.lookup(VD)) { 7472 // Start with 'Result' referring to the complete closure object... 7473 Result = *Info.CurrentCall->This; 7474 // ... then update it to refer to the field of the closure object 7475 // that represents the capture. 7476 if (!HandleLValueMember(Info, E, Result, FD)) 7477 return false; 7478 // And if the field is of reference type, update 'Result' to refer to what 7479 // the field refers to. 7480 if (FD->getType()->isReferenceType()) { 7481 APValue RVal; 7482 if (!handleLValueToRValueConversion(Info, E, FD->getType(), Result, 7483 RVal)) 7484 return false; 7485 Result.setFrom(Info.Ctx, RVal); 7486 } 7487 return true; 7488 } 7489 } 7490 CallStackFrame *Frame = nullptr; 7491 if (VD->hasLocalStorage() && Info.CurrentCall->Index > 1) { 7492 // Only if a local variable was declared in the function currently being 7493 // evaluated, do we expect to be able to find its value in the current 7494 // frame. (Otherwise it was likely declared in an enclosing context and 7495 // could either have a valid evaluatable value (for e.g. a constexpr 7496 // variable) or be ill-formed (and trigger an appropriate evaluation 7497 // diagnostic)). 
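    // For example (illustrative), while evaluating the call operator of
    //   void g() { constexpr int n = 3; auto l = [] { return n; }; }
    // the declaration context of 'n' is g, not the lambda's call operator, so
    // Frame stays null and the read of 'n' later falls back to its evaluated
    // initializer.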
7498 if (Info.CurrentCall->Callee && 7499 Info.CurrentCall->Callee->Equals(VD->getDeclContext())) { 7500 Frame = Info.CurrentCall; 7501 } 7502 } 7503 7504 if (!VD->getType()->isReferenceType()) { 7505 if (Frame) { 7506 Result.set({VD, Frame->Index, 7507 Info.CurrentCall->getCurrentTemporaryVersion(VD)}); 7508 return true; 7509 } 7510 return Success(VD); 7511 } 7512 7513 APValue *V; 7514 if (!evaluateVarDeclInit(Info, E, VD, Frame, V, nullptr)) 7515 return false; 7516 if (!V->hasValue()) { 7517 // FIXME: Is it possible for V to be indeterminate here? If so, we should 7518 // adjust the diagnostic to say that. 7519 if (!Info.checkingPotentialConstantExpression()) 7520 Info.FFDiag(E, diag::note_constexpr_use_uninit_reference); 7521 return false; 7522 } 7523 return Success(*V, E); 7524 } 7525 7526 bool LValueExprEvaluator::VisitMaterializeTemporaryExpr( 7527 const MaterializeTemporaryExpr *E) { 7528 // Walk through the expression to find the materialized temporary itself. 7529 SmallVector<const Expr *, 2> CommaLHSs; 7530 SmallVector<SubobjectAdjustment, 2> Adjustments; 7531 const Expr *Inner = 7532 E->getSubExpr()->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments); 7533 7534 // If we passed any comma operators, evaluate their LHSs. 7535 for (unsigned I = 0, N = CommaLHSs.size(); I != N; ++I) 7536 if (!EvaluateIgnoredValue(Info, CommaLHSs[I])) 7537 return false; 7538 7539 // A materialized temporary with static storage duration can appear within the 7540 // result of a constant expression evaluation, so we need to preserve its 7541 // value for use outside this evaluation. 7542 APValue *Value; 7543 if (E->getStorageDuration() == SD_Static) { 7544 Value = E->getOrCreateValue(true); 7545 *Value = APValue(); 7546 Result.set(E); 7547 } else { 7548 Value = &Info.CurrentCall->createTemporary( 7549 E, E->getType(), E->getStorageDuration() == SD_Automatic, Result); 7550 } 7551 7552 QualType Type = Inner->getType(); 7553 7554 // Materialize the temporary itself. 7555 if (!EvaluateInPlace(*Value, Info, Result, Inner)) { 7556 *Value = APValue(); 7557 return false; 7558 } 7559 7560 // Adjust our lvalue to refer to the desired subobject. 7561 for (unsigned I = Adjustments.size(); I != 0; /**/) { 7562 --I; 7563 switch (Adjustments[I].Kind) { 7564 case SubobjectAdjustment::DerivedToBaseAdjustment: 7565 if (!HandleLValueBasePath(Info, Adjustments[I].DerivedToBase.BasePath, 7566 Type, Result)) 7567 return false; 7568 Type = Adjustments[I].DerivedToBase.BasePath->getType(); 7569 break; 7570 7571 case SubobjectAdjustment::FieldAdjustment: 7572 if (!HandleLValueMember(Info, E, Result, Adjustments[I].Field)) 7573 return false; 7574 Type = Adjustments[I].Field->getType(); 7575 break; 7576 7577 case SubobjectAdjustment::MemberPointerAdjustment: 7578 if (!HandleMemberPointerAccess(this->Info, Type, Result, 7579 Adjustments[I].Ptr.RHS)) 7580 return false; 7581 Type = Adjustments[I].Ptr.MPT->getPointeeType(); 7582 break; 7583 } 7584 } 7585 7586 return true; 7587 } 7588 7589 bool 7590 LValueExprEvaluator::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) { 7591 assert((!Info.getLangOpts().CPlusPlus || E->isFileScope()) && 7592 "lvalue compound literal in c++?"); 7593 // Defer visiting the literal until the lvalue-to-rvalue conversion. We can 7594 // only see this when folding in C, so there's no standard to follow here. 
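  // For example (illustrative), folding '(int[2]){1, 2}[0]' in C treats the
  // compound literal itself as the lvalue base and digs into its initializer
  // only when the enclosing lvalue-to-rvalue conversion is evaluated.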
7595 return Success(E); 7596 } 7597 7598 bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) { 7599 TypeInfoLValue TypeInfo; 7600 7601 if (!E->isPotentiallyEvaluated()) { 7602 if (E->isTypeOperand()) 7603 TypeInfo = TypeInfoLValue(E->getTypeOperand(Info.Ctx).getTypePtr()); 7604 else 7605 TypeInfo = TypeInfoLValue(E->getExprOperand()->getType().getTypePtr()); 7606 } else { 7607 if (!Info.Ctx.getLangOpts().CPlusPlus2a) { 7608 Info.CCEDiag(E, diag::note_constexpr_typeid_polymorphic) 7609 << E->getExprOperand()->getType() 7610 << E->getExprOperand()->getSourceRange(); 7611 } 7612 7613 if (!Visit(E->getExprOperand())) 7614 return false; 7615 7616 Optional<DynamicType> DynType = 7617 ComputeDynamicType(Info, E, Result, AK_TypeId); 7618 if (!DynType) 7619 return false; 7620 7621 TypeInfo = 7622 TypeInfoLValue(Info.Ctx.getRecordType(DynType->Type).getTypePtr()); 7623 } 7624 7625 return Success(APValue::LValueBase::getTypeInfo(TypeInfo, E->getType())); 7626 } 7627 7628 bool LValueExprEvaluator::VisitCXXUuidofExpr(const CXXUuidofExpr *E) { 7629 return Success(E); 7630 } 7631 7632 bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) { 7633 // Handle static data members. 7634 if (const VarDecl *VD = dyn_cast<VarDecl>(E->getMemberDecl())) { 7635 VisitIgnoredBaseExpression(E->getBase()); 7636 return VisitVarDecl(E, VD); 7637 } 7638 7639 // Handle static member functions. 7640 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(E->getMemberDecl())) { 7641 if (MD->isStatic()) { 7642 VisitIgnoredBaseExpression(E->getBase()); 7643 return Success(MD); 7644 } 7645 } 7646 7647 // Handle non-static data members. 7648 return LValueExprEvaluatorBaseTy::VisitMemberExpr(E); 7649 } 7650 7651 bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) { 7652 // FIXME: Deal with vectors as array subscript bases. 7653 if (E->getBase()->getType()->isVectorType()) 7654 return Error(E); 7655 7656 bool Success = true; 7657 if (!evaluatePointer(E->getBase(), Result)) { 7658 if (!Info.noteFailure()) 7659 return false; 7660 Success = false; 7661 } 7662 7663 APSInt Index; 7664 if (!EvaluateInteger(E->getIdx(), Index, Info)) 7665 return false; 7666 7667 return Success && 7668 HandleLValueArrayAdjustment(Info, E, Result, E->getType(), Index); 7669 } 7670 7671 bool LValueExprEvaluator::VisitUnaryDeref(const UnaryOperator *E) { 7672 return evaluatePointer(E->getSubExpr(), Result); 7673 } 7674 7675 bool LValueExprEvaluator::VisitUnaryReal(const UnaryOperator *E) { 7676 if (!Visit(E->getSubExpr())) 7677 return false; 7678 // __real is a no-op on scalar lvalues. 
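  // For example (illustrative), for 'double d;' the lvalue '__real__ d' is
  // just 'd', whereas for '_Complex double z;' we adjust the designator below
  // to name the real component of 'z'.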
7679 if (E->getSubExpr()->getType()->isAnyComplexType()) 7680 HandleLValueComplexElement(Info, E, Result, E->getType(), false); 7681 return true; 7682 } 7683 7684 bool LValueExprEvaluator::VisitUnaryImag(const UnaryOperator *E) { 7685 assert(E->getSubExpr()->getType()->isAnyComplexType() && 7686 "lvalue __imag__ on scalar?"); 7687 if (!Visit(E->getSubExpr())) 7688 return false; 7689 HandleLValueComplexElement(Info, E, Result, E->getType(), true); 7690 return true; 7691 } 7692 7693 bool LValueExprEvaluator::VisitUnaryPreIncDec(const UnaryOperator *UO) { 7694 if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure()) 7695 return Error(UO); 7696 7697 if (!this->Visit(UO->getSubExpr())) 7698 return false; 7699 7700 return handleIncDec( 7701 this->Info, UO, Result, UO->getSubExpr()->getType(), 7702 UO->isIncrementOp(), nullptr); 7703 } 7704 7705 bool LValueExprEvaluator::VisitCompoundAssignOperator( 7706 const CompoundAssignOperator *CAO) { 7707 if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure()) 7708 return Error(CAO); 7709 7710 APValue RHS; 7711 7712 // The overall lvalue result is the result of evaluating the LHS. 7713 if (!this->Visit(CAO->getLHS())) { 7714 if (Info.noteFailure()) 7715 Evaluate(RHS, this->Info, CAO->getRHS()); 7716 return false; 7717 } 7718 7719 if (!Evaluate(RHS, this->Info, CAO->getRHS())) 7720 return false; 7721 7722 return handleCompoundAssignment( 7723 this->Info, CAO, 7724 Result, CAO->getLHS()->getType(), CAO->getComputationLHSType(), 7725 CAO->getOpForCompoundAssignment(CAO->getOpcode()), RHS); 7726 } 7727 7728 bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) { 7729 if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure()) 7730 return Error(E); 7731 7732 APValue NewVal; 7733 7734 if (!this->Visit(E->getLHS())) { 7735 if (Info.noteFailure()) 7736 Evaluate(NewVal, this->Info, E->getRHS()); 7737 return false; 7738 } 7739 7740 if (!Evaluate(NewVal, this->Info, E->getRHS())) 7741 return false; 7742 7743 if (Info.getLangOpts().CPlusPlus2a && 7744 !HandleUnionActiveMemberChange(Info, E->getLHS(), Result)) 7745 return false; 7746 7747 return handleAssignment(this->Info, E, Result, E->getLHS()->getType(), 7748 NewVal); 7749 } 7750 7751 //===----------------------------------------------------------------------===// 7752 // Pointer Evaluation 7753 //===----------------------------------------------------------------------===// 7754 7755 /// Attempts to compute the number of bytes available at the pointer 7756 /// returned by a function with the alloc_size attribute. Returns true if we 7757 /// were successful. Places an unsigned number into `Result`. 7758 /// 7759 /// This expects the given CallExpr to be a call to a function with an 7760 /// alloc_size attribute. 
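/// For example (an illustrative, hypothetical declaration):
///
///   void *my_malloc(size_t n) __attribute__((alloc_size(1)));
///
/// a call 'my_malloc(32)' yields 32 bytes here, provided the argument folds to
/// a non-negative value that fits in size_t.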
7761 static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx, 7762 const CallExpr *Call, 7763 llvm::APInt &Result) { 7764 const AllocSizeAttr *AllocSize = getAllocSizeAttr(Call); 7765 7766 assert(AllocSize && AllocSize->getElemSizeParam().isValid()); 7767 unsigned SizeArgNo = AllocSize->getElemSizeParam().getASTIndex(); 7768 unsigned BitsInSizeT = Ctx.getTypeSize(Ctx.getSizeType()); 7769 if (Call->getNumArgs() <= SizeArgNo) 7770 return false; 7771 7772 auto EvaluateAsSizeT = [&](const Expr *E, APSInt &Into) { 7773 Expr::EvalResult ExprResult; 7774 if (!E->EvaluateAsInt(ExprResult, Ctx, Expr::SE_AllowSideEffects)) 7775 return false; 7776 Into = ExprResult.Val.getInt(); 7777 if (Into.isNegative() || !Into.isIntN(BitsInSizeT)) 7778 return false; 7779 Into = Into.zextOrSelf(BitsInSizeT); 7780 return true; 7781 }; 7782 7783 APSInt SizeOfElem; 7784 if (!EvaluateAsSizeT(Call->getArg(SizeArgNo), SizeOfElem)) 7785 return false; 7786 7787 if (!AllocSize->getNumElemsParam().isValid()) { 7788 Result = std::move(SizeOfElem); 7789 return true; 7790 } 7791 7792 APSInt NumberOfElems; 7793 unsigned NumArgNo = AllocSize->getNumElemsParam().getASTIndex(); 7794 if (!EvaluateAsSizeT(Call->getArg(NumArgNo), NumberOfElems)) 7795 return false; 7796 7797 bool Overflow; 7798 llvm::APInt BytesAvailable = SizeOfElem.umul_ov(NumberOfElems, Overflow); 7799 if (Overflow) 7800 return false; 7801 7802 Result = std::move(BytesAvailable); 7803 return true; 7804 } 7805 7806 /// Convenience function. LVal's base must be a call to an alloc_size 7807 /// function. 7808 static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx, 7809 const LValue &LVal, 7810 llvm::APInt &Result) { 7811 assert(isBaseAnAllocSizeCall(LVal.getLValueBase()) && 7812 "Can't get the size of a non alloc_size function"); 7813 const auto *Base = LVal.getLValueBase().get<const Expr *>(); 7814 const CallExpr *CE = tryUnwrapAllocSizeCall(Base); 7815 return getBytesReturnedByAllocSizeCall(Ctx, CE, Result); 7816 } 7817 7818 /// Attempts to evaluate the given LValueBase as the result of a call to 7819 /// a function with the alloc_size attribute. If it was possible to do so, this 7820 /// function will return true, make Result's Base point to said function call, 7821 /// and mark Result's Base as invalid. 7822 static bool evaluateLValueAsAllocSize(EvalInfo &Info, APValue::LValueBase Base, 7823 LValue &Result) { 7824 if (Base.isNull()) 7825 return false; 7826 7827 // Because we do no form of static analysis, we only support const variables. 7828 // 7829 // Additionally, we can't support parameters, nor can we support static 7830 // variables (in the latter case, use-before-assign isn't UB; in the former, 7831 // we have no clue what they'll be assigned to). 7832 const auto *VD = 7833 dyn_cast_or_null<VarDecl>(Base.dyn_cast<const ValueDecl *>()); 7834 if (!VD || !VD->isLocalVarDecl() || !VD->getType().isConstQualified()) 7835 return false; 7836 7837 const Expr *Init = VD->getAnyInitializer(); 7838 if (!Init) 7839 return false; 7840 7841 const Expr *E = Init->IgnoreParens(); 7842 if (!tryUnwrapAllocSizeCall(E)) 7843 return false; 7844 7845 // Store E instead of E unwrapped so that the type of the LValue's base is 7846 // what the user wanted. 
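  // For example (illustrative, assuming a hypothetical
  // 'void *my_malloc(size_t)' declared with __attribute__((alloc_size(1)))):
  // given a local 'void *const p = my_malloc(16);', __builtin_object_size(p, 0)
  // can see the 16 bytes through the invalid-base lvalue produced here.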
7847 Result.setInvalid(E); 7848 7849 QualType Pointee = E->getType()->castAs<PointerType>()->getPointeeType(); 7850 Result.addUnsizedArray(Info, E, Pointee); 7851 return true; 7852 } 7853 7854 namespace { 7855 class PointerExprEvaluator 7856 : public ExprEvaluatorBase<PointerExprEvaluator> { 7857 LValue &Result; 7858 bool InvalidBaseOK; 7859 7860 bool Success(const Expr *E) { 7861 Result.set(E); 7862 return true; 7863 } 7864 7865 bool evaluateLValue(const Expr *E, LValue &Result) { 7866 return EvaluateLValue(E, Result, Info, InvalidBaseOK); 7867 } 7868 7869 bool evaluatePointer(const Expr *E, LValue &Result) { 7870 return EvaluatePointer(E, Result, Info, InvalidBaseOK); 7871 } 7872 7873 bool visitNonBuiltinCallExpr(const CallExpr *E); 7874 public: 7875 7876 PointerExprEvaluator(EvalInfo &info, LValue &Result, bool InvalidBaseOK) 7877 : ExprEvaluatorBaseTy(info), Result(Result), 7878 InvalidBaseOK(InvalidBaseOK) {} 7879 7880 bool Success(const APValue &V, const Expr *E) { 7881 Result.setFrom(Info.Ctx, V); 7882 return true; 7883 } 7884 bool ZeroInitialization(const Expr *E) { 7885 Result.setNull(Info.Ctx, E->getType()); 7886 return true; 7887 } 7888 7889 bool VisitBinaryOperator(const BinaryOperator *E); 7890 bool VisitCastExpr(const CastExpr* E); 7891 bool VisitUnaryAddrOf(const UnaryOperator *E); 7892 bool VisitObjCStringLiteral(const ObjCStringLiteral *E) 7893 { return Success(E); } 7894 bool VisitObjCBoxedExpr(const ObjCBoxedExpr *E) { 7895 if (E->isExpressibleAsConstantInitializer()) 7896 return Success(E); 7897 if (Info.noteFailure()) 7898 EvaluateIgnoredValue(Info, E->getSubExpr()); 7899 return Error(E); 7900 } 7901 bool VisitAddrLabelExpr(const AddrLabelExpr *E) 7902 { return Success(E); } 7903 bool VisitCallExpr(const CallExpr *E); 7904 bool VisitBuiltinCallExpr(const CallExpr *E, unsigned BuiltinOp); 7905 bool VisitBlockExpr(const BlockExpr *E) { 7906 if (!E->getBlockDecl()->hasCaptures()) 7907 return Success(E); 7908 return Error(E); 7909 } 7910 bool VisitCXXThisExpr(const CXXThisExpr *E) { 7911 // Can't look at 'this' when checking a potential constant expression. 7912 if (Info.checkingPotentialConstantExpression()) 7913 return false; 7914 if (!Info.CurrentCall->This) { 7915 if (Info.getLangOpts().CPlusPlus11) 7916 Info.FFDiag(E, diag::note_constexpr_this) << E->isImplicit(); 7917 else 7918 Info.FFDiag(E); 7919 return false; 7920 } 7921 Result = *Info.CurrentCall->This; 7922 // If we are inside a lambda's call operator, the 'this' expression refers 7923 // to the enclosing '*this' object (either by value or reference) which is 7924 // either copied into the closure object's field that represents the '*this' 7925 // or refers to '*this'. 7926 if (isLambdaCallOperator(Info.CurrentCall->Callee)) { 7927 // Ensure we actually have captured 'this'. (an error will have 7928 // been previously reported if not). 7929 if (!Info.CurrentCall->LambdaThisCaptureField) 7930 return false; 7931 7932 // Update 'Result' to refer to the data member/field of the closure object 7933 // that represents the '*this' capture. 7934 if (!HandleLValueMember(Info, E, Result, 7935 Info.CurrentCall->LambdaThisCaptureField)) 7936 return false; 7937 // If we captured '*this' by reference, replace the field with its referent. 
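      // (For a capture of 'this' -- e.g. [this] or [&] -- the capture field
      // has pointer type and holds the address of the enclosing object; for a
      // by-value [*this] capture the field is the object itself and Result
      // already designates it directly.)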
7938 if (Info.CurrentCall->LambdaThisCaptureField->getType() 7939 ->isPointerType()) { 7940 APValue RVal; 7941 if (!handleLValueToRValueConversion(Info, E, E->getType(), Result, 7942 RVal)) 7943 return false; 7944 7945 Result.setFrom(Info.Ctx, RVal); 7946 } 7947 } 7948 return true; 7949 } 7950 7951 bool VisitCXXNewExpr(const CXXNewExpr *E); 7952 7953 bool VisitSourceLocExpr(const SourceLocExpr *E) { 7954 assert(E->isStringType() && "SourceLocExpr isn't a pointer type?"); 7955 APValue LValResult = E->EvaluateInContext( 7956 Info.Ctx, Info.CurrentCall->CurSourceLocExprScope.getDefaultExpr()); 7957 Result.setFrom(Info.Ctx, LValResult); 7958 return true; 7959 } 7960 7961 // FIXME: Missing: @protocol, @selector 7962 }; 7963 } // end anonymous namespace 7964 7965 static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info, 7966 bool InvalidBaseOK) { 7967 assert(E->isRValue() && E->getType()->hasPointerRepresentation()); 7968 return PointerExprEvaluator(Info, Result, InvalidBaseOK).Visit(E); 7969 } 7970 7971 bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { 7972 if (E->getOpcode() != BO_Add && 7973 E->getOpcode() != BO_Sub) 7974 return ExprEvaluatorBaseTy::VisitBinaryOperator(E); 7975 7976 const Expr *PExp = E->getLHS(); 7977 const Expr *IExp = E->getRHS(); 7978 if (IExp->getType()->isPointerType()) 7979 std::swap(PExp, IExp); 7980 7981 bool EvalPtrOK = evaluatePointer(PExp, Result); 7982 if (!EvalPtrOK && !Info.noteFailure()) 7983 return false; 7984 7985 llvm::APSInt Offset; 7986 if (!EvaluateInteger(IExp, Offset, Info) || !EvalPtrOK) 7987 return false; 7988 7989 if (E->getOpcode() == BO_Sub) 7990 negateAsSigned(Offset); 7991 7992 QualType Pointee = PExp->getType()->castAs<PointerType>()->getPointeeType(); 7993 return HandleLValueArrayAdjustment(Info, E, Result, Pointee, Offset); 7994 } 7995 7996 bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) { 7997 return evaluateLValue(E->getSubExpr(), Result); 7998 } 7999 8000 bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) { 8001 const Expr *SubExpr = E->getSubExpr(); 8002 8003 switch (E->getCastKind()) { 8004 default: 8005 break; 8006 case CK_BitCast: 8007 case CK_CPointerToObjCPointerCast: 8008 case CK_BlockPointerToObjCPointerCast: 8009 case CK_AnyPointerToBlockPointerCast: 8010 case CK_AddressSpaceConversion: 8011 if (!Visit(SubExpr)) 8012 return false; 8013 // Bitcasts to cv void* are static_casts, not reinterpret_casts, so are 8014 // permitted in constant expressions in C++11. Bitcasts from cv void* are 8015 // also static_casts, but we disallow them as a resolution to DR1312. 8016 if (!E->getType()->isVoidPointerType()) { 8017 if (!Result.InvalidBase && !Result.Designator.Invalid && 8018 !Result.IsNullPtr && 8019 Info.Ctx.hasSameUnqualifiedType(Result.Designator.getType(Info.Ctx), 8020 E->getType()->getPointeeType()) && 8021 Info.getStdAllocatorCaller("allocate")) { 8022 // Inside a call to std::allocator::allocate and friends, we permit 8023 // casting from void* back to cv1 T* for a pointer that points to a 8024 // cv2 T. 
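        // (This matches, for example, the 'static_cast<T *>(void-pointer)'
        // shape that allocator implementations typically perform internally;
        // illustrative only.)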
8025 } else { 8026 Result.Designator.setInvalid(); 8027 if (SubExpr->getType()->isVoidPointerType()) 8028 CCEDiag(E, diag::note_constexpr_invalid_cast) 8029 << 3 << SubExpr->getType(); 8030 else 8031 CCEDiag(E, diag::note_constexpr_invalid_cast) << 2; 8032 } 8033 } 8034 if (E->getCastKind() == CK_AddressSpaceConversion && Result.IsNullPtr) 8035 ZeroInitialization(E); 8036 return true; 8037 8038 case CK_DerivedToBase: 8039 case CK_UncheckedDerivedToBase: 8040 if (!evaluatePointer(E->getSubExpr(), Result)) 8041 return false; 8042 if (!Result.Base && Result.Offset.isZero()) 8043 return true; 8044 8045 // Now figure out the necessary offset to add to the base LV to get from 8046 // the derived class to the base class. 8047 return HandleLValueBasePath(Info, E, E->getSubExpr()->getType()-> 8048 castAs<PointerType>()->getPointeeType(), 8049 Result); 8050 8051 case CK_BaseToDerived: 8052 if (!Visit(E->getSubExpr())) 8053 return false; 8054 if (!Result.Base && Result.Offset.isZero()) 8055 return true; 8056 return HandleBaseToDerivedCast(Info, E, Result); 8057 8058 case CK_Dynamic: 8059 if (!Visit(E->getSubExpr())) 8060 return false; 8061 return HandleDynamicCast(Info, cast<ExplicitCastExpr>(E), Result); 8062 8063 case CK_NullToPointer: 8064 VisitIgnoredValue(E->getSubExpr()); 8065 return ZeroInitialization(E); 8066 8067 case CK_IntegralToPointer: { 8068 CCEDiag(E, diag::note_constexpr_invalid_cast) << 2; 8069 8070 APValue Value; 8071 if (!EvaluateIntegerOrLValue(SubExpr, Value, Info)) 8072 break; 8073 8074 if (Value.isInt()) { 8075 unsigned Size = Info.Ctx.getTypeSize(E->getType()); 8076 uint64_t N = Value.getInt().extOrTrunc(Size).getZExtValue(); 8077 Result.Base = (Expr*)nullptr; 8078 Result.InvalidBase = false; 8079 Result.Offset = CharUnits::fromQuantity(N); 8080 Result.Designator.setInvalid(); 8081 Result.IsNullPtr = false; 8082 return true; 8083 } else { 8084 // Cast is of an lvalue, no need to change value. 8085 Result.setFrom(Info.Ctx, Value); 8086 return true; 8087 } 8088 } 8089 8090 case CK_ArrayToPointerDecay: { 8091 if (SubExpr->isGLValue()) { 8092 if (!evaluateLValue(SubExpr, Result)) 8093 return false; 8094 } else { 8095 APValue &Value = Info.CurrentCall->createTemporary( 8096 SubExpr, SubExpr->getType(), false, Result); 8097 if (!EvaluateInPlace(Value, Info, Result, SubExpr)) 8098 return false; 8099 } 8100 // The result is a pointer to the first element of the array. 8101 auto *AT = Info.Ctx.getAsArrayType(SubExpr->getType()); 8102 if (auto *CAT = dyn_cast<ConstantArrayType>(AT)) 8103 Result.addArray(Info, E, CAT); 8104 else 8105 Result.addUnsizedArray(Info, E, AT->getElementType()); 8106 return true; 8107 } 8108 8109 case CK_FunctionToPointerDecay: 8110 return evaluateLValue(SubExpr, Result); 8111 8112 case CK_LValueToRValue: { 8113 LValue LVal; 8114 if (!evaluateLValue(E->getSubExpr(), LVal)) 8115 return false; 8116 8117 APValue RVal; 8118 // Note, we use the subexpression's type in order to retain cv-qualifiers. 8119 if (!handleLValueToRValueConversion(Info, E, E->getSubExpr()->getType(), 8120 LVal, RVal)) 8121 return InvalidBaseOK && 8122 evaluateLValueAsAllocSize(Info, LVal.Base, Result); 8123 return Success(RVal, E); 8124 } 8125 } 8126 8127 return ExprEvaluatorBaseTy::VisitCastExpr(E); 8128 } 8129 8130 static CharUnits GetAlignOfType(EvalInfo &Info, QualType T, 8131 UnaryExprOrTypeTrait ExprKind) { 8132 // C++ [expr.alignof]p3: 8133 // When alignof is applied to a reference type, the result is the 8134 // alignment of the referenced type. 
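  // For example, alignof(int &) evaluates to the same value as alignof(int).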
8135 if (const ReferenceType *Ref = T->getAs<ReferenceType>()) 8136 T = Ref->getPointeeType(); 8137 8138 if (T.getQualifiers().hasUnaligned()) 8139 return CharUnits::One(); 8140 8141 const bool AlignOfReturnsPreferred = 8142 Info.Ctx.getLangOpts().getClangABICompat() <= LangOptions::ClangABI::Ver7; 8143 8144 // __alignof is defined to return the preferred alignment. 8145 // Before 8, clang returned the preferred alignment for alignof and _Alignof 8146 // as well. 8147 if (ExprKind == UETT_PreferredAlignOf || AlignOfReturnsPreferred) 8148 return Info.Ctx.toCharUnitsFromBits( 8149 Info.Ctx.getPreferredTypeAlign(T.getTypePtr())); 8150 // alignof and _Alignof are defined to return the ABI alignment. 8151 else if (ExprKind == UETT_AlignOf) 8152 return Info.Ctx.getTypeAlignInChars(T.getTypePtr()); 8153 else 8154 llvm_unreachable("GetAlignOfType on a non-alignment ExprKind"); 8155 } 8156 8157 static CharUnits GetAlignOfExpr(EvalInfo &Info, const Expr *E, 8158 UnaryExprOrTypeTrait ExprKind) { 8159 E = E->IgnoreParens(); 8160 8161 // The kinds of expressions that we have special-case logic here for 8162 // should be kept up to date with the special checks for those 8163 // expressions in Sema. 8164 8165 // alignof decl is always accepted, even if it doesn't make sense: we default 8166 // to 1 in those cases. 8167 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 8168 return Info.Ctx.getDeclAlign(DRE->getDecl(), 8169 /*RefAsPointee*/true); 8170 8171 if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) 8172 return Info.Ctx.getDeclAlign(ME->getMemberDecl(), 8173 /*RefAsPointee*/true); 8174 8175 return GetAlignOfType(Info, E->getType(), ExprKind); 8176 } 8177 8178 static CharUnits getBaseAlignment(EvalInfo &Info, const LValue &Value) { 8179 if (const auto *VD = Value.Base.dyn_cast<const ValueDecl *>()) 8180 return Info.Ctx.getDeclAlign(VD); 8181 if (const auto *E = Value.Base.dyn_cast<const Expr *>()) 8182 return GetAlignOfExpr(Info, E, UETT_AlignOf); 8183 return GetAlignOfType(Info, Value.Base.getTypeInfoType(), UETT_AlignOf); 8184 } 8185 8186 /// Evaluate the value of the alignment argument to __builtin_align_{up,down}, 8187 /// __builtin_is_aligned and __builtin_assume_aligned. 8188 static bool getAlignmentArgument(const Expr *E, QualType ForType, 8189 EvalInfo &Info, APSInt &Alignment) { 8190 if (!EvaluateInteger(E, Alignment, Info)) 8191 return false; 8192 if (Alignment < 0 || !Alignment.isPowerOf2()) { 8193 Info.FFDiag(E, diag::note_constexpr_invalid_alignment) << Alignment; 8194 return false; 8195 } 8196 unsigned SrcWidth = Info.Ctx.getIntWidth(ForType); 8197 APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1)); 8198 if (APSInt::compareValues(Alignment, MaxValue) > 0) { 8199 Info.FFDiag(E, diag::note_constexpr_alignment_too_big) 8200 << MaxValue << ForType << Alignment; 8201 return false; 8202 } 8203 // Ensure both alignment and source value have the same bit width so that we 8204 // don't assert when computing the resulting value. 8205 APSInt ExtAlignment = 8206 APSInt(Alignment.zextOrTrunc(SrcWidth), /*isUnsigned=*/true); 8207 assert(APSInt::compareValues(Alignment, ExtAlignment) == 0 && 8208 "Alignment should not be changed by ext/trunc"); 8209 Alignment = ExtAlignment; 8210 assert(Alignment.getBitWidth() == SrcWidth); 8211 return true; 8212 } 8213 8214 // To be clear: this happily visits unsupported builtins. Better name welcomed. 
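// (When the callee cannot be evaluated but carries the alloc_size attribute
// and invalid bases are permitted, the call itself becomes an invalid base
// whose extent can later be recovered via getBytesReturnedByAllocSizeCall.)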
8215 bool PointerExprEvaluator::visitNonBuiltinCallExpr(const CallExpr *E) { 8216 if (ExprEvaluatorBaseTy::VisitCallExpr(E)) 8217 return true; 8218 8219 if (!(InvalidBaseOK && getAllocSizeAttr(E))) 8220 return false; 8221 8222 Result.setInvalid(E); 8223 QualType PointeeTy = E->getType()->castAs<PointerType>()->getPointeeType(); 8224 Result.addUnsizedArray(Info, E, PointeeTy); 8225 return true; 8226 } 8227 8228 bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) { 8229 if (IsStringLiteralCall(E)) 8230 return Success(E); 8231 8232 if (unsigned BuiltinOp = E->getBuiltinCallee()) 8233 return VisitBuiltinCallExpr(E, BuiltinOp); 8234 8235 return visitNonBuiltinCallExpr(E); 8236 } 8237 8238 bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, 8239 unsigned BuiltinOp) { 8240 switch (BuiltinOp) { 8241 case Builtin::BI__builtin_addressof: 8242 return evaluateLValue(E->getArg(0), Result); 8243 case Builtin::BI__builtin_assume_aligned: { 8244 // We need to be very careful here because: if the pointer does not have the 8245 // asserted alignment, then the behavior is undefined, and undefined 8246 // behavior is non-constant. 8247 if (!evaluatePointer(E->getArg(0), Result)) 8248 return false; 8249 8250 LValue OffsetResult(Result); 8251 APSInt Alignment; 8252 if (!getAlignmentArgument(E->getArg(1), E->getArg(0)->getType(), Info, 8253 Alignment)) 8254 return false; 8255 CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue()); 8256 8257 if (E->getNumArgs() > 2) { 8258 APSInt Offset; 8259 if (!EvaluateInteger(E->getArg(2), Offset, Info)) 8260 return false; 8261 8262 int64_t AdditionalOffset = -Offset.getZExtValue(); 8263 OffsetResult.Offset += CharUnits::fromQuantity(AdditionalOffset); 8264 } 8265 8266 // If there is a base object, then it must have the correct alignment. 8267 if (OffsetResult.Base) { 8268 CharUnits BaseAlignment = getBaseAlignment(Info, OffsetResult); 8269 8270 if (BaseAlignment < Align) { 8271 Result.Designator.setInvalid(); 8272 // FIXME: Add support to Diagnostic for long / long long. 8273 CCEDiag(E->getArg(0), 8274 diag::note_constexpr_baa_insufficient_alignment) << 0 8275 << (unsigned)BaseAlignment.getQuantity() 8276 << (unsigned)Align.getQuantity(); 8277 return false; 8278 } 8279 } 8280 8281 // The offset must also have the correct alignment. 8282 if (OffsetResult.Offset.alignTo(Align) != OffsetResult.Offset) { 8283 Result.Designator.setInvalid(); 8284 8285 (OffsetResult.Base 8286 ? CCEDiag(E->getArg(0), 8287 diag::note_constexpr_baa_insufficient_alignment) << 1 8288 : CCEDiag(E->getArg(0), 8289 diag::note_constexpr_baa_value_insufficient_alignment)) 8290 << (int)OffsetResult.Offset.getQuantity() 8291 << (unsigned)Align.getQuantity(); 8292 return false; 8293 } 8294 8295 return true; 8296 } 8297 case Builtin::BI__builtin_align_up: 8298 case Builtin::BI__builtin_align_down: { 8299 if (!evaluatePointer(E->getArg(0), Result)) 8300 return false; 8301 APSInt Alignment; 8302 if (!getAlignmentArgument(E->getArg(1), E->getArg(0)->getType(), Info, 8303 Alignment)) 8304 return false; 8305 CharUnits BaseAlignment = getBaseAlignment(Info, Result); 8306 CharUnits PtrAlign = BaseAlignment.alignmentAtOffset(Result.Offset); 8307 // For align_up/align_down, we can return the same value if the alignment 8308 // is known to be greater or equal to the requested value. 8309 if (PtrAlign.getQuantity() >= Alignment) 8310 return true; 8311 8312 // The alignment could be greater than the minimum at run-time, so we cannot 8313 // infer much about the resulting pointer value. 
One case is possible: 8314 // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we 8315 // can infer the correct index if the requested alignment is smaller than 8316 // the base alignment so we can perform the computation on the offset. 8317 if (BaseAlignment.getQuantity() >= Alignment) { 8318 assert(Alignment.getBitWidth() <= 64 && 8319 "Cannot handle > 64-bit address-space"); 8320 uint64_t Alignment64 = Alignment.getZExtValue(); 8321 CharUnits NewOffset = CharUnits::fromQuantity( 8322 BuiltinOp == Builtin::BI__builtin_align_down 8323 ? llvm::alignDown(Result.Offset.getQuantity(), Alignment64) 8324 : llvm::alignTo(Result.Offset.getQuantity(), Alignment64)); 8325 Result.adjustOffset(NewOffset - Result.Offset); 8326 // TODO: diagnose out-of-bounds values/only allow for arrays? 8327 return true; 8328 } 8329 // Otherwise, we cannot constant-evaluate the result. 8330 Info.FFDiag(E->getArg(0), diag::note_constexpr_alignment_adjust) 8331 << Alignment; 8332 return false; 8333 } 8334 case Builtin::BI__builtin_operator_new: 8335 return HandleOperatorNewCall(Info, E, Result); 8336 case Builtin::BI__builtin_launder: 8337 return evaluatePointer(E->getArg(0), Result); 8338 case Builtin::BIstrchr: 8339 case Builtin::BIwcschr: 8340 case Builtin::BImemchr: 8341 case Builtin::BIwmemchr: 8342 if (Info.getLangOpts().CPlusPlus11) 8343 Info.CCEDiag(E, diag::note_constexpr_invalid_function) 8344 << /*isConstexpr*/0 << /*isConstructor*/0 8345 << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'"); 8346 else 8347 Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr); 8348 LLVM_FALLTHROUGH; 8349 case Builtin::BI__builtin_strchr: 8350 case Builtin::BI__builtin_wcschr: 8351 case Builtin::BI__builtin_memchr: 8352 case Builtin::BI__builtin_char_memchr: 8353 case Builtin::BI__builtin_wmemchr: { 8354 if (!Visit(E->getArg(0))) 8355 return false; 8356 APSInt Desired; 8357 if (!EvaluateInteger(E->getArg(1), Desired, Info)) 8358 return false; 8359 uint64_t MaxLength = uint64_t(-1); 8360 if (BuiltinOp != Builtin::BIstrchr && 8361 BuiltinOp != Builtin::BIwcschr && 8362 BuiltinOp != Builtin::BI__builtin_strchr && 8363 BuiltinOp != Builtin::BI__builtin_wcschr) { 8364 APSInt N; 8365 if (!EvaluateInteger(E->getArg(2), N, Info)) 8366 return false; 8367 MaxLength = N.getExtValue(); 8368 } 8369 // We cannot find the value if there are no candidates to match against. 8370 if (MaxLength == 0u) 8371 return ZeroInitialization(E); 8372 if (!Result.checkNullPointerForFoldAccess(Info, E, AK_Read) || 8373 Result.Designator.Invalid) 8374 return false; 8375 QualType CharTy = Result.Designator.getType(Info.Ctx); 8376 bool IsRawByte = BuiltinOp == Builtin::BImemchr || 8377 BuiltinOp == Builtin::BI__builtin_memchr; 8378 assert(IsRawByte || 8379 Info.Ctx.hasSameUnqualifiedType( 8380 CharTy, E->getArg(0)->getType()->getPointeeType())); 8381 // Pointers to const void may point to objects of incomplete type. 8382 if (IsRawByte && CharTy->isIncompleteType()) { 8383 Info.FFDiag(E, diag::note_constexpr_ltor_incomplete_type) << CharTy; 8384 return false; 8385 } 8386 // Give up on byte-oriented matching against multibyte elements. 8387 // FIXME: We can compare the bytes in the correct order. 8388 if (IsRawByte && Info.Ctx.getTypeSizeInChars(CharTy) != CharUnits::One()) 8389 return false; 8390 // Figure out what value we're actually looking for (after converting to 8391 // the corresponding unsigned type if necessary). 
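    // (E.g. for memchr the desired value is the argument truncated to the
    // width of char, so memchr(p, 'b' + 0x100, n) searches for 'b'.
    // Illustrative example only.)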
8392 uint64_t DesiredVal; 8393 bool StopAtNull = false; 8394 switch (BuiltinOp) { 8395 case Builtin::BIstrchr: 8396 case Builtin::BI__builtin_strchr: 8397 // strchr compares directly to the passed integer, and therefore 8398 // always fails if given an int that is not a char. 8399 if (!APSInt::isSameValue(HandleIntToIntCast(Info, E, CharTy, 8400 E->getArg(1)->getType(), 8401 Desired), 8402 Desired)) 8403 return ZeroInitialization(E); 8404 StopAtNull = true; 8405 LLVM_FALLTHROUGH; 8406 case Builtin::BImemchr: 8407 case Builtin::BI__builtin_memchr: 8408 case Builtin::BI__builtin_char_memchr: 8409 // memchr compares by converting both sides to unsigned char. That's also 8410 // correct for strchr if we get this far (to cope with plain char being 8411 // unsigned in the strchr case). 8412 DesiredVal = Desired.trunc(Info.Ctx.getCharWidth()).getZExtValue(); 8413 break; 8414 8415 case Builtin::BIwcschr: 8416 case Builtin::BI__builtin_wcschr: 8417 StopAtNull = true; 8418 LLVM_FALLTHROUGH; 8419 case Builtin::BIwmemchr: 8420 case Builtin::BI__builtin_wmemchr: 8421 // wcschr and wmemchr are given a wchar_t to look for. Just use it. 8422 DesiredVal = Desired.getZExtValue(); 8423 break; 8424 } 8425 8426 for (; MaxLength; --MaxLength) { 8427 APValue Char; 8428 if (!handleLValueToRValueConversion(Info, E, CharTy, Result, Char) || 8429 !Char.isInt()) 8430 return false; 8431 if (Char.getInt().getZExtValue() == DesiredVal) 8432 return true; 8433 if (StopAtNull && !Char.getInt()) 8434 break; 8435 if (!HandleLValueArrayAdjustment(Info, E, Result, CharTy, 1)) 8436 return false; 8437 } 8438 // Not found: return nullptr. 8439 return ZeroInitialization(E); 8440 } 8441 8442 case Builtin::BImemcpy: 8443 case Builtin::BImemmove: 8444 case Builtin::BIwmemcpy: 8445 case Builtin::BIwmemmove: 8446 if (Info.getLangOpts().CPlusPlus11) 8447 Info.CCEDiag(E, diag::note_constexpr_invalid_function) 8448 << /*isConstexpr*/0 << /*isConstructor*/0 8449 << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'"); 8450 else 8451 Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr); 8452 LLVM_FALLTHROUGH; 8453 case Builtin::BI__builtin_memcpy: 8454 case Builtin::BI__builtin_memmove: 8455 case Builtin::BI__builtin_wmemcpy: 8456 case Builtin::BI__builtin_wmemmove: { 8457 bool WChar = BuiltinOp == Builtin::BIwmemcpy || 8458 BuiltinOp == Builtin::BIwmemmove || 8459 BuiltinOp == Builtin::BI__builtin_wmemcpy || 8460 BuiltinOp == Builtin::BI__builtin_wmemmove; 8461 bool Move = BuiltinOp == Builtin::BImemmove || 8462 BuiltinOp == Builtin::BIwmemmove || 8463 BuiltinOp == Builtin::BI__builtin_memmove || 8464 BuiltinOp == Builtin::BI__builtin_wmemmove; 8465 8466 // The result of mem* is the first argument. 8467 if (!Visit(E->getArg(0))) 8468 return false; 8469 LValue Dest = Result; 8470 8471 LValue Src; 8472 if (!EvaluatePointer(E->getArg(1), Src, Info)) 8473 return false; 8474 8475 APSInt N; 8476 if (!EvaluateInteger(E->getArg(2), N, Info)) 8477 return false; 8478 assert(!N.isSigned() && "memcpy and friends take an unsigned size"); 8479 8480 // If the size is zero, we treat this as always being a valid no-op. 8481 // (Even if one of the src and dest pointers is null.) 8482 if (!N) 8483 return true; 8484 8485 // Otherwise, if either of the operands is null, we can't proceed. Don't 8486 // try to determine the type of the copied objects, because there aren't 8487 // any. 8488 if (!Src.Base || !Dest.Base) { 8489 APValue Val; 8490 (!Src.Base ? 
Src : Dest).moveInto(Val); 8491 Info.FFDiag(E, diag::note_constexpr_memcpy_null) 8492 << Move << WChar << !!Src.Base 8493 << Val.getAsString(Info.Ctx, E->getArg(0)->getType()); 8494 return false; 8495 } 8496 if (Src.Designator.Invalid || Dest.Designator.Invalid) 8497 return false; 8498 8499 // We require that Src and Dest are both pointers to arrays of 8500 // trivially-copyable type. (For the wide version, the designator will be 8501 // invalid if the designated object is not a wchar_t.) 8502 QualType T = Dest.Designator.getType(Info.Ctx); 8503 QualType SrcT = Src.Designator.getType(Info.Ctx); 8504 if (!Info.Ctx.hasSameUnqualifiedType(T, SrcT)) { 8505 Info.FFDiag(E, diag::note_constexpr_memcpy_type_pun) << Move << SrcT << T; 8506 return false; 8507 } 8508 if (T->isIncompleteType()) { 8509 Info.FFDiag(E, diag::note_constexpr_memcpy_incomplete_type) << Move << T; 8510 return false; 8511 } 8512 if (!T.isTriviallyCopyableType(Info.Ctx)) { 8513 Info.FFDiag(E, diag::note_constexpr_memcpy_nontrivial) << Move << T; 8514 return false; 8515 } 8516 8517 // Figure out how many T's we're copying. 8518 uint64_t TSize = Info.Ctx.getTypeSizeInChars(T).getQuantity(); 8519 if (!WChar) { 8520 uint64_t Remainder; 8521 llvm::APInt OrigN = N; 8522 llvm::APInt::udivrem(OrigN, TSize, N, Remainder); 8523 if (Remainder) { 8524 Info.FFDiag(E, diag::note_constexpr_memcpy_unsupported) 8525 << Move << WChar << 0 << T << OrigN.toString(10, /*Signed*/false) 8526 << (unsigned)TSize; 8527 return false; 8528 } 8529 } 8530 8531 // Check that the copying will remain within the arrays, just so that we 8532 // can give a more meaningful diagnostic. This implicitly also checks that 8533 // N fits into 64 bits. 8534 uint64_t RemainingSrcSize = Src.Designator.validIndexAdjustments().second; 8535 uint64_t RemainingDestSize = Dest.Designator.validIndexAdjustments().second; 8536 if (N.ugt(RemainingSrcSize) || N.ugt(RemainingDestSize)) { 8537 Info.FFDiag(E, diag::note_constexpr_memcpy_unsupported) 8538 << Move << WChar << (N.ugt(RemainingSrcSize) ? 1 : 2) << T 8539 << N.toString(10, /*Signed*/false); 8540 return false; 8541 } 8542 uint64_t NElems = N.getZExtValue(); 8543 uint64_t NBytes = NElems * TSize; 8544 8545 // Check for overlap. 8546 int Direction = 1; 8547 if (HasSameBase(Src, Dest)) { 8548 uint64_t SrcOffset = Src.getLValueOffset().getQuantity(); 8549 uint64_t DestOffset = Dest.getLValueOffset().getQuantity(); 8550 if (DestOffset >= SrcOffset && DestOffset - SrcOffset < NBytes) { 8551 // Dest is inside the source region. 8552 if (!Move) { 8553 Info.FFDiag(E, diag::note_constexpr_memcpy_overlap) << WChar; 8554 return false; 8555 } 8556 // For memmove and friends, copy backwards. 8557 if (!HandleLValueArrayAdjustment(Info, E, Src, T, NElems - 1) || 8558 !HandleLValueArrayAdjustment(Info, E, Dest, T, NElems - 1)) 8559 return false; 8560 Direction = -1; 8561 } else if (!Move && SrcOffset >= DestOffset && 8562 SrcOffset - DestOffset < NBytes) { 8563 // Src is inside the destination region for memcpy: invalid. 8564 Info.FFDiag(E, diag::note_constexpr_memcpy_overlap) << WChar; 8565 return false; 8566 } 8567 } 8568 8569 while (true) { 8570 APValue Val; 8571 // FIXME: Set WantObjectRepresentation to true if we're copying a 8572 // char-like type? 8573 if (!handleLValueToRValueConversion(Info, E, T, Src, Val) || 8574 !handleAssignment(Info, E, Dest, T, Val)) 8575 return false; 8576 // Do not iterate past the last element; if we're copying backwards, that 8577 // might take us off the start of the array. 
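      // (For instance, memmove(&a[1], &a[0], 2 * sizeof(a[0])) copies a[1]
      // and then a[0], so the overlapping source element is read before it
      // is overwritten. Illustrative example only.)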
8578 if (--NElems == 0) 8579 return true; 8580 if (!HandleLValueArrayAdjustment(Info, E, Src, T, Direction) || 8581 !HandleLValueArrayAdjustment(Info, E, Dest, T, Direction)) 8582 return false; 8583 } 8584 } 8585 8586 default: 8587 break; 8588 } 8589 8590 return visitNonBuiltinCallExpr(E); 8591 } 8592 8593 static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This, 8594 APValue &Result, const InitListExpr *ILE, 8595 QualType AllocType); 8596 8597 bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) { 8598 if (!Info.getLangOpts().CPlusPlus2a) 8599 Info.CCEDiag(E, diag::note_constexpr_new); 8600 8601 // We cannot speculatively evaluate a delete expression. 8602 if (Info.SpeculativeEvaluationDepth) 8603 return false; 8604 8605 FunctionDecl *OperatorNew = E->getOperatorNew(); 8606 8607 bool IsNothrow = false; 8608 bool IsPlacement = false; 8609 if (OperatorNew->isReservedGlobalPlacementOperator() && 8610 Info.CurrentCall->isStdFunction() && !E->isArray()) { 8611 // FIXME Support array placement new. 8612 assert(E->getNumPlacementArgs() == 1); 8613 if (!EvaluatePointer(E->getPlacementArg(0), Result, Info)) 8614 return false; 8615 if (Result.Designator.Invalid) 8616 return false; 8617 IsPlacement = true; 8618 } else if (!OperatorNew->isReplaceableGlobalAllocationFunction()) { 8619 Info.FFDiag(E, diag::note_constexpr_new_non_replaceable) 8620 << isa<CXXMethodDecl>(OperatorNew) << OperatorNew; 8621 return false; 8622 } else if (E->getNumPlacementArgs()) { 8623 // The only new-placement list we support is of the form (std::nothrow). 8624 // 8625 // FIXME: There is no restriction on this, but it's not clear that any 8626 // other form makes any sense. We get here for cases such as: 8627 // 8628 // new (std::align_val_t{N}) X(int) 8629 // 8630 // (which should presumably be valid only if N is a multiple of 8631 // alignof(int), and in any case can't be deallocated unless N is 8632 // alignof(X) and X has new-extended alignment). 8633 if (E->getNumPlacementArgs() != 1 || 8634 !E->getPlacementArg(0)->getType()->isNothrowT()) 8635 return Error(E, diag::note_constexpr_new_placement); 8636 8637 LValue Nothrow; 8638 if (!EvaluateLValue(E->getPlacementArg(0), Nothrow, Info)) 8639 return false; 8640 IsNothrow = true; 8641 } 8642 8643 const Expr *Init = E->getInitializer(); 8644 const InitListExpr *ResizedArrayILE = nullptr; 8645 8646 QualType AllocType = E->getAllocatedType(); 8647 if (Optional<const Expr*> ArraySize = E->getArraySize()) { 8648 const Expr *Stripped = *ArraySize; 8649 for (; auto *ICE = dyn_cast<ImplicitCastExpr>(Stripped); 8650 Stripped = ICE->getSubExpr()) 8651 if (ICE->getCastKind() != CK_NoOp && 8652 ICE->getCastKind() != CK_IntegralCast) 8653 break; 8654 8655 llvm::APSInt ArrayBound; 8656 if (!EvaluateInteger(Stripped, ArrayBound, Info)) 8657 return false; 8658 8659 // C++ [expr.new]p9: 8660 // The expression is erroneous if: 8661 // -- [...] 
its value before converting to size_t [or] applying the 8662 // second standard conversion sequence is less than zero 8663 if (ArrayBound.isSigned() && ArrayBound.isNegative()) { 8664 if (IsNothrow) 8665 return ZeroInitialization(E); 8666 8667 Info.FFDiag(*ArraySize, diag::note_constexpr_new_negative) 8668 << ArrayBound << (*ArraySize)->getSourceRange(); 8669 return false; 8670 } 8671 8672 // -- its value is such that the size of the allocated object would 8673 // exceed the implementation-defined limit 8674 if (ConstantArrayType::getNumAddressingBits(Info.Ctx, AllocType, 8675 ArrayBound) > 8676 ConstantArrayType::getMaxSizeBits(Info.Ctx)) { 8677 if (IsNothrow) 8678 return ZeroInitialization(E); 8679 8680 Info.FFDiag(*ArraySize, diag::note_constexpr_new_too_large) 8681 << ArrayBound << (*ArraySize)->getSourceRange(); 8682 return false; 8683 } 8684 8685 // -- the new-initializer is a braced-init-list and the number of 8686 // array elements for which initializers are provided [...] 8687 // exceeds the number of elements to initialize 8688 if (Init) { 8689 auto *CAT = Info.Ctx.getAsConstantArrayType(Init->getType()); 8690 assert(CAT && "unexpected type for array initializer"); 8691 8692 unsigned Bits = 8693 std::max(CAT->getSize().getBitWidth(), ArrayBound.getBitWidth()); 8694 llvm::APInt InitBound = CAT->getSize().zextOrSelf(Bits); 8695 llvm::APInt AllocBound = ArrayBound.zextOrSelf(Bits); 8696 if (InitBound.ugt(AllocBound)) { 8697 if (IsNothrow) 8698 return ZeroInitialization(E); 8699 8700 Info.FFDiag(*ArraySize, diag::note_constexpr_new_too_small) 8701 << AllocBound.toString(10, /*Signed=*/false) 8702 << InitBound.toString(10, /*Signed=*/false) 8703 << (*ArraySize)->getSourceRange(); 8704 return false; 8705 } 8706 8707 // If the sizes differ, we must have an initializer list, and we need 8708 // special handling for this case when we initialize. 8709 if (InitBound != AllocBound) 8710 ResizedArrayILE = cast<InitListExpr>(Init); 8711 } 8712 8713 AllocType = Info.Ctx.getConstantArrayType(AllocType, ArrayBound, nullptr, 8714 ArrayType::Normal, 0); 8715 } else { 8716 assert(!AllocType->isArrayType() && 8717 "array allocation with non-array new"); 8718 } 8719 8720 APValue *Val; 8721 if (IsPlacement) { 8722 AccessKinds AK = AK_Construct; 8723 struct FindObjectHandler { 8724 EvalInfo &Info; 8725 const Expr *E; 8726 QualType AllocType; 8727 const AccessKinds AccessKind; 8728 APValue *Value; 8729 8730 typedef bool result_type; 8731 bool failed() { return false; } 8732 bool found(APValue &Subobj, QualType SubobjType) { 8733 // FIXME: Reject the cases where [basic.life]p8 would not permit the 8734 // old name of the object to be used to name the new object. 
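        // (For instance, placement-new of an 'int' into storage whose current
        // object has type 'float' is rejected just below as a wrong-type
        // placement new.)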
8735 if (!Info.Ctx.hasSameUnqualifiedType(SubobjType, AllocType)) { 8736 Info.FFDiag(E, diag::note_constexpr_placement_new_wrong_type) << 8737 SubobjType << AllocType; 8738 return false; 8739 } 8740 Value = &Subobj; 8741 return true; 8742 } 8743 bool found(APSInt &Value, QualType SubobjType) { 8744 Info.FFDiag(E, diag::note_constexpr_construct_complex_elem); 8745 return false; 8746 } 8747 bool found(APFloat &Value, QualType SubobjType) { 8748 Info.FFDiag(E, diag::note_constexpr_construct_complex_elem); 8749 return false; 8750 } 8751 } Handler = {Info, E, AllocType, AK, nullptr}; 8752 8753 CompleteObject Obj = findCompleteObject(Info, E, AK, Result, AllocType); 8754 if (!Obj || !findSubobject(Info, E, Obj, Result.Designator, Handler)) 8755 return false; 8756 8757 Val = Handler.Value; 8758 8759 // [basic.life]p1: 8760 // The lifetime of an object o of type T ends when [...] the storage 8761 // which the object occupies is [...] reused by an object that is not 8762 // nested within o (6.6.2). 8763 *Val = APValue(); 8764 } else { 8765 // Perform the allocation and obtain a pointer to the resulting object. 8766 Val = Info.createHeapAlloc(E, AllocType, Result); 8767 if (!Val) 8768 return false; 8769 } 8770 8771 if (ResizedArrayILE) { 8772 if (!EvaluateArrayNewInitList(Info, Result, *Val, ResizedArrayILE, 8773 AllocType)) 8774 return false; 8775 } else if (Init) { 8776 if (!EvaluateInPlace(*Val, Info, Result, Init)) 8777 return false; 8778 } else { 8779 *Val = getDefaultInitValue(AllocType); 8780 } 8781 8782 // Array new returns a pointer to the first element, not a pointer to the 8783 // array. 8784 if (auto *AT = AllocType->getAsArrayTypeUnsafe()) 8785 Result.addArray(Info, E, cast<ConstantArrayType>(AT)); 8786 8787 return true; 8788 } 8789 //===----------------------------------------------------------------------===// 8790 // Member Pointer Evaluation 8791 //===----------------------------------------------------------------------===// 8792 8793 namespace { 8794 class MemberPointerExprEvaluator 8795 : public ExprEvaluatorBase<MemberPointerExprEvaluator> { 8796 MemberPtr &Result; 8797 8798 bool Success(const ValueDecl *D) { 8799 Result = MemberPtr(D); 8800 return true; 8801 } 8802 public: 8803 8804 MemberPointerExprEvaluator(EvalInfo &Info, MemberPtr &Result) 8805 : ExprEvaluatorBaseTy(Info), Result(Result) {} 8806 8807 bool Success(const APValue &V, const Expr *E) { 8808 Result.setFrom(V); 8809 return true; 8810 } 8811 bool ZeroInitialization(const Expr *E) { 8812 return Success((const ValueDecl*)nullptr); 8813 } 8814 8815 bool VisitCastExpr(const CastExpr *E); 8816 bool VisitUnaryAddrOf(const UnaryOperator *E); 8817 }; 8818 } // end anonymous namespace 8819 8820 static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result, 8821 EvalInfo &Info) { 8822 assert(E->isRValue() && E->getType()->isMemberPointerType()); 8823 return MemberPointerExprEvaluator(Info, Result).Visit(E); 8824 } 8825 8826 bool MemberPointerExprEvaluator::VisitCastExpr(const CastExpr *E) { 8827 switch (E->getCastKind()) { 8828 default: 8829 return ExprEvaluatorBaseTy::VisitCastExpr(E); 8830 8831 case CK_NullToMemberPointer: 8832 VisitIgnoredValue(E->getSubExpr()); 8833 return ZeroInitialization(E); 8834 8835 case CK_BaseToDerivedMemberPointer: { 8836 if (!Visit(E->getSubExpr())) 8837 return false; 8838 if (E->path_empty()) 8839 return true; 8840 // Base-to-derived member pointer casts store the path in derived-to-base 8841 // order, so iterate backwards. 
The CXXBaseSpecifier also provides us with 8842 // the wrong end of the derived->base arc, so stagger the path by one class. 8843 typedef std::reverse_iterator<CastExpr::path_const_iterator> ReverseIter; 8844 for (ReverseIter PathI(E->path_end() - 1), PathE(E->path_begin()); 8845 PathI != PathE; ++PathI) { 8846 assert(!(*PathI)->isVirtual() && "memptr cast through vbase"); 8847 const CXXRecordDecl *Derived = (*PathI)->getType()->getAsCXXRecordDecl(); 8848 if (!Result.castToDerived(Derived)) 8849 return Error(E); 8850 } 8851 const Type *FinalTy = E->getType()->castAs<MemberPointerType>()->getClass(); 8852 if (!Result.castToDerived(FinalTy->getAsCXXRecordDecl())) 8853 return Error(E); 8854 return true; 8855 } 8856 8857 case CK_DerivedToBaseMemberPointer: 8858 if (!Visit(E->getSubExpr())) 8859 return false; 8860 for (CastExpr::path_const_iterator PathI = E->path_begin(), 8861 PathE = E->path_end(); PathI != PathE; ++PathI) { 8862 assert(!(*PathI)->isVirtual() && "memptr cast through vbase"); 8863 const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl(); 8864 if (!Result.castToBase(Base)) 8865 return Error(E); 8866 } 8867 return true; 8868 } 8869 } 8870 8871 bool MemberPointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) { 8872 // C++11 [expr.unary.op]p3 has very strict rules on how the address of a 8873 // member can be formed. 8874 return Success(cast<DeclRefExpr>(E->getSubExpr())->getDecl()); 8875 } 8876 8877 //===----------------------------------------------------------------------===// 8878 // Record Evaluation 8879 //===----------------------------------------------------------------------===// 8880 8881 namespace { 8882 class RecordExprEvaluator 8883 : public ExprEvaluatorBase<RecordExprEvaluator> { 8884 const LValue &This; 8885 APValue &Result; 8886 public: 8887 8888 RecordExprEvaluator(EvalInfo &info, const LValue &This, APValue &Result) 8889 : ExprEvaluatorBaseTy(info), This(This), Result(Result) {} 8890 8891 bool Success(const APValue &V, const Expr *E) { 8892 Result = V; 8893 return true; 8894 } 8895 bool ZeroInitialization(const Expr *E) { 8896 return ZeroInitialization(E, E->getType()); 8897 } 8898 bool ZeroInitialization(const Expr *E, QualType T); 8899 8900 bool VisitCallExpr(const CallExpr *E) { 8901 return handleCallExpr(E, Result, &This); 8902 } 8903 bool VisitCastExpr(const CastExpr *E); 8904 bool VisitInitListExpr(const InitListExpr *E); 8905 bool VisitCXXConstructExpr(const CXXConstructExpr *E) { 8906 return VisitCXXConstructExpr(E, E->getType()); 8907 } 8908 bool VisitLambdaExpr(const LambdaExpr *E); 8909 bool VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E); 8910 bool VisitCXXConstructExpr(const CXXConstructExpr *E, QualType T); 8911 bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E); 8912 bool VisitBinCmp(const BinaryOperator *E); 8913 }; 8914 } 8915 8916 /// Perform zero-initialization on an object of non-union class type. 8917 /// C++11 [dcl.init]p5: 8918 /// To zero-initialize an object or reference of type T means: 8919 /// [...] 
8920 /// -- if T is a (possibly cv-qualified) non-union class type, 8921 /// each non-static data member and each base-class subobject is 8922 /// zero-initialized 8923 static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E, 8924 const RecordDecl *RD, 8925 const LValue &This, APValue &Result) { 8926 assert(!RD->isUnion() && "Expected non-union class type"); 8927 const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD); 8928 Result = APValue(APValue::UninitStruct(), CD ? CD->getNumBases() : 0, 8929 std::distance(RD->field_begin(), RD->field_end())); 8930 8931 if (RD->isInvalidDecl()) return false; 8932 const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD); 8933 8934 if (CD) { 8935 unsigned Index = 0; 8936 for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(), 8937 End = CD->bases_end(); I != End; ++I, ++Index) { 8938 const CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl(); 8939 LValue Subobject = This; 8940 if (!HandleLValueDirectBase(Info, E, Subobject, CD, Base, &Layout)) 8941 return false; 8942 if (!HandleClassZeroInitialization(Info, E, Base, Subobject, 8943 Result.getStructBase(Index))) 8944 return false; 8945 } 8946 } 8947 8948 for (const auto *I : RD->fields()) { 8949 // -- if T is a reference type, no initialization is performed. 8950 if (I->getType()->isReferenceType()) 8951 continue; 8952 8953 LValue Subobject = This; 8954 if (!HandleLValueMember(Info, E, Subobject, I, &Layout)) 8955 return false; 8956 8957 ImplicitValueInitExpr VIE(I->getType()); 8958 if (!EvaluateInPlace( 8959 Result.getStructField(I->getFieldIndex()), Info, Subobject, &VIE)) 8960 return false; 8961 } 8962 8963 return true; 8964 } 8965 8966 bool RecordExprEvaluator::ZeroInitialization(const Expr *E, QualType T) { 8967 const RecordDecl *RD = T->castAs<RecordType>()->getDecl(); 8968 if (RD->isInvalidDecl()) return false; 8969 if (RD->isUnion()) { 8970 // C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the 8971 // object's first non-static named data member is zero-initialized 8972 RecordDecl::field_iterator I = RD->field_begin(); 8973 if (I == RD->field_end()) { 8974 Result = APValue((const FieldDecl*)nullptr); 8975 return true; 8976 } 8977 8978 LValue Subobject = This; 8979 if (!HandleLValueMember(Info, E, Subobject, *I)) 8980 return false; 8981 Result = APValue(*I); 8982 ImplicitValueInitExpr VIE(I->getType()); 8983 return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, &VIE); 8984 } 8985 8986 if (isa<CXXRecordDecl>(RD) && cast<CXXRecordDecl>(RD)->getNumVBases()) { 8987 Info.FFDiag(E, diag::note_constexpr_virtual_base) << RD; 8988 return false; 8989 } 8990 8991 return HandleClassZeroInitialization(Info, E, RD, This, Result); 8992 } 8993 8994 bool RecordExprEvaluator::VisitCastExpr(const CastExpr *E) { 8995 switch (E->getCastKind()) { 8996 default: 8997 return ExprEvaluatorBaseTy::VisitCastExpr(E); 8998 8999 case CK_ConstructorConversion: 9000 return Visit(E->getSubExpr()); 9001 9002 case CK_DerivedToBase: 9003 case CK_UncheckedDerivedToBase: { 9004 APValue DerivedObject; 9005 if (!Evaluate(DerivedObject, Info, E->getSubExpr())) 9006 return false; 9007 if (!DerivedObject.isStruct()) 9008 return Error(E->getSubExpr()); 9009 9010 // Derived-to-base rvalue conversion: just slice off the derived part. 
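    // E.g. given 'struct B { int b; }; struct D : B { int d; };', converting
    // a D rvalue to B keeps only the B base-class subobject. (Illustrative
    // example.)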
9011 APValue *Value = &DerivedObject; 9012 const CXXRecordDecl *RD = E->getSubExpr()->getType()->getAsCXXRecordDecl(); 9013 for (CastExpr::path_const_iterator PathI = E->path_begin(), 9014 PathE = E->path_end(); PathI != PathE; ++PathI) { 9015 assert(!(*PathI)->isVirtual() && "record rvalue with virtual base"); 9016 const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl(); 9017 Value = &Value->getStructBase(getBaseIndex(RD, Base)); 9018 RD = Base; 9019 } 9020 Result = *Value; 9021 return true; 9022 } 9023 } 9024 } 9025 9026 bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) { 9027 if (E->isTransparent()) 9028 return Visit(E->getInit(0)); 9029 9030 const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl(); 9031 if (RD->isInvalidDecl()) return false; 9032 const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD); 9033 auto *CXXRD = dyn_cast<CXXRecordDecl>(RD); 9034 9035 EvalInfo::EvaluatingConstructorRAII EvalObj( 9036 Info, 9037 ObjectUnderConstruction{This.getLValueBase(), This.Designator.Entries}, 9038 CXXRD && CXXRD->getNumBases()); 9039 9040 if (RD->isUnion()) { 9041 const FieldDecl *Field = E->getInitializedFieldInUnion(); 9042 Result = APValue(Field); 9043 if (!Field) 9044 return true; 9045 9046 // If the initializer list for a union does not contain any elements, the 9047 // first element of the union is value-initialized. 9048 // FIXME: The element should be initialized from an initializer list. 9049 // Is this difference ever observable for initializer lists which 9050 // we don't build? 9051 ImplicitValueInitExpr VIE(Field->getType()); 9052 const Expr *InitExpr = E->getNumInits() ? E->getInit(0) : &VIE; 9053 9054 LValue Subobject = This; 9055 if (!HandleLValueMember(Info, InitExpr, Subobject, Field, &Layout)) 9056 return false; 9057 9058 // Temporarily override This, in case there's a CXXDefaultInitExpr in here. 9059 ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This, 9060 isa<CXXDefaultInitExpr>(InitExpr)); 9061 9062 return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, InitExpr); 9063 } 9064 9065 if (!Result.hasValue()) 9066 Result = APValue(APValue::UninitStruct(), CXXRD ? CXXRD->getNumBases() : 0, 9067 std::distance(RD->field_begin(), RD->field_end())); 9068 unsigned ElementNo = 0; 9069 bool Success = true; 9070 9071 // Initialize base classes. 9072 if (CXXRD && CXXRD->getNumBases()) { 9073 for (const auto &Base : CXXRD->bases()) { 9074 assert(ElementNo < E->getNumInits() && "missing init for base class"); 9075 const Expr *Init = E->getInit(ElementNo); 9076 9077 LValue Subobject = This; 9078 if (!HandleLValueBase(Info, Init, Subobject, CXXRD, &Base)) 9079 return false; 9080 9081 APValue &FieldVal = Result.getStructBase(ElementNo); 9082 if (!EvaluateInPlace(FieldVal, Info, Subobject, Init)) { 9083 if (!Info.noteFailure()) 9084 return false; 9085 Success = false; 9086 } 9087 ++ElementNo; 9088 } 9089 9090 EvalObj.finishedConstructingBases(); 9091 } 9092 9093 // Initialize members. 9094 for (const auto *Field : RD->fields()) { 9095 // Anonymous bit-fields are not considered members of the class for 9096 // purposes of aggregate initialization. 9097 if (Field->isUnnamedBitfield()) 9098 continue; 9099 9100 LValue Subobject = This; 9101 9102 bool HaveInit = ElementNo < E->getNumInits(); 9103 9104 // FIXME: Diagnostics here should point to the end of the initializer 9105 // list, not the start. 9106 if (!HandleLValueMember(Info, HaveInit ? 
E->getInit(ElementNo) : E, 9107 Subobject, Field, &Layout)) 9108 return false; 9109 9110 // Perform an implicit value-initialization for members beyond the end of 9111 // the initializer list. 9112 ImplicitValueInitExpr VIE(HaveInit ? Info.Ctx.IntTy : Field->getType()); 9113 const Expr *Init = HaveInit ? E->getInit(ElementNo++) : &VIE; 9114 9115 // Temporarily override This, in case there's a CXXDefaultInitExpr in here. 9116 ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This, 9117 isa<CXXDefaultInitExpr>(Init)); 9118 9119 APValue &FieldVal = Result.getStructField(Field->getFieldIndex()); 9120 if (!EvaluateInPlace(FieldVal, Info, Subobject, Init) || 9121 (Field->isBitField() && !truncateBitfieldValue(Info, Init, 9122 FieldVal, Field))) { 9123 if (!Info.noteFailure()) 9124 return false; 9125 Success = false; 9126 } 9127 } 9128 9129 return Success; 9130 } 9131 9132 bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E, 9133 QualType T) { 9134 // Note that E's type is not necessarily the type of our class here; we might 9135 // be initializing an array element instead. 9136 const CXXConstructorDecl *FD = E->getConstructor(); 9137 if (FD->isInvalidDecl() || FD->getParent()->isInvalidDecl()) return false; 9138 9139 bool ZeroInit = E->requiresZeroInitialization(); 9140 if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) { 9141 // If we've already performed zero-initialization, we're already done. 9142 if (Result.hasValue()) 9143 return true; 9144 9145 if (ZeroInit) 9146 return ZeroInitialization(E, T); 9147 9148 Result = getDefaultInitValue(T); 9149 return true; 9150 } 9151 9152 const FunctionDecl *Definition = nullptr; 9153 auto Body = FD->getBody(Definition); 9154 9155 if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition, Body)) 9156 return false; 9157 9158 // Avoid materializing a temporary for an elidable copy/move constructor. 9159 if (E->isElidable() && !ZeroInit) 9160 if (const MaterializeTemporaryExpr *ME 9161 = dyn_cast<MaterializeTemporaryExpr>(E->getArg(0))) 9162 return Visit(ME->getSubExpr()); 9163 9164 if (ZeroInit && !ZeroInitialization(E, T)) 9165 return false; 9166 9167 auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs()); 9168 return HandleConstructorCall(E, This, Args, 9169 cast<CXXConstructorDecl>(Definition), Info, 9170 Result); 9171 } 9172 9173 bool RecordExprEvaluator::VisitCXXInheritedCtorInitExpr( 9174 const CXXInheritedCtorInitExpr *E) { 9175 if (!Info.CurrentCall) { 9176 assert(Info.checkingPotentialConstantExpression()); 9177 return false; 9178 } 9179 9180 const CXXConstructorDecl *FD = E->getConstructor(); 9181 if (FD->isInvalidDecl() || FD->getParent()->isInvalidDecl()) 9182 return false; 9183 9184 const FunctionDecl *Definition = nullptr; 9185 auto Body = FD->getBody(Definition); 9186 9187 if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition, Body)) 9188 return false; 9189 9190 return HandleConstructorCall(E, This, Info.CurrentCall->Arguments, 9191 cast<CXXConstructorDecl>(Definition), Info, 9192 Result); 9193 } 9194 9195 bool RecordExprEvaluator::VisitCXXStdInitializerListExpr( 9196 const CXXStdInitializerListExpr *E) { 9197 const ConstantArrayType *ArrayType = 9198 Info.Ctx.getAsConstantArrayType(E->getSubExpr()->getType()); 9199 9200 LValue Array; 9201 if (!EvaluateLValue(E->getSubExpr(), Array, Info)) 9202 return false; 9203 9204 // Get a pointer to the first element of the array. 
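  // The fields of std::initializer_list are expected to be either
  // {begin pointer, end pointer} or {begin pointer, length}; both layouts
  // are handled below.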
9205 Array.addArray(Info, E, ArrayType); 9206 9207 // FIXME: Perform the checks on the field types in SemaInit. 9208 RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl(); 9209 RecordDecl::field_iterator Field = Record->field_begin(); 9210 if (Field == Record->field_end()) 9211 return Error(E); 9212 9213 // Start pointer. 9214 if (!Field->getType()->isPointerType() || 9215 !Info.Ctx.hasSameType(Field->getType()->getPointeeType(), 9216 ArrayType->getElementType())) 9217 return Error(E); 9218 9219 // FIXME: What if the initializer_list type has base classes, etc? 9220 Result = APValue(APValue::UninitStruct(), 0, 2); 9221 Array.moveInto(Result.getStructField(0)); 9222 9223 if (++Field == Record->field_end()) 9224 return Error(E); 9225 9226 if (Field->getType()->isPointerType() && 9227 Info.Ctx.hasSameType(Field->getType()->getPointeeType(), 9228 ArrayType->getElementType())) { 9229 // End pointer. 9230 if (!HandleLValueArrayAdjustment(Info, E, Array, 9231 ArrayType->getElementType(), 9232 ArrayType->getSize().getZExtValue())) 9233 return false; 9234 Array.moveInto(Result.getStructField(1)); 9235 } else if (Info.Ctx.hasSameType(Field->getType(), Info.Ctx.getSizeType())) 9236 // Length. 9237 Result.getStructField(1) = APValue(APSInt(ArrayType->getSize())); 9238 else 9239 return Error(E); 9240 9241 if (++Field != Record->field_end()) 9242 return Error(E); 9243 9244 return true; 9245 } 9246 9247 bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) { 9248 const CXXRecordDecl *ClosureClass = E->getLambdaClass(); 9249 if (ClosureClass->isInvalidDecl()) 9250 return false; 9251 9252 const size_t NumFields = 9253 std::distance(ClosureClass->field_begin(), ClosureClass->field_end()); 9254 9255 assert(NumFields == (size_t)std::distance(E->capture_init_begin(), 9256 E->capture_init_end()) && 9257 "The number of lambda capture initializers should equal the number of " 9258 "fields within the closure type"); 9259 9260 Result = APValue(APValue::UninitStruct(), /*NumBases*/0, NumFields); 9261 // Iterate through all the lambda's closure object's fields and initialize 9262 // them. 9263 auto *CaptureInitIt = E->capture_init_begin(); 9264 const LambdaCapture *CaptureIt = ClosureClass->captures_begin(); 9265 bool Success = true; 9266 for (const auto *Field : ClosureClass->fields()) { 9267 assert(CaptureInitIt != E->capture_init_end()); 9268 // Get the initializer for this field 9269 Expr *const CurFieldInit = *CaptureInitIt++; 9270 9271 // If there is no initializer, either this is a VLA or an error has 9272 // occurred. 9273 if (!CurFieldInit) 9274 return Error(E); 9275 9276 APValue &FieldVal = Result.getStructField(Field->getFieldIndex()); 9277 if (!EvaluateInPlace(FieldVal, Info, This, CurFieldInit)) { 9278 if (!Info.keepEvaluatingAfterFailure()) 9279 return false; 9280 Success = false; 9281 } 9282 ++CaptureIt; 9283 } 9284 return Success; 9285 } 9286 9287 static bool EvaluateRecord(const Expr *E, const LValue &This, 9288 APValue &Result, EvalInfo &Info) { 9289 assert(E->isRValue() && E->getType()->isRecordType() && 9290 "can't evaluate expression as a record rvalue"); 9291 return RecordExprEvaluator(Info, This, Result).Visit(E); 9292 } 9293 9294 //===----------------------------------------------------------------------===// 9295 // Temporary Evaluation 9296 // 9297 // Temporaries are represented in the AST as rvalues, but generally behave like 9298 // lvalues. The full-object of which the temporary is a subobject is implicitly 9299 // materialized so that a reference can bind to it. 
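// For example, in 'const int &r = 1 + 2;' the prvalue 3 is materialized as a
// temporary object so that 'r' can bind to it.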
9300 //===----------------------------------------------------------------------===// 9301 namespace { 9302 class TemporaryExprEvaluator 9303 : public LValueExprEvaluatorBase<TemporaryExprEvaluator> { 9304 public: 9305 TemporaryExprEvaluator(EvalInfo &Info, LValue &Result) : 9306 LValueExprEvaluatorBaseTy(Info, Result, false) {} 9307 9308 /// Visit an expression which constructs the value of this temporary. 9309 bool VisitConstructExpr(const Expr *E) { 9310 APValue &Value = 9311 Info.CurrentCall->createTemporary(E, E->getType(), false, Result); 9312 return EvaluateInPlace(Value, Info, Result, E); 9313 } 9314 9315 bool VisitCastExpr(const CastExpr *E) { 9316 switch (E->getCastKind()) { 9317 default: 9318 return LValueExprEvaluatorBaseTy::VisitCastExpr(E); 9319 9320 case CK_ConstructorConversion: 9321 return VisitConstructExpr(E->getSubExpr()); 9322 } 9323 } 9324 bool VisitInitListExpr(const InitListExpr *E) { 9325 return VisitConstructExpr(E); 9326 } 9327 bool VisitCXXConstructExpr(const CXXConstructExpr *E) { 9328 return VisitConstructExpr(E); 9329 } 9330 bool VisitCallExpr(const CallExpr *E) { 9331 return VisitConstructExpr(E); 9332 } 9333 bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E) { 9334 return VisitConstructExpr(E); 9335 } 9336 bool VisitLambdaExpr(const LambdaExpr *E) { 9337 return VisitConstructExpr(E); 9338 } 9339 }; 9340 } // end anonymous namespace 9341 9342 /// Evaluate an expression of record type as a temporary. 9343 static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info) { 9344 assert(E->isRValue() && E->getType()->isRecordType()); 9345 return TemporaryExprEvaluator(Info, Result).Visit(E); 9346 } 9347 9348 //===----------------------------------------------------------------------===// 9349 // Vector Evaluation 9350 //===----------------------------------------------------------------------===// 9351 9352 namespace { 9353 class VectorExprEvaluator 9354 : public ExprEvaluatorBase<VectorExprEvaluator> { 9355 APValue &Result; 9356 public: 9357 9358 VectorExprEvaluator(EvalInfo &info, APValue &Result) 9359 : ExprEvaluatorBaseTy(info), Result(Result) {} 9360 9361 bool Success(ArrayRef<APValue> V, const Expr *E) { 9362 assert(V.size() == E->getType()->castAs<VectorType>()->getNumElements()); 9363 // FIXME: remove this APValue copy. 
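    // (The APValue vector constructor copies all V.size() elements.)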
9364 Result = APValue(V.data(), V.size()); 9365 return true; 9366 } 9367 bool Success(const APValue &V, const Expr *E) { 9368 assert(V.isVector()); 9369 Result = V; 9370 return true; 9371 } 9372 bool ZeroInitialization(const Expr *E); 9373 9374 bool VisitUnaryReal(const UnaryOperator *E) 9375 { return Visit(E->getSubExpr()); } 9376 bool VisitCastExpr(const CastExpr* E); 9377 bool VisitInitListExpr(const InitListExpr *E); 9378 bool VisitUnaryImag(const UnaryOperator *E); 9379 // FIXME: Missing: unary -, unary ~, binary add/sub/mul/div, 9380 // binary comparisons, binary and/or/xor, 9381 // conditional operator (for GNU conditional select), 9382 // shufflevector, ExtVectorElementExpr 9383 }; 9384 } // end anonymous namespace 9385 9386 static bool EvaluateVector(const Expr* E, APValue& Result, EvalInfo &Info) { 9387 assert(E->isRValue() && E->getType()->isVectorType() &&"not a vector rvalue"); 9388 return VectorExprEvaluator(Info, Result).Visit(E); 9389 } 9390 9391 bool VectorExprEvaluator::VisitCastExpr(const CastExpr *E) { 9392 const VectorType *VTy = E->getType()->castAs<VectorType>(); 9393 unsigned NElts = VTy->getNumElements(); 9394 9395 const Expr *SE = E->getSubExpr(); 9396 QualType SETy = SE->getType(); 9397 9398 switch (E->getCastKind()) { 9399 case CK_VectorSplat: { 9400 APValue Val = APValue(); 9401 if (SETy->isIntegerType()) { 9402 APSInt IntResult; 9403 if (!EvaluateInteger(SE, IntResult, Info)) 9404 return false; 9405 Val = APValue(std::move(IntResult)); 9406 } else if (SETy->isRealFloatingType()) { 9407 APFloat FloatResult(0.0); 9408 if (!EvaluateFloat(SE, FloatResult, Info)) 9409 return false; 9410 Val = APValue(std::move(FloatResult)); 9411 } else { 9412 return Error(E); 9413 } 9414 9415 // Splat and create vector APValue. 9416 SmallVector<APValue, 4> Elts(NElts, Val); 9417 return Success(Elts, E); 9418 } 9419 case CK_BitCast: { 9420 // Evaluate the operand into an APInt we can extract from. 
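    // (E.g. bitcasting the 32-bit integer 0x04030201 to a vector of four
    // 8-bit elements yields {0x01, 0x02, 0x03, 0x04} on a little-endian
    // target. Illustrative example only.)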
9421 llvm::APInt SValInt; 9422 if (!EvalAndBitcastToAPInt(Info, SE, SValInt)) 9423 return false; 9424 // Extract the elements 9425 QualType EltTy = VTy->getElementType(); 9426 unsigned EltSize = Info.Ctx.getTypeSize(EltTy); 9427 bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian(); 9428 SmallVector<APValue, 4> Elts; 9429 if (EltTy->isRealFloatingType()) { 9430 const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(EltTy); 9431 unsigned FloatEltSize = EltSize; 9432 if (&Sem == &APFloat::x87DoubleExtended()) 9433 FloatEltSize = 80; 9434 for (unsigned i = 0; i < NElts; i++) { 9435 llvm::APInt Elt; 9436 if (BigEndian) 9437 Elt = SValInt.rotl(i*EltSize+FloatEltSize).trunc(FloatEltSize); 9438 else 9439 Elt = SValInt.rotr(i*EltSize).trunc(FloatEltSize); 9440 Elts.push_back(APValue(APFloat(Sem, Elt))); 9441 } 9442 } else if (EltTy->isIntegerType()) { 9443 for (unsigned i = 0; i < NElts; i++) { 9444 llvm::APInt Elt; 9445 if (BigEndian) 9446 Elt = SValInt.rotl(i*EltSize+EltSize).zextOrTrunc(EltSize); 9447 else 9448 Elt = SValInt.rotr(i*EltSize).zextOrTrunc(EltSize); 9449 Elts.push_back(APValue(APSInt(Elt, EltTy->isSignedIntegerType()))); 9450 } 9451 } else { 9452 return Error(E); 9453 } 9454 return Success(Elts, E); 9455 } 9456 default: 9457 return ExprEvaluatorBaseTy::VisitCastExpr(E); 9458 } 9459 } 9460 9461 bool 9462 VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) { 9463 const VectorType *VT = E->getType()->castAs<VectorType>(); 9464 unsigned NumInits = E->getNumInits(); 9465 unsigned NumElements = VT->getNumElements(); 9466 9467 QualType EltTy = VT->getElementType(); 9468 SmallVector<APValue, 4> Elements; 9469 9470 // The number of initializers can be less than the number of 9471 // vector elements. For OpenCL, this can be due to nested vector 9472 // initialization. For GCC compatibility, missing trailing elements 9473 // should be initialized with zeroes. 9474 unsigned CountInits = 0, CountElts = 0; 9475 while (CountElts < NumElements) { 9476 // Handle nested vector initialization. 9477 if (CountInits < NumInits 9478 && E->getInit(CountInits)->getType()->isVectorType()) { 9479 APValue v; 9480 if (!EvaluateVector(E->getInit(CountInits), v, Info)) 9481 return Error(E); 9482 unsigned vlen = v.getVectorLength(); 9483 for (unsigned j = 0; j < vlen; j++) 9484 Elements.push_back(v.getVectorElt(j)); 9485 CountElts += vlen; 9486 } else if (EltTy->isIntegerType()) { 9487 llvm::APSInt sInt(32); 9488 if (CountInits < NumInits) { 9489 if (!EvaluateInteger(E->getInit(CountInits), sInt, Info)) 9490 return false; 9491 } else // trailing integer zero. 9492 sInt = Info.Ctx.MakeIntValue(0, EltTy); 9493 Elements.push_back(APValue(sInt)); 9494 CountElts++; 9495 } else { 9496 llvm::APFloat f(0.0); 9497 if (CountInits < NumInits) { 9498 if (!EvaluateFloat(E->getInit(CountInits), f, Info)) 9499 return false; 9500 } else // trailing float zero. 
9501 f = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy)); 9502 Elements.push_back(APValue(f)); 9503 CountElts++; 9504 } 9505 CountInits++; 9506 } 9507 return Success(Elements, E); 9508 } 9509 9510 bool 9511 VectorExprEvaluator::ZeroInitialization(const Expr *E) { 9512 const auto *VT = E->getType()->castAs<VectorType>(); 9513 QualType EltTy = VT->getElementType(); 9514 APValue ZeroElement; 9515 if (EltTy->isIntegerType()) 9516 ZeroElement = APValue(Info.Ctx.MakeIntValue(0, EltTy)); 9517 else 9518 ZeroElement = 9519 APValue(APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy))); 9520 9521 SmallVector<APValue, 4> Elements(VT->getNumElements(), ZeroElement); 9522 return Success(Elements, E); 9523 } 9524 9525 bool VectorExprEvaluator::VisitUnaryImag(const UnaryOperator *E) { 9526 VisitIgnoredValue(E->getSubExpr()); 9527 return ZeroInitialization(E); 9528 } 9529 9530 //===----------------------------------------------------------------------===// 9531 // Array Evaluation 9532 //===----------------------------------------------------------------------===// 9533 9534 namespace { 9535 class ArrayExprEvaluator 9536 : public ExprEvaluatorBase<ArrayExprEvaluator> { 9537 const LValue &This; 9538 APValue &Result; 9539 public: 9540 9541 ArrayExprEvaluator(EvalInfo &Info, const LValue &This, APValue &Result) 9542 : ExprEvaluatorBaseTy(Info), This(This), Result(Result) {} 9543 9544 bool Success(const APValue &V, const Expr *E) { 9545 assert(V.isArray() && "expected array"); 9546 Result = V; 9547 return true; 9548 } 9549 9550 bool ZeroInitialization(const Expr *E) { 9551 const ConstantArrayType *CAT = 9552 Info.Ctx.getAsConstantArrayType(E->getType()); 9553 if (!CAT) 9554 return Error(E); 9555 9556 Result = APValue(APValue::UninitArray(), 0, 9557 CAT->getSize().getZExtValue()); 9558 if (!Result.hasArrayFiller()) return true; 9559 9560 // Zero-initialize all elements. 9561 LValue Subobject = This; 9562 Subobject.addArray(Info, E, CAT); 9563 ImplicitValueInitExpr VIE(CAT->getElementType()); 9564 return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE); 9565 } 9566 9567 bool VisitCallExpr(const CallExpr *E) { 9568 return handleCallExpr(E, Result, &This); 9569 } 9570 bool VisitInitListExpr(const InitListExpr *E, 9571 QualType AllocType = QualType()); 9572 bool VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E); 9573 bool VisitCXXConstructExpr(const CXXConstructExpr *E); 9574 bool VisitCXXConstructExpr(const CXXConstructExpr *E, 9575 const LValue &Subobject, 9576 APValue *Value, QualType Type); 9577 bool VisitStringLiteral(const StringLiteral *E, 9578 QualType AllocType = QualType()) { 9579 expandStringLiteral(Info, E, Result, AllocType); 9580 return true; 9581 } 9582 }; 9583 } // end anonymous namespace 9584 9585 static bool EvaluateArray(const Expr *E, const LValue &This, 9586 APValue &Result, EvalInfo &Info) { 9587 assert(E->isRValue() && E->getType()->isArrayType() && "not an array rvalue"); 9588 return ArrayExprEvaluator(Info, This, Result).Visit(E); 9589 } 9590 9591 static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This, 9592 APValue &Result, const InitListExpr *ILE, 9593 QualType AllocType) { 9594 assert(ILE->isRValue() && ILE->getType()->isArrayType() && 9595 "not an array rvalue"); 9596 return ArrayExprEvaluator(Info, This, Result) 9597 .VisitInitListExpr(ILE, AllocType); 9598 } 9599 9600 // Return true iff the given array filler may depend on the element index. 
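// For example, the filler for `int a[100] = {1};` is an ImplicitValueInitExpr
// and is the same for every element, so it can be evaluated once and reused;
// a filler that runs a constructor is conservatively assumed to depend on the
// element index.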
9601 static bool MaybeElementDependentArrayFiller(const Expr *FillerExpr) { 9602 // For now, just whitelist non-class value-initialization and initialization 9603 // lists comprised of them. 9604 if (isa<ImplicitValueInitExpr>(FillerExpr)) 9605 return false; 9606 if (const InitListExpr *ILE = dyn_cast<InitListExpr>(FillerExpr)) { 9607 for (unsigned I = 0, E = ILE->getNumInits(); I != E; ++I) { 9608 if (MaybeElementDependentArrayFiller(ILE->getInit(I))) 9609 return true; 9610 } 9611 return false; 9612 } 9613 return true; 9614 } 9615 9616 bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E, 9617 QualType AllocType) { 9618 const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType( 9619 AllocType.isNull() ? E->getType() : AllocType); 9620 if (!CAT) 9621 return Error(E); 9622 9623 // C++11 [dcl.init.string]p1: A char array [...] can be initialized by [...] 9624 // an appropriately-typed string literal enclosed in braces. 9625 if (E->isStringLiteralInit()) { 9626 auto *SL = dyn_cast<StringLiteral>(E->getInit(0)->IgnoreParens()); 9627 // FIXME: Support ObjCEncodeExpr here once we support it in 9628 // ArrayExprEvaluator generally. 9629 if (!SL) 9630 return Error(E); 9631 return VisitStringLiteral(SL, AllocType); 9632 } 9633 9634 bool Success = true; 9635 9636 assert((!Result.isArray() || Result.getArrayInitializedElts() == 0) && 9637 "zero-initialized array shouldn't have any initialized elts"); 9638 APValue Filler; 9639 if (Result.isArray() && Result.hasArrayFiller()) 9640 Filler = Result.getArrayFiller(); 9641 9642 unsigned NumEltsToInit = E->getNumInits(); 9643 unsigned NumElts = CAT->getSize().getZExtValue(); 9644 const Expr *FillerExpr = E->hasArrayFiller() ? E->getArrayFiller() : nullptr; 9645 9646 // If the initializer might depend on the array index, run it for each 9647 // array element. 9648 if (NumEltsToInit != NumElts && MaybeElementDependentArrayFiller(FillerExpr)) 9649 NumEltsToInit = NumElts; 9650 9651 LLVM_DEBUG(llvm::dbgs() << "The number of elements to initialize: " 9652 << NumEltsToInit << ".\n"); 9653 9654 Result = APValue(APValue::UninitArray(), NumEltsToInit, NumElts); 9655 9656 // If the array was previously zero-initialized, preserve the 9657 // zero-initialized values. 9658 if (Filler.hasValue()) { 9659 for (unsigned I = 0, E = Result.getArrayInitializedElts(); I != E; ++I) 9660 Result.getArrayInitializedElt(I) = Filler; 9661 if (Result.hasArrayFiller()) 9662 Result.getArrayFiller() = Filler; 9663 } 9664 9665 LValue Subobject = This; 9666 Subobject.addArray(Info, E, CAT); 9667 for (unsigned Index = 0; Index != NumEltsToInit; ++Index) { 9668 const Expr *Init = 9669 Index < E->getNumInits() ? E->getInit(Index) : FillerExpr; 9670 if (!EvaluateInPlace(Result.getArrayInitializedElt(Index), 9671 Info, Subobject, Init) || 9672 !HandleLValueArrayAdjustment(Info, Init, Subobject, 9673 CAT->getElementType(), 1)) { 9674 if (!Info.noteFailure()) 9675 return false; 9676 Success = false; 9677 } 9678 } 9679 9680 if (!Result.hasArrayFiller()) 9681 return Success; 9682 9683 // If we get here, we have a trivial filler, which we can just evaluate 9684 // once and splat over the rest of the array elements. 
9685 assert(FillerExpr && "no array filler for incomplete init list"); 9686 return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, 9687 FillerExpr) && Success; 9688 } 9689 9690 bool ArrayExprEvaluator::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) { 9691 LValue CommonLV; 9692 if (E->getCommonExpr() && 9693 !Evaluate(Info.CurrentCall->createTemporary( 9694 E->getCommonExpr(), 9695 getStorageType(Info.Ctx, E->getCommonExpr()), false, 9696 CommonLV), 9697 Info, E->getCommonExpr()->getSourceExpr())) 9698 return false; 9699 9700 auto *CAT = cast<ConstantArrayType>(E->getType()->castAsArrayTypeUnsafe()); 9701 9702 uint64_t Elements = CAT->getSize().getZExtValue(); 9703 Result = APValue(APValue::UninitArray(), Elements, Elements); 9704 9705 LValue Subobject = This; 9706 Subobject.addArray(Info, E, CAT); 9707 9708 bool Success = true; 9709 for (EvalInfo::ArrayInitLoopIndex Index(Info); Index != Elements; ++Index) { 9710 if (!EvaluateInPlace(Result.getArrayInitializedElt(Index), 9711 Info, Subobject, E->getSubExpr()) || 9712 !HandleLValueArrayAdjustment(Info, E, Subobject, 9713 CAT->getElementType(), 1)) { 9714 if (!Info.noteFailure()) 9715 return false; 9716 Success = false; 9717 } 9718 } 9719 9720 return Success; 9721 } 9722 9723 bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) { 9724 return VisitCXXConstructExpr(E, This, &Result, E->getType()); 9725 } 9726 9727 bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E, 9728 const LValue &Subobject, 9729 APValue *Value, 9730 QualType Type) { 9731 bool HadZeroInit = Value->hasValue(); 9732 9733 if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(Type)) { 9734 unsigned N = CAT->getSize().getZExtValue(); 9735 9736 // Preserve the array filler if we had prior zero-initialization. 9737 APValue Filler = 9738 HadZeroInit && Value->hasArrayFiller() ? Value->getArrayFiller() 9739 : APValue(); 9740 9741 *Value = APValue(APValue::UninitArray(), N, N); 9742 9743 if (HadZeroInit) 9744 for (unsigned I = 0; I != N; ++I) 9745 Value->getArrayInitializedElt(I) = Filler; 9746 9747 // Initialize the elements. 9748 LValue ArrayElt = Subobject; 9749 ArrayElt.addArray(Info, E, CAT); 9750 for (unsigned I = 0; I != N; ++I) 9751 if (!VisitCXXConstructExpr(E, ArrayElt, &Value->getArrayInitializedElt(I), 9752 CAT->getElementType()) || 9753 !HandleLValueArrayAdjustment(Info, E, ArrayElt, 9754 CAT->getElementType(), 1)) 9755 return false; 9756 9757 return true; 9758 } 9759 9760 if (!Type->isRecordType()) 9761 return Error(E); 9762 9763 return RecordExprEvaluator(Info, Subobject, *Value) 9764 .VisitCXXConstructExpr(E, Type); 9765 } 9766 9767 //===----------------------------------------------------------------------===// 9768 // Integer Evaluation 9769 // 9770 // As a GNU extension, we support casting pointers to sufficiently-wide integer 9771 // types and back in constant folding. Integer values are thus represented 9772 // either as an integer-valued APValue, or as an lvalue-valued APValue. 
9773 //===----------------------------------------------------------------------===// 9774 9775 namespace { 9776 class IntExprEvaluator 9777 : public ExprEvaluatorBase<IntExprEvaluator> { 9778 APValue &Result; 9779 public: 9780 IntExprEvaluator(EvalInfo &info, APValue &result) 9781 : ExprEvaluatorBaseTy(info), Result(result) {} 9782 9783 bool Success(const llvm::APSInt &SI, const Expr *E, APValue &Result) { 9784 assert(E->getType()->isIntegralOrEnumerationType() && 9785 "Invalid evaluation result."); 9786 assert(SI.isSigned() == E->getType()->isSignedIntegerOrEnumerationType() && 9787 "Invalid evaluation result."); 9788 assert(SI.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) && 9789 "Invalid evaluation result."); 9790 Result = APValue(SI); 9791 return true; 9792 } 9793 bool Success(const llvm::APSInt &SI, const Expr *E) { 9794 return Success(SI, E, Result); 9795 } 9796 9797 bool Success(const llvm::APInt &I, const Expr *E, APValue &Result) { 9798 assert(E->getType()->isIntegralOrEnumerationType() && 9799 "Invalid evaluation result."); 9800 assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) && 9801 "Invalid evaluation result."); 9802 Result = APValue(APSInt(I)); 9803 Result.getInt().setIsUnsigned( 9804 E->getType()->isUnsignedIntegerOrEnumerationType()); 9805 return true; 9806 } 9807 bool Success(const llvm::APInt &I, const Expr *E) { 9808 return Success(I, E, Result); 9809 } 9810 9811 bool Success(uint64_t Value, const Expr *E, APValue &Result) { 9812 assert(E->getType()->isIntegralOrEnumerationType() && 9813 "Invalid evaluation result."); 9814 Result = APValue(Info.Ctx.MakeIntValue(Value, E->getType())); 9815 return true; 9816 } 9817 bool Success(uint64_t Value, const Expr *E) { 9818 return Success(Value, E, Result); 9819 } 9820 9821 bool Success(CharUnits Size, const Expr *E) { 9822 return Success(Size.getQuantity(), E); 9823 } 9824 9825 bool Success(const APValue &V, const Expr *E) { 9826 if (V.isLValue() || V.isAddrLabelDiff() || V.isIndeterminate()) { 9827 Result = V; 9828 return true; 9829 } 9830 return Success(V.getInt(), E); 9831 } 9832 9833 bool ZeroInitialization(const Expr *E) { return Success(0, E); } 9834 9835 //===--------------------------------------------------------------------===// 9836 // Visitor Methods 9837 //===--------------------------------------------------------------------===// 9838 9839 bool VisitConstantExpr(const ConstantExpr *E); 9840 9841 bool VisitIntegerLiteral(const IntegerLiteral *E) { 9842 return Success(E->getValue(), E); 9843 } 9844 bool VisitCharacterLiteral(const CharacterLiteral *E) { 9845 return Success(E->getValue(), E); 9846 } 9847 9848 bool CheckReferencedDecl(const Expr *E, const Decl *D); 9849 bool VisitDeclRefExpr(const DeclRefExpr *E) { 9850 if (CheckReferencedDecl(E, E->getDecl())) 9851 return true; 9852 9853 return ExprEvaluatorBaseTy::VisitDeclRefExpr(E); 9854 } 9855 bool VisitMemberExpr(const MemberExpr *E) { 9856 if (CheckReferencedDecl(E, E->getMemberDecl())) { 9857 VisitIgnoredBaseExpression(E->getBase()); 9858 return true; 9859 } 9860 9861 return ExprEvaluatorBaseTy::VisitMemberExpr(E); 9862 } 9863 9864 bool VisitCallExpr(const CallExpr *E); 9865 bool VisitBuiltinCallExpr(const CallExpr *E, unsigned BuiltinOp); 9866 bool VisitBinaryOperator(const BinaryOperator *E); 9867 bool VisitOffsetOfExpr(const OffsetOfExpr *E); 9868 bool VisitUnaryOperator(const UnaryOperator *E); 9869 9870 bool VisitCastExpr(const CastExpr* E); 9871 bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E); 9872 9873 bool 
VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { 9874 return Success(E->getValue(), E); 9875 } 9876 9877 bool VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) { 9878 return Success(E->getValue(), E); 9879 } 9880 9881 bool VisitArrayInitIndexExpr(const ArrayInitIndexExpr *E) { 9882 if (Info.ArrayInitIndex == uint64_t(-1)) { 9883 // We were asked to evaluate this subexpression independent of the 9884 // enclosing ArrayInitLoopExpr. We can't do that. 9885 Info.FFDiag(E); 9886 return false; 9887 } 9888 return Success(Info.ArrayInitIndex, E); 9889 } 9890 9891 // Note, GNU defines __null as an integer, not a pointer. 9892 bool VisitGNUNullExpr(const GNUNullExpr *E) { 9893 return ZeroInitialization(E); 9894 } 9895 9896 bool VisitTypeTraitExpr(const TypeTraitExpr *E) { 9897 return Success(E->getValue(), E); 9898 } 9899 9900 bool VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) { 9901 return Success(E->getValue(), E); 9902 } 9903 9904 bool VisitExpressionTraitExpr(const ExpressionTraitExpr *E) { 9905 return Success(E->getValue(), E); 9906 } 9907 9908 bool VisitUnaryReal(const UnaryOperator *E); 9909 bool VisitUnaryImag(const UnaryOperator *E); 9910 9911 bool VisitCXXNoexceptExpr(const CXXNoexceptExpr *E); 9912 bool VisitSizeOfPackExpr(const SizeOfPackExpr *E); 9913 bool VisitSourceLocExpr(const SourceLocExpr *E); 9914 bool VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E); 9915 bool VisitRequiresExpr(const RequiresExpr *E); 9916 // FIXME: Missing: array subscript of vector, member of vector 9917 }; 9918 9919 class FixedPointExprEvaluator 9920 : public ExprEvaluatorBase<FixedPointExprEvaluator> { 9921 APValue &Result; 9922 9923 public: 9924 FixedPointExprEvaluator(EvalInfo &info, APValue &result) 9925 : ExprEvaluatorBaseTy(info), Result(result) {} 9926 9927 bool Success(const llvm::APInt &I, const Expr *E) { 9928 return Success( 9929 APFixedPoint(I, Info.Ctx.getFixedPointSemantics(E->getType())), E); 9930 } 9931 9932 bool Success(uint64_t Value, const Expr *E) { 9933 return Success( 9934 APFixedPoint(Value, Info.Ctx.getFixedPointSemantics(E->getType())), E); 9935 } 9936 9937 bool Success(const APValue &V, const Expr *E) { 9938 return Success(V.getFixedPoint(), E); 9939 } 9940 9941 bool Success(const APFixedPoint &V, const Expr *E) { 9942 assert(E->getType()->isFixedPointType() && "Invalid evaluation result."); 9943 assert(V.getWidth() == Info.Ctx.getIntWidth(E->getType()) && 9944 "Invalid evaluation result."); 9945 Result = APValue(V); 9946 return true; 9947 } 9948 9949 //===--------------------------------------------------------------------===// 9950 // Visitor Methods 9951 //===--------------------------------------------------------------------===// 9952 9953 bool VisitFixedPointLiteral(const FixedPointLiteral *E) { 9954 return Success(E->getValue(), E); 9955 } 9956 9957 bool VisitCastExpr(const CastExpr *E); 9958 bool VisitUnaryOperator(const UnaryOperator *E); 9959 bool VisitBinaryOperator(const BinaryOperator *E); 9960 }; 9961 } // end anonymous namespace 9962 9963 /// EvaluateIntegerOrLValue - Evaluate an rvalue integral-typed expression, and 9964 /// produce either the integer value or a pointer. 9965 /// 9966 /// GCC has a heinous extension which folds casts between pointer types and 9967 /// pointer-sized integral types. We support this by allowing the evaluation of 9968 /// an integer rvalue to produce a pointer (represented as an lvalue) instead. 9969 /// Some simple arithmetic on such values is supported (they are treated much 9970 /// like char*). 
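/// For example (assuming 'long' is pointer-sized), an initializer such as
///
///   extern int x;
///   long n = (long)&x + sizeof(int);
///
/// can be folded to an lvalue-valued result designating 'x' at a byte offset
/// of sizeof(int).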
9971 static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result, 9972 EvalInfo &Info) { 9973 assert(E->isRValue() && E->getType()->isIntegralOrEnumerationType()); 9974 return IntExprEvaluator(Info, Result).Visit(E); 9975 } 9976 9977 static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info) { 9978 APValue Val; 9979 if (!EvaluateIntegerOrLValue(E, Val, Info)) 9980 return false; 9981 if (!Val.isInt()) { 9982 // FIXME: It would be better to produce the diagnostic for casting 9983 // a pointer to an integer. 9984 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr); 9985 return false; 9986 } 9987 Result = Val.getInt(); 9988 return true; 9989 } 9990 9991 bool IntExprEvaluator::VisitSourceLocExpr(const SourceLocExpr *E) { 9992 APValue Evaluated = E->EvaluateInContext( 9993 Info.Ctx, Info.CurrentCall->CurSourceLocExprScope.getDefaultExpr()); 9994 return Success(Evaluated, E); 9995 } 9996 9997 static bool EvaluateFixedPoint(const Expr *E, APFixedPoint &Result, 9998 EvalInfo &Info) { 9999 if (E->getType()->isFixedPointType()) { 10000 APValue Val; 10001 if (!FixedPointExprEvaluator(Info, Val).Visit(E)) 10002 return false; 10003 if (!Val.isFixedPoint()) 10004 return false; 10005 10006 Result = Val.getFixedPoint(); 10007 return true; 10008 } 10009 return false; 10010 } 10011 10012 static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result, 10013 EvalInfo &Info) { 10014 if (E->getType()->isIntegerType()) { 10015 auto FXSema = Info.Ctx.getFixedPointSemantics(E->getType()); 10016 APSInt Val; 10017 if (!EvaluateInteger(E, Val, Info)) 10018 return false; 10019 Result = APFixedPoint(Val, FXSema); 10020 return true; 10021 } else if (E->getType()->isFixedPointType()) { 10022 return EvaluateFixedPoint(E, Result, Info); 10023 } 10024 return false; 10025 } 10026 10027 /// Check whether the given declaration can be directly converted to an integral 10028 /// rvalue. If not, no diagnostic is produced; there are other things we can 10029 /// try. 10030 bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) { 10031 // Enums are integer constant exprs. 10032 if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D)) { 10033 // Check for signedness/width mismatches between E type and ECD value. 10034 bool SameSign = (ECD->getInitVal().isSigned() 10035 == E->getType()->isSignedIntegerOrEnumerationType()); 10036 bool SameWidth = (ECD->getInitVal().getBitWidth() 10037 == Info.Ctx.getIntWidth(E->getType())); 10038 if (SameSign && SameWidth) 10039 return Success(ECD->getInitVal(), E); 10040 else { 10041 // Get rid of mismatch (otherwise Success assertions will fail) 10042 // by computing a new value matching the type of E. 10043 llvm::APSInt Val = ECD->getInitVal(); 10044 if (!SameSign) 10045 Val.setIsSigned(!ECD->getInitVal().isSigned()); 10046 if (!SameWidth) 10047 Val = Val.extOrTrunc(Info.Ctx.getIntWidth(E->getType())); 10048 return Success(Val, E); 10049 } 10050 } 10051 return false; 10052 } 10053 10054 /// Values returned by __builtin_classify_type, chosen to match the values 10055 /// produced by GCC's builtin. 10056 enum class GCCTypeClass { 10057 None = -1, 10058 Void = 0, 10059 Integer = 1, 10060 // GCC reserves 2 for character types, but instead classifies them as 10061 // integers. 10062 Enum = 3, 10063 Bool = 4, 10064 Pointer = 5, 10065 // GCC reserves 6 for references, but appears to never use it (because 10066 // expressions never have reference type, presumably). 
10067 PointerToDataMember = 7, 10068 RealFloat = 8, 10069 Complex = 9, 10070 // GCC reserves 10 for functions, but does not use it since GCC version 6 due 10071 // to decay to pointer. (Prior to version 6 it was only used in C++ mode). 10072 // GCC claims to reserve 11 for pointers to member functions, but *actually* 10073 // uses 12 for that purpose, same as for a class or struct. Maybe it 10074 // internally implements a pointer to member as a struct? Who knows. 10075 PointerToMemberFunction = 12, // Not a bug, see above. 10076 ClassOrStruct = 12, 10077 Union = 13, 10078 // GCC reserves 14 for arrays, but does not use it since GCC version 6 due to 10079 // decay to pointer. (Prior to version 6 it was only used in C++ mode). 10080 // GCC reserves 15 for strings, but actually uses 5 (pointer) for string 10081 // literals. 10082 }; 10083 10084 /// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way 10085 /// as GCC. 10086 static GCCTypeClass 10087 EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) { 10088 assert(!T->isDependentType() && "unexpected dependent type"); 10089 10090 QualType CanTy = T.getCanonicalType(); 10091 const BuiltinType *BT = dyn_cast<BuiltinType>(CanTy); 10092 10093 switch (CanTy->getTypeClass()) { 10094 #define TYPE(ID, BASE) 10095 #define DEPENDENT_TYPE(ID, BASE) case Type::ID: 10096 #define NON_CANONICAL_TYPE(ID, BASE) case Type::ID: 10097 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(ID, BASE) case Type::ID: 10098 #include "clang/AST/TypeNodes.inc" 10099 case Type::Auto: 10100 case Type::DeducedTemplateSpecialization: 10101 llvm_unreachable("unexpected non-canonical or dependent type"); 10102 10103 case Type::Builtin: 10104 switch (BT->getKind()) { 10105 #define BUILTIN_TYPE(ID, SINGLETON_ID) 10106 #define SIGNED_TYPE(ID, SINGLETON_ID) \ 10107 case BuiltinType::ID: return GCCTypeClass::Integer; 10108 #define FLOATING_TYPE(ID, SINGLETON_ID) \ 10109 case BuiltinType::ID: return GCCTypeClass::RealFloat; 10110 #define PLACEHOLDER_TYPE(ID, SINGLETON_ID) \ 10111 case BuiltinType::ID: break; 10112 #include "clang/AST/BuiltinTypes.def" 10113 case BuiltinType::Void: 10114 return GCCTypeClass::Void; 10115 10116 case BuiltinType::Bool: 10117 return GCCTypeClass::Bool; 10118 10119 case BuiltinType::Char_U: 10120 case BuiltinType::UChar: 10121 case BuiltinType::WChar_U: 10122 case BuiltinType::Char8: 10123 case BuiltinType::Char16: 10124 case BuiltinType::Char32: 10125 case BuiltinType::UShort: 10126 case BuiltinType::UInt: 10127 case BuiltinType::ULong: 10128 case BuiltinType::ULongLong: 10129 case BuiltinType::UInt128: 10130 return GCCTypeClass::Integer; 10131 10132 case BuiltinType::UShortAccum: 10133 case BuiltinType::UAccum: 10134 case BuiltinType::ULongAccum: 10135 case BuiltinType::UShortFract: 10136 case BuiltinType::UFract: 10137 case BuiltinType::ULongFract: 10138 case BuiltinType::SatUShortAccum: 10139 case BuiltinType::SatUAccum: 10140 case BuiltinType::SatULongAccum: 10141 case BuiltinType::SatUShortFract: 10142 case BuiltinType::SatUFract: 10143 case BuiltinType::SatULongFract: 10144 return GCCTypeClass::None; 10145 10146 case BuiltinType::NullPtr: 10147 10148 case BuiltinType::ObjCId: 10149 case BuiltinType::ObjCClass: 10150 case BuiltinType::ObjCSel: 10151 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 10152 case BuiltinType::Id: 10153 #include "clang/Basic/OpenCLImageTypes.def" 10154 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 10155 case BuiltinType::Id: 10156 #include "clang/Basic/OpenCLExtensionTypes.def" 
10157 case BuiltinType::OCLSampler: 10158 case BuiltinType::OCLEvent: 10159 case BuiltinType::OCLClkEvent: 10160 case BuiltinType::OCLQueue: 10161 case BuiltinType::OCLReserveID: 10162 #define SVE_TYPE(Name, Id, SingletonId) \ 10163 case BuiltinType::Id: 10164 #include "clang/Basic/AArch64SVEACLETypes.def" 10165 return GCCTypeClass::None; 10166 10167 case BuiltinType::Dependent: 10168 llvm_unreachable("unexpected dependent type"); 10169 }; 10170 llvm_unreachable("unexpected placeholder type"); 10171 10172 case Type::Enum: 10173 return LangOpts.CPlusPlus ? GCCTypeClass::Enum : GCCTypeClass::Integer; 10174 10175 case Type::Pointer: 10176 case Type::ConstantArray: 10177 case Type::VariableArray: 10178 case Type::IncompleteArray: 10179 case Type::FunctionNoProto: 10180 case Type::FunctionProto: 10181 return GCCTypeClass::Pointer; 10182 10183 case Type::MemberPointer: 10184 return CanTy->isMemberDataPointerType() 10185 ? GCCTypeClass::PointerToDataMember 10186 : GCCTypeClass::PointerToMemberFunction; 10187 10188 case Type::Complex: 10189 return GCCTypeClass::Complex; 10190 10191 case Type::Record: 10192 return CanTy->isUnionType() ? GCCTypeClass::Union 10193 : GCCTypeClass::ClassOrStruct; 10194 10195 case Type::Atomic: 10196 // GCC classifies _Atomic T the same as T. 10197 return EvaluateBuiltinClassifyType( 10198 CanTy->castAs<AtomicType>()->getValueType(), LangOpts); 10199 10200 case Type::BlockPointer: 10201 case Type::Vector: 10202 case Type::ExtVector: 10203 case Type::ObjCObject: 10204 case Type::ObjCInterface: 10205 case Type::ObjCObjectPointer: 10206 case Type::Pipe: 10207 // GCC classifies vectors as None. We follow its lead and classify all 10208 // other types that don't fit into the regular classification the same way. 10209 return GCCTypeClass::None; 10210 10211 case Type::LValueReference: 10212 case Type::RValueReference: 10213 llvm_unreachable("invalid type for expression"); 10214 } 10215 10216 llvm_unreachable("unexpected type class"); 10217 } 10218 10219 /// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way 10220 /// as GCC. 10221 static GCCTypeClass 10222 EvaluateBuiltinClassifyType(const CallExpr *E, const LangOptions &LangOpts) { 10223 // If no argument was supplied, default to None. This isn't 10224 // ideal, however it is what gcc does. 10225 if (E->getNumArgs() == 0) 10226 return GCCTypeClass::None; 10227 10228 // FIXME: Bizarrely, GCC treats a call with more than one argument as not 10229 // being an ICE, but still folds it to a constant using the type of the first 10230 // argument. 10231 return EvaluateBuiltinClassifyType(E->getArg(0)->getType(), LangOpts); 10232 } 10233 10234 /// EvaluateBuiltinConstantPForLValue - Determine the result of 10235 /// __builtin_constant_p when applied to the given pointer. 10236 /// 10237 /// A pointer is only "constant" if it is null (or a pointer cast to integer) 10238 /// or it points to the first character of a string literal. 10239 static bool EvaluateBuiltinConstantPForLValue(const APValue &LV) { 10240 APValue::LValueBase Base = LV.getLValueBase(); 10241 if (Base.isNull()) { 10242 // A null base is acceptable. 10243 return true; 10244 } else if (const Expr *E = Base.dyn_cast<const Expr *>()) { 10245 if (!isa<StringLiteral>(E)) 10246 return false; 10247 return LV.getLValueOffset().isZero(); 10248 } else if (Base.is<TypeInfoLValue>()) { 10249 // Surprisingly, GCC considers __builtin_constant_p(&typeid(int)) to 10250 // evaluate to true. 
10251 return true; 10252 } else { 10253 // Any other base is not constant enough for GCC. 10254 return false; 10255 } 10256 } 10257 10258 /// EvaluateBuiltinConstantP - Evaluate __builtin_constant_p as similarly to 10259 /// GCC as we can manage. 10260 static bool EvaluateBuiltinConstantP(EvalInfo &Info, const Expr *Arg) { 10261 // This evaluation is not permitted to have side-effects, so evaluate it in 10262 // a speculative evaluation context. 10263 SpeculativeEvaluationRAII SpeculativeEval(Info); 10264 10265 // Constant-folding is always enabled for the operand of __builtin_constant_p 10266 // (even when the enclosing evaluation context otherwise requires a strict 10267 // language-specific constant expression). 10268 FoldConstant Fold(Info, true); 10269 10270 QualType ArgType = Arg->getType(); 10271 10272 // __builtin_constant_p always has one operand. The rules which gcc follows 10273 // are not precisely documented, but are as follows: 10274 // 10275 // - If the operand is of integral, floating, complex or enumeration type, 10276 // and can be folded to a known value of that type, it returns 1. 10277 // - If the operand can be folded to a pointer to the first character 10278 // of a string literal (or such a pointer cast to an integral type) 10279 // or to a null pointer or an integer cast to a pointer, it returns 1. 10280 // 10281 // Otherwise, it returns 0. 10282 // 10283 // FIXME: GCC also intends to return 1 for literals of aggregate types, but 10284 // its support for this did not work prior to GCC 9 and is not yet well 10285 // understood. 10286 if (ArgType->isIntegralOrEnumerationType() || ArgType->isFloatingType() || 10287 ArgType->isAnyComplexType() || ArgType->isPointerType() || 10288 ArgType->isNullPtrType()) { 10289 APValue V; 10290 if (!::EvaluateAsRValue(Info, Arg, V)) { 10291 Fold.keepDiagnostics(); 10292 return false; 10293 } 10294 10295 // For a pointer (possibly cast to integer), there are special rules. 10296 if (V.getKind() == APValue::LValue) 10297 return EvaluateBuiltinConstantPForLValue(V); 10298 10299 // Otherwise, any constant value is good enough. 10300 return V.hasValue(); 10301 } 10302 10303 // Anything else isn't considered to be sufficiently constant. 10304 return false; 10305 } 10306 10307 /// Retrieves the "underlying object type" of the given expression, 10308 /// as used by __builtin_object_size. 10309 static QualType getObjectType(APValue::LValueBase B) { 10310 if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) { 10311 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) 10312 return VD->getType(); 10313 } else if (const Expr *E = B.dyn_cast<const Expr*>()) { 10314 if (isa<CompoundLiteralExpr>(E)) 10315 return E->getType(); 10316 } else if (B.is<TypeInfoLValue>()) { 10317 return B.getTypeInfoType(); 10318 } else if (B.is<DynamicAllocLValue>()) { 10319 return B.getDynamicAllocType(); 10320 } 10321 10322 return QualType(); 10323 } 10324 10325 /// A more selective version of E->IgnoreParenCasts for 10326 /// tryEvaluateBuiltinObjectSize. This ignores some casts/parens that serve only 10327 /// to change the type of E. 10328 /// Ex. For E = `(short*)((char*)(&foo))`, returns `&foo` 10329 /// 10330 /// Always returns an RValue with a pointer representation. 
10331 static const Expr *ignorePointerCastsAndParens(const Expr *E) { 10332 assert(E->isRValue() && E->getType()->hasPointerRepresentation()); 10333 10334 auto *NoParens = E->IgnoreParens(); 10335 auto *Cast = dyn_cast<CastExpr>(NoParens); 10336 if (Cast == nullptr) 10337 return NoParens; 10338 10339 // We only conservatively allow a few kinds of casts, because this code is 10340 // inherently a simple solution that seeks to support the common case. 10341 auto CastKind = Cast->getCastKind(); 10342 if (CastKind != CK_NoOp && CastKind != CK_BitCast && 10343 CastKind != CK_AddressSpaceConversion) 10344 return NoParens; 10345 10346 auto *SubExpr = Cast->getSubExpr(); 10347 if (!SubExpr->getType()->hasPointerRepresentation() || !SubExpr->isRValue()) 10348 return NoParens; 10349 return ignorePointerCastsAndParens(SubExpr); 10350 } 10351 10352 /// Checks to see if the given LValue's Designator is at the end of the LValue's 10353 /// record layout. e.g. 10354 /// struct { struct { int a, b; } fst, snd; } obj; 10355 /// obj.fst // no 10356 /// obj.snd // yes 10357 /// obj.fst.a // no 10358 /// obj.fst.b // no 10359 /// obj.snd.a // no 10360 /// obj.snd.b // yes 10361 /// 10362 /// Please note: this function is specialized for how __builtin_object_size 10363 /// views "objects". 10364 /// 10365 /// If this encounters an invalid RecordDecl or otherwise cannot determine the 10366 /// correct result, it will always return true. 10367 static bool isDesignatorAtObjectEnd(const ASTContext &Ctx, const LValue &LVal) { 10368 assert(!LVal.Designator.Invalid); 10369 10370 auto IsLastOrInvalidFieldDecl = [&Ctx](const FieldDecl *FD, bool &Invalid) { 10371 const RecordDecl *Parent = FD->getParent(); 10372 Invalid = Parent->isInvalidDecl(); 10373 if (Invalid || Parent->isUnion()) 10374 return true; 10375 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(Parent); 10376 return FD->getFieldIndex() + 1 == Layout.getFieldCount(); 10377 }; 10378 10379 auto &Base = LVal.getLValueBase(); 10380 if (auto *ME = dyn_cast_or_null<MemberExpr>(Base.dyn_cast<const Expr *>())) { 10381 if (auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) { 10382 bool Invalid; 10383 if (!IsLastOrInvalidFieldDecl(FD, Invalid)) 10384 return Invalid; 10385 } else if (auto *IFD = dyn_cast<IndirectFieldDecl>(ME->getMemberDecl())) { 10386 for (auto *FD : IFD->chain()) { 10387 bool Invalid; 10388 if (!IsLastOrInvalidFieldDecl(cast<FieldDecl>(FD), Invalid)) 10389 return Invalid; 10390 } 10391 } 10392 } 10393 10394 unsigned I = 0; 10395 QualType BaseType = getType(Base); 10396 if (LVal.Designator.FirstEntryIsAnUnsizedArray) { 10397 // If we don't know the array bound, conservatively assume we're looking at 10398 // the final array element. 10399 ++I; 10400 if (BaseType->isIncompleteArrayType()) 10401 BaseType = Ctx.getAsArrayType(BaseType)->getElementType(); 10402 else 10403 BaseType = BaseType->castAs<PointerType>()->getPointeeType(); 10404 } 10405 10406 for (unsigned E = LVal.Designator.Entries.size(); I != E; ++I) { 10407 const auto &Entry = LVal.Designator.Entries[I]; 10408 if (BaseType->isArrayType()) { 10409 // Because __builtin_object_size treats arrays as objects, we can ignore 10410 // the index iff this is the last array in the Designator. 
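// For example, with `int a[2][3]`, a designator ending at a[i] is considered
// to be at the object end regardless of i, while a[0][j] is not, because
// a[0] is not the last element of a.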
10411 if (I + 1 == E) 10412 return true; 10413 const auto *CAT = cast<ConstantArrayType>(Ctx.getAsArrayType(BaseType)); 10414 uint64_t Index = Entry.getAsArrayIndex(); 10415 if (Index + 1 != CAT->getSize()) 10416 return false; 10417 BaseType = CAT->getElementType(); 10418 } else if (BaseType->isAnyComplexType()) { 10419 const auto *CT = BaseType->castAs<ComplexType>(); 10420 uint64_t Index = Entry.getAsArrayIndex(); 10421 if (Index != 1) 10422 return false; 10423 BaseType = CT->getElementType(); 10424 } else if (auto *FD = getAsField(Entry)) { 10425 bool Invalid; 10426 if (!IsLastOrInvalidFieldDecl(FD, Invalid)) 10427 return Invalid; 10428 BaseType = FD->getType(); 10429 } else { 10430 assert(getAsBaseClass(Entry) && "Expecting cast to a base class"); 10431 return false; 10432 } 10433 } 10434 return true; 10435 } 10436 10437 /// Tests to see if the LValue has a user-specified designator (that isn't 10438 /// necessarily valid). Note that this always returns 'true' if the LValue has 10439 /// an unsized array as its first designator entry, because there's currently no 10440 /// way to tell if the user typed *foo or foo[0]. 10441 static bool refersToCompleteObject(const LValue &LVal) { 10442 if (LVal.Designator.Invalid) 10443 return false; 10444 10445 if (!LVal.Designator.Entries.empty()) 10446 return LVal.Designator.isMostDerivedAnUnsizedArray(); 10447 10448 if (!LVal.InvalidBase) 10449 return true; 10450 10451 // If `E` is a MemberExpr, then the first part of the designator is hiding in 10452 // the LValueBase. 10453 const auto *E = LVal.Base.dyn_cast<const Expr *>(); 10454 return !E || !isa<MemberExpr>(E); 10455 } 10456 10457 /// Attempts to detect a user writing into a piece of memory that's impossible 10458 /// to figure out the size of by just using types. 10459 static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal) { 10460 const SubobjectDesignator &Designator = LVal.Designator; 10461 // Notes: 10462 // - Users can only write off of the end when we have an invalid base. Invalid 10463 // bases imply we don't know where the memory came from. 10464 // - We used to be a bit more aggressive here; we'd only be conservative if 10465 // the array at the end was flexible, or if it had 0 or 1 elements. This 10466 // broke some common standard library extensions (PR30346), but was 10467 // otherwise seemingly fine. It may be useful to reintroduce this behavior 10468 // with some sort of whitelist. OTOH, it seems that GCC is always 10469 // conservative with the last element in structs (if it's an array), so our 10470 // current behavior is more compatible than a whitelisting approach would 10471 // be. 10472 return LVal.InvalidBase && 10473 Designator.Entries.size() == Designator.MostDerivedPathLength && 10474 Designator.MostDerivedIsArrayElement && 10475 isDesignatorAtObjectEnd(Ctx, LVal); 10476 } 10477 10478 /// Converts the given APInt to CharUnits, assuming the APInt is unsigned. 10479 /// Fails if the conversion would cause loss of precision. 
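/// (CharUnits quantities are signed, so any value above the maximum signed
/// quantity is rejected.)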
10480 static bool convertUnsignedAPIntToCharUnits(const llvm::APInt &Int,
10481 CharUnits &Result) {
10482 auto CharUnitsMax = std::numeric_limits<CharUnits::QuantityType>::max();
10483 if (Int.ugt(CharUnitsMax))
10484 return false;
10485 Result = CharUnits::fromQuantity(Int.getZExtValue());
10486 return true;
10487 }
10488 
10489 /// Helper for tryEvaluateBuiltinObjectSize -- Given an LValue, this will
10490 /// determine how many bytes exist from the beginning of the object to either
10491 /// the end of the current subobject, or the end of the object itself, depending
10492 /// on what the LValue looks like + the value of Type.
10493 ///
10494 /// If this returns false, the value of EndOffset is undefined.
10495 static bool determineEndOffset(EvalInfo &Info, SourceLocation ExprLoc,
10496 unsigned Type, const LValue &LVal,
10497 CharUnits &EndOffset) {
10498 bool DetermineForCompleteObject = refersToCompleteObject(LVal);
10499 
10500 auto CheckedHandleSizeof = [&](QualType Ty, CharUnits &Result) {
10501 if (Ty.isNull() || Ty->isIncompleteType() || Ty->isFunctionType())
10502 return false;
10503 return HandleSizeof(Info, ExprLoc, Ty, Result);
10504 };
10505 
10506 // We want to evaluate the size of the entire object. This is a valid fallback
10507 // for when Type=1 and the designator is invalid, because we're asked for an
10508 // upper-bound.
10509 if (!(Type & 1) || LVal.Designator.Invalid || DetermineForCompleteObject) {
10510 // Type=3 wants a lower bound, so we can't fall back to this.
10511 if (Type == 3 && !DetermineForCompleteObject)
10512 return false;
10513 
10514 llvm::APInt APEndOffset;
10515 if (isBaseAnAllocSizeCall(LVal.getLValueBase()) &&
10516 getBytesReturnedByAllocSizeCall(Info.Ctx, LVal, APEndOffset))
10517 return convertUnsignedAPIntToCharUnits(APEndOffset, EndOffset);
10518 
10519 if (LVal.InvalidBase)
10520 return false;
10521 
10522 QualType BaseTy = getObjectType(LVal.getLValueBase());
10523 return CheckedHandleSizeof(BaseTy, EndOffset);
10524 }
10525 
10526 // We want to evaluate the size of a subobject.
10527 const SubobjectDesignator &Designator = LVal.Designator;
10528 
10529 // The following is a moderately common idiom in C:
10530 //
10531 // struct Foo { int a; char c[1]; };
10532 // struct Foo *F = (struct Foo *)malloc(sizeof(struct Foo) + strlen(Bar));
10533 // strcpy(&F->c[0], Bar);
10534 //
10535 // In order to not break too much legacy code, we need to support it.
10536 if (isUserWritingOffTheEnd(Info.Ctx, LVal)) {
10537 // If we can resolve this to an alloc_size call, we can hand that back,
10538 // because we know for certain how many bytes there are to write to.
10539 llvm::APInt APEndOffset;
10540 if (isBaseAnAllocSizeCall(LVal.getLValueBase()) &&
10541 getBytesReturnedByAllocSizeCall(Info.Ctx, LVal, APEndOffset))
10542 return convertUnsignedAPIntToCharUnits(APEndOffset, EndOffset);
10543 
10544 // If we cannot determine the size of the initial allocation, then we can't
10545 // give an accurate upper-bound. However, we are still able to give
10546 // conservative lower-bounds for Type=3.
10547 if (Type == 1)
10548 return false;
10549 }
10550 
10551 CharUnits BytesPerElem;
10552 if (!CheckedHandleSizeof(Designator.MostDerivedType, BytesPerElem))
10553 return false;
10554 
10555 // According to the GCC documentation, we want the size of the subobject
10556 // denoted by the pointer. But that's not quite right -- what we actually
10557 // want is the size of the immediately-enclosing array, if there is one.
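// For example, given `char buf[8];`, __builtin_object_size(&buf[2], 1)
// evaluates to 6 (the bytes remaining in buf), not sizeof(char).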
10558 int64_t ElemsRemaining;
10559 if (Designator.MostDerivedIsArrayElement &&
10560 Designator.Entries.size() == Designator.MostDerivedPathLength) {
10561 uint64_t ArraySize = Designator.getMostDerivedArraySize();
10562 uint64_t ArrayIndex = Designator.Entries.back().getAsArrayIndex();
10563 ElemsRemaining = ArraySize <= ArrayIndex ? 0 : ArraySize - ArrayIndex;
10564 } else {
10565 ElemsRemaining = Designator.isOnePastTheEnd() ? 0 : 1;
10566 }
10567 
10568 EndOffset = LVal.getLValueOffset() + BytesPerElem * ElemsRemaining;
10569 return true;
10570 }
10571 
10572 /// Tries to evaluate the __builtin_object_size for @p E. If successful,
10573 /// returns true and stores the result in @p Size.
10577 static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
10578 EvalInfo &Info, uint64_t &Size) {
10579 // Determine the denoted object.
10580 LValue LVal;
10581 {
10582 // The operand of __builtin_object_size is never evaluated for side-effects.
10583 // If there are any, but we can determine the pointed-to object anyway, then
10584 // ignore the side-effects.
10585 SpeculativeEvaluationRAII SpeculativeEval(Info);
10586 IgnoreSideEffectsRAII Fold(Info);
10587 
10588 if (E->isGLValue()) {
10589 // It's possible for us to be given GLValues if we're called via
10590 // Expr::tryEvaluateObjectSize.
10591 APValue RVal;
10592 if (!EvaluateAsRValue(Info, E, RVal))
10593 return false;
10594 LVal.setFrom(Info.Ctx, RVal);
10595 } else if (!EvaluatePointer(ignorePointerCastsAndParens(E), LVal, Info,
10596 /*InvalidBaseOK=*/true))
10597 return false;
10598 }
10599 
10600 // If we point to before the start of the object, there are no accessible
10601 // bytes.
10602 if (LVal.getLValueOffset().isNegative()) {
10603 Size = 0;
10604 return true;
10605 }
10606 
10607 CharUnits EndOffset;
10608 if (!determineEndOffset(Info, E->getExprLoc(), Type, LVal, EndOffset))
10609 return false;
10610 
10611 // If we've fallen outside of the end offset, just pretend there's nothing to
10612 // write to/read from.
10613 if (EndOffset <= LVal.getLValueOffset())
10614 Size = 0;
10615 else
10616 Size = (EndOffset - LVal.getLValueOffset()).getQuantity();
10617 return true;
10618 }
10619 
10620 bool IntExprEvaluator::VisitConstantExpr(const ConstantExpr *E) {
10621 llvm::SaveAndRestore<bool> InConstantContext(Info.InConstantContext, true);
10622 if (E->getResultAPValueKind() != APValue::None)
10623 return Success(E->getAPValueResult(), E);
10624 return ExprEvaluatorBaseTy::VisitConstantExpr(E);
10625 }
10626 
10627 bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
10628 if (unsigned BuiltinOp = E->getBuiltinCallee())
10629 return VisitBuiltinCallExpr(E, BuiltinOp);
10630 
10631 return ExprEvaluatorBaseTy::VisitCallExpr(E);
10632 }
10633 
10634 static bool getBuiltinAlignArguments(const CallExpr *E, EvalInfo &Info,
10635 APValue &Val, APSInt &Alignment) {
10636 QualType SrcTy = E->getArg(0)->getType();
10637 if (!getAlignmentArgument(E->getArg(1), SrcTy, Info, Alignment))
10638 return false;
10639 // Even though we are evaluating integer expressions we could get a pointer
10640 // argument for the __builtin_is_aligned() case.
10641 if (SrcTy->isPointerType()) {
10642 LValue Ptr;
10643 if (!EvaluatePointer(E->getArg(0), Ptr, Info))
10644 return false;
10645 Ptr.moveInto(Val);
10646 } else if (!SrcTy->isIntegralOrEnumerationType()) {
10647 Info.FFDiag(E->getArg(0));
10648 return false;
10649 } else {
10650 APSInt SrcInt;
10651 if (!EvaluateInteger(E->getArg(0), SrcInt, Info))
10652 return false;
10653 assert(SrcInt.getBitWidth() >= Alignment.getBitWidth() &&
10654 "Bit widths must be the same");
10655 Val = APValue(SrcInt);
10656 }
10657 assert(Val.hasValue());
10658 return true;
10659 }
10660 
10661 bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
10662 unsigned BuiltinOp) {
10663 switch (BuiltinOp) {
10664 default:
10665 return ExprEvaluatorBaseTy::VisitCallExpr(E);
10666 
10667 case Builtin::BI__builtin_dynamic_object_size:
10668 case Builtin::BI__builtin_object_size: {
10669 // The type was checked when we built the expression.
10670 unsigned Type =
10671 E->getArg(1)->EvaluateKnownConstInt(Info.Ctx).getZExtValue();
10672 assert(Type <= 3 && "unexpected type");
10673 
10674 uint64_t Size;
10675 if (tryEvaluateBuiltinObjectSize(E->getArg(0), Type, Info, Size))
10676 return Success(Size, E);
10677 
10678 if (E->getArg(0)->HasSideEffects(Info.Ctx))
10679 return Success((Type & 2) ? 0 : -1, E);
10680 
10681 // Expression had no side effects, but we couldn't statically determine the
10682 // size of the referenced object.
10683 switch (Info.EvalMode) {
10684 case EvalInfo::EM_ConstantExpression:
10685 case EvalInfo::EM_ConstantFold:
10686 case EvalInfo::EM_IgnoreSideEffects:
10687 // Leave it to IR generation.
10688 return Error(E);
10689 case EvalInfo::EM_ConstantExpressionUnevaluated:
10690 // Reduce it to a constant now.
10691 return Success((Type & 2) ? 0 : -1, E);
10692 }
10693 
10694 llvm_unreachable("unexpected EvalMode");
10695 }
10696 
10697 case Builtin::BI__builtin_os_log_format_buffer_size: {
10698 analyze_os_log::OSLogBufferLayout Layout;
10699 analyze_os_log::computeOSLogBufferLayout(Info.Ctx, E, Layout);
10700 return Success(Layout.size().getQuantity(), E);
10701 }
10702 
10703 case Builtin::BI__builtin_is_aligned: {
10704 APValue Src;
10705 APSInt Alignment;
10706 if (!getBuiltinAlignArguments(E, Info, Src, Alignment))
10707 return false;
10708 if (Src.isLValue()) {
10709 // If we evaluated a pointer, check the minimum known alignment.
10710 LValue Ptr;
10711 Ptr.setFrom(Info.Ctx, Src);
10712 CharUnits BaseAlignment = getBaseAlignment(Info, Ptr);
10713 CharUnits PtrAlign = BaseAlignment.alignmentAtOffset(Ptr.Offset);
10714 // We can return true if the known alignment at the computed offset is
10715 // greater than or equal to the requested alignment.
10716 assert(PtrAlign.isPowerOfTwo());
10717 assert(Alignment.isPowerOf2());
10718 if (PtrAlign.getQuantity() >= Alignment)
10719 return Success(1, E);
10720 // If the alignment is not known to be sufficient, some cases could still
10721 // be aligned at run time. However, if the requested alignment is less than
10722 // or equal to the base alignment and the offset is not aligned, we know that
10723 // the run-time value can never be aligned.
10724 if (BaseAlignment.getQuantity() >= Alignment &&
10725 PtrAlign.getQuantity() < Alignment)
10726 return Success(0, E);
10727 // Otherwise we can't infer whether the value is sufficiently aligned.
10728 // TODO: __builtin_is_aligned(__builtin_align_{down,up}(expr, N), N)
10729 // in cases where we can't fully evaluate the pointer.
10730 Info.FFDiag(E->getArg(0), diag::note_constexpr_alignment_compute) 10731 << Alignment; 10732 return false; 10733 } 10734 assert(Src.isInt()); 10735 return Success((Src.getInt() & (Alignment - 1)) == 0 ? 1 : 0, E); 10736 } 10737 case Builtin::BI__builtin_align_up: { 10738 APValue Src; 10739 APSInt Alignment; 10740 if (!getBuiltinAlignArguments(E, Info, Src, Alignment)) 10741 return false; 10742 if (!Src.isInt()) 10743 return Error(E); 10744 APSInt AlignedVal = 10745 APSInt((Src.getInt() + (Alignment - 1)) & ~(Alignment - 1), 10746 Src.getInt().isUnsigned()); 10747 assert(AlignedVal.getBitWidth() == Src.getInt().getBitWidth()); 10748 return Success(AlignedVal, E); 10749 } 10750 case Builtin::BI__builtin_align_down: { 10751 APValue Src; 10752 APSInt Alignment; 10753 if (!getBuiltinAlignArguments(E, Info, Src, Alignment)) 10754 return false; 10755 if (!Src.isInt()) 10756 return Error(E); 10757 APSInt AlignedVal = 10758 APSInt(Src.getInt() & ~(Alignment - 1), Src.getInt().isUnsigned()); 10759 assert(AlignedVal.getBitWidth() == Src.getInt().getBitWidth()); 10760 return Success(AlignedVal, E); 10761 } 10762 10763 case Builtin::BI__builtin_bswap16: 10764 case Builtin::BI__builtin_bswap32: 10765 case Builtin::BI__builtin_bswap64: { 10766 APSInt Val; 10767 if (!EvaluateInteger(E->getArg(0), Val, Info)) 10768 return false; 10769 10770 return Success(Val.byteSwap(), E); 10771 } 10772 10773 case Builtin::BI__builtin_classify_type: 10774 return Success((int)EvaluateBuiltinClassifyType(E, Info.getLangOpts()), E); 10775 10776 case Builtin::BI__builtin_clrsb: 10777 case Builtin::BI__builtin_clrsbl: 10778 case Builtin::BI__builtin_clrsbll: { 10779 APSInt Val; 10780 if (!EvaluateInteger(E->getArg(0), Val, Info)) 10781 return false; 10782 10783 return Success(Val.getBitWidth() - Val.getMinSignedBits(), E); 10784 } 10785 10786 case Builtin::BI__builtin_clz: 10787 case Builtin::BI__builtin_clzl: 10788 case Builtin::BI__builtin_clzll: 10789 case Builtin::BI__builtin_clzs: { 10790 APSInt Val; 10791 if (!EvaluateInteger(E->getArg(0), Val, Info)) 10792 return false; 10793 if (!Val) 10794 return Error(E); 10795 10796 return Success(Val.countLeadingZeros(), E); 10797 } 10798 10799 case Builtin::BI__builtin_constant_p: { 10800 const Expr *Arg = E->getArg(0); 10801 if (EvaluateBuiltinConstantP(Info, Arg)) 10802 return Success(true, E); 10803 if (Info.InConstantContext || Arg->HasSideEffects(Info.Ctx)) { 10804 // Outside a constant context, eagerly evaluate to false in the presence 10805 // of side-effects in order to avoid -Wunsequenced false-positives in 10806 // a branch on __builtin_constant_p(expr). 10807 return Success(false, E); 10808 } 10809 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr); 10810 return false; 10811 } 10812 10813 case Builtin::BI__builtin_is_constant_evaluated: { 10814 const auto *Callee = Info.CurrentCall->getCallee(); 10815 if (Info.InConstantContext && !Info.CheckingPotentialConstantExpression && 10816 (Info.CallStackDepth == 1 || 10817 (Info.CallStackDepth == 2 && Callee->isInStdNamespace() && 10818 Callee->getIdentifier() && 10819 Callee->getIdentifier()->isStr("is_constant_evaluated")))) { 10820 // FIXME: Find a better way to avoid duplicated diagnostics. 10821 if (Info.EvalStatus.Diag) 10822 Info.report((Info.CallStackDepth == 1) ? E->getExprLoc() 10823 : Info.CurrentCall->CallLoc, 10824 diag::warn_is_constant_evaluated_always_true_constexpr) 10825 << (Info.CallStackDepth == 1 ? 
"__builtin_is_constant_evaluated" 10826 : "std::is_constant_evaluated"); 10827 } 10828 10829 return Success(Info.InConstantContext, E); 10830 } 10831 10832 case Builtin::BI__builtin_ctz: 10833 case Builtin::BI__builtin_ctzl: 10834 case Builtin::BI__builtin_ctzll: 10835 case Builtin::BI__builtin_ctzs: { 10836 APSInt Val; 10837 if (!EvaluateInteger(E->getArg(0), Val, Info)) 10838 return false; 10839 if (!Val) 10840 return Error(E); 10841 10842 return Success(Val.countTrailingZeros(), E); 10843 } 10844 10845 case Builtin::BI__builtin_eh_return_data_regno: { 10846 int Operand = E->getArg(0)->EvaluateKnownConstInt(Info.Ctx).getZExtValue(); 10847 Operand = Info.Ctx.getTargetInfo().getEHDataRegisterNumber(Operand); 10848 return Success(Operand, E); 10849 } 10850 10851 case Builtin::BI__builtin_expect: 10852 return Visit(E->getArg(0)); 10853 10854 case Builtin::BI__builtin_ffs: 10855 case Builtin::BI__builtin_ffsl: 10856 case Builtin::BI__builtin_ffsll: { 10857 APSInt Val; 10858 if (!EvaluateInteger(E->getArg(0), Val, Info)) 10859 return false; 10860 10861 unsigned N = Val.countTrailingZeros(); 10862 return Success(N == Val.getBitWidth() ? 0 : N + 1, E); 10863 } 10864 10865 case Builtin::BI__builtin_fpclassify: { 10866 APFloat Val(0.0); 10867 if (!EvaluateFloat(E->getArg(5), Val, Info)) 10868 return false; 10869 unsigned Arg; 10870 switch (Val.getCategory()) { 10871 case APFloat::fcNaN: Arg = 0; break; 10872 case APFloat::fcInfinity: Arg = 1; break; 10873 case APFloat::fcNormal: Arg = Val.isDenormal() ? 3 : 2; break; 10874 case APFloat::fcZero: Arg = 4; break; 10875 } 10876 return Visit(E->getArg(Arg)); 10877 } 10878 10879 case Builtin::BI__builtin_isinf_sign: { 10880 APFloat Val(0.0); 10881 return EvaluateFloat(E->getArg(0), Val, Info) && 10882 Success(Val.isInfinity() ? (Val.isNegative() ? -1 : 1) : 0, E); 10883 } 10884 10885 case Builtin::BI__builtin_isinf: { 10886 APFloat Val(0.0); 10887 return EvaluateFloat(E->getArg(0), Val, Info) && 10888 Success(Val.isInfinity() ? 1 : 0, E); 10889 } 10890 10891 case Builtin::BI__builtin_isfinite: { 10892 APFloat Val(0.0); 10893 return EvaluateFloat(E->getArg(0), Val, Info) && 10894 Success(Val.isFinite() ? 1 : 0, E); 10895 } 10896 10897 case Builtin::BI__builtin_isnan: { 10898 APFloat Val(0.0); 10899 return EvaluateFloat(E->getArg(0), Val, Info) && 10900 Success(Val.isNaN() ? 1 : 0, E); 10901 } 10902 10903 case Builtin::BI__builtin_isnormal: { 10904 APFloat Val(0.0); 10905 return EvaluateFloat(E->getArg(0), Val, Info) && 10906 Success(Val.isNormal() ? 1 : 0, E); 10907 } 10908 10909 case Builtin::BI__builtin_parity: 10910 case Builtin::BI__builtin_parityl: 10911 case Builtin::BI__builtin_parityll: { 10912 APSInt Val; 10913 if (!EvaluateInteger(E->getArg(0), Val, Info)) 10914 return false; 10915 10916 return Success(Val.countPopulation() % 2, E); 10917 } 10918 10919 case Builtin::BI__builtin_popcount: 10920 case Builtin::BI__builtin_popcountl: 10921 case Builtin::BI__builtin_popcountll: { 10922 APSInt Val; 10923 if (!EvaluateInteger(E->getArg(0), Val, Info)) 10924 return false; 10925 10926 return Success(Val.countPopulation(), E); 10927 } 10928 10929 case Builtin::BIstrlen: 10930 case Builtin::BIwcslen: 10931 // A call to strlen is not a constant expression. 
10932 if (Info.getLangOpts().CPlusPlus11)
10933 Info.CCEDiag(E, diag::note_constexpr_invalid_function)
10934 << /*isConstexpr*/0 << /*isConstructor*/0
10935 << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
10936 else
10937 Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
10938 LLVM_FALLTHROUGH;
10939 case Builtin::BI__builtin_strlen:
10940 case Builtin::BI__builtin_wcslen: {
10941 // As an extension, we support __builtin_strlen() as a constant expression,
10942 // and support folding strlen() to a constant.
10943 LValue String;
10944 if (!EvaluatePointer(E->getArg(0), String, Info))
10945 return false;
10946 
10947 QualType CharTy = E->getArg(0)->getType()->getPointeeType();
10948 
10949 // Fast path: if it's a string literal, search the string value.
10950 if (const StringLiteral *S = dyn_cast_or_null<StringLiteral>(
10951 String.getLValueBase().dyn_cast<const Expr *>())) {
10952 // The string literal may have embedded null characters. Find the first
10953 // one and truncate there.
10954 StringRef Str = S->getBytes();
10955 int64_t Off = String.Offset.getQuantity();
10956 if (Off >= 0 && (uint64_t)Off <= (uint64_t)Str.size() &&
10957 S->getCharByteWidth() == 1 &&
10958 // FIXME: Add fast-path for wchar_t too.
10959 Info.Ctx.hasSameUnqualifiedType(CharTy, Info.Ctx.CharTy)) {
10960 Str = Str.substr(Off);
10961 
10962 StringRef::size_type Pos = Str.find(0);
10963 if (Pos != StringRef::npos)
10964 Str = Str.substr(0, Pos);
10965 
10966 return Success(Str.size(), E);
10967 }
10968 
10969 // Fall through to slow path to issue appropriate diagnostic.
10970 }
10971 
10972 // Slow path: scan the bytes of the string looking for the terminating 0.
10973 for (uint64_t Strlen = 0; /**/; ++Strlen) {
10974 APValue Char;
10975 if (!handleLValueToRValueConversion(Info, E, CharTy, String, Char) ||
10976 !Char.isInt())
10977 return false;
10978 if (!Char.getInt())
10979 return Success(Strlen, E);
10980 if (!HandleLValueArrayAdjustment(Info, E, String, CharTy, 1))
10981 return false;
10982 }
10983 }
10984 
10985 case Builtin::BIstrcmp:
10986 case Builtin::BIwcscmp:
10987 case Builtin::BIstrncmp:
10988 case Builtin::BIwcsncmp:
10989 case Builtin::BImemcmp:
10990 case Builtin::BIbcmp:
10991 case Builtin::BIwmemcmp:
10992 // A call to these library functions is not a constant expression.
10993 if (Info.getLangOpts().CPlusPlus11)
10994 Info.CCEDiag(E, diag::note_constexpr_invalid_function)
10995 << /*isConstexpr*/0 << /*isConstructor*/0
10996 << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
10997 else
10998 Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
10999 LLVM_FALLTHROUGH;
11000 case Builtin::BI__builtin_strcmp:
11001 case Builtin::BI__builtin_wcscmp:
11002 case Builtin::BI__builtin_strncmp:
11003 case Builtin::BI__builtin_wcsncmp:
11004 case Builtin::BI__builtin_memcmp:
11005 case Builtin::BI__builtin_bcmp:
11006 case Builtin::BI__builtin_wmemcmp: {
11007 LValue String1, String2;
11008 if (!EvaluatePointer(E->getArg(0), String1, Info) ||
11009 !EvaluatePointer(E->getArg(1), String2, Info))
11010 return false;
11011 
11012 uint64_t MaxLength = uint64_t(-1);
11013 if (BuiltinOp != Builtin::BIstrcmp &&
11014 BuiltinOp != Builtin::BIwcscmp &&
11015 BuiltinOp != Builtin::BI__builtin_strcmp &&
11016 BuiltinOp != Builtin::BI__builtin_wcscmp) {
11017 APSInt N;
11018 if (!EvaluateInteger(E->getArg(2), N, Info))
11019 return false;
11020 MaxLength = N.getExtValue();
11021 }
11022 
11023 // Empty substrings compare equal by definition.
11024 if (MaxLength == 0u) 11025 return Success(0, E); 11026 11027 if (!String1.checkNullPointerForFoldAccess(Info, E, AK_Read) || 11028 !String2.checkNullPointerForFoldAccess(Info, E, AK_Read) || 11029 String1.Designator.Invalid || String2.Designator.Invalid) 11030 return false; 11031 11032 QualType CharTy1 = String1.Designator.getType(Info.Ctx); 11033 QualType CharTy2 = String2.Designator.getType(Info.Ctx); 11034 11035 bool IsRawByte = BuiltinOp == Builtin::BImemcmp || 11036 BuiltinOp == Builtin::BIbcmp || 11037 BuiltinOp == Builtin::BI__builtin_memcmp || 11038 BuiltinOp == Builtin::BI__builtin_bcmp; 11039 11040 assert(IsRawByte || 11041 (Info.Ctx.hasSameUnqualifiedType( 11042 CharTy1, E->getArg(0)->getType()->getPointeeType()) && 11043 Info.Ctx.hasSameUnqualifiedType(CharTy1, CharTy2))); 11044 11045 const auto &ReadCurElems = [&](APValue &Char1, APValue &Char2) { 11046 return handleLValueToRValueConversion(Info, E, CharTy1, String1, Char1) && 11047 handleLValueToRValueConversion(Info, E, CharTy2, String2, Char2) && 11048 Char1.isInt() && Char2.isInt(); 11049 }; 11050 const auto &AdvanceElems = [&] { 11051 return HandleLValueArrayAdjustment(Info, E, String1, CharTy1, 1) && 11052 HandleLValueArrayAdjustment(Info, E, String2, CharTy2, 1); 11053 }; 11054 11055 if (IsRawByte) { 11056 uint64_t BytesRemaining = MaxLength; 11057 // Pointers to const void may point to objects of incomplete type. 11058 if (CharTy1->isIncompleteType()) { 11059 Info.FFDiag(E, diag::note_constexpr_ltor_incomplete_type) << CharTy1; 11060 return false; 11061 } 11062 if (CharTy2->isIncompleteType()) { 11063 Info.FFDiag(E, diag::note_constexpr_ltor_incomplete_type) << CharTy2; 11064 return false; 11065 } 11066 uint64_t CharTy1Width{Info.Ctx.getTypeSize(CharTy1)}; 11067 CharUnits CharTy1Size = Info.Ctx.toCharUnitsFromBits(CharTy1Width); 11068 // Give up on comparing between elements with disparate widths. 11069 if (CharTy1Size != Info.Ctx.getTypeSizeInChars(CharTy2)) 11070 return false; 11071 uint64_t BytesPerElement = CharTy1Size.getQuantity(); 11072 assert(BytesRemaining && "BytesRemaining should not be zero: the " 11073 "following loop considers at least one element"); 11074 while (true) { 11075 APValue Char1, Char2; 11076 if (!ReadCurElems(Char1, Char2)) 11077 return false; 11078 // We have compatible in-memory widths, but a possible type and 11079 // (for `bool`) internal representation mismatch. 11080 // Assuming two's complement representation, including 0 for `false` and 11081 // 1 for `true`, we can check an appropriate number of elements for 11082 // equality even if they are not byte-sized. 11083 APSInt Char1InMem = Char1.getInt().extOrTrunc(CharTy1Width); 11084 APSInt Char2InMem = Char2.getInt().extOrTrunc(CharTy1Width); 11085 if (Char1InMem.ne(Char2InMem)) { 11086 // If the elements are byte-sized, then we can produce a three-way 11087 // comparison result in a straightforward manner. 11088 if (BytesPerElement == 1u) { 11089 // memcmp always compares unsigned chars. 11090 return Success(Char1InMem.ult(Char2InMem) ? -1 : 1, E); 11091 } 11092 // The result is byte-order sensitive, and we have multibyte elements. 11093 // FIXME: We can compare the remaining bytes in the correct order. 11094 return false; 11095 } 11096 if (!AdvanceElems()) 11097 return false; 11098 if (BytesRemaining <= BytesPerElement) 11099 break; 11100 BytesRemaining -= BytesPerElement; 11101 } 11102 // Enough elements are equal to account for the memcmp limit. 
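// (Illustrative only, assuming a recent clang.) Concretely, the element-by-element
// loop above is what folds byte-wise comparisons such as:
//
//   static_assert(__builtin_memcmp("abc", "abd", 2) == 0, "first two bytes match");
//   static_assert(__builtin_memcmp("abc", "abd", 3) < 0, "'c' < 'd' as unsigned char");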
11103 return Success(0, E);
11104 }
11105
11106 bool StopAtNull =
11107 (BuiltinOp != Builtin::BImemcmp && BuiltinOp != Builtin::BIbcmp &&
11108 BuiltinOp != Builtin::BIwmemcmp &&
11109 BuiltinOp != Builtin::BI__builtin_memcmp &&
11110 BuiltinOp != Builtin::BI__builtin_bcmp &&
11111 BuiltinOp != Builtin::BI__builtin_wmemcmp);
11112 bool IsWide = BuiltinOp == Builtin::BIwcscmp ||
11113 BuiltinOp == Builtin::BIwcsncmp ||
11114 BuiltinOp == Builtin::BIwmemcmp ||
11115 BuiltinOp == Builtin::BI__builtin_wcscmp ||
11116 BuiltinOp == Builtin::BI__builtin_wcsncmp ||
11117 BuiltinOp == Builtin::BI__builtin_wmemcmp;
11118
11119 for (; MaxLength; --MaxLength) {
11120 APValue Char1, Char2;
11121 if (!ReadCurElems(Char1, Char2))
11122 return false;
11123 if (Char1.getInt() != Char2.getInt()) {
11124 if (IsWide) // wmemcmp compares with wchar_t signedness.
11125 return Success(Char1.getInt() < Char2.getInt() ? -1 : 1, E);
11126 // memcmp always compares unsigned chars.
11127 return Success(Char1.getInt().ult(Char2.getInt()) ? -1 : 1, E);
11128 }
11129 if (StopAtNull && !Char1.getInt())
11130 return Success(0, E);
11131 assert(!(StopAtNull && !Char2.getInt()));
11132 if (!AdvanceElems())
11133 return false;
11134 }
11135 // We hit the strncmp / memcmp limit.
11136 return Success(0, E);
11137 }
11138
11139 case Builtin::BI__atomic_always_lock_free:
11140 case Builtin::BI__atomic_is_lock_free:
11141 case Builtin::BI__c11_atomic_is_lock_free: {
11142 APSInt SizeVal;
11143 if (!EvaluateInteger(E->getArg(0), SizeVal, Info))
11144 return false;
11145
11146 // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
11147 // of two less than the maximum inline atomic width, we know it is
11148 // lock-free. If the size isn't a power of two, or greater than the
11149 // maximum alignment where we promote atomics, we know it is not lock-free
11150 // (at least not in the sense of atomic_is_lock_free). Otherwise,
11151 // the answer can only be determined at runtime; for example, 16-byte
11152 // atomics have lock-free implementations on some, but not all,
11153 // x86-64 processors.
11154
11155 // Check power-of-two.
11156 CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
11157 if (Size.isPowerOfTwo()) {
11158 // Check against inlining width.
11159 unsigned InlineWidthBits =
11160 Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth();
11161 if (Size <= Info.Ctx.toCharUnitsFromBits(InlineWidthBits)) {
11162 if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
11163 Size == CharUnits::One() ||
11164 E->getArg(1)->isNullPointerConstant(Info.Ctx,
11165 Expr::NPC_NeverValueDependent))
11166 // OK, we will inline appropriately-aligned operations of this size,
11167 // and _Atomic(T) is appropriately-aligned.
11168 return Success(1, E);
11169
11170 QualType PointeeType = E->getArg(1)->IgnoreImpCasts()->getType()->
11171 castAs<PointerType>()->getPointeeType();
11172 if (!PointeeType->isIncompleteType() &&
11173 Info.Ctx.getTypeAlignInChars(PointeeType) >= Size) {
11174 // OK, we will inline operations on this object.
11175 return Success(1, E);
11176 }
11177 }
11178 }
11179
11180 // Avoid emitting a call for the runtime decision on 32-bit PowerPC.
11181 // The lock-free possibilities on this platform are covered by the lines
11182 // above, and we know in advance that other cases require a lock.
11183 if (Info.Ctx.getTargetInfo().getTriple().getArch() == llvm::Triple::ppc) {
11184 return Success(0, E);
11185 }
11186
11187 return BuiltinOp == Builtin::BI__atomic_always_lock_free ?
11188 Success(0, E) : Error(E); 11189 } 11190 case Builtin::BIomp_is_initial_device: 11191 // We can decide statically which value the runtime would return if called. 11192 return Success(Info.getLangOpts().OpenMPIsDevice ? 0 : 1, E); 11193 case Builtin::BI__builtin_add_overflow: 11194 case Builtin::BI__builtin_sub_overflow: 11195 case Builtin::BI__builtin_mul_overflow: 11196 case Builtin::BI__builtin_sadd_overflow: 11197 case Builtin::BI__builtin_uadd_overflow: 11198 case Builtin::BI__builtin_uaddl_overflow: 11199 case Builtin::BI__builtin_uaddll_overflow: 11200 case Builtin::BI__builtin_usub_overflow: 11201 case Builtin::BI__builtin_usubl_overflow: 11202 case Builtin::BI__builtin_usubll_overflow: 11203 case Builtin::BI__builtin_umul_overflow: 11204 case Builtin::BI__builtin_umull_overflow: 11205 case Builtin::BI__builtin_umulll_overflow: 11206 case Builtin::BI__builtin_saddl_overflow: 11207 case Builtin::BI__builtin_saddll_overflow: 11208 case Builtin::BI__builtin_ssub_overflow: 11209 case Builtin::BI__builtin_ssubl_overflow: 11210 case Builtin::BI__builtin_ssubll_overflow: 11211 case Builtin::BI__builtin_smul_overflow: 11212 case Builtin::BI__builtin_smull_overflow: 11213 case Builtin::BI__builtin_smulll_overflow: { 11214 LValue ResultLValue; 11215 APSInt LHS, RHS; 11216 11217 QualType ResultType = E->getArg(2)->getType()->getPointeeType(); 11218 if (!EvaluateInteger(E->getArg(0), LHS, Info) || 11219 !EvaluateInteger(E->getArg(1), RHS, Info) || 11220 !EvaluatePointer(E->getArg(2), ResultLValue, Info)) 11221 return false; 11222 11223 APSInt Result; 11224 bool DidOverflow = false; 11225 11226 // If the types don't have to match, enlarge all 3 to the largest of them. 11227 if (BuiltinOp == Builtin::BI__builtin_add_overflow || 11228 BuiltinOp == Builtin::BI__builtin_sub_overflow || 11229 BuiltinOp == Builtin::BI__builtin_mul_overflow) { 11230 bool IsSigned = LHS.isSigned() || RHS.isSigned() || 11231 ResultType->isSignedIntegerOrEnumerationType(); 11232 bool AllSigned = LHS.isSigned() && RHS.isSigned() && 11233 ResultType->isSignedIntegerOrEnumerationType(); 11234 uint64_t LHSSize = LHS.getBitWidth(); 11235 uint64_t RHSSize = RHS.getBitWidth(); 11236 uint64_t ResultSize = Info.Ctx.getTypeSize(ResultType); 11237 uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize); 11238 11239 // Add an additional bit if the signedness isn't uniformly agreed to. We 11240 // could do this ONLY if there is a signed and an unsigned that both have 11241 // MaxBits, but the code to check that is pretty nasty. The issue will be 11242 // caught in the shrink-to-result later anyway. 11243 if (IsSigned && !AllSigned) 11244 ++MaxBits; 11245 11246 LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned); 11247 RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned); 11248 Result = APSInt(MaxBits, !IsSigned); 11249 } 11250 11251 // Find largest int. 11252 switch (BuiltinOp) { 11253 default: 11254 llvm_unreachable("Invalid value for BuiltinOp"); 11255 case Builtin::BI__builtin_add_overflow: 11256 case Builtin::BI__builtin_sadd_overflow: 11257 case Builtin::BI__builtin_saddl_overflow: 11258 case Builtin::BI__builtin_saddll_overflow: 11259 case Builtin::BI__builtin_uadd_overflow: 11260 case Builtin::BI__builtin_uaddl_overflow: 11261 case Builtin::BI__builtin_uaddll_overflow: 11262 Result = LHS.isSigned() ? 
LHS.sadd_ov(RHS, DidOverflow) 11263 : LHS.uadd_ov(RHS, DidOverflow); 11264 break; 11265 case Builtin::BI__builtin_sub_overflow: 11266 case Builtin::BI__builtin_ssub_overflow: 11267 case Builtin::BI__builtin_ssubl_overflow: 11268 case Builtin::BI__builtin_ssubll_overflow: 11269 case Builtin::BI__builtin_usub_overflow: 11270 case Builtin::BI__builtin_usubl_overflow: 11271 case Builtin::BI__builtin_usubll_overflow: 11272 Result = LHS.isSigned() ? LHS.ssub_ov(RHS, DidOverflow) 11273 : LHS.usub_ov(RHS, DidOverflow); 11274 break; 11275 case Builtin::BI__builtin_mul_overflow: 11276 case Builtin::BI__builtin_smul_overflow: 11277 case Builtin::BI__builtin_smull_overflow: 11278 case Builtin::BI__builtin_smulll_overflow: 11279 case Builtin::BI__builtin_umul_overflow: 11280 case Builtin::BI__builtin_umull_overflow: 11281 case Builtin::BI__builtin_umulll_overflow: 11282 Result = LHS.isSigned() ? LHS.smul_ov(RHS, DidOverflow) 11283 : LHS.umul_ov(RHS, DidOverflow); 11284 break; 11285 } 11286 11287 // In the case where multiple sizes are allowed, truncate and see if 11288 // the values are the same. 11289 if (BuiltinOp == Builtin::BI__builtin_add_overflow || 11290 BuiltinOp == Builtin::BI__builtin_sub_overflow || 11291 BuiltinOp == Builtin::BI__builtin_mul_overflow) { 11292 // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead, 11293 // since it will give us the behavior of a TruncOrSelf in the case where 11294 // its parameter <= its size. We previously set Result to be at least the 11295 // type-size of the result, so getTypeSize(ResultType) <= Result.BitWidth 11296 // will work exactly like TruncOrSelf. 11297 APSInt Temp = Result.extOrTrunc(Info.Ctx.getTypeSize(ResultType)); 11298 Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType()); 11299 11300 if (!APSInt::isSameValue(Temp, Result)) 11301 DidOverflow = true; 11302 Result = Temp; 11303 } 11304 11305 APValue APV{Result}; 11306 if (!handleAssignment(Info, E, ResultLValue, ResultType, APV)) 11307 return false; 11308 return Success(DidOverflow, E); 11309 } 11310 } 11311 } 11312 11313 /// Determine whether this is a pointer past the end of the complete 11314 /// object referred to by the lvalue. 11315 static bool isOnePastTheEndOfCompleteObject(const ASTContext &Ctx, 11316 const LValue &LV) { 11317 // A null pointer can be viewed as being "past the end" but we don't 11318 // choose to look at it that way here. 11319 if (!LV.getLValueBase()) 11320 return false; 11321 11322 // If the designator is valid and refers to a subobject, we're not pointing 11323 // past the end. 11324 if (!LV.getLValueDesignator().Invalid && 11325 !LV.getLValueDesignator().isOnePastTheEnd()) 11326 return false; 11327 11328 // A pointer to an incomplete type might be past-the-end if the type's size is 11329 // zero. We cannot tell because the type is incomplete. 11330 QualType Ty = getType(LV.getLValueBase()); 11331 if (Ty->isIncompleteType()) 11332 return true; 11333 11334 // We're a past-the-end pointer if we point to the byte after the object, 11335 // no matter what our type or path is. 11336 auto Size = Ctx.getTypeSizeInChars(Ty); 11337 return LV.getLValueOffset() == Size; 11338 } 11339 11340 namespace { 11341 11342 /// Data recursive integer evaluator of certain binary operators. 11343 /// 11344 /// We use a data recursive algorithm for binary operators so that we are able 11345 /// to handle extreme cases of chained binary operators without causing stack 11346 /// overflow. 
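// (Illustrative only.) The motivating inputs are machine-generated initializers
// with very long operator chains, e.g.
//
//   constexpr int K = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8; // imagine tens of thousands of terms
//
// A recursive visitor would consume one native stack frame per '+' node; the
// explicit work queue below keeps native stack usage bounded instead.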
11347 class DataRecursiveIntBinOpEvaluator { 11348 struct EvalResult { 11349 APValue Val; 11350 bool Failed; 11351 11352 EvalResult() : Failed(false) { } 11353 11354 void swap(EvalResult &RHS) { 11355 Val.swap(RHS.Val); 11356 Failed = RHS.Failed; 11357 RHS.Failed = false; 11358 } 11359 }; 11360 11361 struct Job { 11362 const Expr *E; 11363 EvalResult LHSResult; // meaningful only for binary operator expression. 11364 enum { AnyExprKind, BinOpKind, BinOpVisitedLHSKind } Kind; 11365 11366 Job() = default; 11367 Job(Job &&) = default; 11368 11369 void startSpeculativeEval(EvalInfo &Info) { 11370 SpecEvalRAII = SpeculativeEvaluationRAII(Info); 11371 } 11372 11373 private: 11374 SpeculativeEvaluationRAII SpecEvalRAII; 11375 }; 11376 11377 SmallVector<Job, 16> Queue; 11378 11379 IntExprEvaluator &IntEval; 11380 EvalInfo &Info; 11381 APValue &FinalResult; 11382 11383 public: 11384 DataRecursiveIntBinOpEvaluator(IntExprEvaluator &IntEval, APValue &Result) 11385 : IntEval(IntEval), Info(IntEval.getEvalInfo()), FinalResult(Result) { } 11386 11387 /// True if \param E is a binary operator that we are going to handle 11388 /// data recursively. 11389 /// We handle binary operators that are comma, logical, or that have operands 11390 /// with integral or enumeration type. 11391 static bool shouldEnqueue(const BinaryOperator *E) { 11392 return E->getOpcode() == BO_Comma || E->isLogicalOp() || 11393 (E->isRValue() && E->getType()->isIntegralOrEnumerationType() && 11394 E->getLHS()->getType()->isIntegralOrEnumerationType() && 11395 E->getRHS()->getType()->isIntegralOrEnumerationType()); 11396 } 11397 11398 bool Traverse(const BinaryOperator *E) { 11399 enqueue(E); 11400 EvalResult PrevResult; 11401 while (!Queue.empty()) 11402 process(PrevResult); 11403 11404 if (PrevResult.Failed) return false; 11405 11406 FinalResult.swap(PrevResult.Val); 11407 return true; 11408 } 11409 11410 private: 11411 bool Success(uint64_t Value, const Expr *E, APValue &Result) { 11412 return IntEval.Success(Value, E, Result); 11413 } 11414 bool Success(const APSInt &Value, const Expr *E, APValue &Result) { 11415 return IntEval.Success(Value, E, Result); 11416 } 11417 bool Error(const Expr *E) { 11418 return IntEval.Error(E); 11419 } 11420 bool Error(const Expr *E, diag::kind D) { 11421 return IntEval.Error(E, D); 11422 } 11423 11424 OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) { 11425 return Info.CCEDiag(E, D); 11426 } 11427 11428 // Returns true if visiting the RHS is necessary, false otherwise. 11429 bool VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E, 11430 bool &SuppressRHSDiags); 11431 11432 bool VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult, 11433 const BinaryOperator *E, APValue &Result); 11434 11435 void EvaluateExpr(const Expr *E, EvalResult &Result) { 11436 Result.Failed = !Evaluate(Result.Val, Info, E); 11437 if (Result.Failed) 11438 Result.Val = APValue(); 11439 } 11440 11441 void process(EvalResult &Result); 11442 11443 void enqueue(const Expr *E) { 11444 E = E->IgnoreParens(); 11445 Queue.resize(Queue.size()+1); 11446 Queue.back().E = E; 11447 Queue.back().Kind = Job::AnyExprKind; 11448 } 11449 }; 11450 11451 } 11452 11453 bool DataRecursiveIntBinOpEvaluator:: 11454 VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E, 11455 bool &SuppressRHSDiags) { 11456 if (E->getOpcode() == BO_Comma) { 11457 // Ignore LHS but note if we could not evaluate it. 
11458 if (LHSResult.Failed) 11459 return Info.noteSideEffect(); 11460 return true; 11461 } 11462 11463 if (E->isLogicalOp()) { 11464 bool LHSAsBool; 11465 if (!LHSResult.Failed && HandleConversionToBool(LHSResult.Val, LHSAsBool)) { 11466 // We were able to evaluate the LHS, see if we can get away with not 11467 // evaluating the RHS: 0 && X -> 0, 1 || X -> 1 11468 if (LHSAsBool == (E->getOpcode() == BO_LOr)) { 11469 Success(LHSAsBool, E, LHSResult.Val); 11470 return false; // Ignore RHS 11471 } 11472 } else { 11473 LHSResult.Failed = true; 11474 11475 // Since we weren't able to evaluate the left hand side, it 11476 // might have had side effects. 11477 if (!Info.noteSideEffect()) 11478 return false; 11479 11480 // We can't evaluate the LHS; however, sometimes the result 11481 // is determined by the RHS: X && 0 -> 0, X || 1 -> 1. 11482 // Don't ignore RHS and suppress diagnostics from this arm. 11483 SuppressRHSDiags = true; 11484 } 11485 11486 return true; 11487 } 11488 11489 assert(E->getLHS()->getType()->isIntegralOrEnumerationType() && 11490 E->getRHS()->getType()->isIntegralOrEnumerationType()); 11491 11492 if (LHSResult.Failed && !Info.noteFailure()) 11493 return false; // Ignore RHS; 11494 11495 return true; 11496 } 11497 11498 static void addOrSubLValueAsInteger(APValue &LVal, const APSInt &Index, 11499 bool IsSub) { 11500 // Compute the new offset in the appropriate width, wrapping at 64 bits. 11501 // FIXME: When compiling for a 32-bit target, we should use 32-bit 11502 // offsets. 11503 assert(!LVal.hasLValuePath() && "have designator for integer lvalue"); 11504 CharUnits &Offset = LVal.getLValueOffset(); 11505 uint64_t Offset64 = Offset.getQuantity(); 11506 uint64_t Index64 = Index.extOrTrunc(64).getZExtValue(); 11507 Offset = CharUnits::fromQuantity(IsSub ? Offset64 - Index64 11508 : Offset64 + Index64); 11509 } 11510 11511 bool DataRecursiveIntBinOpEvaluator:: 11512 VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult, 11513 const BinaryOperator *E, APValue &Result) { 11514 if (E->getOpcode() == BO_Comma) { 11515 if (RHSResult.Failed) 11516 return false; 11517 Result = RHSResult.Val; 11518 return true; 11519 } 11520 11521 if (E->isLogicalOp()) { 11522 bool lhsResult, rhsResult; 11523 bool LHSIsOK = HandleConversionToBool(LHSResult.Val, lhsResult); 11524 bool RHSIsOK = HandleConversionToBool(RHSResult.Val, rhsResult); 11525 11526 if (LHSIsOK) { 11527 if (RHSIsOK) { 11528 if (E->getOpcode() == BO_LOr) 11529 return Success(lhsResult || rhsResult, E, Result); 11530 else 11531 return Success(lhsResult && rhsResult, E, Result); 11532 } 11533 } else { 11534 if (RHSIsOK) { 11535 // We can't evaluate the LHS; however, sometimes the result 11536 // is determined by the RHS: X && 0 -> 0, X || 1 -> 1. 11537 if (rhsResult == (E->getOpcode() == BO_LOr)) 11538 return Success(rhsResult, E, Result); 11539 } 11540 } 11541 11542 return false; 11543 } 11544 11545 assert(E->getLHS()->getType()->isIntegralOrEnumerationType() && 11546 E->getRHS()->getType()->isIntegralOrEnumerationType()); 11547 11548 if (LHSResult.Failed || RHSResult.Failed) 11549 return false; 11550 11551 const APValue &LHSVal = LHSResult.Val; 11552 const APValue &RHSVal = RHSResult.Val; 11553 11554 // Handle cases like (unsigned long)&a + 4. 
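// (Illustrative only; assumes 'unsigned long' is pointer-sized on the target.)
// This is the folding that lets a file-scope initializer such as
//
//   extern int a;
//   unsigned long p = (unsigned long)&a + 4;
//
// be evaluated to the address of 'a' plus a 4-byte offset instead of being
// rejected outright.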
11555 if (E->isAdditiveOp() && LHSVal.isLValue() && RHSVal.isInt()) { 11556 Result = LHSVal; 11557 addOrSubLValueAsInteger(Result, RHSVal.getInt(), E->getOpcode() == BO_Sub); 11558 return true; 11559 } 11560 11561 // Handle cases like 4 + (unsigned long)&a 11562 if (E->getOpcode() == BO_Add && 11563 RHSVal.isLValue() && LHSVal.isInt()) { 11564 Result = RHSVal; 11565 addOrSubLValueAsInteger(Result, LHSVal.getInt(), /*IsSub*/false); 11566 return true; 11567 } 11568 11569 if (E->getOpcode() == BO_Sub && LHSVal.isLValue() && RHSVal.isLValue()) { 11570 // Handle (intptr_t)&&A - (intptr_t)&&B. 11571 if (!LHSVal.getLValueOffset().isZero() || 11572 !RHSVal.getLValueOffset().isZero()) 11573 return false; 11574 const Expr *LHSExpr = LHSVal.getLValueBase().dyn_cast<const Expr*>(); 11575 const Expr *RHSExpr = RHSVal.getLValueBase().dyn_cast<const Expr*>(); 11576 if (!LHSExpr || !RHSExpr) 11577 return false; 11578 const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr); 11579 const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr); 11580 if (!LHSAddrExpr || !RHSAddrExpr) 11581 return false; 11582 // Make sure both labels come from the same function. 11583 if (LHSAddrExpr->getLabel()->getDeclContext() != 11584 RHSAddrExpr->getLabel()->getDeclContext()) 11585 return false; 11586 Result = APValue(LHSAddrExpr, RHSAddrExpr); 11587 return true; 11588 } 11589 11590 // All the remaining cases expect both operands to be an integer 11591 if (!LHSVal.isInt() || !RHSVal.isInt()) 11592 return Error(E); 11593 11594 // Set up the width and signedness manually, in case it can't be deduced 11595 // from the operation we're performing. 11596 // FIXME: Don't do this in the cases where we can deduce it. 11597 APSInt Value(Info.Ctx.getIntWidth(E->getType()), 11598 E->getType()->isUnsignedIntegerOrEnumerationType()); 11599 if (!handleIntIntBinOp(Info, E, LHSVal.getInt(), E->getOpcode(), 11600 RHSVal.getInt(), Value)) 11601 return false; 11602 return Success(Value, E, Result); 11603 } 11604 11605 void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) { 11606 Job &job = Queue.back(); 11607 11608 switch (job.Kind) { 11609 case Job::AnyExprKind: { 11610 if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(job.E)) { 11611 if (shouldEnqueue(Bop)) { 11612 job.Kind = Job::BinOpKind; 11613 enqueue(Bop->getLHS()); 11614 return; 11615 } 11616 } 11617 11618 EvaluateExpr(job.E, Result); 11619 Queue.pop_back(); 11620 return; 11621 } 11622 11623 case Job::BinOpKind: { 11624 const BinaryOperator *Bop = cast<BinaryOperator>(job.E); 11625 bool SuppressRHSDiags = false; 11626 if (!VisitBinOpLHSOnly(Result, Bop, SuppressRHSDiags)) { 11627 Queue.pop_back(); 11628 return; 11629 } 11630 if (SuppressRHSDiags) 11631 job.startSpeculativeEval(Info); 11632 job.LHSResult.swap(Result); 11633 job.Kind = Job::BinOpVisitedLHSKind; 11634 enqueue(Bop->getRHS()); 11635 return; 11636 } 11637 11638 case Job::BinOpVisitedLHSKind: { 11639 const BinaryOperator *Bop = cast<BinaryOperator>(job.E); 11640 EvalResult RHS; 11641 RHS.swap(Result); 11642 Result.Failed = !VisitBinOp(job.LHSResult, RHS, Bop, Result.Val); 11643 Queue.pop_back(); 11644 return; 11645 } 11646 } 11647 11648 llvm_unreachable("Invalid Job::Kind!"); 11649 } 11650 11651 namespace { 11652 /// Used when we determine that we should fail, but can keep evaluating prior to 11653 /// noting that we had a failure. 
11654 class DelayedNoteFailureRAII { 11655 EvalInfo &Info; 11656 bool NoteFailure; 11657 11658 public: 11659 DelayedNoteFailureRAII(EvalInfo &Info, bool NoteFailure = true) 11660 : Info(Info), NoteFailure(NoteFailure) {} 11661 ~DelayedNoteFailureRAII() { 11662 if (NoteFailure) { 11663 bool ContinueAfterFailure = Info.noteFailure(); 11664 (void)ContinueAfterFailure; 11665 assert(ContinueAfterFailure && 11666 "Shouldn't have kept evaluating on failure."); 11667 } 11668 } 11669 }; 11670 11671 enum class CmpResult { 11672 Unequal, 11673 Less, 11674 Equal, 11675 Greater, 11676 Unordered, 11677 }; 11678 } 11679 11680 template <class SuccessCB, class AfterCB> 11681 static bool 11682 EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E, 11683 SuccessCB &&Success, AfterCB &&DoAfter) { 11684 assert(E->isComparisonOp() && "expected comparison operator"); 11685 assert((E->getOpcode() == BO_Cmp || 11686 E->getType()->isIntegralOrEnumerationType()) && 11687 "unsupported binary expression evaluation"); 11688 auto Error = [&](const Expr *E) { 11689 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr); 11690 return false; 11691 }; 11692 11693 bool IsRelational = E->isRelationalOp() || E->getOpcode() == BO_Cmp; 11694 bool IsEquality = E->isEqualityOp(); 11695 11696 QualType LHSTy = E->getLHS()->getType(); 11697 QualType RHSTy = E->getRHS()->getType(); 11698 11699 if (LHSTy->isIntegralOrEnumerationType() && 11700 RHSTy->isIntegralOrEnumerationType()) { 11701 APSInt LHS, RHS; 11702 bool LHSOK = EvaluateInteger(E->getLHS(), LHS, Info); 11703 if (!LHSOK && !Info.noteFailure()) 11704 return false; 11705 if (!EvaluateInteger(E->getRHS(), RHS, Info) || !LHSOK) 11706 return false; 11707 if (LHS < RHS) 11708 return Success(CmpResult::Less, E); 11709 if (LHS > RHS) 11710 return Success(CmpResult::Greater, E); 11711 return Success(CmpResult::Equal, E); 11712 } 11713 11714 if (LHSTy->isFixedPointType() || RHSTy->isFixedPointType()) { 11715 APFixedPoint LHSFX(Info.Ctx.getFixedPointSemantics(LHSTy)); 11716 APFixedPoint RHSFX(Info.Ctx.getFixedPointSemantics(RHSTy)); 11717 11718 bool LHSOK = EvaluateFixedPointOrInteger(E->getLHS(), LHSFX, Info); 11719 if (!LHSOK && !Info.noteFailure()) 11720 return false; 11721 if (!EvaluateFixedPointOrInteger(E->getRHS(), RHSFX, Info) || !LHSOK) 11722 return false; 11723 if (LHSFX < RHSFX) 11724 return Success(CmpResult::Less, E); 11725 if (LHSFX > RHSFX) 11726 return Success(CmpResult::Greater, E); 11727 return Success(CmpResult::Equal, E); 11728 } 11729 11730 if (LHSTy->isAnyComplexType() || RHSTy->isAnyComplexType()) { 11731 ComplexValue LHS, RHS; 11732 bool LHSOK; 11733 if (E->isAssignmentOp()) { 11734 LValue LV; 11735 EvaluateLValue(E->getLHS(), LV, Info); 11736 LHSOK = false; 11737 } else if (LHSTy->isRealFloatingType()) { 11738 LHSOK = EvaluateFloat(E->getLHS(), LHS.FloatReal, Info); 11739 if (LHSOK) { 11740 LHS.makeComplexFloat(); 11741 LHS.FloatImag = APFloat(LHS.FloatReal.getSemantics()); 11742 } 11743 } else { 11744 LHSOK = EvaluateComplex(E->getLHS(), LHS, Info); 11745 } 11746 if (!LHSOK && !Info.noteFailure()) 11747 return false; 11748 11749 if (E->getRHS()->getType()->isRealFloatingType()) { 11750 if (!EvaluateFloat(E->getRHS(), RHS.FloatReal, Info) || !LHSOK) 11751 return false; 11752 RHS.makeComplexFloat(); 11753 RHS.FloatImag = APFloat(RHS.FloatReal.getSemantics()); 11754 } else if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK) 11755 return false; 11756 11757 if (LHS.isComplexFloat()) { 11758 APFloat::cmpResult CR_r = 11759 
LHS.getComplexFloatReal().compare(RHS.getComplexFloatReal()); 11760 APFloat::cmpResult CR_i = 11761 LHS.getComplexFloatImag().compare(RHS.getComplexFloatImag()); 11762 bool IsEqual = CR_r == APFloat::cmpEqual && CR_i == APFloat::cmpEqual; 11763 return Success(IsEqual ? CmpResult::Equal : CmpResult::Unequal, E); 11764 } else { 11765 assert(IsEquality && "invalid complex comparison"); 11766 bool IsEqual = LHS.getComplexIntReal() == RHS.getComplexIntReal() && 11767 LHS.getComplexIntImag() == RHS.getComplexIntImag(); 11768 return Success(IsEqual ? CmpResult::Equal : CmpResult::Unequal, E); 11769 } 11770 } 11771 11772 if (LHSTy->isRealFloatingType() && 11773 RHSTy->isRealFloatingType()) { 11774 APFloat RHS(0.0), LHS(0.0); 11775 11776 bool LHSOK = EvaluateFloat(E->getRHS(), RHS, Info); 11777 if (!LHSOK && !Info.noteFailure()) 11778 return false; 11779 11780 if (!EvaluateFloat(E->getLHS(), LHS, Info) || !LHSOK) 11781 return false; 11782 11783 assert(E->isComparisonOp() && "Invalid binary operator!"); 11784 auto GetCmpRes = [&]() { 11785 switch (LHS.compare(RHS)) { 11786 case APFloat::cmpEqual: 11787 return CmpResult::Equal; 11788 case APFloat::cmpLessThan: 11789 return CmpResult::Less; 11790 case APFloat::cmpGreaterThan: 11791 return CmpResult::Greater; 11792 case APFloat::cmpUnordered: 11793 return CmpResult::Unordered; 11794 } 11795 llvm_unreachable("Unrecognised APFloat::cmpResult enum"); 11796 }; 11797 return Success(GetCmpRes(), E); 11798 } 11799 11800 if (LHSTy->isPointerType() && RHSTy->isPointerType()) { 11801 LValue LHSValue, RHSValue; 11802 11803 bool LHSOK = EvaluatePointer(E->getLHS(), LHSValue, Info); 11804 if (!LHSOK && !Info.noteFailure()) 11805 return false; 11806 11807 if (!EvaluatePointer(E->getRHS(), RHSValue, Info) || !LHSOK) 11808 return false; 11809 11810 // Reject differing bases from the normal codepath; we special-case 11811 // comparisons to null. 11812 if (!HasSameBase(LHSValue, RHSValue)) { 11813 // Inequalities and subtractions between unrelated pointers have 11814 // unspecified or undefined behavior. 11815 if (!IsEquality) { 11816 Info.FFDiag(E, diag::note_constexpr_pointer_comparison_unspecified); 11817 return false; 11818 } 11819 // A constant address may compare equal to the address of a symbol. 11820 // The one exception is that address of an object cannot compare equal 11821 // to a null pointer constant. 11822 if ((!LHSValue.Base && !LHSValue.Offset.isZero()) || 11823 (!RHSValue.Base && !RHSValue.Offset.isZero())) 11824 return Error(E); 11825 // It's implementation-defined whether distinct literals will have 11826 // distinct addresses. In clang, the result of such a comparison is 11827 // unspecified, so it is not a constant expression. However, we do know 11828 // that the address of a literal will be non-null. 11829 if ((IsLiteralLValue(LHSValue) || IsLiteralLValue(RHSValue)) && 11830 LHSValue.Base && RHSValue.Base) 11831 return Error(E); 11832 // We can't tell whether weak symbols will end up pointing to the same 11833 // object. 11834 if (IsWeakLValue(LHSValue) || IsWeakLValue(RHSValue)) 11835 return Error(E); 11836 // We can't compare the address of the start of one object with the 11837 // past-the-end address of another object, per C++ DR1652. 
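// (Illustrative only.) For example, the following is rejected even though both
// operands are valid pointers:
//
//   constexpr int a[2] = {}, b = 0;
//   constexpr bool eq = (a + 2) == &b; // start of 'b' vs. one-past-the-end of 'a'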
11838 if ((LHSValue.Base && LHSValue.Offset.isZero() &&
11839 isOnePastTheEndOfCompleteObject(Info.Ctx, RHSValue)) ||
11840 (RHSValue.Base && RHSValue.Offset.isZero() &&
11841 isOnePastTheEndOfCompleteObject(Info.Ctx, LHSValue)))
11842 return Error(E);
11843 // We can't tell whether an object is at the same address as another
11844 // zero sized object.
11845 if ((RHSValue.Base && isZeroSized(LHSValue)) ||
11846 (LHSValue.Base && isZeroSized(RHSValue)))
11847 return Error(E);
11848 return Success(CmpResult::Unequal, E);
11849 }
11850
11851 const CharUnits &LHSOffset = LHSValue.getLValueOffset();
11852 const CharUnits &RHSOffset = RHSValue.getLValueOffset();
11853
11854 SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator();
11855 SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator();
11856
11857 // C++11 [expr.rel]p3:
11858 // Pointers to void (after pointer conversions) can be compared, with a
11859 // result defined as follows: If both pointers represent the same
11860 // address or are both the null pointer value, the result is true if the
11861 // operator is <= or >= and false otherwise; otherwise the result is
11862 // unspecified.
11863 // We interpret this as applying to pointers to *cv* void.
11864 if (LHSTy->isVoidPointerType() && LHSOffset != RHSOffset && IsRelational)
11865 Info.CCEDiag(E, diag::note_constexpr_void_comparison);
11866
11867 // C++11 [expr.rel]p2:
11868 // - If two pointers point to non-static data members of the same object,
11869 // or to subobjects or array elements of such members, recursively, the
11870 // pointer to the later declared member compares greater provided the
11871 // two members have the same access control and provided their class is
11872 // not a union.
11873 // [...]
11874 // - Otherwise pointer comparisons are unspecified.
11875 if (!LHSDesignator.Invalid && !RHSDesignator.Invalid && IsRelational) {
11876 bool WasArrayIndex;
11877 unsigned Mismatch = FindDesignatorMismatch(
11878 getType(LHSValue.Base), LHSDesignator, RHSDesignator, WasArrayIndex);
11879 // At the point where the designators diverge, the comparison has a
11880 // specified value if:
11881 // - we are comparing array indices
11882 // - we are comparing fields of a union, or fields with the same access
11883 // Otherwise, the result is unspecified and thus the comparison is not a
11884 // constant expression.
11885 if (!WasArrayIndex && Mismatch < LHSDesignator.Entries.size() &&
11886 Mismatch < RHSDesignator.Entries.size()) {
11887 const FieldDecl *LF = getAsField(LHSDesignator.Entries[Mismatch]);
11888 const FieldDecl *RF = getAsField(RHSDesignator.Entries[Mismatch]);
11889 if (!LF && !RF)
11890 Info.CCEDiag(E, diag::note_constexpr_pointer_comparison_base_classes);
11891 else if (!LF)
11892 Info.CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
11893 << getAsBaseClass(LHSDesignator.Entries[Mismatch])
11894 << RF->getParent() << RF;
11895 else if (!RF)
11896 Info.CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
11897 << getAsBaseClass(RHSDesignator.Entries[Mismatch])
11898 << LF->getParent() << LF;
11899 else if (!LF->getParent()->isUnion() &&
11900 LF->getAccess() != RF->getAccess())
11901 Info.CCEDiag(E,
11902 diag::note_constexpr_pointer_comparison_differing_access)
11903 << LF << LF->getAccess() << RF << RF->getAccess()
11904 << LF->getParent();
11905 }
11906 }
11907
11908 // The comparison here must be unsigned, and performed with the same
11909 // width as the pointer.
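// (Illustrative only.) Under the [expr.rel]p2 rule quoted above, comparing
// members of the same object does have a specified result, e.g.:
//
//   struct S { int a, b; };
//   constexpr S s{};
//   static_assert(&s.b > &s.a, "later-declared member compares greater");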
11910 unsigned PtrSize = Info.Ctx.getTypeSize(LHSTy);
11911 uint64_t CompareLHS = LHSOffset.getQuantity();
11912 uint64_t CompareRHS = RHSOffset.getQuantity();
11913 assert(PtrSize <= 64 && "Unexpected pointer width");
11914 uint64_t Mask = ~0ULL >> (64 - PtrSize);
11915 CompareLHS &= Mask;
11916 CompareRHS &= Mask;
11917
11918 // If there is a base and this is a relational operator, we can only
11919 // compare pointers within the object in question; otherwise, the result
11920 // depends on where the object is located in memory.
11921 if (!LHSValue.Base.isNull() && IsRelational) {
11922 QualType BaseTy = getType(LHSValue.Base);
11923 if (BaseTy->isIncompleteType())
11924 return Error(E);
11925 CharUnits Size = Info.Ctx.getTypeSizeInChars(BaseTy);
11926 uint64_t OffsetLimit = Size.getQuantity();
11927 if (CompareLHS > OffsetLimit || CompareRHS > OffsetLimit)
11928 return Error(E);
11929 }
11930
11931 if (CompareLHS < CompareRHS)
11932 return Success(CmpResult::Less, E);
11933 if (CompareLHS > CompareRHS)
11934 return Success(CmpResult::Greater, E);
11935 return Success(CmpResult::Equal, E);
11936 }
11937
11938 if (LHSTy->isMemberPointerType()) {
11939 assert(IsEquality && "unexpected member pointer operation");
11940 assert(RHSTy->isMemberPointerType() && "invalid comparison");
11941
11942 MemberPtr LHSValue, RHSValue;
11943
11944 bool LHSOK = EvaluateMemberPointer(E->getLHS(), LHSValue, Info);
11945 if (!LHSOK && !Info.noteFailure())
11946 return false;
11947
11948 if (!EvaluateMemberPointer(E->getRHS(), RHSValue, Info) || !LHSOK)
11949 return false;
11950
11951 // C++11 [expr.eq]p2:
11952 // If both operands are null, they compare equal. Otherwise if only one is
11953 // null, they compare unequal.
11954 if (!LHSValue.getDecl() || !RHSValue.getDecl()) {
11955 bool Equal = !LHSValue.getDecl() && !RHSValue.getDecl();
11956 return Success(Equal ? CmpResult::Equal : CmpResult::Unequal, E);
11957 }
11958
11959 // Otherwise if either is a pointer to a virtual member function, the
11960 // result is unspecified.
11961 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(LHSValue.getDecl()))
11962 if (MD->isVirtual())
11963 Info.CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
11964 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(RHSValue.getDecl()))
11965 if (MD->isVirtual())
11966 Info.CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
11967
11968 // Otherwise they compare equal if and only if they would refer to the
11969 // same member of the same most derived object or the same subobject if
11970 // they were dereferenced with a hypothetical object of the associated
11971 // class type.
11972 bool Equal = LHSValue == RHSValue;
11973 return Success(Equal ? CmpResult::Equal : CmpResult::Unequal, E);
11974 }
11975
11976 if (LHSTy->isNullPtrType()) {
11977 assert(E->isComparisonOp() && "unexpected nullptr operation");
11978 assert(RHSTy->isNullPtrType() && "missing pointer conversion");
11979 // C++11 [expr.rel]p4, [expr.eq]p3: If two operands of type std::nullptr_t
11980 // are compared, the result is true if the operator is <=, >= or ==, and
11981 // false otherwise.
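// (Illustrative only.) For instance:
//
//   static_assert(nullptr == nullptr, "all values of std::nullptr_t compare equal");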
11982 return Success(CmpResult::Equal, E); 11983 } 11984 11985 return DoAfter(); 11986 } 11987 11988 bool RecordExprEvaluator::VisitBinCmp(const BinaryOperator *E) { 11989 if (!CheckLiteralType(Info, E)) 11990 return false; 11991 11992 auto OnSuccess = [&](CmpResult CR, const BinaryOperator *E) { 11993 ComparisonCategoryResult CCR; 11994 switch (CR) { 11995 case CmpResult::Unequal: 11996 llvm_unreachable("should never produce Unequal for three-way comparison"); 11997 case CmpResult::Less: 11998 CCR = ComparisonCategoryResult::Less; 11999 break; 12000 case CmpResult::Equal: 12001 CCR = ComparisonCategoryResult::Equal; 12002 break; 12003 case CmpResult::Greater: 12004 CCR = ComparisonCategoryResult::Greater; 12005 break; 12006 case CmpResult::Unordered: 12007 CCR = ComparisonCategoryResult::Unordered; 12008 break; 12009 } 12010 // Evaluation succeeded. Lookup the information for the comparison category 12011 // type and fetch the VarDecl for the result. 12012 const ComparisonCategoryInfo &CmpInfo = 12013 Info.Ctx.CompCategories.getInfoForType(E->getType()); 12014 const VarDecl *VD = CmpInfo.getValueInfo(CmpInfo.makeWeakResult(CCR))->VD; 12015 // Check and evaluate the result as a constant expression. 12016 LValue LV; 12017 LV.set(VD); 12018 if (!handleLValueToRValueConversion(Info, E, E->getType(), LV, Result)) 12019 return false; 12020 return CheckConstantExpression(Info, E->getExprLoc(), E->getType(), Result); 12021 }; 12022 return EvaluateComparisonBinaryOperator(Info, E, OnSuccess, [&]() { 12023 return ExprEvaluatorBaseTy::VisitBinCmp(E); 12024 }); 12025 } 12026 12027 bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { 12028 // We don't call noteFailure immediately because the assignment happens after 12029 // we evaluate LHS and RHS. 12030 if (!Info.keepEvaluatingAfterFailure() && E->isAssignmentOp()) 12031 return Error(E); 12032 12033 DelayedNoteFailureRAII MaybeNoteFailureLater(Info, E->isAssignmentOp()); 12034 if (DataRecursiveIntBinOpEvaluator::shouldEnqueue(E)) 12035 return DataRecursiveIntBinOpEvaluator(*this, Result).Traverse(E); 12036 12037 assert((!E->getLHS()->getType()->isIntegralOrEnumerationType() || 12038 !E->getRHS()->getType()->isIntegralOrEnumerationType()) && 12039 "DataRecursiveIntBinOpEvaluator should have handled integral types"); 12040 12041 if (E->isComparisonOp()) { 12042 // Evaluate builtin binary comparisons by evaluating them as three-way 12043 // comparisons and then translating the result. 
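// (Illustrative only; assumes -std=c++2a and <compare>.) Both forms below are
// driven by EvaluateComparisonBinaryOperator: the spaceship result is produced
// by VisitBinCmp above, and the ordinary comparison by the translation below.
//
//   #include <compare>
//   static_assert((1 <=> 2) < 0, "");
//   static_assert(2.0 > 1.0, "");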
12044 auto OnSuccess = [&](CmpResult CR, const BinaryOperator *E) { 12045 assert((CR != CmpResult::Unequal || E->isEqualityOp()) && 12046 "should only produce Unequal for equality comparisons"); 12047 bool IsEqual = CR == CmpResult::Equal, 12048 IsLess = CR == CmpResult::Less, 12049 IsGreater = CR == CmpResult::Greater; 12050 auto Op = E->getOpcode(); 12051 switch (Op) { 12052 default: 12053 llvm_unreachable("unsupported binary operator"); 12054 case BO_EQ: 12055 case BO_NE: 12056 return Success(IsEqual == (Op == BO_EQ), E); 12057 case BO_LT: 12058 return Success(IsLess, E); 12059 case BO_GT: 12060 return Success(IsGreater, E); 12061 case BO_LE: 12062 return Success(IsEqual || IsLess, E); 12063 case BO_GE: 12064 return Success(IsEqual || IsGreater, E); 12065 } 12066 }; 12067 return EvaluateComparisonBinaryOperator(Info, E, OnSuccess, [&]() { 12068 return ExprEvaluatorBaseTy::VisitBinaryOperator(E); 12069 }); 12070 } 12071 12072 QualType LHSTy = E->getLHS()->getType(); 12073 QualType RHSTy = E->getRHS()->getType(); 12074 12075 if (LHSTy->isPointerType() && RHSTy->isPointerType() && 12076 E->getOpcode() == BO_Sub) { 12077 LValue LHSValue, RHSValue; 12078 12079 bool LHSOK = EvaluatePointer(E->getLHS(), LHSValue, Info); 12080 if (!LHSOK && !Info.noteFailure()) 12081 return false; 12082 12083 if (!EvaluatePointer(E->getRHS(), RHSValue, Info) || !LHSOK) 12084 return false; 12085 12086 // Reject differing bases from the normal codepath; we special-case 12087 // comparisons to null. 12088 if (!HasSameBase(LHSValue, RHSValue)) { 12089 // Handle &&A - &&B. 12090 if (!LHSValue.Offset.isZero() || !RHSValue.Offset.isZero()) 12091 return Error(E); 12092 const Expr *LHSExpr = LHSValue.Base.dyn_cast<const Expr *>(); 12093 const Expr *RHSExpr = RHSValue.Base.dyn_cast<const Expr *>(); 12094 if (!LHSExpr || !RHSExpr) 12095 return Error(E); 12096 const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr); 12097 const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr); 12098 if (!LHSAddrExpr || !RHSAddrExpr) 12099 return Error(E); 12100 // Make sure both labels come from the same function. 12101 if (LHSAddrExpr->getLabel()->getDeclContext() != 12102 RHSAddrExpr->getLabel()->getDeclContext()) 12103 return Error(E); 12104 return Success(APValue(LHSAddrExpr, RHSAddrExpr), E); 12105 } 12106 const CharUnits &LHSOffset = LHSValue.getLValueOffset(); 12107 const CharUnits &RHSOffset = RHSValue.getLValueOffset(); 12108 12109 SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator(); 12110 SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator(); 12111 12112 // C++11 [expr.add]p6: 12113 // Unless both pointers point to elements of the same array object, or 12114 // one past the last element of the array object, the behavior is 12115 // undefined. 12116 if (!LHSDesignator.Invalid && !RHSDesignator.Invalid && 12117 !AreElementsOfSameArray(getType(LHSValue.Base), LHSDesignator, 12118 RHSDesignator)) 12119 Info.CCEDiag(E, diag::note_constexpr_pointer_subtraction_not_same_array); 12120 12121 QualType Type = E->getLHS()->getType(); 12122 QualType ElementType = Type->castAs<PointerType>()->getPointeeType(); 12123 12124 CharUnits ElementSize; 12125 if (!HandleSizeof(Info, E->getExprLoc(), ElementType, ElementSize)) 12126 return false; 12127 12128 // As an extension, a type may have zero size (empty struct or union in 12129 // C, array of zero length). Pointer subtraction in such cases has 12130 // undefined behavior, so is not constant. 
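// (Illustrative only.) Subtraction within a single array folds; subtraction of
// pointers into distinct objects does not:
//
//   constexpr int arr[4] = {};
//   static_assert(&arr[3] - &arr[1] == 2, "");
//   constexpr int x = 0, y = 0;
//   constexpr auto d = &x - &y; // rejected: not elements of the same array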
12131 if (ElementSize.isZero()) { 12132 Info.FFDiag(E, diag::note_constexpr_pointer_subtraction_zero_size) 12133 << ElementType; 12134 return false; 12135 } 12136 12137 // FIXME: LLVM and GCC both compute LHSOffset - RHSOffset at runtime, 12138 // and produce incorrect results when it overflows. Such behavior 12139 // appears to be non-conforming, but is common, so perhaps we should 12140 // assume the standard intended for such cases to be undefined behavior 12141 // and check for them. 12142 12143 // Compute (LHSOffset - RHSOffset) / Size carefully, checking for 12144 // overflow in the final conversion to ptrdiff_t. 12145 APSInt LHS(llvm::APInt(65, (int64_t)LHSOffset.getQuantity(), true), false); 12146 APSInt RHS(llvm::APInt(65, (int64_t)RHSOffset.getQuantity(), true), false); 12147 APSInt ElemSize(llvm::APInt(65, (int64_t)ElementSize.getQuantity(), true), 12148 false); 12149 APSInt TrueResult = (LHS - RHS) / ElemSize; 12150 APSInt Result = TrueResult.trunc(Info.Ctx.getIntWidth(E->getType())); 12151 12152 if (Result.extend(65) != TrueResult && 12153 !HandleOverflow(Info, E, TrueResult, E->getType())) 12154 return false; 12155 return Success(Result, E); 12156 } 12157 12158 return ExprEvaluatorBaseTy::VisitBinaryOperator(E); 12159 } 12160 12161 /// VisitUnaryExprOrTypeTraitExpr - Evaluate a sizeof, alignof or vec_step with 12162 /// a result as the expression's type. 12163 bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr( 12164 const UnaryExprOrTypeTraitExpr *E) { 12165 switch(E->getKind()) { 12166 case UETT_PreferredAlignOf: 12167 case UETT_AlignOf: { 12168 if (E->isArgumentType()) 12169 return Success(GetAlignOfType(Info, E->getArgumentType(), E->getKind()), 12170 E); 12171 else 12172 return Success(GetAlignOfExpr(Info, E->getArgumentExpr(), E->getKind()), 12173 E); 12174 } 12175 12176 case UETT_VecStep: { 12177 QualType Ty = E->getTypeOfArgument(); 12178 12179 if (Ty->isVectorType()) { 12180 unsigned n = Ty->castAs<VectorType>()->getNumElements(); 12181 12182 // The vec_step built-in functions that take a 3-component 12183 // vector return 4. (OpenCL 1.1 spec 6.11.12) 12184 if (n == 3) 12185 n = 4; 12186 12187 return Success(n, E); 12188 } else 12189 return Success(1, E); 12190 } 12191 12192 case UETT_SizeOf: { 12193 QualType SrcTy = E->getTypeOfArgument(); 12194 // C++ [expr.sizeof]p2: "When applied to a reference or a reference type, 12195 // the result is the size of the referenced type." 
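// (Illustrative only.) For example:
//
//   static_assert(sizeof(int&) == sizeof(int), "sizeof looks through references");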
12196 if (const ReferenceType *Ref = SrcTy->getAs<ReferenceType>()) 12197 SrcTy = Ref->getPointeeType(); 12198 12199 CharUnits Sizeof; 12200 if (!HandleSizeof(Info, E->getExprLoc(), SrcTy, Sizeof)) 12201 return false; 12202 return Success(Sizeof, E); 12203 } 12204 case UETT_OpenMPRequiredSimdAlign: 12205 assert(E->isArgumentType()); 12206 return Success( 12207 Info.Ctx.toCharUnitsFromBits( 12208 Info.Ctx.getOpenMPDefaultSimdAlign(E->getArgumentType())) 12209 .getQuantity(), 12210 E); 12211 } 12212 12213 llvm_unreachable("unknown expr/type trait"); 12214 } 12215 12216 bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) { 12217 CharUnits Result; 12218 unsigned n = OOE->getNumComponents(); 12219 if (n == 0) 12220 return Error(OOE); 12221 QualType CurrentType = OOE->getTypeSourceInfo()->getType(); 12222 for (unsigned i = 0; i != n; ++i) { 12223 OffsetOfNode ON = OOE->getComponent(i); 12224 switch (ON.getKind()) { 12225 case OffsetOfNode::Array: { 12226 const Expr *Idx = OOE->getIndexExpr(ON.getArrayExprIndex()); 12227 APSInt IdxResult; 12228 if (!EvaluateInteger(Idx, IdxResult, Info)) 12229 return false; 12230 const ArrayType *AT = Info.Ctx.getAsArrayType(CurrentType); 12231 if (!AT) 12232 return Error(OOE); 12233 CurrentType = AT->getElementType(); 12234 CharUnits ElementSize = Info.Ctx.getTypeSizeInChars(CurrentType); 12235 Result += IdxResult.getSExtValue() * ElementSize; 12236 break; 12237 } 12238 12239 case OffsetOfNode::Field: { 12240 FieldDecl *MemberDecl = ON.getField(); 12241 const RecordType *RT = CurrentType->getAs<RecordType>(); 12242 if (!RT) 12243 return Error(OOE); 12244 RecordDecl *RD = RT->getDecl(); 12245 if (RD->isInvalidDecl()) return false; 12246 const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD); 12247 unsigned i = MemberDecl->getFieldIndex(); 12248 assert(i < RL.getFieldCount() && "offsetof field in wrong type"); 12249 Result += Info.Ctx.toCharUnitsFromBits(RL.getFieldOffset(i)); 12250 CurrentType = MemberDecl->getType().getNonReferenceType(); 12251 break; 12252 } 12253 12254 case OffsetOfNode::Identifier: 12255 llvm_unreachable("dependent __builtin_offsetof"); 12256 12257 case OffsetOfNode::Base: { 12258 CXXBaseSpecifier *BaseSpec = ON.getBase(); 12259 if (BaseSpec->isVirtual()) 12260 return Error(OOE); 12261 12262 // Find the layout of the class whose base we are looking into. 12263 const RecordType *RT = CurrentType->getAs<RecordType>(); 12264 if (!RT) 12265 return Error(OOE); 12266 RecordDecl *RD = RT->getDecl(); 12267 if (RD->isInvalidDecl()) return false; 12268 const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD); 12269 12270 // Find the base class itself. 12271 CurrentType = BaseSpec->getType(); 12272 const RecordType *BaseRT = CurrentType->getAs<RecordType>(); 12273 if (!BaseRT) 12274 return Error(OOE); 12275 12276 // Add the offset to the base. 12277 Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl())); 12278 break; 12279 } 12280 } 12281 } 12282 return Success(Result, OOE); 12283 } 12284 12285 bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) { 12286 switch (E->getOpcode()) { 12287 default: 12288 // Address, indirect, pre/post inc/dec, etc are not valid constant exprs. 12289 // See C99 6.6p3. 12290 return Error(E); 12291 case UO_Extension: 12292 // FIXME: Should extension allow i-c-e extension expressions in its scope? 12293 // If so, we could clear the diagnostic ID. 12294 return Visit(E->getSubExpr()); 12295 case UO_Plus: 12296 // The result is just the value. 
12297 return Visit(E->getSubExpr()); 12298 case UO_Minus: { 12299 if (!Visit(E->getSubExpr())) 12300 return false; 12301 if (!Result.isInt()) return Error(E); 12302 const APSInt &Value = Result.getInt(); 12303 if (Value.isSigned() && Value.isMinSignedValue() && E->canOverflow() && 12304 !HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1), 12305 E->getType())) 12306 return false; 12307 return Success(-Value, E); 12308 } 12309 case UO_Not: { 12310 if (!Visit(E->getSubExpr())) 12311 return false; 12312 if (!Result.isInt()) return Error(E); 12313 return Success(~Result.getInt(), E); 12314 } 12315 case UO_LNot: { 12316 bool bres; 12317 if (!EvaluateAsBooleanCondition(E->getSubExpr(), bres, Info)) 12318 return false; 12319 return Success(!bres, E); 12320 } 12321 } 12322 } 12323 12324 /// HandleCast - This is used to evaluate implicit or explicit casts where the 12325 /// result type is integer. 12326 bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) { 12327 const Expr *SubExpr = E->getSubExpr(); 12328 QualType DestType = E->getType(); 12329 QualType SrcType = SubExpr->getType(); 12330 12331 switch (E->getCastKind()) { 12332 case CK_BaseToDerived: 12333 case CK_DerivedToBase: 12334 case CK_UncheckedDerivedToBase: 12335 case CK_Dynamic: 12336 case CK_ToUnion: 12337 case CK_ArrayToPointerDecay: 12338 case CK_FunctionToPointerDecay: 12339 case CK_NullToPointer: 12340 case CK_NullToMemberPointer: 12341 case CK_BaseToDerivedMemberPointer: 12342 case CK_DerivedToBaseMemberPointer: 12343 case CK_ReinterpretMemberPointer: 12344 case CK_ConstructorConversion: 12345 case CK_IntegralToPointer: 12346 case CK_ToVoid: 12347 case CK_VectorSplat: 12348 case CK_IntegralToFloating: 12349 case CK_FloatingCast: 12350 case CK_CPointerToObjCPointerCast: 12351 case CK_BlockPointerToObjCPointerCast: 12352 case CK_AnyPointerToBlockPointerCast: 12353 case CK_ObjCObjectLValueCast: 12354 case CK_FloatingRealToComplex: 12355 case CK_FloatingComplexToReal: 12356 case CK_FloatingComplexCast: 12357 case CK_FloatingComplexToIntegralComplex: 12358 case CK_IntegralRealToComplex: 12359 case CK_IntegralComplexCast: 12360 case CK_IntegralComplexToFloatingComplex: 12361 case CK_BuiltinFnToFnPtr: 12362 case CK_ZeroToOCLOpaqueType: 12363 case CK_NonAtomicToAtomic: 12364 case CK_AddressSpaceConversion: 12365 case CK_IntToOCLSampler: 12366 case CK_FixedPointCast: 12367 case CK_IntegralToFixedPoint: 12368 llvm_unreachable("invalid cast kind for integral value"); 12369 12370 case CK_BitCast: 12371 case CK_Dependent: 12372 case CK_LValueBitCast: 12373 case CK_ARCProduceObject: 12374 case CK_ARCConsumeObject: 12375 case CK_ARCReclaimReturnedObject: 12376 case CK_ARCExtendBlockObject: 12377 case CK_CopyAndAutoreleaseBlockObject: 12378 return Error(E); 12379 12380 case CK_UserDefinedConversion: 12381 case CK_LValueToRValue: 12382 case CK_AtomicToNonAtomic: 12383 case CK_NoOp: 12384 case CK_LValueToRValueBitCast: 12385 return ExprEvaluatorBaseTy::VisitCastExpr(E); 12386 12387 case CK_MemberPointerToBoolean: 12388 case CK_PointerToBoolean: 12389 case CK_IntegralToBoolean: 12390 case CK_FloatingToBoolean: 12391 case CK_BooleanToSignedIntegral: 12392 case CK_FloatingComplexToBoolean: 12393 case CK_IntegralComplexToBoolean: { 12394 bool BoolResult; 12395 if (!EvaluateAsBooleanCondition(SubExpr, BoolResult, Info)) 12396 return false; 12397 uint64_t IntResult = BoolResult; 12398 if (BoolResult && E->getCastKind() == CK_BooleanToSignedIntegral) 12399 IntResult = (uint64_t)-1; 12400 return Success(IntResult, E); 12401 } 12402 12403 
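// (Illustrative only.) The boolean-conversion cases above cover folds such as:
//
//   constexpr int *p = nullptr;
//   static_assert(static_cast<bool>(0.25), "");  // CK_FloatingToBoolean
//   static_assert(!static_cast<bool>(p), "");    // CK_PointerToBoolean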
case CK_FixedPointToIntegral: { 12404 APFixedPoint Src(Info.Ctx.getFixedPointSemantics(SrcType)); 12405 if (!EvaluateFixedPoint(SubExpr, Src, Info)) 12406 return false; 12407 bool Overflowed; 12408 llvm::APSInt Result = Src.convertToInt( 12409 Info.Ctx.getIntWidth(DestType), 12410 DestType->isSignedIntegerOrEnumerationType(), &Overflowed); 12411 if (Overflowed && !HandleOverflow(Info, E, Result, DestType)) 12412 return false; 12413 return Success(Result, E); 12414 } 12415 12416 case CK_FixedPointToBoolean: { 12417 // Unsigned padding does not affect this. 12418 APValue Val; 12419 if (!Evaluate(Val, Info, SubExpr)) 12420 return false; 12421 return Success(Val.getFixedPoint().getBoolValue(), E); 12422 } 12423 12424 case CK_IntegralCast: { 12425 if (!Visit(SubExpr)) 12426 return false; 12427 12428 if (!Result.isInt()) { 12429 // Allow casts of address-of-label differences if they are no-ops 12430 // or narrowing. (The narrowing case isn't actually guaranteed to 12431 // be constant-evaluatable except in some narrow cases which are hard 12432 // to detect here. We let it through on the assumption the user knows 12433 // what they are doing.) 12434 if (Result.isAddrLabelDiff()) 12435 return Info.Ctx.getTypeSize(DestType) <= Info.Ctx.getTypeSize(SrcType); 12436 // Only allow casts of lvalues if they are lossless. 12437 return Info.Ctx.getTypeSize(DestType) == Info.Ctx.getTypeSize(SrcType); 12438 } 12439 12440 return Success(HandleIntToIntCast(Info, E, DestType, SrcType, 12441 Result.getInt()), E); 12442 } 12443 12444 case CK_PointerToIntegral: { 12445 CCEDiag(E, diag::note_constexpr_invalid_cast) << 2; 12446 12447 LValue LV; 12448 if (!EvaluatePointer(SubExpr, LV, Info)) 12449 return false; 12450 12451 if (LV.getLValueBase()) { 12452 // Only allow based lvalue casts if they are lossless. 12453 // FIXME: Allow a larger integer size than the pointer size, and allow 12454 // narrowing back down to pointer width in subsequent integral casts. 12455 // FIXME: Check integer type's active bits, not its type size. 
12456 if (Info.Ctx.getTypeSize(DestType) != Info.Ctx.getTypeSize(SrcType)) 12457 return Error(E); 12458 12459 LV.Designator.setInvalid(); 12460 LV.moveInto(Result); 12461 return true; 12462 } 12463 12464 APSInt AsInt; 12465 APValue V; 12466 LV.moveInto(V); 12467 if (!V.toIntegralConstant(AsInt, SrcType, Info.Ctx)) 12468 llvm_unreachable("Can't cast this!"); 12469 12470 return Success(HandleIntToIntCast(Info, E, DestType, SrcType, AsInt), E); 12471 } 12472 12473 case CK_IntegralComplexToReal: { 12474 ComplexValue C; 12475 if (!EvaluateComplex(SubExpr, C, Info)) 12476 return false; 12477 return Success(C.getComplexIntReal(), E); 12478 } 12479 12480 case CK_FloatingToIntegral: { 12481 APFloat F(0.0); 12482 if (!EvaluateFloat(SubExpr, F, Info)) 12483 return false; 12484 12485 APSInt Value; 12486 if (!HandleFloatToIntCast(Info, E, SrcType, F, DestType, Value)) 12487 return false; 12488 return Success(Value, E); 12489 } 12490 } 12491 12492 llvm_unreachable("unknown cast resulting in integral value"); 12493 } 12494 12495 bool IntExprEvaluator::VisitUnaryReal(const UnaryOperator *E) { 12496 if (E->getSubExpr()->getType()->isAnyComplexType()) { 12497 ComplexValue LV; 12498 if (!EvaluateComplex(E->getSubExpr(), LV, Info)) 12499 return false; 12500 if (!LV.isComplexInt()) 12501 return Error(E); 12502 return Success(LV.getComplexIntReal(), E); 12503 } 12504 12505 return Visit(E->getSubExpr()); 12506 } 12507 12508 bool IntExprEvaluator::VisitUnaryImag(const UnaryOperator *E) { 12509 if (E->getSubExpr()->getType()->isComplexIntegerType()) { 12510 ComplexValue LV; 12511 if (!EvaluateComplex(E->getSubExpr(), LV, Info)) 12512 return false; 12513 if (!LV.isComplexInt()) 12514 return Error(E); 12515 return Success(LV.getComplexIntImag(), E); 12516 } 12517 12518 VisitIgnoredValue(E->getSubExpr()); 12519 return Success(0, E); 12520 } 12521 12522 bool IntExprEvaluator::VisitSizeOfPackExpr(const SizeOfPackExpr *E) { 12523 return Success(E->getPackLength(), E); 12524 } 12525 12526 bool IntExprEvaluator::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) { 12527 return Success(E->getValue(), E); 12528 } 12529 12530 bool IntExprEvaluator::VisitConceptSpecializationExpr( 12531 const ConceptSpecializationExpr *E) { 12532 return Success(E->isSatisfied(), E); 12533 } 12534 12535 bool IntExprEvaluator::VisitRequiresExpr(const RequiresExpr *E) { 12536 return Success(E->isSatisfied(), E); 12537 } 12538 12539 bool FixedPointExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) { 12540 switch (E->getOpcode()) { 12541 default: 12542 // Invalid unary operators 12543 return Error(E); 12544 case UO_Plus: 12545 // The result is just the value. 
12546 return Visit(E->getSubExpr()); 12547 case UO_Minus: { 12548 if (!Visit(E->getSubExpr())) return false; 12549 if (!Result.isFixedPoint()) 12550 return Error(E); 12551 bool Overflowed; 12552 APFixedPoint Negated = Result.getFixedPoint().negate(&Overflowed); 12553 if (Overflowed && !HandleOverflow(Info, E, Negated, E->getType())) 12554 return false; 12555 return Success(Negated, E); 12556 } 12557 case UO_LNot: { 12558 bool bres; 12559 if (!EvaluateAsBooleanCondition(E->getSubExpr(), bres, Info)) 12560 return false; 12561 return Success(!bres, E); 12562 } 12563 } 12564 } 12565 12566 bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) { 12567 const Expr *SubExpr = E->getSubExpr(); 12568 QualType DestType = E->getType(); 12569 assert(DestType->isFixedPointType() && 12570 "Expected destination type to be a fixed point type"); 12571 auto DestFXSema = Info.Ctx.getFixedPointSemantics(DestType); 12572 12573 switch (E->getCastKind()) { 12574 case CK_FixedPointCast: { 12575 APFixedPoint Src(Info.Ctx.getFixedPointSemantics(SubExpr->getType())); 12576 if (!EvaluateFixedPoint(SubExpr, Src, Info)) 12577 return false; 12578 bool Overflowed; 12579 APFixedPoint Result = Src.convert(DestFXSema, &Overflowed); 12580 if (Overflowed && !HandleOverflow(Info, E, Result, DestType)) 12581 return false; 12582 return Success(Result, E); 12583 } 12584 case CK_IntegralToFixedPoint: { 12585 APSInt Src; 12586 if (!EvaluateInteger(SubExpr, Src, Info)) 12587 return false; 12588 12589 bool Overflowed; 12590 APFixedPoint IntResult = APFixedPoint::getFromIntValue( 12591 Src, Info.Ctx.getFixedPointSemantics(DestType), &Overflowed); 12592 12593 if (Overflowed && !HandleOverflow(Info, E, IntResult, DestType)) 12594 return false; 12595 12596 return Success(IntResult, E); 12597 } 12598 case CK_NoOp: 12599 case CK_LValueToRValue: 12600 return ExprEvaluatorBaseTy::VisitCastExpr(E); 12601 default: 12602 return Error(E); 12603 } 12604 } 12605 12606 bool FixedPointExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { 12607 const Expr *LHS = E->getLHS(); 12608 const Expr *RHS = E->getRHS(); 12609 FixedPointSemantics ResultFXSema = 12610 Info.Ctx.getFixedPointSemantics(E->getType()); 12611 12612 APFixedPoint LHSFX(Info.Ctx.getFixedPointSemantics(LHS->getType())); 12613 if (!EvaluateFixedPointOrInteger(LHS, LHSFX, Info)) 12614 return false; 12615 APFixedPoint RHSFX(Info.Ctx.getFixedPointSemantics(RHS->getType())); 12616 if (!EvaluateFixedPointOrInteger(RHS, RHSFX, Info)) 12617 return false; 12618 12619 switch (E->getOpcode()) { 12620 case BO_Add: { 12621 bool AddOverflow, ConversionOverflow; 12622 APFixedPoint Result = LHSFX.add(RHSFX, &AddOverflow) 12623 .convert(ResultFXSema, &ConversionOverflow); 12624 if ((AddOverflow || ConversionOverflow) && 12625 !HandleOverflow(Info, E, Result, E->getType())) 12626 return false; 12627 return Success(Result, E); 12628 } 12629 default: 12630 return false; 12631 } 12632 llvm_unreachable("Should've exited before this"); 12633 } 12634 12635 //===----------------------------------------------------------------------===// 12636 // Float Evaluation 12637 //===----------------------------------------------------------------------===// 12638 12639 namespace { 12640 class FloatExprEvaluator 12641 : public ExprEvaluatorBase<FloatExprEvaluator> { 12642 APFloat &Result; 12643 public: 12644 FloatExprEvaluator(EvalInfo &info, APFloat &result) 12645 : ExprEvaluatorBaseTy(info), Result(result) {} 12646 12647 bool Success(const APValue &V, const Expr *e) { 12648 Result = V.getFloat(); 12649 
return true;
12650 }
12651
12652 bool ZeroInitialization(const Expr *E) {
12653 Result = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(E->getType()));
12654 return true;
12655 }
12656
12657 bool VisitCallExpr(const CallExpr *E);
12658
12659 bool VisitUnaryOperator(const UnaryOperator *E);
12660 bool VisitBinaryOperator(const BinaryOperator *E);
12661 bool VisitFloatingLiteral(const FloatingLiteral *E);
12662 bool VisitCastExpr(const CastExpr *E);
12663
12664 bool VisitUnaryReal(const UnaryOperator *E);
12665 bool VisitUnaryImag(const UnaryOperator *E);
12666
12667 // FIXME: Missing: array subscript of vector, member of vector
12668 };
12669 } // end anonymous namespace
12670
12671 static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) {
12672 assert(E->isRValue() && E->getType()->isRealFloatingType());
12673 return FloatExprEvaluator(Info, Result).Visit(E);
12674 }
12675
12676 static bool TryEvaluateBuiltinNaN(const ASTContext &Context,
12677 QualType ResultTy,
12678 const Expr *Arg,
12679 bool SNaN,
12680 llvm::APFloat &Result) {
12681 const StringLiteral *S = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
12682 if (!S) return false;
12683
12684 const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(ResultTy);
12685
12686 llvm::APInt fill;
12687
12688 // Treat empty strings as if they were zero.
12689 if (S->getString().empty())
12690 fill = llvm::APInt(32, 0);
12691 else if (S->getString().getAsInteger(0, fill))
12692 return false;
12693
12694 if (Context.getTargetInfo().isNan2008()) {
12695 if (SNaN)
12696 Result = llvm::APFloat::getSNaN(Sem, false, &fill);
12697 else
12698 Result = llvm::APFloat::getQNaN(Sem, false, &fill);
12699 } else {
12700 // Prior to IEEE 754-2008, architectures were allowed to choose whether
12701 // the first bit of their significand was set for qNaN or sNaN. MIPS chose
12702 // a different encoding from what became a standard in 2008, and for pre-
12703 // 2008 revisions, MIPS interpreted sNaN-2008 as qNaN and qNaN-2008 as
12704 // sNaN. This is now known as "legacy NaN" encoding.
12705 if (SNaN)
12706 Result = llvm::APFloat::getQNaN(Sem, false, &fill);
12707 else
12708 Result = llvm::APFloat::getSNaN(Sem, false, &fill);
12709 }
12710
12711 return true;
12712 }
12713
12714 bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
12715 switch (E->getBuiltinCallee()) {
12716 default:
12717 return ExprEvaluatorBaseTy::VisitCallExpr(E);
12718
12719 case Builtin::BI__builtin_huge_val:
12720 case Builtin::BI__builtin_huge_valf:
12721 case Builtin::BI__builtin_huge_vall:
12722 case Builtin::BI__builtin_huge_valf128:
12723 case Builtin::BI__builtin_inf:
12724 case Builtin::BI__builtin_inff:
12725 case Builtin::BI__builtin_infl:
12726 case Builtin::BI__builtin_inff128: {
12727 const llvm::fltSemantics &Sem =
12728 Info.Ctx.getFloatTypeSemantics(E->getType());
12729 Result = llvm::APFloat::getInf(Sem);
12730 return true;
12731 }
12732
12733 case Builtin::BI__builtin_nans:
12734 case Builtin::BI__builtin_nansf:
12735 case Builtin::BI__builtin_nansl:
12736 case Builtin::BI__builtin_nansf128:
12737 if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
12738 true, Result))
12739 return Error(E);
12740 return true;
12741
12742 case Builtin::BI__builtin_nan:
12743 case Builtin::BI__builtin_nanf:
12744 case Builtin::BI__builtin_nanl:
12745 case Builtin::BI__builtin_nanf128:
12746 // If this is __builtin_nan() turn this into a nan, otherwise we
12747 // can't constant fold it.
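      // Editorial illustration (hedged, derived from TryEvaluateBuiltinNaN
      // above): an empty payload string is treated as zero, so
      // __builtin_nan("") folds to a default quiet NaN; a payload accepted by
      // getAsInteger(0, ...), e.g. "0x7f", supplies the fill bits; anything
      // else, e.g. __builtin_nan("bogus"), is not foldable and reaches
      // Error(E) below.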
12748 if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0), 12749 false, Result)) 12750 return Error(E); 12751 return true; 12752 12753 case Builtin::BI__builtin_fabs: 12754 case Builtin::BI__builtin_fabsf: 12755 case Builtin::BI__builtin_fabsl: 12756 case Builtin::BI__builtin_fabsf128: 12757 if (!EvaluateFloat(E->getArg(0), Result, Info)) 12758 return false; 12759 12760 if (Result.isNegative()) 12761 Result.changeSign(); 12762 return true; 12763 12764 // FIXME: Builtin::BI__builtin_powi 12765 // FIXME: Builtin::BI__builtin_powif 12766 // FIXME: Builtin::BI__builtin_powil 12767 12768 case Builtin::BI__builtin_copysign: 12769 case Builtin::BI__builtin_copysignf: 12770 case Builtin::BI__builtin_copysignl: 12771 case Builtin::BI__builtin_copysignf128: { 12772 APFloat RHS(0.); 12773 if (!EvaluateFloat(E->getArg(0), Result, Info) || 12774 !EvaluateFloat(E->getArg(1), RHS, Info)) 12775 return false; 12776 Result.copySign(RHS); 12777 return true; 12778 } 12779 } 12780 } 12781 12782 bool FloatExprEvaluator::VisitUnaryReal(const UnaryOperator *E) { 12783 if (E->getSubExpr()->getType()->isAnyComplexType()) { 12784 ComplexValue CV; 12785 if (!EvaluateComplex(E->getSubExpr(), CV, Info)) 12786 return false; 12787 Result = CV.FloatReal; 12788 return true; 12789 } 12790 12791 return Visit(E->getSubExpr()); 12792 } 12793 12794 bool FloatExprEvaluator::VisitUnaryImag(const UnaryOperator *E) { 12795 if (E->getSubExpr()->getType()->isAnyComplexType()) { 12796 ComplexValue CV; 12797 if (!EvaluateComplex(E->getSubExpr(), CV, Info)) 12798 return false; 12799 Result = CV.FloatImag; 12800 return true; 12801 } 12802 12803 VisitIgnoredValue(E->getSubExpr()); 12804 const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(E->getType()); 12805 Result = llvm::APFloat::getZero(Sem); 12806 return true; 12807 } 12808 12809 bool FloatExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) { 12810 switch (E->getOpcode()) { 12811 default: return Error(E); 12812 case UO_Plus: 12813 return EvaluateFloat(E->getSubExpr(), Result, Info); 12814 case UO_Minus: 12815 if (!EvaluateFloat(E->getSubExpr(), Result, Info)) 12816 return false; 12817 Result.changeSign(); 12818 return true; 12819 } 12820 } 12821 12822 bool FloatExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { 12823 if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma) 12824 return ExprEvaluatorBaseTy::VisitBinaryOperator(E); 12825 12826 APFloat RHS(0.0); 12827 bool LHSOK = EvaluateFloat(E->getLHS(), Result, Info); 12828 if (!LHSOK && !Info.noteFailure()) 12829 return false; 12830 return EvaluateFloat(E->getRHS(), RHS, Info) && LHSOK && 12831 handleFloatFloatBinOp(Info, E, Result, E->getOpcode(), RHS); 12832 } 12833 12834 bool FloatExprEvaluator::VisitFloatingLiteral(const FloatingLiteral *E) { 12835 Result = E->getValue(); 12836 return true; 12837 } 12838 12839 bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) { 12840 const Expr* SubExpr = E->getSubExpr(); 12841 12842 switch (E->getCastKind()) { 12843 default: 12844 return ExprEvaluatorBaseTy::VisitCastExpr(E); 12845 12846 case CK_IntegralToFloating: { 12847 APSInt IntResult; 12848 return EvaluateInteger(SubExpr, IntResult, Info) && 12849 HandleIntToFloatCast(Info, E, SubExpr->getType(), IntResult, 12850 E->getType(), Result); 12851 } 12852 12853 case CK_FloatingCast: { 12854 if (!Visit(SubExpr)) 12855 return false; 12856 return HandleFloatToFloatCast(Info, E, SubExpr->getType(), E->getType(), 12857 Result); 12858 } 12859 12860 case CK_FloatingComplexToReal: { 12861 
ComplexValue V; 12862 if (!EvaluateComplex(SubExpr, V, Info)) 12863 return false; 12864 Result = V.getComplexFloatReal(); 12865 return true; 12866 } 12867 } 12868 } 12869 12870 //===----------------------------------------------------------------------===// 12871 // Complex Evaluation (for float and integer) 12872 //===----------------------------------------------------------------------===// 12873 12874 namespace { 12875 class ComplexExprEvaluator 12876 : public ExprEvaluatorBase<ComplexExprEvaluator> { 12877 ComplexValue &Result; 12878 12879 public: 12880 ComplexExprEvaluator(EvalInfo &info, ComplexValue &Result) 12881 : ExprEvaluatorBaseTy(info), Result(Result) {} 12882 12883 bool Success(const APValue &V, const Expr *e) { 12884 Result.setFrom(V); 12885 return true; 12886 } 12887 12888 bool ZeroInitialization(const Expr *E); 12889 12890 //===--------------------------------------------------------------------===// 12891 // Visitor Methods 12892 //===--------------------------------------------------------------------===// 12893 12894 bool VisitImaginaryLiteral(const ImaginaryLiteral *E); 12895 bool VisitCastExpr(const CastExpr *E); 12896 bool VisitBinaryOperator(const BinaryOperator *E); 12897 bool VisitUnaryOperator(const UnaryOperator *E); 12898 bool VisitInitListExpr(const InitListExpr *E); 12899 }; 12900 } // end anonymous namespace 12901 12902 static bool EvaluateComplex(const Expr *E, ComplexValue &Result, 12903 EvalInfo &Info) { 12904 assert(E->isRValue() && E->getType()->isAnyComplexType()); 12905 return ComplexExprEvaluator(Info, Result).Visit(E); 12906 } 12907 12908 bool ComplexExprEvaluator::ZeroInitialization(const Expr *E) { 12909 QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType(); 12910 if (ElemTy->isRealFloatingType()) { 12911 Result.makeComplexFloat(); 12912 APFloat Zero = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(ElemTy)); 12913 Result.FloatReal = Zero; 12914 Result.FloatImag = Zero; 12915 } else { 12916 Result.makeComplexInt(); 12917 APSInt Zero = Info.Ctx.MakeIntValue(0, ElemTy); 12918 Result.IntReal = Zero; 12919 Result.IntImag = Zero; 12920 } 12921 return true; 12922 } 12923 12924 bool ComplexExprEvaluator::VisitImaginaryLiteral(const ImaginaryLiteral *E) { 12925 const Expr* SubExpr = E->getSubExpr(); 12926 12927 if (SubExpr->getType()->isRealFloatingType()) { 12928 Result.makeComplexFloat(); 12929 APFloat &Imag = Result.FloatImag; 12930 if (!EvaluateFloat(SubExpr, Imag, Info)) 12931 return false; 12932 12933 Result.FloatReal = APFloat(Imag.getSemantics()); 12934 return true; 12935 } else { 12936 assert(SubExpr->getType()->isIntegerType() && 12937 "Unexpected imaginary literal."); 12938 12939 Result.makeComplexInt(); 12940 APSInt &Imag = Result.IntImag; 12941 if (!EvaluateInteger(SubExpr, Imag, Info)) 12942 return false; 12943 12944 Result.IntReal = APSInt(Imag.getBitWidth(), !Imag.isSigned()); 12945 return true; 12946 } 12947 } 12948 12949 bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) { 12950 12951 switch (E->getCastKind()) { 12952 case CK_BitCast: 12953 case CK_BaseToDerived: 12954 case CK_DerivedToBase: 12955 case CK_UncheckedDerivedToBase: 12956 case CK_Dynamic: 12957 case CK_ToUnion: 12958 case CK_ArrayToPointerDecay: 12959 case CK_FunctionToPointerDecay: 12960 case CK_NullToPointer: 12961 case CK_NullToMemberPointer: 12962 case CK_BaseToDerivedMemberPointer: 12963 case CK_DerivedToBaseMemberPointer: 12964 case CK_MemberPointerToBoolean: 12965 case CK_ReinterpretMemberPointer: 12966 case CK_ConstructorConversion: 12967 
case CK_IntegralToPointer: 12968 case CK_PointerToIntegral: 12969 case CK_PointerToBoolean: 12970 case CK_ToVoid: 12971 case CK_VectorSplat: 12972 case CK_IntegralCast: 12973 case CK_BooleanToSignedIntegral: 12974 case CK_IntegralToBoolean: 12975 case CK_IntegralToFloating: 12976 case CK_FloatingToIntegral: 12977 case CK_FloatingToBoolean: 12978 case CK_FloatingCast: 12979 case CK_CPointerToObjCPointerCast: 12980 case CK_BlockPointerToObjCPointerCast: 12981 case CK_AnyPointerToBlockPointerCast: 12982 case CK_ObjCObjectLValueCast: 12983 case CK_FloatingComplexToReal: 12984 case CK_FloatingComplexToBoolean: 12985 case CK_IntegralComplexToReal: 12986 case CK_IntegralComplexToBoolean: 12987 case CK_ARCProduceObject: 12988 case CK_ARCConsumeObject: 12989 case CK_ARCReclaimReturnedObject: 12990 case CK_ARCExtendBlockObject: 12991 case CK_CopyAndAutoreleaseBlockObject: 12992 case CK_BuiltinFnToFnPtr: 12993 case CK_ZeroToOCLOpaqueType: 12994 case CK_NonAtomicToAtomic: 12995 case CK_AddressSpaceConversion: 12996 case CK_IntToOCLSampler: 12997 case CK_FixedPointCast: 12998 case CK_FixedPointToBoolean: 12999 case CK_FixedPointToIntegral: 13000 case CK_IntegralToFixedPoint: 13001 llvm_unreachable("invalid cast kind for complex value"); 13002 13003 case CK_LValueToRValue: 13004 case CK_AtomicToNonAtomic: 13005 case CK_NoOp: 13006 case CK_LValueToRValueBitCast: 13007 return ExprEvaluatorBaseTy::VisitCastExpr(E); 13008 13009 case CK_Dependent: 13010 case CK_LValueBitCast: 13011 case CK_UserDefinedConversion: 13012 return Error(E); 13013 13014 case CK_FloatingRealToComplex: { 13015 APFloat &Real = Result.FloatReal; 13016 if (!EvaluateFloat(E->getSubExpr(), Real, Info)) 13017 return false; 13018 13019 Result.makeComplexFloat(); 13020 Result.FloatImag = APFloat(Real.getSemantics()); 13021 return true; 13022 } 13023 13024 case CK_FloatingComplexCast: { 13025 if (!Visit(E->getSubExpr())) 13026 return false; 13027 13028 QualType To = E->getType()->castAs<ComplexType>()->getElementType(); 13029 QualType From 13030 = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType(); 13031 13032 return HandleFloatToFloatCast(Info, E, From, To, Result.FloatReal) && 13033 HandleFloatToFloatCast(Info, E, From, To, Result.FloatImag); 13034 } 13035 13036 case CK_FloatingComplexToIntegralComplex: { 13037 if (!Visit(E->getSubExpr())) 13038 return false; 13039 13040 QualType To = E->getType()->castAs<ComplexType>()->getElementType(); 13041 QualType From 13042 = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType(); 13043 Result.makeComplexInt(); 13044 return HandleFloatToIntCast(Info, E, From, Result.FloatReal, 13045 To, Result.IntReal) && 13046 HandleFloatToIntCast(Info, E, From, Result.FloatImag, 13047 To, Result.IntImag); 13048 } 13049 13050 case CK_IntegralRealToComplex: { 13051 APSInt &Real = Result.IntReal; 13052 if (!EvaluateInteger(E->getSubExpr(), Real, Info)) 13053 return false; 13054 13055 Result.makeComplexInt(); 13056 Result.IntImag = APSInt(Real.getBitWidth(), !Real.isSigned()); 13057 return true; 13058 } 13059 13060 case CK_IntegralComplexCast: { 13061 if (!Visit(E->getSubExpr())) 13062 return false; 13063 13064 QualType To = E->getType()->castAs<ComplexType>()->getElementType(); 13065 QualType From 13066 = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType(); 13067 13068 Result.IntReal = HandleIntToIntCast(Info, E, To, From, Result.IntReal); 13069 Result.IntImag = HandleIntToIntCast(Info, E, To, From, Result.IntImag); 13070 return true; 13071 } 13072 13073 case 
CK_IntegralComplexToFloatingComplex: { 13074 if (!Visit(E->getSubExpr())) 13075 return false; 13076 13077 QualType To = E->getType()->castAs<ComplexType>()->getElementType(); 13078 QualType From 13079 = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType(); 13080 Result.makeComplexFloat(); 13081 return HandleIntToFloatCast(Info, E, From, Result.IntReal, 13082 To, Result.FloatReal) && 13083 HandleIntToFloatCast(Info, E, From, Result.IntImag, 13084 To, Result.FloatImag); 13085 } 13086 } 13087 13088 llvm_unreachable("unknown cast resulting in complex value"); 13089 } 13090 13091 bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { 13092 if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma) 13093 return ExprEvaluatorBaseTy::VisitBinaryOperator(E); 13094 13095 // Track whether the LHS or RHS is real at the type system level. When this is 13096 // the case we can simplify our evaluation strategy. 13097 bool LHSReal = false, RHSReal = false; 13098 13099 bool LHSOK; 13100 if (E->getLHS()->getType()->isRealFloatingType()) { 13101 LHSReal = true; 13102 APFloat &Real = Result.FloatReal; 13103 LHSOK = EvaluateFloat(E->getLHS(), Real, Info); 13104 if (LHSOK) { 13105 Result.makeComplexFloat(); 13106 Result.FloatImag = APFloat(Real.getSemantics()); 13107 } 13108 } else { 13109 LHSOK = Visit(E->getLHS()); 13110 } 13111 if (!LHSOK && !Info.noteFailure()) 13112 return false; 13113 13114 ComplexValue RHS; 13115 if (E->getRHS()->getType()->isRealFloatingType()) { 13116 RHSReal = true; 13117 APFloat &Real = RHS.FloatReal; 13118 if (!EvaluateFloat(E->getRHS(), Real, Info) || !LHSOK) 13119 return false; 13120 RHS.makeComplexFloat(); 13121 RHS.FloatImag = APFloat(Real.getSemantics()); 13122 } else if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK) 13123 return false; 13124 13125 assert(!(LHSReal && RHSReal) && 13126 "Cannot have both operands of a complex operation be real."); 13127 switch (E->getOpcode()) { 13128 default: return Error(E); 13129 case BO_Add: 13130 if (Result.isComplexFloat()) { 13131 Result.getComplexFloatReal().add(RHS.getComplexFloatReal(), 13132 APFloat::rmNearestTiesToEven); 13133 if (LHSReal) 13134 Result.getComplexFloatImag() = RHS.getComplexFloatImag(); 13135 else if (!RHSReal) 13136 Result.getComplexFloatImag().add(RHS.getComplexFloatImag(), 13137 APFloat::rmNearestTiesToEven); 13138 } else { 13139 Result.getComplexIntReal() += RHS.getComplexIntReal(); 13140 Result.getComplexIntImag() += RHS.getComplexIntImag(); 13141 } 13142 break; 13143 case BO_Sub: 13144 if (Result.isComplexFloat()) { 13145 Result.getComplexFloatReal().subtract(RHS.getComplexFloatReal(), 13146 APFloat::rmNearestTiesToEven); 13147 if (LHSReal) { 13148 Result.getComplexFloatImag() = RHS.getComplexFloatImag(); 13149 Result.getComplexFloatImag().changeSign(); 13150 } else if (!RHSReal) { 13151 Result.getComplexFloatImag().subtract(RHS.getComplexFloatImag(), 13152 APFloat::rmNearestTiesToEven); 13153 } 13154 } else { 13155 Result.getComplexIntReal() -= RHS.getComplexIntReal(); 13156 Result.getComplexIntImag() -= RHS.getComplexIntImag(); 13157 } 13158 break; 13159 case BO_Mul: 13160 if (Result.isComplexFloat()) { 13161 // This is an implementation of complex multiplication according to the 13162 // constraints laid out in C11 Annex G. 
The implementation uses the 13163 // following naming scheme: 13164 // (a + ib) * (c + id) 13165 ComplexValue LHS = Result; 13166 APFloat &A = LHS.getComplexFloatReal(); 13167 APFloat &B = LHS.getComplexFloatImag(); 13168 APFloat &C = RHS.getComplexFloatReal(); 13169 APFloat &D = RHS.getComplexFloatImag(); 13170 APFloat &ResR = Result.getComplexFloatReal(); 13171 APFloat &ResI = Result.getComplexFloatImag(); 13172 if (LHSReal) { 13173 assert(!RHSReal && "Cannot have two real operands for a complex op!"); 13174 ResR = A * C; 13175 ResI = A * D; 13176 } else if (RHSReal) { 13177 ResR = C * A; 13178 ResI = C * B; 13179 } else { 13180 // In the fully general case, we need to handle NaNs and infinities 13181 // robustly. 13182 APFloat AC = A * C; 13183 APFloat BD = B * D; 13184 APFloat AD = A * D; 13185 APFloat BC = B * C; 13186 ResR = AC - BD; 13187 ResI = AD + BC; 13188 if (ResR.isNaN() && ResI.isNaN()) { 13189 bool Recalc = false; 13190 if (A.isInfinity() || B.isInfinity()) { 13191 A = APFloat::copySign( 13192 APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), A); 13193 B = APFloat::copySign( 13194 APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), B); 13195 if (C.isNaN()) 13196 C = APFloat::copySign(APFloat(C.getSemantics()), C); 13197 if (D.isNaN()) 13198 D = APFloat::copySign(APFloat(D.getSemantics()), D); 13199 Recalc = true; 13200 } 13201 if (C.isInfinity() || D.isInfinity()) { 13202 C = APFloat::copySign( 13203 APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), C); 13204 D = APFloat::copySign( 13205 APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0), D); 13206 if (A.isNaN()) 13207 A = APFloat::copySign(APFloat(A.getSemantics()), A); 13208 if (B.isNaN()) 13209 B = APFloat::copySign(APFloat(B.getSemantics()), B); 13210 Recalc = true; 13211 } 13212 if (!Recalc && (AC.isInfinity() || BD.isInfinity() || 13213 AD.isInfinity() || BC.isInfinity())) { 13214 if (A.isNaN()) 13215 A = APFloat::copySign(APFloat(A.getSemantics()), A); 13216 if (B.isNaN()) 13217 B = APFloat::copySign(APFloat(B.getSemantics()), B); 13218 if (C.isNaN()) 13219 C = APFloat::copySign(APFloat(C.getSemantics()), C); 13220 if (D.isNaN()) 13221 D = APFloat::copySign(APFloat(D.getSemantics()), D); 13222 Recalc = true; 13223 } 13224 if (Recalc) { 13225 ResR = APFloat::getInf(A.getSemantics()) * (A * C - B * D); 13226 ResI = APFloat::getInf(A.getSemantics()) * (A * D + B * C); 13227 } 13228 } 13229 } 13230 } else { 13231 ComplexValue LHS = Result; 13232 Result.getComplexIntReal() = 13233 (LHS.getComplexIntReal() * RHS.getComplexIntReal() - 13234 LHS.getComplexIntImag() * RHS.getComplexIntImag()); 13235 Result.getComplexIntImag() = 13236 (LHS.getComplexIntReal() * RHS.getComplexIntImag() + 13237 LHS.getComplexIntImag() * RHS.getComplexIntReal()); 13238 } 13239 break; 13240 case BO_Div: 13241 if (Result.isComplexFloat()) { 13242 // This is an implementation of complex division according to the 13243 // constraints laid out in C11 Annex G. The implementation uses the 13244 // following naming scheme: 13245 // (a + ib) / (c + id) 13246 ComplexValue LHS = Result; 13247 APFloat &A = LHS.getComplexFloatReal(); 13248 APFloat &B = LHS.getComplexFloatImag(); 13249 APFloat &C = RHS.getComplexFloatReal(); 13250 APFloat &D = RHS.getComplexFloatImag(); 13251 APFloat &ResR = Result.getComplexFloatReal(); 13252 APFloat &ResI = Result.getComplexFloatImag(); 13253 if (RHSReal) { 13254 ResR = A / C; 13255 ResI = B / C; 13256 } else { 13257 if (LHSReal) { 13258 // No real optimizations we can do here, stub out with zero. 
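            // (Editorial note: i.e. treat the real-typed LHS 'a' as the
            // complex value 'a + 0i' so that the general scaled division
            // below applies unchanged.)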
13259 B = APFloat::getZero(A.getSemantics()); 13260 } 13261 int DenomLogB = 0; 13262 APFloat MaxCD = maxnum(abs(C), abs(D)); 13263 if (MaxCD.isFinite()) { 13264 DenomLogB = ilogb(MaxCD); 13265 C = scalbn(C, -DenomLogB, APFloat::rmNearestTiesToEven); 13266 D = scalbn(D, -DenomLogB, APFloat::rmNearestTiesToEven); 13267 } 13268 APFloat Denom = C * C + D * D; 13269 ResR = scalbn((A * C + B * D) / Denom, -DenomLogB, 13270 APFloat::rmNearestTiesToEven); 13271 ResI = scalbn((B * C - A * D) / Denom, -DenomLogB, 13272 APFloat::rmNearestTiesToEven); 13273 if (ResR.isNaN() && ResI.isNaN()) { 13274 if (Denom.isPosZero() && (!A.isNaN() || !B.isNaN())) { 13275 ResR = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * A; 13276 ResI = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * B; 13277 } else if ((A.isInfinity() || B.isInfinity()) && C.isFinite() && 13278 D.isFinite()) { 13279 A = APFloat::copySign( 13280 APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), A); 13281 B = APFloat::copySign( 13282 APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), B); 13283 ResR = APFloat::getInf(ResR.getSemantics()) * (A * C + B * D); 13284 ResI = APFloat::getInf(ResI.getSemantics()) * (B * C - A * D); 13285 } else if (MaxCD.isInfinity() && A.isFinite() && B.isFinite()) { 13286 C = APFloat::copySign( 13287 APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), C); 13288 D = APFloat::copySign( 13289 APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0), D); 13290 ResR = APFloat::getZero(ResR.getSemantics()) * (A * C + B * D); 13291 ResI = APFloat::getZero(ResI.getSemantics()) * (B * C - A * D); 13292 } 13293 } 13294 } 13295 } else { 13296 if (RHS.getComplexIntReal() == 0 && RHS.getComplexIntImag() == 0) 13297 return Error(E, diag::note_expr_divide_by_zero); 13298 13299 ComplexValue LHS = Result; 13300 APSInt Den = RHS.getComplexIntReal() * RHS.getComplexIntReal() + 13301 RHS.getComplexIntImag() * RHS.getComplexIntImag(); 13302 Result.getComplexIntReal() = 13303 (LHS.getComplexIntReal() * RHS.getComplexIntReal() + 13304 LHS.getComplexIntImag() * RHS.getComplexIntImag()) / Den; 13305 Result.getComplexIntImag() = 13306 (LHS.getComplexIntImag() * RHS.getComplexIntReal() - 13307 LHS.getComplexIntReal() * RHS.getComplexIntImag()) / Den; 13308 } 13309 break; 13310 } 13311 13312 return true; 13313 } 13314 13315 bool ComplexExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) { 13316 // Get the operand value into 'Result'. 13317 if (!Visit(E->getSubExpr())) 13318 return false; 13319 13320 switch (E->getOpcode()) { 13321 default: 13322 return Error(E); 13323 case UO_Extension: 13324 return true; 13325 case UO_Plus: 13326 // The result is always just the subexpr. 
13327 return true; 13328 case UO_Minus: 13329 if (Result.isComplexFloat()) { 13330 Result.getComplexFloatReal().changeSign(); 13331 Result.getComplexFloatImag().changeSign(); 13332 } 13333 else { 13334 Result.getComplexIntReal() = -Result.getComplexIntReal(); 13335 Result.getComplexIntImag() = -Result.getComplexIntImag(); 13336 } 13337 return true; 13338 case UO_Not: 13339 if (Result.isComplexFloat()) 13340 Result.getComplexFloatImag().changeSign(); 13341 else 13342 Result.getComplexIntImag() = -Result.getComplexIntImag(); 13343 return true; 13344 } 13345 } 13346 13347 bool ComplexExprEvaluator::VisitInitListExpr(const InitListExpr *E) { 13348 if (E->getNumInits() == 2) { 13349 if (E->getType()->isComplexType()) { 13350 Result.makeComplexFloat(); 13351 if (!EvaluateFloat(E->getInit(0), Result.FloatReal, Info)) 13352 return false; 13353 if (!EvaluateFloat(E->getInit(1), Result.FloatImag, Info)) 13354 return false; 13355 } else { 13356 Result.makeComplexInt(); 13357 if (!EvaluateInteger(E->getInit(0), Result.IntReal, Info)) 13358 return false; 13359 if (!EvaluateInteger(E->getInit(1), Result.IntImag, Info)) 13360 return false; 13361 } 13362 return true; 13363 } 13364 return ExprEvaluatorBaseTy::VisitInitListExpr(E); 13365 } 13366 13367 //===----------------------------------------------------------------------===// 13368 // Atomic expression evaluation, essentially just handling the NonAtomicToAtomic 13369 // implicit conversion. 13370 //===----------------------------------------------------------------------===// 13371 13372 namespace { 13373 class AtomicExprEvaluator : 13374 public ExprEvaluatorBase<AtomicExprEvaluator> { 13375 const LValue *This; 13376 APValue &Result; 13377 public: 13378 AtomicExprEvaluator(EvalInfo &Info, const LValue *This, APValue &Result) 13379 : ExprEvaluatorBaseTy(Info), This(This), Result(Result) {} 13380 13381 bool Success(const APValue &V, const Expr *E) { 13382 Result = V; 13383 return true; 13384 } 13385 13386 bool ZeroInitialization(const Expr *E) { 13387 ImplicitValueInitExpr VIE( 13388 E->getType()->castAs<AtomicType>()->getValueType()); 13389 // For atomic-qualified class (and array) types in C++, initialize the 13390 // _Atomic-wrapped subobject directly, in-place. 13391 return This ? EvaluateInPlace(Result, Info, *This, &VIE) 13392 : Evaluate(Result, Info, &VIE); 13393 } 13394 13395 bool VisitCastExpr(const CastExpr *E) { 13396 switch (E->getCastKind()) { 13397 default: 13398 return ExprEvaluatorBaseTy::VisitCastExpr(E); 13399 case CK_NonAtomicToAtomic: 13400 return This ? 
EvaluateInPlace(Result, Info, *This, E->getSubExpr()) 13401 : Evaluate(Result, Info, E->getSubExpr()); 13402 } 13403 } 13404 }; 13405 } // end anonymous namespace 13406 13407 static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result, 13408 EvalInfo &Info) { 13409 assert(E->isRValue() && E->getType()->isAtomicType()); 13410 return AtomicExprEvaluator(Info, This, Result).Visit(E); 13411 } 13412 13413 //===----------------------------------------------------------------------===// 13414 // Void expression evaluation, primarily for a cast to void on the LHS of a 13415 // comma operator 13416 //===----------------------------------------------------------------------===// 13417 13418 namespace { 13419 class VoidExprEvaluator 13420 : public ExprEvaluatorBase<VoidExprEvaluator> { 13421 public: 13422 VoidExprEvaluator(EvalInfo &Info) : ExprEvaluatorBaseTy(Info) {} 13423 13424 bool Success(const APValue &V, const Expr *e) { return true; } 13425 13426 bool ZeroInitialization(const Expr *E) { return true; } 13427 13428 bool VisitCastExpr(const CastExpr *E) { 13429 switch (E->getCastKind()) { 13430 default: 13431 return ExprEvaluatorBaseTy::VisitCastExpr(E); 13432 case CK_ToVoid: 13433 VisitIgnoredValue(E->getSubExpr()); 13434 return true; 13435 } 13436 } 13437 13438 bool VisitCallExpr(const CallExpr *E) { 13439 switch (E->getBuiltinCallee()) { 13440 case Builtin::BI__assume: 13441 case Builtin::BI__builtin_assume: 13442 // The argument is not evaluated! 13443 return true; 13444 13445 case Builtin::BI__builtin_operator_delete: 13446 return HandleOperatorDeleteCall(Info, E); 13447 13448 default: 13449 break; 13450 } 13451 13452 return ExprEvaluatorBaseTy::VisitCallExpr(E); 13453 } 13454 13455 bool VisitCXXDeleteExpr(const CXXDeleteExpr *E); 13456 }; 13457 } // end anonymous namespace 13458 13459 bool VoidExprEvaluator::VisitCXXDeleteExpr(const CXXDeleteExpr *E) { 13460 // We cannot speculatively evaluate a delete expression. 13461 if (Info.SpeculativeEvaluationDepth) 13462 return false; 13463 13464 FunctionDecl *OperatorDelete = E->getOperatorDelete(); 13465 if (!OperatorDelete->isReplaceableGlobalAllocationFunction()) { 13466 Info.FFDiag(E, diag::note_constexpr_new_non_replaceable) 13467 << isa<CXXMethodDecl>(OperatorDelete) << OperatorDelete; 13468 return false; 13469 } 13470 13471 const Expr *Arg = E->getArgument(); 13472 13473 LValue Pointer; 13474 if (!EvaluatePointer(Arg, Pointer, Info)) 13475 return false; 13476 if (Pointer.Designator.Invalid) 13477 return false; 13478 13479 // Deleting a null pointer has no effect. 13480 if (Pointer.isNullPointer()) { 13481 // This is the only case where we need to produce an extension warning: 13482 // the only other way we can succeed is if we find a dynamic allocation, 13483 // and we will have warned when we allocated it in that case. 13484 if (!Info.getLangOpts().CPlusPlus2a) 13485 Info.CCEDiag(E, diag::note_constexpr_new); 13486 return true; 13487 } 13488 13489 Optional<DynAlloc *> Alloc = CheckDeleteKind( 13490 Info, E, Pointer, E->isArrayForm() ? DynAlloc::ArrayNew : DynAlloc::New); 13491 if (!Alloc) 13492 return false; 13493 QualType AllocType = Pointer.Base.getDynamicAllocType(); 13494 13495 // For the non-array case, the designator must be empty if the static type 13496 // does not have a virtual destructor. 
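  // Editorial illustration: a non-empty designator here typically means the
  // pointer designates a subobject, e.g. the base in
  // 'delete static_cast<Base*>(new Derived)'; that form is only accepted when
  // the static type has a virtual destructor, matching the usual C++ rule
  // that such a delete is otherwise undefined behavior.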
13497 if (!E->isArrayForm() && Pointer.Designator.Entries.size() != 0 && 13498 !hasVirtualDestructor(Arg->getType()->getPointeeType())) { 13499 Info.FFDiag(E, diag::note_constexpr_delete_base_nonvirt_dtor) 13500 << Arg->getType()->getPointeeType() << AllocType; 13501 return false; 13502 } 13503 13504 // For a class type with a virtual destructor, the selected operator delete 13505 // is the one looked up when building the destructor. 13506 if (!E->isArrayForm() && !E->isGlobalDelete()) { 13507 const FunctionDecl *VirtualDelete = getVirtualOperatorDelete(AllocType); 13508 if (VirtualDelete && 13509 !VirtualDelete->isReplaceableGlobalAllocationFunction()) { 13510 Info.FFDiag(E, diag::note_constexpr_new_non_replaceable) 13511 << isa<CXXMethodDecl>(VirtualDelete) << VirtualDelete; 13512 return false; 13513 } 13514 } 13515 13516 if (!HandleDestruction(Info, E->getExprLoc(), Pointer.getLValueBase(), 13517 (*Alloc)->Value, AllocType)) 13518 return false; 13519 13520 if (!Info.HeapAllocs.erase(Pointer.Base.dyn_cast<DynamicAllocLValue>())) { 13521 // The element was already erased. This means the destructor call also 13522 // deleted the object. 13523 // FIXME: This probably results in undefined behavior before we get this 13524 // far, and should be diagnosed elsewhere first. 13525 Info.FFDiag(E, diag::note_constexpr_double_delete); 13526 return false; 13527 } 13528 13529 return true; 13530 } 13531 13532 static bool EvaluateVoid(const Expr *E, EvalInfo &Info) { 13533 assert(E->isRValue() && E->getType()->isVoidType()); 13534 return VoidExprEvaluator(Info).Visit(E); 13535 } 13536 13537 //===----------------------------------------------------------------------===// 13538 // Top level Expr::EvaluateAsRValue method. 13539 //===----------------------------------------------------------------------===// 13540 13541 static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) { 13542 // In C, function designators are not lvalues, but we evaluate them as if they 13543 // are. 
13544 QualType T = E->getType(); 13545 if (E->isGLValue() || T->isFunctionType()) { 13546 LValue LV; 13547 if (!EvaluateLValue(E, LV, Info)) 13548 return false; 13549 LV.moveInto(Result); 13550 } else if (T->isVectorType()) { 13551 if (!EvaluateVector(E, Result, Info)) 13552 return false; 13553 } else if (T->isIntegralOrEnumerationType()) { 13554 if (!IntExprEvaluator(Info, Result).Visit(E)) 13555 return false; 13556 } else if (T->hasPointerRepresentation()) { 13557 LValue LV; 13558 if (!EvaluatePointer(E, LV, Info)) 13559 return false; 13560 LV.moveInto(Result); 13561 } else if (T->isRealFloatingType()) { 13562 llvm::APFloat F(0.0); 13563 if (!EvaluateFloat(E, F, Info)) 13564 return false; 13565 Result = APValue(F); 13566 } else if (T->isAnyComplexType()) { 13567 ComplexValue C; 13568 if (!EvaluateComplex(E, C, Info)) 13569 return false; 13570 C.moveInto(Result); 13571 } else if (T->isFixedPointType()) { 13572 if (!FixedPointExprEvaluator(Info, Result).Visit(E)) return false; 13573 } else if (T->isMemberPointerType()) { 13574 MemberPtr P; 13575 if (!EvaluateMemberPointer(E, P, Info)) 13576 return false; 13577 P.moveInto(Result); 13578 return true; 13579 } else if (T->isArrayType()) { 13580 LValue LV; 13581 APValue &Value = 13582 Info.CurrentCall->createTemporary(E, T, false, LV); 13583 if (!EvaluateArray(E, LV, Value, Info)) 13584 return false; 13585 Result = Value; 13586 } else if (T->isRecordType()) { 13587 LValue LV; 13588 APValue &Value = Info.CurrentCall->createTemporary(E, T, false, LV); 13589 if (!EvaluateRecord(E, LV, Value, Info)) 13590 return false; 13591 Result = Value; 13592 } else if (T->isVoidType()) { 13593 if (!Info.getLangOpts().CPlusPlus11) 13594 Info.CCEDiag(E, diag::note_constexpr_nonliteral) 13595 << E->getType(); 13596 if (!EvaluateVoid(E, Info)) 13597 return false; 13598 } else if (T->isAtomicType()) { 13599 QualType Unqual = T.getAtomicUnqualifiedType(); 13600 if (Unqual->isArrayType() || Unqual->isRecordType()) { 13601 LValue LV; 13602 APValue &Value = Info.CurrentCall->createTemporary(E, Unqual, false, LV); 13603 if (!EvaluateAtomic(E, &LV, Value, Info)) 13604 return false; 13605 } else { 13606 if (!EvaluateAtomic(E, nullptr, Result, Info)) 13607 return false; 13608 } 13609 } else if (Info.getLangOpts().CPlusPlus11) { 13610 Info.FFDiag(E, diag::note_constexpr_nonliteral) << E->getType(); 13611 return false; 13612 } else { 13613 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr); 13614 return false; 13615 } 13616 13617 return true; 13618 } 13619 13620 /// EvaluateInPlace - Evaluate an expression in-place in an APValue. In some 13621 /// cases, the in-place evaluation is essential, since later initializers for 13622 /// an object can indirectly refer to subobjects which were initialized earlier. 13623 static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This, 13624 const Expr *E, bool AllowNonLiteralTypes) { 13625 assert(!E->isValueDependent()); 13626 13627 if (!AllowNonLiteralTypes && !CheckLiteralType(Info, E, &This)) 13628 return false; 13629 13630 if (E->isRValue()) { 13631 // Evaluate arrays and record types in-place, so that later initializers can 13632 // refer to earlier-initialized members of the object. 
13633 QualType T = E->getType(); 13634 if (T->isArrayType()) 13635 return EvaluateArray(E, This, Result, Info); 13636 else if (T->isRecordType()) 13637 return EvaluateRecord(E, This, Result, Info); 13638 else if (T->isAtomicType()) { 13639 QualType Unqual = T.getAtomicUnqualifiedType(); 13640 if (Unqual->isArrayType() || Unqual->isRecordType()) 13641 return EvaluateAtomic(E, &This, Result, Info); 13642 } 13643 } 13644 13645 // For any other type, in-place evaluation is unimportant. 13646 return Evaluate(Result, Info, E); 13647 } 13648 13649 /// EvaluateAsRValue - Try to evaluate this expression, performing an implicit 13650 /// lvalue-to-rvalue cast if it is an lvalue. 13651 static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result) { 13652 if (Info.EnableNewConstInterp) { 13653 if (!Info.Ctx.getInterpContext().evaluateAsRValue(Info, E, Result)) 13654 return false; 13655 } else { 13656 if (E->getType().isNull()) 13657 return false; 13658 13659 if (!CheckLiteralType(Info, E)) 13660 return false; 13661 13662 if (!::Evaluate(Result, Info, E)) 13663 return false; 13664 13665 if (E->isGLValue()) { 13666 LValue LV; 13667 LV.setFrom(Info.Ctx, Result); 13668 if (!handleLValueToRValueConversion(Info, E, E->getType(), LV, Result)) 13669 return false; 13670 } 13671 } 13672 13673 // Check this core constant expression is a constant expression. 13674 return CheckConstantExpression(Info, E->getExprLoc(), E->getType(), Result) && 13675 CheckMemoryLeaks(Info); 13676 } 13677 13678 static bool FastEvaluateAsRValue(const Expr *Exp, Expr::EvalResult &Result, 13679 const ASTContext &Ctx, bool &IsConst) { 13680 // Fast-path evaluations of integer literals, since we sometimes see files 13681 // containing vast quantities of these. 13682 if (const IntegerLiteral *L = dyn_cast<IntegerLiteral>(Exp)) { 13683 Result.Val = APValue(APSInt(L->getValue(), 13684 L->getType()->isUnsignedIntegerType())); 13685 IsConst = true; 13686 return true; 13687 } 13688 13689 // This case should be rare, but we need to check it before we check on 13690 // the type below. 13691 if (Exp->getType().isNull()) { 13692 IsConst = false; 13693 return true; 13694 } 13695 13696 // FIXME: Evaluating values of large array and record types can cause 13697 // performance problems. Only do so in C++11 for now. 
13698 if (Exp->isRValue() && (Exp->getType()->isArrayType() || 13699 Exp->getType()->isRecordType()) && 13700 !Ctx.getLangOpts().CPlusPlus11) { 13701 IsConst = false; 13702 return true; 13703 } 13704 return false; 13705 } 13706 13707 static bool hasUnacceptableSideEffect(Expr::EvalStatus &Result, 13708 Expr::SideEffectsKind SEK) { 13709 return (SEK < Expr::SE_AllowSideEffects && Result.HasSideEffects) || 13710 (SEK < Expr::SE_AllowUndefinedBehavior && Result.HasUndefinedBehavior); 13711 } 13712 13713 static bool EvaluateAsRValue(const Expr *E, Expr::EvalResult &Result, 13714 const ASTContext &Ctx, EvalInfo &Info) { 13715 bool IsConst; 13716 if (FastEvaluateAsRValue(E, Result, Ctx, IsConst)) 13717 return IsConst; 13718 13719 return EvaluateAsRValue(Info, E, Result.Val); 13720 } 13721 13722 static bool EvaluateAsInt(const Expr *E, Expr::EvalResult &ExprResult, 13723 const ASTContext &Ctx, 13724 Expr::SideEffectsKind AllowSideEffects, 13725 EvalInfo &Info) { 13726 if (!E->getType()->isIntegralOrEnumerationType()) 13727 return false; 13728 13729 if (!::EvaluateAsRValue(E, ExprResult, Ctx, Info) || 13730 !ExprResult.Val.isInt() || 13731 hasUnacceptableSideEffect(ExprResult, AllowSideEffects)) 13732 return false; 13733 13734 return true; 13735 } 13736 13737 static bool EvaluateAsFixedPoint(const Expr *E, Expr::EvalResult &ExprResult, 13738 const ASTContext &Ctx, 13739 Expr::SideEffectsKind AllowSideEffects, 13740 EvalInfo &Info) { 13741 if (!E->getType()->isFixedPointType()) 13742 return false; 13743 13744 if (!::EvaluateAsRValue(E, ExprResult, Ctx, Info)) 13745 return false; 13746 13747 if (!ExprResult.Val.isFixedPoint() || 13748 hasUnacceptableSideEffect(ExprResult, AllowSideEffects)) 13749 return false; 13750 13751 return true; 13752 } 13753 13754 /// EvaluateAsRValue - Return true if this is a constant which we can fold using 13755 /// any crazy technique (that has nothing to do with language standards) that 13756 /// we want to. If this function returns true, it returns the folded constant 13757 /// in Result. If this expression is a glvalue, an lvalue-to-rvalue conversion 13758 /// will be applied to the result. 
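/// A minimal caller-side sketch (editorial; 'use' is a placeholder, not a
/// real API):
/// \code
///   Expr::EvalResult ER;
///   if (E->EvaluateAsRValue(ER, Ctx, /*InConstantContext=*/false))
///     use(ER.Val); // the folded APValue; notes accumulate in ER.Diag if set
/// \endcode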
13759 bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, 13760 bool InConstantContext) const { 13761 assert(!isValueDependent() && 13762 "Expression evaluator can't be called on a dependent expression."); 13763 EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects); 13764 Info.InConstantContext = InConstantContext; 13765 return ::EvaluateAsRValue(this, Result, Ctx, Info); 13766 } 13767 13768 bool Expr::EvaluateAsBooleanCondition(bool &Result, const ASTContext &Ctx, 13769 bool InConstantContext) const { 13770 assert(!isValueDependent() && 13771 "Expression evaluator can't be called on a dependent expression."); 13772 EvalResult Scratch; 13773 return EvaluateAsRValue(Scratch, Ctx, InConstantContext) && 13774 HandleConversionToBool(Scratch.Val, Result); 13775 } 13776 13777 bool Expr::EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, 13778 SideEffectsKind AllowSideEffects, 13779 bool InConstantContext) const { 13780 assert(!isValueDependent() && 13781 "Expression evaluator can't be called on a dependent expression."); 13782 EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects); 13783 Info.InConstantContext = InConstantContext; 13784 return ::EvaluateAsInt(this, Result, Ctx, AllowSideEffects, Info); 13785 } 13786 13787 bool Expr::EvaluateAsFixedPoint(EvalResult &Result, const ASTContext &Ctx, 13788 SideEffectsKind AllowSideEffects, 13789 bool InConstantContext) const { 13790 assert(!isValueDependent() && 13791 "Expression evaluator can't be called on a dependent expression."); 13792 EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects); 13793 Info.InConstantContext = InConstantContext; 13794 return ::EvaluateAsFixedPoint(this, Result, Ctx, AllowSideEffects, Info); 13795 } 13796 13797 bool Expr::EvaluateAsFloat(APFloat &Result, const ASTContext &Ctx, 13798 SideEffectsKind AllowSideEffects, 13799 bool InConstantContext) const { 13800 assert(!isValueDependent() && 13801 "Expression evaluator can't be called on a dependent expression."); 13802 13803 if (!getType()->isRealFloatingType()) 13804 return false; 13805 13806 EvalResult ExprResult; 13807 if (!EvaluateAsRValue(ExprResult, Ctx, InConstantContext) || 13808 !ExprResult.Val.isFloat() || 13809 hasUnacceptableSideEffect(ExprResult, AllowSideEffects)) 13810 return false; 13811 13812 Result = ExprResult.Val.getFloat(); 13813 return true; 13814 } 13815 13816 bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx, 13817 bool InConstantContext) const { 13818 assert(!isValueDependent() && 13819 "Expression evaluator can't be called on a dependent expression."); 13820 13821 EvalInfo Info(Ctx, Result, EvalInfo::EM_ConstantFold); 13822 Info.InConstantContext = InConstantContext; 13823 LValue LV; 13824 CheckedTemporaries CheckedTemps; 13825 if (!EvaluateLValue(this, LV, Info) || !Info.discardCleanups() || 13826 Result.HasSideEffects || 13827 !CheckLValueConstantExpression(Info, getExprLoc(), 13828 Ctx.getLValueReferenceType(getType()), LV, 13829 Expr::EvaluateForCodeGen, CheckedTemps)) 13830 return false; 13831 13832 LV.moveInto(Result.Val); 13833 return true; 13834 } 13835 13836 bool Expr::EvaluateAsConstantExpr(EvalResult &Result, ConstExprUsage Usage, 13837 const ASTContext &Ctx) const { 13838 assert(!isValueDependent() && 13839 "Expression evaluator can't be called on a dependent expression."); 13840 13841 EvalInfo::EvaluationMode EM = EvalInfo::EM_ConstantExpression; 13842 EvalInfo Info(Ctx, Result, EM); 13843 Info.InConstantContext = true; 13844 13845 if (!::Evaluate(Result.Val, Info, this) || 
Result.HasSideEffects) 13846 return false; 13847 13848 if (!Info.discardCleanups()) 13849 llvm_unreachable("Unhandled cleanup; missing full expression marker?"); 13850 13851 return CheckConstantExpression(Info, getExprLoc(), getStorageType(Ctx, this), 13852 Result.Val, Usage) && 13853 CheckMemoryLeaks(Info); 13854 } 13855 13856 bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx, 13857 const VarDecl *VD, 13858 SmallVectorImpl<PartialDiagnosticAt> &Notes) const { 13859 assert(!isValueDependent() && 13860 "Expression evaluator can't be called on a dependent expression."); 13861 13862 // FIXME: Evaluating initializers for large array and record types can cause 13863 // performance problems. Only do so in C++11 for now. 13864 if (isRValue() && (getType()->isArrayType() || getType()->isRecordType()) && 13865 !Ctx.getLangOpts().CPlusPlus11) 13866 return false; 13867 13868 Expr::EvalStatus EStatus; 13869 EStatus.Diag = &Notes; 13870 13871 EvalInfo Info(Ctx, EStatus, VD->isConstexpr() 13872 ? EvalInfo::EM_ConstantExpression 13873 : EvalInfo::EM_ConstantFold); 13874 Info.setEvaluatingDecl(VD, Value); 13875 Info.InConstantContext = true; 13876 13877 SourceLocation DeclLoc = VD->getLocation(); 13878 QualType DeclTy = VD->getType(); 13879 13880 if (Info.EnableNewConstInterp) { 13881 auto &InterpCtx = const_cast<ASTContext &>(Ctx).getInterpContext(); 13882 if (!InterpCtx.evaluateAsInitializer(Info, VD, Value)) 13883 return false; 13884 } else { 13885 LValue LVal; 13886 LVal.set(VD); 13887 13888 // C++11 [basic.start.init]p2: 13889 // Variables with static storage duration or thread storage duration shall 13890 // be zero-initialized before any other initialization takes place. 13891 // This behavior is not present in C. 13892 if (Ctx.getLangOpts().CPlusPlus && !VD->hasLocalStorage() && 13893 !DeclTy->isReferenceType()) { 13894 ImplicitValueInitExpr VIE(DeclTy); 13895 if (!EvaluateInPlace(Value, Info, LVal, &VIE, 13896 /*AllowNonLiteralTypes=*/true)) 13897 return false; 13898 } 13899 13900 if (!EvaluateInPlace(Value, Info, LVal, this, 13901 /*AllowNonLiteralTypes=*/true) || 13902 EStatus.HasSideEffects) 13903 return false; 13904 13905 // At this point, any lifetime-extended temporaries are completely 13906 // initialized. 13907 Info.performLifetimeExtension(); 13908 13909 if (!Info.discardCleanups()) 13910 llvm_unreachable("Unhandled cleanup; missing full expression marker?"); 13911 } 13912 return CheckConstantExpression(Info, DeclLoc, DeclTy, Value) && 13913 CheckMemoryLeaks(Info); 13914 } 13915 13916 bool VarDecl::evaluateDestruction( 13917 SmallVectorImpl<PartialDiagnosticAt> &Notes) const { 13918 assert(getEvaluatedValue() && !getEvaluatedValue()->isAbsent() && 13919 "cannot evaluate destruction of non-constant-initialized variable"); 13920 13921 Expr::EvalStatus EStatus; 13922 EStatus.Diag = &Notes; 13923 13924 // Make a copy of the value for the destructor to mutate. 13925 APValue DestroyedValue = *getEvaluatedValue(); 13926 13927 EvalInfo Info(getASTContext(), EStatus, EvalInfo::EM_ConstantExpression); 13928 Info.setEvaluatingDecl(this, DestroyedValue, 13929 EvalInfo::EvaluatingDeclKind::Dtor); 13930 Info.InConstantContext = true; 13931 13932 SourceLocation DeclLoc = getLocation(); 13933 QualType DeclTy = getType(); 13934 13935 LValue LVal; 13936 LVal.set(this); 13937 13938 // FIXME: Consider storing whether this variable has constant destruction in 13939 // the EvaluatedStmt so that CodeGen can query it. 
13940 if (!HandleDestruction(Info, DeclLoc, LVal.Base, DestroyedValue, DeclTy) || 13941 EStatus.HasSideEffects) 13942 return false; 13943 13944 if (!Info.discardCleanups()) 13945 llvm_unreachable("Unhandled cleanup; missing full expression marker?"); 13946 13947 ensureEvaluatedStmt()->HasConstantDestruction = true; 13948 return true; 13949 } 13950 13951 /// isEvaluatable - Call EvaluateAsRValue to see if this expression can be 13952 /// constant folded, but discard the result. 13953 bool Expr::isEvaluatable(const ASTContext &Ctx, SideEffectsKind SEK) const { 13954 assert(!isValueDependent() && 13955 "Expression evaluator can't be called on a dependent expression."); 13956 13957 EvalResult Result; 13958 return EvaluateAsRValue(Result, Ctx, /* in constant context */ true) && 13959 !hasUnacceptableSideEffect(Result, SEK); 13960 } 13961 13962 APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx, 13963 SmallVectorImpl<PartialDiagnosticAt> *Diag) const { 13964 assert(!isValueDependent() && 13965 "Expression evaluator can't be called on a dependent expression."); 13966 13967 EvalResult EVResult; 13968 EVResult.Diag = Diag; 13969 EvalInfo Info(Ctx, EVResult, EvalInfo::EM_IgnoreSideEffects); 13970 Info.InConstantContext = true; 13971 13972 bool Result = ::EvaluateAsRValue(this, EVResult, Ctx, Info); 13973 (void)Result; 13974 assert(Result && "Could not evaluate expression"); 13975 assert(EVResult.Val.isInt() && "Expression did not evaluate to integer"); 13976 13977 return EVResult.Val.getInt(); 13978 } 13979 13980 APSInt Expr::EvaluateKnownConstIntCheckOverflow( 13981 const ASTContext &Ctx, SmallVectorImpl<PartialDiagnosticAt> *Diag) const { 13982 assert(!isValueDependent() && 13983 "Expression evaluator can't be called on a dependent expression."); 13984 13985 EvalResult EVResult; 13986 EVResult.Diag = Diag; 13987 EvalInfo Info(Ctx, EVResult, EvalInfo::EM_IgnoreSideEffects); 13988 Info.InConstantContext = true; 13989 Info.CheckingForUndefinedBehavior = true; 13990 13991 bool Result = ::EvaluateAsRValue(Info, this, EVResult.Val); 13992 (void)Result; 13993 assert(Result && "Could not evaluate expression"); 13994 assert(EVResult.Val.isInt() && "Expression did not evaluate to integer"); 13995 13996 return EVResult.Val.getInt(); 13997 } 13998 13999 void Expr::EvaluateForOverflow(const ASTContext &Ctx) const { 14000 assert(!isValueDependent() && 14001 "Expression evaluator can't be called on a dependent expression."); 14002 14003 bool IsConst; 14004 EvalResult EVResult; 14005 if (!FastEvaluateAsRValue(this, EVResult, Ctx, IsConst)) { 14006 EvalInfo Info(Ctx, EVResult, EvalInfo::EM_IgnoreSideEffects); 14007 Info.CheckingForUndefinedBehavior = true; 14008 (void)::EvaluateAsRValue(Info, this, EVResult.Val); 14009 } 14010 } 14011 14012 bool Expr::EvalResult::isGlobalLValue() const { 14013 assert(Val.isLValue()); 14014 return IsGlobalLValue(Val.getLValueBase()); 14015 } 14016 14017 14018 /// isIntegerConstantExpr - this recursive routine will test if an expression is 14019 /// an integer constant expression. 14020 14021 /// FIXME: Pass up a reason why! Invalid operation in i-c-e, division by zero, 14022 /// comma, etc 14023 14024 // CheckICE - This function does the fundamental ICE checking: the returned 14025 // ICEDiag contains an ICEKind indicating whether the expression is an ICE, 14026 // and a (possibly null) SourceLocation indicating the location of the problem. 
14027 // 14028 // Note that to reduce code duplication, this helper does no evaluation 14029 // itself; the caller checks whether the expression is evaluatable, and 14030 // in the rare cases where CheckICE actually cares about the evaluated 14031 // value, it calls into Evaluate. 14032 14033 namespace { 14034 14035 enum ICEKind { 14036 /// This expression is an ICE. 14037 IK_ICE, 14038 /// This expression is not an ICE, but if it isn't evaluated, it's 14039 /// a legal subexpression for an ICE. This return value is used to handle 14040 /// the comma operator in C99 mode, and non-constant subexpressions. 14041 IK_ICEIfUnevaluated, 14042 /// This expression is not an ICE, and is not a legal subexpression for one. 14043 IK_NotICE 14044 }; 14045 14046 struct ICEDiag { 14047 ICEKind Kind; 14048 SourceLocation Loc; 14049 14050 ICEDiag(ICEKind IK, SourceLocation l) : Kind(IK), Loc(l) {} 14051 }; 14052 14053 } 14054 14055 static ICEDiag NoDiag() { return ICEDiag(IK_ICE, SourceLocation()); } 14056 14057 static ICEDiag Worst(ICEDiag A, ICEDiag B) { return A.Kind >= B.Kind ? A : B; } 14058 14059 static ICEDiag CheckEvalInICE(const Expr* E, const ASTContext &Ctx) { 14060 Expr::EvalResult EVResult; 14061 Expr::EvalStatus Status; 14062 EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpression); 14063 14064 Info.InConstantContext = true; 14065 if (!::EvaluateAsRValue(E, EVResult, Ctx, Info) || EVResult.HasSideEffects || 14066 !EVResult.Val.isInt()) 14067 return ICEDiag(IK_NotICE, E->getBeginLoc()); 14068 14069 return NoDiag(); 14070 } 14071 14072 static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) { 14073 assert(!E->isValueDependent() && "Should not see value dependent exprs!"); 14074 if (!E->getType()->isIntegralOrEnumerationType()) 14075 return ICEDiag(IK_NotICE, E->getBeginLoc()); 14076 14077 switch (E->getStmtClass()) { 14078 #define ABSTRACT_STMT(Node) 14079 #define STMT(Node, Base) case Expr::Node##Class: 14080 #define EXPR(Node, Base) 14081 #include "clang/AST/StmtNodes.inc" 14082 case Expr::PredefinedExprClass: 14083 case Expr::FloatingLiteralClass: 14084 case Expr::ImaginaryLiteralClass: 14085 case Expr::StringLiteralClass: 14086 case Expr::ArraySubscriptExprClass: 14087 case Expr::OMPArraySectionExprClass: 14088 case Expr::MemberExprClass: 14089 case Expr::CompoundAssignOperatorClass: 14090 case Expr::CompoundLiteralExprClass: 14091 case Expr::ExtVectorElementExprClass: 14092 case Expr::DesignatedInitExprClass: 14093 case Expr::ArrayInitLoopExprClass: 14094 case Expr::ArrayInitIndexExprClass: 14095 case Expr::NoInitExprClass: 14096 case Expr::DesignatedInitUpdateExprClass: 14097 case Expr::ImplicitValueInitExprClass: 14098 case Expr::ParenListExprClass: 14099 case Expr::VAArgExprClass: 14100 case Expr::AddrLabelExprClass: 14101 case Expr::StmtExprClass: 14102 case Expr::CXXMemberCallExprClass: 14103 case Expr::CUDAKernelCallExprClass: 14104 case Expr::CXXDynamicCastExprClass: 14105 case Expr::CXXTypeidExprClass: 14106 case Expr::CXXUuidofExprClass: 14107 case Expr::MSPropertyRefExprClass: 14108 case Expr::MSPropertySubscriptExprClass: 14109 case Expr::CXXNullPtrLiteralExprClass: 14110 case Expr::UserDefinedLiteralClass: 14111 case Expr::CXXThisExprClass: 14112 case Expr::CXXThrowExprClass: 14113 case Expr::CXXNewExprClass: 14114 case Expr::CXXDeleteExprClass: 14115 case Expr::CXXPseudoDestructorExprClass: 14116 case Expr::UnresolvedLookupExprClass: 14117 case Expr::TypoExprClass: 14118 case Expr::DependentScopeDeclRefExprClass: 14119 case Expr::CXXConstructExprClass: 14120 case 
Expr::CXXInheritedCtorInitExprClass: 14121 case Expr::CXXStdInitializerListExprClass: 14122 case Expr::CXXBindTemporaryExprClass: 14123 case Expr::ExprWithCleanupsClass: 14124 case Expr::CXXTemporaryObjectExprClass: 14125 case Expr::CXXUnresolvedConstructExprClass: 14126 case Expr::CXXDependentScopeMemberExprClass: 14127 case Expr::UnresolvedMemberExprClass: 14128 case Expr::ObjCStringLiteralClass: 14129 case Expr::ObjCBoxedExprClass: 14130 case Expr::ObjCArrayLiteralClass: 14131 case Expr::ObjCDictionaryLiteralClass: 14132 case Expr::ObjCEncodeExprClass: 14133 case Expr::ObjCMessageExprClass: 14134 case Expr::ObjCSelectorExprClass: 14135 case Expr::ObjCProtocolExprClass: 14136 case Expr::ObjCIvarRefExprClass: 14137 case Expr::ObjCPropertyRefExprClass: 14138 case Expr::ObjCSubscriptRefExprClass: 14139 case Expr::ObjCIsaExprClass: 14140 case Expr::ObjCAvailabilityCheckExprClass: 14141 case Expr::ShuffleVectorExprClass: 14142 case Expr::ConvertVectorExprClass: 14143 case Expr::BlockExprClass: 14144 case Expr::NoStmtClass: 14145 case Expr::OpaqueValueExprClass: 14146 case Expr::PackExpansionExprClass: 14147 case Expr::SubstNonTypeTemplateParmPackExprClass: 14148 case Expr::FunctionParmPackExprClass: 14149 case Expr::AsTypeExprClass: 14150 case Expr::ObjCIndirectCopyRestoreExprClass: 14151 case Expr::MaterializeTemporaryExprClass: 14152 case Expr::PseudoObjectExprClass: 14153 case Expr::AtomicExprClass: 14154 case Expr::LambdaExprClass: 14155 case Expr::CXXFoldExprClass: 14156 case Expr::CoawaitExprClass: 14157 case Expr::DependentCoawaitExprClass: 14158 case Expr::CoyieldExprClass: 14159 return ICEDiag(IK_NotICE, E->getBeginLoc()); 14160 14161 case Expr::InitListExprClass: { 14162 // C++03 [dcl.init]p13: If T is a scalar type, then a declaration of the 14163 // form "T x = { a };" is equivalent to "T x = a;". 14164 // Unless we're initializing a reference, T is a scalar as it is known to be 14165 // of integral or enumeration type. 
    if (E->isRValue())
      if (cast<InitListExpr>(E)->getNumInits() == 1)
        return CheckICE(cast<InitListExpr>(E)->getInit(0), Ctx);
    return ICEDiag(IK_NotICE, E->getBeginLoc());
  }

  case Expr::SizeOfPackExprClass:
  case Expr::GNUNullExprClass:
  case Expr::SourceLocExprClass:
    return NoDiag();

  case Expr::SubstNonTypeTemplateParmExprClass:
    return
      CheckICE(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(), Ctx);

  case Expr::ConstantExprClass:
    return CheckICE(cast<ConstantExpr>(E)->getSubExpr(), Ctx);

  case Expr::ParenExprClass:
    return CheckICE(cast<ParenExpr>(E)->getSubExpr(), Ctx);
  case Expr::GenericSelectionExprClass:
    return CheckICE(cast<GenericSelectionExpr>(E)->getResultExpr(), Ctx);
  case Expr::IntegerLiteralClass:
  case Expr::FixedPointLiteralClass:
  case Expr::CharacterLiteralClass:
  case Expr::ObjCBoolLiteralExprClass:
  case Expr::CXXBoolLiteralExprClass:
  case Expr::CXXScalarValueInitExprClass:
  case Expr::TypeTraitExprClass:
  case Expr::ConceptSpecializationExprClass:
  case Expr::RequiresExprClass:
  case Expr::ArrayTypeTraitExprClass:
  case Expr::ExpressionTraitExprClass:
  case Expr::CXXNoexceptExprClass:
    return NoDiag();
  case Expr::CallExprClass:
  case Expr::CXXOperatorCallExprClass: {
    // C99 6.6/3 allows function calls within unevaluated subexpressions of
    // constant expressions, but they can never be ICEs because an ICE cannot
    // contain an operand of (pointer to) function type.
    const CallExpr *CE = cast<CallExpr>(E);
    if (CE->getBuiltinCallee())
      return CheckEvalInICE(E, Ctx);
    return ICEDiag(IK_NotICE, E->getBeginLoc());
  }
  case Expr::CXXRewrittenBinaryOperatorClass:
    return CheckICE(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
                    Ctx);
  case Expr::DeclRefExprClass: {
    if (isa<EnumConstantDecl>(cast<DeclRefExpr>(E)->getDecl()))
      return NoDiag();
    const ValueDecl *D = cast<DeclRefExpr>(E)->getDecl();
    if (Ctx.getLangOpts().CPlusPlus &&
        D && IsConstNonVolatile(D->getType())) {
      // Parameter variables are never constants. Without this check,
      // getAnyInitializer() can find a default argument, which leads
      // to chaos.
      if (isa<ParmVarDecl>(D))
        return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation());

      // C++ 7.1.5.1p2
      //   A variable of non-volatile const-qualified integral or enumeration
      //   type initialized by an ICE can be used in ICEs.
      if (const VarDecl *Dcl = dyn_cast<VarDecl>(D)) {
        if (!Dcl->getType()->isIntegralOrEnumerationType())
          return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation());

        const VarDecl *VD;
        // Look for a declaration of this variable that has an initializer,
        // and check whether it is an ICE.
        if (Dcl->getAnyInitializer(VD) && VD->checkInitIsICE())
          return NoDiag();
        else
          return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation());
      }
    }
    return ICEDiag(IK_NotICE, E->getBeginLoc());
  }
  case Expr::UnaryOperatorClass: {
    const UnaryOperator *Exp = cast<UnaryOperator>(E);
    switch (Exp->getOpcode()) {
    case UO_PostInc:
    case UO_PostDec:
    case UO_PreInc:
    case UO_PreDec:
    case UO_AddrOf:
    case UO_Deref:
    case UO_Coawait:
      // C99 6.6/3 allows increment and decrement within unevaluated
      // subexpressions of constant expressions, but they can never be ICEs
      // because an ICE cannot contain an lvalue operand.
      return ICEDiag(IK_NotICE, E->getBeginLoc());
    case UO_Extension:
    case UO_LNot:
    case UO_Plus:
    case UO_Minus:
    case UO_Not:
    case UO_Real:
    case UO_Imag:
      return CheckICE(Exp->getSubExpr(), Ctx);
    }
    llvm_unreachable("invalid unary operator class");
  }
  case Expr::OffsetOfExprClass: {
    // Note that per C99, offsetof must be an ICE. And AFAIK, using
    // EvaluateAsRValue matches the proposed gcc behavior for cases like
    // "offsetof(struct s{int x[4];}, x[1.0])". This doesn't affect
    // compliance: we should warn earlier for offsetof expressions with
    // array subscripts that aren't ICEs, and if the array subscripts
    // are ICEs, the value of the offsetof must be an integer constant.
    return CheckEvalInICE(E, Ctx);
  }
  case Expr::UnaryExprOrTypeTraitExprClass: {
    const UnaryExprOrTypeTraitExpr *Exp = cast<UnaryExprOrTypeTraitExpr>(E);
    if ((Exp->getKind() == UETT_SizeOf) &&
        Exp->getTypeOfArgument()->isVariableArrayType())
      return ICEDiag(IK_NotICE, E->getBeginLoc());
    return NoDiag();
  }
  case Expr::BinaryOperatorClass: {
    const BinaryOperator *Exp = cast<BinaryOperator>(E);
    switch (Exp->getOpcode()) {
    case BO_PtrMemD:
    case BO_PtrMemI:
    case BO_Assign:
    case BO_MulAssign:
    case BO_DivAssign:
    case BO_RemAssign:
    case BO_AddAssign:
    case BO_SubAssign:
    case BO_ShlAssign:
    case BO_ShrAssign:
    case BO_AndAssign:
    case BO_XorAssign:
    case BO_OrAssign:
      // C99 6.6/3 allows assignments within unevaluated subexpressions of
      // constant expressions, but they can never be ICEs because an ICE
      // cannot contain an lvalue operand.
      return ICEDiag(IK_NotICE, E->getBeginLoc());

    case BO_Mul:
    case BO_Div:
    case BO_Rem:
    case BO_Add:
    case BO_Sub:
    case BO_Shl:
    case BO_Shr:
    case BO_LT:
    case BO_GT:
    case BO_LE:
    case BO_GE:
    case BO_EQ:
    case BO_NE:
    case BO_And:
    case BO_Xor:
    case BO_Or:
    case BO_Comma:
    case BO_Cmp: {
      ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
      ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
      if (Exp->getOpcode() == BO_Div ||
          Exp->getOpcode() == BO_Rem) {
        // EvaluateAsRValue gives an error for undefined Div/Rem, so make sure
        // we don't evaluate one.
        if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE) {
          llvm::APSInt REval = Exp->getRHS()->EvaluateKnownConstInt(Ctx);
          if (REval == 0)
            return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc());
          if (REval.isSigned() && REval.isAllOnesValue()) {
            llvm::APSInt LEval = Exp->getLHS()->EvaluateKnownConstInt(Ctx);
            if (LEval.isMinSignedValue())
              return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc());
          }
        }
      }
      if (Exp->getOpcode() == BO_Comma) {
        if (Ctx.getLangOpts().C99) {
          // C99 6.6p3 introduces a strange edge case: comma can be in an ICE
          // if it isn't evaluated.
          if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE)
            return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc());
        } else {
          // In both C89 and C++, commas in ICEs are illegal.
          return ICEDiag(IK_NotICE, E->getBeginLoc());
        }
      }
      return Worst(LHSResult, RHSResult);
    }
    case BO_LAnd:
    case BO_LOr: {
      ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
      ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
      if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICEIfUnevaluated) {
        // Rare case where the RHS has a comma "side-effect"; we need
        // to actually check the condition to see whether the side
        // with the comma is evaluated.
        if ((Exp->getOpcode() == BO_LAnd) !=
            (Exp->getLHS()->EvaluateKnownConstInt(Ctx) == 0))
          return RHSResult;
        return NoDiag();
      }

      return Worst(LHSResult, RHSResult);
    }
    }
    llvm_unreachable("invalid binary operator kind");
  }
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass: {
    const Expr *SubExpr = cast<CastExpr>(E)->getSubExpr();
    if (isa<ExplicitCastExpr>(E)) {
      if (const FloatingLiteral *FL
            = dyn_cast<FloatingLiteral>(SubExpr->IgnoreParenImpCasts())) {
        unsigned DestWidth = Ctx.getIntWidth(E->getType());
        bool DestSigned = E->getType()->isSignedIntegerOrEnumerationType();
        APSInt IgnoredVal(DestWidth, !DestSigned);
        bool Ignored;
        // If the value does not fit in the destination type, the behavior is
        // undefined, so we are not required to treat it as a constant
        // expression.
        if (FL->getValue().convertToInteger(IgnoredVal,
                                            llvm::APFloat::rmTowardZero,
                                            &Ignored) & APFloat::opInvalidOp)
          return ICEDiag(IK_NotICE, E->getBeginLoc());
        return NoDiag();
      }
    }
    switch (cast<CastExpr>(E)->getCastKind()) {
    case CK_LValueToRValue:
    case CK_AtomicToNonAtomic:
    case CK_NonAtomicToAtomic:
    case CK_NoOp:
    case CK_IntegralToBoolean:
    case CK_IntegralCast:
      return CheckICE(SubExpr, Ctx);
    default:
      return ICEDiag(IK_NotICE, E->getBeginLoc());
    }
  }
  case Expr::BinaryConditionalOperatorClass: {
    const BinaryConditionalOperator *Exp = cast<BinaryConditionalOperator>(E);
    ICEDiag CommonResult = CheckICE(Exp->getCommon(), Ctx);
    if (CommonResult.Kind == IK_NotICE) return CommonResult;
    ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
    if (FalseResult.Kind == IK_NotICE) return FalseResult;
    if (CommonResult.Kind == IK_ICEIfUnevaluated) return CommonResult;
    if (FalseResult.Kind == IK_ICEIfUnevaluated &&
        Exp->getCommon()->EvaluateKnownConstInt(Ctx) != 0) return NoDiag();
    return FalseResult;
  }
  case Expr::ConditionalOperatorClass: {
    const ConditionalOperator *Exp = cast<ConditionalOperator>(E);
    // If the condition (ignoring parens) is a __builtin_constant_p call,
    // then only the true side is actually considered in an integer constant
    // expression, and it is fully evaluated. This is an important GNU
    // extension. See GCC PR38377 for discussion.
    if (const CallExpr *CallCE
          = dyn_cast<CallExpr>(Exp->getCond()->IgnoreParenCasts()))
      if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
        return CheckEvalInICE(E, Ctx);
    ICEDiag CondResult = CheckICE(Exp->getCond(), Ctx);
    if (CondResult.Kind == IK_NotICE)
      return CondResult;

    ICEDiag TrueResult = CheckICE(Exp->getTrueExpr(), Ctx);
    ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);

    if (TrueResult.Kind == IK_NotICE)
      return TrueResult;
    if (FalseResult.Kind == IK_NotICE)
      return FalseResult;
    if (CondResult.Kind == IK_ICEIfUnevaluated)
      return CondResult;
    if (TrueResult.Kind == IK_ICE && FalseResult.Kind == IK_ICE)
      return NoDiag();
    // Rare case where the diagnostics depend on which side is evaluated.
    // Note that if we get here, CondResult is IK_ICE, and at least one of
    // TrueResult and FalseResult is IK_ICEIfUnevaluated.
    if (Exp->getCond()->EvaluateKnownConstInt(Ctx) == 0)
      return FalseResult;
    return TrueResult;
  }
  case Expr::CXXDefaultArgExprClass:
    return CheckICE(cast<CXXDefaultArgExpr>(E)->getExpr(), Ctx);
  case Expr::CXXDefaultInitExprClass:
    return CheckICE(cast<CXXDefaultInitExpr>(E)->getExpr(), Ctx);
  case Expr::ChooseExprClass: {
    return CheckICE(cast<ChooseExpr>(E)->getChosenSubExpr(), Ctx);
  }
  case Expr::BuiltinBitCastExprClass: {
    if (!checkBitCastConstexprEligibility(nullptr, Ctx, cast<CastExpr>(E)))
      return ICEDiag(IK_NotICE, E->getBeginLoc());
    return CheckICE(cast<CastExpr>(E)->getSubExpr(), Ctx);
  }
  }

  llvm_unreachable("Invalid StmtClass!");
}
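
// Illustrative behavior of the conditional-operator handling above (C mode,
// non-exhaustive): "1 ? 2 : 3 / 0" is accepted as an ICE because the division
// is never evaluated, whereas "0 ? 2 : 3 / 0" is rejected because the
// IK_ICEIfUnevaluated arm is the one selected.
//
// A minimal sketch of how callers typically consume the result (mirroring
// Expr::isIntegerConstantExpr below); the names are illustrative only:
//   ICEDiag D = CheckICE(E, Ctx);
//   if (D.Kind != IK_ICE) {
//     // Not an ICE; D.Loc points at the problematic subexpression.
//   }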

/// Evaluate an expression as a C++11 integral constant expression.
static bool EvaluateCPlusPlus11IntegralConstantExpr(const ASTContext &Ctx,
                                                    const Expr *E,
                                                    llvm::APSInt *Value,
                                                    SourceLocation *Loc) {
  if (!E->getType()->isIntegralOrUnscopedEnumerationType()) {
    if (Loc) *Loc = E->getExprLoc();
    return false;
  }

  APValue Result;
  if (!E->isCXX11ConstantExpr(Ctx, &Result, Loc))
    return false;

  if (!Result.isInt()) {
    if (Loc) *Loc = E->getExprLoc();
    return false;
  }

  if (Value) *Value = Result.getInt();
  return true;
}

bool Expr::isIntegerConstantExpr(const ASTContext &Ctx,
                                 SourceLocation *Loc) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");

  if (Ctx.getLangOpts().CPlusPlus11)
    return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, nullptr, Loc);

  ICEDiag D = CheckICE(this, Ctx);
  if (D.Kind != IK_ICE) {
    if (Loc) *Loc = D.Loc;
    return false;
  }
  return true;
}

bool Expr::isIntegerConstantExpr(llvm::APSInt &Value, const ASTContext &Ctx,
                                 SourceLocation *Loc, bool isEvaluated) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");

  if (Ctx.getLangOpts().CPlusPlus11)
    return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, &Value, Loc);

  if (!isIntegerConstantExpr(Ctx, Loc))
    return false;

  // The only possible side-effects here are due to UB discovered in the
  // evaluation (for instance, INT_MAX + 1). In such a case, we are still
  // required to treat the expression as an ICE, so we produce the folded
  // value.
  EvalResult ExprResult;
  Expr::EvalStatus Status;
  EvalInfo Info(Ctx, Status, EvalInfo::EM_IgnoreSideEffects);
  Info.InConstantContext = true;

  if (!::EvaluateAsInt(this, ExprResult, Ctx, SE_AllowSideEffects, Info))
    llvm_unreachable("ICE cannot be evaluated!");

  Value = ExprResult.Val.getInt();
  return true;
}
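
// A minimal usage sketch for the entry points above (hypothetical caller,
// e.g. checking an array bound in C mode; the names are illustrative only):
//   llvm::APSInt Size;
//   SourceLocation ErrLoc;
//   if (!SizeExpr->isIntegerConstantExpr(Size, Ctx, &ErrLoc)) {
//     // Not an ICE; ErrLoc identifies the offending subexpression.
//   }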

bool Expr::isCXX98IntegralConstantExpr(const ASTContext &Ctx) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");

  return CheckICE(this, Ctx).Kind == IK_ICE;
}

bool Expr::isCXX11ConstantExpr(const ASTContext &Ctx, APValue *Result,
                               SourceLocation *Loc) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");

  // We support this checking in C++98 mode in order to diagnose compatibility
  // issues.
  assert(Ctx.getLangOpts().CPlusPlus);

  // Build evaluation settings.
  Expr::EvalStatus Status;
  SmallVector<PartialDiagnosticAt, 8> Diags;
  Status.Diag = &Diags;
  EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpression);

  APValue Scratch;
  bool IsConstExpr =
      ::EvaluateAsRValue(Info, this, Result ? *Result : Scratch) &&
      // FIXME: We don't produce a diagnostic for this, but the callers that
      // call us on arbitrary full-expressions should generally not care.
      Info.discardCleanups() && !Status.HasSideEffects;

  if (!Diags.empty()) {
    IsConstExpr = false;
    if (Loc) *Loc = Diags[0].first;
  } else if (!IsConstExpr) {
    // FIXME: This shouldn't happen.
    if (Loc) *Loc = getExprLoc();
  }

  return IsConstExpr;
}

bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
                                    const FunctionDecl *Callee,
                                    ArrayRef<const Expr*> Args,
                                    const Expr *This) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");

  Expr::EvalStatus Status;
  EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpressionUnevaluated);
  Info.InConstantContext = true;

  LValue ThisVal;
  const LValue *ThisPtr = nullptr;
  if (This) {
#ifndef NDEBUG
    auto *MD = dyn_cast<CXXMethodDecl>(Callee);
    assert(MD && "Don't provide `this` for non-methods.");
    assert(!MD->isStatic() && "Don't provide `this` for static methods.");
#endif
    if (!This->isValueDependent() &&
        EvaluateObjectArgument(Info, This, ThisVal) &&
        !Info.EvalStatus.HasSideEffects)
      ThisPtr = &ThisVal;

    // Ignore any side-effects from a failed evaluation. This is safe because
    // they can't interfere with any other argument evaluation.
    Info.EvalStatus.HasSideEffects = false;
  }

  ArgVector ArgValues(Args.size());
  for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end();
       I != E; ++I) {
    if ((*I)->isValueDependent() ||
        !Evaluate(ArgValues[I - Args.begin()], Info, *I) ||
        Info.EvalStatus.HasSideEffects)
      // If evaluation fails, throw away the argument entirely.
      ArgValues[I - Args.begin()] = APValue();

    // Ignore any side-effects from a failed evaluation. This is safe because
    // they can't interfere with any other argument evaluation.
    Info.EvalStatus.HasSideEffects = false;
  }

  // Parameter cleanups happen in the caller and are not part of this
  // evaluation.
  Info.discardCleanups();
  Info.EvalStatus.HasSideEffects = false;

  // Build fake call to Callee.
  CallStackFrame Frame(Info, Callee->getLocation(), Callee, ThisPtr,
                       ArgValues.data());
  // FIXME: Missing ExprWithCleanups in enable_if conditions?
  FullExpressionRAII Scope(Info);
  return Evaluate(Value, Info, this) && Scope.destroy() &&
         !Info.EvalStatus.HasSideEffects;
}
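
// A minimal sketch of caller-side use, e.g. evaluating an enable_if-style
// condition against the arguments of a candidate call (hypothetical names,
// illustrative only):
//   APValue Cond;
//   if (!CondExpr->EvaluateWithSubstitution(Cond, Ctx, Callee, CallArgs))
//     ; // The condition did not evaluate to a constant.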

bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
                                   SmallVectorImpl<
                                       PartialDiagnosticAt> &Diags) {
  // FIXME: It would be useful to check constexpr function templates, but at
  // the moment the constant expression evaluator cannot cope with the
  // non-rigorous ASTs which we build for dependent expressions.
  if (FD->isDependentContext())
    return true;

  Expr::EvalStatus Status;
  Status.Diag = &Diags;

  EvalInfo Info(FD->getASTContext(), Status, EvalInfo::EM_ConstantExpression);
  Info.InConstantContext = true;
  Info.CheckingPotentialConstantExpression = true;

  // The constexpr VM attempts to compile all methods to bytecode here.
  if (Info.EnableNewConstInterp) {
    Info.Ctx.getInterpContext().isPotentialConstantExpr(Info, FD);
    return Diags.empty();
  }

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  const CXXRecordDecl *RD = MD ? MD->getParent()->getCanonicalDecl() : nullptr;

  // Fabricate an arbitrary expression on the stack and pretend that it is
  // a temporary being used as the 'this' pointer.
  LValue This;
  ImplicitValueInitExpr VIE(RD ? Info.Ctx.getRecordType(RD) : Info.Ctx.IntTy);
  This.set({&VIE, Info.CurrentCall->Index});

  ArrayRef<const Expr*> Args;

  APValue Scratch;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
    // Evaluate the call as a constant initializer, to allow the construction
    // of objects of non-literal types.
    Info.setEvaluatingDecl(This.getLValueBase(), Scratch);
    HandleConstructorCall(&VIE, This, Args, CD, Info, Scratch);
  } else {
    SourceLocation Loc = FD->getLocation();
    HandleFunctionCall(Loc, FD, (MD && MD->isInstance()) ? &This : nullptr,
                       Args, FD->getBody(), Info, Scratch, nullptr);
  }

  return Diags.empty();
}

bool Expr::isPotentialConstantExprUnevaluated(Expr *E,
                                              const FunctionDecl *FD,
                                              SmallVectorImpl<
                                                  PartialDiagnosticAt> &Diags) {
  assert(!E->isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");

  Expr::EvalStatus Status;
  Status.Diag = &Diags;

  EvalInfo Info(FD->getASTContext(), Status,
                EvalInfo::EM_ConstantExpressionUnevaluated);
  Info.InConstantContext = true;
  Info.CheckingPotentialConstantExpression = true;

  // Fabricate a call stack frame to give the arguments a plausible cover
  // story.
  ArrayRef<const Expr*> Args;
  ArgVector ArgValues(0);
  bool Success = EvaluateArgs(Args, ArgValues, Info, FD);
  (void)Success;
  assert(Success &&
         "Failed to set up arguments for potential constant evaluation");
  CallStackFrame Frame(Info, SourceLocation(), FD, nullptr, ArgValues.data());

  APValue ResultScratch;
  Evaluate(ResultScratch, Info, E);
  return Diags.empty();
}

bool Expr::tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx,
                                 unsigned Type) const {
  if (!getType()->isPointerType())
    return false;

  Expr::EvalStatus Status;
  EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold);
  return tryEvaluateBuiltinObjectSize(this, Type, Info, Result);
}
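
// A minimal usage sketch for the object-size entry point above, e.g. when
// folding a __builtin_object_size-style query (hypothetical caller; names
// are illustrative only):
//   uint64_t Size;
//   if (PtrExpr->tryEvaluateObjectSize(Size, Ctx, /*Type=*/0))
//     ; // Size now holds the computed size for the given Type encoding.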