//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/FixedPoint.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include <cstdarg>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
// Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    if (Signed)
      Result = LHSAP.sadd_ov(RHSAP, Overflow);
    else
      Result = LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    if (Signed)
      Result = LHSAP.ssub_ov(RHSAP, Overflow);
    else
      Result = LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    if (Signed)
      Result = LHSAP.smul_ov(RHSAP, Overflow);
    else
      Result = LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;                   // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E; // Entire expr, for error unsupported. May not be binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases, this
  /// operation did not follow usual arithmetic conversion and both operands
  /// might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations return
    // an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++0x nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                        const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return llvm::None;

  QualType BaseTy = Base->getType();
  if (!BaseTy->isPromotableIntegerType() ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return llvm::None;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).hasValue();
}

/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either one
  // of the unpromoted types are less than half the size of the promoted type.
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}

class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  // Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here,
        // unless the alignment-assumption sanitizer is enabled,
        // then we prefer the assumption over alignment attribute
        // on IR function param.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy =
              dyn_cast<TypedefType>(E->getType()))
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }

  /// EmitLoadOfLValue - Given an expression with complex type that represents a
  /// value l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and other fixed point or fixed point
  /// and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);
  Value *EmitFixedPointConversion(Value *Src, FixedPointSemantics &SrcFixedSema,
                                  FixedPointSemantics &DstFixedSema,
                                  SourceLocation Loc,
                                  bool DstIsInteger = false);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again. Optimize this common case.
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.Builder.CreateLoad(Address(
            Result, CGF.getContext().getTypeAlignInChars(E->getType())));
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
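  // The literal visitors below fold their expressions directly to LLVM
  // constants of the converted type; they emit no instructions.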
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
    llvm::Value *Args[] = {
        llvm::ConstantInt::get(CGF.CGM.Int32Ty, Version.getMajor()),
        llvm::ConstantInt::get(CGF.CGM.Int32Ty, Min ? *Min : 0),
        llvm::ConstantInt::get(CGF.CGM.Int32Ty, SMin ? *SMin : 0),
    };

    return CGF.EmitBuiltinAvailable(Args);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }
  Value *VisitUnaryPlus(const UnaryOperator *E) {
    // This differs from gcc, though, most likely due to a bug in gcc.
    TestAndClearIgnoreResultAssign();
    return Visit(E->getSubExpr());
  }
  Value *VisitUnaryMinus    (const UnaryOperator *E);
  Value *VisitUnaryNot      (const UnaryOperator *E);
  Value *VisitUnaryLNot     (const UnaryOperator *E);
  Value *VisitUnaryReal     (const UnaryOperator *E);
  Value *VisitUnaryImag     (const UnaryOperator *E);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
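  // EmitMul below first dispatches on the language's signed-overflow behavior
  // (defined, undefined, or trapping), then handles the constant-matrix,
  // unsigned overflow-checked, floating-point, and fixed-point cases in turn.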
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        LLVM_FALLTHROUGH;
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero, bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetWidthMinusOneValue(Value *LHS, Value *RHS);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E);
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP) \
  Value *VisitBin ## OP(const BinaryOperator *E) { \
    return Emit ## OP(EmitBinOps(E)); \
  } \
  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
  Value *VisitBin##CODE(const BinaryOperator *E) { \
    return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                       llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP

  Value *VisitBinAssign     (const BinaryOperator *E);

  Value *VisitBinLAnd       (const BinaryOperator *E);
  Value *VisitBinLOr        (const BinaryOperator *E);
  Value *VisitBinComma      (const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // to an integer).
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
        CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

  llvm::Value *GE =
      Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
      Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(Src != Dst);
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned truncation.
  // Else, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerMask Mask;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.
  return std::make_pair(Kind, std::make_pair(Check, Mask));
}

static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
    QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}

void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from larger unsigned type to smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check =
          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  // If the comparison result is 'i1 false', then the truncation was lossy.

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // NOTE: zero value is considered to be non-negative.
  auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
                                       const char *Name) -> Value * {
    // Is this value a signed type?
    bool VSigned = VType->isSignedIntegerOrEnumerationType();
    llvm::Type *VTy = V->getType();
    if (!VSigned) {
      // If the value is unsigned, then it is never negative.
      // FIXME: can we encounter non-scalar VTy here?
      return llvm::ConstantInt::getFalse(VTy->getContext());
    }
    // Get the zero of the same type with which we will be comparing.
    llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
    // %V.isnegative = icmp slt %V, 0
    // I.e is %V *strictly* less than zero, does it have negative value?
    return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                              llvm::Twine(Name) + "." + V->getName() +
                                  ".negativitycheck");
  };

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
  // 3. Now, was the 'negativity status' preserved during the conversion?
  // NOTE: conversion from negative to zero is considered to change the sign.
  // (We want to get 'false' when the conversion changed the sign)
  // So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
}

void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // Now, we do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would have been
  // dropped by the opt passes (instcombine) always anyways.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to *larger* *signed* type, then no check is needed.
  // Because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero.)
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled,
    // and this is a truncation from signed type, then no check is needed.
    // Because here sign change check is interchangeable with truncation check.
    return;
  }
  // That's it. We can't rule out any more cases with the data we have.

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled,
    // and we are truncating from larger unsigned type to smaller signed type,
    // let's handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

/// Emit a conversion from the specified type to the specified destination type,
/// both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType,
                                               SourceLocation Loc,
                                               ScalarConversionOpts Opts) {
  // All conversions involving fixed point types should be handled by the
  // EmitFixedPoint family functions. This is done to prevent bloating up this
  // function more, and although fixed point numbers are represented by
  // integers, we do not want to follow any logic that assumes they should be
  // treated as integers.
  // TODO(leonardchan): When necessary, add another if statement checking for
  // conversions to fixed point types from other types.
  if (SrcType->isFixedPointType()) {
    if (DstType->isBooleanType())
      // It is important that we check this before checking if the dest type is
      // an integer because booleans are technically integer types.
      // We do not need to check the padding bit on unsigned types if unsigned
      // padding is enabled because overflow into this bit is undefined
      // behavior.
      return Builder.CreateIsNotNull(Src, "tobool");
    if (DstType->isFixedPointType() || DstType->isIntegerType())
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion from a fixed point type to another type.");
  } else if (DstType->isFixedPointType()) {
    if (SrcType->isIntegerType())
      // This also includes converting booleans and enums to fixed point types.
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion to a fixed point type from another type.");
  }

  QualType NoncanonicalSrcType = SrcType;
  QualType NoncanonicalDstType = DstType;

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return nullptr;

  llvm::Value *OrigSrc = Src;
  QualType OrigSrcType = SrcType;
  llvm::Type *SrcTy = Src->getType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  llvm::Type *DstTy = ConvertType(DstType);

  // Cast from half through float if half isn't a native type.
  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Cast to FP using the intrinsic if the half type itself isn't supported.
    if (DstTy->isFloatingPointTy()) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
            Src);
    } else {
      // Cast to other types through float, using either the intrinsic or FPExt,
      // depending on whether the half type itself is supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        Src = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            Src);
      } else {
        Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
      }
      SrcType = CGF.getContext().FloatTy;
      SrcTy = CGF.FloatTy;
    }
  }

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy) {
    if (Opts.EmitImplicitIntegerSignChangeChecks)
      EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
                                 NoncanonicalDstType, Loc);

    return Src;
  }

  // Handle pointer conversions next: pointers can only be converted to/from
  // other pointers and integers. Check for pointer types in terms of LLVM, as
  // some native types (like Obj-C id) may map to a pointer type.
  if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(SrcTy))
      return Builder.CreateBitCast(Src, DstTy, "conv");

    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(SrcTy)) {
    // Must be a ptr to int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }

  // A scalar can be splatted to an extended vector of the same element type
  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
    // Sema should add casts to make sure that the source expression's type is
    // the same as the vector's element type (sans qualifiers)
    assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
               SrcType.getTypePtr() &&
           "Splatted expr doesn't match with vector element type?");

    // Splat the element across to all elements
    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
    return Builder.CreateVectorSplat(NumElements, Src, "splat");
  }

  if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
    // Allow bitcast from vector to integer/fp of the same size.
    unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
    unsigned DstSize = DstTy->getPrimitiveSizeInBits();
    if (SrcSize == DstSize)
      return Builder.CreateBitCast(Src, DstTy, "conv");

    // Conversions between vectors of different sizes are not allowed except
    // when vectors of half are involved. Operations on storage-only half
    // vectors require promoting half vector operands to float vectors and
    // truncating the result, which is either an int or float vector, to a
    // short or half vector.

    // Source and destination are both expected to be vectors.
    llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    (void)DstElementTy;

    assert(((SrcElementTy->isIntegerTy() &&
             DstElementTy->isIntegerTy()) ||
            (SrcElementTy->isFloatingPointTy() &&
             DstElementTy->isFloatingPointTy())) &&
           "unexpected conversion between a floating-point vector and an "
           "integer vector");

    // Truncate an i32 vector to an i16 vector.
    if (SrcElementTy->isIntegerTy())
      return Builder.CreateIntCast(Src, DstTy, false, "conv");

    // Truncate a float vector to a half vector.
    if (SrcSize > DstSize)
      return Builder.CreateFPTrunc(Src, DstTy, "conv");

    // Promote a half vector to a float vector.
    return Builder.CreateFPExt(Src, DstTy, "conv");
  }

  // Finally, we have the arithmetic types: real int/float.
  Value *Res = nullptr;
  llvm::Type *ResTy = DstTy;

  // An overflowing conversion has undefined behavior if either the source type
  // or the destination type is a floating-point type. However, we consider the
  // range of representable values for all floating-point types to be
  // [-inf,+inf], so no overflow can ever happen when the destination type is a
  // floating-point type.
  if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
      OrigSrcType->isFloatingType())
    EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
                             Loc);

  // Cast to half through float if half isn't a native type.
  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Make sure we cast in a single step if from another FP type.
    if (SrcTy->isFloatingPointTy()) {
      // Use the intrinsic if the half type itself isn't supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
      // If the half type is supported, just use an fptrunc.
      return Builder.CreateFPTrunc(Src, DstTy);
    }
    DstTy = CGF.FloatTy;
  }

  if (isa<llvm::IntegerType>(SrcTy)) {
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    if (SrcType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }
    if (isa<llvm::IntegerType>(DstTy))
      Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      Res = Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      Res = Builder.CreateUIToFP(Src, DstTy, "conv");
  } else if (isa<llvm::IntegerType>(DstTy)) {
    assert(SrcTy->isFloatingPointTy() && "Unknown real conversion");
    if (DstType->isSignedIntegerOrEnumerationType())
      Res = Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
  } else {
    assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&
           "Unknown real conversion");
    if (DstTy->getTypeID() < SrcTy->getTypeID())
      Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPExt(Src, DstTy, "conv");
  }

  if (DstTy != ResTy) {
    if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
      assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
      Res = Builder.CreateCall(
          CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
          Res);
    } else {
      Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
    }
  }

  if (Opts.EmitImplicitIntegerTruncationChecks)
    EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
                               NoncanonicalDstType, Loc);

  if (Opts.EmitImplicitIntegerSignChangeChecks)
    EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
                               NoncanonicalDstType, Loc);

  return Res;
}

Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
                                                   QualType DstTy,
                                                   SourceLocation Loc) {
  FixedPointSemantics SrcFPSema =
      CGF.getContext().getFixedPointSemantics(SrcTy);
  FixedPointSemantics DstFPSema =
      CGF.getContext().getFixedPointSemantics(DstTy);
  return EmitFixedPointConversion(Src, SrcFPSema, DstFPSema, Loc,
                                  DstTy->isIntegerType());
}

Value *ScalarExprEmitter::EmitFixedPointConversion(
    Value *Src, FixedPointSemantics &SrcFPSema, FixedPointSemantics &DstFPSema,
    SourceLocation Loc, bool DstIsInteger) {
  using llvm::APInt;
  using llvm::ConstantInt;
  using llvm::Value;

  unsigned SrcWidth = SrcFPSema.getWidth();
  unsigned DstWidth = DstFPSema.getWidth();
  unsigned SrcScale = SrcFPSema.getScale();
  unsigned DstScale = DstFPSema.getScale();
  bool SrcIsSigned = SrcFPSema.isSigned();
  bool DstIsSigned = DstFPSema.isSigned();

  llvm::Type *DstIntTy = Builder.getIntNTy(DstWidth);

  Value *Result = Src;
  unsigned ResultWidth = SrcWidth;

  // Downscale.
  if (DstScale < SrcScale) {
    // When converting to integers, we round towards zero. For negative numbers,
    // right shifting rounds towards negative infinity. In this case, we can
    // just round up before shifting.
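    // For example, with SrcScale == 2 the value -3.5 is stored as -14; an
    // arithmetic shift right by 2 would give -4, but adding the low-bit mask
    // 3 first gives -11, which shifts to -3, i.e. rounding toward zero.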
1479 if (DstIsInteger && SrcIsSigned) { 1480 Value *Zero = llvm::Constant::getNullValue(Result->getType()); 1481 Value *IsNegative = Builder.CreateICmpSLT(Result, Zero); 1482 Value *LowBits = ConstantInt::get( 1483 CGF.getLLVMContext(), APInt::getLowBitsSet(ResultWidth, SrcScale)); 1484 Value *Rounded = Builder.CreateAdd(Result, LowBits); 1485 Result = Builder.CreateSelect(IsNegative, Rounded, Result); 1486 } 1487 1488 Result = SrcIsSigned 1489 ? Builder.CreateAShr(Result, SrcScale - DstScale, "downscale") 1490 : Builder.CreateLShr(Result, SrcScale - DstScale, "downscale"); 1491 } 1492 1493 if (!DstFPSema.isSaturated()) { 1494 // Resize. 1495 Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize"); 1496 1497 // Upscale. 1498 if (DstScale > SrcScale) 1499 Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale"); 1500 } else { 1501 // Adjust the number of fractional bits. 1502 if (DstScale > SrcScale) { 1503 // Compare to DstWidth to prevent resizing twice. 1504 ResultWidth = std::max(SrcWidth + DstScale - SrcScale, DstWidth); 1505 llvm::Type *UpscaledTy = Builder.getIntNTy(ResultWidth); 1506 Result = Builder.CreateIntCast(Result, UpscaledTy, SrcIsSigned, "resize"); 1507 Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale"); 1508 } 1509 1510 // Handle saturation. 1511 bool LessIntBits = DstFPSema.getIntegralBits() < SrcFPSema.getIntegralBits(); 1512 if (LessIntBits) { 1513 Value *Max = ConstantInt::get( 1514 CGF.getLLVMContext(), 1515 APFixedPoint::getMax(DstFPSema).getValue().extOrTrunc(ResultWidth)); 1516 Value *TooHigh = SrcIsSigned ? Builder.CreateICmpSGT(Result, Max) 1517 : Builder.CreateICmpUGT(Result, Max); 1518 Result = Builder.CreateSelect(TooHigh, Max, Result, "satmax"); 1519 } 1520 // Cannot overflow min to dest type if src is unsigned since all fixed 1521 // point types can cover the unsigned min of 0. 1522 if (SrcIsSigned && (LessIntBits || !DstIsSigned)) { 1523 Value *Min = ConstantInt::get( 1524 CGF.getLLVMContext(), 1525 APFixedPoint::getMin(DstFPSema).getValue().extOrTrunc(ResultWidth)); 1526 Value *TooLow = Builder.CreateICmpSLT(Result, Min); 1527 Result = Builder.CreateSelect(TooLow, Min, Result, "satmin"); 1528 } 1529 1530 // Resize the integer part to get the final destination size. 1531 if (ResultWidth != DstWidth) 1532 Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize"); 1533 } 1534 return Result; 1535 } 1536 1537 /// Emit a conversion from the specified complex type to the specified 1538 /// destination type, where the destination type is an LLVM scalar type. 1539 Value *ScalarExprEmitter::EmitComplexToScalarConversion( 1540 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy, 1541 SourceLocation Loc) { 1542 // Get the source element type. 1543 SrcTy = SrcTy->castAs<ComplexType>()->getElementType(); 1544 1545 // Handle conversions to bool first, they are special: comparisons against 0. 1546 if (DstTy->isBooleanType()) { 1547 // Complex != 0 -> (Real != 0) | (Imag != 0) 1548 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc); 1549 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc); 1550 return Builder.CreateOr(Src.first, Src.second, "tobool"); 1551 } 1552 1553 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type, 1554 // the imaginary part of the complex value is discarded and the value of the 1555 // real part is converted according to the conversion rules for the 1556 // corresponding real type. 
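// Illustrative example (not part of the original source, hypothetical user
// code): given '_Complex double z;', the conversion 'float f = z;' uses only
// the real part, roughly 'f = (float)__real__ z;'; the imaginary part is
// discarded, as C99 6.3.1.7p2 requires.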
1557 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc); 1558 } 1559 1560 Value *ScalarExprEmitter::EmitNullValue(QualType Ty) { 1561 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty); 1562 } 1563 1564 /// Emit a sanitization check for the given "binary" operation (which 1565 /// might actually be a unary increment which has been lowered to a binary 1566 /// operation). The check passes if all values in \p Checks (which are \c i1), 1567 /// are \c true. 1568 void ScalarExprEmitter::EmitBinOpCheck( 1569 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) { 1570 assert(CGF.IsSanitizerScope); 1571 SanitizerHandler Check; 1572 SmallVector<llvm::Constant *, 4> StaticData; 1573 SmallVector<llvm::Value *, 2> DynamicData; 1574 1575 BinaryOperatorKind Opcode = Info.Opcode; 1576 if (BinaryOperator::isCompoundAssignmentOp(Opcode)) 1577 Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode); 1578 1579 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc())); 1580 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E); 1581 if (UO && UO->getOpcode() == UO_Minus) { 1582 Check = SanitizerHandler::NegateOverflow; 1583 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType())); 1584 DynamicData.push_back(Info.RHS); 1585 } else { 1586 if (BinaryOperator::isShiftOp(Opcode)) { 1587 // Shift LHS negative or too large, or RHS out of bounds. 1588 Check = SanitizerHandler::ShiftOutOfBounds; 1589 const BinaryOperator *BO = cast<BinaryOperator>(Info.E); 1590 StaticData.push_back( 1591 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType())); 1592 StaticData.push_back( 1593 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType())); 1594 } else if (Opcode == BO_Div || Opcode == BO_Rem) { 1595 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1). 1596 Check = SanitizerHandler::DivremOverflow; 1597 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty)); 1598 } else { 1599 // Arithmetic overflow (+, -, *). 1600 switch (Opcode) { 1601 case BO_Add: Check = SanitizerHandler::AddOverflow; break; 1602 case BO_Sub: Check = SanitizerHandler::SubOverflow; break; 1603 case BO_Mul: Check = SanitizerHandler::MulOverflow; break; 1604 default: llvm_unreachable("unexpected opcode for bin op check"); 1605 } 1606 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty)); 1607 } 1608 DynamicData.push_back(Info.LHS); 1609 DynamicData.push_back(Info.RHS); 1610 } 1611 1612 CGF.EmitCheck(Checks, Check, StaticData, DynamicData); 1613 } 1614 1615 //===----------------------------------------------------------------------===// 1616 // Visitor Methods 1617 //===----------------------------------------------------------------------===// 1618 1619 Value *ScalarExprEmitter::VisitExpr(Expr *E) { 1620 CGF.ErrorUnsupported(E, "scalar expression"); 1621 if (E->getType()->isVoidType()) 1622 return nullptr; 1623 return llvm::UndefValue::get(CGF.ConvertType(E->getType())); 1624 } 1625 1626 Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) { 1627 // Vector Mask Case 1628 if (E->getNumSubExprs() == 2) { 1629 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0)); 1630 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1)); 1631 Value *Mask; 1632 1633 llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType()); 1634 unsigned LHSElts = LTy->getNumElements(); 1635 1636 Mask = RHS; 1637 1638 llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType()); 1639 1640 // Mask off the high bits of each shuffle index. 
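// Illustrative example (not part of the original source): with a 4-element
// LHS, llvm::NextPowerOf2(4 - 1) - 1 == 3, so each mask element is ANDed with
// 3 and a mask value of 6 selects element 6 & 3 == 2; only the low bits needed
// to index the source vector are honored.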
1641 Value *MaskBits = 1642 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1); 1643 Mask = Builder.CreateAnd(Mask, MaskBits, "mask"); 1644 1645 // newv = undef 1646 // mask = mask & maskbits 1647 // for each elt 1648 // n = extract mask i 1649 // x = extract val n 1650 // newv = insert newv, x, i 1651 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(), 1652 MTy->getNumElements()); 1653 Value* NewV = llvm::UndefValue::get(RTy); 1654 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) { 1655 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i); 1656 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx"); 1657 1658 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt"); 1659 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins"); 1660 } 1661 return NewV; 1662 } 1663 1664 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0)); 1665 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1)); 1666 1667 SmallVector<int, 32> Indices; 1668 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) { 1669 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2); 1670 // Check for -1 and output it as undef in the IR. 1671 if (Idx.isSigned() && Idx.isAllOnesValue()) 1672 Indices.push_back(-1); 1673 else 1674 Indices.push_back(Idx.getZExtValue()); 1675 } 1676 1677 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle"); 1678 } 1679 1680 Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) { 1681 QualType SrcType = E->getSrcExpr()->getType(), 1682 DstType = E->getType(); 1683 1684 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr()); 1685 1686 SrcType = CGF.getContext().getCanonicalType(SrcType); 1687 DstType = CGF.getContext().getCanonicalType(DstType); 1688 if (SrcType == DstType) return Src; 1689 1690 assert(SrcType->isVectorType() && 1691 "ConvertVector source type must be a vector"); 1692 assert(DstType->isVectorType() && 1693 "ConvertVector destination type must be a vector"); 1694 1695 llvm::Type *SrcTy = Src->getType(); 1696 llvm::Type *DstTy = ConvertType(DstType); 1697 1698 // Ignore conversions like int -> uint. 1699 if (SrcTy == DstTy) 1700 return Src; 1701 1702 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(), 1703 DstEltType = DstType->castAs<VectorType>()->getElementType(); 1704 1705 assert(SrcTy->isVectorTy() && 1706 "ConvertVector source IR type must be a vector"); 1707 assert(DstTy->isVectorTy() && 1708 "ConvertVector destination IR type must be a vector"); 1709 1710 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(), 1711 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType(); 1712 1713 if (DstEltType->isBooleanType()) { 1714 assert((SrcEltTy->isFloatingPointTy() || 1715 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion"); 1716 1717 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy); 1718 if (SrcEltTy->isFloatingPointTy()) { 1719 return Builder.CreateFCmpUNE(Src, Zero, "tobool"); 1720 } else { 1721 return Builder.CreateICmpNE(Src, Zero, "tobool"); 1722 } 1723 } 1724 1725 // We have the arithmetic types: real int/float. 
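// Illustrative example (not part of the original source, hypothetical user
// code): with 'typedef int int4 __attribute__((ext_vector_type(4)));' and a
// matching float4, '__builtin_convertvector(i, float4)' for an int4 'i'
// reaches this point and is emitted as a single
// 'sitofp <4 x i32> %i to <4 x float>'.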
1726 Value *Res = nullptr;
1727
1728 if (isa<llvm::IntegerType>(SrcEltTy)) {
1729 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1730 if (isa<llvm::IntegerType>(DstEltTy))
1731 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1732 else if (InputSigned)
1733 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1734 else
1735 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1736 } else if (isa<llvm::IntegerType>(DstEltTy)) {
1737 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1738 if (DstEltType->isSignedIntegerOrEnumerationType())
1739 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1740 else
1741 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1742 } else {
1743 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1744 "Unknown real conversion");
1745 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1746 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1747 else
1748 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1749 }
1750
1751 return Res;
1752 }
1753
1754 Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1755 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1756 CGF.EmitIgnoredExpr(E->getBase());
1757 return CGF.emitScalarConstant(Constant, E);
1758 } else {
1759 Expr::EvalResult Result;
1760 if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1761 llvm::APSInt Value = Result.Val.getInt();
1762 CGF.EmitIgnoredExpr(E->getBase());
1763 return Builder.getInt(Value);
1764 }
1765 }
1766
1767 return EmitLoadOfLValue(E);
1768 }
1769
1770 Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1771 TestAndClearIgnoreResultAssign();
1772
1773 // Emit subscript expressions in rvalue contexts. For most cases, this just
1774 // loads the lvalue formed by the subscript expr. However, we have to be
1775 // careful, because the base of a vector subscript is occasionally an rvalue,
1776 // so we can't get it as an lvalue.
1777 if (!E->getBase()->getType()->isVectorType())
1778 return EmitLoadOfLValue(E);
1779
1780 // Handle the vector case. The base must be a vector, the index must be an
1781 // integer value.
1782 Value *Base = Visit(E->getBase());
1783 Value *Idx = Visit(E->getIdx());
1784 QualType IdxTy = E->getIdx()->getType();
1785
1786 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1787 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1788
1789 return Builder.CreateExtractElement(Base, Idx, "vecext");
1790 }
1791
1792 Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
1793 TestAndClearIgnoreResultAssign();
1794
1795 // Handle the matrix case. The base must be a matrix, and the row and column
1796 // indices must be integer values.
1797 Value *RowIdx = Visit(E->getRowIdx());
1798 Value *ColumnIdx = Visit(E->getColumnIdx());
1799 Value *Matrix = Visit(E->getBase());
1800
1801 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
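// Illustrative sketch (not part of the original source): matrix values are
// lowered to flat vectors in column-major order, so for a matrix with R rows
// an access m[row][col] becomes an extractelement at roughly the flattened
// index row + col * R.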
1802 llvm::MatrixBuilder<CGBuilderTy> MB(Builder); 1803 return MB.CreateExtractElement( 1804 Matrix, RowIdx, ColumnIdx, 1805 E->getBase()->getType()->getAs<ConstantMatrixType>()->getNumRows()); 1806 } 1807 1808 static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, 1809 unsigned Off) { 1810 int MV = SVI->getMaskValue(Idx); 1811 if (MV == -1) 1812 return -1; 1813 return Off + MV; 1814 } 1815 1816 static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) { 1817 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) && 1818 "Index operand too large for shufflevector mask!"); 1819 return C->getZExtValue(); 1820 } 1821 1822 Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { 1823 bool Ignore = TestAndClearIgnoreResultAssign(); 1824 (void)Ignore; 1825 assert (Ignore == false && "init list ignored"); 1826 unsigned NumInitElements = E->getNumInits(); 1827 1828 if (E->hadArrayRangeDesignator()) 1829 CGF.ErrorUnsupported(E, "GNU array range designator extension"); 1830 1831 llvm::VectorType *VType = 1832 dyn_cast<llvm::VectorType>(ConvertType(E->getType())); 1833 1834 if (!VType) { 1835 if (NumInitElements == 0) { 1836 // C++11 value-initialization for the scalar. 1837 return EmitNullValue(E->getType()); 1838 } 1839 // We have a scalar in braces. Just use the first element. 1840 return Visit(E->getInit(0)); 1841 } 1842 1843 unsigned ResElts = VType->getNumElements(); 1844 1845 // Loop over initializers collecting the Value for each, and remembering 1846 // whether the source was swizzle (ExtVectorElementExpr). This will allow 1847 // us to fold the shuffle for the swizzle into the shuffle for the vector 1848 // initializer, since LLVM optimizers generally do not want to touch 1849 // shuffles. 1850 unsigned CurIdx = 0; 1851 bool VIsUndefShuffle = false; 1852 llvm::Value *V = llvm::UndefValue::get(VType); 1853 for (unsigned i = 0; i != NumInitElements; ++i) { 1854 Expr *IE = E->getInit(i); 1855 Value *Init = Visit(IE); 1856 SmallVector<int, 16> Args; 1857 1858 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType()); 1859 1860 // Handle scalar elements. If the scalar initializer is actually one 1861 // element of a different vector of the same width, use shuffle instead of 1862 // extract+insert. 
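// Illustrative example (not part of the original source, hypothetical user
// code): for a float4 'v', the initializer '(float4){ v.y, v.y, 1.0f, 2.0f }'
// produces scalar elements that are extractelements from a vector of the same
// width (4), so they can be folded into shufflevector operations on 'v'
// instead of extract+insert pairs.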
1863 if (!VVT) { 1864 if (isa<ExtVectorElementExpr>(IE)) { 1865 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init); 1866 1867 if (EI->getVectorOperandType()->getNumElements() == ResElts) { 1868 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand()); 1869 Value *LHS = nullptr, *RHS = nullptr; 1870 if (CurIdx == 0) { 1871 // insert into undef -> shuffle (src, undef) 1872 // shufflemask must use an i32 1873 Args.push_back(getAsInt32(C, CGF.Int32Ty)); 1874 Args.resize(ResElts, -1); 1875 1876 LHS = EI->getVectorOperand(); 1877 RHS = V; 1878 VIsUndefShuffle = true; 1879 } else if (VIsUndefShuffle) { 1880 // insert into undefshuffle && size match -> shuffle (v, src) 1881 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V); 1882 for (unsigned j = 0; j != CurIdx; ++j) 1883 Args.push_back(getMaskElt(SVV, j, 0)); 1884 Args.push_back(ResElts + C->getZExtValue()); 1885 Args.resize(ResElts, -1); 1886 1887 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0); 1888 RHS = EI->getVectorOperand(); 1889 VIsUndefShuffle = false; 1890 } 1891 if (!Args.empty()) { 1892 V = Builder.CreateShuffleVector(LHS, RHS, Args); 1893 ++CurIdx; 1894 continue; 1895 } 1896 } 1897 } 1898 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx), 1899 "vecinit"); 1900 VIsUndefShuffle = false; 1901 ++CurIdx; 1902 continue; 1903 } 1904 1905 unsigned InitElts = VVT->getNumElements(); 1906 1907 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's 1908 // input is the same width as the vector being constructed, generate an 1909 // optimized shuffle of the swizzle input into the result. 1910 unsigned Offset = (CurIdx == 0) ? 0 : ResElts; 1911 if (isa<ExtVectorElementExpr>(IE)) { 1912 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init); 1913 Value *SVOp = SVI->getOperand(0); 1914 llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType()); 1915 1916 if (OpTy->getNumElements() == ResElts) { 1917 for (unsigned j = 0; j != CurIdx; ++j) { 1918 // If the current vector initializer is a shuffle with undef, merge 1919 // this shuffle directly into it. 1920 if (VIsUndefShuffle) { 1921 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0)); 1922 } else { 1923 Args.push_back(j); 1924 } 1925 } 1926 for (unsigned j = 0, je = InitElts; j != je; ++j) 1927 Args.push_back(getMaskElt(SVI, j, Offset)); 1928 Args.resize(ResElts, -1); 1929 1930 if (VIsUndefShuffle) 1931 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0); 1932 1933 Init = SVOp; 1934 } 1935 } 1936 1937 // Extend init to result vector length, and then shuffle its contribution 1938 // to the vector initializer into V. 1939 if (Args.empty()) { 1940 for (unsigned j = 0; j != InitElts; ++j) 1941 Args.push_back(j); 1942 Args.resize(ResElts, -1); 1943 Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT), Args, 1944 "vext"); 1945 1946 Args.clear(); 1947 for (unsigned j = 0; j != CurIdx; ++j) 1948 Args.push_back(j); 1949 for (unsigned j = 0; j != InitElts; ++j) 1950 Args.push_back(j + Offset); 1951 Args.resize(ResElts, -1); 1952 } 1953 1954 // If V is undef, make sure it ends up on the RHS of the shuffle to aid 1955 // merging subsequent shuffles into this one. 1956 if (CurIdx == 0) 1957 std::swap(V, Init); 1958 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit"); 1959 VIsUndefShuffle = isa<llvm::UndefValue>(Init); 1960 CurIdx += InitElts; 1961 } 1962 1963 // FIXME: evaluate codegen vs. shuffling against constant null vector. 1964 // Emit remaining default initializers. 
1965 llvm::Type *EltTy = VType->getElementType(); 1966 1967 // Emit remaining default initializers 1968 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) { 1969 Value *Idx = Builder.getInt32(CurIdx); 1970 llvm::Value *Init = llvm::Constant::getNullValue(EltTy); 1971 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit"); 1972 } 1973 return V; 1974 } 1975 1976 bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) { 1977 const Expr *E = CE->getSubExpr(); 1978 1979 if (CE->getCastKind() == CK_UncheckedDerivedToBase) 1980 return false; 1981 1982 if (isa<CXXThisExpr>(E->IgnoreParens())) { 1983 // We always assume that 'this' is never null. 1984 return false; 1985 } 1986 1987 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) { 1988 // And that glvalue casts are never null. 1989 if (ICE->getValueKind() != VK_RValue) 1990 return false; 1991 } 1992 1993 return true; 1994 } 1995 1996 // VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts 1997 // have to handle a more broad range of conversions than explicit casts, as they 1998 // handle things like function to ptr-to-function decay etc. 1999 Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { 2000 Expr *E = CE->getSubExpr(); 2001 QualType DestTy = CE->getType(); 2002 CastKind Kind = CE->getCastKind(); 2003 2004 // These cases are generally not written to ignore the result of 2005 // evaluating their sub-expressions, so we clear this now. 2006 bool Ignored = TestAndClearIgnoreResultAssign(); 2007 2008 // Since almost all cast kinds apply to scalars, this switch doesn't have 2009 // a default case, so the compiler will warn on a missing case. The cases 2010 // are in the same order as in the CastKind enum. 2011 switch (Kind) { 2012 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!"); 2013 case CK_BuiltinFnToFnPtr: 2014 llvm_unreachable("builtin functions are handled elsewhere"); 2015 2016 case CK_LValueBitCast: 2017 case CK_ObjCObjectLValueCast: { 2018 Address Addr = EmitLValue(E).getAddress(CGF); 2019 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy)); 2020 LValue LV = CGF.MakeAddrLValue(Addr, DestTy); 2021 return EmitLoadOfLValue(LV, CE->getExprLoc()); 2022 } 2023 2024 case CK_LValueToRValueBitCast: { 2025 LValue SourceLVal = CGF.EmitLValue(E); 2026 Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF), 2027 CGF.ConvertTypeForMem(DestTy)); 2028 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy); 2029 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo()); 2030 return EmitLoadOfLValue(DestLV, CE->getExprLoc()); 2031 } 2032 2033 case CK_CPointerToObjCPointerCast: 2034 case CK_BlockPointerToObjCPointerCast: 2035 case CK_AnyPointerToBlockPointerCast: 2036 case CK_BitCast: { 2037 Value *Src = Visit(const_cast<Expr*>(E)); 2038 llvm::Type *SrcTy = Src->getType(); 2039 llvm::Type *DstTy = ConvertType(DestTy); 2040 if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() && 2041 SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) { 2042 llvm_unreachable("wrong cast for pointers in different address spaces" 2043 "(must be an address space cast)!"); 2044 } 2045 2046 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) { 2047 if (auto PT = DestTy->getAs<PointerType>()) 2048 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src, 2049 /*MayBeNull=*/true, 2050 CodeGenFunction::CFITCK_UnrelatedCast, 2051 CE->getBeginLoc()); 2052 } 2053 2054 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) { 2055 const QualType 
SrcType = E->getType(); 2056 2057 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) { 2058 // Casting to pointer that could carry dynamic information (provided by 2059 // invariant.group) requires launder. 2060 Src = Builder.CreateLaunderInvariantGroup(Src); 2061 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) { 2062 // Casting to pointer that does not carry dynamic information (provided 2063 // by invariant.group) requires stripping it. Note that we don't do it 2064 // if the source could not be dynamic type and destination could be 2065 // dynamic because dynamic information is already laundered. It is 2066 // because launder(strip(src)) == launder(src), so there is no need to 2067 // add extra strip before launder. 2068 Src = Builder.CreateStripInvariantGroup(Src); 2069 } 2070 } 2071 2072 // Update heapallocsite metadata when there is an explicit pointer cast. 2073 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) { 2074 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) { 2075 QualType PointeeType = DestTy->getPointeeType(); 2076 if (!PointeeType.isNull()) 2077 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType, 2078 CE->getExprLoc()); 2079 } 2080 } 2081 2082 return Builder.CreateBitCast(Src, DstTy); 2083 } 2084 case CK_AddressSpaceConversion: { 2085 Expr::EvalResult Result; 2086 if (E->EvaluateAsRValue(Result, CGF.getContext()) && 2087 Result.Val.isNullPointer()) { 2088 // If E has side effect, it is emitted even if its final result is a 2089 // null pointer. In that case, a DCE pass should be able to 2090 // eliminate the useless instructions emitted during translating E. 2091 if (Result.HasSideEffects) 2092 Visit(E); 2093 return CGF.CGM.getNullPointer(cast<llvm::PointerType>( 2094 ConvertType(DestTy)), DestTy); 2095 } 2096 // Since target may map different address spaces in AST to the same address 2097 // space, an address space conversion may end up as a bitcast. 2098 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast( 2099 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(), 2100 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy)); 2101 } 2102 case CK_AtomicToNonAtomic: 2103 case CK_NonAtomicToAtomic: 2104 case CK_NoOp: 2105 case CK_UserDefinedConversion: 2106 return Visit(const_cast<Expr*>(E)); 2107 2108 case CK_BaseToDerived: { 2109 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl(); 2110 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!"); 2111 2112 Address Base = CGF.EmitPointerWithAlignment(E); 2113 Address Derived = 2114 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl, 2115 CE->path_begin(), CE->path_end(), 2116 CGF.ShouldNullCheckClassCastValue(CE)); 2117 2118 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is 2119 // performed and the object is not of the derived type. 2120 if (CGF.sanitizePerformTypeCheck()) 2121 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(), 2122 Derived.getPointer(), DestTy->getPointeeType()); 2123 2124 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast)) 2125 CGF.EmitVTablePtrCheckForCast( 2126 DestTy->getPointeeType(), Derived.getPointer(), 2127 /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast, 2128 CE->getBeginLoc()); 2129 2130 return Derived.getPointer(); 2131 } 2132 case CK_UncheckedDerivedToBase: 2133 case CK_DerivedToBase: { 2134 // The EmitPointerWithAlignment path does this fine; just discard 2135 // the alignment. 
2136 return CGF.EmitPointerWithAlignment(CE).getPointer(); 2137 } 2138 2139 case CK_Dynamic: { 2140 Address V = CGF.EmitPointerWithAlignment(E); 2141 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE); 2142 return CGF.EmitDynamicCast(V, DCE); 2143 } 2144 2145 case CK_ArrayToPointerDecay: 2146 return CGF.EmitArrayToPointerDecay(E).getPointer(); 2147 case CK_FunctionToPointerDecay: 2148 return EmitLValue(E).getPointer(CGF); 2149 2150 case CK_NullToPointer: 2151 if (MustVisitNullValue(E)) 2152 CGF.EmitIgnoredExpr(E); 2153 2154 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)), 2155 DestTy); 2156 2157 case CK_NullToMemberPointer: { 2158 if (MustVisitNullValue(E)) 2159 CGF.EmitIgnoredExpr(E); 2160 2161 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>(); 2162 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT); 2163 } 2164 2165 case CK_ReinterpretMemberPointer: 2166 case CK_BaseToDerivedMemberPointer: 2167 case CK_DerivedToBaseMemberPointer: { 2168 Value *Src = Visit(E); 2169 2170 // Note that the AST doesn't distinguish between checked and 2171 // unchecked member pointer conversions, so we always have to 2172 // implement checked conversions here. This is inefficient when 2173 // actual control flow may be required in order to perform the 2174 // check, which it is for data member pointers (but not member 2175 // function pointers on Itanium and ARM). 2176 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src); 2177 } 2178 2179 case CK_ARCProduceObject: 2180 return CGF.EmitARCRetainScalarExpr(E); 2181 case CK_ARCConsumeObject: 2182 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E)); 2183 case CK_ARCReclaimReturnedObject: 2184 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored); 2185 case CK_ARCExtendBlockObject: 2186 return CGF.EmitARCExtendBlockObject(E); 2187 2188 case CK_CopyAndAutoreleaseBlockObject: 2189 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType()); 2190 2191 case CK_FloatingRealToComplex: 2192 case CK_FloatingComplexCast: 2193 case CK_IntegralRealToComplex: 2194 case CK_IntegralComplexCast: 2195 case CK_IntegralComplexToFloatingComplex: 2196 case CK_FloatingComplexToIntegralComplex: 2197 case CK_ConstructorConversion: 2198 case CK_ToUnion: 2199 llvm_unreachable("scalar cast to non-scalar value"); 2200 2201 case CK_LValueToRValue: 2202 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy)); 2203 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); 2204 return Visit(const_cast<Expr*>(E)); 2205 2206 case CK_IntegralToPointer: { 2207 Value *Src = Visit(const_cast<Expr*>(E)); 2208 2209 // First, convert to the correct width so that we control the kind of 2210 // extension. 2211 auto DestLLVMTy = ConvertType(DestTy); 2212 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy); 2213 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType(); 2214 llvm::Value* IntResult = 2215 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv"); 2216 2217 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy); 2218 2219 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) { 2220 // Going from integer to pointer that could be dynamic requires reloading 2221 // dynamic information from invariant.group. 
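// Illustrative example (not part of the original source, hypothetical user
// code): under -fstrict-vtable-pointers, 'A *a = (A *)(uintptr_t)p;' where A
// may be a dynamic class must not reuse vtable loads proven for 'p', so the
// inttoptr result is wrapped in llvm.launder.invariant.group here.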
2222 if (DestTy.mayBeDynamicClass())
2223 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2224 }
2225 return IntToPtr;
2226 }
2227 case CK_PointerToIntegral: {
2228 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2229 auto *PtrExpr = Visit(E);
2230
2231 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2232 const QualType SrcType = E->getType();
2233
2234 // Casting to an integer requires stripping dynamic information, as the
2235 // integer does not carry it.
2236 if (SrcType.mayBeDynamicClass())
2237 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2238 }
2239
2240 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2241 }
2242 case CK_ToVoid: {
2243 CGF.EmitIgnoredExpr(E);
2244 return nullptr;
2245 }
2246 case CK_VectorSplat: {
2247 llvm::Type *DstTy = ConvertType(DestTy);
2248 Value *Elt = Visit(const_cast<Expr*>(E));
2249 // Splat the element across to all elements
2250 unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
2251 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2252 }
2253
2254 case CK_FixedPointCast:
2255 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2256 CE->getExprLoc());
2257
2258 case CK_FixedPointToBoolean:
2259 assert(E->getType()->isFixedPointType() &&
2260 "Expected src type to be fixed point type");
2261 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2262 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2263 CE->getExprLoc());
2264
2265 case CK_FixedPointToIntegral:
2266 assert(E->getType()->isFixedPointType() &&
2267 "Expected src type to be fixed point type");
2268 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2269 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2270 CE->getExprLoc());
2271
2272 case CK_IntegralToFixedPoint:
2273 assert(E->getType()->isIntegerType() &&
2274 "Expected src type to be an integer");
2275 assert(DestTy->isFixedPointType() &&
2276 "Expected dest type to be fixed point type");
2277 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2278 CE->getExprLoc());
2279
2280 case CK_IntegralCast: {
2281 ScalarConversionOpts Opts;
2282 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2283 if (!ICE->isPartOfExplicitCast())
2284 Opts = ScalarConversionOpts(CGF.SanOpts);
2285 }
2286 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2287 CE->getExprLoc(), Opts);
2288 }
2289 case CK_IntegralToFloating:
2290 case CK_FloatingToIntegral:
2291 case CK_FloatingCast:
2292 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2293 CE->getExprLoc());
2294 case CK_BooleanToSignedIntegral: {
2295 ScalarConversionOpts Opts;
2296 Opts.TreatBooleanAsSigned = true;
2297 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2298 CE->getExprLoc(), Opts);
2299 }
2300 case CK_IntegralToBoolean:
2301 return EmitIntToBoolConversion(Visit(E));
2302 case CK_PointerToBoolean:
2303 return EmitPointerToBoolConversion(Visit(E), E->getType());
2304 case CK_FloatingToBoolean:
2305 return EmitFloatToBoolConversion(Visit(E));
2306 case CK_MemberPointerToBoolean: {
2307 llvm::Value *MemPtr = Visit(E);
2308 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2309 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2310 }
2311
2312 case CK_FloatingComplexToReal:
2313 case CK_IntegralComplexToReal:
2314 return CGF.EmitComplexExpr(E, false, true).first;
2315
2316 case CK_FloatingComplexToBoolean:
2317 case CK_IntegralComplexToBoolean: {
2318
CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E); 2319 2320 // TODO: kill this function off, inline appropriate case here 2321 return EmitComplexToScalarConversion(V, E->getType(), DestTy, 2322 CE->getExprLoc()); 2323 } 2324 2325 case CK_ZeroToOCLOpaqueType: { 2326 assert((DestTy->isEventT() || DestTy->isQueueT() || 2327 DestTy->isOCLIntelSubgroupAVCType()) && 2328 "CK_ZeroToOCLEvent cast on non-event type"); 2329 return llvm::Constant::getNullValue(ConvertType(DestTy)); 2330 } 2331 2332 case CK_IntToOCLSampler: 2333 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF); 2334 2335 } // end of switch 2336 2337 llvm_unreachable("unknown scalar cast"); 2338 } 2339 2340 Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) { 2341 CodeGenFunction::StmtExprEvaluation eval(CGF); 2342 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(), 2343 !E->getType()->isVoidType()); 2344 if (!RetAlloca.isValid()) 2345 return nullptr; 2346 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()), 2347 E->getExprLoc()); 2348 } 2349 2350 Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { 2351 CodeGenFunction::RunCleanupsScope Scope(CGF); 2352 Value *V = Visit(E->getSubExpr()); 2353 // Defend against dominance problems caused by jumps out of expression 2354 // evaluation through the shared cleanup block. 2355 Scope.ForceCleanup({&V}); 2356 return V; 2357 } 2358 2359 //===----------------------------------------------------------------------===// 2360 // Unary Operators 2361 //===----------------------------------------------------------------------===// 2362 2363 static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, 2364 llvm::Value *InVal, bool IsInc, 2365 FPOptions FPFeatures) { 2366 BinOpInfo BinOp; 2367 BinOp.LHS = InVal; 2368 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false); 2369 BinOp.Ty = E->getType(); 2370 BinOp.Opcode = IsInc ? BO_Add : BO_Sub; 2371 BinOp.FPFeatures = FPFeatures; 2372 BinOp.E = E; 2373 return BinOp; 2374 } 2375 2376 llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior( 2377 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) { 2378 llvm::Value *Amount = 2379 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true); 2380 StringRef Name = IsInc ? "inc" : "dec"; 2381 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 2382 case LangOptions::SOB_Defined: 2383 return Builder.CreateAdd(InVal, Amount, Name); 2384 case LangOptions::SOB_Undefined: 2385 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 2386 return Builder.CreateNSWAdd(InVal, Amount, Name); 2387 LLVM_FALLTHROUGH; 2388 case LangOptions::SOB_Trapping: 2389 if (!E->canOverflow()) 2390 return Builder.CreateNSWAdd(InVal, Amount, Name); 2391 return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec( 2392 E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts()))); 2393 } 2394 llvm_unreachable("Unknown SignedOverflowBehaviorTy"); 2395 } 2396 2397 namespace { 2398 /// Handles check and update for lastprivate conditional variables. 
2399 class OMPLastprivateConditionalUpdateRAII { 2400 private: 2401 CodeGenFunction &CGF; 2402 const UnaryOperator *E; 2403 2404 public: 2405 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF, 2406 const UnaryOperator *E) 2407 : CGF(CGF), E(E) {} 2408 ~OMPLastprivateConditionalUpdateRAII() { 2409 if (CGF.getLangOpts().OpenMP) 2410 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional( 2411 CGF, E->getSubExpr()); 2412 } 2413 }; 2414 } // namespace 2415 2416 llvm::Value * 2417 ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, 2418 bool isInc, bool isPre) { 2419 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E); 2420 QualType type = E->getSubExpr()->getType(); 2421 llvm::PHINode *atomicPHI = nullptr; 2422 llvm::Value *value; 2423 llvm::Value *input; 2424 2425 int amount = (isInc ? 1 : -1); 2426 bool isSubtraction = !isInc; 2427 2428 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) { 2429 type = atomicTy->getValueType(); 2430 if (isInc && type->isBooleanType()) { 2431 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type); 2432 if (isPre) { 2433 Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified()) 2434 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent); 2435 return Builder.getTrue(); 2436 } 2437 // For atomic bool increment, we just store true and return it for 2438 // preincrement, do an atomic swap with true for postincrement 2439 return Builder.CreateAtomicRMW( 2440 llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True, 2441 llvm::AtomicOrdering::SequentiallyConsistent); 2442 } 2443 // Special case for atomic increment / decrement on integers, emit 2444 // atomicrmw instructions. We skip this if we want to be doing overflow 2445 // checking, and fall into the slow path with the atomic cmpxchg loop. 2446 if (!type->isBooleanType() && type->isIntegerType() && 2447 !(type->isUnsignedIntegerType() && 2448 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) && 2449 CGF.getLangOpts().getSignedOverflowBehavior() != 2450 LangOptions::SOB_Trapping) { 2451 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add : 2452 llvm::AtomicRMWInst::Sub; 2453 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add : 2454 llvm::Instruction::Sub; 2455 llvm::Value *amt = CGF.EmitToMemory( 2456 llvm::ConstantInt::get(ConvertType(type), 1, true), type); 2457 llvm::Value *old = 2458 Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt, 2459 llvm::AtomicOrdering::SequentiallyConsistent); 2460 return isPre ? Builder.CreateBinOp(op, old, amt) : old; 2461 } 2462 value = EmitLoadOfLValue(LV, E->getExprLoc()); 2463 input = value; 2464 // For every other atomic operation, we need to emit a load-op-cmpxchg loop 2465 llvm::BasicBlock *startBB = Builder.GetInsertBlock(); 2466 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn); 2467 value = CGF.EmitToMemory(value, type); 2468 Builder.CreateBr(opBB); 2469 Builder.SetInsertPoint(opBB); 2470 atomicPHI = Builder.CreatePHI(value->getType(), 2); 2471 atomicPHI->addIncoming(value, startBB); 2472 value = atomicPHI; 2473 } else { 2474 value = EmitLoadOfLValue(LV, E->getExprLoc()); 2475 input = value; 2476 } 2477 2478 // Special case of integer increment that we have to check first: bool++. 2479 // Due to promotion rules, we get: 2480 // bool++ -> bool = bool + 1 2481 // -> bool = (int)bool + 1 2482 // -> bool = ((int)bool + 1 != 0) 2483 // An interesting aspect of this is that increment is always true. 2484 // Decrement does not have this property. 
2485 if (isInc && type->isBooleanType()) {
2486 value = Builder.getTrue();
2487
2488 // Most common case by far: integer increment.
2489 } else if (type->isIntegerType()) {
2490 QualType promotedType;
2491 bool canPerformLossyDemotionCheck = false;
2492 if (type->isPromotableIntegerType()) {
2493 promotedType = CGF.getContext().getPromotedIntegerType(type);
2494 assert(promotedType != type && "Shouldn't promote to the same type.");
2495 canPerformLossyDemotionCheck = true;
2496 canPerformLossyDemotionCheck &=
2497 CGF.getContext().getCanonicalType(type) !=
2498 CGF.getContext().getCanonicalType(promotedType);
2499 canPerformLossyDemotionCheck &=
2500 PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
2501 type, promotedType);
2502 assert((!canPerformLossyDemotionCheck ||
2503 type->isSignedIntegerOrEnumerationType() ||
2504 promotedType->isSignedIntegerOrEnumerationType() ||
2505 ConvertType(type)->getScalarSizeInBits() ==
2506 ConvertType(promotedType)->getScalarSizeInBits()) &&
2507 "The following check expects that if we do promotion to different "
2508 "underlying canonical type, at least one of the types (either "
2509 "base or promoted) will be signed, or the bitwidths will match.");
2510 }
2511 if (CGF.SanOpts.hasOneOf(
2512 SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
2513 canPerformLossyDemotionCheck) {
2514 // While `x += 1` (for `x` with width less than int) is modeled as
2515 // promotion + arithmetic + demotion, where a lossy demotion is easy to
2516 // catch, inc/dec on a type narrower than int cannot overflow because of
2517 // promotion rules, so we normally elide the promotion and demotion and
2518 // therefore cannot catch a lossy "demotion". Because we still want to
2519 // catch these cases when the sanitizer is enabled, we perform the
2520 // promotion, then perform the increment/decrement in the wider type, and
2521 // finally perform the demotion. This will catch lossy demotions.
2522
2523 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
2524 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2525 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2526 // Do pass non-default ScalarConversionOpts so that the sanitizer check is
2527 // emitted.
2528 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
2529 ScalarConversionOpts(CGF.SanOpts));
2530
2531 // Note that signed integer inc/dec with width less than int can't
2532 // overflow because of promotion rules; we're just eliding a few steps
2533 // here.
2534 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2535 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2536 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2537 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2538 value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2539 E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2540 } else {
2541 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2542 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2543 }
2544
2545 // Next most common: pointer increment.
2546 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2547 QualType type = ptr->getPointeeType();
2548
2549 // VLA types don't have constant size.
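// Illustrative example (not part of the original source, hypothetical user
// code): in 'void f(int n, int (*p)[n]) { ++p; }' the increment must advance
// by n ints, where n is only known at run time, so the element count comes
// from CGF.getVLASize(vla) rather than from a constant GEP index.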
2550 if (const VariableArrayType *vla 2551 = CGF.getContext().getAsVariableArrayType(type)) { 2552 llvm::Value *numElts = CGF.getVLASize(vla).NumElts; 2553 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize"); 2554 if (CGF.getLangOpts().isSignedOverflowDefined()) 2555 value = Builder.CreateGEP(value, numElts, "vla.inc"); 2556 else 2557 value = CGF.EmitCheckedInBoundsGEP( 2558 value, numElts, /*SignedIndices=*/false, isSubtraction, 2559 E->getExprLoc(), "vla.inc"); 2560 2561 // Arithmetic on function pointers (!) is just +-1. 2562 } else if (type->isFunctionType()) { 2563 llvm::Value *amt = Builder.getInt32(amount); 2564 2565 value = CGF.EmitCastToVoidPtr(value); 2566 if (CGF.getLangOpts().isSignedOverflowDefined()) 2567 value = Builder.CreateGEP(value, amt, "incdec.funcptr"); 2568 else 2569 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false, 2570 isSubtraction, E->getExprLoc(), 2571 "incdec.funcptr"); 2572 value = Builder.CreateBitCast(value, input->getType()); 2573 2574 // For everything else, we can just do a simple increment. 2575 } else { 2576 llvm::Value *amt = Builder.getInt32(amount); 2577 if (CGF.getLangOpts().isSignedOverflowDefined()) 2578 value = Builder.CreateGEP(value, amt, "incdec.ptr"); 2579 else 2580 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false, 2581 isSubtraction, E->getExprLoc(), 2582 "incdec.ptr"); 2583 } 2584 2585 // Vector increment/decrement. 2586 } else if (type->isVectorType()) { 2587 if (type->hasIntegerRepresentation()) { 2588 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount); 2589 2590 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec"); 2591 } else { 2592 value = Builder.CreateFAdd( 2593 value, 2594 llvm::ConstantFP::get(value->getType(), amount), 2595 isInc ? "inc" : "dec"); 2596 } 2597 2598 // Floating point. 2599 } else if (type->isRealFloatingType()) { 2600 // Add the inc/dec to the real part. 2601 llvm::Value *amt; 2602 2603 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { 2604 // Another special case: half FP increment should be done via float 2605 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { 2606 value = Builder.CreateCall( 2607 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, 2608 CGF.CGM.FloatTy), 2609 input, "incdec.conv"); 2610 } else { 2611 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv"); 2612 } 2613 } 2614 2615 if (value->getType()->isFloatTy()) 2616 amt = llvm::ConstantFP::get(VMContext, 2617 llvm::APFloat(static_cast<float>(amount))); 2618 else if (value->getType()->isDoubleTy()) 2619 amt = llvm::ConstantFP::get(VMContext, 2620 llvm::APFloat(static_cast<double>(amount))); 2621 else { 2622 // Remaining types are Half, LongDouble or __float128. Convert from float. 2623 llvm::APFloat F(static_cast<float>(amount)); 2624 bool ignored; 2625 const llvm::fltSemantics *FS; 2626 // Don't use getFloatTypeSemantics because Half isn't 2627 // necessarily represented using the "half" LLVM type. 2628 if (value->getType()->isFP128Ty()) 2629 FS = &CGF.getTarget().getFloat128Format(); 2630 else if (value->getType()->isHalfTy()) 2631 FS = &CGF.getTarget().getHalfFormat(); 2632 else 2633 FS = &CGF.getTarget().getLongDoubleFormat(); 2634 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored); 2635 amt = llvm::ConstantFP::get(VMContext, F); 2636 } 2637 value = Builder.CreateFAdd(value, amt, isInc ? 
"inc" : "dec"); 2638 2639 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { 2640 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { 2641 value = Builder.CreateCall( 2642 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, 2643 CGF.CGM.FloatTy), 2644 value, "incdec.conv"); 2645 } else { 2646 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv"); 2647 } 2648 } 2649 2650 // Fixed-point types. 2651 } else if (type->isFixedPointType()) { 2652 // Fixed-point types are tricky. In some cases, it isn't possible to 2653 // represent a 1 or a -1 in the type at all. Piggyback off of 2654 // EmitFixedPointBinOp to avoid having to reimplement saturation. 2655 BinOpInfo Info; 2656 Info.E = E; 2657 Info.Ty = E->getType(); 2658 Info.Opcode = isInc ? BO_Add : BO_Sub; 2659 Info.LHS = value; 2660 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false); 2661 // If the type is signed, it's better to represent this as +(-1) or -(-1), 2662 // since -1 is guaranteed to be representable. 2663 if (type->isSignedFixedPointType()) { 2664 Info.Opcode = isInc ? BO_Sub : BO_Add; 2665 Info.RHS = Builder.CreateNeg(Info.RHS); 2666 } 2667 // Now, convert from our invented integer literal to the type of the unary 2668 // op. This will upscale and saturate if necessary. This value can become 2669 // undef in some cases. 2670 FixedPointSemantics SrcSema = 2671 FixedPointSemantics::GetIntegerSemantics(value->getType() 2672 ->getScalarSizeInBits(), 2673 /*IsSigned=*/true); 2674 FixedPointSemantics DstSema = 2675 CGF.getContext().getFixedPointSemantics(Info.Ty); 2676 Info.RHS = EmitFixedPointConversion(Info.RHS, SrcSema, DstSema, 2677 E->getExprLoc()); 2678 value = EmitFixedPointBinOp(Info); 2679 2680 // Objective-C pointer types. 2681 } else { 2682 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>(); 2683 value = CGF.EmitCastToVoidPtr(value); 2684 2685 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType()); 2686 if (!isInc) size = -size; 2687 llvm::Value *sizeValue = 2688 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity()); 2689 2690 if (CGF.getLangOpts().isSignedOverflowDefined()) 2691 value = Builder.CreateGEP(value, sizeValue, "incdec.objptr"); 2692 else 2693 value = CGF.EmitCheckedInBoundsGEP(value, sizeValue, 2694 /*SignedIndices=*/false, isSubtraction, 2695 E->getExprLoc(), "incdec.objptr"); 2696 value = Builder.CreateBitCast(value, input->getType()); 2697 } 2698 2699 if (atomicPHI) { 2700 llvm::BasicBlock *curBlock = Builder.GetInsertBlock(); 2701 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); 2702 auto Pair = CGF.EmitAtomicCompareExchange( 2703 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc()); 2704 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type); 2705 llvm::Value *success = Pair.second; 2706 atomicPHI->addIncoming(old, curBlock); 2707 Builder.CreateCondBr(success, contBB, atomicPHI->getParent()); 2708 Builder.SetInsertPoint(contBB); 2709 return isPre ? value : input; 2710 } 2711 2712 // Store the updated result through the lvalue. 2713 if (LV.isBitField()) 2714 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value); 2715 else 2716 CGF.EmitStoreThroughLValue(RValue::get(value), LV); 2717 2718 // If this is a postinc, return the value read from memory, otherwise use the 2719 // updated value. 2720 return isPre ? 
value : input; 2721 } 2722 2723 2724 2725 Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) { 2726 TestAndClearIgnoreResultAssign(); 2727 Value *Op = Visit(E->getSubExpr()); 2728 2729 // Generate a unary FNeg for FP ops. 2730 if (Op->getType()->isFPOrFPVectorTy()) 2731 return Builder.CreateFNeg(Op, "fneg"); 2732 2733 // Emit unary minus with EmitSub so we handle overflow cases etc. 2734 BinOpInfo BinOp; 2735 BinOp.RHS = Op; 2736 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType()); 2737 BinOp.Ty = E->getType(); 2738 BinOp.Opcode = BO_Sub; 2739 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); 2740 BinOp.E = E; 2741 return EmitSub(BinOp); 2742 } 2743 2744 Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) { 2745 TestAndClearIgnoreResultAssign(); 2746 Value *Op = Visit(E->getSubExpr()); 2747 return Builder.CreateNot(Op, "neg"); 2748 } 2749 2750 Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { 2751 // Perform vector logical not on comparison with zero vector. 2752 if (E->getType()->isVectorType() && 2753 E->getType()->castAs<VectorType>()->getVectorKind() == 2754 VectorType::GenericVector) { 2755 Value *Oper = Visit(E->getSubExpr()); 2756 Value *Zero = llvm::Constant::getNullValue(Oper->getType()); 2757 Value *Result; 2758 if (Oper->getType()->isFPOrFPVectorTy()) { 2759 CodeGenFunction::CGFPOptionsRAII FPOptsRAII( 2760 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts())); 2761 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp"); 2762 } else 2763 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp"); 2764 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); 2765 } 2766 2767 // Compare operand to zero. 2768 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr()); 2769 2770 // Invert value. 2771 // TODO: Could dynamically modify easy computations here. For example, if 2772 // the operand is an icmp ne, turn into icmp eq. 2773 BoolVal = Builder.CreateNot(BoolVal, "lnot"); 2774 2775 // ZExt result to the expr type. 2776 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext"); 2777 } 2778 2779 Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) { 2780 // Try folding the offsetof to a constant. 2781 Expr::EvalResult EVResult; 2782 if (E->EvaluateAsInt(EVResult, CGF.getContext())) { 2783 llvm::APSInt Value = EVResult.Val.getInt(); 2784 return Builder.getInt(Value); 2785 } 2786 2787 // Loop over the components of the offsetof to compute the value. 
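// Illustrative example (not part of the original source, hypothetical user
// code): for 'struct S { int a; struct T { float f; } b[10]; };',
// '__builtin_offsetof(struct S, b[i].f)' has the components Field 'b',
// Array '[i]', and Field 'f'; the loop below accumulates
// offsetof(b) + i * sizeof(struct T) + offsetof(f), emitting a runtime
// multiply for the array index when 'i' is not a constant.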
2788 unsigned n = E->getNumComponents(); 2789 llvm::Type* ResultType = ConvertType(E->getType()); 2790 llvm::Value* Result = llvm::Constant::getNullValue(ResultType); 2791 QualType CurrentType = E->getTypeSourceInfo()->getType(); 2792 for (unsigned i = 0; i != n; ++i) { 2793 OffsetOfNode ON = E->getComponent(i); 2794 llvm::Value *Offset = nullptr; 2795 switch (ON.getKind()) { 2796 case OffsetOfNode::Array: { 2797 // Compute the index 2798 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex()); 2799 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr); 2800 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType(); 2801 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv"); 2802 2803 // Save the element type 2804 CurrentType = 2805 CGF.getContext().getAsArrayType(CurrentType)->getElementType(); 2806 2807 // Compute the element size 2808 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType, 2809 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity()); 2810 2811 // Multiply out to compute the result 2812 Offset = Builder.CreateMul(Idx, ElemSize); 2813 break; 2814 } 2815 2816 case OffsetOfNode::Field: { 2817 FieldDecl *MemberDecl = ON.getField(); 2818 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl(); 2819 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD); 2820 2821 // Compute the index of the field in its parent. 2822 unsigned i = 0; 2823 // FIXME: It would be nice if we didn't have to loop here! 2824 for (RecordDecl::field_iterator Field = RD->field_begin(), 2825 FieldEnd = RD->field_end(); 2826 Field != FieldEnd; ++Field, ++i) { 2827 if (*Field == MemberDecl) 2828 break; 2829 } 2830 assert(i < RL.getFieldCount() && "offsetof field in wrong type"); 2831 2832 // Compute the offset to the field 2833 int64_t OffsetInt = RL.getFieldOffset(i) / 2834 CGF.getContext().getCharWidth(); 2835 Offset = llvm::ConstantInt::get(ResultType, OffsetInt); 2836 2837 // Save the element type. 2838 CurrentType = MemberDecl->getType(); 2839 break; 2840 } 2841 2842 case OffsetOfNode::Identifier: 2843 llvm_unreachable("dependent __builtin_offsetof"); 2844 2845 case OffsetOfNode::Base: { 2846 if (ON.getBase()->isVirtual()) { 2847 CGF.ErrorUnsupported(E, "virtual base in offsetof"); 2848 continue; 2849 } 2850 2851 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl(); 2852 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD); 2853 2854 // Save the element type. 2855 CurrentType = ON.getBase()->getType(); 2856 2857 // Compute the offset to the base. 2858 const RecordType *BaseRT = CurrentType->getAs<RecordType>(); 2859 CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl()); 2860 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD); 2861 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity()); 2862 break; 2863 } 2864 } 2865 Result = Builder.CreateAdd(Result, Offset); 2866 } 2867 return Result; 2868 } 2869 2870 /// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of 2871 /// argument of the sizeof expression as an integer. 2872 Value * 2873 ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( 2874 const UnaryExprOrTypeTraitExpr *E) { 2875 QualType TypeToSize = E->getTypeOfArgument(); 2876 if (E->getKind() == UETT_SizeOf) { 2877 if (const VariableArrayType *VAT = 2878 CGF.getContext().getAsVariableArrayType(TypeToSize)) { 2879 if (E->isArgumentType()) { 2880 // sizeof(type) - make sure to emit the VLA size. 
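// Illustrative example (not part of the original source, hypothetical user
// code): in 'void f(int n) { size_t s = sizeof(int[n][4]); }' the size is not
// a constant; getVLASize() below yields the runtime element count, which is
// then scaled by the constant element size, computing roughly
// n * sizeof(int[4]) with an NUW multiply.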
2881 CGF.EmitVariablyModifiedType(TypeToSize); 2882 } else { 2883 // C99 6.5.3.4p2: If the argument is an expression of type 2884 // VLA, it is evaluated. 2885 CGF.EmitIgnoredExpr(E->getArgumentExpr()); 2886 } 2887 2888 auto VlaSize = CGF.getVLASize(VAT); 2889 llvm::Value *size = VlaSize.NumElts; 2890 2891 // Scale the number of non-VLA elements by the non-VLA element size. 2892 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type); 2893 if (!eltSize.isOne()) 2894 size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size); 2895 2896 return size; 2897 } 2898 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) { 2899 auto Alignment = 2900 CGF.getContext() 2901 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign( 2902 E->getTypeOfArgument()->getPointeeType())) 2903 .getQuantity(); 2904 return llvm::ConstantInt::get(CGF.SizeTy, Alignment); 2905 } 2906 2907 // If this isn't sizeof(vla), the result must be constant; use the constant 2908 // folding logic so we don't have to duplicate it here. 2909 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext())); 2910 } 2911 2912 Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) { 2913 Expr *Op = E->getSubExpr(); 2914 if (Op->getType()->isAnyComplexType()) { 2915 // If it's an l-value, load through the appropriate subobject l-value. 2916 // Note that we have to ask E because Op might be an l-value that 2917 // this won't work for, e.g. an Obj-C property. 2918 if (E->isGLValue()) 2919 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), 2920 E->getExprLoc()).getScalarVal(); 2921 2922 // Otherwise, calculate and project. 2923 return CGF.EmitComplexExpr(Op, false, true).first; 2924 } 2925 2926 return Visit(Op); 2927 } 2928 2929 Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) { 2930 Expr *Op = E->getSubExpr(); 2931 if (Op->getType()->isAnyComplexType()) { 2932 // If it's an l-value, load through the appropriate subobject l-value. 2933 // Note that we have to ask E because Op might be an l-value that 2934 // this won't work for, e.g. an Obj-C property. 2935 if (Op->isGLValue()) 2936 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), 2937 E->getExprLoc()).getScalarVal(); 2938 2939 // Otherwise, calculate and project. 2940 return CGF.EmitComplexExpr(Op, true, false).second; 2941 } 2942 2943 // __imag on a scalar returns zero. Emit the subexpr to ensure side 2944 // effects are evaluated, but not the actual value. 
2945 if (Op->isGLValue()) 2946 CGF.EmitLValue(Op); 2947 else 2948 CGF.EmitScalarExpr(Op, true); 2949 return llvm::Constant::getNullValue(ConvertType(E->getType())); 2950 } 2951 2952 //===----------------------------------------------------------------------===// 2953 // Binary Operators 2954 //===----------------------------------------------------------------------===// 2955 2956 BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) { 2957 TestAndClearIgnoreResultAssign(); 2958 BinOpInfo Result; 2959 Result.LHS = Visit(E->getLHS()); 2960 Result.RHS = Visit(E->getRHS()); 2961 Result.Ty = E->getType(); 2962 Result.Opcode = E->getOpcode(); 2963 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); 2964 Result.E = E; 2965 return Result; 2966 } 2967 2968 LValue ScalarExprEmitter::EmitCompoundAssignLValue( 2969 const CompoundAssignOperator *E, 2970 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &), 2971 Value *&Result) { 2972 QualType LHSTy = E->getLHS()->getType(); 2973 BinOpInfo OpInfo; 2974 2975 if (E->getComputationResultType()->isAnyComplexType()) 2976 return CGF.EmitScalarCompoundAssignWithComplex(E, Result); 2977 2978 // Emit the RHS first. __block variables need to have the rhs evaluated 2979 // first, plus this should improve codegen a little. 2980 OpInfo.RHS = Visit(E->getRHS()); 2981 OpInfo.Ty = E->getComputationResultType(); 2982 OpInfo.Opcode = E->getOpcode(); 2983 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); 2984 OpInfo.E = E; 2985 // Load/convert the LHS. 2986 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); 2987 2988 llvm::PHINode *atomicPHI = nullptr; 2989 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) { 2990 QualType type = atomicTy->getValueType(); 2991 if (!type->isBooleanType() && type->isIntegerType() && 2992 !(type->isUnsignedIntegerType() && 2993 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) && 2994 CGF.getLangOpts().getSignedOverflowBehavior() != 2995 LangOptions::SOB_Trapping) { 2996 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP; 2997 llvm::Instruction::BinaryOps Op; 2998 switch (OpInfo.Opcode) { 2999 // We don't have atomicrmw operands for *, %, /, <<, >> 3000 case BO_MulAssign: case BO_DivAssign: 3001 case BO_RemAssign: 3002 case BO_ShlAssign: 3003 case BO_ShrAssign: 3004 break; 3005 case BO_AddAssign: 3006 AtomicOp = llvm::AtomicRMWInst::Add; 3007 Op = llvm::Instruction::Add; 3008 break; 3009 case BO_SubAssign: 3010 AtomicOp = llvm::AtomicRMWInst::Sub; 3011 Op = llvm::Instruction::Sub; 3012 break; 3013 case BO_AndAssign: 3014 AtomicOp = llvm::AtomicRMWInst::And; 3015 Op = llvm::Instruction::And; 3016 break; 3017 case BO_XorAssign: 3018 AtomicOp = llvm::AtomicRMWInst::Xor; 3019 Op = llvm::Instruction::Xor; 3020 break; 3021 case BO_OrAssign: 3022 AtomicOp = llvm::AtomicRMWInst::Or; 3023 Op = llvm::Instruction::Or; 3024 break; 3025 default: 3026 llvm_unreachable("Invalid compound assignment type"); 3027 } 3028 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) { 3029 llvm::Value *Amt = CGF.EmitToMemory( 3030 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy, 3031 E->getExprLoc()), 3032 LHSTy); 3033 Value *OldVal = Builder.CreateAtomicRMW( 3034 AtomicOp, LHSLV.getPointer(CGF), Amt, 3035 llvm::AtomicOrdering::SequentiallyConsistent); 3036 3037 // Since operation is atomic, the result type is guaranteed to be the 3038 // same as the input in LLVM terms. 
3039 Result = Builder.CreateBinOp(Op, OldVal, Amt); 3040 return LHSLV; 3041 } 3042 } 3043 // FIXME: For floating point types, we should be saving and restoring the 3044 // floating point environment in the loop. 3045 llvm::BasicBlock *startBB = Builder.GetInsertBlock(); 3046 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn); 3047 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc()); 3048 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type); 3049 Builder.CreateBr(opBB); 3050 Builder.SetInsertPoint(opBB); 3051 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2); 3052 atomicPHI->addIncoming(OpInfo.LHS, startBB); 3053 OpInfo.LHS = atomicPHI; 3054 } 3055 else 3056 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc()); 3057 3058 SourceLocation Loc = E->getExprLoc(); 3059 OpInfo.LHS = 3060 EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc); 3061 3062 // Expand the binary operator. 3063 Result = (this->*Func)(OpInfo); 3064 3065 // Convert the result back to the LHS type, 3066 // potentially with Implicit Conversion sanitizer check. 3067 Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy, 3068 Loc, ScalarConversionOpts(CGF.SanOpts)); 3069 3070 if (atomicPHI) { 3071 llvm::BasicBlock *curBlock = Builder.GetInsertBlock(); 3072 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); 3073 auto Pair = CGF.EmitAtomicCompareExchange( 3074 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc()); 3075 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy); 3076 llvm::Value *success = Pair.second; 3077 atomicPHI->addIncoming(old, curBlock); 3078 Builder.CreateCondBr(success, contBB, atomicPHI->getParent()); 3079 Builder.SetInsertPoint(contBB); 3080 return LHSLV; 3081 } 3082 3083 // Store the result value into the LHS lvalue. Bit-fields are handled 3084 // specially because the result is altered by the store, i.e., [C99 6.5.16p1] 3085 // 'An assignment expression has the value of the left operand after the 3086 // assignment...'. 3087 if (LHSLV.isBitField()) 3088 CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result); 3089 else 3090 CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV); 3091 3092 if (CGF.getLangOpts().OpenMP) 3093 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, 3094 E->getLHS()); 3095 return LHSLV; 3096 } 3097 3098 Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E, 3099 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) { 3100 bool Ignore = TestAndClearIgnoreResultAssign(); 3101 Value *RHS = nullptr; 3102 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS); 3103 3104 // If the result is clearly ignored, return now. 3105 if (Ignore) 3106 return nullptr; 3107 3108 // The result of an assignment in C is the assigned r-value. 3109 if (!CGF.getLangOpts().CPlusPlus) 3110 return RHS; 3111 3112 // If the lvalue is non-volatile, return the computed value of the assignment. 3113 if (!LHS.isVolatileQualified()) 3114 return RHS; 3115 3116 // Otherwise, reload the value. 
3117 return EmitLoadOfLValue(LHS, E->getExprLoc()); 3118 } 3119 3120 void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck( 3121 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) { 3122 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks; 3123 3124 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) { 3125 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero), 3126 SanitizerKind::IntegerDivideByZero)); 3127 } 3128 3129 const auto *BO = cast<BinaryOperator>(Ops.E); 3130 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) && 3131 Ops.Ty->hasSignedIntegerRepresentation() && 3132 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) && 3133 Ops.mayHaveIntegerOverflow()) { 3134 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType()); 3135 3136 llvm::Value *IntMin = 3137 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth())); 3138 llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL); 3139 3140 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin); 3141 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne); 3142 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or"); 3143 Checks.push_back( 3144 std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow)); 3145 } 3146 3147 if (Checks.size() > 0) 3148 EmitBinOpCheck(Checks, Ops); 3149 } 3150 3151 Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) { 3152 { 3153 CodeGenFunction::SanitizerScope SanScope(&CGF); 3154 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) || 3155 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) && 3156 Ops.Ty->isIntegerType() && 3157 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) { 3158 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); 3159 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true); 3160 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) && 3161 Ops.Ty->isRealFloatingType() && 3162 Ops.mayHaveFloatDivisionByZero()) { 3163 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); 3164 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero); 3165 EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero), 3166 Ops); 3167 } 3168 } 3169 3170 if (Ops.LHS->getType()->isFPOrFPVectorTy()) { 3171 llvm::Value *Val; 3172 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); 3173 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div"); 3174 if (CGF.getLangOpts().OpenCL && 3175 !CGF.CGM.getCodeGenOpts().CorrectlyRoundedDivSqrt) { 3176 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp 3177 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt 3178 // build option allows an application to specify that single precision 3179 // floating-point divide (x/y and 1/x) and sqrt used in the program 3180 // source are correctly rounded. 3181 llvm::Type *ValTy = Val->getType(); 3182 if (ValTy->isFloatTy() || 3183 (isa<llvm::VectorType>(ValTy) && 3184 cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy())) 3185 CGF.SetFPAccuracy(Val, 2.5); 3186 } 3187 return Val; 3188 } 3189 else if (Ops.isFixedPointOp()) 3190 return EmitFixedPointBinOp(Ops); 3191 else if (Ops.Ty->hasUnsignedIntegerRepresentation()) 3192 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div"); 3193 else 3194 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div"); 3195 } 3196 3197 Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) { 3198 // Rem in C can't be a floating point type: C99 6.5.5p2. 
3199 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) || 3200 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) && 3201 Ops.Ty->isIntegerType() && 3202 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) { 3203 CodeGenFunction::SanitizerScope SanScope(&CGF); 3204 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); 3205 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false); 3206 } 3207 3208 if (Ops.Ty->hasUnsignedIntegerRepresentation()) 3209 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem"); 3210 else 3211 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem"); 3212 } 3213 3214 Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { 3215 unsigned IID; 3216 unsigned OpID = 0; 3217 3218 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType(); 3219 switch (Ops.Opcode) { 3220 case BO_Add: 3221 case BO_AddAssign: 3222 OpID = 1; 3223 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow : 3224 llvm::Intrinsic::uadd_with_overflow; 3225 break; 3226 case BO_Sub: 3227 case BO_SubAssign: 3228 OpID = 2; 3229 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow : 3230 llvm::Intrinsic::usub_with_overflow; 3231 break; 3232 case BO_Mul: 3233 case BO_MulAssign: 3234 OpID = 3; 3235 IID = isSigned ? llvm::Intrinsic::smul_with_overflow : 3236 llvm::Intrinsic::umul_with_overflow; 3237 break; 3238 default: 3239 llvm_unreachable("Unsupported operation for overflow detection"); 3240 } 3241 OpID <<= 1; 3242 if (isSigned) 3243 OpID |= 1; 3244 3245 CodeGenFunction::SanitizerScope SanScope(&CGF); 3246 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty); 3247 3248 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy); 3249 3250 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS}); 3251 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0); 3252 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1); 3253 3254 // Handle overflow with llvm.trap if no custom handler has been specified. 3255 const std::string *handlerName = 3256 &CGF.getLangOpts().OverflowHandler; 3257 if (handlerName->empty()) { 3258 // If the signed-integer-overflow sanitizer is enabled, emit a call to its 3259 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap. 3260 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) { 3261 llvm::Value *NotOverflow = Builder.CreateNot(overflow); 3262 SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow 3263 : SanitizerKind::UnsignedIntegerOverflow; 3264 EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops); 3265 } else 3266 CGF.EmitTrapCheck(Builder.CreateNot(overflow)); 3267 return result; 3268 } 3269 3270 // Branch in case of overflow. 3271 llvm::BasicBlock *initialBB = Builder.GetInsertBlock(); 3272 llvm::BasicBlock *continueBB = 3273 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode()); 3274 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn); 3275 3276 Builder.CreateCondBr(overflow, overflowBB, continueBB); 3277 3278 // If an overflow handler is set, then we want to call it and then use its 3279 // result, if it returns. 3280 Builder.SetInsertPoint(overflowBB); 3281 3282 // Get the overflow handler. 
3283 llvm::Type *Int8Ty = CGF.Int8Ty; 3284 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty }; 3285 llvm::FunctionType *handlerTy = 3286 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true); 3287 llvm::FunctionCallee handler = 3288 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName); 3289 3290 // Sign extend the args to 64-bit, so that we can use the same handler for 3291 // all types of overflow. 3292 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty); 3293 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty); 3294 3295 // Call the handler with the two arguments, the operation, and the size of 3296 // the result. 3297 llvm::Value *handlerArgs[] = { 3298 lhs, 3299 rhs, 3300 Builder.getInt8(OpID), 3301 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth()) 3302 }; 3303 llvm::Value *handlerResult = 3304 CGF.EmitNounwindRuntimeCall(handler, handlerArgs); 3305 3306 // Truncate the result back to the desired size. 3307 handlerResult = Builder.CreateTrunc(handlerResult, opTy); 3308 Builder.CreateBr(continueBB); 3309 3310 Builder.SetInsertPoint(continueBB); 3311 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2); 3312 phi->addIncoming(result, initialBB); 3313 phi->addIncoming(handlerResult, overflowBB); 3314 3315 return phi; 3316 } 3317 3318 /// Emit pointer + index arithmetic. 3319 static Value *emitPointerArithmetic(CodeGenFunction &CGF, 3320 const BinOpInfo &op, 3321 bool isSubtraction) { 3322 // Must have binary (not unary) expr here. Unary pointer 3323 // increment/decrement doesn't use this path. 3324 const BinaryOperator *expr = cast<BinaryOperator>(op.E); 3325 3326 Value *pointer = op.LHS; 3327 Expr *pointerOperand = expr->getLHS(); 3328 Value *index = op.RHS; 3329 Expr *indexOperand = expr->getRHS(); 3330 3331 // In a subtraction, the LHS is always the pointer. 3332 if (!isSubtraction && !pointer->getType()->isPointerTy()) { 3333 std::swap(pointer, index); 3334 std::swap(pointerOperand, indexOperand); 3335 } 3336 3337 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); 3338 3339 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth(); 3340 auto &DL = CGF.CGM.getDataLayout(); 3341 auto PtrTy = cast<llvm::PointerType>(pointer->getType()); 3342 3343 // Some versions of glibc and gcc use idioms (particularly in their malloc 3344 // routines) that add a pointer-sized integer (known to be a pointer value) 3345 // to a null pointer in order to cast the value back to an integer or as 3346 // part of a pointer alignment algorithm. This is undefined behavior, but 3347 // we'd like to be able to compile programs that use it. 3348 // 3349 // Normally, we'd generate a GEP with a null-pointer base here in response 3350 // to that code, but it's also UB to dereference a pointer created that 3351 // way. Instead (as an acknowledged hack to tolerate the idiom) we will 3352 // generate a direct cast of the integer value to a pointer. 3353 // 3354 // The idiom (p = nullptr + N) is not met if any of the following are true: 3355 // 3356 // The operation is subtraction. 3357 // The index is not pointer-sized. 3358 // The pointer type is not byte-sized. 3359 // 3360 if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(), 3361 op.Opcode, 3362 expr->getLHS(), 3363 expr->getRHS())) 3364 return CGF.Builder.CreateIntToPtr(index, pointer->getType()); 3365 3366 if (width != DL.getIndexTypeSizeInBits(PtrTy)) { 3367 // Zero-extend or sign-extend the pointer value according to 3368 // whether the index is signed or not. 
3369 index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned, 3370 "idx.ext"); 3371 } 3372 3373 // If this is subtraction, negate the index. 3374 if (isSubtraction) 3375 index = CGF.Builder.CreateNeg(index, "idx.neg"); 3376 3377 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds)) 3378 CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(), 3379 /*Accessed*/ false); 3380 3381 const PointerType *pointerType 3382 = pointerOperand->getType()->getAs<PointerType>(); 3383 if (!pointerType) { 3384 QualType objectType = pointerOperand->getType() 3385 ->castAs<ObjCObjectPointerType>() 3386 ->getPointeeType(); 3387 llvm::Value *objectSize 3388 = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType)); 3389 3390 index = CGF.Builder.CreateMul(index, objectSize); 3391 3392 Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy); 3393 result = CGF.Builder.CreateGEP(result, index, "add.ptr"); 3394 return CGF.Builder.CreateBitCast(result, pointer->getType()); 3395 } 3396 3397 QualType elementType = pointerType->getPointeeType(); 3398 if (const VariableArrayType *vla 3399 = CGF.getContext().getAsVariableArrayType(elementType)) { 3400 // The element count here is the total number of non-VLA elements. 3401 llvm::Value *numElements = CGF.getVLASize(vla).NumElts; 3402 3403 // Effectively, the multiply by the VLA size is part of the GEP. 3404 // GEP indexes are signed, and scaling an index isn't permitted to 3405 // signed-overflow, so we use the same semantics for our explicit 3406 // multiply. We suppress this if overflow is not undefined behavior. 3407 if (CGF.getLangOpts().isSignedOverflowDefined()) { 3408 index = CGF.Builder.CreateMul(index, numElements, "vla.index"); 3409 pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr"); 3410 } else { 3411 index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index"); 3412 pointer = 3413 CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction, 3414 op.E->getExprLoc(), "add.ptr"); 3415 } 3416 return pointer; 3417 } 3418 3419 // Explicitly handle GNU void* and function pointer arithmetic extensions. The 3420 // GNU void* casts amount to no-ops since our void* type is i8*, but this is 3421 // future proof. 3422 if (elementType->isVoidType() || elementType->isFunctionType()) { 3423 Value *result = CGF.EmitCastToVoidPtr(pointer); 3424 result = CGF.Builder.CreateGEP(result, index, "add.ptr"); 3425 return CGF.Builder.CreateBitCast(result, pointer->getType()); 3426 } 3427 3428 if (CGF.getLangOpts().isSignedOverflowDefined()) 3429 return CGF.Builder.CreateGEP(pointer, index, "add.ptr"); 3430 3431 return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction, 3432 op.E->getExprLoc(), "add.ptr"); 3433 } 3434 3435 // Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and 3436 // Addend. Use negMul and negAdd to negate the first operand of the Mul or 3437 // the add operand respectively. This allows fmuladd to represent a*b-c, or 3438 // c-a*b. Patterns in LLVM should catch the negated forms and translate them to 3439 // efficient operations. 
3440 static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, 3441 const CodeGenFunction &CGF, CGBuilderTy &Builder, 3442 bool negMul, bool negAdd) { 3443 assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set."); 3444 3445 Value *MulOp0 = MulOp->getOperand(0); 3446 Value *MulOp1 = MulOp->getOperand(1); 3447 if (negMul) 3448 MulOp0 = Builder.CreateFNeg(MulOp0, "neg"); 3449 if (negAdd) 3450 Addend = Builder.CreateFNeg(Addend, "neg"); 3451 3452 Value *FMulAdd = nullptr; 3453 if (Builder.getIsFPConstrained()) { 3454 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) && 3455 "Only constrained operation should be created when Builder is in FP " 3456 "constrained mode"); 3457 FMulAdd = Builder.CreateConstrainedFPCall( 3458 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd, 3459 Addend->getType()), 3460 {MulOp0, MulOp1, Addend}); 3461 } else { 3462 FMulAdd = Builder.CreateCall( 3463 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()), 3464 {MulOp0, MulOp1, Addend}); 3465 } 3466 MulOp->eraseFromParent(); 3467 3468 return FMulAdd; 3469 } 3470 3471 // Check whether it would be legal to emit an fmuladd intrinsic call to 3472 // represent op and if so, build the fmuladd. 3473 // 3474 // Checks that (a) the operation is fusable, and (b) -ffp-contract=on. 3475 // Does NOT check the type of the operation - it's assumed that this function 3476 // will be called from contexts where it's known that the type is contractable. 3477 static Value* tryEmitFMulAdd(const BinOpInfo &op, 3478 const CodeGenFunction &CGF, CGBuilderTy &Builder, 3479 bool isSub=false) { 3480 3481 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign || 3482 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && 3483 "Only fadd/fsub can be the root of an fmuladd."); 3484 3485 // Check whether this op is marked as fusable. 3486 if (!op.FPFeatures.allowFPContractWithinStatement()) 3487 return nullptr; 3488 3489 // We have a potentially fusable op. Look for a mul on one of the operands. 3490 // Also, make sure that the mul result isn't used directly. In that case, 3491 // there's no point creating a muladd operation. 
3492 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) { 3493 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul && 3494 LHSBinOp->use_empty()) 3495 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub); 3496 } 3497 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) { 3498 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul && 3499 RHSBinOp->use_empty()) 3500 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false); 3501 } 3502 3503 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) { 3504 if (LHSBinOp->getIntrinsicID() == 3505 llvm::Intrinsic::experimental_constrained_fmul && 3506 LHSBinOp->use_empty()) 3507 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub); 3508 } 3509 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) { 3510 if (RHSBinOp->getIntrinsicID() == 3511 llvm::Intrinsic::experimental_constrained_fmul && 3512 RHSBinOp->use_empty()) 3513 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false); 3514 } 3515 3516 return nullptr; 3517 } 3518 3519 Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) { 3520 if (op.LHS->getType()->isPointerTy() || 3521 op.RHS->getType()->isPointerTy()) 3522 return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction); 3523 3524 if (op.Ty->isSignedIntegerOrEnumerationType()) { 3525 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 3526 case LangOptions::SOB_Defined: 3527 return Builder.CreateAdd(op.LHS, op.RHS, "add"); 3528 case LangOptions::SOB_Undefined: 3529 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 3530 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add"); 3531 LLVM_FALLTHROUGH; 3532 case LangOptions::SOB_Trapping: 3533 if (CanElideOverflowCheck(CGF.getContext(), op)) 3534 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add"); 3535 return EmitOverflowCheckedBinOp(op); 3536 } 3537 } 3538 3539 if (op.Ty->isConstantMatrixType()) { 3540 llvm::MatrixBuilder<CGBuilderTy> MB(Builder); 3541 return MB.CreateAdd(op.LHS, op.RHS); 3542 } 3543 3544 if (op.Ty->isUnsignedIntegerType() && 3545 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && 3546 !CanElideOverflowCheck(CGF.getContext(), op)) 3547 return EmitOverflowCheckedBinOp(op); 3548 3549 if (op.LHS->getType()->isFPOrFPVectorTy()) { 3550 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3551 // Try to form an fmuladd. 3552 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder)) 3553 return FMulAdd; 3554 3555 return Builder.CreateFAdd(op.LHS, op.RHS, "add"); 3556 } 3557 3558 if (op.isFixedPointOp()) 3559 return EmitFixedPointBinOp(op); 3560 3561 return Builder.CreateAdd(op.LHS, op.RHS, "add"); 3562 } 3563 3564 /// The resulting value must be calculated with exact precision, so the operands 3565 /// may not be the same type. 3566 Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) { 3567 using llvm::APSInt; 3568 using llvm::ConstantInt; 3569 3570 // This is either a binary operation where at least one of the operands is 3571 // a fixed-point type, or a unary operation where the operand is a fixed-point 3572 // type. The result type of a binary operation is determined by 3573 // Sema::handleFixedPointConversions(). 
3574 QualType ResultTy = op.Ty; 3575 QualType LHSTy, RHSTy; 3576 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) { 3577 RHSTy = BinOp->getRHS()->getType(); 3578 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) { 3579 // For compound assignment, the effective type of the LHS at this point 3580 // is the computation LHS type, not the actual LHS type, and the final 3581 // result type is not the type of the expression but rather the 3582 // computation result type. 3583 LHSTy = CAO->getComputationLHSType(); 3584 ResultTy = CAO->getComputationResultType(); 3585 } else 3586 LHSTy = BinOp->getLHS()->getType(); 3587 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) { 3588 LHSTy = UnOp->getSubExpr()->getType(); 3589 RHSTy = UnOp->getSubExpr()->getType(); 3590 } 3591 ASTContext &Ctx = CGF.getContext(); 3592 Value *LHS = op.LHS; 3593 Value *RHS = op.RHS; 3594 3595 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy); 3596 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy); 3597 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy); 3598 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema); 3599 3600 // Convert the operands to the full precision type. 3601 Value *FullLHS = EmitFixedPointConversion(LHS, LHSFixedSema, CommonFixedSema, 3602 op.E->getExprLoc()); 3603 Value *FullRHS = EmitFixedPointConversion(RHS, RHSFixedSema, CommonFixedSema, 3604 op.E->getExprLoc()); 3605 3606 // Perform the actual operation. 3607 Value *Result; 3608 switch (op.Opcode) { 3609 case BO_AddAssign: 3610 case BO_Add: { 3611 if (CommonFixedSema.isSaturated()) { 3612 llvm::Intrinsic::ID IID = CommonFixedSema.isSigned() 3613 ? llvm::Intrinsic::sadd_sat 3614 : llvm::Intrinsic::uadd_sat; 3615 Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS); 3616 } else { 3617 Result = Builder.CreateAdd(FullLHS, FullRHS); 3618 } 3619 break; 3620 } 3621 case BO_SubAssign: 3622 case BO_Sub: { 3623 if (CommonFixedSema.isSaturated()) { 3624 llvm::Intrinsic::ID IID = CommonFixedSema.isSigned() 3625 ? llvm::Intrinsic::ssub_sat 3626 : llvm::Intrinsic::usub_sat; 3627 Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS); 3628 } else { 3629 Result = Builder.CreateSub(FullLHS, FullRHS); 3630 } 3631 break; 3632 } 3633 case BO_MulAssign: 3634 case BO_Mul: { 3635 llvm::Intrinsic::ID IID; 3636 if (CommonFixedSema.isSaturated()) 3637 IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::smul_fix_sat 3638 : llvm::Intrinsic::umul_fix_sat; 3639 else 3640 IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::smul_fix 3641 : llvm::Intrinsic::umul_fix; 3642 Result = Builder.CreateIntrinsic(IID, {FullLHS->getType()}, 3643 {FullLHS, FullRHS, Builder.getInt32(CommonFixedSema.getScale())}); 3644 break; 3645 } 3646 case BO_DivAssign: 3647 case BO_Div: { 3648 llvm::Intrinsic::ID IID; 3649 if (CommonFixedSema.isSaturated()) 3650 IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::sdiv_fix_sat 3651 : llvm::Intrinsic::udiv_fix_sat; 3652 else 3653 IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::sdiv_fix 3654 : llvm::Intrinsic::udiv_fix; 3655 Result = Builder.CreateIntrinsic(IID, {FullLHS->getType()}, 3656 {FullLHS, FullRHS, Builder.getInt32(CommonFixedSema.getScale())}); 3657 break; 3658 } 3659 case BO_LT: 3660 return CommonFixedSema.isSigned() ? Builder.CreateICmpSLT(FullLHS, FullRHS) 3661 : Builder.CreateICmpULT(FullLHS, FullRHS); 3662 case BO_GT: 3663 return CommonFixedSema.isSigned() ? 
Builder.CreateICmpSGT(FullLHS, FullRHS) 3664 : Builder.CreateICmpUGT(FullLHS, FullRHS); 3665 case BO_LE: 3666 return CommonFixedSema.isSigned() ? Builder.CreateICmpSLE(FullLHS, FullRHS) 3667 : Builder.CreateICmpULE(FullLHS, FullRHS); 3668 case BO_GE: 3669 return CommonFixedSema.isSigned() ? Builder.CreateICmpSGE(FullLHS, FullRHS) 3670 : Builder.CreateICmpUGE(FullLHS, FullRHS); 3671 case BO_EQ: 3672 // For equality operations, we assume any padding bits on unsigned types are 3673 // zero'd out. They could be overwritten through non-saturating operations 3674 // that cause overflow, but this leads to undefined behavior. 3675 return Builder.CreateICmpEQ(FullLHS, FullRHS); 3676 case BO_NE: 3677 return Builder.CreateICmpNE(FullLHS, FullRHS); 3678 case BO_Shl: 3679 case BO_Shr: 3680 case BO_Cmp: 3681 case BO_LAnd: 3682 case BO_LOr: 3683 case BO_ShlAssign: 3684 case BO_ShrAssign: 3685 llvm_unreachable("Found unimplemented fixed point binary operation"); 3686 case BO_PtrMemD: 3687 case BO_PtrMemI: 3688 case BO_Rem: 3689 case BO_Xor: 3690 case BO_And: 3691 case BO_Or: 3692 case BO_Assign: 3693 case BO_RemAssign: 3694 case BO_AndAssign: 3695 case BO_XorAssign: 3696 case BO_OrAssign: 3697 case BO_Comma: 3698 llvm_unreachable("Found unsupported binary operation for fixed point types."); 3699 } 3700 3701 // Convert to the result type. 3702 return EmitFixedPointConversion(Result, CommonFixedSema, ResultFixedSema, 3703 op.E->getExprLoc()); 3704 } 3705 3706 Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) { 3707 // The LHS is always a pointer if either side is. 3708 if (!op.LHS->getType()->isPointerTy()) { 3709 if (op.Ty->isSignedIntegerOrEnumerationType()) { 3710 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 3711 case LangOptions::SOB_Defined: 3712 return Builder.CreateSub(op.LHS, op.RHS, "sub"); 3713 case LangOptions::SOB_Undefined: 3714 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 3715 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub"); 3716 LLVM_FALLTHROUGH; 3717 case LangOptions::SOB_Trapping: 3718 if (CanElideOverflowCheck(CGF.getContext(), op)) 3719 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub"); 3720 return EmitOverflowCheckedBinOp(op); 3721 } 3722 } 3723 3724 if (op.Ty->isConstantMatrixType()) { 3725 llvm::MatrixBuilder<CGBuilderTy> MB(Builder); 3726 return MB.CreateSub(op.LHS, op.RHS); 3727 } 3728 3729 if (op.Ty->isUnsignedIntegerType() && 3730 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && 3731 !CanElideOverflowCheck(CGF.getContext(), op)) 3732 return EmitOverflowCheckedBinOp(op); 3733 3734 if (op.LHS->getType()->isFPOrFPVectorTy()) { 3735 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3736 // Try to form an fmuladd. 3737 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true)) 3738 return FMulAdd; 3739 return Builder.CreateFSub(op.LHS, op.RHS, "sub"); 3740 } 3741 3742 if (op.isFixedPointOp()) 3743 return EmitFixedPointBinOp(op); 3744 3745 return Builder.CreateSub(op.LHS, op.RHS, "sub"); 3746 } 3747 3748 // If the RHS is not a pointer, then we have normal pointer 3749 // arithmetic. 3750 if (!op.RHS->getType()->isPointerTy()) 3751 return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction); 3752 3753 // Otherwise, this is a pointer subtraction. 3754 3755 // Do the raw subtraction part. 
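// Illustrative note: for 'T *p, *q', the difference 'p - q' is lowered to
// (ptrtoint p) - (ptrtoint q), followed below by an exact sdiv by sizeof(T).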
3756 llvm::Value *LHS
3757 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
3758 llvm::Value *RHS
3759 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
3760 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
3761
3762 // Okay, figure out the element size.
3763 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3764 QualType elementType = expr->getLHS()->getType()->getPointeeType();
3765
3766 llvm::Value *divisor = nullptr;
3767
3768 // For a variable-length array, this is going to be non-constant.
3769 if (const VariableArrayType *vla
3770 = CGF.getContext().getAsVariableArrayType(elementType)) {
3771 auto VlaSize = CGF.getVLASize(vla);
3772 elementType = VlaSize.Type;
3773 divisor = VlaSize.NumElts;
3774
3775 // Scale the number of non-VLA elements by the non-VLA element size.
3776 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
3777 if (!eltSize.isOne())
3778 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
3779
3780 // For everything else, we can just compute it, safe in the
3781 // assumption that Sema won't let anything through that we can't
3782 // safely compute the size of.
3783 } else {
3784 CharUnits elementSize;
3785 // Handle GCC extension for pointer arithmetic on void* and
3786 // function pointer types.
3787 if (elementType->isVoidType() || elementType->isFunctionType())
3788 elementSize = CharUnits::One();
3789 else
3790 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
3791
3792 // Don't even emit the divide for element size of 1.
3793 if (elementSize.isOne())
3794 return diffInChars;
3795
3796 divisor = CGF.CGM.getSize(elementSize);
3797 }
3798
3799 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
3800 // pointer difference in C is only defined in the case where both operands
3801 // are pointing to elements of an array.
3802 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
3803 }
3804
3805 Value *ScalarExprEmitter::GetWidthMinusOneValue(Value *LHS, Value *RHS) {
3806 llvm::IntegerType *Ty;
3807 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3808 Ty = cast<llvm::IntegerType>(VT->getElementType());
3809 else
3810 Ty = cast<llvm::IntegerType>(LHS->getType());
3811 return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
3812 }
3813
3814 Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
3815 const Twine &Name) {
3816 llvm::IntegerType *Ty;
3817 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3818 Ty = cast<llvm::IntegerType>(VT->getElementType());
3819 else
3820 Ty = cast<llvm::IntegerType>(LHS->getType());
3821
3822 if (llvm::isPowerOf2_64(Ty->getBitWidth()))
3823 return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name);
3824
3825 return Builder.CreateURem(
3826 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
3827 }
3828
3829 Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
3830 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3831 // RHS to the same size as the LHS.
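// Illustrative note: unlike C, LLVM's shift instructions require both operands
// to have the same type, so for e.g. 'long << int' the shift amount is widened
// to the LHS width here; out-of-range amounts are diagnosed by the sanitizer
// checks below rather than by this cast.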
3832 Value *RHS = Ops.RHS; 3833 if (Ops.LHS->getType() != RHS->getType()) 3834 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); 3835 3836 bool SanitizeBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) && 3837 Ops.Ty->hasSignedIntegerRepresentation() && 3838 !CGF.getLangOpts().isSignedOverflowDefined() && 3839 !CGF.getLangOpts().CPlusPlus20; 3840 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent); 3841 // OpenCL 6.3j: shift values are effectively % word size of LHS. 3842 if (CGF.getLangOpts().OpenCL) 3843 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask"); 3844 else if ((SanitizeBase || SanitizeExponent) && 3845 isa<llvm::IntegerType>(Ops.LHS->getType())) { 3846 CodeGenFunction::SanitizerScope SanScope(&CGF); 3847 SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks; 3848 llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS); 3849 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne); 3850 3851 if (SanitizeExponent) { 3852 Checks.push_back( 3853 std::make_pair(ValidExponent, SanitizerKind::ShiftExponent)); 3854 } 3855 3856 if (SanitizeBase) { 3857 // Check whether we are shifting any non-zero bits off the top of the 3858 // integer. We only emit this check if exponent is valid - otherwise 3859 // instructions below will have undefined behavior themselves. 3860 llvm::BasicBlock *Orig = Builder.GetInsertBlock(); 3861 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 3862 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check"); 3863 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont); 3864 llvm::Value *PromotedWidthMinusOne = 3865 (RHS == Ops.RHS) ? WidthMinusOne 3866 : GetWidthMinusOneValue(Ops.LHS, RHS); 3867 CGF.EmitBlock(CheckShiftBase); 3868 llvm::Value *BitsShiftedOff = Builder.CreateLShr( 3869 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros", 3870 /*NUW*/ true, /*NSW*/ true), 3871 "shl.check"); 3872 if (CGF.getLangOpts().CPlusPlus) { 3873 // In C99, we are not permitted to shift a 1 bit into the sign bit. 3874 // Under C++11's rules, shifting a 1 bit into the sign bit is 3875 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't 3876 // define signed left shifts, so we use the C99 and C++11 rules there). 3877 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1); 3878 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One); 3879 } 3880 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0); 3881 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero); 3882 CGF.EmitBlock(Cont); 3883 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2); 3884 BaseCheck->addIncoming(Builder.getTrue(), Orig); 3885 BaseCheck->addIncoming(ValidBase, CheckShiftBase); 3886 Checks.push_back(std::make_pair(BaseCheck, SanitizerKind::ShiftBase)); 3887 } 3888 3889 assert(!Checks.empty()); 3890 EmitBinOpCheck(Checks, Ops); 3891 } 3892 3893 return Builder.CreateShl(Ops.LHS, RHS, "shl"); 3894 } 3895 3896 Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) { 3897 // LLVM requires the LHS and RHS to be the same type: promote or truncate the 3898 // RHS to the same size as the LHS. 3899 Value *RHS = Ops.RHS; 3900 if (Ops.LHS->getType() != RHS->getType()) 3901 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); 3902 3903 // OpenCL 6.3j: shift values are effectively % word size of LHS. 
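// Illustrative note: under OpenCL a shift such as 'x >> 35' on a 32-bit x is
// emitted as 'x >> (35 & 31)' by the masking in ConstrainShiftValue.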
3904 if (CGF.getLangOpts().OpenCL) 3905 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask"); 3906 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) && 3907 isa<llvm::IntegerType>(Ops.LHS->getType())) { 3908 CodeGenFunction::SanitizerScope SanScope(&CGF); 3909 llvm::Value *Valid = 3910 Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS)); 3911 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops); 3912 } 3913 3914 if (Ops.Ty->hasUnsignedIntegerRepresentation()) 3915 return Builder.CreateLShr(Ops.LHS, RHS, "shr"); 3916 return Builder.CreateAShr(Ops.LHS, RHS, "shr"); 3917 } 3918 3919 enum IntrinsicType { VCMPEQ, VCMPGT }; 3920 // return corresponding comparison intrinsic for given vector type 3921 static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, 3922 BuiltinType::Kind ElemKind) { 3923 switch (ElemKind) { 3924 default: llvm_unreachable("unexpected element type"); 3925 case BuiltinType::Char_U: 3926 case BuiltinType::UChar: 3927 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p : 3928 llvm::Intrinsic::ppc_altivec_vcmpgtub_p; 3929 case BuiltinType::Char_S: 3930 case BuiltinType::SChar: 3931 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p : 3932 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p; 3933 case BuiltinType::UShort: 3934 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p : 3935 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p; 3936 case BuiltinType::Short: 3937 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p : 3938 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p; 3939 case BuiltinType::UInt: 3940 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p : 3941 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p; 3942 case BuiltinType::Int: 3943 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p : 3944 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p; 3945 case BuiltinType::ULong: 3946 case BuiltinType::ULongLong: 3947 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p : 3948 llvm::Intrinsic::ppc_altivec_vcmpgtud_p; 3949 case BuiltinType::Long: 3950 case BuiltinType::LongLong: 3951 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p : 3952 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p; 3953 case BuiltinType::Float: 3954 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p : 3955 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p; 3956 case BuiltinType::Double: 3957 return (IT == VCMPEQ) ? 
llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p : 3958 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p; 3959 } 3960 } 3961 3962 Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E, 3963 llvm::CmpInst::Predicate UICmpOpc, 3964 llvm::CmpInst::Predicate SICmpOpc, 3965 llvm::CmpInst::Predicate FCmpOpc, 3966 bool IsSignaling) { 3967 TestAndClearIgnoreResultAssign(); 3968 Value *Result; 3969 QualType LHSTy = E->getLHS()->getType(); 3970 QualType RHSTy = E->getRHS()->getType(); 3971 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) { 3972 assert(E->getOpcode() == BO_EQ || 3973 E->getOpcode() == BO_NE); 3974 Value *LHS = CGF.EmitScalarExpr(E->getLHS()); 3975 Value *RHS = CGF.EmitScalarExpr(E->getRHS()); 3976 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison( 3977 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE); 3978 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) { 3979 BinOpInfo BOInfo = EmitBinOps(E); 3980 Value *LHS = BOInfo.LHS; 3981 Value *RHS = BOInfo.RHS; 3982 3983 // If AltiVec, the comparison results in a numeric type, so we use 3984 // intrinsics comparing vectors and giving 0 or 1 as a result 3985 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) { 3986 // constants for mapping CR6 register bits to predicate result 3987 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6; 3988 3989 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic; 3990 3991 // in several cases vector arguments order will be reversed 3992 Value *FirstVecArg = LHS, 3993 *SecondVecArg = RHS; 3994 3995 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType(); 3996 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind(); 3997 3998 switch(E->getOpcode()) { 3999 default: llvm_unreachable("is not a comparison operation"); 4000 case BO_EQ: 4001 CR6 = CR6_LT; 4002 ID = GetIntrinsic(VCMPEQ, ElementKind); 4003 break; 4004 case BO_NE: 4005 CR6 = CR6_EQ; 4006 ID = GetIntrinsic(VCMPEQ, ElementKind); 4007 break; 4008 case BO_LT: 4009 CR6 = CR6_LT; 4010 ID = GetIntrinsic(VCMPGT, ElementKind); 4011 std::swap(FirstVecArg, SecondVecArg); 4012 break; 4013 case BO_GT: 4014 CR6 = CR6_LT; 4015 ID = GetIntrinsic(VCMPGT, ElementKind); 4016 break; 4017 case BO_LE: 4018 if (ElementKind == BuiltinType::Float) { 4019 CR6 = CR6_LT; 4020 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p; 4021 std::swap(FirstVecArg, SecondVecArg); 4022 } 4023 else { 4024 CR6 = CR6_EQ; 4025 ID = GetIntrinsic(VCMPGT, ElementKind); 4026 } 4027 break; 4028 case BO_GE: 4029 if (ElementKind == BuiltinType::Float) { 4030 CR6 = CR6_LT; 4031 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p; 4032 } 4033 else { 4034 CR6 = CR6_EQ; 4035 ID = GetIntrinsic(VCMPGT, ElementKind); 4036 std::swap(FirstVecArg, SecondVecArg); 4037 } 4038 break; 4039 } 4040 4041 Value *CR6Param = Builder.getInt32(CR6); 4042 llvm::Function *F = CGF.CGM.getIntrinsic(ID); 4043 Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg}); 4044 4045 // The result type of intrinsic may not be same as E->getType(). 4046 // If E->getType() is not BoolTy, EmitScalarConversion will do the 4047 // conversion work. If E->getType() is BoolTy, EmitScalarConversion will 4048 // do nothing, if ResultTy is not i1 at the same time, it will cause 4049 // crash later. 
4050 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType()); 4051 if (ResultTy->getBitWidth() > 1 && 4052 E->getType() == CGF.getContext().BoolTy) 4053 Result = Builder.CreateTrunc(Result, Builder.getInt1Ty()); 4054 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(), 4055 E->getExprLoc()); 4056 } 4057 4058 if (BOInfo.isFixedPointOp()) { 4059 Result = EmitFixedPointBinOp(BOInfo); 4060 } else if (LHS->getType()->isFPOrFPVectorTy()) { 4061 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures); 4062 if (!IsSignaling) 4063 Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp"); 4064 else 4065 Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp"); 4066 } else if (LHSTy->hasSignedIntegerRepresentation()) { 4067 Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp"); 4068 } else { 4069 // Unsigned integers and pointers. 4070 4071 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers && 4072 !isa<llvm::ConstantPointerNull>(LHS) && 4073 !isa<llvm::ConstantPointerNull>(RHS)) { 4074 4075 // Dynamic information is required to be stripped for comparisons, 4076 // because it could leak the dynamic information. Based on comparisons 4077 // of pointers to dynamic objects, the optimizer can replace one pointer 4078 // with another, which might be incorrect in presence of invariant 4079 // groups. Comparison with null is safe because null does not carry any 4080 // dynamic information. 4081 if (LHSTy.mayBeDynamicClass()) 4082 LHS = Builder.CreateStripInvariantGroup(LHS); 4083 if (RHSTy.mayBeDynamicClass()) 4084 RHS = Builder.CreateStripInvariantGroup(RHS); 4085 } 4086 4087 Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp"); 4088 } 4089 4090 // If this is a vector comparison, sign extend the result to the appropriate 4091 // vector integer type and return it (don't convert to bool). 4092 if (LHSTy->isVectorType()) 4093 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); 4094 4095 } else { 4096 // Complex Comparison: can only be an equality comparison. 4097 CodeGenFunction::ComplexPairTy LHS, RHS; 4098 QualType CETy; 4099 if (auto *CTy = LHSTy->getAs<ComplexType>()) { 4100 LHS = CGF.EmitComplexExpr(E->getLHS()); 4101 CETy = CTy->getElementType(); 4102 } else { 4103 LHS.first = Visit(E->getLHS()); 4104 LHS.second = llvm::Constant::getNullValue(LHS.first->getType()); 4105 CETy = LHSTy; 4106 } 4107 if (auto *CTy = RHSTy->getAs<ComplexType>()) { 4108 RHS = CGF.EmitComplexExpr(E->getRHS()); 4109 assert(CGF.getContext().hasSameUnqualifiedType(CETy, 4110 CTy->getElementType()) && 4111 "The element types must always match."); 4112 (void)CTy; 4113 } else { 4114 RHS.first = Visit(E->getRHS()); 4115 RHS.second = llvm::Constant::getNullValue(RHS.first->getType()); 4116 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) && 4117 "The element types must always match."); 4118 } 4119 4120 Value *ResultR, *ResultI; 4121 if (CETy->isRealFloatingType()) { 4122 // As complex comparisons can only be equality comparisons, they 4123 // are never signaling comparisons. 4124 ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r"); 4125 ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i"); 4126 } else { 4127 // Complex comparisons can only be equality comparisons. As such, signed 4128 // and unsigned opcodes are the same. 
4129 ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r"); 4130 ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i"); 4131 } 4132 4133 if (E->getOpcode() == BO_EQ) { 4134 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri"); 4135 } else { 4136 assert(E->getOpcode() == BO_NE && 4137 "Complex comparison other than == or != ?"); 4138 Result = Builder.CreateOr(ResultR, ResultI, "or.ri"); 4139 } 4140 } 4141 4142 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(), 4143 E->getExprLoc()); 4144 } 4145 4146 Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { 4147 bool Ignore = TestAndClearIgnoreResultAssign(); 4148 4149 Value *RHS; 4150 LValue LHS; 4151 4152 switch (E->getLHS()->getType().getObjCLifetime()) { 4153 case Qualifiers::OCL_Strong: 4154 std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore); 4155 break; 4156 4157 case Qualifiers::OCL_Autoreleasing: 4158 std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E); 4159 break; 4160 4161 case Qualifiers::OCL_ExplicitNone: 4162 std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore); 4163 break; 4164 4165 case Qualifiers::OCL_Weak: 4166 RHS = Visit(E->getRHS()); 4167 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); 4168 RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore); 4169 break; 4170 4171 case Qualifiers::OCL_None: 4172 // __block variables need to have the rhs evaluated first, plus 4173 // this should improve codegen just a little. 4174 RHS = Visit(E->getRHS()); 4175 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); 4176 4177 // Store the value into the LHS. Bit-fields are handled specially 4178 // because the result is altered by the store, i.e., [C99 6.5.16p1] 4179 // 'An assignment expression has the value of the left operand after 4180 // the assignment...'. 4181 if (LHS.isBitField()) { 4182 CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS); 4183 } else { 4184 CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc()); 4185 CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS); 4186 } 4187 } 4188 4189 // If the result is clearly ignored, return now. 4190 if (Ignore) 4191 return nullptr; 4192 4193 // The result of an assignment in C is the assigned r-value. 4194 if (!CGF.getLangOpts().CPlusPlus) 4195 return RHS; 4196 4197 // If the lvalue is non-volatile, return the computed value of the assignment. 4198 if (!LHS.isVolatileQualified()) 4199 return RHS; 4200 4201 // Otherwise, reload the value. 4202 return EmitLoadOfLValue(LHS, E->getExprLoc()); 4203 } 4204 4205 Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { 4206 // Perform vector logical and on comparisons with zero vectors. 
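// Illustrative note: vector '&&' is computed element-wise as
// '(a != 0) & (b != 0)' and sign-extended back to the result vector type;
// there is no short-circuit evaluation in the vector case.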
4207 if (E->getType()->isVectorType()) {
4208 CGF.incrementProfileCounter(E);
4209
4210 Value *LHS = Visit(E->getLHS());
4211 Value *RHS = Visit(E->getRHS());
4212 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4213 if (LHS->getType()->isFPOrFPVectorTy()) {
4214 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4215 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4216 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4217 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4218 } else {
4219 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4220 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4221 }
4222 Value *And = Builder.CreateAnd(LHS, RHS);
4223 return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
4224 }
4225
4226 llvm::Type *ResTy = ConvertType(E->getType());
4227
4228 // If we have 0 && RHS, see if we can elide RHS; if so, just return 0.
4229 // If we have 1 && X, just emit X without inserting the control flow.
4230 bool LHSCondVal;
4231 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4232 if (LHSCondVal) { // If we have 1 && X, just emit X.
4233 CGF.incrementProfileCounter(E);
4234
4235 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4236 // ZExt result to int or bool.
4237 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
4238 }
4239
4240 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
4241 if (!CGF.ContainsLabel(E->getRHS()))
4242 return llvm::Constant::getNullValue(ResTy);
4243 }
4244
4245 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
4246 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
4247
4248 CodeGenFunction::ConditionalEvaluation eval(CGF);
4249
4250 // Branch on the LHS first. If it is false, go to the failure (cont) block.
4251 CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
4252 CGF.getProfileCount(E->getRHS()));
4253
4254 // Any edges into the ContBlock are now from an (indeterminate number of)
4255 // edges from this first condition. All of these values will be false. Start
4256 // setting up the PHI node in the Cont Block for this.
4257 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4258 "", ContBlock);
4259 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4260 PI != PE; ++PI)
4261 PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
4262
4263 eval.begin(CGF);
4264 CGF.EmitBlock(RHSBlock);
4265 CGF.incrementProfileCounter(E);
4266 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4267 eval.end(CGF);
4268
4269 // Reacquire the RHS block, as there may be subblocks inserted.
4270 RHSBlock = Builder.GetInsertBlock();
4271
4272 // Emit an unconditional branch from this block to ContBlock.
4273 {
4274 // There is no need to emit a line number for an unconditional branch.
4275 auto NL = ApplyDebugLocation::CreateEmpty(CGF);
4276 CGF.EmitBlock(ContBlock);
4277 }
4278 // Insert an entry into the phi node for the edge with the value of RHSCond.
4279 PN->addIncoming(RHSCond, RHSBlock);
4280
4281 // Artificial location to preserve the scope information.
4282 {
4283 auto NL = ApplyDebugLocation::CreateArtificial(CGF);
4284 PN->setDebugLoc(Builder.getCurrentDebugLocation());
4285 }
4286
4287 // ZExt result to int.
4288 return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
4289 }
4290
4291 Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
4292 // Perform vector logical or on comparisons with zero vectors.
4293 if (E->getType()->isVectorType()) {
4294 CGF.incrementProfileCounter(E);
4295
4296 Value *LHS = Visit(E->getLHS());
4297 Value *RHS = Visit(E->getRHS());
4298 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4299 if (LHS->getType()->isFPOrFPVectorTy()) {
4300 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4301 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4302 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4303 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4304 } else {
4305 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4306 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4307 }
4308 Value *Or = Builder.CreateOr(LHS, RHS);
4309 return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
4310 }
4311
4312 llvm::Type *ResTy = ConvertType(E->getType());
4313
4314 // If we have 1 || RHS, see if we can elide RHS; if so, just return 1.
4315 // If we have 0 || X, just emit X without inserting the control flow.
4316 bool LHSCondVal;
4317 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4318 if (!LHSCondVal) { // If we have 0 || X, just emit X.
4319 CGF.incrementProfileCounter(E);
4320
4321 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4322 // ZExt result to int or bool.
4323 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
4324 }
4325
4326 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
4327 if (!CGF.ContainsLabel(E->getRHS()))
4328 return llvm::ConstantInt::get(ResTy, 1);
4329 }
4330
4331 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
4332 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
4333
4334 CodeGenFunction::ConditionalEvaluation eval(CGF);
4335
4336 // Branch on the LHS first. If it is true, go to the success (cont) block.
4337 CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
4338 CGF.getCurrentProfileCount() -
4339 CGF.getProfileCount(E->getRHS()));
4340
4341 // Any edges into the ContBlock are now from an (indeterminate number of)
4342 // edges from this first condition. All of these values will be true. Start
4343 // setting up the PHI node in the Cont Block for this.
4344 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4345 "", ContBlock);
4346 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4347 PI != PE; ++PI)
4348 PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
4349
4350 eval.begin(CGF);
4351
4352 // Emit the RHS condition as a bool value.
4353 CGF.EmitBlock(RHSBlock);
4354 CGF.incrementProfileCounter(E);
4355 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4356
4357 eval.end(CGF);
4358
4359 // Reacquire the RHS block, as there may be subblocks inserted.
4360 RHSBlock = Builder.GetInsertBlock();
4361
4362 // Emit an unconditional branch from this block to ContBlock. Insert an entry
4363 // into the phi node for the edge with the value of RHSCond.
4364 CGF.EmitBlock(ContBlock);
4365 PN->addIncoming(RHSCond, RHSBlock);
4366
4367 // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}

Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  CGF.EnsureInsertPoint();
  return Visit(E->getRHS());
}

//===----------------------------------------------------------------------===//
//                             Other Operators
//===----------------------------------------------------------------------===//

/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
                                                   CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the scope
  // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races that didn't
  // exist in the source-level program.
}

Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the live part.
    if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool)
        CGF.incrementProfileCounter(E);
      Value *Result = Visit(live);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
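  // (In OpenCL, `cond ? a : b` with vector operands selects each element
  // based on the most significant bit of the corresponding element of the
  // condition, which is what the icmp-slt-zero / sext / and / or sequence
  // below implements.)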
  if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
      condExpr->getType()->isExtVectorType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *condType = ConvertType(condExpr->getType());
    llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(
        TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
    llvm::Value *tmp2 = Builder.CreateNot(tmp);

    // Cast float to int to perform ANDs if necessary.
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }

    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }

  if (condExpr->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *CondType = ConvertType(condExpr->getType());
    auto *VecTy = cast<llvm::VectorType>(CondType);
    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);

    CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
    return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
      isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);

    CGF.incrementProfileCounter(E, StepV);

    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);
    if (!LHS) {
      // If the conditional has void type, make sure we return a null Value*.
      assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }
    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
                           CGF.getProfileCount(lhsExpr));

  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  eval.begin(CGF);
  Value *LHS = Visit(lhsExpr);
  eval.end(CGF);

  LHSBlock = Builder.GetInsertBlock();
  Builder.CreateBr(ContBlock);

  CGF.EmitBlock(RHSBlock);
  eval.begin(CGF);
  Value *RHS = Visit(rhsExpr);
  eval.end(CGF);

  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBlock(ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node for the real part.
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);
  return PN;
}

Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
  return Visit(E->getChosenSubExpr());
}

Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  QualType Ty = VE->getType();

  if (Ty->isVariablyModifiedType())
    CGF.EmitVariablyModifiedType(Ty);

  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  llvm::Type *ArgTy = ConvertType(VE->getType());

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "va_arg expression");
    return llvm::UndefValue::get(ArgTy);
  }

  // FIXME Volatility.
  llvm::Value *Val = Builder.CreateLoad(ArgPtr);

  // If EmitVAArg promoted the type, we must truncate it.
  if (ArgTy != Val->getType()) {
    if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
      Val = Builder.CreateIntToPtr(Val, ArgTy);
    else
      Val = Builder.CreateTrunc(Val, ArgTy);
  }

  return Val;
}

Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
  return CGF.EmitBlockLiteral(block);
}

// Convert a vec3 to vec4, or vice versa.
static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                 Value *Src, unsigned NumElementsDst) {
  llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
  static constexpr int Mask[] = {0, 1, 2, -1};
  return Builder.CreateShuffleVector(Src, UnV,
                                     llvm::makeArrayRef(Mask, NumElementsDst));
}

// Create cast instructions for converting LLVM value \p Src to LLVM type \p
// DstTy. \p Src has the same size as \p DstTy. Both are single value types
// but could be scalar or vectors of different lengths, and either can be
// pointer.
// There are 4 cases:
// 1. non-pointer -> non-pointer  : needs 1 bitcast
// 2. pointer -> pointer          : needs 1 bitcast or addrspacecast
// 3. pointer -> non-pointer
//   a) pointer -> intptr_t       : needs 1 ptrtoint
//   b) pointer -> non-intptr_t   : needs 1 ptrtoint then 1 bitcast
// 4. non-pointer -> pointer
//   a) intptr_t -> pointer      : needs 1 inttoptr
//   b) non-intptr_t -> pointer  : needs 1 bitcast then 1 inttoptr
// Note: for cases 3b and 4b two casts are required since LLVM casts do not
// allow casting directly between pointer types and non-integer non-pointer
// types.
static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
                                           const llvm::DataLayout &DL,
                                           Value *Src, llvm::Type *DstTy,
                                           StringRef Name = "") {
  auto SrcTy = Src->getType();

  // Case 1.
  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
    return Builder.CreateBitCast(Src, DstTy, Name);

  // Case 2.
  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
    return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);

  // Case 3.
  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
    // Case 3b.
    if (!DstTy->isIntegerTy())
      Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
    // Cases 3a and 3b.
    return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
  }

  // Case 4b.
  if (!SrcTy->isIntegerTy())
    Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
  // Cases 4a and 4b.
  return Builder.CreateIntToPtr(Src, DstTy, Name);
}

Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
  llvm::Type *DstTy = ConvertType(E->getType());

  llvm::Type *SrcTy = Src->getType();
  unsigned NumElementsSrc = isa<llvm::VectorType>(SrcTy) ?
    cast<llvm::VectorType>(SrcTy)->getNumElements() : 0;
  unsigned NumElementsDst = isa<llvm::VectorType>(DstTy) ?
    cast<llvm::VectorType>(DstTy)->getNumElements() : 0;

  // Going from vec3 to non-vec3 is a special case and requires a shuffle
  // vector to get a vec4, then a bitcast if the target type is different.
  if (NumElementsSrc == 3 && NumElementsDst != 3) {
    Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);

    if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
      Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                         DstTy);
    }

    Src->setName("astype");
    return Src;
  }

  // Going from non-vec3 to vec3 is a special case and requires a bitcast
  // to vec4 if the original type is not vec4, then a shuffle vector to
  // get a vec3.
  if (NumElementsSrc != 3 && NumElementsDst == 3) {
    if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
      auto *Vec4Ty = llvm::FixedVectorType::get(
          cast<llvm::VectorType>(DstTy)->getElementType(), 4);
      Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                         Vec4Ty);
    }

    Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
    Src->setName("astype");
    return Src;
  }

  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
                                      Src, DstTy, "astype");
}

Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
  return CGF.EmitAtomicExpr(E).getScalarVal();
}

//===----------------------------------------------------------------------===//
//                         Entry Point into this File
//===----------------------------------------------------------------------===//

/// Emit the computation of the specified expression of scalar type, ignoring
/// the result.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
  assert(E && hasScalarEvaluationKind(E->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, IgnoreResultAssign)
      .Visit(const_cast<Expr *>(E));
}

/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy,
                                             SourceLocation Loc) {
  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
         "Invalid scalar expression to emit");
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
}

/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy,
                                                      SourceLocation Loc) {
  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this)
      .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
}

llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
}

LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
  // object->isa or (*object).isa
  // Generate code as for: *(Class*)object

  Expr *BaseExpr = E->getBase();
  Address Addr = Address::invalid();
  if (BaseExpr->isRValue()) {
    Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
  } else {
    Addr = EmitLValue(BaseExpr).getAddress(*this);
  }

  // Cast the address to Class*.
  Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
  return MakeAddrLValue(Addr, E->getType());
}

LValue CodeGenFunction::EmitCompoundAssignmentLValue(
                                            const CompoundAssignOperator *E) {
  ScalarExprEmitter Scalar(*this);
  Value *Result = nullptr;
  switch (E->getOpcode()) {
#define COMPOUND_OP(Op)                                                        \
  case BO_##Op##Assign:                                                        \
    return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op,    \
                                           Result)
  COMPOUND_OP(Mul);
  COMPOUND_OP(Div);
  COMPOUND_OP(Rem);
  COMPOUND_OP(Add);
  COMPOUND_OP(Sub);
  COMPOUND_OP(Shl);
  COMPOUND_OP(Shr);
  COMPOUND_OP(And);
  COMPOUND_OP(Xor);
  COMPOUND_OP(Or);
#undef COMPOUND_OP

  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }

  llvm_unreachable("Unhandled compound assignment operator");
}

struct GEPOffsetAndOverflow {
  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset;
  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows;
};

/// Evaluate the given GEPVal, which is either an inbounds GEP or a constant,
/// and compute the total offset it applies from its base pointer BasePtr.
/// Returns the offset in bytes and a boolean flag for whether an overflow
/// happened during evaluation.
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
                                                 llvm::LLVMContext &VMContext,
                                                 CodeGenModule &CGM,
                                                 CGBuilderTy &Builder) {
  const auto &DL = CGM.getDataLayout();

  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset = nullptr;

  // Was the GEP already reduced to a constant?
  if (isa<llvm::Constant>(GEPVal)) {
    // Compute the offset by casting both pointers to integers and subtracting:
    // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
    Value *BasePtr_int =
        Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
    Value *GEPVal_int =
        Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
    TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
    return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
  }

  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
  assert(GEP->getPointerOperand() == BasePtr &&
         "BasePtr must be the base of the GEP.");
  assert(GEP->isInBounds() && "Expected inbounds GEP");

  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());

  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
  auto *SAddIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
  auto *SMulIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);

  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows = Builder.getFalse();

  /// Return the result of the given binary operation.
  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
                  llvm::Value *RHS) -> llvm::Value * {
    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");

    // If the operands are constants, return a constant result.
    if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
      if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
        llvm::APInt N;
        bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
                                                  /*Signed=*/true, N);
        if (HasOverflow)
          OffsetOverflows = Builder.getTrue();
        return llvm::ConstantInt::get(VMContext, N);
      }
    }

    // Otherwise, compute the result with checked arithmetic.
    auto *ResultAndOverflow = Builder.CreateCall(
        (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
    OffsetOverflows = Builder.CreateOr(
        Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
    return Builder.CreateExtractValue(ResultAndOverflow, 0);
  };

  // Determine the total byte offset by looking at each GEP operand.
  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    llvm::Value *LocalOffset;
    auto *Index = GTI.getOperand();
    // Compute the local offset contributed by this indexing step:
    if (auto *STy = GTI.getStructTypeOrNull()) {
      // For struct indexing, the local offset is the byte position of the
      // specified field.
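      // For example, with `struct S { int a; double b; };` on a typical
      // 64-bit target, indexing field 1 (`b`) contributes a constant local
      // offset of 8 bytes, as given by the DataLayout's struct layout.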
      unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
      LocalOffset = llvm::ConstantInt::get(
          IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
    } else {
      // Otherwise this is array-like indexing. The local offset is the index
      // multiplied by the element size.
      auto *ElementSize = llvm::ConstantInt::get(
          IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
      auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
    }

    // If this is the first offset, set it as the total offset. Otherwise, add
    // the local offset into the running total.
    if (!TotalOffset || TotalOffset == Zero)
      TotalOffset = LocalOffset;
    else
      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
  }

  return {TotalOffset, OffsetOverflows};
}

Value *
CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
                                        bool SignedIndices, bool IsSubtraction,
                                        SourceLocation Loc, const Twine &Name) {
  Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name);

  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;

  llvm::Type *PtrTy = Ptr->getType();

  // Perform the nullptr-and-offset check unless the nullptr is defined.
  bool PerformNullCheck = !NullPointerIsDefined(
      Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
  // Check for overflows unless the GEP got constant-folded, and only in the
  // default address space.
  bool PerformOverflowCheck =
      !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;

  if (!(PerformNullCheck || PerformOverflowCheck))
    return GEPVal;

  const auto &DL = CGM.getDataLayout();

  SanitizerScope SanScope(this);
  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  GEPOffsetAndOverflow EvaluatedGEP =
      EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);

  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
         "If the offset got constant-folded, we don't expect that there was an "
         "overflow.");

  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);

  // Common case: if the total offset is zero, and we are using C++ semantics,
  // where nullptr+0 is defined, don't emit a check.
  if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
    return GEPVal;

  // Now that we've computed the total offset, add it to the base pointer (with
  // wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);

  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;

  if (PerformNullCheck) {
    // In C++, if the base pointer evaluates to a null pointer value,
    // the only valid pointer this inbounds GEP can produce is also
    // a null pointer, so the offset must also evaluate to zero.
    // Likewise, if we have a non-null base pointer, we cannot get a null
    // pointer as a result, so the offset cannot be -intptr_t(BasePtr).
    // In other words, both pointers are either null, or both are non-null,
    // or the behaviour is undefined.
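    // (For example, in C++ `(int *)nullptr + 0` is valid while
    // `(int *)nullptr + 1` is not; the equality comparison of the two
    // "is not null" flags below encodes exactly that.)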
    //
    // C, however, is more strict in this regard, and gives more
    // optimization opportunities: in C, additionally, nullptr+0 is undefined.
    // So both the input to the 'gep inbounds' AND the output must not be null.
    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
    auto *Valid =
        CGM.getLangOpts().CPlusPlus
            ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
            : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
    Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
  }

  if (PerformOverflowCheck) {
    // The GEP is valid if:
    // 1) The total offset doesn't overflow, and
    // 2) The sign of the difference between the computed address and the base
    //    pointer matches the sign of the total offset.
    llvm::Value *ValidGEP;
    auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
    if (SignedIndices) {
      // GEP is computed as `unsigned base + signed offset`, therefore:
      // * If the offset was positive, then the computed pointer cannot be
      //   [unsigned] less than the base pointer, unless it overflowed.
      // * If the offset was negative, then the computed pointer cannot be
      //   [unsigned] greater than the base pointer, unless it overflowed.
      auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
      auto *PosOrZeroOffset =
          Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
      llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
      ValidGEP =
          Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
    } else if (!IsSubtraction) {
      // GEP is computed as `unsigned base + unsigned offset`, therefore the
      // computed pointer cannot be [unsigned] less than the base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
      ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    } else {
      // GEP is computed as `unsigned base - unsigned offset`, therefore the
      // computed pointer cannot be [unsigned] greater than the base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
      ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
    }
    ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
    Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
  }

  assert(!Checks.empty() && "Should have produced some checks.");

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);

  return GEPVal;
}