//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include <cstdarg>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    if (Signed)
      Result = LHSAP.sadd_ov(RHSAP, Overflow);
    else
      Result = LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    if (Signed)
      Result = LHSAP.ssub_ov(RHSAP, Overflow);
    else
      Result = LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    if (Signed)
      Result = LHSAP.smul_ov(RHSAP, Overflow);
    else
      Result = LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}

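// Illustrative sketch (not part of the emitter): how the APInt *_ov helpers
// used above report overflow, on an 8-bit example.
//
//   llvm::APInt A(8, 200), B(8, 100);
//   bool Ov;
//   llvm::APInt Sum = A.uadd_ov(B, Ov); // Sum == 44, Ov == true (300 > 255)
//   Sum = A.sadd_ov(B, Ov);             // signed view: -56 + 100 = 44, fits
//
// The helper simply forwards to the matching *_ov routine and leaves
// \p Result holding the (possibly wrapped) value.
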
struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;                   // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E; // Entire expr, used for 'unsupported' error reporting. May
                 // not be a binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases,
  /// this operation did not follow the usual arithmetic conversions, and the
  /// two operands might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations
    // return an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++11 nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                        const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return llvm::None;

  QualType BaseTy = Base->getType();
  if (!BaseTy->isPromotableIntegerType() ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return llvm::None;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).hasValue();
}

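// Worked example of the widening logic (illustration, not from the original
// source): given 'short a, b;', the expression 'a + b' is computed in 'int',
// and each operand is an ImplicitCastExpr from 'short'. IgnoreImpCasts()
// uncovers the 'short' base, which is promotable and narrower than 'int',
// so getUnwidenedIntegerType() returns 'short' for each side.
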
/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either
  // of the unpromoted types is less than half the size of the promoted type.
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}

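// Worked example for the unsigned-multiplication special case above
// (illustration only): with 'unsigned char x, y' promoted to 32-bit 'int',
// x * y <= 255 * 255 = 65025, which fits, and 2 * 8 < 32 holds, so the check
// is elided. With 'unsigned short x, y' (16 bits each), 65535 * 65535 does
// not fit in a signed 32-bit 'int' and 2 * 16 < 32 is false, so the check
// must stay.
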
class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of
        // the function, so there is no need to repeat that here, unless the
        // alignment-assumption sanitizer is enabled, in which case we prefer
        // the assumption over the alignment attribute on the IR function
        // parameter.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy = dyn_cast<TypedefType>(E->getType()))
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }

  /// EmitLoadOfLValue - Given an expression with scalar type that represents
  /// a value l-value, this method emits the address of the l-value, then
  /// loads and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h.
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not
  /// change the sign of the value. It is not UB, so we use the value after
  /// conversion.
  /// NOTE: Src and Dst may be the exact same value! (They may point to the
  /// same thing.)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

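  // Hedged example of the two checks declared above: for
  //   int8_t s = -1; uint32_t u = s;  // sign-extends to 0xFFFFFFFF
  // the value's sign changes (negative to non-negative view), so the
  // sign-change check fires; no bits are discarded, so the truncation check
  // does not apply. Conversely, uint32_t(257) -> uint8_t(1) trips only the
  // truncation check: both types are unsigned, so no sign change is possible.
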
  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and another fixed point, or a
  /// fixed point and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero =
        CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again. Optimize this common case.
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

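  // Sketch of the zext round-trip optimized by EmitIntToBoolConversion above
  // (illustrative): for C code like
  //   int b = (x == 0); if (b) ...
  // the comparison yields an i1 that is zero-extended to i32 for 'b', and
  // the 'if' immediately needs an i1 again. Rather than emitting
  // 'icmp ne i32 %zext, 0', the emitter peels off the zext and reuses the
  // original i1, erasing the zext when it has no other uses.
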
  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (E->getType()->isVoidType())
      return nullptr;

    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.Builder.CreateLoad(Address(
            Result, CGF.getContext().getTypeAlignInChars(E->getType())));
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),
                                  E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
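  // How the four visitors below map onto the shared helper (sketch):
  //   ++x -> EmitScalarPrePostIncDec(E, LV, /*isInc=*/true,  /*isPre=*/true)
  //   x-- -> EmitScalarPrePostIncDec(E, LV, /*isInc=*/false, /*isPre=*/false)
  // Pre forms yield the updated value; post forms yield the original load.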
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }
  Value *VisitUnaryPlus(const UnaryOperator *E) {
    // This differs from gcc, most likely due to a bug in gcc.
    TestAndClearIgnoreResultAssign();
    return Visit(E->getSubExpr());
  }
  Value *VisitUnaryMinus    (const UnaryOperator *E);
  Value *VisitUnaryNot      (const UnaryOperator *E);
  Value *VisitUnaryLNot     (const UnaryOperator *E);
  Value *VisitUnaryReal     (const UnaryOperator *E);
  Value *VisitUnaryImag     (const UnaryOperator *E);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        LLVM_FALLTHROUGH;
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Apply the FP features of this operation; the RAII object restores
      // the old settings on scope exit.
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
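  // How the signed-overflow switch in EmitMul above plays out (illustration):
  // under -fwrapv (SOB_Defined), 'a * b' lowers to a plain 'mul'; by default
  // (SOB_Undefined), to 'mul nsw'; under -ftrapv (SOB_Trapping), it goes
  // through EmitOverflowCheckedBinOp unless CanElideOverflowCheck proves the
  // product cannot overflow.
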
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero,
                                                  bool isDiv);
  // Common helper for computing the width of the shift's LHS, minus one.
  static Value *GetWidthMinusOneValue(Value *LHS, Value *RHS);

  // Used to constrain shift amounts for OpenCL: mask for power-of-two widths,
  // URem for non-power-of-two widths.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E);
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP)                                                   \
  Value *VisitBin ## OP(const BinaryOperator *E) {                        \
    return Emit ## OP(EmitBinOps(E));                                     \
  }                                                                       \
  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) {      \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP);         \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

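  // Expansion sketch: HANDLEBINOP(Add) declares
  //   Value *VisitBinAdd(const BinaryOperator *E) {
  //     return EmitAdd(EmitBinOps(E));
  //   }
  //   Value *VisitBinAddAssign(const CompoundAssignOperator *E) {
  //     return EmitCompoundAssign(E, &ScalarExprEmitter::EmitAdd);
  //   }
  // so each arithmetic operator and its compound-assignment form share a
  // single Emit* implementation.
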
  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP

  Value *VisitBinAssign     (const BinaryOperator *E);

  Value *VisitBinLAnd       (const BinaryOperator *E);
  Value *VisitBinLOr        (const BinaryOperator *E);
  Value *VisitBinComma      (const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

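// Note on the floating-point path above (illustration):
// EmitFloatToBoolConversion uses CreateFCmpUNE (unordered-not-equal), so NaN
// converts to 'true'. This matches C, where (bool)x is defined as x != 0 and
// NaN != 0.0 holds.
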
void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // to an integer).
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
        CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

  llvm::Value *GE =
      Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
      Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(Src != Dst);
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned
  // truncation. Otherwise, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerMask Mask;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value.
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.
  return std::make_pair(Kind, std::make_pair(Check, Mask));
}

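// The helper's round-trip trick on an 8-bit example (illustrative IR):
//   %dst       = trunc i32 257 to i8        ; %dst == 1
//   %anyext    = zext i8 %dst to i32        ; back to 1
//   %truncheck = icmp eq i32 %anyext, 257   ; false -> lossy truncation
// A value that fits (e.g. 42) survives the round trip and compares equal.
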
static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
    QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}

void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src,
                                                   QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be a truncation; otherwise we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled, and we are truncating
  // from a larger unsigned type to a smaller signed type, let that next
  // sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check =
          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  // If the comparison result is 'i1 false', then the truncation was lossy.

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // NOTE: zero value is considered to be non-negative.
  auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
                                       const char *Name) -> Value * {
    // Does this value have a signed type?
    bool VSigned = VType->isSignedIntegerOrEnumerationType();
    llvm::Type *VTy = V->getType();
    if (!VSigned) {
      // If the value is unsigned, then it is never negative.
      // FIXME: can we encounter non-scalar VTy here?
      return llvm::ConstantInt::getFalse(VTy->getContext());
    }
    // Get the zero of the same type with which we will be comparing.
    llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
    // %V.isnegative = icmp slt %V, 0
    // I.e., is %V *strictly* less than zero; does it have a negative value?
    return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                              llvm::Twine(Name) + "." + V->getName() +
                                  ".negativitycheck");
  };

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
  // 3. Now, was the 'negativity status' preserved during the conversion?
  // NOTE: conversion from negative to zero is considered to change the sign.
  // (We want to get 'false' when the conversion changed the sign.)
  // So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
}

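// Worked example of the negativity-status comparison above (illustration):
// for int32_t(-5) -> uint32_t, SrcIsNegative is true and DstIsNegative is
// constant false (unsigned values are never negative), so the icmp eq yields
// false and the sanitizer reports a sign change. For int32_t(-5) -> int64_t,
// both sides are negative, the statuses match, and the check passes (in fact
// the caller below never even emits a check for that widening).
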
void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src,
                                                   QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // Now, we do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would always
  // have been dropped by the opt passes (instcombine) anyway.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to a *larger* *signed* type, then no check is
  // needed, because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero).
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled, and this is a
    // truncation from a signed type, then no check is needed, because here
    // the sign change check is interchangeable with the truncation check.
    return;
  }
  // That's it. We can't rule out any more cases with the data we have.

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled, and we are
    // truncating from a larger unsigned type to a smaller signed type, let's
    // handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
                                         QualType DstType, llvm::Type *SrcTy,
                                         llvm::Type *DstTy,
                                         ScalarConversionOpts Opts) {
  // The element types determine the kind of cast to perform.
  llvm::Type *SrcElementTy;
  llvm::Type *DstElementTy;
  QualType SrcElementType;
  QualType DstElementType;
  if (SrcType->isMatrixType() && DstType->isMatrixType()) {
    SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
    DstElementType = DstType->castAs<MatrixType>()->getElementType();
  } else {
    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
           "cannot cast between matrix and non-matrix types");
    SrcElementTy = SrcTy;
    DstElementTy = DstTy;
    SrcElementType = SrcType;
    DstElementType = DstType;
  }

  if (isa<llvm::IntegerType>(SrcElementTy)) {
    bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
    if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }

    if (isa<llvm::IntegerType>(DstElementTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  if (isa<llvm::IntegerType>(DstElementTy)) {
    assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
    bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();

    // If we can't recognize overflow as undefined behavior, assume that
    // overflow saturates. This protects against normal optimizations if we
    // are compiling with non-standard FP semantics.
    if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
      llvm::Intrinsic::ID IID =
          IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
      return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
    }

    if (IsSigned)
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

  if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
    return Builder.CreateFPTrunc(Src, DstTy, "conv");
  return Builder.CreateFPExt(Src, DstTy, "conv");
}

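// Hedged sketch of the saturating path above: with
// -fno-strict-float-cast-overflow, '(int)1.0e10f' lowers to
//   %conv = call i32 @llvm.fptosi.sat.i32.f32(float 1.0e10)
// which clamps the result to 2147483647 instead of yielding a poison value
// the way a plain 'fptosi' would on an out-of-range input.
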
/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType,
                                               SourceLocation Loc,
                                               ScalarConversionOpts Opts) {
  // All conversions involving fixed point types should be handled by the
  // EmitFixedPoint family functions. This is done to prevent bloating up
  // this function more, and although fixed point numbers are represented by
  // integers, we do not want to follow any logic that assumes they should be
  // treated as integers.
  // TODO(leonardchan): When necessary, add another if statement checking for
  // conversions to fixed point types from other types.
  if (SrcType->isFixedPointType()) {
    if (DstType->isBooleanType())
      // It is important that we check this before checking if the dest type
      // is an integer because booleans are technically integer types.
      // We do not need to check the padding bit on unsigned types if unsigned
      // padding is enabled because overflow into this bit is undefined
      // behavior.
      return Builder.CreateIsNotNull(Src, "tobool");
    if (DstType->isFixedPointType() || DstType->isIntegerType() ||
        DstType->isRealFloatingType())
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion from a fixed point type to another type.");
  } else if (DstType->isFixedPointType()) {
    if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
      // This also includes converting booleans and enums to fixed point types.
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion to a fixed point type from another type.");
  }

  QualType NoncanonicalSrcType = SrcType;
  QualType NoncanonicalDstType = DstType;

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return nullptr;

  llvm::Value *OrigSrc = Src;
  QualType OrigSrcType = SrcType;
  llvm::Type *SrcTy = Src->getType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  llvm::Type *DstTy = ConvertType(DstType);

  // Cast from half through float if half isn't a native type.
  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Cast to FP using the intrinsic if the half type itself isn't supported.
    if (DstTy->isFloatingPointTy()) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
            Src);
    } else {
      // Cast to other types through float, using either the intrinsic or
      // FPExt, depending on whether the half type itself is supported (as
      // opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        Src = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            Src);
      } else {
        Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
      }
      SrcType = CGF.getContext().FloatTy;
      SrcTy = CGF.FloatTy;
    }
  }

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy) {
    if (Opts.EmitImplicitIntegerSignChangeChecks)
      EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
                                 NoncanonicalDstType, Loc);

    return Src;
  }

  // Handle pointer conversions next: pointers can only be converted to/from
  // other pointers and integers. Check for pointer types in terms of LLVM, as
  // some native types (like Obj-C id) may map to a pointer type.
  if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(SrcTy))
      return Builder.CreateBitCast(Src, DstTy, "conv");

    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    llvm::Value *IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(SrcTy)) {
    // Must be a ptr-to-int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }

  // A scalar can be splatted to an extended vector of the same element type.
  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
    // Sema should add casts to make sure that the source expression's type
    // is the same as the vector's element type (sans qualifiers).
    assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
               SrcType.getTypePtr() &&
           "Splatted expr doesn't match with vector element type?");

    // Splat the element across to all elements.
    unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
    return Builder.CreateVectorSplat(NumElements, Src, "splat");
  }

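  // Splat sketch (illustrative IR; the exact value names come from
  // CreateVectorSplat): converting scalar 2.0f to a 4-element ext_vector of
  // float emits an insertelement into lane 0 followed by a shufflevector
  // with an all-zero mask, e.g.
  //   %insert = insertelement <4 x float> poison, float 2.0, i64 0
  //   %splat  = shufflevector <4 x float> %insert, <4 x float> poison,
  //             <4 x i32> zeroinitializer
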
  if (SrcType->isMatrixType() && DstType->isMatrixType())
    return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);

  if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
    // Allow bitcast from vector to integer/fp of the same size.
    unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
    unsigned DstSize = DstTy->getPrimitiveSizeInBits();
    if (SrcSize == DstSize)
      return Builder.CreateBitCast(Src, DstTy, "conv");

    // Conversions between vectors of different sizes are not allowed except
    // when vectors of half are involved. Operations on storage-only half
    // vectors require promoting half vector operands to float vectors and
    // truncating the result, which is either an int or float vector, to a
    // short or half vector.

    // Source and destination are both expected to be vectors.
    llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    (void)DstElementTy;

    assert(((SrcElementTy->isIntegerTy() &&
             DstElementTy->isIntegerTy()) ||
            (SrcElementTy->isFloatingPointTy() &&
             DstElementTy->isFloatingPointTy())) &&
           "unexpected conversion between a floating-point vector and an "
           "integer vector");

    // Truncate an i32 vector to an i16 vector.
    if (SrcElementTy->isIntegerTy())
      return Builder.CreateIntCast(Src, DstTy, false, "conv");

    // Truncate a float vector to a half vector.
    if (SrcSize > DstSize)
      return Builder.CreateFPTrunc(Src, DstTy, "conv");

    // Promote a half vector to a float vector.
    return Builder.CreateFPExt(Src, DstTy, "conv");
  }

  // Finally, we have the arithmetic types: real int/float.
  Value *Res = nullptr;
  llvm::Type *ResTy = DstTy;

  // An overflowing conversion has undefined behavior if either the source
  // type or the destination type is a floating-point type. However, we
  // consider the range of representable values for all floating-point types
  // to be [-inf,+inf], so no overflow can ever happen when the destination
  // type is a floating-point type.
  if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
      OrigSrcType->isFloatingType())
    EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
                             Loc);

  // Cast to half through float if half isn't a native type.
  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Make sure we cast in a single step if converting from another FP type.
    if (SrcTy->isFloatingPointTy()) {
      // Use the intrinsic if the half type itself isn't supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
      // If the half type is supported, just use an fptrunc.
      return Builder.CreateFPTrunc(Src, DstTy);
    }
    DstTy = CGF.FloatTy;
  }

  Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);

  if (DstTy != ResTy) {
    if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
      assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
      Res = Builder.CreateCall(
          CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
                               CGF.CGM.FloatTy),
          Res);
    } else {
      Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
    }
  }

  if (Opts.EmitImplicitIntegerTruncationChecks)
    EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
                               NoncanonicalDstType, Loc);

  if (Opts.EmitImplicitIntegerSignChangeChecks)
    EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
                               NoncanonicalDstType, Loc);

  return Res;
}

1455 return Builder.CreateFPTrunc(Src, DstTy);
1456 }
1457 DstTy = CGF.FloatTy;
1458 }
1459
1460 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1461
1462 if (DstTy != ResTy) {
1463 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1464 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1465 Res = Builder.CreateCall(
1466 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1467 Res);
1468 } else {
1469 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1470 }
1471 }
1472
1473 if (Opts.EmitImplicitIntegerTruncationChecks)
1474 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1475 NoncanonicalDstType, Loc);
1476
1477 if (Opts.EmitImplicitIntegerSignChangeChecks)
1478 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1479 NoncanonicalDstType, Loc);
1480
1481 return Res;
1482 }
1483
1484 Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1485 QualType DstTy,
1486 SourceLocation Loc) {
1487 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1488 llvm::Value *Result;
1489 if (SrcTy->isRealFloatingType())
1490 Result = FPBuilder.CreateFloatingToFixed(Src,
1491 CGF.getContext().getFixedPointSemantics(DstTy));
1492 else if (DstTy->isRealFloatingType())
1493 Result = FPBuilder.CreateFixedToFloating(Src,
1494 CGF.getContext().getFixedPointSemantics(SrcTy),
1495 ConvertType(DstTy));
1496 else {
1497 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1498 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1499
1500 if (DstTy->isIntegerType())
1501 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1502 DstFPSema.getWidth(),
1503 DstFPSema.isSigned());
1504 else if (SrcTy->isIntegerType())
1505 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1506 DstFPSema);
1507 else
1508 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1509 }
1510 return Result;
1511 }
1512
1513 /// Emit a conversion from the specified complex type to the specified
1514 /// destination type, where the destination type is an LLVM scalar type.
1515 Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1516 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1517 SourceLocation Loc) {
1518 // Get the source element type.
1519 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1520
1521 // Handle conversions to bool first; they are special: comparisons against 0.
1522 if (DstTy->isBooleanType()) {
1523 // Complex != 0 -> (Real != 0) | (Imag != 0)
1524 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1525 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1526 return Builder.CreateOr(Src.first, Src.second, "tobool");
1527 }
1528
1529 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1530 // the imaginary part of the complex value is discarded and the value of the
1531 // real part is converted according to the conversion rules for the
1532 // corresponding real type."
1533 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1534 }
1535
1536 Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1537 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1538 }
1539
1540 /// Emit a sanitization check for the given "binary" operation (which
1541 /// might actually be a unary increment which has been lowered to a binary
1542 /// operation). The check passes if all values in \p Checks (which are \c i1)
1543 /// are \c true.
1544 void ScalarExprEmitter::EmitBinOpCheck(
1545 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1546 assert(CGF.IsSanitizerScope);
1547 SanitizerHandler Check;
1548 SmallVector<llvm::Constant *, 4> StaticData;
1549 SmallVector<llvm::Value *, 2> DynamicData;
1550
1551 BinaryOperatorKind Opcode = Info.Opcode;
1552 if (BinaryOperator::isCompoundAssignmentOp(Opcode))
1553 Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
1554
1555 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1556 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1557 if (UO && UO->getOpcode() == UO_Minus) {
1558 Check = SanitizerHandler::NegateOverflow;
1559 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1560 DynamicData.push_back(Info.RHS);
1561 } else {
1562 if (BinaryOperator::isShiftOp(Opcode)) {
1563 // Shift LHS negative or too large, or RHS out of bounds.
1564 Check = SanitizerHandler::ShiftOutOfBounds;
1565 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1566 StaticData.push_back(
1567 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1568 StaticData.push_back(
1569 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1570 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1571 // Divide or modulo by zero, or signed overflow (e.g. INT_MIN / -1).
1572 Check = SanitizerHandler::DivremOverflow;
1573 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1574 } else {
1575 // Arithmetic overflow (+, -, *).
1576 switch (Opcode) {
1577 case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1578 case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1579 case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1580 default: llvm_unreachable("unexpected opcode for bin op check");
1581 }
1582 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1583 }
1584 DynamicData.push_back(Info.LHS);
1585 DynamicData.push_back(Info.RHS);
1586 }
1587
1588 CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
1589 }
1590
1591 //===----------------------------------------------------------------------===//
1592 // Visitor Methods
1593 //===----------------------------------------------------------------------===//
1594
1595 Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1596 CGF.ErrorUnsupported(E, "scalar expression");
1597 if (E->getType()->isVoidType())
1598 return nullptr;
1599 return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1600 }
1601
1602 Value *
1603 ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1604 ASTContext &Context = CGF.getContext();
1605 llvm::Optional<LangAS> GlobalAS =
1606 Context.getTargetInfo().getConstantAddressSpace();
1607 llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
1608 E->ComputeName(Context), "__usn_str",
1609 static_cast<unsigned>(GlobalAS.getValueOr(LangAS::Default)));
1610
1611 unsigned ExprAS = Context.getTargetAddressSpace(E->getType());
1612
1613 if (GlobalConstStr->getType()->getPointerAddressSpace() == ExprAS)
1614 return GlobalConstStr;
1615
1616 llvm::PointerType *PtrTy = cast<llvm::PointerType>(GlobalConstStr->getType());
1617 llvm::PointerType *NewPtrTy =
1618 llvm::PointerType::getWithSamePointeeType(PtrTy, ExprAS);
1619 return Builder.CreateAddrSpaceCast(GlobalConstStr, NewPtrTy, "usn_addr_cast");
1620 }
1621
1622 Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1623 // Vector Mask Case
1624 if (E->getNumSubExprs() == 2) {
1625 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1626
Value *RHS = CGF.EmitScalarExpr(E->getExpr(1)); 1627 Value *Mask; 1628 1629 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType()); 1630 unsigned LHSElts = LTy->getNumElements(); 1631 1632 Mask = RHS; 1633 1634 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType()); 1635 1636 // Mask off the high bits of each shuffle index. 1637 Value *MaskBits = 1638 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1); 1639 Mask = Builder.CreateAnd(Mask, MaskBits, "mask"); 1640 1641 // newv = undef 1642 // mask = mask & maskbits 1643 // for each elt 1644 // n = extract mask i 1645 // x = extract val n 1646 // newv = insert newv, x, i 1647 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(), 1648 MTy->getNumElements()); 1649 Value* NewV = llvm::UndefValue::get(RTy); 1650 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) { 1651 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i); 1652 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx"); 1653 1654 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt"); 1655 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins"); 1656 } 1657 return NewV; 1658 } 1659 1660 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0)); 1661 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1)); 1662 1663 SmallVector<int, 32> Indices; 1664 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) { 1665 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2); 1666 // Check for -1 and output it as undef in the IR. 1667 if (Idx.isSigned() && Idx.isAllOnes()) 1668 Indices.push_back(-1); 1669 else 1670 Indices.push_back(Idx.getZExtValue()); 1671 } 1672 1673 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle"); 1674 } 1675 1676 Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) { 1677 QualType SrcType = E->getSrcExpr()->getType(), 1678 DstType = E->getType(); 1679 1680 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr()); 1681 1682 SrcType = CGF.getContext().getCanonicalType(SrcType); 1683 DstType = CGF.getContext().getCanonicalType(DstType); 1684 if (SrcType == DstType) return Src; 1685 1686 assert(SrcType->isVectorType() && 1687 "ConvertVector source type must be a vector"); 1688 assert(DstType->isVectorType() && 1689 "ConvertVector destination type must be a vector"); 1690 1691 llvm::Type *SrcTy = Src->getType(); 1692 llvm::Type *DstTy = ConvertType(DstType); 1693 1694 // Ignore conversions like int -> uint. 1695 if (SrcTy == DstTy) 1696 return Src; 1697 1698 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(), 1699 DstEltType = DstType->castAs<VectorType>()->getElementType(); 1700 1701 assert(SrcTy->isVectorTy() && 1702 "ConvertVector source IR type must be a vector"); 1703 assert(DstTy->isVectorTy() && 1704 "ConvertVector destination IR type must be a vector"); 1705 1706 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(), 1707 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType(); 1708 1709 if (DstEltType->isBooleanType()) { 1710 assert((SrcEltTy->isFloatingPointTy() || 1711 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion"); 1712 1713 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy); 1714 if (SrcEltTy->isFloatingPointTy()) { 1715 return Builder.CreateFCmpUNE(Src, Zero, "tobool"); 1716 } else { 1717 return Builder.CreateICmpNE(Src, Zero, "tobool"); 1718 } 1719 } 1720 1721 // We have the arithmetic types: real int/float. 
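// As a rough summary of the dispatch below (element kinds on the left, IR
// opcode on the right; signedness comes from the source for int -> fp and
// from the destination for fp -> int):
//   int -> int : trunc/zext/sext   (CreateIntCast)
//   int -> fp  : sitofp/uitofp
//   fp  -> int : fptosi/fptoui
//   fp  -> fp  : fptrunc/fpext     (TypeID ordering used as a size proxy)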
1722 Value *Res = nullptr;
1723
1724 if (isa<llvm::IntegerType>(SrcEltTy)) {
1725 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1726 if (isa<llvm::IntegerType>(DstEltTy))
1727 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1728 else if (InputSigned)
1729 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1730 else
1731 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1732 } else if (isa<llvm::IntegerType>(DstEltTy)) {
1733 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1734 if (DstEltType->isSignedIntegerOrEnumerationType())
1735 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1736 else
1737 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1738 } else {
1739 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1740 "Unknown real conversion");
1741 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1742 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1743 else
1744 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1745 }
1746
1747 return Res;
1748 }
1749
1750 Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1751 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1752 CGF.EmitIgnoredExpr(E->getBase());
1753 return CGF.emitScalarConstant(Constant, E);
1754 } else {
1755 Expr::EvalResult Result;
1756 if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1757 llvm::APSInt Value = Result.Val.getInt();
1758 CGF.EmitIgnoredExpr(E->getBase());
1759 return Builder.getInt(Value);
1760 }
1761 }
1762
1763 return EmitLoadOfLValue(E);
1764 }
1765
1766 Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1767 TestAndClearIgnoreResultAssign();
1768
1769 // Emit subscript expressions in rvalue contexts. For most cases, this just
1770 // loads the lvalue formed by the subscript expr. However, we have to be
1771 // careful, because the base of a vector subscript is occasionally an rvalue,
1772 // so we can't get it as an lvalue.
1773 if (!E->getBase()->getType()->isVectorType())
1774 return EmitLoadOfLValue(E);
1775
1776 // Handle the vector case. The base must be a vector; the index must be an
1777 // integer value.
1778 Value *Base = Visit(E->getBase());
1779 Value *Idx = Visit(E->getIdx());
1780 QualType IdxTy = E->getIdx()->getType();
1781
1782 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1783 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1784
1785 return Builder.CreateExtractElement(Base, Idx, "vecext");
1786 }
1787
1788 Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
1789 TestAndClearIgnoreResultAssign();
1790
1791 // Handle the matrix case. The base must be a matrix; the row and column
1792 // indices must be integer values.
1793 Value *RowIdx = Visit(E->getRowIdx());
1794 Value *ColumnIdx = Visit(E->getColumnIdx());
1795
1796 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
1797 unsigned NumRows = MatrixTy->getNumRows();
1798 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
1799 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
1800 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
1801 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
1802
1803 Value *Matrix = Visit(E->getBase());
1804
1805 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
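// Matrix values are stored column-major in a flat vector, so the index
// computed above is RowIdx + ColumnIdx * NumRows; e.g. for a 4x3 matrix m,
// m[1][2] reads flattened element 1 + 2 * 4 == 9.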
1806 return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
1807 }
1808
1809 static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1810 unsigned Off) {
1811 int MV = SVI->getMaskValue(Idx);
1812 if (MV == -1)
1813 return -1;
1814 return Off + MV;
1815 }
1816
1817 static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1818 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
1819 "Index operand too large for shufflevector mask!");
1820 return C->getZExtValue();
1821 }
1822
1823 Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
1824 bool Ignore = TestAndClearIgnoreResultAssign();
1825 (void)Ignore;
1826 assert(Ignore == false && "init list ignored");
1827 unsigned NumInitElements = E->getNumInits();
1828
1829 if (E->hadArrayRangeDesignator())
1830 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1831
1832 llvm::VectorType *VType =
1833 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
1834
1835 if (!VType) {
1836 if (NumInitElements == 0) {
1837 // C++11 value-initialization for the scalar.
1838 return EmitNullValue(E->getType());
1839 }
1840 // We have a scalar in braces. Just use the first element.
1841 return Visit(E->getInit(0));
1842 }
1843
1844 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
1845
1846 // Loop over initializers collecting the Value for each, and remembering
1847 // whether the source was a swizzle (ExtVectorElementExpr). This will allow
1848 // us to fold the shuffle for the swizzle into the shuffle for the vector
1849 // initializer, since LLVM optimizers generally do not want to touch
1850 // shuffles.
1851 unsigned CurIdx = 0;
1852 bool VIsUndefShuffle = false;
1853 llvm::Value *V = llvm::UndefValue::get(VType);
1854 for (unsigned i = 0; i != NumInitElements; ++i) {
1855 Expr *IE = E->getInit(i);
1856 Value *Init = Visit(IE);
1857 SmallVector<int, 16> Args;
1858
1859 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
1860
1861 // Handle scalar elements. If the scalar initializer is actually one
1862 // element of a different vector of the same width, use shuffle instead of
1863 // extract+insert.
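// For instance, when one element of a float4 literal is written as w.x for
// some other float4 w, the extractelement produced for the swizzle can be
// folded away and w used directly as a shufflevector operand.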
1864 if (!VVT) { 1865 if (isa<ExtVectorElementExpr>(IE)) { 1866 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init); 1867 1868 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType()) 1869 ->getNumElements() == ResElts) { 1870 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand()); 1871 Value *LHS = nullptr, *RHS = nullptr; 1872 if (CurIdx == 0) { 1873 // insert into undef -> shuffle (src, undef) 1874 // shufflemask must use an i32 1875 Args.push_back(getAsInt32(C, CGF.Int32Ty)); 1876 Args.resize(ResElts, -1); 1877 1878 LHS = EI->getVectorOperand(); 1879 RHS = V; 1880 VIsUndefShuffle = true; 1881 } else if (VIsUndefShuffle) { 1882 // insert into undefshuffle && size match -> shuffle (v, src) 1883 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V); 1884 for (unsigned j = 0; j != CurIdx; ++j) 1885 Args.push_back(getMaskElt(SVV, j, 0)); 1886 Args.push_back(ResElts + C->getZExtValue()); 1887 Args.resize(ResElts, -1); 1888 1889 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0); 1890 RHS = EI->getVectorOperand(); 1891 VIsUndefShuffle = false; 1892 } 1893 if (!Args.empty()) { 1894 V = Builder.CreateShuffleVector(LHS, RHS, Args); 1895 ++CurIdx; 1896 continue; 1897 } 1898 } 1899 } 1900 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx), 1901 "vecinit"); 1902 VIsUndefShuffle = false; 1903 ++CurIdx; 1904 continue; 1905 } 1906 1907 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements(); 1908 1909 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's 1910 // input is the same width as the vector being constructed, generate an 1911 // optimized shuffle of the swizzle input into the result. 1912 unsigned Offset = (CurIdx == 0) ? 0 : ResElts; 1913 if (isa<ExtVectorElementExpr>(IE)) { 1914 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init); 1915 Value *SVOp = SVI->getOperand(0); 1916 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType()); 1917 1918 if (OpTy->getNumElements() == ResElts) { 1919 for (unsigned j = 0; j != CurIdx; ++j) { 1920 // If the current vector initializer is a shuffle with undef, merge 1921 // this shuffle directly into it. 1922 if (VIsUndefShuffle) { 1923 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0)); 1924 } else { 1925 Args.push_back(j); 1926 } 1927 } 1928 for (unsigned j = 0, je = InitElts; j != je; ++j) 1929 Args.push_back(getMaskElt(SVI, j, Offset)); 1930 Args.resize(ResElts, -1); 1931 1932 if (VIsUndefShuffle) 1933 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0); 1934 1935 Init = SVOp; 1936 } 1937 } 1938 1939 // Extend init to result vector length, and then shuffle its contribution 1940 // to the vector initializer into V. 1941 if (Args.empty()) { 1942 for (unsigned j = 0; j != InitElts; ++j) 1943 Args.push_back(j); 1944 Args.resize(ResElts, -1); 1945 Init = Builder.CreateShuffleVector(Init, Args, "vext"); 1946 1947 Args.clear(); 1948 for (unsigned j = 0; j != CurIdx; ++j) 1949 Args.push_back(j); 1950 for (unsigned j = 0; j != InitElts; ++j) 1951 Args.push_back(j + Offset); 1952 Args.resize(ResElts, -1); 1953 } 1954 1955 // If V is undef, make sure it ends up on the RHS of the shuffle to aid 1956 // merging subsequent shuffles into this one. 1957 if (CurIdx == 0) 1958 std::swap(V, Init); 1959 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit"); 1960 VIsUndefShuffle = isa<llvm::UndefValue>(Init); 1961 CurIdx += InitElts; 1962 } 1963 1964 // FIXME: evaluate codegen vs. shuffling against constant null vector. 
1965 // Emit remaining default initializers.
1966 llvm::Type *EltTy = VType->getElementType();
1967
1969 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
1970 Value *Idx = Builder.getInt32(CurIdx);
1971 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
1972 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
1973 }
1974 return V;
1975 }
1976
1977 bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
1978 const Expr *E = CE->getSubExpr();
1979
1980 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
1981 return false;
1982
1983 if (isa<CXXThisExpr>(E->IgnoreParens())) {
1984 // We always assume that 'this' is never null.
1985 return false;
1986 }
1987
1988 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
1989 // And that glvalue casts are never null.
1990 if (ICE->isGLValue())
1991 return false;
1992 }
1993
1994 return true;
1995 }
1996
1997 // VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
1998 // have to handle a broader range of conversions than explicit casts, as they
1999 // handle things like function to ptr-to-function decay, etc.
2000 Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2001 Expr *E = CE->getSubExpr();
2002 QualType DestTy = CE->getType();
2003 CastKind Kind = CE->getCastKind();
2004
2005 // These cases are generally not written to ignore the result of
2006 // evaluating their sub-expressions, so we clear this now.
2007 bool Ignored = TestAndClearIgnoreResultAssign();
2008
2009 // Since almost all cast kinds apply to scalars, this switch doesn't have
2010 // a default case, so the compiler will warn on a missing case. The cases
2011 // are in the same order as in the CastKind enum.
2012 switch (Kind) {
2013 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2014 case CK_BuiltinFnToFnPtr:
2015 llvm_unreachable("builtin functions are handled elsewhere");
2016
2017 case CK_LValueBitCast:
2018 case CK_ObjCObjectLValueCast: {
2019 Address Addr = EmitLValue(E).getAddress(CGF);
2020 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
2021 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
2022 return EmitLoadOfLValue(LV, CE->getExprLoc());
2023 }
2024
2025 case CK_LValueToRValueBitCast: {
2026 LValue SourceLVal = CGF.EmitLValue(E);
2027 Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
2028 CGF.ConvertTypeForMem(DestTy));
2029 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2030 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2031 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2032 }
2033
2034 case CK_CPointerToObjCPointerCast:
2035 case CK_BlockPointerToObjCPointerCast:
2036 case CK_AnyPointerToBlockPointerCast:
2037 case CK_BitCast: {
2038 Value *Src = Visit(const_cast<Expr*>(E));
2039 llvm::Type *SrcTy = Src->getType();
2040 llvm::Type *DstTy = ConvertType(DestTy);
2041 if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
2042 SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
2043 llvm_unreachable("wrong cast for pointers in different address spaces "
2044 "(must be an address space cast)!");
2045 }
2046
2047 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2048 if (auto PT = DestTy->getAs<PointerType>())
2049 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
2050 /*MayBeNull=*/true,
2051 CodeGenFunction::CFITCK_UnrelatedCast,
2052 CE->getBeginLoc());
2053 }
2054
2055 if
(CGF.CGM.getCodeGenOpts().StrictVTablePointers) { 2056 const QualType SrcType = E->getType(); 2057 2058 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) { 2059 // Casting to pointer that could carry dynamic information (provided by 2060 // invariant.group) requires launder. 2061 Src = Builder.CreateLaunderInvariantGroup(Src); 2062 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) { 2063 // Casting to pointer that does not carry dynamic information (provided 2064 // by invariant.group) requires stripping it. Note that we don't do it 2065 // if the source could not be dynamic type and destination could be 2066 // dynamic because dynamic information is already laundered. It is 2067 // because launder(strip(src)) == launder(src), so there is no need to 2068 // add extra strip before launder. 2069 Src = Builder.CreateStripInvariantGroup(Src); 2070 } 2071 } 2072 2073 // Update heapallocsite metadata when there is an explicit pointer cast. 2074 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) { 2075 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) { 2076 QualType PointeeType = DestTy->getPointeeType(); 2077 if (!PointeeType.isNull()) 2078 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType, 2079 CE->getExprLoc()); 2080 } 2081 } 2082 2083 // If Src is a fixed vector and Dst is a scalable vector, and both have the 2084 // same element type, use the llvm.experimental.vector.insert intrinsic to 2085 // perform the bitcast. 2086 if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) { 2087 if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) { 2088 // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate 2089 // vector, use a vector insert and bitcast the result. 2090 bool NeedsBitCast = false; 2091 auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); 2092 llvm::Type *OrigType = DstTy; 2093 if (ScalableDst == PredType && 2094 FixedSrc->getElementType() == Builder.getInt8Ty()) { 2095 DstTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2); 2096 ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy); 2097 NeedsBitCast = true; 2098 } 2099 if (FixedSrc->getElementType() == ScalableDst->getElementType()) { 2100 llvm::Value *UndefVec = llvm::UndefValue::get(DstTy); 2101 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty); 2102 llvm::Value *Result = Builder.CreateInsertVector( 2103 DstTy, UndefVec, Src, Zero, "castScalableSve"); 2104 if (NeedsBitCast) 2105 Result = Builder.CreateBitCast(Result, OrigType); 2106 return Result; 2107 } 2108 } 2109 } 2110 2111 // If Src is a scalable vector and Dst is a fixed vector, and both have the 2112 // same element type, use the llvm.experimental.vector.extract intrinsic to 2113 // perform the bitcast. 2114 if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) { 2115 if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) { 2116 // If we are casting a scalable 16 x i1 predicate vector to a fixed i8 2117 // vector, bitcast the source and use a vector extract. 
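// This works because a <vscale x 16 x i1> predicate occupies two bytes per
// vscale granule, making it layout-compatible with <vscale x 2 x i8>; the
// bitcast reinterprets the predicate as bytes so that a fixed-length i8
// vector can be extracted from it.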
2118 auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); 2119 if (ScalableSrc == PredType && 2120 FixedDst->getElementType() == Builder.getInt8Ty()) { 2121 SrcTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2); 2122 ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy); 2123 Src = Builder.CreateBitCast(Src, SrcTy); 2124 } 2125 if (ScalableSrc->getElementType() == FixedDst->getElementType()) { 2126 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty); 2127 return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve"); 2128 } 2129 } 2130 } 2131 2132 // Perform VLAT <-> VLST bitcast through memory. 2133 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics 2134 // require the element types of the vectors to be the same, we 2135 // need to keep this around for bitcasts between VLAT <-> VLST where 2136 // the element types of the vectors are not the same, until we figure 2137 // out a better way of doing these casts. 2138 if ((isa<llvm::FixedVectorType>(SrcTy) && 2139 isa<llvm::ScalableVectorType>(DstTy)) || 2140 (isa<llvm::ScalableVectorType>(SrcTy) && 2141 isa<llvm::FixedVectorType>(DstTy))) { 2142 Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value"); 2143 LValue LV = CGF.MakeAddrLValue(Addr, E->getType()); 2144 CGF.EmitStoreOfScalar(Src, LV); 2145 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy), 2146 "castFixedSve"); 2147 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy); 2148 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo()); 2149 return EmitLoadOfLValue(DestLV, CE->getExprLoc()); 2150 } 2151 2152 return Builder.CreateBitCast(Src, DstTy); 2153 } 2154 case CK_AddressSpaceConversion: { 2155 Expr::EvalResult Result; 2156 if (E->EvaluateAsRValue(Result, CGF.getContext()) && 2157 Result.Val.isNullPointer()) { 2158 // If E has side effect, it is emitted even if its final result is a 2159 // null pointer. In that case, a DCE pass should be able to 2160 // eliminate the useless instructions emitted during translating E. 2161 if (Result.HasSideEffects) 2162 Visit(E); 2163 return CGF.CGM.getNullPointer(cast<llvm::PointerType>( 2164 ConvertType(DestTy)), DestTy); 2165 } 2166 // Since target may map different address spaces in AST to the same address 2167 // space, an address space conversion may end up as a bitcast. 2168 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast( 2169 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(), 2170 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy)); 2171 } 2172 case CK_AtomicToNonAtomic: 2173 case CK_NonAtomicToAtomic: 2174 case CK_UserDefinedConversion: 2175 return Visit(const_cast<Expr*>(E)); 2176 2177 case CK_NoOp: { 2178 llvm::Value *V = Visit(const_cast<Expr *>(E)); 2179 if (V) { 2180 // CK_NoOp can model a pointer qualification conversion, which can remove 2181 // an array bound and change the IR type. 2182 // FIXME: Once pointee types are removed from IR, remove this. 
2183 llvm::Type *T = ConvertType(DestTy); 2184 if (T != V->getType()) 2185 V = Builder.CreateBitCast(V, T); 2186 } 2187 return V; 2188 } 2189 2190 case CK_BaseToDerived: { 2191 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl(); 2192 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!"); 2193 2194 Address Base = CGF.EmitPointerWithAlignment(E); 2195 Address Derived = 2196 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl, 2197 CE->path_begin(), CE->path_end(), 2198 CGF.ShouldNullCheckClassCastValue(CE)); 2199 2200 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is 2201 // performed and the object is not of the derived type. 2202 if (CGF.sanitizePerformTypeCheck()) 2203 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(), 2204 Derived.getPointer(), DestTy->getPointeeType()); 2205 2206 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast)) 2207 CGF.EmitVTablePtrCheckForCast( 2208 DestTy->getPointeeType(), Derived.getPointer(), 2209 /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast, 2210 CE->getBeginLoc()); 2211 2212 return Derived.getPointer(); 2213 } 2214 case CK_UncheckedDerivedToBase: 2215 case CK_DerivedToBase: { 2216 // The EmitPointerWithAlignment path does this fine; just discard 2217 // the alignment. 2218 return CGF.EmitPointerWithAlignment(CE).getPointer(); 2219 } 2220 2221 case CK_Dynamic: { 2222 Address V = CGF.EmitPointerWithAlignment(E); 2223 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE); 2224 return CGF.EmitDynamicCast(V, DCE); 2225 } 2226 2227 case CK_ArrayToPointerDecay: 2228 return CGF.EmitArrayToPointerDecay(E).getPointer(); 2229 case CK_FunctionToPointerDecay: 2230 return EmitLValue(E).getPointer(CGF); 2231 2232 case CK_NullToPointer: 2233 if (MustVisitNullValue(E)) 2234 CGF.EmitIgnoredExpr(E); 2235 2236 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)), 2237 DestTy); 2238 2239 case CK_NullToMemberPointer: { 2240 if (MustVisitNullValue(E)) 2241 CGF.EmitIgnoredExpr(E); 2242 2243 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>(); 2244 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT); 2245 } 2246 2247 case CK_ReinterpretMemberPointer: 2248 case CK_BaseToDerivedMemberPointer: 2249 case CK_DerivedToBaseMemberPointer: { 2250 Value *Src = Visit(E); 2251 2252 // Note that the AST doesn't distinguish between checked and 2253 // unchecked member pointer conversions, so we always have to 2254 // implement checked conversions here. This is inefficient when 2255 // actual control flow may be required in order to perform the 2256 // check, which it is for data member pointers (but not member 2257 // function pointers on Itanium and ARM). 
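// Under the Itanium ABI, for example, a data member pointer is a byte
// offset and null is represented as -1; a derived-to-base conversion must
// add the base offset only to non-null values, so the ABI implementation
// may emit a select or branch rather than a plain add.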
2258 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2259 }
2260
2261 case CK_ARCProduceObject:
2262 return CGF.EmitARCRetainScalarExpr(E);
2263 case CK_ARCConsumeObject:
2264 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2265 case CK_ARCReclaimReturnedObject:
2266 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2267 case CK_ARCExtendBlockObject:
2268 return CGF.EmitARCExtendBlockObject(E);
2269
2270 case CK_CopyAndAutoreleaseBlockObject:
2271 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2272
2273 case CK_FloatingRealToComplex:
2274 case CK_FloatingComplexCast:
2275 case CK_IntegralRealToComplex:
2276 case CK_IntegralComplexCast:
2277 case CK_IntegralComplexToFloatingComplex:
2278 case CK_FloatingComplexToIntegralComplex:
2279 case CK_ConstructorConversion:
2280 case CK_ToUnion:
2281 llvm_unreachable("scalar cast to non-scalar value");
2282
2283 case CK_LValueToRValue:
2284 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2285 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2286 return Visit(const_cast<Expr*>(E));
2287
2288 case CK_IntegralToPointer: {
2289 Value *Src = Visit(const_cast<Expr*>(E));
2290
2291 // First, convert to the correct width so that we control the kind of
2292 // extension.
2293 auto DestLLVMTy = ConvertType(DestTy);
2294 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2295 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2296 llvm::Value* IntResult =
2297 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2298
2299 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2300
2301 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2302 // Going from integer to pointer that could be dynamic requires reloading
2303 // dynamic information from invariant.group.
2304 if (DestTy.mayBeDynamicClass())
2305 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2306 }
2307 return IntToPtr;
2308 }
2309 case CK_PointerToIntegral: {
2310 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2311 auto *PtrExpr = Visit(E);
2312
2313 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2314 const QualType SrcType = E->getType();
2315
2316 // Casting to an integer requires stripping dynamic information, as an
2317 // integer does not carry it.
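// For example, under -fstrict-vtable-pointers, '(uintptr_t)p' where p may
// point to a dynamic class emits llvm.strip.invariant.group on p before
// the ptrtoint below.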
2318 if (SrcType.mayBeDynamicClass()) 2319 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr); 2320 } 2321 2322 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy)); 2323 } 2324 case CK_ToVoid: { 2325 CGF.EmitIgnoredExpr(E); 2326 return nullptr; 2327 } 2328 case CK_MatrixCast: { 2329 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2330 CE->getExprLoc()); 2331 } 2332 case CK_VectorSplat: { 2333 llvm::Type *DstTy = ConvertType(DestTy); 2334 Value *Elt = Visit(const_cast<Expr*>(E)); 2335 // Splat the element across to all elements 2336 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements(); 2337 return Builder.CreateVectorSplat(NumElements, Elt, "splat"); 2338 } 2339 2340 case CK_FixedPointCast: 2341 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2342 CE->getExprLoc()); 2343 2344 case CK_FixedPointToBoolean: 2345 assert(E->getType()->isFixedPointType() && 2346 "Expected src type to be fixed point type"); 2347 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type"); 2348 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2349 CE->getExprLoc()); 2350 2351 case CK_FixedPointToIntegral: 2352 assert(E->getType()->isFixedPointType() && 2353 "Expected src type to be fixed point type"); 2354 assert(DestTy->isIntegerType() && "Expected dest type to be an integer"); 2355 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2356 CE->getExprLoc()); 2357 2358 case CK_IntegralToFixedPoint: 2359 assert(E->getType()->isIntegerType() && 2360 "Expected src type to be an integer"); 2361 assert(DestTy->isFixedPointType() && 2362 "Expected dest type to be fixed point type"); 2363 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2364 CE->getExprLoc()); 2365 2366 case CK_IntegralCast: { 2367 ScalarConversionOpts Opts; 2368 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) { 2369 if (!ICE->isPartOfExplicitCast()) 2370 Opts = ScalarConversionOpts(CGF.SanOpts); 2371 } 2372 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2373 CE->getExprLoc(), Opts); 2374 } 2375 case CK_IntegralToFloating: 2376 case CK_FloatingToIntegral: 2377 case CK_FloatingCast: 2378 case CK_FixedPointToFloating: 2379 case CK_FloatingToFixedPoint: { 2380 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE); 2381 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2382 CE->getExprLoc()); 2383 } 2384 case CK_BooleanToSignedIntegral: { 2385 ScalarConversionOpts Opts; 2386 Opts.TreatBooleanAsSigned = true; 2387 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2388 CE->getExprLoc(), Opts); 2389 } 2390 case CK_IntegralToBoolean: 2391 return EmitIntToBoolConversion(Visit(E)); 2392 case CK_PointerToBoolean: 2393 return EmitPointerToBoolConversion(Visit(E), E->getType()); 2394 case CK_FloatingToBoolean: { 2395 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE); 2396 return EmitFloatToBoolConversion(Visit(E)); 2397 } 2398 case CK_MemberPointerToBoolean: { 2399 llvm::Value *MemPtr = Visit(E); 2400 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>(); 2401 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT); 2402 } 2403 2404 case CK_FloatingComplexToReal: 2405 case CK_IntegralComplexToReal: 2406 return CGF.EmitComplexExpr(E, false, true).first; 2407 2408 case CK_FloatingComplexToBoolean: 2409 case CK_IntegralComplexToBoolean: { 2410 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E); 2411 2412 // TODO: kill this function off, inline appropriate case here 2413 return 
EmitComplexToScalarConversion(V, E->getType(), DestTy,
2414 CE->getExprLoc());
2415 }
2416
2417 case CK_ZeroToOCLOpaqueType: {
2418 assert((DestTy->isEventT() || DestTy->isQueueT() ||
2419 DestTy->isOCLIntelSubgroupAVCType()) &&
2420 "CK_ZeroToOCLOpaqueType cast on an unexpected type");
2421 return llvm::Constant::getNullValue(ConvertType(DestTy));
2422 }
2423
2424 case CK_IntToOCLSampler:
2425 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2426
2427 } // end of switch
2428
2429 llvm_unreachable("unknown scalar cast");
2430 }
2431
2432 Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2433 CodeGenFunction::StmtExprEvaluation eval(CGF);
2434 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2435 !E->getType()->isVoidType());
2436 if (!RetAlloca.isValid())
2437 return nullptr;
2438 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2439 E->getExprLoc());
2440 }
2441
2442 Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2443 CodeGenFunction::RunCleanupsScope Scope(CGF);
2444 Value *V = Visit(E->getSubExpr());
2445 // Defend against dominance problems caused by jumps out of expression
2446 // evaluation through the shared cleanup block.
2447 Scope.ForceCleanup({&V});
2448 return V;
2449 }
2450
2451 //===----------------------------------------------------------------------===//
2452 // Unary Operators
2453 //===----------------------------------------------------------------------===//
2454
2455 static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2456 llvm::Value *InVal, bool IsInc,
2457 FPOptions FPFeatures) {
2458 BinOpInfo BinOp;
2459 BinOp.LHS = InVal;
2460 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2461 BinOp.Ty = E->getType();
2462 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2463 BinOp.FPFeatures = FPFeatures;
2464 BinOp.E = E;
2465 return BinOp;
2466 }
2467
2468 llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2469 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2470 llvm::Value *Amount =
2471 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
2472 StringRef Name = IsInc ? "inc" : "dec";
2473 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2474 case LangOptions::SOB_Defined:
2475 return Builder.CreateAdd(InVal, Amount, Name);
2476 case LangOptions::SOB_Undefined:
2477 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2478 return Builder.CreateNSWAdd(InVal, Amount, Name);
2479 LLVM_FALLTHROUGH;
2480 case LangOptions::SOB_Trapping:
2481 if (!E->canOverflow())
2482 return Builder.CreateNSWAdd(InVal, Amount, Name);
2483 return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2484 E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2485 }
2486 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2487 }
2488
2489 namespace {
2490 /// Handles check and update for lastprivate conditional variables.
2491 class OMPLastprivateConditionalUpdateRAII { 2492 private: 2493 CodeGenFunction &CGF; 2494 const UnaryOperator *E; 2495 2496 public: 2497 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF, 2498 const UnaryOperator *E) 2499 : CGF(CGF), E(E) {} 2500 ~OMPLastprivateConditionalUpdateRAII() { 2501 if (CGF.getLangOpts().OpenMP) 2502 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional( 2503 CGF, E->getSubExpr()); 2504 } 2505 }; 2506 } // namespace 2507 2508 llvm::Value * 2509 ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, 2510 bool isInc, bool isPre) { 2511 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E); 2512 QualType type = E->getSubExpr()->getType(); 2513 llvm::PHINode *atomicPHI = nullptr; 2514 llvm::Value *value; 2515 llvm::Value *input; 2516 2517 int amount = (isInc ? 1 : -1); 2518 bool isSubtraction = !isInc; 2519 2520 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) { 2521 type = atomicTy->getValueType(); 2522 if (isInc && type->isBooleanType()) { 2523 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type); 2524 if (isPre) { 2525 Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified()) 2526 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent); 2527 return Builder.getTrue(); 2528 } 2529 // For atomic bool increment, we just store true and return it for 2530 // preincrement, do an atomic swap with true for postincrement 2531 return Builder.CreateAtomicRMW( 2532 llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True, 2533 llvm::AtomicOrdering::SequentiallyConsistent); 2534 } 2535 // Special case for atomic increment / decrement on integers, emit 2536 // atomicrmw instructions. We skip this if we want to be doing overflow 2537 // checking, and fall into the slow path with the atomic cmpxchg loop. 2538 if (!type->isBooleanType() && type->isIntegerType() && 2539 !(type->isUnsignedIntegerType() && 2540 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) && 2541 CGF.getLangOpts().getSignedOverflowBehavior() != 2542 LangOptions::SOB_Trapping) { 2543 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add : 2544 llvm::AtomicRMWInst::Sub; 2545 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add : 2546 llvm::Instruction::Sub; 2547 llvm::Value *amt = CGF.EmitToMemory( 2548 llvm::ConstantInt::get(ConvertType(type), 1, true), type); 2549 llvm::Value *old = 2550 Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt, 2551 llvm::AtomicOrdering::SequentiallyConsistent); 2552 return isPre ? Builder.CreateBinOp(op, old, amt) : old; 2553 } 2554 value = EmitLoadOfLValue(LV, E->getExprLoc()); 2555 input = value; 2556 // For every other atomic operation, we need to emit a load-op-cmpxchg loop 2557 llvm::BasicBlock *startBB = Builder.GetInsertBlock(); 2558 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn); 2559 value = CGF.EmitToMemory(value, type); 2560 Builder.CreateBr(opBB); 2561 Builder.SetInsertPoint(opBB); 2562 atomicPHI = Builder.CreatePHI(value->getType(), 2); 2563 atomicPHI->addIncoming(value, startBB); 2564 value = atomicPHI; 2565 } else { 2566 value = EmitLoadOfLValue(LV, E->getExprLoc()); 2567 input = value; 2568 } 2569 2570 // Special case of integer increment that we have to check first: bool++. 2571 // Due to promotion rules, we get: 2572 // bool++ -> bool = bool + 1 2573 // -> bool = (int)bool + 1 2574 // -> bool = ((int)bool + 1 != 0) 2575 // An interesting aspect of this is that increment is always true. 2576 // Decrement does not have this property. 
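// Concretely, for 'bool b':
//   b++ always yields true: (int)b + 1 is 1 or 2, never 0.
//   b-- flips the value: ((int)b - 1 != 0) is true exactly when b was
//   false, so it cannot be folded to a constant store.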
2577 if (isInc && type->isBooleanType()) {
2578 value = Builder.getTrue();
2579
2580 // Most common case by far: integer increment.
2581 } else if (type->isIntegerType()) {
2582 QualType promotedType;
2583 bool canPerformLossyDemotionCheck = false;
2584 if (type->isPromotableIntegerType()) {
2585 promotedType = CGF.getContext().getPromotedIntegerType(type);
2586 assert(promotedType != type && "Shouldn't promote to the same type.");
2587 canPerformLossyDemotionCheck = true;
2588 canPerformLossyDemotionCheck &=
2589 CGF.getContext().getCanonicalType(type) !=
2590 CGF.getContext().getCanonicalType(promotedType);
2591 canPerformLossyDemotionCheck &=
2592 PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
2593 type, promotedType);
2594 assert((!canPerformLossyDemotionCheck ||
2595 type->isSignedIntegerOrEnumerationType() ||
2596 promotedType->isSignedIntegerOrEnumerationType() ||
2597 ConvertType(type)->getScalarSizeInBits() ==
2598 ConvertType(promotedType)->getScalarSizeInBits()) &&
2599 "The following check expects that if we do promotion to different "
2600 "underlying canonical type, at least one of the types (either "
2601 "base or promoted) will be signed, or the bitwidths will match.");
2602 }
2603 if (CGF.SanOpts.hasOneOf(
2604 SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
2605 canPerformLossyDemotionCheck) {
2606 // `x += 1` (for `x` with width less than int) is modeled as
2607 // promotion+arithmetic+demotion, so a lossy demotion is caught there
2608 // with ease. Plain inc/dec with width less than int cannot overflow
2609 // because of promotion rules, so the promotion and demotion are normally
2610 // elided, and the lossy "demotion" would go unnoticed. Because we still
2611 // want to catch these cases when the sanitizer is enabled, we perform
2612 // the promotion, then perform the increment/decrement in the wider type,
2613 // and finally perform the demotion. This will catch lossy demotions.
2614
2615 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
2616 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2617 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2618 // Do pass non-default ScalarConversionOpts so that sanitizer check is
2619 // emitted.
2620 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
2621 ScalarConversionOpts(CGF.SanOpts));
2622
2623 // Note that signed integer inc/dec with width less than int can't
2624 // overflow because of promotion rules; we're just eliding a few steps
2625 // here.
2626 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2627 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2628 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2629 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2630 value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2631 E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2632 } else {
2633 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2634 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2635 }
2636
2637 // Next most common: pointer increment.
2638 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2639 QualType type = ptr->getPointeeType();
2640
2641 // VLA types don't have constant size.
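// For example, given 'int (*p)[n]; ++p;' the pointer must advance by n
// ints, so the GEP below is scaled by the runtime element count of the
// VLA rather than by a compile-time constant.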
2642 if (const VariableArrayType *vla 2643 = CGF.getContext().getAsVariableArrayType(type)) { 2644 llvm::Value *numElts = CGF.getVLASize(vla).NumElts; 2645 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize"); 2646 llvm::Type *elemTy = value->getType()->getPointerElementType(); 2647 if (CGF.getLangOpts().isSignedOverflowDefined()) 2648 value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc"); 2649 else 2650 value = CGF.EmitCheckedInBoundsGEP( 2651 elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction, 2652 E->getExprLoc(), "vla.inc"); 2653 2654 // Arithmetic on function pointers (!) is just +-1. 2655 } else if (type->isFunctionType()) { 2656 llvm::Value *amt = Builder.getInt32(amount); 2657 2658 value = CGF.EmitCastToVoidPtr(value); 2659 if (CGF.getLangOpts().isSignedOverflowDefined()) 2660 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr"); 2661 else 2662 value = CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt, 2663 /*SignedIndices=*/false, 2664 isSubtraction, E->getExprLoc(), 2665 "incdec.funcptr"); 2666 value = Builder.CreateBitCast(value, input->getType()); 2667 2668 // For everything else, we can just do a simple increment. 2669 } else { 2670 llvm::Value *amt = Builder.getInt32(amount); 2671 llvm::Type *elemTy = CGF.ConvertTypeForMem(type); 2672 if (CGF.getLangOpts().isSignedOverflowDefined()) 2673 value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr"); 2674 else 2675 value = CGF.EmitCheckedInBoundsGEP( 2676 elemTy, value, amt, /*SignedIndices=*/false, isSubtraction, 2677 E->getExprLoc(), "incdec.ptr"); 2678 } 2679 2680 // Vector increment/decrement. 2681 } else if (type->isVectorType()) { 2682 if (type->hasIntegerRepresentation()) { 2683 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount); 2684 2685 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec"); 2686 } else { 2687 value = Builder.CreateFAdd( 2688 value, 2689 llvm::ConstantFP::get(value->getType(), amount), 2690 isInc ? "inc" : "dec"); 2691 } 2692 2693 // Floating point. 2694 } else if (type->isRealFloatingType()) { 2695 // Add the inc/dec to the real part. 2696 llvm::Value *amt; 2697 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); 2698 2699 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { 2700 // Another special case: half FP increment should be done via float 2701 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { 2702 value = Builder.CreateCall( 2703 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, 2704 CGF.CGM.FloatTy), 2705 input, "incdec.conv"); 2706 } else { 2707 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv"); 2708 } 2709 } 2710 2711 if (value->getType()->isFloatTy()) 2712 amt = llvm::ConstantFP::get(VMContext, 2713 llvm::APFloat(static_cast<float>(amount))); 2714 else if (value->getType()->isDoubleTy()) 2715 amt = llvm::ConstantFP::get(VMContext, 2716 llvm::APFloat(static_cast<double>(amount))); 2717 else { 2718 // Remaining types are Half, LongDouble, __ibm128 or __float128. Convert 2719 // from float. 2720 llvm::APFloat F(static_cast<float>(amount)); 2721 bool ignored; 2722 const llvm::fltSemantics *FS; 2723 // Don't use getFloatTypeSemantics because Half isn't 2724 // necessarily represented using the "half" LLVM type. 
2725 if (value->getType()->isFP128Ty()) 2726 FS = &CGF.getTarget().getFloat128Format(); 2727 else if (value->getType()->isHalfTy()) 2728 FS = &CGF.getTarget().getHalfFormat(); 2729 else if (value->getType()->isPPC_FP128Ty()) 2730 FS = &CGF.getTarget().getIbm128Format(); 2731 else 2732 FS = &CGF.getTarget().getLongDoubleFormat(); 2733 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored); 2734 amt = llvm::ConstantFP::get(VMContext, F); 2735 } 2736 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec"); 2737 2738 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { 2739 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { 2740 value = Builder.CreateCall( 2741 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, 2742 CGF.CGM.FloatTy), 2743 value, "incdec.conv"); 2744 } else { 2745 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv"); 2746 } 2747 } 2748 2749 // Fixed-point types. 2750 } else if (type->isFixedPointType()) { 2751 // Fixed-point types are tricky. In some cases, it isn't possible to 2752 // represent a 1 or a -1 in the type at all. Piggyback off of 2753 // EmitFixedPointBinOp to avoid having to reimplement saturation. 2754 BinOpInfo Info; 2755 Info.E = E; 2756 Info.Ty = E->getType(); 2757 Info.Opcode = isInc ? BO_Add : BO_Sub; 2758 Info.LHS = value; 2759 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false); 2760 // If the type is signed, it's better to represent this as +(-1) or -(-1), 2761 // since -1 is guaranteed to be representable. 2762 if (type->isSignedFixedPointType()) { 2763 Info.Opcode = isInc ? BO_Sub : BO_Add; 2764 Info.RHS = Builder.CreateNeg(Info.RHS); 2765 } 2766 // Now, convert from our invented integer literal to the type of the unary 2767 // op. This will upscale and saturate if necessary. This value can become 2768 // undef in some cases. 2769 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder); 2770 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty); 2771 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema); 2772 value = EmitFixedPointBinOp(Info); 2773 2774 // Objective-C pointer types. 2775 } else { 2776 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>(); 2777 value = CGF.EmitCastToVoidPtr(value); 2778 2779 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType()); 2780 if (!isInc) size = -size; 2781 llvm::Value *sizeValue = 2782 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity()); 2783 2784 if (CGF.getLangOpts().isSignedOverflowDefined()) 2785 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr"); 2786 else 2787 value = CGF.EmitCheckedInBoundsGEP( 2788 CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction, 2789 E->getExprLoc(), "incdec.objptr"); 2790 value = Builder.CreateBitCast(value, input->getType()); 2791 } 2792 2793 if (atomicPHI) { 2794 llvm::BasicBlock *curBlock = Builder.GetInsertBlock(); 2795 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); 2796 auto Pair = CGF.EmitAtomicCompareExchange( 2797 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc()); 2798 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type); 2799 llvm::Value *success = Pair.second; 2800 atomicPHI->addIncoming(old, curBlock); 2801 Builder.CreateCondBr(success, contBB, atomicPHI->getParent()); 2802 Builder.SetInsertPoint(contBB); 2803 return isPre ? value : input; 2804 } 2805 2806 // Store the updated result through the lvalue. 
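// For bit-fields the store may truncate the value; EmitStoreThroughBitfieldLValue
// writes the result back through &value so that a pre-increment yields what
// was actually stored.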
2807 if (LV.isBitField()) 2808 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value); 2809 else 2810 CGF.EmitStoreThroughLValue(RValue::get(value), LV); 2811 2812 // If this is a postinc, return the value read from memory, otherwise use the 2813 // updated value. 2814 return isPre ? value : input; 2815 } 2816 2817 2818 2819 Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) { 2820 TestAndClearIgnoreResultAssign(); 2821 Value *Op = Visit(E->getSubExpr()); 2822 2823 // Generate a unary FNeg for FP ops. 2824 if (Op->getType()->isFPOrFPVectorTy()) 2825 return Builder.CreateFNeg(Op, "fneg"); 2826 2827 // Emit unary minus with EmitSub so we handle overflow cases etc. 2828 BinOpInfo BinOp; 2829 BinOp.RHS = Op; 2830 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType()); 2831 BinOp.Ty = E->getType(); 2832 BinOp.Opcode = BO_Sub; 2833 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); 2834 BinOp.E = E; 2835 return EmitSub(BinOp); 2836 } 2837 2838 Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) { 2839 TestAndClearIgnoreResultAssign(); 2840 Value *Op = Visit(E->getSubExpr()); 2841 return Builder.CreateNot(Op, "neg"); 2842 } 2843 2844 Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { 2845 // Perform vector logical not on comparison with zero vector. 2846 if (E->getType()->isVectorType() && 2847 E->getType()->castAs<VectorType>()->getVectorKind() == 2848 VectorType::GenericVector) { 2849 Value *Oper = Visit(E->getSubExpr()); 2850 Value *Zero = llvm::Constant::getNullValue(Oper->getType()); 2851 Value *Result; 2852 if (Oper->getType()->isFPOrFPVectorTy()) { 2853 CodeGenFunction::CGFPOptionsRAII FPOptsRAII( 2854 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts())); 2855 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp"); 2856 } else 2857 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp"); 2858 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); 2859 } 2860 2861 // Compare operand to zero. 2862 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr()); 2863 2864 // Invert value. 2865 // TODO: Could dynamically modify easy computations here. For example, if 2866 // the operand is an icmp ne, turn into icmp eq. 2867 BoolVal = Builder.CreateNot(BoolVal, "lnot"); 2868 2869 // ZExt result to the expr type. 2870 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext"); 2871 } 2872 2873 Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) { 2874 // Try folding the offsetof to a constant. 2875 Expr::EvalResult EVResult; 2876 if (E->EvaluateAsInt(EVResult, CGF.getContext())) { 2877 llvm::APSInt Value = EVResult.Val.getInt(); 2878 return Builder.getInt(Value); 2879 } 2880 2881 // Loop over the components of the offsetof to compute the value. 
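// For example, __builtin_offsetof(struct S, a[i].b) is walked as two
// components: an Array node contributing i * sizeof(a's element type) and
// a Field node contributing the constant byte offset of 'b' within it.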
2882 unsigned n = E->getNumComponents(); 2883 llvm::Type* ResultType = ConvertType(E->getType()); 2884 llvm::Value* Result = llvm::Constant::getNullValue(ResultType); 2885 QualType CurrentType = E->getTypeSourceInfo()->getType(); 2886 for (unsigned i = 0; i != n; ++i) { 2887 OffsetOfNode ON = E->getComponent(i); 2888 llvm::Value *Offset = nullptr; 2889 switch (ON.getKind()) { 2890 case OffsetOfNode::Array: { 2891 // Compute the index 2892 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex()); 2893 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr); 2894 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType(); 2895 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv"); 2896 2897 // Save the element type 2898 CurrentType = 2899 CGF.getContext().getAsArrayType(CurrentType)->getElementType(); 2900 2901 // Compute the element size 2902 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType, 2903 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity()); 2904 2905 // Multiply out to compute the result 2906 Offset = Builder.CreateMul(Idx, ElemSize); 2907 break; 2908 } 2909 2910 case OffsetOfNode::Field: { 2911 FieldDecl *MemberDecl = ON.getField(); 2912 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl(); 2913 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD); 2914 2915 // Compute the index of the field in its parent. 2916 unsigned i = 0; 2917 // FIXME: It would be nice if we didn't have to loop here! 2918 for (RecordDecl::field_iterator Field = RD->field_begin(), 2919 FieldEnd = RD->field_end(); 2920 Field != FieldEnd; ++Field, ++i) { 2921 if (*Field == MemberDecl) 2922 break; 2923 } 2924 assert(i < RL.getFieldCount() && "offsetof field in wrong type"); 2925 2926 // Compute the offset to the field 2927 int64_t OffsetInt = RL.getFieldOffset(i) / 2928 CGF.getContext().getCharWidth(); 2929 Offset = llvm::ConstantInt::get(ResultType, OffsetInt); 2930 2931 // Save the element type. 2932 CurrentType = MemberDecl->getType(); 2933 break; 2934 } 2935 2936 case OffsetOfNode::Identifier: 2937 llvm_unreachable("dependent __builtin_offsetof"); 2938 2939 case OffsetOfNode::Base: { 2940 if (ON.getBase()->isVirtual()) { 2941 CGF.ErrorUnsupported(E, "virtual base in offsetof"); 2942 continue; 2943 } 2944 2945 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl(); 2946 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD); 2947 2948 // Save the element type. 2949 CurrentType = ON.getBase()->getType(); 2950 2951 // Compute the offset to the base. 2952 const RecordType *BaseRT = CurrentType->getAs<RecordType>(); 2953 CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl()); 2954 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD); 2955 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity()); 2956 break; 2957 } 2958 } 2959 Result = Builder.CreateAdd(Result, Offset); 2960 } 2961 return Result; 2962 } 2963 2964 /// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of 2965 /// argument of the sizeof expression as an integer. 2966 Value * 2967 ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( 2968 const UnaryExprOrTypeTraitExpr *E) { 2969 QualType TypeToSize = E->getTypeOfArgument(); 2970 if (E->getKind() == UETT_SizeOf) { 2971 if (const VariableArrayType *VAT = 2972 CGF.getContext().getAsVariableArrayType(TypeToSize)) { 2973 if (E->isArgumentType()) { 2974 // sizeof(type) - make sure to emit the VLA size. 
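// e.g. 'sizeof(int[n])' must still evaluate 'n'; emitting the
// variably-modified type performs that evaluation.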
2975 CGF.EmitVariablyModifiedType(TypeToSize); 2976 } else { 2977 // C99 6.5.3.4p2: If the argument is an expression of type 2978 // VLA, it is evaluated. 2979 CGF.EmitIgnoredExpr(E->getArgumentExpr()); 2980 } 2981 2982 auto VlaSize = CGF.getVLASize(VAT); 2983 llvm::Value *size = VlaSize.NumElts; 2984 2985 // Scale the number of non-VLA elements by the non-VLA element size. 2986 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type); 2987 if (!eltSize.isOne()) 2988 size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size); 2989 2990 return size; 2991 } 2992 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) { 2993 auto Alignment = 2994 CGF.getContext() 2995 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign( 2996 E->getTypeOfArgument()->getPointeeType())) 2997 .getQuantity(); 2998 return llvm::ConstantInt::get(CGF.SizeTy, Alignment); 2999 } 3000 3001 // If this isn't sizeof(vla), the result must be constant; use the constant 3002 // folding logic so we don't have to duplicate it here. 3003 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext())); 3004 } 3005 3006 Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) { 3007 Expr *Op = E->getSubExpr(); 3008 if (Op->getType()->isAnyComplexType()) { 3009 // If it's an l-value, load through the appropriate subobject l-value. 3010 // Note that we have to ask E because Op might be an l-value that 3011 // this won't work for, e.g. an Obj-C property. 3012 if (E->isGLValue()) 3013 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), 3014 E->getExprLoc()).getScalarVal(); 3015 3016 // Otherwise, calculate and project. 3017 return CGF.EmitComplexExpr(Op, false, true).first; 3018 } 3019 3020 return Visit(Op); 3021 } 3022 3023 Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) { 3024 Expr *Op = E->getSubExpr(); 3025 if (Op->getType()->isAnyComplexType()) { 3026 // If it's an l-value, load through the appropriate subobject l-value. 3027 // Note that we have to ask E because Op might be an l-value that 3028 // this won't work for, e.g. an Obj-C property. 3029 if (Op->isGLValue()) 3030 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), 3031 E->getExprLoc()).getScalarVal(); 3032 3033 // Otherwise, calculate and project. 3034 return CGF.EmitComplexExpr(Op, true, false).second; 3035 } 3036 3037 // __imag on a scalar returns zero. Emit the subexpr to ensure side 3038 // effects are evaluated, but not the actual value. 
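// e.g. '__imag f()' still calls f(), even though the imaginary part of a
// real scalar is always zero.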
3039 if (Op->isGLValue()) 3040 CGF.EmitLValue(Op); 3041 else 3042 CGF.EmitScalarExpr(Op, true); 3043 return llvm::Constant::getNullValue(ConvertType(E->getType())); 3044 } 3045 3046 //===----------------------------------------------------------------------===// 3047 // Binary Operators 3048 //===----------------------------------------------------------------------===// 3049 3050 BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) { 3051 TestAndClearIgnoreResultAssign(); 3052 BinOpInfo Result; 3053 Result.LHS = Visit(E->getLHS()); 3054 Result.RHS = Visit(E->getRHS()); 3055 Result.Ty = E->getType(); 3056 Result.Opcode = E->getOpcode(); 3057 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); 3058 Result.E = E; 3059 return Result; 3060 } 3061 3062 LValue ScalarExprEmitter::EmitCompoundAssignLValue( 3063 const CompoundAssignOperator *E, 3064 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &), 3065 Value *&Result) { 3066 QualType LHSTy = E->getLHS()->getType(); 3067 BinOpInfo OpInfo; 3068 3069 if (E->getComputationResultType()->isAnyComplexType()) 3070 return CGF.EmitScalarCompoundAssignWithComplex(E, Result); 3071 3072 // Emit the RHS first. __block variables need to have the rhs evaluated 3073 // first, plus this should improve codegen a little. 3074 OpInfo.RHS = Visit(E->getRHS()); 3075 OpInfo.Ty = E->getComputationResultType(); 3076 OpInfo.Opcode = E->getOpcode(); 3077 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); 3078 OpInfo.E = E; 3079 // Load/convert the LHS. 3080 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); 3081 3082 llvm::PHINode *atomicPHI = nullptr; 3083 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) { 3084 QualType type = atomicTy->getValueType(); 3085 if (!type->isBooleanType() && type->isIntegerType() && 3086 !(type->isUnsignedIntegerType() && 3087 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) && 3088 CGF.getLangOpts().getSignedOverflowBehavior() != 3089 LangOptions::SOB_Trapping) { 3090 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP; 3091 llvm::Instruction::BinaryOps Op; 3092 switch (OpInfo.Opcode) { 3093 // We don't have atomicrmw operands for *, %, /, <<, >> 3094 case BO_MulAssign: case BO_DivAssign: 3095 case BO_RemAssign: 3096 case BO_ShlAssign: 3097 case BO_ShrAssign: 3098 break; 3099 case BO_AddAssign: 3100 AtomicOp = llvm::AtomicRMWInst::Add; 3101 Op = llvm::Instruction::Add; 3102 break; 3103 case BO_SubAssign: 3104 AtomicOp = llvm::AtomicRMWInst::Sub; 3105 Op = llvm::Instruction::Sub; 3106 break; 3107 case BO_AndAssign: 3108 AtomicOp = llvm::AtomicRMWInst::And; 3109 Op = llvm::Instruction::And; 3110 break; 3111 case BO_XorAssign: 3112 AtomicOp = llvm::AtomicRMWInst::Xor; 3113 Op = llvm::Instruction::Xor; 3114 break; 3115 case BO_OrAssign: 3116 AtomicOp = llvm::AtomicRMWInst::Or; 3117 Op = llvm::Instruction::Or; 3118 break; 3119 default: 3120 llvm_unreachable("Invalid compound assignment type"); 3121 } 3122 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) { 3123 llvm::Value *Amt = CGF.EmitToMemory( 3124 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy, 3125 E->getExprLoc()), 3126 LHSTy); 3127 Value *OldVal = Builder.CreateAtomicRMW( 3128 AtomicOp, LHSLV.getPointer(CGF), Amt, 3129 llvm::AtomicOrdering::SequentiallyConsistent); 3130 3131 // Since operation is atomic, the result type is guaranteed to be the 3132 // same as the input in LLVM terms. 
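// atomicrmw yields the value the memory held *before* the operation, while
// a compound assignment must yield the *new* value, so redo the arithmetic
// locally on the value that was loaded.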
3133 Result = Builder.CreateBinOp(Op, OldVal, Amt); 3134 return LHSLV; 3135 } 3136 } 3137 // FIXME: For floating point types, we should be saving and restoring the 3138 // floating point environment in the loop. 3139 llvm::BasicBlock *startBB = Builder.GetInsertBlock(); 3140 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn); 3141 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc()); 3142 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type); 3143 Builder.CreateBr(opBB); 3144 Builder.SetInsertPoint(opBB); 3145 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2); 3146 atomicPHI->addIncoming(OpInfo.LHS, startBB); 3147 OpInfo.LHS = atomicPHI; 3148 } 3149 else 3150 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc()); 3151 3152 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures); 3153 SourceLocation Loc = E->getExprLoc(); 3154 OpInfo.LHS = 3155 EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc); 3156 3157 // Expand the binary operator. 3158 Result = (this->*Func)(OpInfo); 3159 3160 // Convert the result back to the LHS type, 3161 // potentially with Implicit Conversion sanitizer check. 3162 Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy, 3163 Loc, ScalarConversionOpts(CGF.SanOpts)); 3164 3165 if (atomicPHI) { 3166 llvm::BasicBlock *curBlock = Builder.GetInsertBlock(); 3167 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); 3168 auto Pair = CGF.EmitAtomicCompareExchange( 3169 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc()); 3170 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy); 3171 llvm::Value *success = Pair.second; 3172 atomicPHI->addIncoming(old, curBlock); 3173 Builder.CreateCondBr(success, contBB, atomicPHI->getParent()); 3174 Builder.SetInsertPoint(contBB); 3175 return LHSLV; 3176 } 3177 3178 // Store the result value into the LHS lvalue. Bit-fields are handled 3179 // specially because the result is altered by the store, i.e., [C99 6.5.16p1] 3180 // 'An assignment expression has the value of the left operand after the 3181 // assignment...'. 3182 if (LHSLV.isBitField()) 3183 CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result); 3184 else 3185 CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV); 3186 3187 if (CGF.getLangOpts().OpenMP) 3188 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, 3189 E->getLHS()); 3190 return LHSLV; 3191 } 3192 3193 Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E, 3194 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) { 3195 bool Ignore = TestAndClearIgnoreResultAssign(); 3196 Value *RHS = nullptr; 3197 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS); 3198 3199 // If the result is clearly ignored, return now. 3200 if (Ignore) 3201 return nullptr; 3202 3203 // The result of an assignment in C is the assigned r-value. 3204 if (!CGF.getLangOpts().CPlusPlus) 3205 return RHS; 3206 3207 // If the lvalue is non-volatile, return the computed value of the assignment. 3208 if (!LHS.isVolatileQualified()) 3209 return RHS; 3210 3211 // Otherwise, reload the value. 
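// (In C++ the result of an assignment is the lvalue itself; for a volatile
// lvalue that value must come from an actual load.)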
3212 return EmitLoadOfLValue(LHS, E->getExprLoc()); 3213 } 3214 3215 void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck( 3216 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) { 3217 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks; 3218 3219 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) { 3220 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero), 3221 SanitizerKind::IntegerDivideByZero)); 3222 } 3223 3224 const auto *BO = cast<BinaryOperator>(Ops.E); 3225 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) && 3226 Ops.Ty->hasSignedIntegerRepresentation() && 3227 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) && 3228 Ops.mayHaveIntegerOverflow()) { 3229 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType()); 3230 3231 llvm::Value *IntMin = 3232 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth())); 3233 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty); 3234 3235 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin); 3236 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne); 3237 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or"); 3238 Checks.push_back( 3239 std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow)); 3240 } 3241 3242 if (Checks.size() > 0) 3243 EmitBinOpCheck(Checks, Ops); 3244 } 3245 3246 Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) { 3247 { 3248 CodeGenFunction::SanitizerScope SanScope(&CGF); 3249 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) || 3250 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) && 3251 Ops.Ty->isIntegerType() && 3252 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) { 3253 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); 3254 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true); 3255 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) && 3256 Ops.Ty->isRealFloatingType() && 3257 Ops.mayHaveFloatDivisionByZero()) { 3258 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); 3259 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero); 3260 EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero), 3261 Ops); 3262 } 3263 } 3264 3265 if (Ops.Ty->isConstantMatrixType()) { 3266 llvm::MatrixBuilder<CGBuilderTy> MB(Builder); 3267 // We need to check the types of the operands of the operator to get the 3268 // correct matrix dimensions. 
3269 auto *BO = cast<BinaryOperator>(Ops.E); 3270 (void)BO; 3271 assert( 3272 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) && 3273 "first operand must be a matrix"); 3274 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() && 3275 "second operand must be an arithmetic type"); 3276 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); 3277 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS, 3278 Ops.Ty->hasUnsignedIntegerRepresentation()); 3279 } 3280 3281 if (Ops.LHS->getType()->isFPOrFPVectorTy()) { 3282 llvm::Value *Val; 3283 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); 3284 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div"); 3285 if ((CGF.getLangOpts().OpenCL && 3286 !CGF.CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) || 3287 (CGF.getLangOpts().HIP && CGF.getLangOpts().CUDAIsDevice && 3288 !CGF.CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) { 3289 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp 3290 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt 3291 // build option allows an application to specify that single precision 3292 // floating-point divide (x/y and 1/x) and sqrt used in the program 3293 // source are correctly rounded. 3294 llvm::Type *ValTy = Val->getType(); 3295 if (ValTy->isFloatTy() || 3296 (isa<llvm::VectorType>(ValTy) && 3297 cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy())) 3298 CGF.SetFPAccuracy(Val, 2.5); 3299 } 3300 return Val; 3301 } 3302 else if (Ops.isFixedPointOp()) 3303 return EmitFixedPointBinOp(Ops); 3304 else if (Ops.Ty->hasUnsignedIntegerRepresentation()) 3305 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div"); 3306 else 3307 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div"); 3308 } 3309 3310 Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) { 3311 // Rem in C can't be a floating point type: C99 6.5.5p2. 3312 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) || 3313 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) && 3314 Ops.Ty->isIntegerType() && 3315 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) { 3316 CodeGenFunction::SanitizerScope SanScope(&CGF); 3317 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); 3318 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false); 3319 } 3320 3321 if (Ops.Ty->hasUnsignedIntegerRepresentation()) 3322 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem"); 3323 else 3324 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem"); 3325 } 3326 3327 Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { 3328 unsigned IID; 3329 unsigned OpID = 0; 3330 SanitizerHandler OverflowKind; 3331 3332 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType(); 3333 switch (Ops.Opcode) { 3334 case BO_Add: 3335 case BO_AddAssign: 3336 OpID = 1; 3337 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow : 3338 llvm::Intrinsic::uadd_with_overflow; 3339 OverflowKind = SanitizerHandler::AddOverflow; 3340 break; 3341 case BO_Sub: 3342 case BO_SubAssign: 3343 OpID = 2; 3344 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow : 3345 llvm::Intrinsic::usub_with_overflow; 3346 OverflowKind = SanitizerHandler::SubOverflow; 3347 break; 3348 case BO_Mul: 3349 case BO_MulAssign: 3350 OpID = 3; 3351 IID = isSigned ? 
llvm::Intrinsic::smul_with_overflow : 3352 llvm::Intrinsic::umul_with_overflow; 3353 OverflowKind = SanitizerHandler::MulOverflow; 3354 break; 3355 default: 3356 llvm_unreachable("Unsupported operation for overflow detection"); 3357 } 3358 OpID <<= 1; 3359 if (isSigned) 3360 OpID |= 1; 3361 3362 CodeGenFunction::SanitizerScope SanScope(&CGF); 3363 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty); 3364 3365 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy); 3366 3367 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS}); 3368 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0); 3369 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1); 3370 3371 // Handle overflow with llvm.trap if no custom handler has been specified. 3372 const std::string *handlerName = 3373 &CGF.getLangOpts().OverflowHandler; 3374 if (handlerName->empty()) { 3375 // If the signed-integer-overflow sanitizer is enabled, emit a call to its 3376 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap. 3377 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) { 3378 llvm::Value *NotOverflow = Builder.CreateNot(overflow); 3379 SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow 3380 : SanitizerKind::UnsignedIntegerOverflow; 3381 EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops); 3382 } else 3383 CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind); 3384 return result; 3385 } 3386 3387 // Branch in case of overflow. 3388 llvm::BasicBlock *initialBB = Builder.GetInsertBlock(); 3389 llvm::BasicBlock *continueBB = 3390 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode()); 3391 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn); 3392 3393 Builder.CreateCondBr(overflow, overflowBB, continueBB); 3394 3395 // If an overflow handler is set, then we want to call it and then use its 3396 // result, if it returns. 3397 Builder.SetInsertPoint(overflowBB); 3398 3399 // Get the overflow handler. 3400 llvm::Type *Int8Ty = CGF.Int8Ty; 3401 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty }; 3402 llvm::FunctionType *handlerTy = 3403 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true); 3404 llvm::FunctionCallee handler = 3405 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName); 3406 3407 // Sign extend the args to 64-bit, so that we can use the same handler for 3408 // all types of overflow. 3409 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty); 3410 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty); 3411 3412 // Call the handler with the two arguments, the operation, and the size of 3413 // the result. 3414 llvm::Value *handlerArgs[] = { 3415 lhs, 3416 rhs, 3417 Builder.getInt8(OpID), 3418 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth()) 3419 }; 3420 llvm::Value *handlerResult = 3421 CGF.EmitNounwindRuntimeCall(handler, handlerArgs); 3422 3423 // Truncate the result back to the desired size. 3424 handlerResult = Builder.CreateTrunc(handlerResult, opTy); 3425 Builder.CreateBr(continueBB); 3426 3427 Builder.SetInsertPoint(continueBB); 3428 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2); 3429 phi->addIncoming(result, initialBB); 3430 phi->addIncoming(handlerResult, overflowBB); 3431 3432 return phi; 3433 } 3434 3435 /// Emit pointer + index arithmetic. 3436 static Value *emitPointerArithmetic(CodeGenFunction &CGF, 3437 const BinOpInfo &op, 3438 bool isSubtraction) { 3439 // Must have binary (not unary) expr here. 
Unary pointer 3440 // increment/decrement doesn't use this path. 3441 const BinaryOperator *expr = cast<BinaryOperator>(op.E); 3442 3443 Value *pointer = op.LHS; 3444 Expr *pointerOperand = expr->getLHS(); 3445 Value *index = op.RHS; 3446 Expr *indexOperand = expr->getRHS(); 3447 3448 // In a subtraction, the LHS is always the pointer. 3449 if (!isSubtraction && !pointer->getType()->isPointerTy()) { 3450 std::swap(pointer, index); 3451 std::swap(pointerOperand, indexOperand); 3452 } 3453 3454 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); 3455 3456 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth(); 3457 auto &DL = CGF.CGM.getDataLayout(); 3458 auto PtrTy = cast<llvm::PointerType>(pointer->getType()); 3459 3460 // Some versions of glibc and gcc use idioms (particularly in their malloc 3461 // routines) that add a pointer-sized integer (known to be a pointer value) 3462 // to a null pointer in order to cast the value back to an integer or as 3463 // part of a pointer alignment algorithm. This is undefined behavior, but 3464 // we'd like to be able to compile programs that use it. 3465 // 3466 // Normally, we'd generate a GEP with a null-pointer base here in response 3467 // to that code, but it's also UB to dereference a pointer created that 3468 // way. Instead (as an acknowledged hack to tolerate the idiom) we will 3469 // generate a direct cast of the integer value to a pointer. 3470 // 3471 // The idiom (p = nullptr + N) is not met if any of the following are true: 3472 // 3473 // The operation is subtraction. 3474 // The index is not pointer-sized. 3475 // The pointer type is not byte-sized. 3476 // 3477 if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(), 3478 op.Opcode, 3479 expr->getLHS(), 3480 expr->getRHS())) 3481 return CGF.Builder.CreateIntToPtr(index, pointer->getType()); 3482 3483 if (width != DL.getIndexTypeSizeInBits(PtrTy)) { 3484 // Zero-extend or sign-extend the pointer value according to 3485 // whether the index is signed or not. 3486 index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned, 3487 "idx.ext"); 3488 } 3489 3490 // If this is subtraction, negate the index. 3491 if (isSubtraction) 3492 index = CGF.Builder.CreateNeg(index, "idx.neg"); 3493 3494 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds)) 3495 CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(), 3496 /*Accessed*/ false); 3497 3498 const PointerType *pointerType 3499 = pointerOperand->getType()->getAs<PointerType>(); 3500 if (!pointerType) { 3501 QualType objectType = pointerOperand->getType() 3502 ->castAs<ObjCObjectPointerType>() 3503 ->getPointeeType(); 3504 llvm::Value *objectSize 3505 = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType)); 3506 3507 index = CGF.Builder.CreateMul(index, objectSize); 3508 3509 Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy); 3510 result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr"); 3511 return CGF.Builder.CreateBitCast(result, pointer->getType()); 3512 } 3513 3514 QualType elementType = pointerType->getPointeeType(); 3515 if (const VariableArrayType *vla 3516 = CGF.getContext().getAsVariableArrayType(elementType)) { 3517 // The element count here is the total number of non-VLA elements. 3518 llvm::Value *numElements = CGF.getVLASize(vla).NumElts; 3519 3520 // Effectively, the multiply by the VLA size is part of the GEP. 
3521 // GEP indexes are signed, and scaling an index isn't permitted to 3522 // signed-overflow, so we use the same semantics for our explicit 3523 // multiply. We suppress this if overflow is not undefined behavior. 3524 llvm::Type *elemTy = pointer->getType()->getPointerElementType(); 3525 if (CGF.getLangOpts().isSignedOverflowDefined()) { 3526 index = CGF.Builder.CreateMul(index, numElements, "vla.index"); 3527 pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr"); 3528 } else { 3529 index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index"); 3530 pointer = CGF.EmitCheckedInBoundsGEP( 3531 elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(), 3532 "add.ptr"); 3533 } 3534 return pointer; 3535 } 3536 3537 // Explicitly handle GNU void* and function pointer arithmetic extensions. The 3538 // GNU void* casts amount to no-ops since our void* type is i8*, but this is 3539 // future proof. 3540 if (elementType->isVoidType() || elementType->isFunctionType()) { 3541 Value *result = CGF.EmitCastToVoidPtr(pointer); 3542 result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr"); 3543 return CGF.Builder.CreateBitCast(result, pointer->getType()); 3544 } 3545 3546 llvm::Type *elemTy = CGF.ConvertTypeForMem(elementType); 3547 if (CGF.getLangOpts().isSignedOverflowDefined()) 3548 return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr"); 3549 3550 return CGF.EmitCheckedInBoundsGEP( 3551 elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(), 3552 "add.ptr"); 3553 } 3554 3555 // Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and 3556 // Addend. Use negMul and negAdd to negate the first operand of the Mul or 3557 // the add operand respectively. This allows fmuladd to represent a*b-c, or 3558 // c-a*b. Patterns in LLVM should catch the negated forms and translate them to 3559 // efficient operations. 3560 static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, 3561 const CodeGenFunction &CGF, CGBuilderTy &Builder, 3562 bool negMul, bool negAdd) { 3563 assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set."); 3564 3565 Value *MulOp0 = MulOp->getOperand(0); 3566 Value *MulOp1 = MulOp->getOperand(1); 3567 if (negMul) 3568 MulOp0 = Builder.CreateFNeg(MulOp0, "neg"); 3569 if (negAdd) 3570 Addend = Builder.CreateFNeg(Addend, "neg"); 3571 3572 Value *FMulAdd = nullptr; 3573 if (Builder.getIsFPConstrained()) { 3574 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) && 3575 "Only constrained operation should be created when Builder is in FP " 3576 "constrained mode"); 3577 FMulAdd = Builder.CreateConstrainedFPCall( 3578 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd, 3579 Addend->getType()), 3580 {MulOp0, MulOp1, Addend}); 3581 } else { 3582 FMulAdd = Builder.CreateCall( 3583 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()), 3584 {MulOp0, MulOp1, Addend}); 3585 } 3586 MulOp->eraseFromParent(); 3587 3588 return FMulAdd; 3589 } 3590 3591 // Check whether it would be legal to emit an fmuladd intrinsic call to 3592 // represent op and if so, build the fmuladd. 3593 // 3594 // Checks that (a) the operation is fusable, and (b) -ffp-contract=on. 3595 // Does NOT check the type of the operation - it's assumed that this function 3596 // will be called from contexts where it's known that the type is contractable. 
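// For example (illustrative), with -ffp-contract=on the expression
// 'a * b + c' becomes a single llvm.fmuladd(a, b, c) call, which permits
// the backend to fuse the multiply and add without an intermediate
// rounding step.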
3597 static Value* tryEmitFMulAdd(const BinOpInfo &op, 3598 const CodeGenFunction &CGF, CGBuilderTy &Builder, 3599 bool isSub=false) { 3600 3601 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign || 3602 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && 3603 "Only fadd/fsub can be the root of an fmuladd."); 3604 3605 // Check whether this op is marked as fusable. 3606 if (!op.FPFeatures.allowFPContractWithinStatement()) 3607 return nullptr; 3608 3609 // We have a potentially fusable op. Look for a mul on one of the operands. 3610 // Also, make sure that the mul result isn't used directly. In that case, 3611 // there's no point creating a muladd operation. 3612 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) { 3613 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul && 3614 LHSBinOp->use_empty()) 3615 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub); 3616 } 3617 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) { 3618 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul && 3619 RHSBinOp->use_empty()) 3620 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false); 3621 } 3622 3623 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) { 3624 if (LHSBinOp->getIntrinsicID() == 3625 llvm::Intrinsic::experimental_constrained_fmul && 3626 LHSBinOp->use_empty()) 3627 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub); 3628 } 3629 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) { 3630 if (RHSBinOp->getIntrinsicID() == 3631 llvm::Intrinsic::experimental_constrained_fmul && 3632 RHSBinOp->use_empty()) 3633 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false); 3634 } 3635 3636 return nullptr; 3637 } 3638 3639 Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) { 3640 if (op.LHS->getType()->isPointerTy() || 3641 op.RHS->getType()->isPointerTy()) 3642 return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction); 3643 3644 if (op.Ty->isSignedIntegerOrEnumerationType()) { 3645 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 3646 case LangOptions::SOB_Defined: 3647 return Builder.CreateAdd(op.LHS, op.RHS, "add"); 3648 case LangOptions::SOB_Undefined: 3649 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 3650 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add"); 3651 LLVM_FALLTHROUGH; 3652 case LangOptions::SOB_Trapping: 3653 if (CanElideOverflowCheck(CGF.getContext(), op)) 3654 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add"); 3655 return EmitOverflowCheckedBinOp(op); 3656 } 3657 } 3658 3659 if (op.Ty->isConstantMatrixType()) { 3660 llvm::MatrixBuilder<CGBuilderTy> MB(Builder); 3661 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3662 return MB.CreateAdd(op.LHS, op.RHS); 3663 } 3664 3665 if (op.Ty->isUnsignedIntegerType() && 3666 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && 3667 !CanElideOverflowCheck(CGF.getContext(), op)) 3668 return EmitOverflowCheckedBinOp(op); 3669 3670 if (op.LHS->getType()->isFPOrFPVectorTy()) { 3671 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3672 // Try to form an fmuladd. 3673 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder)) 3674 return FMulAdd; 3675 3676 return Builder.CreateFAdd(op.LHS, op.RHS, "add"); 3677 } 3678 3679 if (op.isFixedPointOp()) 3680 return EmitFixedPointBinOp(op); 3681 3682 return Builder.CreateAdd(op.LHS, op.RHS, "add"); 3683 } 3684 3685 /// The resulting value must be calculated with exact precision, so the operands 3686 /// may not be the same type. 
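/// For example, adding a _Fract operand to an _Accum operand is performed
/// in a common semantic wide enough to represent both operands exactly;
/// only the final result is converted back to the result type's semantics.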
3687 Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) { 3688 using llvm::APSInt; 3689 using llvm::ConstantInt; 3690 3691 // This is either a binary operation where at least one of the operands is 3692 // a fixed-point type, or a unary operation where the operand is a fixed-point 3693 // type. The result type of a binary operation is determined by 3694 // Sema::handleFixedPointConversions(). 3695 QualType ResultTy = op.Ty; 3696 QualType LHSTy, RHSTy; 3697 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) { 3698 RHSTy = BinOp->getRHS()->getType(); 3699 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) { 3700 // For compound assignment, the effective type of the LHS at this point 3701 // is the computation LHS type, not the actual LHS type, and the final 3702 // result type is not the type of the expression but rather the 3703 // computation result type. 3704 LHSTy = CAO->getComputationLHSType(); 3705 ResultTy = CAO->getComputationResultType(); 3706 } else 3707 LHSTy = BinOp->getLHS()->getType(); 3708 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) { 3709 LHSTy = UnOp->getSubExpr()->getType(); 3710 RHSTy = UnOp->getSubExpr()->getType(); 3711 } 3712 ASTContext &Ctx = CGF.getContext(); 3713 Value *LHS = op.LHS; 3714 Value *RHS = op.RHS; 3715 3716 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy); 3717 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy); 3718 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy); 3719 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema); 3720 3721 // Perform the actual operation. 3722 Value *Result; 3723 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder); 3724 switch (op.Opcode) { 3725 case BO_AddAssign: 3726 case BO_Add: 3727 Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema); 3728 break; 3729 case BO_SubAssign: 3730 case BO_Sub: 3731 Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema); 3732 break; 3733 case BO_MulAssign: 3734 case BO_Mul: 3735 Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema); 3736 break; 3737 case BO_DivAssign: 3738 case BO_Div: 3739 Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema); 3740 break; 3741 case BO_ShlAssign: 3742 case BO_Shl: 3743 Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS); 3744 break; 3745 case BO_ShrAssign: 3746 case BO_Shr: 3747 Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS); 3748 break; 3749 case BO_LT: 3750 return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema); 3751 case BO_GT: 3752 return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema); 3753 case BO_LE: 3754 return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema); 3755 case BO_GE: 3756 return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema); 3757 case BO_EQ: 3758 // For equality operations, we assume any padding bits on unsigned types are 3759 // zero'd out. They could be overwritten through non-saturating operations 3760 // that cause overflow, but this leads to undefined behavior. 
3761 return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema); 3762 case BO_NE: 3763 return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema); 3764 case BO_Cmp: 3765 case BO_LAnd: 3766 case BO_LOr: 3767 llvm_unreachable("Found unimplemented fixed point binary operation"); 3768 case BO_PtrMemD: 3769 case BO_PtrMemI: 3770 case BO_Rem: 3771 case BO_Xor: 3772 case BO_And: 3773 case BO_Or: 3774 case BO_Assign: 3775 case BO_RemAssign: 3776 case BO_AndAssign: 3777 case BO_XorAssign: 3778 case BO_OrAssign: 3779 case BO_Comma: 3780 llvm_unreachable("Found unsupported binary operation for fixed point types."); 3781 } 3782 3783 bool IsShift = BinaryOperator::isShiftOp(op.Opcode) || 3784 BinaryOperator::isShiftAssignOp(op.Opcode); 3785 // Convert to the result type. 3786 return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema 3787 : CommonFixedSema, 3788 ResultFixedSema); 3789 } 3790 3791 Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) { 3792 // The LHS is always a pointer if either side is. 3793 if (!op.LHS->getType()->isPointerTy()) { 3794 if (op.Ty->isSignedIntegerOrEnumerationType()) { 3795 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 3796 case LangOptions::SOB_Defined: 3797 return Builder.CreateSub(op.LHS, op.RHS, "sub"); 3798 case LangOptions::SOB_Undefined: 3799 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 3800 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub"); 3801 LLVM_FALLTHROUGH; 3802 case LangOptions::SOB_Trapping: 3803 if (CanElideOverflowCheck(CGF.getContext(), op)) 3804 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub"); 3805 return EmitOverflowCheckedBinOp(op); 3806 } 3807 } 3808 3809 if (op.Ty->isConstantMatrixType()) { 3810 llvm::MatrixBuilder<CGBuilderTy> MB(Builder); 3811 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3812 return MB.CreateSub(op.LHS, op.RHS); 3813 } 3814 3815 if (op.Ty->isUnsignedIntegerType() && 3816 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && 3817 !CanElideOverflowCheck(CGF.getContext(), op)) 3818 return EmitOverflowCheckedBinOp(op); 3819 3820 if (op.LHS->getType()->isFPOrFPVectorTy()) { 3821 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3822 // Try to form an fmuladd. 3823 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true)) 3824 return FMulAdd; 3825 return Builder.CreateFSub(op.LHS, op.RHS, "sub"); 3826 } 3827 3828 if (op.isFixedPointOp()) 3829 return EmitFixedPointBinOp(op); 3830 3831 return Builder.CreateSub(op.LHS, op.RHS, "sub"); 3832 } 3833 3834 // If the RHS is not a pointer, then we have normal pointer 3835 // arithmetic. 3836 if (!op.RHS->getType()->isPointerTy()) 3837 return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction); 3838 3839 // Otherwise, this is a pointer subtraction. 3840 3841 // Do the raw subtraction part. 3842 llvm::Value *LHS 3843 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast"); 3844 llvm::Value *RHS 3845 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast"); 3846 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub"); 3847 3848 // Okay, figure out the element size. 3849 const BinaryOperator *expr = cast<BinaryOperator>(op.E); 3850 QualType elementType = expr->getLHS()->getType()->getPointeeType(); 3851 3852 llvm::Value *divisor = nullptr; 3853 3854 // For a variable-length array, this is going to be non-constant. 
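// e.g. with 'int (*p)[n]', 'p - q' divides the byte difference by the
// run-time quantity 'n * sizeof(int)' computed here.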
3855 if (const VariableArrayType *vla
3856 = CGF.getContext().getAsVariableArrayType(elementType)) {
3857 auto VlaSize = CGF.getVLASize(vla);
3858 elementType = VlaSize.Type;
3859 divisor = VlaSize.NumElts;
3860
3861 // Scale the number of non-VLA elements by the non-VLA element size.
3862 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
3863 if (!eltSize.isOne())
3864 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
3865
3866 // For everything else, we can just compute it, safe in the
3867 // assumption that Sema won't let anything through that we can't
3868 // safely compute the size of.
3869 } else {
3870 CharUnits elementSize;
3871 // Handle GCC extension for pointer arithmetic on void* and
3872 // function pointer types.
3873 if (elementType->isVoidType() || elementType->isFunctionType())
3874 elementSize = CharUnits::One();
3875 else
3876 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
3877
3878 // Don't even emit the divide for element size of 1.
3879 if (elementSize.isOne())
3880 return diffInChars;
3881
3882 divisor = CGF.CGM.getSize(elementSize);
3883 }
3884
3885 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
3886 // pointer difference in C is only defined in the case where both operands
3887 // are pointing to elements of an array.
3888 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
3889 }
3890
3891 Value *ScalarExprEmitter::GetWidthMinusOneValue(Value *LHS, Value *RHS) {
3892 llvm::IntegerType *Ty;
3893 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3894 Ty = cast<llvm::IntegerType>(VT->getElementType());
3895 else
3896 Ty = cast<llvm::IntegerType>(LHS->getType());
3897 return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
3898 }
3899
3900 Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
3901 const Twine &Name) {
3902 llvm::IntegerType *Ty;
3903 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3904 Ty = cast<llvm::IntegerType>(VT->getElementType());
3905 else
3906 Ty = cast<llvm::IntegerType>(LHS->getType());
3907
3908 if (llvm::isPowerOf2_64(Ty->getBitWidth()))
3909 return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name);
3910
3911 return Builder.CreateURem(
3912 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
3913 }
3914
3915 Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
3916 // TODO: This misses out on the sanitizer check below.
3917 if (Ops.isFixedPointOp())
3918 return EmitFixedPointBinOp(Ops);
3919
3920 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3921 // RHS to the same size as the LHS.
3922 Value *RHS = Ops.RHS;
3923 if (Ops.LHS->getType() != RHS->getType())
3924 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3925
3926 bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
3927 Ops.Ty->hasSignedIntegerRepresentation() &&
3928 !CGF.getLangOpts().isSignedOverflowDefined() &&
3929 !CGF.getLangOpts().CPlusPlus20;
3930 bool SanitizeUnsignedBase =
3931 CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
3932 Ops.Ty->hasUnsignedIntegerRepresentation();
3933 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
3934 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
3935 // OpenCL 6.3j: shift values are effectively % word size of LHS.
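// e.g. for a 32-bit LHS, 'x << 33' is emitted as 'x << (33 & 31)' when the
// bit width is a power of two (see ConstrainShiftValue above).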
3936 if (CGF.getLangOpts().OpenCL)
3937 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
3938 else if ((SanitizeBase || SanitizeExponent) &&
3939 isa<llvm::IntegerType>(Ops.LHS->getType())) {
3940 CodeGenFunction::SanitizerScope SanScope(&CGF);
3941 SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
3942 llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
3943 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
3944
3945 if (SanitizeExponent) {
3946 Checks.push_back(
3947 std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
3948 }
3949
3950 if (SanitizeBase) {
3951 // Check whether we are shifting any non-zero bits off the top of the
3952 // integer. We only emit this check if the exponent is valid; otherwise
3953 // the instructions below will have undefined behavior themselves.
3954 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
3955 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
3956 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
3957 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
3958 llvm::Value *PromotedWidthMinusOne =
3959 (RHS == Ops.RHS) ? WidthMinusOne
3960 : GetWidthMinusOneValue(Ops.LHS, RHS);
3961 CGF.EmitBlock(CheckShiftBase);
3962 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
3963 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
3964 /*NUW*/ true, /*NSW*/ true),
3965 "shl.check");
3966 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
3967 // In C99, we are not permitted to shift a 1 bit into the sign bit.
3968 // Under C++11's rules, shifting a 1 bit into the sign bit is
3969 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
3970 // define signed left shifts, so we use the C99 and C++11 rules there).
3971 // Unsigned shifts can always shift into the top bit.
3972 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
3973 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
3974 }
3975 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
3976 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
3977 CGF.EmitBlock(Cont);
3978 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
3979 BaseCheck->addIncoming(Builder.getTrue(), Orig);
3980 BaseCheck->addIncoming(ValidBase, CheckShiftBase);
3981 Checks.push_back(std::make_pair(
3982 BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase
3983 : SanitizerKind::UnsignedShiftBase));
3984 }
3985
3986 assert(!Checks.empty());
3987 EmitBinOpCheck(Checks, Ops);
3988 }
3989
3990 return Builder.CreateShl(Ops.LHS, RHS, "shl");
3991 }
3992
3993 Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
3994 // TODO: This misses out on the sanitizer check below.
3995 if (Ops.isFixedPointOp())
3996 return EmitFixedPointBinOp(Ops);
3997
3998 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3999 // RHS to the same size as the LHS.
4000 Value *RHS = Ops.RHS;
4001 if (Ops.LHS->getType() != RHS->getType())
4002 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4003
4004 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4005 if (CGF.getLangOpts().OpenCL) 4006 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask"); 4007 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) && 4008 isa<llvm::IntegerType>(Ops.LHS->getType())) { 4009 CodeGenFunction::SanitizerScope SanScope(&CGF); 4010 llvm::Value *Valid = 4011 Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS)); 4012 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops); 4013 } 4014 4015 if (Ops.Ty->hasUnsignedIntegerRepresentation()) 4016 return Builder.CreateLShr(Ops.LHS, RHS, "shr"); 4017 return Builder.CreateAShr(Ops.LHS, RHS, "shr"); 4018 } 4019 4020 enum IntrinsicType { VCMPEQ, VCMPGT }; 4021 // return corresponding comparison intrinsic for given vector type 4022 static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, 4023 BuiltinType::Kind ElemKind) { 4024 switch (ElemKind) { 4025 default: llvm_unreachable("unexpected element type"); 4026 case BuiltinType::Char_U: 4027 case BuiltinType::UChar: 4028 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p : 4029 llvm::Intrinsic::ppc_altivec_vcmpgtub_p; 4030 case BuiltinType::Char_S: 4031 case BuiltinType::SChar: 4032 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p : 4033 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p; 4034 case BuiltinType::UShort: 4035 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p : 4036 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p; 4037 case BuiltinType::Short: 4038 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p : 4039 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p; 4040 case BuiltinType::UInt: 4041 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p : 4042 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p; 4043 case BuiltinType::Int: 4044 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p : 4045 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p; 4046 case BuiltinType::ULong: 4047 case BuiltinType::ULongLong: 4048 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p : 4049 llvm::Intrinsic::ppc_altivec_vcmpgtud_p; 4050 case BuiltinType::Long: 4051 case BuiltinType::LongLong: 4052 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p : 4053 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p; 4054 case BuiltinType::Float: 4055 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p : 4056 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p; 4057 case BuiltinType::Double: 4058 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p : 4059 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p; 4060 case BuiltinType::UInt128: 4061 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p 4062 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p; 4063 case BuiltinType::Int128: 4064 return (IT == VCMPEQ) ? 
llvm::Intrinsic::ppc_altivec_vcmpequq_p
4065 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
4066 }
4067 }
4068
4069 Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
4070 llvm::CmpInst::Predicate UICmpOpc,
4071 llvm::CmpInst::Predicate SICmpOpc,
4072 llvm::CmpInst::Predicate FCmpOpc,
4073 bool IsSignaling) {
4074 TestAndClearIgnoreResultAssign();
4075 Value *Result;
4076 QualType LHSTy = E->getLHS()->getType();
4077 QualType RHSTy = E->getRHS()->getType();
4078 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
4079 assert(E->getOpcode() == BO_EQ ||
4080 E->getOpcode() == BO_NE);
4081 Value *LHS = CGF.EmitScalarExpr(E->getLHS());
4082 Value *RHS = CGF.EmitScalarExpr(E->getRHS());
4083 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
4084 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
4085 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
4086 BinOpInfo BOInfo = EmitBinOps(E);
4087 Value *LHS = BOInfo.LHS;
4088 Value *RHS = BOInfo.RHS;
4089
4090 // If AltiVec, the comparison results in a numeric type, so we use
4091 // intrinsics that compare whole vectors and return 0 or 1 as the result.
4092 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
4093 // Constants for mapping CR6 register bits to the predicate result.
4094 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
4095
4096 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
4097
4098 // In several cases the order of the vector arguments is reversed.
4099 Value *FirstVecArg = LHS,
4100 *SecondVecArg = RHS;
4101
4102 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
4103 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
4104
4105 switch (E->getOpcode()) {
4106 default: llvm_unreachable("is not a comparison operation");
4107 case BO_EQ:
4108 CR6 = CR6_LT;
4109 ID = GetIntrinsic(VCMPEQ, ElementKind);
4110 break;
4111 case BO_NE:
4112 CR6 = CR6_EQ;
4113 ID = GetIntrinsic(VCMPEQ, ElementKind);
4114 break;
4115 case BO_LT:
4116 CR6 = CR6_LT;
4117 ID = GetIntrinsic(VCMPGT, ElementKind);
4118 std::swap(FirstVecArg, SecondVecArg);
4119 break;
4120 case BO_GT:
4121 CR6 = CR6_LT;
4122 ID = GetIntrinsic(VCMPGT, ElementKind);
4123 break;
4124 case BO_LE:
4125 if (ElementKind == BuiltinType::Float) {
4126 CR6 = CR6_LT;
4127 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4128 std::swap(FirstVecArg, SecondVecArg);
4129 }
4130 else {
4131 CR6 = CR6_EQ;
4132 ID = GetIntrinsic(VCMPGT, ElementKind);
4133 }
4134 break;
4135 case BO_GE:
4136 if (ElementKind == BuiltinType::Float) {
4137 CR6 = CR6_LT;
4138 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4139 }
4140 else {
4141 CR6 = CR6_EQ;
4142 ID = GetIntrinsic(VCMPGT, ElementKind);
4143 std::swap(FirstVecArg, SecondVecArg);
4144 }
4145 break;
4146 }
4147
4148 Value *CR6Param = Builder.getInt32(CR6);
4149 llvm::Function *F = CGF.CGM.getIntrinsic(ID);
4150 Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
4151
4152 // The result type of the intrinsic may not be the same as E->getType().
4153 // If E->getType() is not BoolTy, EmitScalarConversion will do the
4154 // conversion work. If E->getType() is BoolTy, EmitScalarConversion
4155 // does nothing, and if ResultTy is not i1 at that point it would
4156 // cause a crash later.
4157 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType()); 4158 if (ResultTy->getBitWidth() > 1 && 4159 E->getType() == CGF.getContext().BoolTy) 4160 Result = Builder.CreateTrunc(Result, Builder.getInt1Ty()); 4161 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(), 4162 E->getExprLoc()); 4163 } 4164 4165 if (BOInfo.isFixedPointOp()) { 4166 Result = EmitFixedPointBinOp(BOInfo); 4167 } else if (LHS->getType()->isFPOrFPVectorTy()) { 4168 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures); 4169 if (!IsSignaling) 4170 Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp"); 4171 else 4172 Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp"); 4173 } else if (LHSTy->hasSignedIntegerRepresentation()) { 4174 Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp"); 4175 } else { 4176 // Unsigned integers and pointers. 4177 4178 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers && 4179 !isa<llvm::ConstantPointerNull>(LHS) && 4180 !isa<llvm::ConstantPointerNull>(RHS)) { 4181 4182 // Dynamic information is required to be stripped for comparisons, 4183 // because it could leak the dynamic information. Based on comparisons 4184 // of pointers to dynamic objects, the optimizer can replace one pointer 4185 // with another, which might be incorrect in presence of invariant 4186 // groups. Comparison with null is safe because null does not carry any 4187 // dynamic information. 4188 if (LHSTy.mayBeDynamicClass()) 4189 LHS = Builder.CreateStripInvariantGroup(LHS); 4190 if (RHSTy.mayBeDynamicClass()) 4191 RHS = Builder.CreateStripInvariantGroup(RHS); 4192 } 4193 4194 Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp"); 4195 } 4196 4197 // If this is a vector comparison, sign extend the result to the appropriate 4198 // vector integer type and return it (don't convert to bool). 4199 if (LHSTy->isVectorType()) 4200 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); 4201 4202 } else { 4203 // Complex Comparison: can only be an equality comparison. 4204 CodeGenFunction::ComplexPairTy LHS, RHS; 4205 QualType CETy; 4206 if (auto *CTy = LHSTy->getAs<ComplexType>()) { 4207 LHS = CGF.EmitComplexExpr(E->getLHS()); 4208 CETy = CTy->getElementType(); 4209 } else { 4210 LHS.first = Visit(E->getLHS()); 4211 LHS.second = llvm::Constant::getNullValue(LHS.first->getType()); 4212 CETy = LHSTy; 4213 } 4214 if (auto *CTy = RHSTy->getAs<ComplexType>()) { 4215 RHS = CGF.EmitComplexExpr(E->getRHS()); 4216 assert(CGF.getContext().hasSameUnqualifiedType(CETy, 4217 CTy->getElementType()) && 4218 "The element types must always match."); 4219 (void)CTy; 4220 } else { 4221 RHS.first = Visit(E->getRHS()); 4222 RHS.second = llvm::Constant::getNullValue(RHS.first->getType()); 4223 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) && 4224 "The element types must always match."); 4225 } 4226 4227 Value *ResultR, *ResultI; 4228 if (CETy->isRealFloatingType()) { 4229 // As complex comparisons can only be equality comparisons, they 4230 // are never signaling comparisons. 4231 ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r"); 4232 ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i"); 4233 } else { 4234 // Complex comparisons can only be equality comparisons. As such, signed 4235 // and unsigned opcodes are the same. 
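// e.g. for _Complex operands, 'a == b' becomes
// '(a.re == b.re) & (a.im == b.im)' and 'a != b' becomes
// '(a.re != b.re) | (a.im != b.im)', combined below.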
4236 ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r"); 4237 ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i"); 4238 } 4239 4240 if (E->getOpcode() == BO_EQ) { 4241 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri"); 4242 } else { 4243 assert(E->getOpcode() == BO_NE && 4244 "Complex comparison other than == or != ?"); 4245 Result = Builder.CreateOr(ResultR, ResultI, "or.ri"); 4246 } 4247 } 4248 4249 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(), 4250 E->getExprLoc()); 4251 } 4252 4253 Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { 4254 bool Ignore = TestAndClearIgnoreResultAssign(); 4255 4256 Value *RHS; 4257 LValue LHS; 4258 4259 switch (E->getLHS()->getType().getObjCLifetime()) { 4260 case Qualifiers::OCL_Strong: 4261 std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore); 4262 break; 4263 4264 case Qualifiers::OCL_Autoreleasing: 4265 std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E); 4266 break; 4267 4268 case Qualifiers::OCL_ExplicitNone: 4269 std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore); 4270 break; 4271 4272 case Qualifiers::OCL_Weak: 4273 RHS = Visit(E->getRHS()); 4274 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); 4275 RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore); 4276 break; 4277 4278 case Qualifiers::OCL_None: 4279 // __block variables need to have the rhs evaluated first, plus 4280 // this should improve codegen just a little. 4281 RHS = Visit(E->getRHS()); 4282 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); 4283 4284 // Store the value into the LHS. Bit-fields are handled specially 4285 // because the result is altered by the store, i.e., [C99 6.5.16p1] 4286 // 'An assignment expression has the value of the left operand after 4287 // the assignment...'. 4288 if (LHS.isBitField()) { 4289 CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS); 4290 } else { 4291 CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc()); 4292 CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS); 4293 } 4294 } 4295 4296 // If the result is clearly ignored, return now. 4297 if (Ignore) 4298 return nullptr; 4299 4300 // The result of an assignment in C is the assigned r-value. 4301 if (!CGF.getLangOpts().CPlusPlus) 4302 return RHS; 4303 4304 // If the lvalue is non-volatile, return the computed value of the assignment. 4305 if (!LHS.isVolatileQualified()) 4306 return RHS; 4307 4308 // Otherwise, reload the value. 4309 return EmitLoadOfLValue(LHS, E->getExprLoc()); 4310 } 4311 4312 Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { 4313 // Perform vector logical and on comparisons with zero vectors. 
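// e.g. for GCC vector extensions, 'v1 && v2' yields a vector of -1/0
// computed as sext((v1 != 0) & (v2 != 0)); there is no short-circuit.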
4314 if (E->getType()->isVectorType()) {
4315 CGF.incrementProfileCounter(E);
4316
4317 Value *LHS = Visit(E->getLHS());
4318 Value *RHS = Visit(E->getRHS());
4319 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4320 if (LHS->getType()->isFPOrFPVectorTy()) {
4321 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4322 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4323 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4324 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4325 } else {
4326 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4327 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4328 }
4329 Value *And = Builder.CreateAnd(LHS, RHS);
4330 return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
4331 }
4332
4333 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4334 llvm::Type *ResTy = ConvertType(E->getType());
4335
4336 // If we have 0 && RHS, see if we can elide the RHS; if so, just return 0.
4337 // If we have 1 && X, just emit X without inserting the control flow.
4338 bool LHSCondVal;
4339 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4340 if (LHSCondVal) { // If we have 1 && X, just emit X.
4341 CGF.incrementProfileCounter(E);
4342
4343 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4344
4345 // If we're generating for profiling or coverage, generate a branch to a
4346 // block that increments the RHS counter needed to track branch condition
4347 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4348 // "FalseBlock" after the increment is done.
4349 if (InstrumentRegions &&
4350 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4351 llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
4352 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4353 Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
4354 CGF.EmitBlock(RHSBlockCnt);
4355 CGF.incrementProfileCounter(E->getRHS());
4356 CGF.EmitBranch(FBlock);
4357 CGF.EmitBlock(FBlock);
4358 }
4359
4360 // ZExt result to int or bool.
4361 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
4362 }
4363
4364 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
4365 if (!CGF.ContainsLabel(E->getRHS()))
4366 return llvm::Constant::getNullValue(ResTy);
4367 }
4368
4369 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
4370 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
4371
4372 CodeGenFunction::ConditionalEvaluation eval(CGF);
4373
4374 // Branch on the LHS first. If it is false, go to the failure (cont) block.
4375 CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
4376 CGF.getProfileCount(E->getRHS()));
4377
4378 // Any edges into the ContBlock now come from an (indeterminate number of)
4379 // edges out of this first condition. All of these values will be false.
4380 // Start setting up the PHI node in the ContBlock for this.
4381 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4382 "", ContBlock);
4383 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4384 PI != PE; ++PI)
4385 PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
4386
4387 eval.begin(CGF);
4388 CGF.EmitBlock(RHSBlock);
4389 CGF.incrementProfileCounter(E);
4390 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4391 eval.end(CGF);
4392
4393 // Reacquire the RHS block, as there may be subblocks inserted.
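// (For example, a RHS like `b ? f() : g()` emits its own blocks, so the
// block that falls through to land.end is not the land.rhs block created
// above.)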
4394 RHSBlock = Builder.GetInsertBlock();
4395
4396 // If we're generating for profiling or coverage, generate a branch on the
4397 // RHS to a block that increments the RHS true counter needed to track branch
4398 // condition coverage.
4399 if (InstrumentRegions &&
4400 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4401 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4402 Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
4403 CGF.EmitBlock(RHSBlockCnt);
4404 CGF.incrementProfileCounter(E->getRHS());
4405 CGF.EmitBranch(ContBlock);
4406 PN->addIncoming(RHSCond, RHSBlockCnt);
4407 }
4408
4409 // Emit an unconditional branch from this block to ContBlock.
4410 {
4411 // There is no need to emit a line number for an unconditional branch.
4412 auto NL = ApplyDebugLocation::CreateEmpty(CGF);
4413 CGF.EmitBlock(ContBlock);
4414 }
4415 // Insert an entry into the phi node for the edge with the value of RHSCond.
4416 PN->addIncoming(RHSCond, RHSBlock);
4417
4418 // Artificial location to preserve the scope information
4419 {
4420 auto NL = ApplyDebugLocation::CreateArtificial(CGF);
4421 PN->setDebugLoc(Builder.getCurrentDebugLocation());
4422 }
4423
4424 // ZExt result to int.
4425 return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
4426 }
4427
4428 Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
4429 // Perform vector logical or on comparisons with zero vectors.
4430 if (E->getType()->isVectorType()) {
4431 CGF.incrementProfileCounter(E);
4432
4433 Value *LHS = Visit(E->getLHS());
4434 Value *RHS = Visit(E->getRHS());
4435 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4436 if (LHS->getType()->isFPOrFPVectorTy()) {
4437 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4438 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4439 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4440 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4441 } else {
4442 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4443 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4444 }
4445 Value *Or = Builder.CreateOr(LHS, RHS);
4446 return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
4447 }
4448
4449 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4450 llvm::Type *ResTy = ConvertType(E->getType());
4451
4452 // If we have 1 || RHS, see if we can elide the RHS; if so, just return 1.
4453 // If we have 0 || X, just emit X without inserting the control flow.
4454 bool LHSCondVal;
4455 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4456 if (!LHSCondVal) { // If we have 0 || X, just emit X.
4457 CGF.incrementProfileCounter(E);
4458
4459 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4460
4461 // If we're generating for profiling or coverage, generate a branch to a
4462 // block that increments the RHS counter needed to track branch condition
4463 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4464 // "FalseBlock" after the increment is done.
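// Schematically, for `0 || X` this emits roughly (block names illustrative):
//   br i1 %x.cond, label %lor.end, label %lor.rhscnt
//   lor.rhscnt:                       ; taken when X is false
//     <profile counter increment for X>
//     br label %lor.end
//   lor.end: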
4465 if (InstrumentRegions &&
4466 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4467 llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
4468 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4469 Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
4470 CGF.EmitBlock(RHSBlockCnt);
4471 CGF.incrementProfileCounter(E->getRHS());
4472 CGF.EmitBranch(FBlock);
4473 CGF.EmitBlock(FBlock);
4474 }
4475
4476 // ZExt result to int or bool.
4477 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
4478 }
4479
4480 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
4481 if (!CGF.ContainsLabel(E->getRHS()))
4482 return llvm::ConstantInt::get(ResTy, 1);
4483 }
4484
4485 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
4486 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
4487
4488 CodeGenFunction::ConditionalEvaluation eval(CGF);
4489
4490 // Branch on the LHS first. If it is true, go to the success (cont) block.
4491 CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
4492 CGF.getCurrentProfileCount() -
4493 CGF.getProfileCount(E->getRHS()));
4494
4495 // Any edges into the ContBlock now come from an (indeterminate number of)
4496 // edges out of this first condition. All of these values will be true.
4497 // Start setting up the PHI node in the ContBlock for this.
4498 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4499 "", ContBlock);
4500 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4501 PI != PE; ++PI)
4502 PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
4503
4504 eval.begin(CGF);
4505
4506 // Emit the RHS condition as a bool value.
4507 CGF.EmitBlock(RHSBlock);
4508 CGF.incrementProfileCounter(E);
4509 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4510
4511 eval.end(CGF);
4512
4513 // Reacquire the RHS block, as there may be subblocks inserted.
4514 RHSBlock = Builder.GetInsertBlock();
4515
4516 // If we're generating for profiling or coverage, generate a branch on the
4517 // RHS to a block that increments the RHS true counter needed to track branch
4518 // condition coverage.
4519 if (InstrumentRegions &&
4520 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4521 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4522 Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
4523 CGF.EmitBlock(RHSBlockCnt);
4524 CGF.incrementProfileCounter(E->getRHS());
4525 CGF.EmitBranch(ContBlock);
4526 PN->addIncoming(RHSCond, RHSBlockCnt);
4527 }
4528
4529 // Emit an unconditional branch from this block to ContBlock. Insert an entry
4530 // into the phi node for the edge with the value of RHSCond.
4531 CGF.EmitBlock(ContBlock);
4532 PN->addIncoming(RHSCond, RHSBlock);
4533
4534 // ZExt result to int.
4535 return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
4536 }
4537
4538 Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
4539 CGF.EmitIgnoredExpr(E->getLHS());
4540 CGF.EnsureInsertPoint();
4541 return Visit(E->getRHS());
4542 }
4543
4544 //===----------------------------------------------------------------------===//
4545 // Other Operators
4546 //===----------------------------------------------------------------------===//
4547
4548 /// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
4549 /// expression is cheap enough and side-effect-free enough to evaluate
4550 /// unconditionally instead of conditionally. This is used to convert control
4551 /// flow into selects in some cases.
4552 static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
4553 CodeGenFunction &CGF) {
4554 // Anything that is an integer or floating point constant is fine.
4555 return E->IgnoreParens()->isEvaluatable(CGF.getContext());
4556
4557 // Even non-volatile automatic variables can't be evaluated unconditionally.
4558 // Referencing a thread_local may cause non-trivial initialization work to
4559 // occur. If we're inside a lambda and one of the variables is from the scope
4560 // outside the lambda, that function may have returned already. Reading its
4561 // locals is a bad idea. Also, these reads may introduce races that didn't
4562 // exist in the source-level program.
4563 }
4564
4565
4566 Value *ScalarExprEmitter::
4567 VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
4568 TestAndClearIgnoreResultAssign();
4569
4570 // Bind the common expression if necessary.
4571 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
4572
4573 Expr *condExpr = E->getCond();
4574 Expr *lhsExpr = E->getTrueExpr();
4575 Expr *rhsExpr = E->getFalseExpr();
4576
4577 // If the condition constant folds and can be elided, try to avoid emitting
4578 // the condition and the dead arm.
4579 bool CondExprBool;
4580 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4581 Expr *live = lhsExpr, *dead = rhsExpr;
4582 if (!CondExprBool) std::swap(live, dead);
4583
4584 // If the dead side doesn't have labels we need, just emit the live part.
4585 if (!CGF.ContainsLabel(dead)) {
4586 if (CondExprBool)
4587 CGF.incrementProfileCounter(E);
4588 Value *Result = Visit(live);
4589
4590 // If the live part is a throw expression, it acts like it has a void
4591 // type, so evaluating it returns a null Value*. However, a conditional
4592 // with non-void type must return a non-null Value*.
4593 if (!Result && !E->getType()->isVoidType())
4594 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
4595
4596 return Result;
4597 }
4598 }
4599
4600 // OpenCL: If the condition is a vector, we can treat this condition like
4601 // the OpenCL select() function.
4602 if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
4603 condExpr->getType()->isExtVectorType()) {
4604 CGF.incrementProfileCounter(E);
4605
4606 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4607 llvm::Value *LHS = Visit(lhsExpr);
4608 llvm::Value *RHS = Visit(rhsExpr);
4609
4610 llvm::Type *condType = ConvertType(condExpr->getType());
4611 auto *vecTy = cast<llvm::FixedVectorType>(condType);
4612
4613 unsigned numElem = vecTy->getNumElements();
4614 llvm::Type *elemType = vecTy->getElementType();
4615
4616 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
4617 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
4618 llvm::Value *tmp = Builder.CreateSExt(
4619 TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
4620 llvm::Value *tmp2 = Builder.CreateNot(tmp);
4621
4622 // Cast float to int to perform ANDs if necessary.
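// Schematically, the sequence below computes, per lane:
//   mask   = sext(CondV < 0)          ; all-ones where the MSB is set
//   result = (LHS & mask) | (RHS & ~mask)
// i.e. a lane is taken from LHS when the condition's MSB is 1, matching
// OpenCL's select() semantics (an illustrative summary, not emitted text).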
4623 llvm::Value *RHSTmp = RHS;
4624 llvm::Value *LHSTmp = LHS;
4625 bool wasCast = false;
4626 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
4627 if (rhsVTy->getElementType()->isFloatingPointTy()) {
4628 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
4629 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
4630 wasCast = true;
4631 }
4632
4633 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
4634 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
4635 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
4636 if (wasCast)
4637 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
4638
4639 return tmp5;
4640 }
4641
4642 if (condExpr->getType()->isVectorType()) {
4643 CGF.incrementProfileCounter(E);
4644
4645 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4646 llvm::Value *LHS = Visit(lhsExpr);
4647 llvm::Value *RHS = Visit(rhsExpr);
4648
4649 llvm::Type *CondType = ConvertType(condExpr->getType());
4650 auto *VecTy = cast<llvm::VectorType>(CondType);
4651 llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
4652
4653 CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
4654 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
4655 }
4656
4657 // If this is a really simple expression (like x ? 4 : 5), emit this as a
4658 // select instead of as control flow. We can only do this if it is cheap and
4659 // safe to evaluate the LHS and RHS unconditionally.
4660 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
4661 isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
4662 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
4663 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
4664
4665 CGF.incrementProfileCounter(E, StepV);
4666
4667 llvm::Value *LHS = Visit(lhsExpr);
4668 llvm::Value *RHS = Visit(rhsExpr);
4669 if (!LHS) {
4670 // If the conditional has void type, make sure we return a null Value*.
4671 assert(!RHS && "LHS and RHS types must match");
4672 return nullptr;
4673 }
4674 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
4675 }
4676
4677 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
4678 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
4679 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
4680
4681 CodeGenFunction::ConditionalEvaluation eval(CGF);
4682 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
4683 CGF.getProfileCount(lhsExpr));
4684
4685 CGF.EmitBlock(LHSBlock);
4686 CGF.incrementProfileCounter(E);
4687 eval.begin(CGF);
4688 Value *LHS = Visit(lhsExpr);
4689 eval.end(CGF);
4690
4691 LHSBlock = Builder.GetInsertBlock();
4692 Builder.CreateBr(ContBlock);
4693
4694 CGF.EmitBlock(RHSBlock);
4695 eval.begin(CGF);
4696 Value *RHS = Visit(rhsExpr);
4697 eval.end(CGF);
4698
4699 RHSBlock = Builder.GetInsertBlock();
4700 CGF.EmitBlock(ContBlock);
4701
4702 // If the LHS or RHS is a throw expression, it will be legitimately null.
4703 if (!LHS)
4704 return RHS;
4705 if (!RHS)
4706 return LHS;
4707
4708 // Create a PHI node for the result.
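// E.g. for `c ? a : b` this function as a whole emits roughly (schematic
// IR; block and value names are made up):
//   br i1 %c, label %cond.true, label %cond.false
//   cond.true:  ... ; br label %cond.end
//   cond.false: ... ; br label %cond.end
//   cond.end:   %cond = phi i32 [ %t, %cond.true ], [ %f, %cond.false ]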
4709 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
4710 PN->addIncoming(LHS, LHSBlock);
4711 PN->addIncoming(RHS, RHSBlock);
4712 return PN;
4713 }
4714
4715 Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
4716 return Visit(E->getChosenSubExpr());
4717 }
4718
4719 Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
4720 QualType Ty = VE->getType();
4721
4722 if (Ty->isVariablyModifiedType())
4723 CGF.EmitVariablyModifiedType(Ty);
4724
4725 Address ArgValue = Address::invalid();
4726 Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
4727
4728 llvm::Type *ArgTy = ConvertType(VE->getType());
4729
4730 // If EmitVAArg fails, emit an error.
4731 if (!ArgPtr.isValid()) {
4732 CGF.ErrorUnsupported(VE, "va_arg expression");
4733 return llvm::UndefValue::get(ArgTy);
4734 }
4735
4736 // FIXME Volatility.
4737 llvm::Value *Val = Builder.CreateLoad(ArgPtr);
4738
4739 // If EmitVAArg promoted the type, we must truncate it.
4740 if (ArgTy != Val->getType()) {
4741 if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
4742 Val = Builder.CreateIntToPtr(Val, ArgTy);
4743 else
4744 Val = Builder.CreateTrunc(Val, ArgTy);
4745 }
4746
4747 return Val;
4748 }
4749
4750 Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
4751 return CGF.EmitBlockLiteral(block);
4752 }
4753
4754 // Convert a vec3 to vec4, or vice versa.
4755 static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
4756 Value *Src, unsigned NumElementsDst) {
4757 static constexpr int Mask[] = {0, 1, 2, -1};
4758 return Builder.CreateShuffleVector(Src,
4759 llvm::makeArrayRef(Mask, NumElementsDst));
4760 }
4761
4762 // Create cast instructions for converting LLVM value \p Src to LLVM type \p
4763 // DstTy. \p Src has the same size as \p DstTy. Both are single value types
4764 // but could be scalars or vectors of different lengths, and either can be
4765 // a pointer.
4766 // There are 4 cases:
4767 // 1. non-pointer -> non-pointer : needs 1 bitcast
4768 // 2. pointer -> pointer : needs 1 bitcast or addrspacecast
4769 // 3. pointer -> non-pointer
4770 // a) pointer -> intptr_t : needs 1 ptrtoint
4771 // b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
4772 // 4. non-pointer -> pointer
4773 // a) intptr_t -> pointer : needs 1 inttoptr
4774 // b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
4775 // Note: for cases 3b and 4b two casts are required since LLVM casts do not
4776 // allow casting directly between pointer types and non-integer non-pointer
4777 // types.
4778 static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
4779 const llvm::DataLayout &DL,
4780 Value *Src, llvm::Type *DstTy,
4781 StringRef Name = "") {
4782 auto SrcTy = Src->getType();
4783
4784 // Case 1.
4785 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
4786 return Builder.CreateBitCast(Src, DstTy, Name);
4787
4788 // Case 2.
4789 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
4790 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
4791
4792 // Case 3.
4793 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
4794 // Case 3b.
4795 if (!DstTy->isIntegerTy())
4796 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
4797 // Cases 3a and 3b.
4798 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
4799 }
4800
4801 // Case 4b.
4802 if (!SrcTy->isIntegerTy())
4803 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
4804 // Cases 4a and 4b.
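// (E.g., on a 64-bit target, double -> i8* is emitted as a bitcast of the
// double to i64 followed by an inttoptr to i8*; illustrative only.)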
4805 return Builder.CreateIntToPtr(Src, DstTy, Name); 4806 } 4807 4808 Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) { 4809 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr()); 4810 llvm::Type *DstTy = ConvertType(E->getType()); 4811 4812 llvm::Type *SrcTy = Src->getType(); 4813 unsigned NumElementsSrc = 4814 isa<llvm::VectorType>(SrcTy) 4815 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements() 4816 : 0; 4817 unsigned NumElementsDst = 4818 isa<llvm::VectorType>(DstTy) 4819 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements() 4820 : 0; 4821 4822 // Going from vec3 to non-vec3 is a special case and requires a shuffle 4823 // vector to get a vec4, then a bitcast if the target type is different. 4824 if (NumElementsSrc == 3 && NumElementsDst != 3) { 4825 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4); 4826 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src, 4827 DstTy); 4828 4829 Src->setName("astype"); 4830 return Src; 4831 } 4832 4833 // Going from non-vec3 to vec3 is a special case and requires a bitcast 4834 // to vec4 if the original type is not vec4, then a shuffle vector to 4835 // get a vec3. 4836 if (NumElementsSrc != 3 && NumElementsDst == 3) { 4837 auto *Vec4Ty = llvm::FixedVectorType::get( 4838 cast<llvm::VectorType>(DstTy)->getElementType(), 4); 4839 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src, 4840 Vec4Ty); 4841 4842 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3); 4843 Src->setName("astype"); 4844 return Src; 4845 } 4846 4847 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), 4848 Src, DstTy, "astype"); 4849 } 4850 4851 Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) { 4852 return CGF.EmitAtomicExpr(E).getScalarVal(); 4853 } 4854 4855 //===----------------------------------------------------------------------===// 4856 // Entry Point into this File 4857 //===----------------------------------------------------------------------===// 4858 4859 /// Emit the computation of the specified expression of scalar type, ignoring 4860 /// the result. 4861 Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) { 4862 assert(E && hasScalarEvaluationKind(E->getType()) && 4863 "Invalid scalar expression to emit"); 4864 4865 return ScalarExprEmitter(*this, IgnoreResultAssign) 4866 .Visit(const_cast<Expr *>(E)); 4867 } 4868 4869 /// Emit a conversion from the specified type to the specified destination type, 4870 /// both of which are LLVM scalar types. 4871 Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy, 4872 QualType DstTy, 4873 SourceLocation Loc) { 4874 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) && 4875 "Invalid scalar expression to emit"); 4876 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc); 4877 } 4878 4879 /// Emit a conversion from the specified complex type to the specified 4880 /// destination type, where the destination type is an LLVM scalar type. 
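/// For example, converting a _Complex double to bool tests whether either
/// component is nonzero, while converting it to double keeps only the real
/// part, per the usual C conversion rules.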
4881 Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
4882 QualType SrcTy,
4883 QualType DstTy,
4884 SourceLocation Loc) {
4885 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
4886 "Invalid complex -> scalar conversion");
4887 return ScalarExprEmitter(*this)
4888 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
4889 }
4890
4891
4892 llvm::Value *CodeGenFunction::
4893 EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
4894 bool isInc, bool isPre) {
4895 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
4896 }
4897
4898 LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
4899 // object->isa or (*object).isa
4900 // Generate code as for: *(Class*)object
4901
4902 Expr *BaseExpr = E->getBase();
4903 Address Addr = Address::invalid();
4904 if (BaseExpr->isPRValue()) {
4905 Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
4906 } else {
4907 Addr = EmitLValue(BaseExpr).getAddress(*this);
4908 }
4909
4910 // Cast the address to Class*.
4911 Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
4912 return MakeAddrLValue(Addr, E->getType());
4913 }
4914
4915
4916 LValue CodeGenFunction::EmitCompoundAssignmentLValue(
4917 const CompoundAssignOperator *E) {
4918 ScalarExprEmitter Scalar(*this);
4919 Value *Result = nullptr;
4920 switch (E->getOpcode()) {
4921 #define COMPOUND_OP(Op) \
4922 case BO_##Op##Assign: \
4923 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
4924 Result)
4925 COMPOUND_OP(Mul);
4926 COMPOUND_OP(Div);
4927 COMPOUND_OP(Rem);
4928 COMPOUND_OP(Add);
4929 COMPOUND_OP(Sub);
4930 COMPOUND_OP(Shl);
4931 COMPOUND_OP(Shr);
4932 COMPOUND_OP(And);
4933 COMPOUND_OP(Xor);
4934 COMPOUND_OP(Or);
4935 #undef COMPOUND_OP
4936
4937 case BO_PtrMemD:
4938 case BO_PtrMemI:
4939 case BO_Mul:
4940 case BO_Div:
4941 case BO_Rem:
4942 case BO_Add:
4943 case BO_Sub:
4944 case BO_Shl:
4945 case BO_Shr:
4946 case BO_LT:
4947 case BO_GT:
4948 case BO_LE:
4949 case BO_GE:
4950 case BO_EQ:
4951 case BO_NE:
4952 case BO_Cmp:
4953 case BO_And:
4954 case BO_Xor:
4955 case BO_Or:
4956 case BO_LAnd:
4957 case BO_LOr:
4958 case BO_Assign:
4959 case BO_Comma:
4960 llvm_unreachable("Not valid compound assignment operators");
4961 }
4962
4963 llvm_unreachable("Unhandled compound assignment operator");
4964 }
4965
4966 struct GEPOffsetAndOverflow {
4967 // The total (signed) byte offset for the GEP.
4968 llvm::Value *TotalOffset;
4969 // The offset overflow flag - true if the total offset overflows.
4970 llvm::Value *OffsetOverflows;
4971 };
4972
4973 /// Evaluate the given GEPVal, which is either an inbounds GEP or a constant,
4974 /// and compute the total offset it applies from its base pointer BasePtr.
4975 /// Returns the offset in bytes and a boolean flag indicating whether an
4976 /// overflow happened during evaluation.
4977 static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
4978 llvm::LLVMContext &VMContext,
4979 CodeGenModule &CGM,
4980 CGBuilderTy &Builder) {
4981 const auto &DL = CGM.getDataLayout();
4982
4983 // The total (signed) byte offset for the GEP.
4984 llvm::Value *TotalOffset = nullptr;
4985
4986 // Was the GEP already reduced to a constant?
4987 if (isa<llvm::Constant>(GEPVal)) { 4988 // Compute the offset by casting both pointers to integers and subtracting: 4989 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr) 4990 Value *BasePtr_int = 4991 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType())); 4992 Value *GEPVal_int = 4993 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType())); 4994 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int); 4995 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()}; 4996 } 4997 4998 auto *GEP = cast<llvm::GEPOperator>(GEPVal); 4999 assert(GEP->getPointerOperand() == BasePtr && 5000 "BasePtr must be the base of the GEP."); 5001 assert(GEP->isInBounds() && "Expected inbounds GEP"); 5002 5003 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType()); 5004 5005 // Grab references to the signed add/mul overflow intrinsics for intptr_t. 5006 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy); 5007 auto *SAddIntrinsic = 5008 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy); 5009 auto *SMulIntrinsic = 5010 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy); 5011 5012 // The offset overflow flag - true if the total offset overflows. 5013 llvm::Value *OffsetOverflows = Builder.getFalse(); 5014 5015 /// Return the result of the given binary operation. 5016 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS, 5017 llvm::Value *RHS) -> llvm::Value * { 5018 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop"); 5019 5020 // If the operands are constants, return a constant result. 5021 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) { 5022 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) { 5023 llvm::APInt N; 5024 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode, 5025 /*Signed=*/true, N); 5026 if (HasOverflow) 5027 OffsetOverflows = Builder.getTrue(); 5028 return llvm::ConstantInt::get(VMContext, N); 5029 } 5030 } 5031 5032 // Otherwise, compute the result with checked arithmetic. 5033 auto *ResultAndOverflow = Builder.CreateCall( 5034 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS}); 5035 OffsetOverflows = Builder.CreateOr( 5036 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows); 5037 return Builder.CreateExtractValue(ResultAndOverflow, 0); 5038 }; 5039 5040 // Determine the total byte offset by looking at each GEP operand. 5041 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP); 5042 GTI != GTE; ++GTI) { 5043 llvm::Value *LocalOffset; 5044 auto *Index = GTI.getOperand(); 5045 // Compute the local offset contributed by this indexing step: 5046 if (auto *STy = GTI.getStructTypeOrNull()) { 5047 // For struct indexing, the local offset is the byte position of the 5048 // specified field. 5049 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue(); 5050 LocalOffset = llvm::ConstantInt::get( 5051 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo)); 5052 } else { 5053 // Otherwise this is array-like indexing. The local offset is the index 5054 // multiplied by the element size. 5055 auto *ElementSize = llvm::ConstantInt::get( 5056 IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType())); 5057 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true); 5058 LocalOffset = eval(BO_Mul, ElementSize, IndexS); 5059 } 5060 5061 // If this is the first offset, set it as the total offset. Otherwise, add 5062 // the local offset into the running total. 
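// (Illustrative: for `gep inbounds %struct.S, ptr %p, i64 %i, i32 2` the
// running total ends up as `%i * sizeof(struct S) + offsetof(struct S,
// field 2)`, with each multiply and add checked for signed overflow.)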
5063 if (!TotalOffset || TotalOffset == Zero)
5064 TotalOffset = LocalOffset;
5065 else
5066 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
5067 }
5068
5069 return {TotalOffset, OffsetOverflows};
5070 }
5071
5072 Value *
5073 CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
5074 ArrayRef<Value *> IdxList,
5075 bool SignedIndices, bool IsSubtraction,
5076 SourceLocation Loc, const Twine &Name) {
5077 llvm::Type *PtrTy = Ptr->getType();
5078 Value *GEPVal = Builder.CreateInBoundsGEP(ElemTy, Ptr, IdxList, Name);
5079
5080 // If the pointer overflow sanitizer isn't enabled, do nothing.
5081 if (!SanOpts.has(SanitizerKind::PointerOverflow))
5082 return GEPVal;
5083
5084 // Perform the nullptr-and-offset check unless the nullptr is defined.
5085 bool PerformNullCheck = !NullPointerIsDefined(
5086 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
5087 // Check for overflows unless the GEP got constant-folded, and only in the
5088 // default address space.
5089 bool PerformOverflowCheck =
5090 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
5091
5092 if (!(PerformNullCheck || PerformOverflowCheck))
5093 return GEPVal;
5094
5095 const auto &DL = CGM.getDataLayout();
5096
5097 SanitizerScope SanScope(this);
5098 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
5099
5100 GEPOffsetAndOverflow EvaluatedGEP =
5101 EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
5102
5103 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
5104 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
5105 "If the offset got constant-folded, we don't expect that there was an "
5106 "overflow.");
5107
5108 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
5109
5110 // Common case: if the total offset is zero, and we are using C++ semantics,
5111 // where nullptr+0 is defined, don't emit a check.
5112 if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
5113 return GEPVal;
5114
5115 // Now that we've computed the total offset, add it to the base pointer (with
5116 // wrapping semantics).
5117 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
5118 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
5119
5120 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
5121
5122 if (PerformNullCheck) {
5123 // In C++, if the base pointer evaluates to a null pointer value,
5124 // the only valid pointer this inbounds GEP can produce is also
5125 // a null pointer, so the offset must also evaluate to zero.
5126 // Likewise, if we have a non-null base pointer, we cannot get a null
5127 // pointer as a result, so the offset cannot be -intptr_t(BasePtr).
5128 // In other words, both pointers are either null, or both are non-null,
5129 // or the behaviour is undefined.
5130 //
5131 // C, however, is more strict in this regard, and gives more
5132 // optimization opportunities: in C, additionally, nullptr+0 is undefined.
5133 // So both the input to the 'gep inbounds' AND the output must not be null.
5134 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
5135 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
5136 auto *Valid =
5137 CGM.getLangOpts().CPlusPlus
5138 ?
Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
5139 : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
5140 Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
5141 }
5142
5143 if (PerformOverflowCheck) {
5144 // The GEP is valid if:
5145 // 1) The total offset doesn't overflow, and
5146 // 2) The sign of the difference between the computed address and the base
5147 // pointer matches the sign of the total offset.
5148 llvm::Value *ValidGEP;
5149 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
5150 if (SignedIndices) {
5151 // The GEP is computed as `unsigned base + signed offset`, therefore:
5152 // * If the offset was positive, the computed pointer cannot be
5153 // [unsigned] less than the base pointer, unless it overflowed.
5154 // * If the offset was negative, the computed pointer cannot be
5155 // [unsigned] greater than the base pointer, unless it overflowed.
5156 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5157 auto *PosOrZeroOffset =
5158 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
5159 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
5160 ValidGEP =
5161 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
5162 } else if (!IsSubtraction) {
5163 // The GEP is computed as `unsigned base + unsigned offset`, therefore the
5164 // computed pointer cannot be [unsigned] less than the base pointer,
5165 // unless there was an overflow.
5166 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
5167 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5168 } else {
5169 // The GEP is computed as `unsigned base - unsigned offset`, therefore the
5170 // computed pointer cannot be [unsigned] greater than the base pointer,
5171 // unless there was an overflow.
5172 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
5173 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
5174 }
5175 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
5176 Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
5177 }
5178
5179 assert(!Checks.empty() && "Should have produced some checks.");
5180
5181 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
5182 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
5183 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
5184 EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
5185
5186 return GEPVal;
5187 }
5188