//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/TypeSize.h"
#include <cstdarg>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
// Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
                    : LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
                    : LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
                    : LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;                   // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E; // Entire expr, for error unsupported. May not be binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases, this
  /// operation did not follow usual arithmetic conversion and both operands
  /// might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations
    // return an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++0x nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                        const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return llvm::None;

  QualType BaseTy = Base->getType();
  if (!BaseTy->isPromotableIntegerType() ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return llvm::None;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).has_value();
}

/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
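  // For example, with a 32-bit 'int', multiplying two promoted 'unsigned
  // short' values such as 65535 * 65535 does not fit in the signed promoted
  // type, so the check is only elided below when one of the unpromoted types
  // is less than half the width of the promoted type.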
  const auto *BO = cast<BinaryOperator>(Op.E);
  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either one
  // of the unpromoted types are less than half the size of the promoted type.
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}

class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  // Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here,
        // unless the alignment-assumption sanitizer is enabled,
        // then we prefer the assumption over alignment attribute
        // on IR function param.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy = dyn_cast<TypedefType>(E->getType()))
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }

  /// EmitLoadOfLValue - Given an expression with complex type that represents
  /// a value l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and other fixed point or fixed point
  /// and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again. Optimize this common case.
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (E->getType()->isVoidType())
      return nullptr;

    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.Builder.CreateLoad(Address(
            Result, CGF.ConvertTypeForMem(E->getType()),
            CGF.getContext().getTypeAlignInChars(E->getType())));
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);

  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }
  Value *VisitUnaryPlus(const UnaryOperator *E) {
    // This differs from gcc, though, most likely due to a bug in gcc.
    TestAndClearIgnoreResultAssign();
    return Visit(E->getSubExpr());
  }
  Value *VisitUnaryMinus    (const UnaryOperator *E);
  Value *VisitUnaryNot      (const UnaryOperator *E);
  Value *VisitUnaryLNot     (const UnaryOperator *E);
  Value *VisitUnaryReal     (const UnaryOperator *E);
  Value *VisitUnaryImag     (const UnaryOperator *E);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        LLVM_FALLTHROUGH;
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero, bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetWidthMinusOneValue(Value *LHS, Value *RHS);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
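  // For example, for a 32-bit LHS the shift amount is masked with 31, while a
  // type whose width is not a power of two is reduced with an URem by the
  // width.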
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E);
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP)                                                     \
  Value *VisitBin ## OP(const BinaryOperator *E) {                          \
    return Emit ## OP(EmitBinOps(E));                                       \
  }                                                                         \
  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) {        \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP);           \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG)                                    \
    Value *VisitBin##CODE(const BinaryOperator *E) {                        \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI,         \
                         llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP

  Value *VisitBinAssign     (const BinaryOperator *E);

  Value *VisitBinLAnd       (const BinaryOperator *E);
  Value *VisitBinLOr        (const BinaryOperator *E);
  Value *VisitBinComma      (const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // to an integer).
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
        CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

  llvm::Value *GE =
      Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
      Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(Src != Dst);
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned truncation.
  // Else, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerMask Mask;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.
  return std::make_pair(Kind, std::make_pair(Check, Mask));
}

static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
    QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}

void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from larger unsigned type to smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check =
          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  // If the comparison result is 'i1 false', then the truncation was lossy.

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // NOTE: zero value is considered to be non-negative.
  auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
                                       const char *Name) -> Value * {
    // Is this value a signed type?
    bool VSigned = VType->isSignedIntegerOrEnumerationType();
    llvm::Type *VTy = V->getType();
    if (!VSigned) {
      // If the value is unsigned, then it is never negative.
      // FIXME: can we encounter non-scalar VTy here?
      return llvm::ConstantInt::getFalse(VTy->getContext());
    }
    // Get the zero of the same type with which we will be comparing.
    llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
    // %V.isnegative = icmp slt %V, 0
    // I.e is %V *strictly* less than zero, does it have negative value?
    return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                              llvm::Twine(Name) + "." + V->getName() +
                                  ".negativitycheck");
  };

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
  // 3. Now, was the 'negativity status' preserved during the conversion?
  //    NOTE: conversion from negative to zero is considered to change the sign.
  //    (We want to get 'false' when the conversion changed the sign)
  //    So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
}

void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // Now, we do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would have been
  // dropped by the opt passes (instcombine) always anyways.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to *larger* *signed* type, then no check is needed.
  // Because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero.)
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled,
    // and this is a truncation from signed type, then no check is needed.
    // Because here sign change check is interchangeable with truncation check.
    return;
  }
  // That's it. We can't rule out any more cases with the data we have.
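  // For example, converting an 'int' holding -1 to 'unsigned int' keeps the
  // same bit width but changes the value from negative to non-negative, which
  // is exactly the sign change reported by the check emitted below.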

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled,
    // and we are truncating from larger unsigned type to smaller signed type,
    // let's handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
                                         QualType DstType, llvm::Type *SrcTy,
                                         llvm::Type *DstTy,
                                         ScalarConversionOpts Opts) {
  // The Element types determine the type of cast to perform.
  llvm::Type *SrcElementTy;
  llvm::Type *DstElementTy;
  QualType SrcElementType;
  QualType DstElementType;
  if (SrcType->isMatrixType() && DstType->isMatrixType()) {
    SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
    DstElementType = DstType->castAs<MatrixType>()->getElementType();
  } else {
    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
           "cannot cast between matrix and non-matrix types");
    SrcElementTy = SrcTy;
    DstElementTy = DstTy;
    SrcElementType = SrcType;
    DstElementType = DstType;
  }

  if (isa<llvm::IntegerType>(SrcElementTy)) {
    bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
    if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }

    if (isa<llvm::IntegerType>(DstElementTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  if (isa<llvm::IntegerType>(DstElementTy)) {
    assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
    bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();

    // If we can't recognize overflow as undefined behavior, assume that
    // overflow saturates. This protects against normal optimizations if we are
    // compiling with non-standard FP semantics.
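    // For example, with -fno-strict-float-cast-overflow a conversion such as
    // (int)1e30f is emitted as llvm.fptosi.sat and saturates to INT_MAX
    // instead of relying on the overflow being undefined behavior.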
    if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
      llvm::Intrinsic::ID IID =
          IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
      return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
    }

    if (IsSigned)
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

  if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
    return Builder.CreateFPTrunc(Src, DstTy, "conv");
  return Builder.CreateFPExt(Src, DstTy, "conv");
}

/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType,
                                               SourceLocation Loc,
                                               ScalarConversionOpts Opts) {
  // All conversions involving fixed point types should be handled by the
  // EmitFixedPoint family functions. This is done to prevent bloating up this
  // function more, and although fixed point numbers are represented by
  // integers, we do not want to follow any logic that assumes they should be
  // treated as integers.
  // TODO(leonardchan): When necessary, add another if statement checking for
  // conversions to fixed point types from other types.
  if (SrcType->isFixedPointType()) {
    if (DstType->isBooleanType())
      // It is important that we check this before checking if the dest type is
      // an integer because booleans are technically integer types.
      // We do not need to check the padding bit on unsigned types if unsigned
      // padding is enabled because overflow into this bit is undefined
      // behavior.
      return Builder.CreateIsNotNull(Src, "tobool");
    if (DstType->isFixedPointType() || DstType->isIntegerType() ||
        DstType->isRealFloatingType())
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion from a fixed point type to another type.");
  } else if (DstType->isFixedPointType()) {
    if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
      // This also includes converting booleans and enums to fixed point types.
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion to a fixed point type from another type.");
  }

  QualType NoncanonicalSrcType = SrcType;
  QualType NoncanonicalDstType = DstType;

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return nullptr;

  llvm::Value *OrigSrc = Src;
  QualType OrigSrcType = SrcType;
  llvm::Type *SrcTy = Src->getType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  llvm::Type *DstTy = ConvertType(DstType);

  // Cast from half through float if half isn't a native type.
  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Cast to FP using the intrinsic if the half type itself isn't supported.
    if (DstTy->isFloatingPointTy()) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
            Src);
    } else {
      // Cast to other types through float, using either the intrinsic or
      // FPExt, depending on whether the half type itself is supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        Src = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            Src);
      } else {
        Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
      }
      SrcType = CGF.getContext().FloatTy;
      SrcTy = CGF.FloatTy;
    }
  }

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy) {
    if (Opts.EmitImplicitIntegerSignChangeChecks)
      EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
                                 NoncanonicalDstType, Loc);

    return Src;
  }

  // Handle pointer conversions next: pointers can only be converted to/from
  // other pointers and integers. Check for pointer types in terms of LLVM, as
  // some native types (like Obj-C id) may map to a pointer type.
  if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(SrcTy))
      return Builder.CreateBitCast(Src, DstTy, "conv");

    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    llvm::Value *IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(SrcTy)) {
    // Must be a ptr to int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }

  // A scalar can be splatted to an extended vector of the same element type
  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
    // Sema should add casts to make sure that the source expression's type is
    // the same as the vector's element type (sans qualifiers)
    assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
               SrcType.getTypePtr() &&
           "Splatted expr doesn't match with vector element type?");

    // Splat the element across to all elements
    unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
    return Builder.CreateVectorSplat(NumElements, Src, "splat");
  }

  if (SrcType->isMatrixType() && DstType->isMatrixType())
    return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);

  if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
    // Allow bitcast from vector to integer/fp of the same size.
    llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
    llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
    if (SrcSize == DstSize)
      return Builder.CreateBitCast(Src, DstTy, "conv");

    // Conversions between vectors of different sizes are not allowed except
    // when vectors of half are involved. Operations on storage-only half
    // vectors require promoting half vector operands to float vectors and
    // truncating the result, which is either an int or float vector, to a
    // short or half vector.

    // Source and destination are both expected to be vectors.
    llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    (void)DstElementTy;

    assert(((SrcElementTy->isIntegerTy() &&
             DstElementTy->isIntegerTy()) ||
            (SrcElementTy->isFloatingPointTy() &&
             DstElementTy->isFloatingPointTy())) &&
           "unexpected conversion between a floating-point vector and an "
           "integer vector");

    // Truncate an i32 vector to an i16 vector.
    if (SrcElementTy->isIntegerTy())
      return Builder.CreateIntCast(Src, DstTy, false, "conv");

    // Truncate a float vector to a half vector.
    if (SrcSize > DstSize)
      return Builder.CreateFPTrunc(Src, DstTy, "conv");

    // Promote a half vector to a float vector.
    return Builder.CreateFPExt(Src, DstTy, "conv");
  }

  // Finally, we have the arithmetic types: real int/float.
  Value *Res = nullptr;
  llvm::Type *ResTy = DstTy;

  // An overflowing conversion has undefined behavior if either the source type
  // or the destination type is a floating-point type. However, we consider the
  // range of representable values for all floating-point types to be
  // [-inf,+inf], so no overflow can ever happen when the destination type is a
  // floating-point type.
  if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
      OrigSrcType->isFloatingType())
    EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
                             Loc);

  // Cast to half through float if half isn't a native type.
  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Make sure we cast in a single step if from another FP type.
    if (SrcTy->isFloatingPointTy()) {
      // Use the intrinsic if the half type itself isn't supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
      // If the half type is supported, just use an fptrunc.
1452 return Builder.CreateFPTrunc(Src, DstTy); 1453 } 1454 DstTy = CGF.FloatTy; 1455 } 1456 1457 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts); 1458 1459 if (DstTy != ResTy) { 1460 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { 1461 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion"); 1462 Res = Builder.CreateCall( 1463 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy), 1464 Res); 1465 } else { 1466 Res = Builder.CreateFPTrunc(Res, ResTy, "conv"); 1467 } 1468 } 1469 1470 if (Opts.EmitImplicitIntegerTruncationChecks) 1471 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res, 1472 NoncanonicalDstType, Loc); 1473 1474 if (Opts.EmitImplicitIntegerSignChangeChecks) 1475 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res, 1476 NoncanonicalDstType, Loc); 1477 1478 return Res; 1479 } 1480 1481 Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy, 1482 QualType DstTy, 1483 SourceLocation Loc) { 1484 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder); 1485 llvm::Value *Result; 1486 if (SrcTy->isRealFloatingType()) 1487 Result = FPBuilder.CreateFloatingToFixed(Src, 1488 CGF.getContext().getFixedPointSemantics(DstTy)); 1489 else if (DstTy->isRealFloatingType()) 1490 Result = FPBuilder.CreateFixedToFloating(Src, 1491 CGF.getContext().getFixedPointSemantics(SrcTy), 1492 ConvertType(DstTy)); 1493 else { 1494 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy); 1495 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy); 1496 1497 if (DstTy->isIntegerType()) 1498 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema, 1499 DstFPSema.getWidth(), 1500 DstFPSema.isSigned()); 1501 else if (SrcTy->isIntegerType()) 1502 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(), 1503 DstFPSema); 1504 else 1505 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema); 1506 } 1507 return Result; 1508 } 1509 1510 /// Emit a conversion from the specified complex type to the specified 1511 /// destination type, where the destination type is an LLVM scalar type. 1512 Value *ScalarExprEmitter::EmitComplexToScalarConversion( 1513 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy, 1514 SourceLocation Loc) { 1515 // Get the source element type. 1516 SrcTy = SrcTy->castAs<ComplexType>()->getElementType(); 1517 1518 // Handle conversions to bool first, they are special: comparisons against 0. 1519 if (DstTy->isBooleanType()) { 1520 // Complex != 0 -> (Real != 0) | (Imag != 0) 1521 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc); 1522 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc); 1523 return Builder.CreateOr(Src.first, Src.second, "tobool"); 1524 } 1525 1526 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type, 1527 // the imaginary part of the complex value is discarded and the value of the 1528 // real part is converted according to the conversion rules for the 1529 // corresponding real type. 1530 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc); 1531 } 1532 1533 Value *ScalarExprEmitter::EmitNullValue(QualType Ty) { 1534 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty); 1535 } 1536 1537 /// Emit a sanitization check for the given "binary" operation (which 1538 /// might actually be a unary increment which has been lowered to a binary 1539 /// operation). The check passes if all values in \p Checks (which are \c i1), 1540 /// are \c true. 
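/// (Illustrative example, not the only caller: for a signed 'a + b' compiled with
/// -fsanitize=signed-integer-overflow, EmitOverflowCheckedBinOp passes a single
/// check here -- the negated overflow bit extracted from llvm.sadd.with.overflow.)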
1541 void ScalarExprEmitter::EmitBinOpCheck( 1542 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) { 1543 assert(CGF.IsSanitizerScope); 1544 SanitizerHandler Check; 1545 SmallVector<llvm::Constant *, 4> StaticData; 1546 SmallVector<llvm::Value *, 2> DynamicData; 1547 1548 BinaryOperatorKind Opcode = Info.Opcode; 1549 if (BinaryOperator::isCompoundAssignmentOp(Opcode)) 1550 Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode); 1551 1552 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc())); 1553 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E); 1554 if (UO && UO->getOpcode() == UO_Minus) { 1555 Check = SanitizerHandler::NegateOverflow; 1556 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType())); 1557 DynamicData.push_back(Info.RHS); 1558 } else { 1559 if (BinaryOperator::isShiftOp(Opcode)) { 1560 // Shift LHS negative or too large, or RHS out of bounds. 1561 Check = SanitizerHandler::ShiftOutOfBounds; 1562 const BinaryOperator *BO = cast<BinaryOperator>(Info.E); 1563 StaticData.push_back( 1564 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType())); 1565 StaticData.push_back( 1566 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType())); 1567 } else if (Opcode == BO_Div || Opcode == BO_Rem) { 1568 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1). 1569 Check = SanitizerHandler::DivremOverflow; 1570 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty)); 1571 } else { 1572 // Arithmetic overflow (+, -, *). 1573 switch (Opcode) { 1574 case BO_Add: Check = SanitizerHandler::AddOverflow; break; 1575 case BO_Sub: Check = SanitizerHandler::SubOverflow; break; 1576 case BO_Mul: Check = SanitizerHandler::MulOverflow; break; 1577 default: llvm_unreachable("unexpected opcode for bin op check"); 1578 } 1579 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty)); 1580 } 1581 DynamicData.push_back(Info.LHS); 1582 DynamicData.push_back(Info.RHS); 1583 } 1584 1585 CGF.EmitCheck(Checks, Check, StaticData, DynamicData); 1586 } 1587 1588 //===----------------------------------------------------------------------===// 1589 // Visitor Methods 1590 //===----------------------------------------------------------------------===// 1591 1592 Value *ScalarExprEmitter::VisitExpr(Expr *E) { 1593 CGF.ErrorUnsupported(E, "scalar expression"); 1594 if (E->getType()->isVoidType()) 1595 return nullptr; 1596 return llvm::UndefValue::get(CGF.ConvertType(E->getType())); 1597 } 1598 1599 Value * 1600 ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) { 1601 ASTContext &Context = CGF.getContext(); 1602 llvm::Optional<LangAS> GlobalAS = 1603 Context.getTargetInfo().getConstantAddressSpace(); 1604 llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr( 1605 E->ComputeName(Context), "__usn_str", 1606 static_cast<unsigned>(GlobalAS.value_or(LangAS::Default))); 1607 1608 unsigned ExprAS = Context.getTargetAddressSpace(E->getType()); 1609 1610 if (GlobalConstStr->getType()->getPointerAddressSpace() == ExprAS) 1611 return GlobalConstStr; 1612 1613 llvm::PointerType *PtrTy = cast<llvm::PointerType>(GlobalConstStr->getType()); 1614 llvm::PointerType *NewPtrTy = 1615 llvm::PointerType::getWithSamePointeeType(PtrTy, ExprAS); 1616 return Builder.CreateAddrSpaceCast(GlobalConstStr, NewPtrTy, "usn_addr_cast"); 1617 } 1618 1619 Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) { 1620 // Vector Mask Case 1621 if (E->getNumSubExprs() == 2) { 1622 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0)); 1623 
Value *RHS = CGF.EmitScalarExpr(E->getExpr(1)); 1624 Value *Mask; 1625 1626 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType()); 1627 unsigned LHSElts = LTy->getNumElements(); 1628 1629 Mask = RHS; 1630 1631 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType()); 1632 1633 // Mask off the high bits of each shuffle index. 1634 Value *MaskBits = 1635 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1); 1636 Mask = Builder.CreateAnd(Mask, MaskBits, "mask"); 1637 1638 // newv = undef 1639 // mask = mask & maskbits 1640 // for each elt 1641 // n = extract mask i 1642 // x = extract val n 1643 // newv = insert newv, x, i 1644 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(), 1645 MTy->getNumElements()); 1646 Value* NewV = llvm::UndefValue::get(RTy); 1647 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) { 1648 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i); 1649 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx"); 1650 1651 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt"); 1652 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins"); 1653 } 1654 return NewV; 1655 } 1656 1657 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0)); 1658 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1)); 1659 1660 SmallVector<int, 32> Indices; 1661 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) { 1662 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2); 1663 // Check for -1 and output it as undef in the IR. 1664 if (Idx.isSigned() && Idx.isAllOnes()) 1665 Indices.push_back(-1); 1666 else 1667 Indices.push_back(Idx.getZExtValue()); 1668 } 1669 1670 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle"); 1671 } 1672 1673 Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) { 1674 QualType SrcType = E->getSrcExpr()->getType(), 1675 DstType = E->getType(); 1676 1677 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr()); 1678 1679 SrcType = CGF.getContext().getCanonicalType(SrcType); 1680 DstType = CGF.getContext().getCanonicalType(DstType); 1681 if (SrcType == DstType) return Src; 1682 1683 assert(SrcType->isVectorType() && 1684 "ConvertVector source type must be a vector"); 1685 assert(DstType->isVectorType() && 1686 "ConvertVector destination type must be a vector"); 1687 1688 llvm::Type *SrcTy = Src->getType(); 1689 llvm::Type *DstTy = ConvertType(DstType); 1690 1691 // Ignore conversions like int -> uint. 1692 if (SrcTy == DstTy) 1693 return Src; 1694 1695 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(), 1696 DstEltType = DstType->castAs<VectorType>()->getElementType(); 1697 1698 assert(SrcTy->isVectorTy() && 1699 "ConvertVector source IR type must be a vector"); 1700 assert(DstTy->isVectorTy() && 1701 "ConvertVector destination IR type must be a vector"); 1702 1703 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(), 1704 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType(); 1705 1706 if (DstEltType->isBooleanType()) { 1707 assert((SrcEltTy->isFloatingPointTy() || 1708 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion"); 1709 1710 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy); 1711 if (SrcEltTy->isFloatingPointTy()) { 1712 return Builder.CreateFCmpUNE(Src, Zero, "tobool"); 1713 } else { 1714 return Builder.CreateICmpNE(Src, Zero, "tobool"); 1715 } 1716 } 1717 1718 // We have the arithmetic types: real int/float. 
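// (Illustrative: a __builtin_convertvector from a vector of 'int' to a vector of
// 'float' takes the CreateSIToFP path below; an unsigned element type would take
// CreateUIToFP instead.)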
1719 Value *Res = nullptr; 1720 1721 if (isa<llvm::IntegerType>(SrcEltTy)) { 1722 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType(); 1723 if (isa<llvm::IntegerType>(DstEltTy)) 1724 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv"); 1725 else if (InputSigned) 1726 Res = Builder.CreateSIToFP(Src, DstTy, "conv"); 1727 else 1728 Res = Builder.CreateUIToFP(Src, DstTy, "conv"); 1729 } else if (isa<llvm::IntegerType>(DstEltTy)) { 1730 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion"); 1731 if (DstEltType->isSignedIntegerOrEnumerationType()) 1732 Res = Builder.CreateFPToSI(Src, DstTy, "conv"); 1733 else 1734 Res = Builder.CreateFPToUI(Src, DstTy, "conv"); 1735 } else { 1736 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() && 1737 "Unknown real conversion"); 1738 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID()) 1739 Res = Builder.CreateFPTrunc(Src, DstTy, "conv"); 1740 else 1741 Res = Builder.CreateFPExt(Src, DstTy, "conv"); 1742 } 1743 1744 return Res; 1745 } 1746 1747 Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) { 1748 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) { 1749 CGF.EmitIgnoredExpr(E->getBase()); 1750 return CGF.emitScalarConstant(Constant, E); 1751 } else { 1752 Expr::EvalResult Result; 1753 if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) { 1754 llvm::APSInt Value = Result.Val.getInt(); 1755 CGF.EmitIgnoredExpr(E->getBase()); 1756 return Builder.getInt(Value); 1757 } 1758 } 1759 1760 return EmitLoadOfLValue(E); 1761 } 1762 1763 Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) { 1764 TestAndClearIgnoreResultAssign(); 1765 1766 // Emit subscript expressions in rvalue context's. For most cases, this just 1767 // loads the lvalue formed by the subscript expr. However, we have to be 1768 // careful, because the base of a vector subscript is occasionally an rvalue, 1769 // so we can't get it as an lvalue. 1770 if (!E->getBase()->getType()->isVectorType() && 1771 !E->getBase()->getType()->isVLSTBuiltinType()) 1772 return EmitLoadOfLValue(E); 1773 1774 // Handle the vector case. The base must be a vector, the index must be an 1775 // integer value. 1776 Value *Base = Visit(E->getBase()); 1777 Value *Idx = Visit(E->getIdx()); 1778 QualType IdxTy = E->getIdx()->getType(); 1779 1780 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds)) 1781 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true); 1782 1783 return Builder.CreateExtractElement(Base, Idx, "vecext"); 1784 } 1785 1786 Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) { 1787 TestAndClearIgnoreResultAssign(); 1788 1789 // Handle the vector case. The base must be a vector, the index must be an 1790 // integer value. 1791 Value *RowIdx = Visit(E->getRowIdx()); 1792 Value *ColumnIdx = Visit(E->getColumnIdx()); 1793 1794 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>(); 1795 unsigned NumRows = MatrixTy->getNumRows(); 1796 llvm::MatrixBuilder MB(Builder); 1797 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows); 1798 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0) 1799 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened()); 1800 1801 Value *Matrix = Visit(E->getBase()); 1802 1803 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds? 
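// (Illustrative: matrix values are stored column-major, so CreateIndex above forms
// the flattened index ColumnIdx * NumRows + RowIdx; the extractelement below reads
// that lane.)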
1804 return Builder.CreateExtractElement(Matrix, Idx, "matrixext"); 1805 } 1806 1807 static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, 1808 unsigned Off) { 1809 int MV = SVI->getMaskValue(Idx); 1810 if (MV == -1) 1811 return -1; 1812 return Off + MV; 1813 } 1814 1815 static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) { 1816 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) && 1817 "Index operand too large for shufflevector mask!"); 1818 return C->getZExtValue(); 1819 } 1820 1821 Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { 1822 bool Ignore = TestAndClearIgnoreResultAssign(); 1823 (void)Ignore; 1824 assert (Ignore == false && "init list ignored"); 1825 unsigned NumInitElements = E->getNumInits(); 1826 1827 if (E->hadArrayRangeDesignator()) 1828 CGF.ErrorUnsupported(E, "GNU array range designator extension"); 1829 1830 llvm::VectorType *VType = 1831 dyn_cast<llvm::VectorType>(ConvertType(E->getType())); 1832 1833 if (!VType) { 1834 if (NumInitElements == 0) { 1835 // C++11 value-initialization for the scalar. 1836 return EmitNullValue(E->getType()); 1837 } 1838 // We have a scalar in braces. Just use the first element. 1839 return Visit(E->getInit(0)); 1840 } 1841 1842 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements(); 1843 1844 // Loop over initializers collecting the Value for each, and remembering 1845 // whether the source was swizzle (ExtVectorElementExpr). This will allow 1846 // us to fold the shuffle for the swizzle into the shuffle for the vector 1847 // initializer, since LLVM optimizers generally do not want to touch 1848 // shuffles. 1849 unsigned CurIdx = 0; 1850 bool VIsUndefShuffle = false; 1851 llvm::Value *V = llvm::UndefValue::get(VType); 1852 for (unsigned i = 0; i != NumInitElements; ++i) { 1853 Expr *IE = E->getInit(i); 1854 Value *Init = Visit(IE); 1855 SmallVector<int, 16> Args; 1856 1857 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType()); 1858 1859 // Handle scalar elements. If the scalar initializer is actually one 1860 // element of a different vector of the same width, use shuffle instead of 1861 // extract+insert. 
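// (Illustrative: for an initializer such as 'float4 w = { v.x, v.y, 0.f, 0.f }' where
// 'v' is itself a float4, the v.x / v.y swizzles arrive here as extractelements from
// a vector of the result width, so they can be folded into shufflevectors below
// rather than emitted as extract+insert pairs.)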
1862 if (!VVT) { 1863 if (isa<ExtVectorElementExpr>(IE)) { 1864 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init); 1865 1866 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType()) 1867 ->getNumElements() == ResElts) { 1868 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand()); 1869 Value *LHS = nullptr, *RHS = nullptr; 1870 if (CurIdx == 0) { 1871 // insert into undef -> shuffle (src, undef) 1872 // shufflemask must use an i32 1873 Args.push_back(getAsInt32(C, CGF.Int32Ty)); 1874 Args.resize(ResElts, -1); 1875 1876 LHS = EI->getVectorOperand(); 1877 RHS = V; 1878 VIsUndefShuffle = true; 1879 } else if (VIsUndefShuffle) { 1880 // insert into undefshuffle && size match -> shuffle (v, src) 1881 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V); 1882 for (unsigned j = 0; j != CurIdx; ++j) 1883 Args.push_back(getMaskElt(SVV, j, 0)); 1884 Args.push_back(ResElts + C->getZExtValue()); 1885 Args.resize(ResElts, -1); 1886 1887 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0); 1888 RHS = EI->getVectorOperand(); 1889 VIsUndefShuffle = false; 1890 } 1891 if (!Args.empty()) { 1892 V = Builder.CreateShuffleVector(LHS, RHS, Args); 1893 ++CurIdx; 1894 continue; 1895 } 1896 } 1897 } 1898 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx), 1899 "vecinit"); 1900 VIsUndefShuffle = false; 1901 ++CurIdx; 1902 continue; 1903 } 1904 1905 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements(); 1906 1907 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's 1908 // input is the same width as the vector being constructed, generate an 1909 // optimized shuffle of the swizzle input into the result. 1910 unsigned Offset = (CurIdx == 0) ? 0 : ResElts; 1911 if (isa<ExtVectorElementExpr>(IE)) { 1912 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init); 1913 Value *SVOp = SVI->getOperand(0); 1914 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType()); 1915 1916 if (OpTy->getNumElements() == ResElts) { 1917 for (unsigned j = 0; j != CurIdx; ++j) { 1918 // If the current vector initializer is a shuffle with undef, merge 1919 // this shuffle directly into it. 1920 if (VIsUndefShuffle) { 1921 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0)); 1922 } else { 1923 Args.push_back(j); 1924 } 1925 } 1926 for (unsigned j = 0, je = InitElts; j != je; ++j) 1927 Args.push_back(getMaskElt(SVI, j, Offset)); 1928 Args.resize(ResElts, -1); 1929 1930 if (VIsUndefShuffle) 1931 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0); 1932 1933 Init = SVOp; 1934 } 1935 } 1936 1937 // Extend init to result vector length, and then shuffle its contribution 1938 // to the vector initializer into V. 1939 if (Args.empty()) { 1940 for (unsigned j = 0; j != InitElts; ++j) 1941 Args.push_back(j); 1942 Args.resize(ResElts, -1); 1943 Init = Builder.CreateShuffleVector(Init, Args, "vext"); 1944 1945 Args.clear(); 1946 for (unsigned j = 0; j != CurIdx; ++j) 1947 Args.push_back(j); 1948 for (unsigned j = 0; j != InitElts; ++j) 1949 Args.push_back(j + Offset); 1950 Args.resize(ResElts, -1); 1951 } 1952 1953 // If V is undef, make sure it ends up on the RHS of the shuffle to aid 1954 // merging subsequent shuffles into this one. 1955 if (CurIdx == 0) 1956 std::swap(V, Init); 1957 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit"); 1958 VIsUndefShuffle = isa<llvm::UndefValue>(Init); 1959 CurIdx += InitElts; 1960 } 1961 1962 // FIXME: evaluate codegen vs. shuffling against constant null vector. 
1963 // Emit remaining default initializers. 1964 llvm::Type *EltTy = VType->getElementType(); 1965 1966 // Emit remaining default initializers 1967 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) { 1968 Value *Idx = Builder.getInt32(CurIdx); 1969 llvm::Value *Init = llvm::Constant::getNullValue(EltTy); 1970 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit"); 1971 } 1972 return V; 1973 } 1974 1975 bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) { 1976 const Expr *E = CE->getSubExpr(); 1977 1978 if (CE->getCastKind() == CK_UncheckedDerivedToBase) 1979 return false; 1980 1981 if (isa<CXXThisExpr>(E->IgnoreParens())) { 1982 // We always assume that 'this' is never null. 1983 return false; 1984 } 1985 1986 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) { 1987 // And that glvalue casts are never null. 1988 if (ICE->isGLValue()) 1989 return false; 1990 } 1991 1992 return true; 1993 } 1994 1995 // VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts 1996 // have to handle a more broad range of conversions than explicit casts, as they 1997 // handle things like function to ptr-to-function decay etc. 1998 Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { 1999 Expr *E = CE->getSubExpr(); 2000 QualType DestTy = CE->getType(); 2001 CastKind Kind = CE->getCastKind(); 2002 2003 // These cases are generally not written to ignore the result of 2004 // evaluating their sub-expressions, so we clear this now. 2005 bool Ignored = TestAndClearIgnoreResultAssign(); 2006 2007 // Since almost all cast kinds apply to scalars, this switch doesn't have 2008 // a default case, so the compiler will warn on a missing case. The cases 2009 // are in the same order as in the CastKind enum. 2010 switch (Kind) { 2011 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!"); 2012 case CK_BuiltinFnToFnPtr: 2013 llvm_unreachable("builtin functions are handled elsewhere"); 2014 2015 case CK_LValueBitCast: 2016 case CK_ObjCObjectLValueCast: { 2017 Address Addr = EmitLValue(E).getAddress(CGF); 2018 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy)); 2019 LValue LV = CGF.MakeAddrLValue(Addr, DestTy); 2020 return EmitLoadOfLValue(LV, CE->getExprLoc()); 2021 } 2022 2023 case CK_LValueToRValueBitCast: { 2024 LValue SourceLVal = CGF.EmitLValue(E); 2025 Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF), 2026 CGF.ConvertTypeForMem(DestTy)); 2027 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy); 2028 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo()); 2029 return EmitLoadOfLValue(DestLV, CE->getExprLoc()); 2030 } 2031 2032 case CK_CPointerToObjCPointerCast: 2033 case CK_BlockPointerToObjCPointerCast: 2034 case CK_AnyPointerToBlockPointerCast: 2035 case CK_BitCast: { 2036 Value *Src = Visit(const_cast<Expr*>(E)); 2037 llvm::Type *SrcTy = Src->getType(); 2038 llvm::Type *DstTy = ConvertType(DestTy); 2039 if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() && 2040 SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) { 2041 llvm_unreachable("wrong cast for pointers in different address spaces" 2042 "(must be an address space cast)!"); 2043 } 2044 2045 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) { 2046 if (auto *PT = DestTy->getAs<PointerType>()) { 2047 CGF.EmitVTablePtrCheckForCast( 2048 PT->getPointeeType(), 2049 Address(Src, 2050 CGF.ConvertTypeForMem( 2051 E->getType()->castAs<PointerType>()->getPointeeType()), 2052 CGF.getPointerAlign()), 2053 
/*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast, 2054 CE->getBeginLoc()); 2055 } 2056 } 2057 2058 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) { 2059 const QualType SrcType = E->getType(); 2060 2061 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) { 2062 // Casting to pointer that could carry dynamic information (provided by 2063 // invariant.group) requires launder. 2064 Src = Builder.CreateLaunderInvariantGroup(Src); 2065 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) { 2066 // Casting to pointer that does not carry dynamic information (provided 2067 // by invariant.group) requires stripping it. Note that we don't do it 2068 // if the source could not be dynamic type and destination could be 2069 // dynamic because dynamic information is already laundered. It is 2070 // because launder(strip(src)) == launder(src), so there is no need to 2071 // add extra strip before launder. 2072 Src = Builder.CreateStripInvariantGroup(Src); 2073 } 2074 } 2075 2076 // Update heapallocsite metadata when there is an explicit pointer cast. 2077 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) { 2078 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) { 2079 QualType PointeeType = DestTy->getPointeeType(); 2080 if (!PointeeType.isNull()) 2081 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType, 2082 CE->getExprLoc()); 2083 } 2084 } 2085 2086 // If Src is a fixed vector and Dst is a scalable vector, and both have the 2087 // same element type, use the llvm.vector.insert intrinsic to perform the 2088 // bitcast. 2089 if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) { 2090 if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) { 2091 // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate 2092 // vector, use a vector insert and bitcast the result. 2093 bool NeedsBitCast = false; 2094 auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); 2095 llvm::Type *OrigType = DstTy; 2096 if (ScalableDst == PredType && 2097 FixedSrc->getElementType() == Builder.getInt8Ty()) { 2098 DstTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2); 2099 ScalableDst = cast<llvm::ScalableVectorType>(DstTy); 2100 NeedsBitCast = true; 2101 } 2102 if (FixedSrc->getElementType() == ScalableDst->getElementType()) { 2103 llvm::Value *UndefVec = llvm::UndefValue::get(DstTy); 2104 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty); 2105 llvm::Value *Result = Builder.CreateInsertVector( 2106 DstTy, UndefVec, Src, Zero, "castScalableSve"); 2107 if (NeedsBitCast) 2108 Result = Builder.CreateBitCast(Result, OrigType); 2109 return Result; 2110 } 2111 } 2112 } 2113 2114 // If Src is a scalable vector and Dst is a fixed vector, and both have the 2115 // same element type, use the llvm.vector.extract intrinsic to perform the 2116 // bitcast. 2117 if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) { 2118 if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) { 2119 // If we are casting a scalable 16 x i1 predicate vector to a fixed i8 2120 // vector, bitcast the source and use a vector extract. 
2121 auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); 2122 if (ScalableSrc == PredType && 2123 FixedDst->getElementType() == Builder.getInt8Ty()) { 2124 SrcTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2); 2125 ScalableSrc = cast<llvm::ScalableVectorType>(SrcTy); 2126 Src = Builder.CreateBitCast(Src, SrcTy); 2127 } 2128 if (ScalableSrc->getElementType() == FixedDst->getElementType()) { 2129 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty); 2130 return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve"); 2131 } 2132 } 2133 } 2134 2135 // Perform VLAT <-> VLST bitcast through memory. 2136 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics 2137 // require the element types of the vectors to be the same, we 2138 // need to keep this around for bitcasts between VLAT <-> VLST where 2139 // the element types of the vectors are not the same, until we figure 2140 // out a better way of doing these casts. 2141 if ((isa<llvm::FixedVectorType>(SrcTy) && 2142 isa<llvm::ScalableVectorType>(DstTy)) || 2143 (isa<llvm::ScalableVectorType>(SrcTy) && 2144 isa<llvm::FixedVectorType>(DstTy))) { 2145 Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value"); 2146 LValue LV = CGF.MakeAddrLValue(Addr, E->getType()); 2147 CGF.EmitStoreOfScalar(Src, LV); 2148 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy), 2149 "castFixedSve"); 2150 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy); 2151 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo()); 2152 return EmitLoadOfLValue(DestLV, CE->getExprLoc()); 2153 } 2154 return Builder.CreateBitCast(Src, DstTy); 2155 } 2156 case CK_AddressSpaceConversion: { 2157 Expr::EvalResult Result; 2158 if (E->EvaluateAsRValue(Result, CGF.getContext()) && 2159 Result.Val.isNullPointer()) { 2160 // If E has side effect, it is emitted even if its final result is a 2161 // null pointer. In that case, a DCE pass should be able to 2162 // eliminate the useless instructions emitted during translating E. 2163 if (Result.HasSideEffects) 2164 Visit(E); 2165 return CGF.CGM.getNullPointer(cast<llvm::PointerType>( 2166 ConvertType(DestTy)), DestTy); 2167 } 2168 // Since target may map different address spaces in AST to the same address 2169 // space, an address space conversion may end up as a bitcast. 2170 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast( 2171 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(), 2172 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy)); 2173 } 2174 case CK_AtomicToNonAtomic: 2175 case CK_NonAtomicToAtomic: 2176 case CK_UserDefinedConversion: 2177 return Visit(const_cast<Expr*>(E)); 2178 2179 case CK_NoOp: { 2180 llvm::Value *V = Visit(const_cast<Expr *>(E)); 2181 if (V) { 2182 // CK_NoOp can model a pointer qualification conversion, which can remove 2183 // an array bound and change the IR type. 2184 // FIXME: Once pointee types are removed from IR, remove this. 
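// (Illustrative: e.g. a qualification conversion from 'int (*)[4]' to
// 'const int (*)[]' keeps the pointer value but may change the pointee IR type,
// hence the defensive bitcast below.)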
2185 llvm::Type *T = ConvertType(DestTy); 2186 if (T != V->getType()) 2187 V = Builder.CreateBitCast(V, T); 2188 } 2189 return V; 2190 } 2191 2192 case CK_BaseToDerived: { 2193 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl(); 2194 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!"); 2195 2196 Address Base = CGF.EmitPointerWithAlignment(E); 2197 Address Derived = 2198 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl, 2199 CE->path_begin(), CE->path_end(), 2200 CGF.ShouldNullCheckClassCastValue(CE)); 2201 2202 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is 2203 // performed and the object is not of the derived type. 2204 if (CGF.sanitizePerformTypeCheck()) 2205 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(), 2206 Derived.getPointer(), DestTy->getPointeeType()); 2207 2208 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast)) 2209 CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived, 2210 /*MayBeNull=*/true, 2211 CodeGenFunction::CFITCK_DerivedCast, 2212 CE->getBeginLoc()); 2213 2214 return Derived.getPointer(); 2215 } 2216 case CK_UncheckedDerivedToBase: 2217 case CK_DerivedToBase: { 2218 // The EmitPointerWithAlignment path does this fine; just discard 2219 // the alignment. 2220 return CGF.EmitPointerWithAlignment(CE).getPointer(); 2221 } 2222 2223 case CK_Dynamic: { 2224 Address V = CGF.EmitPointerWithAlignment(E); 2225 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE); 2226 return CGF.EmitDynamicCast(V, DCE); 2227 } 2228 2229 case CK_ArrayToPointerDecay: 2230 return CGF.EmitArrayToPointerDecay(E).getPointer(); 2231 case CK_FunctionToPointerDecay: 2232 return EmitLValue(E).getPointer(CGF); 2233 2234 case CK_NullToPointer: 2235 if (MustVisitNullValue(E)) 2236 CGF.EmitIgnoredExpr(E); 2237 2238 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)), 2239 DestTy); 2240 2241 case CK_NullToMemberPointer: { 2242 if (MustVisitNullValue(E)) 2243 CGF.EmitIgnoredExpr(E); 2244 2245 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>(); 2246 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT); 2247 } 2248 2249 case CK_ReinterpretMemberPointer: 2250 case CK_BaseToDerivedMemberPointer: 2251 case CK_DerivedToBaseMemberPointer: { 2252 Value *Src = Visit(E); 2253 2254 // Note that the AST doesn't distinguish between checked and 2255 // unchecked member pointer conversions, so we always have to 2256 // implement checked conversions here. This is inefficient when 2257 // actual control flow may be required in order to perform the 2258 // check, which it is for data member pointers (but not member 2259 // function pointers on Itanium and ARM). 
2260 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src); 2261 } 2262 2263 case CK_ARCProduceObject: 2264 return CGF.EmitARCRetainScalarExpr(E); 2265 case CK_ARCConsumeObject: 2266 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E)); 2267 case CK_ARCReclaimReturnedObject: 2268 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored); 2269 case CK_ARCExtendBlockObject: 2270 return CGF.EmitARCExtendBlockObject(E); 2271 2272 case CK_CopyAndAutoreleaseBlockObject: 2273 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType()); 2274 2275 case CK_FloatingRealToComplex: 2276 case CK_FloatingComplexCast: 2277 case CK_IntegralRealToComplex: 2278 case CK_IntegralComplexCast: 2279 case CK_IntegralComplexToFloatingComplex: 2280 case CK_FloatingComplexToIntegralComplex: 2281 case CK_ConstructorConversion: 2282 case CK_ToUnion: 2283 llvm_unreachable("scalar cast to non-scalar value"); 2284 2285 case CK_LValueToRValue: 2286 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy)); 2287 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); 2288 return Visit(const_cast<Expr*>(E)); 2289 2290 case CK_IntegralToPointer: { 2291 Value *Src = Visit(const_cast<Expr*>(E)); 2292 2293 // First, convert to the correct width so that we control the kind of 2294 // extension. 2295 auto DestLLVMTy = ConvertType(DestTy); 2296 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy); 2297 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType(); 2298 llvm::Value* IntResult = 2299 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv"); 2300 2301 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy); 2302 2303 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) { 2304 // Going from integer to pointer that could be dynamic requires reloading 2305 // dynamic information from invariant.group. 2306 if (DestTy.mayBeDynamicClass()) 2307 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr); 2308 } 2309 return IntToPtr; 2310 } 2311 case CK_PointerToIntegral: { 2312 assert(!DestTy->isBooleanType() && "bool should use PointerToBool"); 2313 auto *PtrExpr = Visit(E); 2314 2315 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) { 2316 const QualType SrcType = E->getType(); 2317 2318 // Casting to integer requires stripping dynamic information as it does 2319 // not carry it.
2320 if (SrcType.mayBeDynamicClass()) 2321 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr); 2322 } 2323 2324 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy)); 2325 } 2326 case CK_ToVoid: { 2327 CGF.EmitIgnoredExpr(E); 2328 return nullptr; 2329 } 2330 case CK_MatrixCast: { 2331 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2332 CE->getExprLoc()); 2333 } 2334 case CK_VectorSplat: { 2335 llvm::Type *DstTy = ConvertType(DestTy); 2336 Value *Elt = Visit(const_cast<Expr *>(E)); 2337 // Splat the element across to all elements 2338 llvm::ElementCount NumElements = 2339 cast<llvm::VectorType>(DstTy)->getElementCount(); 2340 return Builder.CreateVectorSplat(NumElements, Elt, "splat"); 2341 } 2342 2343 case CK_FixedPointCast: 2344 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2345 CE->getExprLoc()); 2346 2347 case CK_FixedPointToBoolean: 2348 assert(E->getType()->isFixedPointType() && 2349 "Expected src type to be fixed point type"); 2350 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type"); 2351 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2352 CE->getExprLoc()); 2353 2354 case CK_FixedPointToIntegral: 2355 assert(E->getType()->isFixedPointType() && 2356 "Expected src type to be fixed point type"); 2357 assert(DestTy->isIntegerType() && "Expected dest type to be an integer"); 2358 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2359 CE->getExprLoc()); 2360 2361 case CK_IntegralToFixedPoint: 2362 assert(E->getType()->isIntegerType() && 2363 "Expected src type to be an integer"); 2364 assert(DestTy->isFixedPointType() && 2365 "Expected dest type to be fixed point type"); 2366 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2367 CE->getExprLoc()); 2368 2369 case CK_IntegralCast: { 2370 ScalarConversionOpts Opts; 2371 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) { 2372 if (!ICE->isPartOfExplicitCast()) 2373 Opts = ScalarConversionOpts(CGF.SanOpts); 2374 } 2375 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2376 CE->getExprLoc(), Opts); 2377 } 2378 case CK_IntegralToFloating: 2379 case CK_FloatingToIntegral: 2380 case CK_FloatingCast: 2381 case CK_FixedPointToFloating: 2382 case CK_FloatingToFixedPoint: { 2383 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE); 2384 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2385 CE->getExprLoc()); 2386 } 2387 case CK_BooleanToSignedIntegral: { 2388 ScalarConversionOpts Opts; 2389 Opts.TreatBooleanAsSigned = true; 2390 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2391 CE->getExprLoc(), Opts); 2392 } 2393 case CK_IntegralToBoolean: 2394 return EmitIntToBoolConversion(Visit(E)); 2395 case CK_PointerToBoolean: 2396 return EmitPointerToBoolConversion(Visit(E), E->getType()); 2397 case CK_FloatingToBoolean: { 2398 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE); 2399 return EmitFloatToBoolConversion(Visit(E)); 2400 } 2401 case CK_MemberPointerToBoolean: { 2402 llvm::Value *MemPtr = Visit(E); 2403 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>(); 2404 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT); 2405 } 2406 2407 case CK_FloatingComplexToReal: 2408 case CK_IntegralComplexToReal: 2409 return CGF.EmitComplexExpr(E, false, true).first; 2410 2411 case CK_FloatingComplexToBoolean: 2412 case CK_IntegralComplexToBoolean: { 2413 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E); 2414 2415 // TODO: kill this function off, inline appropriate case here 2416 return 
EmitComplexToScalarConversion(V, E->getType(), DestTy, 2417 CE->getExprLoc()); 2418 } 2419 2420 case CK_ZeroToOCLOpaqueType: { 2421 assert((DestTy->isEventT() || DestTy->isQueueT() || 2422 DestTy->isOCLIntelSubgroupAVCType()) && 2423 "CK_ZeroToOCLEvent cast on non-event type"); 2424 return llvm::Constant::getNullValue(ConvertType(DestTy)); 2425 } 2426 2427 case CK_IntToOCLSampler: 2428 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF); 2429 2430 } // end of switch 2431 2432 llvm_unreachable("unknown scalar cast"); 2433 } 2434 2435 Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) { 2436 CodeGenFunction::StmtExprEvaluation eval(CGF); 2437 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(), 2438 !E->getType()->isVoidType()); 2439 if (!RetAlloca.isValid()) 2440 return nullptr; 2441 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()), 2442 E->getExprLoc()); 2443 } 2444 2445 Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { 2446 CodeGenFunction::RunCleanupsScope Scope(CGF); 2447 Value *V = Visit(E->getSubExpr()); 2448 // Defend against dominance problems caused by jumps out of expression 2449 // evaluation through the shared cleanup block. 2450 Scope.ForceCleanup({&V}); 2451 return V; 2452 } 2453 2454 //===----------------------------------------------------------------------===// 2455 // Unary Operators 2456 //===----------------------------------------------------------------------===// 2457 2458 static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, 2459 llvm::Value *InVal, bool IsInc, 2460 FPOptions FPFeatures) { 2461 BinOpInfo BinOp; 2462 BinOp.LHS = InVal; 2463 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false); 2464 BinOp.Ty = E->getType(); 2465 BinOp.Opcode = IsInc ? BO_Add : BO_Sub; 2466 BinOp.FPFeatures = FPFeatures; 2467 BinOp.E = E; 2468 return BinOp; 2469 } 2470 2471 llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior( 2472 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) { 2473 llvm::Value *Amount = 2474 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true); 2475 StringRef Name = IsInc ? "inc" : "dec"; 2476 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 2477 case LangOptions::SOB_Defined: 2478 return Builder.CreateAdd(InVal, Amount, Name); 2479 case LangOptions::SOB_Undefined: 2480 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 2481 return Builder.CreateNSWAdd(InVal, Amount, Name); 2482 LLVM_FALLTHROUGH; 2483 case LangOptions::SOB_Trapping: 2484 if (!E->canOverflow()) 2485 return Builder.CreateNSWAdd(InVal, Amount, Name); 2486 return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec( 2487 E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts()))); 2488 } 2489 llvm_unreachable("Unknown SignedOverflowBehaviorTy"); 2490 } 2491 2492 namespace { 2493 /// Handles check and update for lastprivate conditional variables. 
2494 class OMPLastprivateConditionalUpdateRAII { 2495 private: 2496 CodeGenFunction &CGF; 2497 const UnaryOperator *E; 2498 2499 public: 2500 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF, 2501 const UnaryOperator *E) 2502 : CGF(CGF), E(E) {} 2503 ~OMPLastprivateConditionalUpdateRAII() { 2504 if (CGF.getLangOpts().OpenMP) 2505 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional( 2506 CGF, E->getSubExpr()); 2507 } 2508 }; 2509 } // namespace 2510 2511 llvm::Value * 2512 ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, 2513 bool isInc, bool isPre) { 2514 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E); 2515 QualType type = E->getSubExpr()->getType(); 2516 llvm::PHINode *atomicPHI = nullptr; 2517 llvm::Value *value; 2518 llvm::Value *input; 2519 2520 int amount = (isInc ? 1 : -1); 2521 bool isSubtraction = !isInc; 2522 2523 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) { 2524 type = atomicTy->getValueType(); 2525 if (isInc && type->isBooleanType()) { 2526 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type); 2527 if (isPre) { 2528 Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified()) 2529 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent); 2530 return Builder.getTrue(); 2531 } 2532 // For atomic bool increment, we just store true and return it for 2533 // preincrement, do an atomic swap with true for postincrement 2534 return Builder.CreateAtomicRMW( 2535 llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True, 2536 llvm::AtomicOrdering::SequentiallyConsistent); 2537 } 2538 // Special case for atomic increment / decrement on integers, emit 2539 // atomicrmw instructions. We skip this if we want to be doing overflow 2540 // checking, and fall into the slow path with the atomic cmpxchg loop. 2541 if (!type->isBooleanType() && type->isIntegerType() && 2542 !(type->isUnsignedIntegerType() && 2543 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) && 2544 CGF.getLangOpts().getSignedOverflowBehavior() != 2545 LangOptions::SOB_Trapping) { 2546 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add : 2547 llvm::AtomicRMWInst::Sub; 2548 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add : 2549 llvm::Instruction::Sub; 2550 llvm::Value *amt = CGF.EmitToMemory( 2551 llvm::ConstantInt::get(ConvertType(type), 1, true), type); 2552 llvm::Value *old = 2553 Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt, 2554 llvm::AtomicOrdering::SequentiallyConsistent); 2555 return isPre ? Builder.CreateBinOp(op, old, amt) : old; 2556 } 2557 value = EmitLoadOfLValue(LV, E->getExprLoc()); 2558 input = value; 2559 // For every other atomic operation, we need to emit a load-op-cmpxchg loop 2560 llvm::BasicBlock *startBB = Builder.GetInsertBlock(); 2561 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn); 2562 value = CGF.EmitToMemory(value, type); 2563 Builder.CreateBr(opBB); 2564 Builder.SetInsertPoint(opBB); 2565 atomicPHI = Builder.CreatePHI(value->getType(), 2); 2566 atomicPHI->addIncoming(value, startBB); 2567 value = atomicPHI; 2568 } else { 2569 value = EmitLoadOfLValue(LV, E->getExprLoc()); 2570 input = value; 2571 } 2572 2573 // Special case of integer increment that we have to check first: bool++. 2574 // Due to promotion rules, we get: 2575 // bool++ -> bool = bool + 1 2576 // -> bool = (int)bool + 1 2577 // -> bool = ((int)bool + 1 != 0) 2578 // An interesting aspect of this is that increment is always true. 2579 // Decrement does not have this property. 
2580 if (isInc && type->isBooleanType()) { 2581 value = Builder.getTrue(); 2582 2583 // Most common case by far: integer increment. 2584 } else if (type->isIntegerType()) { 2585 QualType promotedType; 2586 bool canPerformLossyDemotionCheck = false; 2587 if (type->isPromotableIntegerType()) { 2588 promotedType = CGF.getContext().getPromotedIntegerType(type); 2589 assert(promotedType != type && "Shouldn't promote to the same type."); 2590 canPerformLossyDemotionCheck = true; 2591 canPerformLossyDemotionCheck &= 2592 CGF.getContext().getCanonicalType(type) != 2593 CGF.getContext().getCanonicalType(promotedType); 2594 canPerformLossyDemotionCheck &= 2595 PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck( 2596 type, promotedType); 2597 assert((!canPerformLossyDemotionCheck || 2598 type->isSignedIntegerOrEnumerationType() || 2599 promotedType->isSignedIntegerOrEnumerationType() || 2600 ConvertType(type)->getScalarSizeInBits() == 2601 ConvertType(promotedType)->getScalarSizeInBits()) && 2602 "The following check expects that if we do promotion to different " 2603 "underlying canonical type, at least one of the types (either " 2604 "base or promoted) will be signed, or the bitwidths will match."); 2605 } 2606 if (CGF.SanOpts.hasOneOf( 2607 SanitizerKind::ImplicitIntegerArithmeticValueChange) && 2608 canPerformLossyDemotionCheck) { 2609 // While `x += 1` (for `x` with width less than int) is modeled as 2610 // promotion+arithmetic+demotion, where we can catch lossy demotion with 2611 // ease, inc/dec with width less than int can't overflow because of 2612 // promotion rules, so we omit promotion+demotion, which means that we 2613 // cannot catch lossy "demotion". Because we still want to catch these cases 2614 // when the sanitizer is enabled, we perform the promotion, then perform 2615 // the increment/decrement in the wider type, and finally 2616 // perform the demotion. This will catch lossy demotions. 2617 2618 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc()); 2619 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true); 2620 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec"); 2621 // Do pass non-default ScalarConversionOpts so that sanitizer check is 2622 // emitted. 2623 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(), 2624 ScalarConversionOpts(CGF.SanOpts)); 2625 2626 // Note that signed integer inc/dec with width less than int can't 2627 // overflow because of promotion rules; we're just eliding a few steps 2628 // here. 2629 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) { 2630 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc); 2631 } else if (E->canOverflow() && type->isUnsignedIntegerType() && 2632 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) { 2633 value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec( 2634 E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts()))); 2635 } else { 2636 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true); 2637 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec"); 2638 } 2639 2640 // Next most common: pointer increment. 2641 } else if (const PointerType *ptr = type->getAs<PointerType>()) { 2642 QualType type = ptr->getPointeeType(); 2643 2644 // VLA types don't have constant size.
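// (Illustrative: for 'int (*p)[n]', '++p' has to advance by n ints, so the element
// count is taken from getVLASize below rather than from a compile-time constant.)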
2645 if (const VariableArrayType *vla 2646 = CGF.getContext().getAsVariableArrayType(type)) { 2647 llvm::Value *numElts = CGF.getVLASize(vla).NumElts; 2648 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize"); 2649 llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType()); 2650 if (CGF.getLangOpts().isSignedOverflowDefined()) 2651 value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc"); 2652 else 2653 value = CGF.EmitCheckedInBoundsGEP( 2654 elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction, 2655 E->getExprLoc(), "vla.inc"); 2656 2657 // Arithmetic on function pointers (!) is just +-1. 2658 } else if (type->isFunctionType()) { 2659 llvm::Value *amt = Builder.getInt32(amount); 2660 2661 value = CGF.EmitCastToVoidPtr(value); 2662 if (CGF.getLangOpts().isSignedOverflowDefined()) 2663 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr"); 2664 else 2665 value = CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt, 2666 /*SignedIndices=*/false, 2667 isSubtraction, E->getExprLoc(), 2668 "incdec.funcptr"); 2669 value = Builder.CreateBitCast(value, input->getType()); 2670 2671 // For everything else, we can just do a simple increment. 2672 } else { 2673 llvm::Value *amt = Builder.getInt32(amount); 2674 llvm::Type *elemTy = CGF.ConvertTypeForMem(type); 2675 if (CGF.getLangOpts().isSignedOverflowDefined()) 2676 value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr"); 2677 else 2678 value = CGF.EmitCheckedInBoundsGEP( 2679 elemTy, value, amt, /*SignedIndices=*/false, isSubtraction, 2680 E->getExprLoc(), "incdec.ptr"); 2681 } 2682 2683 // Vector increment/decrement. 2684 } else if (type->isVectorType()) { 2685 if (type->hasIntegerRepresentation()) { 2686 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount); 2687 2688 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec"); 2689 } else { 2690 value = Builder.CreateFAdd( 2691 value, 2692 llvm::ConstantFP::get(value->getType(), amount), 2693 isInc ? "inc" : "dec"); 2694 } 2695 2696 // Floating point. 2697 } else if (type->isRealFloatingType()) { 2698 // Add the inc/dec to the real part. 2699 llvm::Value *amt; 2700 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); 2701 2702 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { 2703 // Another special case: half FP increment should be done via float 2704 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { 2705 value = Builder.CreateCall( 2706 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, 2707 CGF.CGM.FloatTy), 2708 input, "incdec.conv"); 2709 } else { 2710 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv"); 2711 } 2712 } 2713 2714 if (value->getType()->isFloatTy()) 2715 amt = llvm::ConstantFP::get(VMContext, 2716 llvm::APFloat(static_cast<float>(amount))); 2717 else if (value->getType()->isDoubleTy()) 2718 amt = llvm::ConstantFP::get(VMContext, 2719 llvm::APFloat(static_cast<double>(amount))); 2720 else { 2721 // Remaining types are Half, LongDouble, __ibm128 or __float128. Convert 2722 // from float. 2723 llvm::APFloat F(static_cast<float>(amount)); 2724 bool ignored; 2725 const llvm::fltSemantics *FS; 2726 // Don't use getFloatTypeSemantics because Half isn't 2727 // necessarily represented using the "half" LLVM type. 
2728 if (value->getType()->isFP128Ty()) 2729 FS = &CGF.getTarget().getFloat128Format(); 2730 else if (value->getType()->isHalfTy()) 2731 FS = &CGF.getTarget().getHalfFormat(); 2732 else if (value->getType()->isPPC_FP128Ty()) 2733 FS = &CGF.getTarget().getIbm128Format(); 2734 else 2735 FS = &CGF.getTarget().getLongDoubleFormat(); 2736 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored); 2737 amt = llvm::ConstantFP::get(VMContext, F); 2738 } 2739 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec"); 2740 2741 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { 2742 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { 2743 value = Builder.CreateCall( 2744 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, 2745 CGF.CGM.FloatTy), 2746 value, "incdec.conv"); 2747 } else { 2748 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv"); 2749 } 2750 } 2751 2752 // Fixed-point types. 2753 } else if (type->isFixedPointType()) { 2754 // Fixed-point types are tricky. In some cases, it isn't possible to 2755 // represent a 1 or a -1 in the type at all. Piggyback off of 2756 // EmitFixedPointBinOp to avoid having to reimplement saturation. 2757 BinOpInfo Info; 2758 Info.E = E; 2759 Info.Ty = E->getType(); 2760 Info.Opcode = isInc ? BO_Add : BO_Sub; 2761 Info.LHS = value; 2762 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false); 2763 // If the type is signed, it's better to represent this as +(-1) or -(-1), 2764 // since -1 is guaranteed to be representable. 2765 if (type->isSignedFixedPointType()) { 2766 Info.Opcode = isInc ? BO_Sub : BO_Add; 2767 Info.RHS = Builder.CreateNeg(Info.RHS); 2768 } 2769 // Now, convert from our invented integer literal to the type of the unary 2770 // op. This will upscale and saturate if necessary. This value can become 2771 // undef in some cases. 2772 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder); 2773 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty); 2774 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema); 2775 value = EmitFixedPointBinOp(Info); 2776 2777 // Objective-C pointer types. 2778 } else { 2779 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>(); 2780 value = CGF.EmitCastToVoidPtr(value); 2781 2782 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType()); 2783 if (!isInc) size = -size; 2784 llvm::Value *sizeValue = 2785 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity()); 2786 2787 if (CGF.getLangOpts().isSignedOverflowDefined()) 2788 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr"); 2789 else 2790 value = CGF.EmitCheckedInBoundsGEP( 2791 CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction, 2792 E->getExprLoc(), "incdec.objptr"); 2793 value = Builder.CreateBitCast(value, input->getType()); 2794 } 2795 2796 if (atomicPHI) { 2797 llvm::BasicBlock *curBlock = Builder.GetInsertBlock(); 2798 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); 2799 auto Pair = CGF.EmitAtomicCompareExchange( 2800 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc()); 2801 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type); 2802 llvm::Value *success = Pair.second; 2803 atomicPHI->addIncoming(old, curBlock); 2804 Builder.CreateCondBr(success, contBB, atomicPHI->getParent()); 2805 Builder.SetInsertPoint(contBB); 2806 return isPre ? value : input; 2807 } 2808 2809 // Store the updated result through the lvalue. 
2810 if (LV.isBitField()) 2811 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value); 2812 else 2813 CGF.EmitStoreThroughLValue(RValue::get(value), LV); 2814 2815 // If this is a postinc, return the value read from memory, otherwise use the 2816 // updated value. 2817 return isPre ? value : input; 2818 } 2819 2820 2821 2822 Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) { 2823 TestAndClearIgnoreResultAssign(); 2824 Value *Op = Visit(E->getSubExpr()); 2825 2826 // Generate a unary FNeg for FP ops. 2827 if (Op->getType()->isFPOrFPVectorTy()) 2828 return Builder.CreateFNeg(Op, "fneg"); 2829 2830 // Emit unary minus with EmitSub so we handle overflow cases etc. 2831 BinOpInfo BinOp; 2832 BinOp.RHS = Op; 2833 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType()); 2834 BinOp.Ty = E->getType(); 2835 BinOp.Opcode = BO_Sub; 2836 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); 2837 BinOp.E = E; 2838 return EmitSub(BinOp); 2839 } 2840 2841 Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) { 2842 TestAndClearIgnoreResultAssign(); 2843 Value *Op = Visit(E->getSubExpr()); 2844 return Builder.CreateNot(Op, "neg"); 2845 } 2846 2847 Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { 2848 // Perform vector logical not on comparison with zero vector. 2849 if (E->getType()->isVectorType() && 2850 E->getType()->castAs<VectorType>()->getVectorKind() == 2851 VectorType::GenericVector) { 2852 Value *Oper = Visit(E->getSubExpr()); 2853 Value *Zero = llvm::Constant::getNullValue(Oper->getType()); 2854 Value *Result; 2855 if (Oper->getType()->isFPOrFPVectorTy()) { 2856 CodeGenFunction::CGFPOptionsRAII FPOptsRAII( 2857 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts())); 2858 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp"); 2859 } else 2860 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp"); 2861 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); 2862 } 2863 2864 // Compare operand to zero. 2865 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr()); 2866 2867 // Invert value. 2868 // TODO: Could dynamically modify easy computations here. For example, if 2869 // the operand is an icmp ne, turn into icmp eq. 2870 BoolVal = Builder.CreateNot(BoolVal, "lnot"); 2871 2872 // ZExt result to the expr type. 2873 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext"); 2874 } 2875 2876 Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) { 2877 // Try folding the offsetof to a constant. 2878 Expr::EvalResult EVResult; 2879 if (E->EvaluateAsInt(EVResult, CGF.getContext())) { 2880 llvm::APSInt Value = EVResult.Val.getInt(); 2881 return Builder.getInt(Value); 2882 } 2883 2884 // Loop over the components of the offsetof to compute the value. 
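// (Illustrative: a non-constant '__builtin_offsetof(S, a[i].b)' decomposes into a
// Field node for 'a', an Array node for '[i]' (index * element size), and a Field
// node for 'b'; each component's offset is added into Result.)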
2885 unsigned n = E->getNumComponents(); 2886 llvm::Type* ResultType = ConvertType(E->getType()); 2887 llvm::Value* Result = llvm::Constant::getNullValue(ResultType); 2888 QualType CurrentType = E->getTypeSourceInfo()->getType(); 2889 for (unsigned i = 0; i != n; ++i) { 2890 OffsetOfNode ON = E->getComponent(i); 2891 llvm::Value *Offset = nullptr; 2892 switch (ON.getKind()) { 2893 case OffsetOfNode::Array: { 2894 // Compute the index 2895 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex()); 2896 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr); 2897 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType(); 2898 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv"); 2899 2900 // Save the element type 2901 CurrentType = 2902 CGF.getContext().getAsArrayType(CurrentType)->getElementType(); 2903 2904 // Compute the element size 2905 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType, 2906 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity()); 2907 2908 // Multiply out to compute the result 2909 Offset = Builder.CreateMul(Idx, ElemSize); 2910 break; 2911 } 2912 2913 case OffsetOfNode::Field: { 2914 FieldDecl *MemberDecl = ON.getField(); 2915 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl(); 2916 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD); 2917 2918 // Compute the index of the field in its parent. 2919 unsigned i = 0; 2920 // FIXME: It would be nice if we didn't have to loop here! 2921 for (RecordDecl::field_iterator Field = RD->field_begin(), 2922 FieldEnd = RD->field_end(); 2923 Field != FieldEnd; ++Field, ++i) { 2924 if (*Field == MemberDecl) 2925 break; 2926 } 2927 assert(i < RL.getFieldCount() && "offsetof field in wrong type"); 2928 2929 // Compute the offset to the field 2930 int64_t OffsetInt = RL.getFieldOffset(i) / 2931 CGF.getContext().getCharWidth(); 2932 Offset = llvm::ConstantInt::get(ResultType, OffsetInt); 2933 2934 // Save the element type. 2935 CurrentType = MemberDecl->getType(); 2936 break; 2937 } 2938 2939 case OffsetOfNode::Identifier: 2940 llvm_unreachable("dependent __builtin_offsetof"); 2941 2942 case OffsetOfNode::Base: { 2943 if (ON.getBase()->isVirtual()) { 2944 CGF.ErrorUnsupported(E, "virtual base in offsetof"); 2945 continue; 2946 } 2947 2948 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl(); 2949 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD); 2950 2951 // Save the element type. 2952 CurrentType = ON.getBase()->getType(); 2953 2954 // Compute the offset to the base. 2955 auto *BaseRT = CurrentType->castAs<RecordType>(); 2956 auto *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl()); 2957 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD); 2958 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity()); 2959 break; 2960 } 2961 } 2962 Result = Builder.CreateAdd(Result, Offset); 2963 } 2964 return Result; 2965 } 2966 2967 /// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of 2968 /// argument of the sizeof expression as an integer. 2969 Value * 2970 ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( 2971 const UnaryExprOrTypeTraitExpr *E) { 2972 QualType TypeToSize = E->getTypeOfArgument(); 2973 if (E->getKind() == UETT_SizeOf) { 2974 if (const VariableArrayType *VAT = 2975 CGF.getContext().getAsVariableArrayType(TypeToSize)) { 2976 if (E->isArgumentType()) { 2977 // sizeof(type) - make sure to emit the VLA size. 
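        // Informal example: for `int n = ...; sizeof(int[n])` the VLA sizes
        // are emitted here, and the value returned below is the runtime
        // element count scaled by sizeof(int).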
2978 CGF.EmitVariablyModifiedType(TypeToSize); 2979 } else { 2980 // C99 6.5.3.4p2: If the argument is an expression of type 2981 // VLA, it is evaluated. 2982 CGF.EmitIgnoredExpr(E->getArgumentExpr()); 2983 } 2984 2985 auto VlaSize = CGF.getVLASize(VAT); 2986 llvm::Value *size = VlaSize.NumElts; 2987 2988 // Scale the number of non-VLA elements by the non-VLA element size. 2989 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type); 2990 if (!eltSize.isOne()) 2991 size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size); 2992 2993 return size; 2994 } 2995 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) { 2996 auto Alignment = 2997 CGF.getContext() 2998 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign( 2999 E->getTypeOfArgument()->getPointeeType())) 3000 .getQuantity(); 3001 return llvm::ConstantInt::get(CGF.SizeTy, Alignment); 3002 } 3003 3004 // If this isn't sizeof(vla), the result must be constant; use the constant 3005 // folding logic so we don't have to duplicate it here. 3006 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext())); 3007 } 3008 3009 Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) { 3010 Expr *Op = E->getSubExpr(); 3011 if (Op->getType()->isAnyComplexType()) { 3012 // If it's an l-value, load through the appropriate subobject l-value. 3013 // Note that we have to ask E because Op might be an l-value that 3014 // this won't work for, e.g. an Obj-C property. 3015 if (E->isGLValue()) 3016 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), 3017 E->getExprLoc()).getScalarVal(); 3018 3019 // Otherwise, calculate and project. 3020 return CGF.EmitComplexExpr(Op, false, true).first; 3021 } 3022 3023 return Visit(Op); 3024 } 3025 3026 Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) { 3027 Expr *Op = E->getSubExpr(); 3028 if (Op->getType()->isAnyComplexType()) { 3029 // If it's an l-value, load through the appropriate subobject l-value. 3030 // Note that we have to ask E because Op might be an l-value that 3031 // this won't work for, e.g. an Obj-C property. 3032 if (Op->isGLValue()) 3033 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), 3034 E->getExprLoc()).getScalarVal(); 3035 3036 // Otherwise, calculate and project. 3037 return CGF.EmitComplexExpr(Op, true, false).second; 3038 } 3039 3040 // __imag on a scalar returns zero. Emit the subexpr to ensure side 3041 // effects are evaluated, but not the actual value. 
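  // For instance (a rough example): `__imag f()` where f() returns a plain
  // double still evaluates the call for its side effects, but the value of
  // the expression is simply 0.0 of the converted result type.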
3042 if (Op->isGLValue()) 3043 CGF.EmitLValue(Op); 3044 else 3045 CGF.EmitScalarExpr(Op, true); 3046 return llvm::Constant::getNullValue(ConvertType(E->getType())); 3047 } 3048 3049 //===----------------------------------------------------------------------===// 3050 // Binary Operators 3051 //===----------------------------------------------------------------------===// 3052 3053 BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) { 3054 TestAndClearIgnoreResultAssign(); 3055 BinOpInfo Result; 3056 Result.LHS = Visit(E->getLHS()); 3057 Result.RHS = Visit(E->getRHS()); 3058 Result.Ty = E->getType(); 3059 Result.Opcode = E->getOpcode(); 3060 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); 3061 Result.E = E; 3062 return Result; 3063 } 3064 3065 LValue ScalarExprEmitter::EmitCompoundAssignLValue( 3066 const CompoundAssignOperator *E, 3067 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &), 3068 Value *&Result) { 3069 QualType LHSTy = E->getLHS()->getType(); 3070 BinOpInfo OpInfo; 3071 3072 if (E->getComputationResultType()->isAnyComplexType()) 3073 return CGF.EmitScalarCompoundAssignWithComplex(E, Result); 3074 3075 // Emit the RHS first. __block variables need to have the rhs evaluated 3076 // first, plus this should improve codegen a little. 3077 OpInfo.RHS = Visit(E->getRHS()); 3078 OpInfo.Ty = E->getComputationResultType(); 3079 OpInfo.Opcode = E->getOpcode(); 3080 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); 3081 OpInfo.E = E; 3082 // Load/convert the LHS. 3083 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); 3084 3085 llvm::PHINode *atomicPHI = nullptr; 3086 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) { 3087 QualType type = atomicTy->getValueType(); 3088 if (!type->isBooleanType() && type->isIntegerType() && 3089 !(type->isUnsignedIntegerType() && 3090 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) && 3091 CGF.getLangOpts().getSignedOverflowBehavior() != 3092 LangOptions::SOB_Trapping) { 3093 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP; 3094 llvm::Instruction::BinaryOps Op; 3095 switch (OpInfo.Opcode) { 3096 // We don't have atomicrmw operands for *, %, /, <<, >> 3097 case BO_MulAssign: case BO_DivAssign: 3098 case BO_RemAssign: 3099 case BO_ShlAssign: 3100 case BO_ShrAssign: 3101 break; 3102 case BO_AddAssign: 3103 AtomicOp = llvm::AtomicRMWInst::Add; 3104 Op = llvm::Instruction::Add; 3105 break; 3106 case BO_SubAssign: 3107 AtomicOp = llvm::AtomicRMWInst::Sub; 3108 Op = llvm::Instruction::Sub; 3109 break; 3110 case BO_AndAssign: 3111 AtomicOp = llvm::AtomicRMWInst::And; 3112 Op = llvm::Instruction::And; 3113 break; 3114 case BO_XorAssign: 3115 AtomicOp = llvm::AtomicRMWInst::Xor; 3116 Op = llvm::Instruction::Xor; 3117 break; 3118 case BO_OrAssign: 3119 AtomicOp = llvm::AtomicRMWInst::Or; 3120 Op = llvm::Instruction::Or; 3121 break; 3122 default: 3123 llvm_unreachable("Invalid compound assignment type"); 3124 } 3125 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) { 3126 llvm::Value *Amt = CGF.EmitToMemory( 3127 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy, 3128 E->getExprLoc()), 3129 LHSTy); 3130 Value *OldVal = Builder.CreateAtomicRMW( 3131 AtomicOp, LHSLV.getPointer(CGF), Amt, 3132 llvm::AtomicOrdering::SequentiallyConsistent); 3133 3134 // Since operation is atomic, the result type is guaranteed to be the 3135 // same as the input in LLVM terms. 
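        // Rough sketch (assuming something like `_Atomic int x; x += 1;`):
        // the memory update is a single `atomicrmw add`, and the value of
        // the whole expression is recomputed below as OldVal + Amt instead
        // of re-reading the object.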
3136 Result = Builder.CreateBinOp(Op, OldVal, Amt); 3137 return LHSLV; 3138 } 3139 } 3140 // FIXME: For floating point types, we should be saving and restoring the 3141 // floating point environment in the loop. 3142 llvm::BasicBlock *startBB = Builder.GetInsertBlock(); 3143 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn); 3144 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc()); 3145 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type); 3146 Builder.CreateBr(opBB); 3147 Builder.SetInsertPoint(opBB); 3148 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2); 3149 atomicPHI->addIncoming(OpInfo.LHS, startBB); 3150 OpInfo.LHS = atomicPHI; 3151 } 3152 else 3153 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc()); 3154 3155 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures); 3156 SourceLocation Loc = E->getExprLoc(); 3157 OpInfo.LHS = 3158 EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc); 3159 3160 // Expand the binary operator. 3161 Result = (this->*Func)(OpInfo); 3162 3163 // Convert the result back to the LHS type, 3164 // potentially with Implicit Conversion sanitizer check. 3165 Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy, 3166 Loc, ScalarConversionOpts(CGF.SanOpts)); 3167 3168 if (atomicPHI) { 3169 llvm::BasicBlock *curBlock = Builder.GetInsertBlock(); 3170 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); 3171 auto Pair = CGF.EmitAtomicCompareExchange( 3172 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc()); 3173 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy); 3174 llvm::Value *success = Pair.second; 3175 atomicPHI->addIncoming(old, curBlock); 3176 Builder.CreateCondBr(success, contBB, atomicPHI->getParent()); 3177 Builder.SetInsertPoint(contBB); 3178 return LHSLV; 3179 } 3180 3181 // Store the result value into the LHS lvalue. Bit-fields are handled 3182 // specially because the result is altered by the store, i.e., [C99 6.5.16p1] 3183 // 'An assignment expression has the value of the left operand after the 3184 // assignment...'. 3185 if (LHSLV.isBitField()) 3186 CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result); 3187 else 3188 CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV); 3189 3190 if (CGF.getLangOpts().OpenMP) 3191 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, 3192 E->getLHS()); 3193 return LHSLV; 3194 } 3195 3196 Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E, 3197 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) { 3198 bool Ignore = TestAndClearIgnoreResultAssign(); 3199 Value *RHS = nullptr; 3200 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS); 3201 3202 // If the result is clearly ignored, return now. 3203 if (Ignore) 3204 return nullptr; 3205 3206 // The result of an assignment in C is the assigned r-value. 3207 if (!CGF.getLangOpts().CPlusPlus) 3208 return RHS; 3209 3210 // If the lvalue is non-volatile, return the computed value of the assignment. 3211 if (!LHS.isVolatileQualified()) 3212 return RHS; 3213 3214 // Otherwise, reload the value. 
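  // Hedged example: in C++, for `volatile int v; use(v += 3);` the value of
  // the compound assignment is produced by re-reading v here, rather than
  // reusing the value that was just stored.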
3215 return EmitLoadOfLValue(LHS, E->getExprLoc()); 3216 } 3217 3218 void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck( 3219 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) { 3220 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks; 3221 3222 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) { 3223 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero), 3224 SanitizerKind::IntegerDivideByZero)); 3225 } 3226 3227 const auto *BO = cast<BinaryOperator>(Ops.E); 3228 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) && 3229 Ops.Ty->hasSignedIntegerRepresentation() && 3230 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) && 3231 Ops.mayHaveIntegerOverflow()) { 3232 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType()); 3233 3234 llvm::Value *IntMin = 3235 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth())); 3236 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty); 3237 3238 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin); 3239 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne); 3240 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or"); 3241 Checks.push_back( 3242 std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow)); 3243 } 3244 3245 if (Checks.size() > 0) 3246 EmitBinOpCheck(Checks, Ops); 3247 } 3248 3249 Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) { 3250 { 3251 CodeGenFunction::SanitizerScope SanScope(&CGF); 3252 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) || 3253 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) && 3254 Ops.Ty->isIntegerType() && 3255 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) { 3256 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); 3257 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true); 3258 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) && 3259 Ops.Ty->isRealFloatingType() && 3260 Ops.mayHaveFloatDivisionByZero()) { 3261 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); 3262 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero); 3263 EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero), 3264 Ops); 3265 } 3266 } 3267 3268 if (Ops.Ty->isConstantMatrixType()) { 3269 llvm::MatrixBuilder MB(Builder); 3270 // We need to check the types of the operands of the operator to get the 3271 // correct matrix dimensions. 
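    // A rough example of what reaches this path:
    //   typedef float m4x4_t __attribute__((matrix_type(4, 4)));
    //   m4x4_t m; float s; ... m / s ...
    // i.e. only the matrix-divided-by-arithmetic-scalar form, which is what
    // the asserts below check.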
3272 auto *BO = cast<BinaryOperator>(Ops.E); 3273 (void)BO; 3274 assert( 3275 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) && 3276 "first operand must be a matrix"); 3277 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() && 3278 "second operand must be an arithmetic type"); 3279 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); 3280 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS, 3281 Ops.Ty->hasUnsignedIntegerRepresentation()); 3282 } 3283 3284 if (Ops.LHS->getType()->isFPOrFPVectorTy()) { 3285 llvm::Value *Val; 3286 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); 3287 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div"); 3288 if ((CGF.getLangOpts().OpenCL && 3289 !CGF.CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) || 3290 (CGF.getLangOpts().HIP && CGF.getLangOpts().CUDAIsDevice && 3291 !CGF.CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) { 3292 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp 3293 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt 3294 // build option allows an application to specify that single precision 3295 // floating-point divide (x/y and 1/x) and sqrt used in the program 3296 // source are correctly rounded. 3297 llvm::Type *ValTy = Val->getType(); 3298 if (ValTy->isFloatTy() || 3299 (isa<llvm::VectorType>(ValTy) && 3300 cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy())) 3301 CGF.SetFPAccuracy(Val, 2.5); 3302 } 3303 return Val; 3304 } 3305 else if (Ops.isFixedPointOp()) 3306 return EmitFixedPointBinOp(Ops); 3307 else if (Ops.Ty->hasUnsignedIntegerRepresentation()) 3308 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div"); 3309 else 3310 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div"); 3311 } 3312 3313 Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) { 3314 // Rem in C can't be a floating point type: C99 6.5.5p2. 3315 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) || 3316 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) && 3317 Ops.Ty->isIntegerType() && 3318 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) { 3319 CodeGenFunction::SanitizerScope SanScope(&CGF); 3320 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); 3321 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false); 3322 } 3323 3324 if (Ops.Ty->hasUnsignedIntegerRepresentation()) 3325 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem"); 3326 else 3327 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem"); 3328 } 3329 3330 Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { 3331 unsigned IID; 3332 unsigned OpID = 0; 3333 SanitizerHandler OverflowKind; 3334 3335 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType(); 3336 switch (Ops.Opcode) { 3337 case BO_Add: 3338 case BO_AddAssign: 3339 OpID = 1; 3340 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow : 3341 llvm::Intrinsic::uadd_with_overflow; 3342 OverflowKind = SanitizerHandler::AddOverflow; 3343 break; 3344 case BO_Sub: 3345 case BO_SubAssign: 3346 OpID = 2; 3347 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow : 3348 llvm::Intrinsic::usub_with_overflow; 3349 OverflowKind = SanitizerHandler::SubOverflow; 3350 break; 3351 case BO_Mul: 3352 case BO_MulAssign: 3353 OpID = 3; 3354 IID = isSigned ? 
llvm::Intrinsic::smul_with_overflow : 3355 llvm::Intrinsic::umul_with_overflow; 3356 OverflowKind = SanitizerHandler::MulOverflow; 3357 break; 3358 default: 3359 llvm_unreachable("Unsupported operation for overflow detection"); 3360 } 3361 OpID <<= 1; 3362 if (isSigned) 3363 OpID |= 1; 3364 3365 CodeGenFunction::SanitizerScope SanScope(&CGF); 3366 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty); 3367 3368 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy); 3369 3370 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS}); 3371 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0); 3372 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1); 3373 3374 // Handle overflow with llvm.trap if no custom handler has been specified. 3375 const std::string *handlerName = 3376 &CGF.getLangOpts().OverflowHandler; 3377 if (handlerName->empty()) { 3378 // If the signed-integer-overflow sanitizer is enabled, emit a call to its 3379 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap. 3380 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) { 3381 llvm::Value *NotOverflow = Builder.CreateNot(overflow); 3382 SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow 3383 : SanitizerKind::UnsignedIntegerOverflow; 3384 EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops); 3385 } else 3386 CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind); 3387 return result; 3388 } 3389 3390 // Branch in case of overflow. 3391 llvm::BasicBlock *initialBB = Builder.GetInsertBlock(); 3392 llvm::BasicBlock *continueBB = 3393 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode()); 3394 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn); 3395 3396 Builder.CreateCondBr(overflow, overflowBB, continueBB); 3397 3398 // If an overflow handler is set, then we want to call it and then use its 3399 // result, if it returns. 3400 Builder.SetInsertPoint(overflowBB); 3401 3402 // Get the overflow handler. 3403 llvm::Type *Int8Ty = CGF.Int8Ty; 3404 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty }; 3405 llvm::FunctionType *handlerTy = 3406 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true); 3407 llvm::FunctionCallee handler = 3408 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName); 3409 3410 // Sign extend the args to 64-bit, so that we can use the same handler for 3411 // all types of overflow. 3412 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty); 3413 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty); 3414 3415 // Call the handler with the two arguments, the operation, and the size of 3416 // the result. 3417 llvm::Value *handlerArgs[] = { 3418 lhs, 3419 rhs, 3420 Builder.getInt8(OpID), 3421 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth()) 3422 }; 3423 llvm::Value *handlerResult = 3424 CGF.EmitNounwindRuntimeCall(handler, handlerArgs); 3425 3426 // Truncate the result back to the desired size. 3427 handlerResult = Builder.CreateTrunc(handlerResult, opTy); 3428 Builder.CreateBr(continueBB); 3429 3430 Builder.SetInsertPoint(continueBB); 3431 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2); 3432 phi->addIncoming(result, initialBB); 3433 phi->addIncoming(handlerResult, overflowBB); 3434 3435 return phi; 3436 } 3437 3438 /// Emit pointer + index arithmetic. 3439 static Value *emitPointerArithmetic(CodeGenFunction &CGF, 3440 const BinOpInfo &op, 3441 bool isSubtraction) { 3442 // Must have binary (not unary) expr here. 
Unary pointer 3443 // increment/decrement doesn't use this path. 3444 const BinaryOperator *expr = cast<BinaryOperator>(op.E); 3445 3446 Value *pointer = op.LHS; 3447 Expr *pointerOperand = expr->getLHS(); 3448 Value *index = op.RHS; 3449 Expr *indexOperand = expr->getRHS(); 3450 3451 // In a subtraction, the LHS is always the pointer. 3452 if (!isSubtraction && !pointer->getType()->isPointerTy()) { 3453 std::swap(pointer, index); 3454 std::swap(pointerOperand, indexOperand); 3455 } 3456 3457 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); 3458 3459 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth(); 3460 auto &DL = CGF.CGM.getDataLayout(); 3461 auto PtrTy = cast<llvm::PointerType>(pointer->getType()); 3462 3463 // Some versions of glibc and gcc use idioms (particularly in their malloc 3464 // routines) that add a pointer-sized integer (known to be a pointer value) 3465 // to a null pointer in order to cast the value back to an integer or as 3466 // part of a pointer alignment algorithm. This is undefined behavior, but 3467 // we'd like to be able to compile programs that use it. 3468 // 3469 // Normally, we'd generate a GEP with a null-pointer base here in response 3470 // to that code, but it's also UB to dereference a pointer created that 3471 // way. Instead (as an acknowledged hack to tolerate the idiom) we will 3472 // generate a direct cast of the integer value to a pointer. 3473 // 3474 // The idiom (p = nullptr + N) is not met if any of the following are true: 3475 // 3476 // The operation is subtraction. 3477 // The index is not pointer-sized. 3478 // The pointer type is not byte-sized. 3479 // 3480 if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(), 3481 op.Opcode, 3482 expr->getLHS(), 3483 expr->getRHS())) 3484 return CGF.Builder.CreateIntToPtr(index, pointer->getType()); 3485 3486 if (width != DL.getIndexTypeSizeInBits(PtrTy)) { 3487 // Zero-extend or sign-extend the pointer value according to 3488 // whether the index is signed or not. 3489 index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned, 3490 "idx.ext"); 3491 } 3492 3493 // If this is subtraction, negate the index. 3494 if (isSubtraction) 3495 index = CGF.Builder.CreateNeg(index, "idx.neg"); 3496 3497 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds)) 3498 CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(), 3499 /*Accessed*/ false); 3500 3501 const PointerType *pointerType 3502 = pointerOperand->getType()->getAs<PointerType>(); 3503 if (!pointerType) { 3504 QualType objectType = pointerOperand->getType() 3505 ->castAs<ObjCObjectPointerType>() 3506 ->getPointeeType(); 3507 llvm::Value *objectSize 3508 = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType)); 3509 3510 index = CGF.Builder.CreateMul(index, objectSize); 3511 3512 Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy); 3513 result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr"); 3514 return CGF.Builder.CreateBitCast(result, pointer->getType()); 3515 } 3516 3517 QualType elementType = pointerType->getPointeeType(); 3518 if (const VariableArrayType *vla 3519 = CGF.getContext().getAsVariableArrayType(elementType)) { 3520 // The element count here is the total number of non-VLA elements. 3521 llvm::Value *numElements = CGF.getVLASize(vla).NumElts; 3522 3523 // Effectively, the multiply by the VLA size is part of the GEP. 
3524 // GEP indexes are signed, and scaling an index isn't permitted to 3525 // signed-overflow, so we use the same semantics for our explicit 3526 // multiply. We suppress this if overflow is not undefined behavior. 3527 llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType()); 3528 if (CGF.getLangOpts().isSignedOverflowDefined()) { 3529 index = CGF.Builder.CreateMul(index, numElements, "vla.index"); 3530 pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr"); 3531 } else { 3532 index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index"); 3533 pointer = CGF.EmitCheckedInBoundsGEP( 3534 elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(), 3535 "add.ptr"); 3536 } 3537 return pointer; 3538 } 3539 3540 // Explicitly handle GNU void* and function pointer arithmetic extensions. The 3541 // GNU void* casts amount to no-ops since our void* type is i8*, but this is 3542 // future proof. 3543 if (elementType->isVoidType() || elementType->isFunctionType()) { 3544 Value *result = CGF.EmitCastToVoidPtr(pointer); 3545 result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr"); 3546 return CGF.Builder.CreateBitCast(result, pointer->getType()); 3547 } 3548 3549 llvm::Type *elemTy = CGF.ConvertTypeForMem(elementType); 3550 if (CGF.getLangOpts().isSignedOverflowDefined()) 3551 return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr"); 3552 3553 return CGF.EmitCheckedInBoundsGEP( 3554 elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(), 3555 "add.ptr"); 3556 } 3557 3558 // Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and 3559 // Addend. Use negMul and negAdd to negate the first operand of the Mul or 3560 // the add operand respectively. This allows fmuladd to represent a*b-c, or 3561 // c-a*b. Patterns in LLVM should catch the negated forms and translate them to 3562 // efficient operations. 3563 static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, 3564 const CodeGenFunction &CGF, CGBuilderTy &Builder, 3565 bool negMul, bool negAdd) { 3566 assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set."); 3567 3568 Value *MulOp0 = MulOp->getOperand(0); 3569 Value *MulOp1 = MulOp->getOperand(1); 3570 if (negMul) 3571 MulOp0 = Builder.CreateFNeg(MulOp0, "neg"); 3572 if (negAdd) 3573 Addend = Builder.CreateFNeg(Addend, "neg"); 3574 3575 Value *FMulAdd = nullptr; 3576 if (Builder.getIsFPConstrained()) { 3577 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) && 3578 "Only constrained operation should be created when Builder is in FP " 3579 "constrained mode"); 3580 FMulAdd = Builder.CreateConstrainedFPCall( 3581 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd, 3582 Addend->getType()), 3583 {MulOp0, MulOp1, Addend}); 3584 } else { 3585 FMulAdd = Builder.CreateCall( 3586 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()), 3587 {MulOp0, MulOp1, Addend}); 3588 } 3589 MulOp->eraseFromParent(); 3590 3591 return FMulAdd; 3592 } 3593 3594 // Check whether it would be legal to emit an fmuladd intrinsic call to 3595 // represent op and if so, build the fmuladd. 3596 // 3597 // Checks that (a) the operation is fusable, and (b) -ffp-contract=on. 3598 // Does NOT check the type of the operation - it's assumed that this function 3599 // will be called from contexts where it's known that the type is contractable. 
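// A hedged illustration: with -ffp-contract=on, for
//   float r = a * b + c;
// the fmul feeding the fadd (when that fmul has no other uses) is folded into
// a single llvm.fmuladd call via buildFMulAdd above.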
3600 static Value* tryEmitFMulAdd(const BinOpInfo &op, 3601 const CodeGenFunction &CGF, CGBuilderTy &Builder, 3602 bool isSub=false) { 3603 3604 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign || 3605 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && 3606 "Only fadd/fsub can be the root of an fmuladd."); 3607 3608 // Check whether this op is marked as fusable. 3609 if (!op.FPFeatures.allowFPContractWithinStatement()) 3610 return nullptr; 3611 3612 // We have a potentially fusable op. Look for a mul on one of the operands. 3613 // Also, make sure that the mul result isn't used directly. In that case, 3614 // there's no point creating a muladd operation. 3615 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) { 3616 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul && 3617 LHSBinOp->use_empty()) 3618 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub); 3619 } 3620 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) { 3621 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul && 3622 RHSBinOp->use_empty()) 3623 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false); 3624 } 3625 3626 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) { 3627 if (LHSBinOp->getIntrinsicID() == 3628 llvm::Intrinsic::experimental_constrained_fmul && 3629 LHSBinOp->use_empty()) 3630 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub); 3631 } 3632 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) { 3633 if (RHSBinOp->getIntrinsicID() == 3634 llvm::Intrinsic::experimental_constrained_fmul && 3635 RHSBinOp->use_empty()) 3636 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false); 3637 } 3638 3639 return nullptr; 3640 } 3641 3642 Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) { 3643 if (op.LHS->getType()->isPointerTy() || 3644 op.RHS->getType()->isPointerTy()) 3645 return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction); 3646 3647 if (op.Ty->isSignedIntegerOrEnumerationType()) { 3648 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 3649 case LangOptions::SOB_Defined: 3650 return Builder.CreateAdd(op.LHS, op.RHS, "add"); 3651 case LangOptions::SOB_Undefined: 3652 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 3653 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add"); 3654 LLVM_FALLTHROUGH; 3655 case LangOptions::SOB_Trapping: 3656 if (CanElideOverflowCheck(CGF.getContext(), op)) 3657 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add"); 3658 return EmitOverflowCheckedBinOp(op); 3659 } 3660 } 3661 3662 if (op.Ty->isConstantMatrixType()) { 3663 llvm::MatrixBuilder MB(Builder); 3664 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3665 return MB.CreateAdd(op.LHS, op.RHS); 3666 } 3667 3668 if (op.Ty->isUnsignedIntegerType() && 3669 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && 3670 !CanElideOverflowCheck(CGF.getContext(), op)) 3671 return EmitOverflowCheckedBinOp(op); 3672 3673 if (op.LHS->getType()->isFPOrFPVectorTy()) { 3674 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3675 // Try to form an fmuladd. 3676 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder)) 3677 return FMulAdd; 3678 3679 return Builder.CreateFAdd(op.LHS, op.RHS, "add"); 3680 } 3681 3682 if (op.isFixedPointOp()) 3683 return EmitFixedPointBinOp(op); 3684 3685 return Builder.CreateAdd(op.LHS, op.RHS, "add"); 3686 } 3687 3688 /// The resulting value must be calculated with exact precision, so the operands 3689 /// may not be the same type. 
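/// Informal example: for `_Fract f; _Accum a; f + a;` each operand keeps its
/// own fixed-point semantics, the addition happens in a common semantic wide
/// enough to represent both exactly, and the result is converted to the
/// expression's result type at the end.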
3690 Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) { 3691 using llvm::APSInt; 3692 using llvm::ConstantInt; 3693 3694 // This is either a binary operation where at least one of the operands is 3695 // a fixed-point type, or a unary operation where the operand is a fixed-point 3696 // type. The result type of a binary operation is determined by 3697 // Sema::handleFixedPointConversions(). 3698 QualType ResultTy = op.Ty; 3699 QualType LHSTy, RHSTy; 3700 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) { 3701 RHSTy = BinOp->getRHS()->getType(); 3702 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) { 3703 // For compound assignment, the effective type of the LHS at this point 3704 // is the computation LHS type, not the actual LHS type, and the final 3705 // result type is not the type of the expression but rather the 3706 // computation result type. 3707 LHSTy = CAO->getComputationLHSType(); 3708 ResultTy = CAO->getComputationResultType(); 3709 } else 3710 LHSTy = BinOp->getLHS()->getType(); 3711 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) { 3712 LHSTy = UnOp->getSubExpr()->getType(); 3713 RHSTy = UnOp->getSubExpr()->getType(); 3714 } 3715 ASTContext &Ctx = CGF.getContext(); 3716 Value *LHS = op.LHS; 3717 Value *RHS = op.RHS; 3718 3719 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy); 3720 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy); 3721 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy); 3722 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema); 3723 3724 // Perform the actual operation. 3725 Value *Result; 3726 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder); 3727 switch (op.Opcode) { 3728 case BO_AddAssign: 3729 case BO_Add: 3730 Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema); 3731 break; 3732 case BO_SubAssign: 3733 case BO_Sub: 3734 Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema); 3735 break; 3736 case BO_MulAssign: 3737 case BO_Mul: 3738 Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema); 3739 break; 3740 case BO_DivAssign: 3741 case BO_Div: 3742 Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema); 3743 break; 3744 case BO_ShlAssign: 3745 case BO_Shl: 3746 Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS); 3747 break; 3748 case BO_ShrAssign: 3749 case BO_Shr: 3750 Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS); 3751 break; 3752 case BO_LT: 3753 return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema); 3754 case BO_GT: 3755 return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema); 3756 case BO_LE: 3757 return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema); 3758 case BO_GE: 3759 return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema); 3760 case BO_EQ: 3761 // For equality operations, we assume any padding bits on unsigned types are 3762 // zero'd out. They could be overwritten through non-saturating operations 3763 // that cause overflow, but this leads to undefined behavior. 
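      // (Informal note: e.g. an `unsigned short _Fract` with a padding bit is
      // compared below, after any conversion to common semantics, as an
      // ordinary integer comparison; nothing re-masks the padding bit, which
      // is why the zero-padding assumption above matters.)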
3764 return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema); 3765 case BO_NE: 3766 return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema); 3767 case BO_Cmp: 3768 case BO_LAnd: 3769 case BO_LOr: 3770 llvm_unreachable("Found unimplemented fixed point binary operation"); 3771 case BO_PtrMemD: 3772 case BO_PtrMemI: 3773 case BO_Rem: 3774 case BO_Xor: 3775 case BO_And: 3776 case BO_Or: 3777 case BO_Assign: 3778 case BO_RemAssign: 3779 case BO_AndAssign: 3780 case BO_XorAssign: 3781 case BO_OrAssign: 3782 case BO_Comma: 3783 llvm_unreachable("Found unsupported binary operation for fixed point types."); 3784 } 3785 3786 bool IsShift = BinaryOperator::isShiftOp(op.Opcode) || 3787 BinaryOperator::isShiftAssignOp(op.Opcode); 3788 // Convert to the result type. 3789 return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema 3790 : CommonFixedSema, 3791 ResultFixedSema); 3792 } 3793 3794 Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) { 3795 // The LHS is always a pointer if either side is. 3796 if (!op.LHS->getType()->isPointerTy()) { 3797 if (op.Ty->isSignedIntegerOrEnumerationType()) { 3798 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 3799 case LangOptions::SOB_Defined: 3800 return Builder.CreateSub(op.LHS, op.RHS, "sub"); 3801 case LangOptions::SOB_Undefined: 3802 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 3803 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub"); 3804 LLVM_FALLTHROUGH; 3805 case LangOptions::SOB_Trapping: 3806 if (CanElideOverflowCheck(CGF.getContext(), op)) 3807 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub"); 3808 return EmitOverflowCheckedBinOp(op); 3809 } 3810 } 3811 3812 if (op.Ty->isConstantMatrixType()) { 3813 llvm::MatrixBuilder MB(Builder); 3814 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3815 return MB.CreateSub(op.LHS, op.RHS); 3816 } 3817 3818 if (op.Ty->isUnsignedIntegerType() && 3819 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && 3820 !CanElideOverflowCheck(CGF.getContext(), op)) 3821 return EmitOverflowCheckedBinOp(op); 3822 3823 if (op.LHS->getType()->isFPOrFPVectorTy()) { 3824 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3825 // Try to form an fmuladd. 3826 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true)) 3827 return FMulAdd; 3828 return Builder.CreateFSub(op.LHS, op.RHS, "sub"); 3829 } 3830 3831 if (op.isFixedPointOp()) 3832 return EmitFixedPointBinOp(op); 3833 3834 return Builder.CreateSub(op.LHS, op.RHS, "sub"); 3835 } 3836 3837 // If the RHS is not a pointer, then we have normal pointer 3838 // arithmetic. 3839 if (!op.RHS->getType()->isPointerTy()) 3840 return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction); 3841 3842 // Otherwise, this is a pointer subtraction. 3843 3844 // Do the raw subtraction part. 3845 llvm::Value *LHS 3846 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast"); 3847 llvm::Value *RHS 3848 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast"); 3849 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub"); 3850 3851 // Okay, figure out the element size. 3852 const BinaryOperator *expr = cast<BinaryOperator>(op.E); 3853 QualType elementType = expr->getLHS()->getType()->getPointeeType(); 3854 3855 llvm::Value *divisor = nullptr; 3856 3857 // For a variable-length array, this is going to be non-constant. 
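  // Hedged sketch of the overall result: for plain `int *p, *q`, `p - q`
  // divides the byte difference computed above by sizeof(int) using an exact
  // sdiv; when the pointee is a VLA type, the divisor is instead computed
  // dynamically just below.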
3858 if (const VariableArrayType *vla 3859 = CGF.getContext().getAsVariableArrayType(elementType)) { 3860 auto VlaSize = CGF.getVLASize(vla); 3861 elementType = VlaSize.Type; 3862 divisor = VlaSize.NumElts; 3863 3864 // Scale the number of non-VLA elements by the non-VLA element size. 3865 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType); 3866 if (!eltSize.isOne()) 3867 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor); 3868 3869 // For everything elese, we can just compute it, safe in the 3870 // assumption that Sema won't let anything through that we can't 3871 // safely compute the size of. 3872 } else { 3873 CharUnits elementSize; 3874 // Handle GCC extension for pointer arithmetic on void* and 3875 // function pointer types. 3876 if (elementType->isVoidType() || elementType->isFunctionType()) 3877 elementSize = CharUnits::One(); 3878 else 3879 elementSize = CGF.getContext().getTypeSizeInChars(elementType); 3880 3881 // Don't even emit the divide for element size of 1. 3882 if (elementSize.isOne()) 3883 return diffInChars; 3884 3885 divisor = CGF.CGM.getSize(elementSize); 3886 } 3887 3888 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since 3889 // pointer difference in C is only defined in the case where both operands 3890 // are pointing to elements of an array. 3891 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div"); 3892 } 3893 3894 Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) { 3895 llvm::IntegerType *Ty; 3896 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType())) 3897 Ty = cast<llvm::IntegerType>(VT->getElementType()); 3898 else 3899 Ty = cast<llvm::IntegerType>(LHS->getType()); 3900 return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1); 3901 } 3902 3903 Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS, 3904 const Twine &Name) { 3905 llvm::IntegerType *Ty; 3906 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType())) 3907 Ty = cast<llvm::IntegerType>(VT->getElementType()); 3908 else 3909 Ty = cast<llvm::IntegerType>(LHS->getType()); 3910 3911 if (llvm::isPowerOf2_64(Ty->getBitWidth())) 3912 return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name); 3913 3914 return Builder.CreateURem( 3915 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name); 3916 } 3917 3918 Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) { 3919 // TODO: This misses out on the sanitizer check below. 3920 if (Ops.isFixedPointOp()) 3921 return EmitFixedPointBinOp(Ops); 3922 3923 // LLVM requires the LHS and RHS to be the same type: promote or truncate the 3924 // RHS to the same size as the LHS. 3925 Value *RHS = Ops.RHS; 3926 if (Ops.LHS->getType() != RHS->getType()) 3927 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); 3928 3929 bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) && 3930 Ops.Ty->hasSignedIntegerRepresentation() && 3931 !CGF.getLangOpts().isSignedOverflowDefined() && 3932 !CGF.getLangOpts().CPlusPlus20; 3933 bool SanitizeUnsignedBase = 3934 CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) && 3935 Ops.Ty->hasUnsignedIntegerRepresentation(); 3936 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase; 3937 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent); 3938 // OpenCL 6.3j: shift values are effectively % word size of LHS. 
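  // Informal example: in OpenCL, shifting a 32-bit int left by 35 behaves as
  // a shift by 35 & 31 == 3; ConstrainShiftValue implements this with an
  // `and` for power-of-two bit widths and a urem otherwise.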
3939 if (CGF.getLangOpts().OpenCL) 3940 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask"); 3941 else if ((SanitizeBase || SanitizeExponent) && 3942 isa<llvm::IntegerType>(Ops.LHS->getType())) { 3943 CodeGenFunction::SanitizerScope SanScope(&CGF); 3944 SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks; 3945 llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS); 3946 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne); 3947 3948 if (SanitizeExponent) { 3949 Checks.push_back( 3950 std::make_pair(ValidExponent, SanitizerKind::ShiftExponent)); 3951 } 3952 3953 if (SanitizeBase) { 3954 // Check whether we are shifting any non-zero bits off the top of the 3955 // integer. We only emit this check if exponent is valid - otherwise 3956 // instructions below will have undefined behavior themselves. 3957 llvm::BasicBlock *Orig = Builder.GetInsertBlock(); 3958 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 3959 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check"); 3960 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont); 3961 llvm::Value *PromotedWidthMinusOne = 3962 (RHS == Ops.RHS) ? WidthMinusOne 3963 : GetWidthMinusOneValue(Ops.LHS, RHS); 3964 CGF.EmitBlock(CheckShiftBase); 3965 llvm::Value *BitsShiftedOff = Builder.CreateLShr( 3966 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros", 3967 /*NUW*/ true, /*NSW*/ true), 3968 "shl.check"); 3969 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) { 3970 // In C99, we are not permitted to shift a 1 bit into the sign bit. 3971 // Under C++11's rules, shifting a 1 bit into the sign bit is 3972 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't 3973 // define signed left shifts, so we use the C99 and C++11 rules there). 3974 // Unsigned shifts can always shift into the top bit. 3975 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1); 3976 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One); 3977 } 3978 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0); 3979 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero); 3980 CGF.EmitBlock(Cont); 3981 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2); 3982 BaseCheck->addIncoming(Builder.getTrue(), Orig); 3983 BaseCheck->addIncoming(ValidBase, CheckShiftBase); 3984 Checks.push_back(std::make_pair( 3985 BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase 3986 : SanitizerKind::UnsignedShiftBase)); 3987 } 3988 3989 assert(!Checks.empty()); 3990 EmitBinOpCheck(Checks, Ops); 3991 } 3992 3993 return Builder.CreateShl(Ops.LHS, RHS, "shl"); 3994 } 3995 3996 Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) { 3997 // TODO: This misses out on the sanitizer check below. 3998 if (Ops.isFixedPointOp()) 3999 return EmitFixedPointBinOp(Ops); 4000 4001 // LLVM requires the LHS and RHS to be the same type: promote or truncate the 4002 // RHS to the same size as the LHS. 4003 Value *RHS = Ops.RHS; 4004 if (Ops.LHS->getType() != RHS->getType()) 4005 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); 4006 4007 // OpenCL 6.3j: shift values are effectively % word size of LHS. 
4008 if (CGF.getLangOpts().OpenCL) 4009 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask"); 4010 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) && 4011 isa<llvm::IntegerType>(Ops.LHS->getType())) { 4012 CodeGenFunction::SanitizerScope SanScope(&CGF); 4013 llvm::Value *Valid = 4014 Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS)); 4015 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops); 4016 } 4017 4018 if (Ops.Ty->hasUnsignedIntegerRepresentation()) 4019 return Builder.CreateLShr(Ops.LHS, RHS, "shr"); 4020 return Builder.CreateAShr(Ops.LHS, RHS, "shr"); 4021 } 4022 4023 enum IntrinsicType { VCMPEQ, VCMPGT }; 4024 // return corresponding comparison intrinsic for given vector type 4025 static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, 4026 BuiltinType::Kind ElemKind) { 4027 switch (ElemKind) { 4028 default: llvm_unreachable("unexpected element type"); 4029 case BuiltinType::Char_U: 4030 case BuiltinType::UChar: 4031 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p : 4032 llvm::Intrinsic::ppc_altivec_vcmpgtub_p; 4033 case BuiltinType::Char_S: 4034 case BuiltinType::SChar: 4035 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p : 4036 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p; 4037 case BuiltinType::UShort: 4038 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p : 4039 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p; 4040 case BuiltinType::Short: 4041 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p : 4042 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p; 4043 case BuiltinType::UInt: 4044 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p : 4045 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p; 4046 case BuiltinType::Int: 4047 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p : 4048 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p; 4049 case BuiltinType::ULong: 4050 case BuiltinType::ULongLong: 4051 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p : 4052 llvm::Intrinsic::ppc_altivec_vcmpgtud_p; 4053 case BuiltinType::Long: 4054 case BuiltinType::LongLong: 4055 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p : 4056 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p; 4057 case BuiltinType::Float: 4058 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p : 4059 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p; 4060 case BuiltinType::Double: 4061 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p : 4062 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p; 4063 case BuiltinType::UInt128: 4064 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p 4065 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p; 4066 case BuiltinType::Int128: 4067 return (IT == VCMPEQ) ? 
llvm::Intrinsic::ppc_altivec_vcmpequq_p 4068 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p; 4069 } 4070 } 4071 4072 Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E, 4073 llvm::CmpInst::Predicate UICmpOpc, 4074 llvm::CmpInst::Predicate SICmpOpc, 4075 llvm::CmpInst::Predicate FCmpOpc, 4076 bool IsSignaling) { 4077 TestAndClearIgnoreResultAssign(); 4078 Value *Result; 4079 QualType LHSTy = E->getLHS()->getType(); 4080 QualType RHSTy = E->getRHS()->getType(); 4081 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) { 4082 assert(E->getOpcode() == BO_EQ || 4083 E->getOpcode() == BO_NE); 4084 Value *LHS = CGF.EmitScalarExpr(E->getLHS()); 4085 Value *RHS = CGF.EmitScalarExpr(E->getRHS()); 4086 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison( 4087 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE); 4088 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) { 4089 BinOpInfo BOInfo = EmitBinOps(E); 4090 Value *LHS = BOInfo.LHS; 4091 Value *RHS = BOInfo.RHS; 4092 4093 // If AltiVec, the comparison results in a numeric type, so we use 4094 // intrinsics comparing vectors and giving 0 or 1 as a result 4095 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) { 4096 // constants for mapping CR6 register bits to predicate result 4097 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6; 4098 4099 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic; 4100 4101 // in several cases vector arguments order will be reversed 4102 Value *FirstVecArg = LHS, 4103 *SecondVecArg = RHS; 4104 4105 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType(); 4106 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind(); 4107 4108 switch(E->getOpcode()) { 4109 default: llvm_unreachable("is not a comparison operation"); 4110 case BO_EQ: 4111 CR6 = CR6_LT; 4112 ID = GetIntrinsic(VCMPEQ, ElementKind); 4113 break; 4114 case BO_NE: 4115 CR6 = CR6_EQ; 4116 ID = GetIntrinsic(VCMPEQ, ElementKind); 4117 break; 4118 case BO_LT: 4119 CR6 = CR6_LT; 4120 ID = GetIntrinsic(VCMPGT, ElementKind); 4121 std::swap(FirstVecArg, SecondVecArg); 4122 break; 4123 case BO_GT: 4124 CR6 = CR6_LT; 4125 ID = GetIntrinsic(VCMPGT, ElementKind); 4126 break; 4127 case BO_LE: 4128 if (ElementKind == BuiltinType::Float) { 4129 CR6 = CR6_LT; 4130 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p; 4131 std::swap(FirstVecArg, SecondVecArg); 4132 } 4133 else { 4134 CR6 = CR6_EQ; 4135 ID = GetIntrinsic(VCMPGT, ElementKind); 4136 } 4137 break; 4138 case BO_GE: 4139 if (ElementKind == BuiltinType::Float) { 4140 CR6 = CR6_LT; 4141 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p; 4142 } 4143 else { 4144 CR6 = CR6_EQ; 4145 ID = GetIntrinsic(VCMPGT, ElementKind); 4146 std::swap(FirstVecArg, SecondVecArg); 4147 } 4148 break; 4149 } 4150 4151 Value *CR6Param = Builder.getInt32(CR6); 4152 llvm::Function *F = CGF.CGM.getIntrinsic(ID); 4153 Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg}); 4154 4155 // The result type of intrinsic may not be same as E->getType(). 4156 // If E->getType() is not BoolTy, EmitScalarConversion will do the 4157 // conversion work. If E->getType() is BoolTy, EmitScalarConversion will 4158 // do nothing, if ResultTy is not i1 at the same time, it will cause 4159 // crash later. 
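      // Rough illustration: the ppc_altivec_*_p predicate intrinsics return
      // an i32, so when the comparison expression itself has scalar bool type
      // (as it can in some AltiVec source-compatibility modes), the result is
      // truncated to i1 below before the final scalar conversion.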
4160 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType()); 4161 if (ResultTy->getBitWidth() > 1 && 4162 E->getType() == CGF.getContext().BoolTy) 4163 Result = Builder.CreateTrunc(Result, Builder.getInt1Ty()); 4164 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(), 4165 E->getExprLoc()); 4166 } 4167 4168 if (BOInfo.isFixedPointOp()) { 4169 Result = EmitFixedPointBinOp(BOInfo); 4170 } else if (LHS->getType()->isFPOrFPVectorTy()) { 4171 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures); 4172 if (!IsSignaling) 4173 Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp"); 4174 else 4175 Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp"); 4176 } else if (LHSTy->hasSignedIntegerRepresentation()) { 4177 Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp"); 4178 } else { 4179 // Unsigned integers and pointers. 4180 4181 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers && 4182 !isa<llvm::ConstantPointerNull>(LHS) && 4183 !isa<llvm::ConstantPointerNull>(RHS)) { 4184 4185 // Dynamic information is required to be stripped for comparisons, 4186 // because it could leak the dynamic information. Based on comparisons 4187 // of pointers to dynamic objects, the optimizer can replace one pointer 4188 // with another, which might be incorrect in presence of invariant 4189 // groups. Comparison with null is safe because null does not carry any 4190 // dynamic information. 4191 if (LHSTy.mayBeDynamicClass()) 4192 LHS = Builder.CreateStripInvariantGroup(LHS); 4193 if (RHSTy.mayBeDynamicClass()) 4194 RHS = Builder.CreateStripInvariantGroup(RHS); 4195 } 4196 4197 Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp"); 4198 } 4199 4200 // If this is a vector comparison, sign extend the result to the appropriate 4201 // vector integer type and return it (don't convert to bool). 4202 if (LHSTy->isVectorType()) 4203 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); 4204 4205 } else { 4206 // Complex Comparison: can only be an equality comparison. 4207 CodeGenFunction::ComplexPairTy LHS, RHS; 4208 QualType CETy; 4209 if (auto *CTy = LHSTy->getAs<ComplexType>()) { 4210 LHS = CGF.EmitComplexExpr(E->getLHS()); 4211 CETy = CTy->getElementType(); 4212 } else { 4213 LHS.first = Visit(E->getLHS()); 4214 LHS.second = llvm::Constant::getNullValue(LHS.first->getType()); 4215 CETy = LHSTy; 4216 } 4217 if (auto *CTy = RHSTy->getAs<ComplexType>()) { 4218 RHS = CGF.EmitComplexExpr(E->getRHS()); 4219 assert(CGF.getContext().hasSameUnqualifiedType(CETy, 4220 CTy->getElementType()) && 4221 "The element types must always match."); 4222 (void)CTy; 4223 } else { 4224 RHS.first = Visit(E->getRHS()); 4225 RHS.second = llvm::Constant::getNullValue(RHS.first->getType()); 4226 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) && 4227 "The element types must always match."); 4228 } 4229 4230 Value *ResultR, *ResultI; 4231 if (CETy->isRealFloatingType()) { 4232 // As complex comparisons can only be equality comparisons, they 4233 // are never signaling comparisons. 4234 ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r"); 4235 ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i"); 4236 } else { 4237 // Complex comparisons can only be equality comparisons. As such, signed 4238 // and unsigned opcodes are the same. 
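        // Sketch: for `_Complex int a, b;`, `a == b` compares the real and
        // imaginary parts separately here and combines them with `and` just
        // below (or with `or` of the per-part != results for `a != b`).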
4239 ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r"); 4240 ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i"); 4241 } 4242 4243 if (E->getOpcode() == BO_EQ) { 4244 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri"); 4245 } else { 4246 assert(E->getOpcode() == BO_NE && 4247 "Complex comparison other than == or != ?"); 4248 Result = Builder.CreateOr(ResultR, ResultI, "or.ri"); 4249 } 4250 } 4251 4252 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(), 4253 E->getExprLoc()); 4254 } 4255 4256 Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { 4257 bool Ignore = TestAndClearIgnoreResultAssign(); 4258 4259 Value *RHS; 4260 LValue LHS; 4261 4262 switch (E->getLHS()->getType().getObjCLifetime()) { 4263 case Qualifiers::OCL_Strong: 4264 std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore); 4265 break; 4266 4267 case Qualifiers::OCL_Autoreleasing: 4268 std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E); 4269 break; 4270 4271 case Qualifiers::OCL_ExplicitNone: 4272 std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore); 4273 break; 4274 4275 case Qualifiers::OCL_Weak: 4276 RHS = Visit(E->getRHS()); 4277 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); 4278 RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore); 4279 break; 4280 4281 case Qualifiers::OCL_None: 4282 // __block variables need to have the rhs evaluated first, plus 4283 // this should improve codegen just a little. 4284 RHS = Visit(E->getRHS()); 4285 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); 4286 4287 // Store the value into the LHS. Bit-fields are handled specially 4288 // because the result is altered by the store, i.e., [C99 6.5.16p1] 4289 // 'An assignment expression has the value of the left operand after 4290 // the assignment...'. 4291 if (LHS.isBitField()) { 4292 CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS); 4293 } else { 4294 CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc()); 4295 CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS); 4296 } 4297 } 4298 4299 // If the result is clearly ignored, return now. 4300 if (Ignore) 4301 return nullptr; 4302 4303 // The result of an assignment in C is the assigned r-value. 4304 if (!CGF.getLangOpts().CPlusPlus) 4305 return RHS; 4306 4307 // If the lvalue is non-volatile, return the computed value of the assignment. 4308 if (!LHS.isVolatileQualified()) 4309 return RHS; 4310 4311 // Otherwise, reload the value. 4312 return EmitLoadOfLValue(LHS, E->getExprLoc()); 4313 } 4314 4315 Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { 4316 // Perform vector logical and on comparisons with zero vectors. 
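  // Rough example: when Sema gives `a && b` a vector type (GCC/ext_vector
  // operands), each operand is compared != 0 elementwise, the i1 results are
  // and'ed, and the result is sign-extended so true lanes become -1.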
4317 if (E->getType()->isVectorType()) {
4318 CGF.incrementProfileCounter(E);
4319
4320 Value *LHS = Visit(E->getLHS());
4321 Value *RHS = Visit(E->getRHS());
4322 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4323 if (LHS->getType()->isFPOrFPVectorTy()) {
4324 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4325 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4326 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4327 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4328 } else {
4329 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4330 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4331 }
4332 Value *And = Builder.CreateAnd(LHS, RHS);
4333 return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
4334 }
4335
4336 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4337 llvm::Type *ResTy = ConvertType(E->getType());
4338
4339 // If we have 0 && RHS, see if we can elide RHS; if so, just return 0.
4340 // If we have 1 && X, just emit X without inserting the control flow.
4341 bool LHSCondVal;
4342 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4343 if (LHSCondVal) { // If we have 1 && X, just emit X.
4344 CGF.incrementProfileCounter(E);
4345
4346 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4347
4348 // If we're generating for profiling or coverage, generate a branch to a
4349 // block that increments the RHS counter needed to track branch condition
4350 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4351 // "FalseBlock" after the increment is done.
4352 if (InstrumentRegions &&
4353 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4354 llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
4355 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4356 Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
4357 CGF.EmitBlock(RHSBlockCnt);
4358 CGF.incrementProfileCounter(E->getRHS());
4359 CGF.EmitBranch(FBlock);
4360 CGF.EmitBlock(FBlock);
4361 }
4362
4363 // ZExt result to int or bool.
4364 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
4365 }
4366
4367 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
4368 if (!CGF.ContainsLabel(E->getRHS()))
4369 return llvm::Constant::getNullValue(ResTy);
4370 }
4371
4372 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
4373 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
4374
4375 CodeGenFunction::ConditionalEvaluation eval(CGF);
4376
4377 // Branch on the LHS first. If it is false, go to the failure (cont) block.
4378 CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
4379 CGF.getProfileCount(E->getRHS()));
4380
4381 // Any edges into the ContBlock are now from an (indeterminate number of)
4382 // branches out of this first condition. All of these values will be false. Start
4383 // setting up the PHI node in the Cont Block for this.
4384 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4385 "", ContBlock);
4386 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4387 PI != PE; ++PI)
4388 PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
4389
4390 eval.begin(CGF);
4391 CGF.EmitBlock(RHSBlock);
4392 CGF.incrementProfileCounter(E);
4393 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4394 eval.end(CGF);
4395
4396 // Reacquire the RHS block, as there may be subblocks inserted.
4397 RHSBlock = Builder.GetInsertBlock();
4398
4399 // If we're generating for profiling or coverage, generate a branch on the
4400 // RHS to a block that increments the RHS true counter needed to track branch
4401 // condition coverage.
4402 if (InstrumentRegions &&
4403 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4404 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4405 Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
4406 CGF.EmitBlock(RHSBlockCnt);
4407 CGF.incrementProfileCounter(E->getRHS());
4408 CGF.EmitBranch(ContBlock);
4409 PN->addIncoming(RHSCond, RHSBlockCnt);
4410 }
4411
4412 // Emit an unconditional branch from this block to ContBlock.
4413 {
4414 // There is no need to emit a line number for an unconditional branch.
4415 auto NL = ApplyDebugLocation::CreateEmpty(CGF);
4416 CGF.EmitBlock(ContBlock);
4417 }
4418 // Insert an entry into the phi node for the edge with the value of RHSCond.
4419 PN->addIncoming(RHSCond, RHSBlock);
4420
4421 // Artificial location to preserve the scope information.
4422 {
4423 auto NL = ApplyDebugLocation::CreateArtificial(CGF);
4424 PN->setDebugLoc(Builder.getCurrentDebugLocation());
4425 }
4426
4427 // ZExt result to int.
4428 return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
4429 }
4430
4431 Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
4432 // Perform vector logical or on comparisons with zero vectors.
4433 if (E->getType()->isVectorType()) {
4434 CGF.incrementProfileCounter(E);
4435
4436 Value *LHS = Visit(E->getLHS());
4437 Value *RHS = Visit(E->getRHS());
4438 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4439 if (LHS->getType()->isFPOrFPVectorTy()) {
4440 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4441 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4442 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4443 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4444 } else {
4445 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4446 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4447 }
4448 Value *Or = Builder.CreateOr(LHS, RHS);
4449 return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
4450 }
4451
4452 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4453 llvm::Type *ResTy = ConvertType(E->getType());
4454
4455 // If we have 1 || RHS, see if we can elide RHS; if so, just return 1.
4456 // If we have 0 || X, just emit X without inserting the control flow.
4457 bool LHSCondVal;
4458 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4459 if (!LHSCondVal) { // If we have 0 || X, just emit X.
4460 CGF.incrementProfileCounter(E);
4461
4462 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4463
4464 // If we're generating for profiling or coverage, generate a branch to a
4465 // block that increments the RHS counter needed to track branch condition
4466 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4467 // "FalseBlock" after the increment is done.
4468 if (InstrumentRegions &&
4469 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4470 llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
4471 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4472 Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
4473 CGF.EmitBlock(RHSBlockCnt);
4474 CGF.incrementProfileCounter(E->getRHS());
4475 CGF.EmitBranch(FBlock);
4476 CGF.EmitBlock(FBlock);
4477 }
4478
4479 // ZExt result to int or bool.
4480 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
4481 }
4482
4483 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
4484 if (!CGF.ContainsLabel(E->getRHS()))
4485 return llvm::ConstantInt::get(ResTy, 1);
4486 }
4487
4488 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
4489 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
4490
4491 CodeGenFunction::ConditionalEvaluation eval(CGF);
4492
4493 // Branch on the LHS first. If it is true, go to the success (cont) block.
4494 CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
4495 CGF.getCurrentProfileCount() -
4496 CGF.getProfileCount(E->getRHS()));
4497
4498 // Any edges into the ContBlock are now from an (indeterminate number of)
4499 // branches out of this first condition. All of these values will be true. Start
4500 // setting up the PHI node in the Cont Block for this.
4501 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4502 "", ContBlock);
4503 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4504 PI != PE; ++PI)
4505 PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
4506
4507 eval.begin(CGF);
4508
4509 // Emit the RHS condition as a bool value.
4510 CGF.EmitBlock(RHSBlock);
4511 CGF.incrementProfileCounter(E);
4512 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4513
4514 eval.end(CGF);
4515
4516 // Reacquire the RHS block, as there may be subblocks inserted.
4517 RHSBlock = Builder.GetInsertBlock();
4518
4519 // If we're generating for profiling or coverage, generate a branch on the
4520 // RHS to a block that increments the RHS true counter needed to track branch
4521 // condition coverage.
4522 if (InstrumentRegions &&
4523 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4524 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4525 Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
4526 CGF.EmitBlock(RHSBlockCnt);
4527 CGF.incrementProfileCounter(E->getRHS());
4528 CGF.EmitBranch(ContBlock);
4529 PN->addIncoming(RHSCond, RHSBlockCnt);
4530 }
4531
4532 // Emit an unconditional branch from this block to ContBlock. Insert an entry
4533 // into the phi node for the edge with the value of RHSCond.
4534 CGF.EmitBlock(ContBlock);
4535 PN->addIncoming(RHSCond, RHSBlock);
4536
4537 // ZExt result to int.
4538 return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
4539 }
4540
4541 Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
4542 CGF.EmitIgnoredExpr(E->getLHS());
4543 CGF.EnsureInsertPoint();
4544 return Visit(E->getRHS());
4545 }
4546
4547 //===----------------------------------------------------------------------===//
4548 // Other Operators
4549 //===----------------------------------------------------------------------===//
4550
4551 /// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
4552 /// expression is cheap enough and side-effect-free enough to evaluate
4553 /// unconditionally instead of conditionally. This is used to convert control
4554 /// flow into selects in some cases.
4555 static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
4556 CodeGenFunction &CGF) {
4557 // Anything that is an integer or floating point constant is fine.
4558 return E->IgnoreParens()->isEvaluatable(CGF.getContext());
4559
4560 // Even non-volatile automatic variables can't be evaluated unconditionally.
4561 // Referencing a thread_local may cause non-trivial initialization work to
4562 // occur. If we're inside a lambda and one of the variables is from the scope
4563 // outside the lambda, that function may have returned already. Reading its
4564 // locals is a bad idea. Also, these reads may introduce races that didn't
4565 // exist in the source-level program.
4566 }
4567
4568
4569 Value *ScalarExprEmitter::
4570 VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
4571 TestAndClearIgnoreResultAssign();
4572
4573 // Bind the common expression if necessary.
4574 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
4575
4576 Expr *condExpr = E->getCond();
4577 Expr *lhsExpr = E->getTrueExpr();
4578 Expr *rhsExpr = E->getFalseExpr();
4579
4580 // If the condition constant folds and can be elided, try to avoid emitting
4581 // the condition and the dead arm.
4582 bool CondExprBool;
4583 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4584 Expr *live = lhsExpr, *dead = rhsExpr;
4585 if (!CondExprBool) std::swap(live, dead);
4586
4587 // If the dead side doesn't have labels we need, just emit the live part.
4588 if (!CGF.ContainsLabel(dead)) {
4589 if (CondExprBool)
4590 CGF.incrementProfileCounter(E);
4591 Value *Result = Visit(live);
4592
4593 // If the live part is a throw expression, it acts like it has a void
4594 // type, so evaluating it returns a null Value*. However, a conditional
4595 // with non-void type must return a non-null Value*.
4596 if (!Result && !E->getType()->isVoidType())
4597 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
4598
4599 return Result;
4600 }
4601 }
4602
4603 // OpenCL: If the condition is a vector, we can treat this condition like
4604 // the select function.
4605 if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
4606 condExpr->getType()->isExtVectorType()) {
4607 CGF.incrementProfileCounter(E);
4608
4609 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4610 llvm::Value *LHS = Visit(lhsExpr);
4611 llvm::Value *RHS = Visit(rhsExpr);
4612
4613 llvm::Type *condType = ConvertType(condExpr->getType());
4614 auto *vecTy = cast<llvm::FixedVectorType>(condType);
4615
4616 unsigned numElem = vecTy->getNumElements();
4617 llvm::Type *elemType = vecTy->getElementType();
4618
4619 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
4620 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
4621 llvm::Value *tmp = Builder.CreateSExt(
4622 TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
4623 llvm::Value *tmp2 = Builder.CreateNot(tmp);
4624
4625 // Cast float to int to perform ANDs if necessary.
4626 llvm::Value *RHSTmp = RHS;
4627 llvm::Value *LHSTmp = LHS;
4628 bool wasCast = false;
4629 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
4630 if (rhsVTy->getElementType()->isFloatingPointTy()) {
4631 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
4632 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
4633 wasCast = true;
4634 }
4635
4636 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
4637 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
4638 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
4639 if (wasCast)
4640 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
4641
4642 return tmp5;
4643 }
4644
4645 if (condExpr->getType()->isVectorType() ||
4646 condExpr->getType()->isVLSTBuiltinType()) {
4647 CGF.incrementProfileCounter(E);
4648
4649 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4650 llvm::Value *LHS = Visit(lhsExpr);
4651 llvm::Value *RHS = Visit(rhsExpr);
4652
4653 llvm::Type *CondType = ConvertType(condExpr->getType());
4654 auto *VecTy = cast<llvm::VectorType>(CondType);
4655 llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
4656
4657 CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
4658 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
4659 }
4660
4661 // If this is a really simple expression (like x ? 4 : 5), emit this as a
4662 // select instead of as control flow. We can only do this if it is cheap and
4663 // safe to evaluate the LHS and RHS unconditionally.
4664 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
4665 isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
4666 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
4667 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
4668
4669 CGF.incrementProfileCounter(E, StepV);
4670
4671 llvm::Value *LHS = Visit(lhsExpr);
4672 llvm::Value *RHS = Visit(rhsExpr);
4673 if (!LHS) {
4674 // If the conditional has void type, make sure we return a null Value*.
4675 assert(!RHS && "LHS and RHS types must match");
4676 return nullptr;
4677 }
4678 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
4679 }
4680
4681 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
4682 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
4683 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
4684
4685 CodeGenFunction::ConditionalEvaluation eval(CGF);
4686 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
4687 CGF.getProfileCount(lhsExpr));
4688
4689 CGF.EmitBlock(LHSBlock);
4690 CGF.incrementProfileCounter(E);
4691 eval.begin(CGF);
4692 Value *LHS = Visit(lhsExpr);
4693 eval.end(CGF);
4694
4695 LHSBlock = Builder.GetInsertBlock();
4696 Builder.CreateBr(ContBlock);
4697
4698 CGF.EmitBlock(RHSBlock);
4699 eval.begin(CGF);
4700 Value *RHS = Visit(rhsExpr);
4701 eval.end(CGF);
4702
4703 RHSBlock = Builder.GetInsertBlock();
4704 CGF.EmitBlock(ContBlock);
4705
4706 // If the LHS or RHS is a throw expression, it will be legitimately null.
4707 if (!LHS)
4708 return RHS;
4709 if (!RHS)
4710 return LHS;
4711
4712 // Create a PHI node to merge the result value from each arm.
4713 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond"); 4714 PN->addIncoming(LHS, LHSBlock); 4715 PN->addIncoming(RHS, RHSBlock); 4716 return PN; 4717 } 4718 4719 Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) { 4720 return Visit(E->getChosenSubExpr()); 4721 } 4722 4723 Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { 4724 QualType Ty = VE->getType(); 4725 4726 if (Ty->isVariablyModifiedType()) 4727 CGF.EmitVariablyModifiedType(Ty); 4728 4729 Address ArgValue = Address::invalid(); 4730 Address ArgPtr = CGF.EmitVAArg(VE, ArgValue); 4731 4732 llvm::Type *ArgTy = ConvertType(VE->getType()); 4733 4734 // If EmitVAArg fails, emit an error. 4735 if (!ArgPtr.isValid()) { 4736 CGF.ErrorUnsupported(VE, "va_arg expression"); 4737 return llvm::UndefValue::get(ArgTy); 4738 } 4739 4740 // FIXME Volatility. 4741 llvm::Value *Val = Builder.CreateLoad(ArgPtr); 4742 4743 // If EmitVAArg promoted the type, we must truncate it. 4744 if (ArgTy != Val->getType()) { 4745 if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy()) 4746 Val = Builder.CreateIntToPtr(Val, ArgTy); 4747 else 4748 Val = Builder.CreateTrunc(Val, ArgTy); 4749 } 4750 4751 return Val; 4752 } 4753 4754 Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) { 4755 return CGF.EmitBlockLiteral(block); 4756 } 4757 4758 // Convert a vec3 to vec4, or vice versa. 4759 static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF, 4760 Value *Src, unsigned NumElementsDst) { 4761 static constexpr int Mask[] = {0, 1, 2, -1}; 4762 return Builder.CreateShuffleVector(Src, 4763 llvm::makeArrayRef(Mask, NumElementsDst)); 4764 } 4765 4766 // Create cast instructions for converting LLVM value \p Src to LLVM type \p 4767 // DstTy. \p Src has the same size as \p DstTy. Both are single value types 4768 // but could be scalar or vectors of different lengths, and either can be 4769 // pointer. 4770 // There are 4 cases: 4771 // 1. non-pointer -> non-pointer : needs 1 bitcast 4772 // 2. pointer -> pointer : needs 1 bitcast or addrspacecast 4773 // 3. pointer -> non-pointer 4774 // a) pointer -> intptr_t : needs 1 ptrtoint 4775 // b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast 4776 // 4. non-pointer -> pointer 4777 // a) intptr_t -> pointer : needs 1 inttoptr 4778 // b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr 4779 // Note: for cases 3b and 4b two casts are required since LLVM casts do not 4780 // allow casting directly between pointer types and non-integer non-pointer 4781 // types. 4782 static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder, 4783 const llvm::DataLayout &DL, 4784 Value *Src, llvm::Type *DstTy, 4785 StringRef Name = "") { 4786 auto SrcTy = Src->getType(); 4787 4788 // Case 1. 4789 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy()) 4790 return Builder.CreateBitCast(Src, DstTy, Name); 4791 4792 // Case 2. 4793 if (SrcTy->isPointerTy() && DstTy->isPointerTy()) 4794 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name); 4795 4796 // Case 3. 4797 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) { 4798 // Case 3b. 4799 if (!DstTy->isIntegerTy()) 4800 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy)); 4801 // Cases 3a and 3b. 4802 return Builder.CreateBitOrPointerCast(Src, DstTy, Name); 4803 } 4804 4805 // Case 4b. 4806 if (!SrcTy->isIntegerTy()) 4807 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy)); 4808 // Cases 4a and 4b. 
4809 return Builder.CreateIntToPtr(Src, DstTy, Name); 4810 } 4811 4812 Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) { 4813 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr()); 4814 llvm::Type *DstTy = ConvertType(E->getType()); 4815 4816 llvm::Type *SrcTy = Src->getType(); 4817 unsigned NumElementsSrc = 4818 isa<llvm::VectorType>(SrcTy) 4819 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements() 4820 : 0; 4821 unsigned NumElementsDst = 4822 isa<llvm::VectorType>(DstTy) 4823 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements() 4824 : 0; 4825 4826 // Use bit vector expansion for ext_vector_type boolean vectors. 4827 if (E->getType()->isExtVectorBoolType()) 4828 return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype"); 4829 4830 // Going from vec3 to non-vec3 is a special case and requires a shuffle 4831 // vector to get a vec4, then a bitcast if the target type is different. 4832 if (NumElementsSrc == 3 && NumElementsDst != 3) { 4833 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4); 4834 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src, 4835 DstTy); 4836 4837 Src->setName("astype"); 4838 return Src; 4839 } 4840 4841 // Going from non-vec3 to vec3 is a special case and requires a bitcast 4842 // to vec4 if the original type is not vec4, then a shuffle vector to 4843 // get a vec3. 4844 if (NumElementsSrc != 3 && NumElementsDst == 3) { 4845 auto *Vec4Ty = llvm::FixedVectorType::get( 4846 cast<llvm::VectorType>(DstTy)->getElementType(), 4); 4847 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src, 4848 Vec4Ty); 4849 4850 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3); 4851 Src->setName("astype"); 4852 return Src; 4853 } 4854 4855 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), 4856 Src, DstTy, "astype"); 4857 } 4858 4859 Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) { 4860 return CGF.EmitAtomicExpr(E).getScalarVal(); 4861 } 4862 4863 //===----------------------------------------------------------------------===// 4864 // Entry Point into this File 4865 //===----------------------------------------------------------------------===// 4866 4867 /// Emit the computation of the specified expression of scalar type, ignoring 4868 /// the result. 4869 Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) { 4870 assert(E && hasScalarEvaluationKind(E->getType()) && 4871 "Invalid scalar expression to emit"); 4872 4873 return ScalarExprEmitter(*this, IgnoreResultAssign) 4874 .Visit(const_cast<Expr *>(E)); 4875 } 4876 4877 /// Emit a conversion from the specified type to the specified destination type, 4878 /// both of which are LLVM scalar types. 4879 Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy, 4880 QualType DstTy, 4881 SourceLocation Loc) { 4882 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) && 4883 "Invalid scalar expression to emit"); 4884 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc); 4885 } 4886 4887 /// Emit a conversion from the specified complex type to the specified 4888 /// destination type, where the destination type is an LLVM scalar type. 
4889 Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
4890 QualType SrcTy,
4891 QualType DstTy,
4892 SourceLocation Loc) {
4893 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
4894 "Invalid complex -> scalar conversion");
4895 return ScalarExprEmitter(*this)
4896 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
4897 }
4898
4899
4900 llvm::Value *CodeGenFunction::
4901 EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
4902 bool isInc, bool isPre) {
4903 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
4904 }
4905
4906 LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
4907 // object->isa or (*object).isa
4908 // Generate code as for: *(Class*)object
4909
4910 Expr *BaseExpr = E->getBase();
4911 Address Addr = Address::invalid();
4912 if (BaseExpr->isPRValue()) {
4913 llvm::Type *BaseTy =
4914 ConvertTypeForMem(BaseExpr->getType()->getPointeeType());
4915 Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
4916 } else {
4917 Addr = EmitLValue(BaseExpr).getAddress(*this);
4918 }
4919
4920 // Cast the address to Class*.
4921 Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
4922 return MakeAddrLValue(Addr, E->getType());
4923 }
4924
4925
4926 LValue CodeGenFunction::EmitCompoundAssignmentLValue(
4927 const CompoundAssignOperator *E) {
4928 ScalarExprEmitter Scalar(*this);
4929 Value *Result = nullptr;
4930 switch (E->getOpcode()) {
4931 #define COMPOUND_OP(Op) \
4932 case BO_##Op##Assign: \
4933 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
4934 Result)
4935 COMPOUND_OP(Mul);
4936 COMPOUND_OP(Div);
4937 COMPOUND_OP(Rem);
4938 COMPOUND_OP(Add);
4939 COMPOUND_OP(Sub);
4940 COMPOUND_OP(Shl);
4941 COMPOUND_OP(Shr);
4942 COMPOUND_OP(And);
4943 COMPOUND_OP(Xor);
4944 COMPOUND_OP(Or);
4945 #undef COMPOUND_OP
4946
4947 case BO_PtrMemD:
4948 case BO_PtrMemI:
4949 case BO_Mul:
4950 case BO_Div:
4951 case BO_Rem:
4952 case BO_Add:
4953 case BO_Sub:
4954 case BO_Shl:
4955 case BO_Shr:
4956 case BO_LT:
4957 case BO_GT:
4958 case BO_LE:
4959 case BO_GE:
4960 case BO_EQ:
4961 case BO_NE:
4962 case BO_Cmp:
4963 case BO_And:
4964 case BO_Xor:
4965 case BO_Or:
4966 case BO_LAnd:
4967 case BO_LOr:
4968 case BO_Assign:
4969 case BO_Comma:
4970 llvm_unreachable("Not valid compound assignment operators");
4971 }
4972
4973 llvm_unreachable("Unhandled compound assignment operator");
4974 }
4975
4976 struct GEPOffsetAndOverflow {
4977 // The total (signed) byte offset for the GEP.
4978 llvm::Value *TotalOffset;
4979 // The offset overflow flag - true if the total offset overflows.
4980 llvm::Value *OffsetOverflows;
4981 };
4982
4983 /// Evaluate the given GEPVal, which is either an inbounds GEP or a constant,
4984 /// and compute the total offset it applies from its base pointer BasePtr.
4985 /// Returns the offset in bytes and a boolean flag indicating whether an
4986 /// overflow happened during evaluation.
4987 static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
4988 llvm::LLVMContext &VMContext,
4989 CodeGenModule &CGM,
4990 CGBuilderTy &Builder) {
4991 const auto &DL = CGM.getDataLayout();
4992
4993 // The total (signed) byte offset for the GEP.
4994 llvm::Value *TotalOffset = nullptr;
4995
4996 // Was the GEP already reduced to a constant?
4997 if (isa<llvm::Constant>(GEPVal)) { 4998 // Compute the offset by casting both pointers to integers and subtracting: 4999 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr) 5000 Value *BasePtr_int = 5001 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType())); 5002 Value *GEPVal_int = 5003 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType())); 5004 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int); 5005 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()}; 5006 } 5007 5008 auto *GEP = cast<llvm::GEPOperator>(GEPVal); 5009 assert(GEP->getPointerOperand() == BasePtr && 5010 "BasePtr must be the base of the GEP."); 5011 assert(GEP->isInBounds() && "Expected inbounds GEP"); 5012 5013 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType()); 5014 5015 // Grab references to the signed add/mul overflow intrinsics for intptr_t. 5016 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy); 5017 auto *SAddIntrinsic = 5018 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy); 5019 auto *SMulIntrinsic = 5020 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy); 5021 5022 // The offset overflow flag - true if the total offset overflows. 5023 llvm::Value *OffsetOverflows = Builder.getFalse(); 5024 5025 /// Return the result of the given binary operation. 5026 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS, 5027 llvm::Value *RHS) -> llvm::Value * { 5028 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop"); 5029 5030 // If the operands are constants, return a constant result. 5031 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) { 5032 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) { 5033 llvm::APInt N; 5034 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode, 5035 /*Signed=*/true, N); 5036 if (HasOverflow) 5037 OffsetOverflows = Builder.getTrue(); 5038 return llvm::ConstantInt::get(VMContext, N); 5039 } 5040 } 5041 5042 // Otherwise, compute the result with checked arithmetic. 5043 auto *ResultAndOverflow = Builder.CreateCall( 5044 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS}); 5045 OffsetOverflows = Builder.CreateOr( 5046 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows); 5047 return Builder.CreateExtractValue(ResultAndOverflow, 0); 5048 }; 5049 5050 // Determine the total byte offset by looking at each GEP operand. 5051 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP); 5052 GTI != GTE; ++GTI) { 5053 llvm::Value *LocalOffset; 5054 auto *Index = GTI.getOperand(); 5055 // Compute the local offset contributed by this indexing step: 5056 if (auto *STy = GTI.getStructTypeOrNull()) { 5057 // For struct indexing, the local offset is the byte position of the 5058 // specified field. 5059 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue(); 5060 LocalOffset = llvm::ConstantInt::get( 5061 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo)); 5062 } else { 5063 // Otherwise this is array-like indexing. The local offset is the index 5064 // multiplied by the element size. 5065 auto *ElementSize = llvm::ConstantInt::get( 5066 IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType())); 5067 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true); 5068 LocalOffset = eval(BO_Mul, ElementSize, IndexS); 5069 } 5070 5071 // If this is the first offset, set it as the total offset. Otherwise, add 5072 // the local offset into the running total. 
5073 if (!TotalOffset || TotalOffset == Zero) 5074 TotalOffset = LocalOffset; 5075 else 5076 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset); 5077 } 5078 5079 return {TotalOffset, OffsetOverflows}; 5080 } 5081 5082 Value * 5083 CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr, 5084 ArrayRef<Value *> IdxList, 5085 bool SignedIndices, bool IsSubtraction, 5086 SourceLocation Loc, const Twine &Name) { 5087 llvm::Type *PtrTy = Ptr->getType(); 5088 Value *GEPVal = Builder.CreateInBoundsGEP(ElemTy, Ptr, IdxList, Name); 5089 5090 // If the pointer overflow sanitizer isn't enabled, do nothing. 5091 if (!SanOpts.has(SanitizerKind::PointerOverflow)) 5092 return GEPVal; 5093 5094 // Perform nullptr-and-offset check unless the nullptr is defined. 5095 bool PerformNullCheck = !NullPointerIsDefined( 5096 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace()); 5097 // Check for overflows unless the GEP got constant-folded, 5098 // and only in the default address space 5099 bool PerformOverflowCheck = 5100 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0; 5101 5102 if (!(PerformNullCheck || PerformOverflowCheck)) 5103 return GEPVal; 5104 5105 const auto &DL = CGM.getDataLayout(); 5106 5107 SanitizerScope SanScope(this); 5108 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy); 5109 5110 GEPOffsetAndOverflow EvaluatedGEP = 5111 EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder); 5112 5113 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || 5114 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) && 5115 "If the offset got constant-folded, we don't expect that there was an " 5116 "overflow."); 5117 5118 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy); 5119 5120 // Common case: if the total offset is zero, and we are using C++ semantics, 5121 // where nullptr+0 is defined, don't emit a check. 5122 if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus) 5123 return GEPVal; 5124 5125 // Now that we've computed the total offset, add it to the base pointer (with 5126 // wrapping semantics). 5127 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy); 5128 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset); 5129 5130 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks; 5131 5132 if (PerformNullCheck) { 5133 // In C++, if the base pointer evaluates to a null pointer value, 5134 // the only valid pointer this inbounds GEP can produce is also 5135 // a null pointer, so the offset must also evaluate to zero. 5136 // Likewise, if we have non-zero base pointer, we can not get null pointer 5137 // as a result, so the offset can not be -intptr_t(BasePtr). 5138 // In other words, both pointers are either null, or both are non-null, 5139 // or the behaviour is undefined. 5140 // 5141 // C, however, is more strict in this regard, and gives more 5142 // optimization opportunities: in C, additionally, nullptr+0 is undefined. 5143 // So both the input to the 'gep inbounds' AND the output must not be null. 5144 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr); 5145 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP); 5146 auto *Valid = 5147 CGM.getLangOpts().CPlusPlus 5148 ? 
Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
5149 : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
5150 Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
5151 }
5152
5153 if (PerformOverflowCheck) {
5154 // The GEP is valid if:
5155 // 1) The total offset doesn't overflow, and
5156 // 2) The sign of the difference between the computed address and the base
5157 // pointer matches the sign of the total offset.
5158 llvm::Value *ValidGEP;
5159 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
5160 if (SignedIndices) {
5161 // GEP is computed as `unsigned base + signed offset`, therefore:
5162 // * If offset was positive, then the computed pointer can not be
5163 // [unsigned] less than the base pointer, unless it overflowed.
5164 // * If offset was negative, then the computed pointer can not be
5165 // [unsigned] greater than the base pointer, unless it overflowed.
5166 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5167 auto *PosOrZeroOffset =
5168 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
5169 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
5170 ValidGEP =
5171 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
5172 } else if (!IsSubtraction) {
5173 // GEP is computed as `unsigned base + unsigned offset`, therefore the
5174 // computed pointer can not be [unsigned] less than the base pointer,
5175 // unless there was an overflow.
5176 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
5177 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5178 } else {
5179 // GEP is computed as `unsigned base - unsigned offset`, therefore the
5180 // computed pointer can not be [unsigned] greater than the base pointer,
5181 // unless there was an overflow.
5182 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
5183 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
5184 }
5185 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
5186 Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
5187 }
5188
5189 assert(!Checks.empty() && "Should have produced some checks.");
5190
5191 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
5192 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
5193 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
5194 EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
5195
5196 return GEPVal;
5197 }
5198