//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/TypeSize.h"
#include <cstdarg>
#include <optional>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
                    : LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
                    : LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
                    : LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;                   // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform.
  FPOptions FPFeatures;
  const Expr *E; // Entire expr, used when reporting unsupported ops. May not
                 // be a binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases,
  /// this operation did not follow usual arithmetic conversion and both
  /// operands might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations
    // return an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++0x nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                       const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return std::nullopt;

  QualType BaseTy = Base->getType();
  if (!Ctx.isPromotableIntegerType(BaseTy) ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return std::nullopt;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).has_value();
}

/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
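  // Illustrative example: two 'unsigned short' operands are promoted to
  // 32-bit 'int', and 0xFFFF * 0xFFFF == 0xFFFE0001 exceeds INT_MAX, so even
  // widened unsigned multiplication can overflow the promoted signed type.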
  const auto *BO = cast<BinaryOperator>(Op.E);
  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either of
  // the unpromoted types is less than half the size of the promoted type.
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}

class ScalarExprEmitter
    : public StmtVisitor<ScalarExprEmitter, Value *> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;

public:
  ScalarExprEmitter(CodeGenFunction &cgf, bool ira = false)
      : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
        VMContext(cgf.getLLVMContext()) {}

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                VD->getType().getNonReferenceType()->getAs<TypedefType>())
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here, unless the
        // alignment-assumption sanitizer is enabled; in that case we prefer
        // the assumption over the alignment attribute on the IR function
        // parameter.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy = E->getType()->getAs<TypedefType>())
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }

  /// EmitLoadOfLValue - Given an expression with scalar type that represents
  /// an l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after
  /// conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
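  // ScalarConversionOpts bundles the flags that alter that conversion; e.g.
  // constructing it from a SanitizerSet enables the implicit integer
  // truncation and sign-change checks emitted below.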
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and other fixed point or fixed
  /// point and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()),
                                         QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again. Optimize this common case.
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value *>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (E->getType()->isVoidType())
      return nullptr;

    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.Builder.CreateLoad(Address(
            Result, CGF.ConvertTypeForMem(E->getType()),
            CGF.getContext().getTypeAlignInChars(E->getType())));
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    if (E->getType()->isVoidType())
      return nullptr;

    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),
                                  E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
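  // The four inc/dec visitors below all defer to EmitScalarPrePostIncDec;
  // its two boolean parameters select increment vs. decrement and prefix vs.
  // postfix, e.g. 'x++' is emitted with isInc = true, isPre = false.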
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);

  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }

  Value *VisitUnaryPlus(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryMinus(const UnaryOperator *E,
                         QualType PromotionType = QualType());
  Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);

  Value *VisitUnaryNot(const UnaryOperator *E);
  Value *VisitUnaryLNot(const UnaryOperator *E);
  Value *VisitUnaryReal(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryImag(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values.
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero,
                                                  bool isDiv);
  // Common helper for getting how wide the LHS of a shift is.
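  // Illustrative: for a 32-bit LHS this yields the constant 31, used both to
  // mask out-of-range shift amounts and to build the shift-exponent
  // sanitizer check.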
  static Value *GetWidthMinusOneValue(Value *LHS, Value *RHS);

  // Used to constrain shift amounts for OpenCL: mask for power-of-2 widths,
  // URem for non-power-of-2 widths.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr(const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  LValue EmitCompoundAssignLValue(
      const CompoundAssignOperator *E,
      Value *(ScalarExprEmitter::*F)(const BinOpInfo &), Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  QualType getPromotionType(QualType Ty) {
    if (auto *CT = Ty->getAs<ComplexType>()) {
      QualType ElementType = CT->getElementType();
      if (ElementType.UseExcessPrecision(CGF.getContext()))
        return CGF.getContext().getComplexType(CGF.getContext().FloatTy);
    }
    if (Ty.UseExcessPrecision(CGF.getContext()))
      return CGF.getContext().FloatTy;
    return QualType();
  }

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP)                                                        \
  Value *VisitBin##OP(const BinaryOperator *E) {                               \
    QualType promotionTy = getPromotionType(E->getType());                     \
    auto result = Emit##OP(EmitBinOps(E, promotionTy));                        \
    if (result && !promotionTy.isNull())                                       \
      result = EmitUnPromotedValue(result, E->getType());                      \
    return result;                                                             \
  }                                                                            \
  Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) {               \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP);                \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  // Comparisons.
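  // Each comparison selects an unsigned-integer, signed-integer, or
  // floating-point predicate from the operand types; e.g. '<' lowers to
  // icmp ult / icmp slt / fcmp olt. The relational forms pass
  // IsSignaling = true, since under strict FP semantics they are emitted as
  // signaling comparisons.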
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG)                                       \
  Value *VisitBin##CODE(const BinaryOperator *E) {                             \
    return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI,              \
                       llvm::FCmpInst::FP, SIG);                               \
  }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ, ICMP_EQ, FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE, ICMP_NE, FCMP_UNE, false)
#undef VISITCOMP

  Value *VisitBinAssign(const BinaryOperator *E);

  Value *VisitBinLAnd(const BinaryOperator *E);
  Value *VisitBinLOr(const BinaryOperator *E);
  Value *VisitBinComma(const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
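// Illustrative lowering: a float source becomes 'fcmp une %v, 0.0', a
// pointer becomes 'icmp ne %p, null', and an integer becomes 'icmp ne %v, 0'
// (see the Emit*ToBoolConversion helpers above).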
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // to an integer).
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for the lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for the upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

  // If we're converting from __half, convert the range to float to match
  // the type of src.
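  // (The fcmp emitted below compares the value after any half->float
  // promotion, so the bounds must be converted into the same semantics as
  // Src.)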
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
        CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

  llvm::Value *GE =
      Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
      Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(Src != Dst);
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned
  // truncation. Else, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerMask Mask;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value.
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.
  return std::make_pair(Kind, std::make_pair(Check, Mask));
}

static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
    QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}

void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src,
                                                   QualType SrcType, Value *Dst,
                                                   QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from a larger unsigned type to a smaller signed
  // type, let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check =
          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  // If the comparison result is 'i1 false', then the truncation was lossy.

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // NOTE: zero value is considered to be non-negative.
  auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
                                       const char *Name) -> Value * {
    // Is this value a signed type?
    bool VSigned = VType->isSignedIntegerOrEnumerationType();
    llvm::Type *VTy = V->getType();
    if (!VSigned) {
      // If the value is unsigned, then it is never negative.
      // FIXME: can we encounter non-scalar VTy here?
      return llvm::ConstantInt::getFalse(VTy->getContext());
    }
    // Get the zero of the same type with which we will be comparing.
    llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
    // %V.isnegative = icmp slt %V, 0
    // I.e. is %V *strictly* less than zero, does it have a negative value?
    return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                              llvm::Twine(Name) + "." + V->getName() +
                                  ".negativitycheck");
  };

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
  // 3. Now, was the 'negativity status' preserved during the conversion?
  // NOTE: conversion from negative to zero is considered to change the sign.
  // (We want to get 'false' when the conversion changed the sign.)
  // So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
}

void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src,
                                                   QualType SrcType, Value *Dst,
                                                   QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // We do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would have been
  // dropped by the opt passes (instcombine) anyway.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have a signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to a *larger* *signed* type, then no check is
  // needed, because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero).
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled, and this is a
    // truncation from a signed type, then no check is needed, because here
    // the sign change check is interchangeable with the truncation check.
    return;
  }
  // That's it. We can't rule out any more cases with the data we have.

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.
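  // Illustrative example: for 'int i = -1; unsigned u = i;', the helper
  // computes 'icmp slt i32 -1, 0' (true) for the source and constant false
  // for the unsigned destination; their equality compare yields 'i1 false',
  // so EmitCheck calls the implicit-conversion handler.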
  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled, and we are
    // truncating from a larger unsigned type to a smaller signed type, let's
    // handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
                                         QualType DstType, llvm::Type *SrcTy,
                                         llvm::Type *DstTy,
                                         ScalarConversionOpts Opts) {
  // The element types determine the kind of cast to perform.
  llvm::Type *SrcElementTy;
  llvm::Type *DstElementTy;
  QualType SrcElementType;
  QualType DstElementType;
  if (SrcType->isMatrixType() && DstType->isMatrixType()) {
    SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
    DstElementType = DstType->castAs<MatrixType>()->getElementType();
  } else {
    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
           "cannot cast between matrix and non-matrix types");
    SrcElementTy = SrcTy;
    DstElementTy = DstTy;
    SrcElementType = SrcType;
    DstElementType = DstType;
  }

  if (isa<llvm::IntegerType>(SrcElementTy)) {
    bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
    if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }

    if (isa<llvm::IntegerType>(DstElementTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  if (isa<llvm::IntegerType>(DstElementTy)) {
    assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
    bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();

    // If we can't recognize overflow as undefined behavior, assume that
    // overflow saturates. This protects against normal optimizations if we
    // are compiling with non-standard FP semantics.
    if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
      llvm::Intrinsic::ID IID =
          IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
      return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
    }

    if (IsSigned)
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

  if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
    return Builder.CreateFPTrunc(Src, DstTy, "conv");
  return Builder.CreateFPExt(Src, DstTy, "conv");
}

/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType,
                                               SourceLocation Loc,
                                               ScalarConversionOpts Opts) {
  // All conversions involving fixed point types should be handled by the
  // EmitFixedPoint family functions. This is done to prevent bloating up
  // this function more, and although fixed point numbers are represented by
  // integers, we do not want to follow any logic that assumes they should be
  // treated as integers.
  // TODO(leonardchan): When necessary, add another if statement checking for
  // conversions to fixed point types from other types.
  if (SrcType->isFixedPointType()) {
    if (DstType->isBooleanType())
      // It is important that we check this before checking if the dest type
      // is an integer because booleans are technically integer types.
      // We do not need to check the padding bit on unsigned types if unsigned
      // padding is enabled because overflow into this bit is undefined
      // behavior.
      return Builder.CreateIsNotNull(Src, "tobool");
    if (DstType->isFixedPointType() || DstType->isIntegerType() ||
        DstType->isRealFloatingType())
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion from a fixed point type to another type.");
  } else if (DstType->isFixedPointType()) {
    if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
      // This also includes converting booleans and enums to fixed point types.
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion to a fixed point type from another type.");
  }

  QualType NoncanonicalSrcType = SrcType;
  QualType NoncanonicalDstType = DstType;

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return nullptr;

  llvm::Value *OrigSrc = Src;
  QualType OrigSrcType = SrcType;
  llvm::Type *SrcTy = Src->getType();

  // Handle conversions to bool first; they are special: comparisons against
  // zero.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  llvm::Type *DstTy = ConvertType(DstType);

  // Cast from half through float if half isn't a native type.
  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Cast to FP using the intrinsic if the half type itself isn't supported.
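    // (llvm.convert.from.fp16 takes the half value as an i16 bit pattern and
    // widens it directly to the destination floating-point type.)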
    if (DstTy->isFloatingPointTy()) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
            Src);
    } else {
      // Cast to other types through float, using either the intrinsic or
      // FPExt, depending on whether the half type itself is supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        Src = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            Src);
      } else {
        Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
      }
      SrcType = CGF.getContext().FloatTy;
      SrcTy = CGF.FloatTy;
    }
  }

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy) {
    if (Opts.EmitImplicitIntegerSignChangeChecks)
      EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
                                 NoncanonicalDstType, Loc);

    return Src;
  }

  // Handle pointer conversions next: pointers can only be converted to/from
  // other pointers and integers. Check for pointer types in terms of LLVM, as
  // some native types (like Obj-C id) may map to a pointer type.
  if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(SrcTy))
      return Builder.CreateBitCast(Src, DstTy, "conv");

    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    llvm::Value *IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(SrcTy)) {
    // Must be a ptr to int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }

  // A scalar can be splatted to an extended vector of the same element type.
  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
    // Sema should add casts to make sure that the source expression's type
    // is the same as the vector's element type (sans qualifiers).
    assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
               SrcType.getTypePtr() &&
           "Splatted expr doesn't match with vector element type?");

    // Splat the element across to all elements.
    unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
    return Builder.CreateVectorSplat(NumElements, Src, "splat");
  }

  if (SrcType->isMatrixType() && DstType->isMatrixType())
    return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);

  if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
    // Allow bitcast from vector to integer/fp of the same size.
    llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
    llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
    if (SrcSize == DstSize)
      return Builder.CreateBitCast(Src, DstTy, "conv");

    // Conversions between vectors of different sizes are not allowed except
    // when vectors of half are involved. Operations on storage-only half
    // vectors require promoting half vector operands to float vectors and
    // truncating the result, which is either an int or float vector, to a
    // short or half vector.

    // Source and destination are both expected to be vectors.
    llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    (void)DstElementTy;

    assert(((SrcElementTy->isIntegerTy() &&
             DstElementTy->isIntegerTy()) ||
            (SrcElementTy->isFloatingPointTy() &&
             DstElementTy->isFloatingPointTy())) &&
           "unexpected conversion between a floating-point vector and an "
           "integer vector");

    // Truncate an i32 vector to an i16 vector.
    if (SrcElementTy->isIntegerTy())
      return Builder.CreateIntCast(Src, DstTy, false, "conv");

    // Truncate a float vector to a half vector.
    if (SrcSize > DstSize)
      return Builder.CreateFPTrunc(Src, DstTy, "conv");

    // Promote a half vector to a float vector.
    return Builder.CreateFPExt(Src, DstTy, "conv");
  }

  // Finally, we have the arithmetic types: real int/float.
  Value *Res = nullptr;
  llvm::Type *ResTy = DstTy;

  // An overflowing conversion has undefined behavior if either the source
  // type or the destination type is a floating-point type. However, we
  // consider the range of representable values for all floating-point types
  // to be [-inf,+inf], so no overflow can ever happen when the destination
  // type is a floating-point type.
  if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
      OrigSrcType->isFloatingType())
    EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
                             Loc);

  // Cast to half through float if half isn't a native type.
  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Make sure we cast in a single step if from another FP type.
    if (SrcTy->isFloatingPointTy()) {
      // Use the intrinsic if the half type itself isn't supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
      // If the half type is supported, just use an fptrunc.
      return Builder.CreateFPTrunc(Src, DstTy);
    }
    DstTy = CGF.FloatTy;
  }

  Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);

  if (DstTy != ResTy) {
    if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
      assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
      Res = Builder.CreateCall(
          CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
          Res);
    } else {
      Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
    }
  }

  if (Opts.EmitImplicitIntegerTruncationChecks)
    EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
                               NoncanonicalDstType, Loc);

  if (Opts.EmitImplicitIntegerSignChangeChecks)
    EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
                               NoncanonicalDstType, Loc);

  return Res;
}

Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
                                                   QualType DstTy,
                                                   SourceLocation Loc) {
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  llvm::Value *Result;
  if (SrcTy->isRealFloatingType())
    Result = FPBuilder.CreateFloatingToFixed(Src,
        CGF.getContext().getFixedPointSemantics(DstTy));
  else if (DstTy->isRealFloatingType())
    Result = FPBuilder.CreateFixedToFloating(Src,
        CGF.getContext().getFixedPointSemantics(SrcTy),
        ConvertType(DstTy));
  else {
    auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
    auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);

    if (DstTy->isIntegerType())
      Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
                                              DstFPSema.getWidth(),
                                              DstFPSema.isSigned());
    else if (SrcTy->isIntegerType())
      Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
                                              DstFPSema);
    else
      Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
  }
  return Result;
}

/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *ScalarExprEmitter::EmitComplexToScalarConversion(
    CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
    SourceLocation Loc) {
  // Get the source element type.
  SrcTy = SrcTy->castAs<ComplexType>()->getElementType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstTy->isBooleanType()) {
    // Complex != 0 -> (Real != 0) | (Imag != 0)
    Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
    return Builder.CreateOr(Src.first, Src.second, "tobool");
  }

  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type."
  return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
}

Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
  return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
}

/// Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
/// operation). The check passes if all values in \p Checks (which are \c i1)
/// are \c true.
void ScalarExprEmitter::EmitBinOpCheck(
    ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
  assert(CGF.IsSanitizerScope);
  SanitizerHandler Check;
  SmallVector<llvm::Constant *, 4> StaticData;
  SmallVector<llvm::Value *, 2> DynamicData;

  BinaryOperatorKind Opcode = Info.Opcode;
  if (BinaryOperator::isCompoundAssignmentOp(Opcode))
    Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);

  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
  if (UO && UO->getOpcode() == UO_Minus) {
    Check = SanitizerHandler::NegateOverflow;
    StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
    DynamicData.push_back(Info.RHS);
  } else {
    if (BinaryOperator::isShiftOp(Opcode)) {
      // Shift LHS negative or too large, or RHS out of bounds.
      Check = SanitizerHandler::ShiftOutOfBounds;
      const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
    } else if (Opcode == BO_Div || Opcode == BO_Rem) {
      // Divide or modulo by zero, or signed overflow (e.g. INT_MAX / -1).
      Check = SanitizerHandler::DivremOverflow;
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    } else {
      // Arithmetic overflow (+, -, *).
      switch (Opcode) {
      case BO_Add: Check = SanitizerHandler::AddOverflow; break;
      case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
      case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
      default: llvm_unreachable("unexpected opcode for bin op check");
      }
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    }
    DynamicData.push_back(Info.LHS);
    DynamicData.push_back(Info.RHS);
  }

  CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

Value *ScalarExprEmitter::VisitExpr(Expr *E) {
  CGF.ErrorUnsupported(E, "scalar expression");
  if (E->getType()->isVoidType())
    return nullptr;
  return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
}

Value *
ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
  ASTContext &Context = CGF.getContext();
  unsigned AddrSpace =
      Context.getTargetAddressSpace(CGF.CGM.GetGlobalConstantAddressSpace());
  llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
      E->ComputeName(Context), "__usn_str", AddrSpace);

  llvm::Type *ExprTy = ConvertType(E->getType());
  return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
                                                     "usn_addr_cast");
}

Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
  // Vector mask case.
  if (E->getNumSubExprs() == 2) {
    Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
    Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
    Value *Mask;

    auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
    unsigned LHSElts = LTy->getNumElements();

    Mask = RHS;

    auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());

    // Mask off the high bits of each shuffle index.
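    // Illustrative: with a 4-element LHS, NextPowerOf2(4 - 1) - 1 == 3, so
    // each mask element is ANDed with 3 and can never index past the vector.
    // (This is the variable-mask, two-operand form of __builtin_shufflevector.)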
1656 Value *MaskBits = 1657 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1); 1658 Mask = Builder.CreateAnd(Mask, MaskBits, "mask"); 1659 1660 // newv = undef 1661 // mask = mask & maskbits 1662 // for each elt 1663 // n = extract mask i 1664 // x = extract val n 1665 // newv = insert newv, x, i 1666 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(), 1667 MTy->getNumElements()); 1668 Value* NewV = llvm::PoisonValue::get(RTy); 1669 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) { 1670 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i); 1671 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx"); 1672 1673 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt"); 1674 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins"); 1675 } 1676 return NewV; 1677 } 1678 1679 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0)); 1680 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1)); 1681 1682 SmallVector<int, 32> Indices; 1683 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) { 1684 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2); 1685 // Check for -1 and output it as undef in the IR. 1686 if (Idx.isSigned() && Idx.isAllOnes()) 1687 Indices.push_back(-1); 1688 else 1689 Indices.push_back(Idx.getZExtValue()); 1690 } 1691 1692 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle"); 1693 } 1694 1695 Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) { 1696 QualType SrcType = E->getSrcExpr()->getType(), 1697 DstType = E->getType(); 1698 1699 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr()); 1700 1701 SrcType = CGF.getContext().getCanonicalType(SrcType); 1702 DstType = CGF.getContext().getCanonicalType(DstType); 1703 if (SrcType == DstType) return Src; 1704 1705 assert(SrcType->isVectorType() && 1706 "ConvertVector source type must be a vector"); 1707 assert(DstType->isVectorType() && 1708 "ConvertVector destination type must be a vector"); 1709 1710 llvm::Type *SrcTy = Src->getType(); 1711 llvm::Type *DstTy = ConvertType(DstType); 1712 1713 // Ignore conversions like int -> uint. 1714 if (SrcTy == DstTy) 1715 return Src; 1716 1717 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(), 1718 DstEltType = DstType->castAs<VectorType>()->getElementType(); 1719 1720 assert(SrcTy->isVectorTy() && 1721 "ConvertVector source IR type must be a vector"); 1722 assert(DstTy->isVectorTy() && 1723 "ConvertVector destination IR type must be a vector"); 1724 1725 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(), 1726 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType(); 1727 1728 if (DstEltType->isBooleanType()) { 1729 assert((SrcEltTy->isFloatingPointTy() || 1730 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion"); 1731 1732 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy); 1733 if (SrcEltTy->isFloatingPointTy()) { 1734 return Builder.CreateFCmpUNE(Src, Zero, "tobool"); 1735 } else { 1736 return Builder.CreateICmpNE(Src, Zero, "tobool"); 1737 } 1738 } 1739 1740 // We have the arithmetic types: real int/float. 
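  // A sketch of the dispatch below, e.g. for __builtin_convertvector:
  //   int4    -> float4 : sitofp/uitofp, chosen by the source signedness
  //   float4  -> int4   : fptosi/fptoui, chosen by the destination signedness
  //   int4    -> short4 : CreateIntCast (trunc, or sext/zext by src signedness)
  //   double4 -> float4 : fptrunc; the widening direction uses fpext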
  Value *Res = nullptr;

  if (isa<llvm::IntegerType>(SrcEltTy)) {
    bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
    if (isa<llvm::IntegerType>(DstEltTy))
      Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      Res = Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      Res = Builder.CreateUIToFP(Src, DstTy, "conv");
  } else if (isa<llvm::IntegerType>(DstEltTy)) {
    assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
    if (DstEltType->isSignedIntegerOrEnumerationType())
      Res = Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
  } else {
    assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
           "Unknown real conversion");
    if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
      Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPExt(Src, DstTy, "conv");
  }

  return Res;
}

Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
  if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
    CGF.EmitIgnoredExpr(E->getBase());
    return CGF.emitScalarConstant(Constant, E);
  } else {
    Expr::EvalResult Result;
    if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
      llvm::APSInt Value = Result.Val.getInt();
      CGF.EmitIgnoredExpr(E->getBase());
      return Builder.getInt(Value);
    }
  }

  return EmitLoadOfLValue(E);
}

Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Emit subscript expressions in rvalue contexts. For most cases, this just
  // loads the lvalue formed by the subscript expr. However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
  if (!E->getBase()->getType()->isVectorType() &&
      !E->getBase()->getType()->isVLSTBuiltinType())
    return EmitLoadOfLValue(E);

  // Handle the vector case. The base must be a vector, the index must be an
  // integer value.
  Value *Base = Visit(E->getBase());
  Value *Idx = Visit(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);

  return Builder.CreateExtractElement(Base, Idx, "vecext");
}

Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Handle the matrix case. The base must be a matrix; the row and column
  // indices must be integer values.
  Value *RowIdx = Visit(E->getRowIdx());
  Value *ColumnIdx = Visit(E->getColumnIdx());

  const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
  unsigned NumRows = MatrixTy->getNumRows();
  llvm::MatrixBuilder MB(Builder);
  Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
  if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
    MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());

  Value *Matrix = Visit(E->getBase());

  // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
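  // Matrix values are lowered to flat vectors in column-major order, so the
  // flattened index computed above is (roughly) RowIdx + ColumnIdx * NumRows.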
1826 return Builder.CreateExtractElement(Matrix, Idx, "matrixext"); 1827 } 1828 1829 static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, 1830 unsigned Off) { 1831 int MV = SVI->getMaskValue(Idx); 1832 if (MV == -1) 1833 return -1; 1834 return Off + MV; 1835 } 1836 1837 static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) { 1838 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) && 1839 "Index operand too large for shufflevector mask!"); 1840 return C->getZExtValue(); 1841 } 1842 1843 Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { 1844 bool Ignore = TestAndClearIgnoreResultAssign(); 1845 (void)Ignore; 1846 assert (Ignore == false && "init list ignored"); 1847 unsigned NumInitElements = E->getNumInits(); 1848 1849 if (E->hadArrayRangeDesignator()) 1850 CGF.ErrorUnsupported(E, "GNU array range designator extension"); 1851 1852 llvm::VectorType *VType = 1853 dyn_cast<llvm::VectorType>(ConvertType(E->getType())); 1854 1855 if (!VType) { 1856 if (NumInitElements == 0) { 1857 // C++11 value-initialization for the scalar. 1858 return EmitNullValue(E->getType()); 1859 } 1860 // We have a scalar in braces. Just use the first element. 1861 return Visit(E->getInit(0)); 1862 } 1863 1864 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements(); 1865 1866 // Loop over initializers collecting the Value for each, and remembering 1867 // whether the source was swizzle (ExtVectorElementExpr). This will allow 1868 // us to fold the shuffle for the swizzle into the shuffle for the vector 1869 // initializer, since LLVM optimizers generally do not want to touch 1870 // shuffles. 1871 unsigned CurIdx = 0; 1872 bool VIsUndefShuffle = false; 1873 llvm::Value *V = llvm::UndefValue::get(VType); 1874 for (unsigned i = 0; i != NumInitElements; ++i) { 1875 Expr *IE = E->getInit(i); 1876 Value *Init = Visit(IE); 1877 SmallVector<int, 16> Args; 1878 1879 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType()); 1880 1881 // Handle scalar elements. If the scalar initializer is actually one 1882 // element of a different vector of the same width, use shuffle instead of 1883 // extract+insert. 
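  // For example (illustrative): in (float4){ a.x, 0, 0, 0 } the scalar a.x
  // arrives as an extractelement from a float4, so it can be folded into a
  // single shufflevector of 'a' instead of an extract plus an insert.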
1884 if (!VVT) { 1885 if (isa<ExtVectorElementExpr>(IE)) { 1886 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init); 1887 1888 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType()) 1889 ->getNumElements() == ResElts) { 1890 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand()); 1891 Value *LHS = nullptr, *RHS = nullptr; 1892 if (CurIdx == 0) { 1893 // insert into undef -> shuffle (src, undef) 1894 // shufflemask must use an i32 1895 Args.push_back(getAsInt32(C, CGF.Int32Ty)); 1896 Args.resize(ResElts, -1); 1897 1898 LHS = EI->getVectorOperand(); 1899 RHS = V; 1900 VIsUndefShuffle = true; 1901 } else if (VIsUndefShuffle) { 1902 // insert into undefshuffle && size match -> shuffle (v, src) 1903 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V); 1904 for (unsigned j = 0; j != CurIdx; ++j) 1905 Args.push_back(getMaskElt(SVV, j, 0)); 1906 Args.push_back(ResElts + C->getZExtValue()); 1907 Args.resize(ResElts, -1); 1908 1909 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0); 1910 RHS = EI->getVectorOperand(); 1911 VIsUndefShuffle = false; 1912 } 1913 if (!Args.empty()) { 1914 V = Builder.CreateShuffleVector(LHS, RHS, Args); 1915 ++CurIdx; 1916 continue; 1917 } 1918 } 1919 } 1920 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx), 1921 "vecinit"); 1922 VIsUndefShuffle = false; 1923 ++CurIdx; 1924 continue; 1925 } 1926 1927 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements(); 1928 1929 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's 1930 // input is the same width as the vector being constructed, generate an 1931 // optimized shuffle of the swizzle input into the result. 1932 unsigned Offset = (CurIdx == 0) ? 0 : ResElts; 1933 if (isa<ExtVectorElementExpr>(IE)) { 1934 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init); 1935 Value *SVOp = SVI->getOperand(0); 1936 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType()); 1937 1938 if (OpTy->getNumElements() == ResElts) { 1939 for (unsigned j = 0; j != CurIdx; ++j) { 1940 // If the current vector initializer is a shuffle with undef, merge 1941 // this shuffle directly into it. 1942 if (VIsUndefShuffle) { 1943 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0)); 1944 } else { 1945 Args.push_back(j); 1946 } 1947 } 1948 for (unsigned j = 0, je = InitElts; j != je; ++j) 1949 Args.push_back(getMaskElt(SVI, j, Offset)); 1950 Args.resize(ResElts, -1); 1951 1952 if (VIsUndefShuffle) 1953 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0); 1954 1955 Init = SVOp; 1956 } 1957 } 1958 1959 // Extend init to result vector length, and then shuffle its contribution 1960 // to the vector initializer into V. 1961 if (Args.empty()) { 1962 for (unsigned j = 0; j != InitElts; ++j) 1963 Args.push_back(j); 1964 Args.resize(ResElts, -1); 1965 Init = Builder.CreateShuffleVector(Init, Args, "vext"); 1966 1967 Args.clear(); 1968 for (unsigned j = 0; j != CurIdx; ++j) 1969 Args.push_back(j); 1970 for (unsigned j = 0; j != InitElts; ++j) 1971 Args.push_back(j + Offset); 1972 Args.resize(ResElts, -1); 1973 } 1974 1975 // If V is undef, make sure it ends up on the RHS of the shuffle to aid 1976 // merging subsequent shuffles into this one. 1977 if (CurIdx == 0) 1978 std::swap(V, Init); 1979 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit"); 1980 VIsUndefShuffle = isa<llvm::UndefValue>(Init); 1981 CurIdx += InitElts; 1982 } 1983 1984 // FIXME: evaluate codegen vs. shuffling against constant null vector. 
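  // For example (illustrative): for float4 v = (float4){ x, y }; only two
  // elements were initialized above; the loop below zero-fills elements 2-3.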
  llvm::Type *EltTy = VType->getElementType();

  // Emit remaining default initializers.
  for (/* Do not initialize i */; CurIdx < ResElts; ++CurIdx) {
    Value *Idx = Builder.getInt32(CurIdx);
    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
  }
  return V;
}

bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
  const Expr *E = CE->getSubExpr();

  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(E->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
    // And that glvalue casts are never null.
    if (ICE->isGLValue())
      return false;
  }

  return true;
}

// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
// have to handle a broader range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
  Expr *E = CE->getSubExpr();
  QualType DestTy = CE->getType();
  CastKind Kind = CE->getCastKind();
  CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);

  // These cases are generally not written to ignore the result of
  // evaluating their sub-expressions, so we clear this now.
  bool Ignored = TestAndClearIgnoreResultAssign();

  // Since almost all cast kinds apply to scalars, this switch doesn't have
  // a default case, so the compiler will warn on a missing case. The cases
  // are in the same order as in the CastKind enum.
  switch (Kind) {
  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  case CK_LValueBitCast:
  case CK_ObjCObjectLValueCast: {
    Address Addr = EmitLValue(E).getAddress(CGF);
    Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
    LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
    return EmitLoadOfLValue(LV, CE->getExprLoc());
  }

  case CK_LValueToRValueBitCast: {
    LValue SourceLVal = CGF.EmitLValue(E);
    Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
                                                CGF.ConvertTypeForMem(DestTy));
    LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
    DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
    return EmitLoadOfLValue(DestLV, CE->getExprLoc());
  }

  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    Value *Src = Visit(const_cast<Expr*>(E));
    llvm::Type *SrcTy = Src->getType();
    llvm::Type *DstTy = ConvertType(DestTy);
    if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
      llvm_unreachable("wrong cast for pointers in different address spaces "
                       "(must be an address space cast)!");
    }

    if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
      if (auto *PT = DestTy->getAs<PointerType>()) {
        CGF.EmitVTablePtrCheckForCast(
            PT->getPointeeType(),
            Address(Src,
                    CGF.ConvertTypeForMem(
                        E->getType()->castAs<PointerType>()->getPointeeType()),
                    CGF.getPointerAlign()),
            /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
            CE->getBeginLoc());
      }
    }

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
        // Casting to a pointer that could carry dynamic information (provided
        // by invariant.group) requires launder.
        Src = Builder.CreateLaunderInvariantGroup(Src);
      } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
        // Casting to a pointer that does not carry dynamic information
        // (provided by invariant.group) requires stripping it. Note that we
        // don't do it if the source could not be dynamic type and destination
        // could be dynamic because dynamic information is already laundered.
        // It is because launder(strip(src)) == launder(src), so there is no
        // need to add extra strip before launder.
        Src = Builder.CreateStripInvariantGroup(Src);
      }
    }

    // Update heapallocsite metadata when there is an explicit pointer cast.
    if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
      if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) {
        QualType PointeeType = DestTy->getPointeeType();
        if (!PointeeType.isNull())
          CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
                                                       CE->getExprLoc());
      }
    }

    // If Src is a fixed vector and Dst is a scalable vector, and both have the
    // same element type, use the llvm.vector.insert intrinsic to perform the
    // bitcast.
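    // Roughly: a fixed <8 x i32> fed into a scalable nxv4i32 destination
    // becomes @llvm.vector.insert.nxv4i32.v8i32(undef, %src, i64 0); the
    // concrete widths here are illustrative and target-dependent.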
    if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
        // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
        // vector, use a vector insert and bitcast the result.
        bool NeedsBitCast = false;
        auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
        llvm::Type *OrigType = DstTy;
        if (ScalableDst == PredType &&
            FixedSrc->getElementType() == Builder.getInt8Ty()) {
          DstTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
          ScalableDst = cast<llvm::ScalableVectorType>(DstTy);
          NeedsBitCast = true;
        }
        if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
          llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
          llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
          llvm::Value *Result = Builder.CreateInsertVector(
              DstTy, UndefVec, Src, Zero, "castScalableSve");
          if (NeedsBitCast)
            Result = Builder.CreateBitCast(Result, OrigType);
          return Result;
        }
      }
    }

    // If Src is a scalable vector and Dst is a fixed vector, and both have the
    // same element type, use the llvm.vector.extract intrinsic to perform the
    // bitcast.
    if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
      if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
        // If we are casting a scalable 16 x i1 predicate vector to a fixed i8
        // vector, bitcast the source and use a vector extract.
        auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
        if (ScalableSrc == PredType &&
            FixedDst->getElementType() == Builder.getInt8Ty()) {
          SrcTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
          ScalableSrc = cast<llvm::ScalableVectorType>(SrcTy);
          Src = Builder.CreateBitCast(Src, SrcTy);
        }
        if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
          llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
          return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve");
        }
      }
    }

    // Perform VLAT <-> VLST bitcast through memory.
    // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
    //       require the element types of the vectors to be the same, we
    //       need to keep this around for bitcasts between VLAT <-> VLST where
    //       the element types of the vectors are not the same, until we figure
    //       out a better way of doing these casts.
    if ((isa<llvm::FixedVectorType>(SrcTy) &&
         isa<llvm::ScalableVectorType>(DstTy)) ||
        (isa<llvm::ScalableVectorType>(SrcTy) &&
         isa<llvm::FixedVectorType>(DstTy))) {
      Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
      LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
      CGF.EmitStoreOfScalar(Src, LV);
      Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
                                          "castFixedSve");
      LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
      DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
      return EmitLoadOfLValue(DestLV, CE->getExprLoc());
    }
    return Builder.CreateBitCast(Src, DstTy);
  }
  case CK_AddressSpaceConversion: {
    Expr::EvalResult Result;
    if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
        Result.Val.isNullPointer()) {
      // If E has side effects, it is emitted even though its final result is a
      // null pointer. In that case, a DCE pass should be able to eliminate the
      // useless instructions emitted while translating E.
      if (Result.HasSideEffects)
        Visit(E);
      return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
          ConvertType(DestTy)), DestTy);
    }
    // Since the target may map different address spaces in the AST to the same
    // address space, an address space conversion may end up as a bitcast.
    return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
        CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
        DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
  }
  case CK_AtomicToNonAtomic:
  case CK_NonAtomicToAtomic:
  case CK_UserDefinedConversion:
    return Visit(const_cast<Expr*>(E));

  case CK_NoOp: {
    llvm::Value *V = Visit(const_cast<Expr *>(E));
    if (V) {
      // CK_NoOp can model a pointer qualification conversion, which can remove
      // an array bound and change the IR type.
      // FIXME: Once pointee types are removed from IR, remove this.
      llvm::Type *T = ConvertType(DestTy);
      if (T != V->getType())
        V = Builder.CreateBitCast(V, T);
    }
    return V;
  }

  case CK_BaseToDerived: {
    const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
    assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");

    Address Base = CGF.EmitPointerWithAlignment(E);
    Address Derived =
        CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
                                     CE->path_begin(), CE->path_end(),
                                     CGF.ShouldNullCheckClassCastValue(CE));

    // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (CGF.sanitizePerformTypeCheck())
      CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
                        Derived.getPointer(), DestTy->getPointeeType());

    if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
      CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
                                    /*MayBeNull=*/true,
                                    CodeGenFunction::CFITCK_DerivedCast,
                                    CE->getBeginLoc());

    return Derived.getPointer();
  }
  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    // The EmitPointerWithAlignment path does this fine; just discard
    // the alignment.
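    // (For Derived* -> Base* this may fold in a constant non-virtual base
    // offset; any null check that is required is handled on that path too.)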
2243 return CGF.EmitPointerWithAlignment(CE).getPointer(); 2244 } 2245 2246 case CK_Dynamic: { 2247 Address V = CGF.EmitPointerWithAlignment(E); 2248 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE); 2249 return CGF.EmitDynamicCast(V, DCE); 2250 } 2251 2252 case CK_ArrayToPointerDecay: 2253 return CGF.EmitArrayToPointerDecay(E).getPointer(); 2254 case CK_FunctionToPointerDecay: 2255 return EmitLValue(E).getPointer(CGF); 2256 2257 case CK_NullToPointer: 2258 if (MustVisitNullValue(E)) 2259 CGF.EmitIgnoredExpr(E); 2260 2261 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)), 2262 DestTy); 2263 2264 case CK_NullToMemberPointer: { 2265 if (MustVisitNullValue(E)) 2266 CGF.EmitIgnoredExpr(E); 2267 2268 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>(); 2269 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT); 2270 } 2271 2272 case CK_ReinterpretMemberPointer: 2273 case CK_BaseToDerivedMemberPointer: 2274 case CK_DerivedToBaseMemberPointer: { 2275 Value *Src = Visit(E); 2276 2277 // Note that the AST doesn't distinguish between checked and 2278 // unchecked member pointer conversions, so we always have to 2279 // implement checked conversions here. This is inefficient when 2280 // actual control flow may be required in order to perform the 2281 // check, which it is for data member pointers (but not member 2282 // function pointers on Itanium and ARM). 2283 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src); 2284 } 2285 2286 case CK_ARCProduceObject: 2287 return CGF.EmitARCRetainScalarExpr(E); 2288 case CK_ARCConsumeObject: 2289 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E)); 2290 case CK_ARCReclaimReturnedObject: 2291 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored); 2292 case CK_ARCExtendBlockObject: 2293 return CGF.EmitARCExtendBlockObject(E); 2294 2295 case CK_CopyAndAutoreleaseBlockObject: 2296 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType()); 2297 2298 case CK_FloatingRealToComplex: 2299 case CK_FloatingComplexCast: 2300 case CK_IntegralRealToComplex: 2301 case CK_IntegralComplexCast: 2302 case CK_IntegralComplexToFloatingComplex: 2303 case CK_FloatingComplexToIntegralComplex: 2304 case CK_ConstructorConversion: 2305 case CK_ToUnion: 2306 llvm_unreachable("scalar cast to non-scalar value"); 2307 2308 case CK_LValueToRValue: 2309 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy)); 2310 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); 2311 return Visit(const_cast<Expr*>(E)); 2312 2313 case CK_IntegralToPointer: { 2314 Value *Src = Visit(const_cast<Expr*>(E)); 2315 2316 // First, convert to the correct width so that we control the kind of 2317 // extension. 2318 auto DestLLVMTy = ConvertType(DestTy); 2319 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy); 2320 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType(); 2321 llvm::Value* IntResult = 2322 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv"); 2323 2324 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy); 2325 2326 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) { 2327 // Going from integer to pointer that could be dynamic requires reloading 2328 // dynamic information from invariant.group. 
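      // (e.g. an (A *)(uintptr_t)p round-trip under -fstrict-vtable-pointers;
      // the launder acts as an optimization barrier so cached vtable loads
      // are not reused incorrectly across the cast.)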
      if (DestTy.mayBeDynamicClass())
        IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
    }
    return IntToPtr;
  }
  case CK_PointerToIntegral: {
    assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
    auto *PtrExpr = Visit(E);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      // Casting to integer requires stripping dynamic information as it does
      // not carry it.
      if (SrcType.mayBeDynamicClass())
        PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
    }

    return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
  }
  case CK_ToVoid: {
    CGF.EmitIgnoredExpr(E);
    return nullptr;
  }
  case CK_MatrixCast: {
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_VectorSplat: {
    llvm::Type *DstTy = ConvertType(DestTy);
    Value *Elt = Visit(const_cast<Expr *>(E));
    // Splat the element across to all elements.
    llvm::ElementCount NumElements =
        cast<llvm::VectorType>(DstTy)->getElementCount();
    return Builder.CreateVectorSplat(NumElements, Elt, "splat");
  }

  case CK_FixedPointCast:
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_FixedPointToBoolean:
    assert(E->getType()->isFixedPointType() &&
           "Expected src type to be fixed point type");
    assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_FixedPointToIntegral:
    assert(E->getType()->isFixedPointType() &&
           "Expected src type to be fixed point type");
    assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_IntegralToFixedPoint:
    assert(E->getType()->isIntegerType() &&
           "Expected src type to be an integer");
    assert(DestTy->isFixedPointType() &&
           "Expected dest type to be fixed point type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_IntegralCast: {
    ScalarConversionOpts Opts;
    if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
      if (!ICE->isPartOfExplicitCast())
        Opts = ScalarConversionOpts(CGF.SanOpts);
    }
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingCast:
  case CK_FixedPointToFloating:
  case CK_FloatingToFixedPoint: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_BooleanToSignedIntegral: {
    ScalarConversionOpts Opts;
    Opts.TreatBooleanAsSigned = true;
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }
  case CK_IntegralToBoolean:
    return EmitIntToBoolConversion(Visit(E));
  case CK_PointerToBoolean:
    return EmitPointerToBoolConversion(Visit(E), E->getType());
  case CK_FloatingToBoolean: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitFloatToBoolConversion(Visit(E));
  }
  case CK_MemberPointerToBoolean: {
    llvm::Value *MemPtr = Visit(E);
    const MemberPointerType *MPT =
E->getType()->getAs<MemberPointerType>(); 2427 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT); 2428 } 2429 2430 case CK_FloatingComplexToReal: 2431 case CK_IntegralComplexToReal: 2432 return CGF.EmitComplexExpr(E, false, true).first; 2433 2434 case CK_FloatingComplexToBoolean: 2435 case CK_IntegralComplexToBoolean: { 2436 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E); 2437 2438 // TODO: kill this function off, inline appropriate case here 2439 return EmitComplexToScalarConversion(V, E->getType(), DestTy, 2440 CE->getExprLoc()); 2441 } 2442 2443 case CK_ZeroToOCLOpaqueType: { 2444 assert((DestTy->isEventT() || DestTy->isQueueT() || 2445 DestTy->isOCLIntelSubgroupAVCType()) && 2446 "CK_ZeroToOCLEvent cast on non-event type"); 2447 return llvm::Constant::getNullValue(ConvertType(DestTy)); 2448 } 2449 2450 case CK_IntToOCLSampler: 2451 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF); 2452 2453 } // end of switch 2454 2455 llvm_unreachable("unknown scalar cast"); 2456 } 2457 2458 Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) { 2459 CodeGenFunction::StmtExprEvaluation eval(CGF); 2460 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(), 2461 !E->getType()->isVoidType()); 2462 if (!RetAlloca.isValid()) 2463 return nullptr; 2464 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()), 2465 E->getExprLoc()); 2466 } 2467 2468 Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { 2469 CodeGenFunction::RunCleanupsScope Scope(CGF); 2470 Value *V = Visit(E->getSubExpr()); 2471 // Defend against dominance problems caused by jumps out of expression 2472 // evaluation through the shared cleanup block. 2473 Scope.ForceCleanup({&V}); 2474 return V; 2475 } 2476 2477 //===----------------------------------------------------------------------===// 2478 // Unary Operators 2479 //===----------------------------------------------------------------------===// 2480 2481 static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, 2482 llvm::Value *InVal, bool IsInc, 2483 FPOptions FPFeatures) { 2484 BinOpInfo BinOp; 2485 BinOp.LHS = InVal; 2486 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false); 2487 BinOp.Ty = E->getType(); 2488 BinOp.Opcode = IsInc ? BO_Add : BO_Sub; 2489 BinOp.FPFeatures = FPFeatures; 2490 BinOp.E = E; 2491 return BinOp; 2492 } 2493 2494 llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior( 2495 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) { 2496 llvm::Value *Amount = 2497 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true); 2498 StringRef Name = IsInc ? "inc" : "dec"; 2499 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 2500 case LangOptions::SOB_Defined: 2501 return Builder.CreateAdd(InVal, Amount, Name); 2502 case LangOptions::SOB_Undefined: 2503 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 2504 return Builder.CreateNSWAdd(InVal, Amount, Name); 2505 [[fallthrough]]; 2506 case LangOptions::SOB_Trapping: 2507 if (!E->canOverflow()) 2508 return Builder.CreateNSWAdd(InVal, Amount, Name); 2509 return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec( 2510 E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts()))); 2511 } 2512 llvm_unreachable("Unknown SignedOverflowBehaviorTy"); 2513 } 2514 2515 namespace { 2516 /// Handles check and update for lastprivate conditional variables. 
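/// (Sketch of the intent: for OpenMP 'lastprivate(conditional: x)', the RAII
/// destructor fires after the update has been emitted, so the runtime check
/// observes the new value of the sub-expression.)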
2517 class OMPLastprivateConditionalUpdateRAII { 2518 private: 2519 CodeGenFunction &CGF; 2520 const UnaryOperator *E; 2521 2522 public: 2523 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF, 2524 const UnaryOperator *E) 2525 : CGF(CGF), E(E) {} 2526 ~OMPLastprivateConditionalUpdateRAII() { 2527 if (CGF.getLangOpts().OpenMP) 2528 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional( 2529 CGF, E->getSubExpr()); 2530 } 2531 }; 2532 } // namespace 2533 2534 llvm::Value * 2535 ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, 2536 bool isInc, bool isPre) { 2537 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E); 2538 QualType type = E->getSubExpr()->getType(); 2539 llvm::PHINode *atomicPHI = nullptr; 2540 llvm::Value *value; 2541 llvm::Value *input; 2542 2543 int amount = (isInc ? 1 : -1); 2544 bool isSubtraction = !isInc; 2545 2546 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) { 2547 type = atomicTy->getValueType(); 2548 if (isInc && type->isBooleanType()) { 2549 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type); 2550 if (isPre) { 2551 Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified()) 2552 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent); 2553 return Builder.getTrue(); 2554 } 2555 // For atomic bool increment, we just store true and return it for 2556 // preincrement, do an atomic swap with true for postincrement 2557 return Builder.CreateAtomicRMW( 2558 llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True, 2559 llvm::AtomicOrdering::SequentiallyConsistent); 2560 } 2561 // Special case for atomic increment / decrement on integers, emit 2562 // atomicrmw instructions. We skip this if we want to be doing overflow 2563 // checking, and fall into the slow path with the atomic cmpxchg loop. 2564 if (!type->isBooleanType() && type->isIntegerType() && 2565 !(type->isUnsignedIntegerType() && 2566 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) && 2567 CGF.getLangOpts().getSignedOverflowBehavior() != 2568 LangOptions::SOB_Trapping) { 2569 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add : 2570 llvm::AtomicRMWInst::Sub; 2571 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add : 2572 llvm::Instruction::Sub; 2573 llvm::Value *amt = CGF.EmitToMemory( 2574 llvm::ConstantInt::get(ConvertType(type), 1, true), type); 2575 llvm::Value *old = 2576 Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt, 2577 llvm::AtomicOrdering::SequentiallyConsistent); 2578 return isPre ? Builder.CreateBinOp(op, old, amt) : old; 2579 } 2580 value = EmitLoadOfLValue(LV, E->getExprLoc()); 2581 input = value; 2582 // For every other atomic operation, we need to emit a load-op-cmpxchg loop 2583 llvm::BasicBlock *startBB = Builder.GetInsertBlock(); 2584 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn); 2585 value = CGF.EmitToMemory(value, type); 2586 Builder.CreateBr(opBB); 2587 Builder.SetInsertPoint(opBB); 2588 atomicPHI = Builder.CreatePHI(value->getType(), 2); 2589 atomicPHI->addIncoming(value, startBB); 2590 value = atomicPHI; 2591 } else { 2592 value = EmitLoadOfLValue(LV, E->getExprLoc()); 2593 input = value; 2594 } 2595 2596 // Special case of integer increment that we have to check first: bool++. 2597 // Due to promotion rules, we get: 2598 // bool++ -> bool = bool + 1 2599 // -> bool = (int)bool + 1 2600 // -> bool = ((int)bool + 1 != 0) 2601 // An interesting aspect of this is that increment is always true. 2602 // Decrement does not have this property. 
  if (isInc && type->isBooleanType()) {
    value = Builder.getTrue();

    // Most common case by far: integer increment.
  } else if (type->isIntegerType()) {
    QualType promotedType;
    bool canPerformLossyDemotionCheck = false;
    if (CGF.getContext().isPromotableIntegerType(type)) {
      promotedType = CGF.getContext().getPromotedIntegerType(type);
      assert(promotedType != type && "Shouldn't promote to the same type.");
      canPerformLossyDemotionCheck = true;
      canPerformLossyDemotionCheck &=
          CGF.getContext().getCanonicalType(type) !=
          CGF.getContext().getCanonicalType(promotedType);
      canPerformLossyDemotionCheck &=
          PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
              type, promotedType);
      assert((!canPerformLossyDemotionCheck ||
              type->isSignedIntegerOrEnumerationType() ||
              promotedType->isSignedIntegerOrEnumerationType() ||
              ConvertType(type)->getScalarSizeInBits() ==
                  ConvertType(promotedType)->getScalarSizeInBits()) &&
             "The following check expects that if we do promotion to different "
             "underlying canonical type, at least one of the types (either "
             "base or promoted) will be signed, or the bitwidths will match.");
    }
    if (CGF.SanOpts.hasOneOf(
            SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
        canPerformLossyDemotionCheck) {
      // `x += 1` (for `x` with width less than int) is modeled as
      // promotion+arithmetic+demotion, so lossy demotion is easy to catch
      // there. But inc/dec with width less than int can't overflow because of
      // promotion rules, so we normally omit the promotion+demotion, which
      // means we cannot catch the lossy "demotion". Because we still want to
      // catch these cases when the sanitizer is enabled, we perform the
      // promotion, then perform the increment/decrement in the wider type,
      // and finally perform the demotion. This will catch lossy demotions.

      value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
      Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
      // Do pass non-default ScalarConversionOpts so that sanitizer check is
      // emitted.
      value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
                                   ScalarConversionOpts(CGF.SanOpts));

      // Note that signed integer inc/dec with width less than int can't
      // overflow because of promotion rules; we're just eliding a few steps
      // here.
    } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
      value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
    } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
               CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
      value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
          E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
    } else {
      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    }

    // Next most common: pointer increment.
  } else if (const PointerType *ptr = type->getAs<PointerType>()) {
    QualType type = ptr->getPointeeType();

    // VLA types don't have constant size.
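    // For example: given 'int (*p)[n]', '++p' must advance by n ints, so the
    // element count is fetched at run time and fed into the GEP below.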
2668 if (const VariableArrayType *vla 2669 = CGF.getContext().getAsVariableArrayType(type)) { 2670 llvm::Value *numElts = CGF.getVLASize(vla).NumElts; 2671 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize"); 2672 llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType()); 2673 if (CGF.getLangOpts().isSignedOverflowDefined()) 2674 value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc"); 2675 else 2676 value = CGF.EmitCheckedInBoundsGEP( 2677 elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction, 2678 E->getExprLoc(), "vla.inc"); 2679 2680 // Arithmetic on function pointers (!) is just +-1. 2681 } else if (type->isFunctionType()) { 2682 llvm::Value *amt = Builder.getInt32(amount); 2683 2684 value = CGF.EmitCastToVoidPtr(value); 2685 if (CGF.getLangOpts().isSignedOverflowDefined()) 2686 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr"); 2687 else 2688 value = CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt, 2689 /*SignedIndices=*/false, 2690 isSubtraction, E->getExprLoc(), 2691 "incdec.funcptr"); 2692 value = Builder.CreateBitCast(value, input->getType()); 2693 2694 // For everything else, we can just do a simple increment. 2695 } else { 2696 llvm::Value *amt = Builder.getInt32(amount); 2697 llvm::Type *elemTy = CGF.ConvertTypeForMem(type); 2698 if (CGF.getLangOpts().isSignedOverflowDefined()) 2699 value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr"); 2700 else 2701 value = CGF.EmitCheckedInBoundsGEP( 2702 elemTy, value, amt, /*SignedIndices=*/false, isSubtraction, 2703 E->getExprLoc(), "incdec.ptr"); 2704 } 2705 2706 // Vector increment/decrement. 2707 } else if (type->isVectorType()) { 2708 if (type->hasIntegerRepresentation()) { 2709 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount); 2710 2711 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec"); 2712 } else { 2713 value = Builder.CreateFAdd( 2714 value, 2715 llvm::ConstantFP::get(value->getType(), amount), 2716 isInc ? "inc" : "dec"); 2717 } 2718 2719 // Floating point. 2720 } else if (type->isRealFloatingType()) { 2721 // Add the inc/dec to the real part. 2722 llvm::Value *amt; 2723 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); 2724 2725 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { 2726 // Another special case: half FP increment should be done via float 2727 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { 2728 value = Builder.CreateCall( 2729 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, 2730 CGF.CGM.FloatTy), 2731 input, "incdec.conv"); 2732 } else { 2733 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv"); 2734 } 2735 } 2736 2737 if (value->getType()->isFloatTy()) 2738 amt = llvm::ConstantFP::get(VMContext, 2739 llvm::APFloat(static_cast<float>(amount))); 2740 else if (value->getType()->isDoubleTy()) 2741 amt = llvm::ConstantFP::get(VMContext, 2742 llvm::APFloat(static_cast<double>(amount))); 2743 else { 2744 // Remaining types are Half, LongDouble, __ibm128 or __float128. Convert 2745 // from float. 2746 llvm::APFloat F(static_cast<float>(amount)); 2747 bool ignored; 2748 const llvm::fltSemantics *FS; 2749 // Don't use getFloatTypeSemantics because Half isn't 2750 // necessarily represented using the "half" LLVM type. 
2751 if (value->getType()->isFP128Ty()) 2752 FS = &CGF.getTarget().getFloat128Format(); 2753 else if (value->getType()->isHalfTy()) 2754 FS = &CGF.getTarget().getHalfFormat(); 2755 else if (value->getType()->isPPC_FP128Ty()) 2756 FS = &CGF.getTarget().getIbm128Format(); 2757 else 2758 FS = &CGF.getTarget().getLongDoubleFormat(); 2759 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored); 2760 amt = llvm::ConstantFP::get(VMContext, F); 2761 } 2762 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec"); 2763 2764 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { 2765 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { 2766 value = Builder.CreateCall( 2767 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, 2768 CGF.CGM.FloatTy), 2769 value, "incdec.conv"); 2770 } else { 2771 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv"); 2772 } 2773 } 2774 2775 // Fixed-point types. 2776 } else if (type->isFixedPointType()) { 2777 // Fixed-point types are tricky. In some cases, it isn't possible to 2778 // represent a 1 or a -1 in the type at all. Piggyback off of 2779 // EmitFixedPointBinOp to avoid having to reimplement saturation. 2780 BinOpInfo Info; 2781 Info.E = E; 2782 Info.Ty = E->getType(); 2783 Info.Opcode = isInc ? BO_Add : BO_Sub; 2784 Info.LHS = value; 2785 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false); 2786 // If the type is signed, it's better to represent this as +(-1) or -(-1), 2787 // since -1 is guaranteed to be representable. 2788 if (type->isSignedFixedPointType()) { 2789 Info.Opcode = isInc ? BO_Sub : BO_Add; 2790 Info.RHS = Builder.CreateNeg(Info.RHS); 2791 } 2792 // Now, convert from our invented integer literal to the type of the unary 2793 // op. This will upscale and saturate if necessary. This value can become 2794 // undef in some cases. 2795 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder); 2796 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty); 2797 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema); 2798 value = EmitFixedPointBinOp(Info); 2799 2800 // Objective-C pointer types. 2801 } else { 2802 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>(); 2803 value = CGF.EmitCastToVoidPtr(value); 2804 2805 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType()); 2806 if (!isInc) size = -size; 2807 llvm::Value *sizeValue = 2808 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity()); 2809 2810 if (CGF.getLangOpts().isSignedOverflowDefined()) 2811 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr"); 2812 else 2813 value = CGF.EmitCheckedInBoundsGEP( 2814 CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction, 2815 E->getExprLoc(), "incdec.objptr"); 2816 value = Builder.CreateBitCast(value, input->getType()); 2817 } 2818 2819 if (atomicPHI) { 2820 llvm::BasicBlock *curBlock = Builder.GetInsertBlock(); 2821 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); 2822 auto Pair = CGF.EmitAtomicCompareExchange( 2823 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc()); 2824 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type); 2825 llvm::Value *success = Pair.second; 2826 atomicPHI->addIncoming(old, curBlock); 2827 Builder.CreateCondBr(success, contBB, atomicPHI->getParent()); 2828 Builder.SetInsertPoint(contBB); 2829 return isPre ? value : input; 2830 } 2831 2832 // Store the updated result through the lvalue. 
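  // (For bit-fields the store may truncate; the extra out-parameter receives
  // the value as actually stored, keeping a pre-increment result exact.)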
2833 if (LV.isBitField()) 2834 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value); 2835 else 2836 CGF.EmitStoreThroughLValue(RValue::get(value), LV); 2837 2838 // If this is a postinc, return the value read from memory, otherwise use the 2839 // updated value. 2840 return isPre ? value : input; 2841 } 2842 2843 2844 Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E, 2845 QualType PromotionType) { 2846 QualType promotionTy = PromotionType.isNull() 2847 ? getPromotionType(E->getSubExpr()->getType()) 2848 : PromotionType; 2849 Value *result = VisitPlus(E, promotionTy); 2850 if (result && !promotionTy.isNull()) 2851 result = EmitUnPromotedValue(result, E->getType()); 2852 return result; 2853 } 2854 2855 Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E, 2856 QualType PromotionType) { 2857 // This differs from gcc, though, most likely due to a bug in gcc. 2858 TestAndClearIgnoreResultAssign(); 2859 if (!PromotionType.isNull()) 2860 return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType); 2861 return Visit(E->getSubExpr()); 2862 } 2863 2864 Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E, 2865 QualType PromotionType) { 2866 QualType promotionTy = PromotionType.isNull() 2867 ? getPromotionType(E->getSubExpr()->getType()) 2868 : PromotionType; 2869 Value *result = VisitMinus(E, promotionTy); 2870 if (result && !promotionTy.isNull()) 2871 result = EmitUnPromotedValue(result, E->getType()); 2872 return result; 2873 } 2874 2875 Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E, 2876 QualType PromotionType) { 2877 TestAndClearIgnoreResultAssign(); 2878 Value *Op; 2879 if (!PromotionType.isNull()) 2880 Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType); 2881 else 2882 Op = Visit(E->getSubExpr()); 2883 2884 // Generate a unary FNeg for FP ops. 2885 if (Op->getType()->isFPOrFPVectorTy()) 2886 return Builder.CreateFNeg(Op, "fneg"); 2887 2888 // Emit unary minus with EmitSub so we handle overflow cases etc. 2889 BinOpInfo BinOp; 2890 BinOp.RHS = Op; 2891 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType()); 2892 BinOp.Ty = E->getType(); 2893 BinOp.Opcode = BO_Sub; 2894 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); 2895 BinOp.E = E; 2896 return EmitSub(BinOp); 2897 } 2898 2899 Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) { 2900 TestAndClearIgnoreResultAssign(); 2901 Value *Op = Visit(E->getSubExpr()); 2902 return Builder.CreateNot(Op, "not"); 2903 } 2904 2905 Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { 2906 // Perform vector logical not on comparison with zero vector. 2907 if (E->getType()->isVectorType() && 2908 E->getType()->castAs<VectorType>()->getVectorKind() == 2909 VectorType::GenericVector) { 2910 Value *Oper = Visit(E->getSubExpr()); 2911 Value *Zero = llvm::Constant::getNullValue(Oper->getType()); 2912 Value *Result; 2913 if (Oper->getType()->isFPOrFPVectorTy()) { 2914 CodeGenFunction::CGFPOptionsRAII FPOptsRAII( 2915 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts())); 2916 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp"); 2917 } else 2918 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp"); 2919 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); 2920 } 2921 2922 // Compare operand to zero. 2923 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr()); 2924 2925 // Invert value. 2926 // TODO: Could dynamically modify easy computations here. 
  // Invert value.
  // TODO: Could dynamically modify easy computations here. For example, if
  // the operand is an icmp ne, turn into icmp eq.
  BoolVal = Builder.CreateNot(BoolVal, "lnot");

  // ZExt result to the expr type.
  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
}

Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
  // Try folding the offsetof to a constant.
  Expr::EvalResult EVResult;
  if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
    llvm::APSInt Value = EVResult.Val.getInt();
    return Builder.getInt(Value);
  }

  // Loop over the components of the offsetof to compute the value.
  unsigned n = E->getNumComponents();
  llvm::Type* ResultType = ConvertType(E->getType());
  llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned i = 0; i != n; ++i) {
    OffsetOfNode ON = E->getComponent(i);
    llvm::Value *Offset = nullptr;
    switch (ON.getKind()) {
    case OffsetOfNode::Array: {
      // Compute the index
      Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
      llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
      bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
      Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");

      // Save the element type
      CurrentType =
          CGF.getContext().getAsArrayType(CurrentType)->getElementType();

      // Compute the element size
      llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
          CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());

      // Multiply out to compute the result
      Offset = Builder.CreateMul(Idx, ElemSize);
      break;
    }

    case OffsetOfNode::Field: {
      FieldDecl *MemberDecl = ON.getField();
      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Compute the index of the field in its parent.
      unsigned i = 0;
      // FIXME: It would be nice if we didn't have to loop here!
      for (RecordDecl::field_iterator Field = RD->field_begin(),
                                      FieldEnd = RD->field_end();
           Field != FieldEnd; ++Field, ++i) {
        if (*Field == MemberDecl)
          break;
      }
      assert(i < RL.getFieldCount() && "offsetof field in wrong type");

      // Compute the offset to the field
      int64_t OffsetInt = RL.getFieldOffset(i) /
                          CGF.getContext().getCharWidth();
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt);

      // Save the element type.
      CurrentType = MemberDecl->getType();
      break;
    }

    case OffsetOfNode::Identifier:
      llvm_unreachable("dependent __builtin_offsetof");

    case OffsetOfNode::Base: {
      if (ON.getBase()->isVirtual()) {
        CGF.ErrorUnsupported(E, "virtual base in offsetof");
        continue;
      }

      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Save the element type.
      CurrentType = ON.getBase()->getType();
      // Compute the offset to the base.
      auto *BaseRT = CurrentType->castAs<RecordType>();
      auto *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
      CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
      break;
    }
    }
    Result = Builder.CreateAdd(Result, Offset);
  }
  return Result;
}

/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
/// argument of the sizeof expression as an integer.
Value *
ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
    const UnaryExprOrTypeTraitExpr *E) {
  QualType TypeToSize = E->getTypeOfArgument();
  if (E->getKind() == UETT_SizeOf) {
    if (const VariableArrayType *VAT =
            CGF.getContext().getAsVariableArrayType(TypeToSize)) {
      if (E->isArgumentType()) {
        // sizeof(type) - make sure to emit the VLA size.
        CGF.EmitVariablyModifiedType(TypeToSize);
      } else {
        // C99 6.5.3.4p2: If the argument is an expression of type
        // VLA, it is evaluated.
        CGF.EmitIgnoredExpr(E->getArgumentExpr());
      }

      auto VlaSize = CGF.getVLASize(VAT);
      llvm::Value *size = VlaSize.NumElts;

      // Scale the number of non-VLA elements by the non-VLA element size.
      CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
      if (!eltSize.isOne())
        size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);

      return size;
    }
  } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
    auto Alignment =
        CGF.getContext()
            .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                E->getTypeOfArgument()->getPointeeType()))
            .getQuantity();
    return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
  }

  // If this isn't sizeof(vla), the result must be constant; use the constant
  // folding logic so we don't have to duplicate it here.
  return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
}
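// For example, given `int a[n]; sizeof(a)`, the element count %n was saved
// when the VLA type was emitted, and the function above reduces to roughly:
//
//   %size = mul nuw i64 4, %n    ; sizeof(int) * n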
Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
                                         QualType PromotionType) {
  QualType promotionTy = PromotionType.isNull()
                             ? getPromotionType(E->getSubExpr()->getType())
                             : PromotionType;
  Value *result = VisitReal(E, promotionTy);
  if (result && !promotionTy.isNull())
    result = EmitUnPromotedValue(result, E->getType());
  return result;
}

Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
                                    QualType PromotionType) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (E->isGLValue()) {
      if (!PromotionType.isNull()) {
        CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
            Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
        if (result.first)
          result.first = CGF.EmitPromotedValue(result, PromotionType).first;
        return result.first;
      } else {
        return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
            .getScalarVal();
      }
    }
    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, false, true).first;
  }

  if (!PromotionType.isNull())
    return CGF.EmitPromotedScalarExpr(Op, PromotionType);
  return Visit(Op);
}

Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
                                         QualType PromotionType) {
  QualType promotionTy = PromotionType.isNull()
                             ? getPromotionType(E->getSubExpr()->getType())
                             : PromotionType;
  Value *result = VisitImag(E, promotionTy);
  if (result && !promotionTy.isNull())
    result = EmitUnPromotedValue(result, E->getType());
  return result;
}

Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
                                    QualType PromotionType) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (Op->isGLValue()) {
      if (!PromotionType.isNull()) {
        CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
            Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
        if (result.second)
          result.second = CGF.EmitPromotedValue(result, PromotionType).second;
        return result.second;
      } else {
        return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
            .getScalarVal();
      }
    }
    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, true, false).second;
  }

  // __imag on a scalar returns zero. Emit the subexpr to ensure side
  // effects are evaluated, but not the actual value.
  if (Op->isGLValue())
    CGF.EmitLValue(Op);
  else if (!PromotionType.isNull())
    CGF.EmitPromotedScalarExpr(Op, PromotionType);
  else
    CGF.EmitScalarExpr(Op, true);
  if (!PromotionType.isNull())
    return llvm::Constant::getNullValue(ConvertType(PromotionType));
  return llvm::Constant::getNullValue(ConvertType(E->getType()));
}

//===----------------------------------------------------------------------===//
//                           Binary Operators
//===----------------------------------------------------------------------===//

Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
                                            QualType PromotionType) {
  return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
}

Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
                                              QualType ExprType) {
  return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType),
                                   "unpromotion");
}
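// These two helpers implement the excess-precision promotion used by the
// Visit*/Emit* wrappers above: when arithmetic on a narrow FP type (e.g.
// _Float16 on targets that evaluate it in float) is promoted, operands are
// widened once and the final result narrowed once, roughly:
//
//   %a.ext = fpext half %a to float
//   %b.ext = fpext half %b to float
//   %add = fadd float %a.ext, %b.ext
//   %unpromotion = fptrunc float %add to half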
Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
  E = E->IgnoreParens();
  if (auto BO = dyn_cast<BinaryOperator>(E)) {
    switch (BO->getOpcode()) {
#define HANDLE_BINOP(OP)                                                       \
  case BO_##OP:                                                                \
    return Emit##OP(EmitBinOps(BO, PromotionType));
      HANDLE_BINOP(Add)
      HANDLE_BINOP(Sub)
      HANDLE_BINOP(Mul)
      HANDLE_BINOP(Div)
#undef HANDLE_BINOP
    default:
      break;
    }
  } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
    switch (UO->getOpcode()) {
    case UO_Imag:
      return VisitImag(UO, PromotionType);
    case UO_Real:
      return VisitReal(UO, PromotionType);
    case UO_Minus:
      return VisitMinus(UO, PromotionType);
    case UO_Plus:
      return VisitPlus(UO, PromotionType);
    default:
      break;
    }
  }
  auto result = Visit(const_cast<Expr *>(E));
  if (result) {
    if (!PromotionType.isNull())
      return EmitPromotedValue(result, PromotionType);
    else
      return EmitUnPromotedValue(result, E->getType());
  }
  return result;
}

BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
                                        QualType PromotionType) {
  TestAndClearIgnoreResultAssign();
  BinOpInfo Result;
  Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
  Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
  if (!PromotionType.isNull())
    Result.Ty = PromotionType;
  else
    Result.Ty = E->getType();
  Result.Opcode = E->getOpcode();
  Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  Result.E = E;
  return Result;
}

LValue ScalarExprEmitter::EmitCompoundAssignLValue(
    const CompoundAssignOperator *E,
    Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
    Value *&Result) {
  QualType LHSTy = E->getLHS()->getType();
  BinOpInfo OpInfo;

  if (E->getComputationResultType()->isAnyComplexType())
    return CGF.EmitScalarCompoundAssignWithComplex(E, Result);

  // Emit the RHS first. __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.

  QualType PromotionTypeCR;
  PromotionTypeCR = getPromotionType(E->getComputationResultType());
  if (PromotionTypeCR.isNull())
    PromotionTypeCR = E->getComputationResultType();
  QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
  QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
  if (!PromotionTypeRHS.isNull())
    OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
  else
    OpInfo.RHS = Visit(E->getRHS());
  OpInfo.Ty = PromotionTypeCR;
  OpInfo.Opcode = E->getOpcode();
  OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  OpInfo.E = E;
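  // The overall shape is: evaluate the RHS, load the LHS, widen the LHS to
  // the computation type, apply the operator, narrow the result back, and
  // store it. E.g. for `short s; s += 1;` the addition is performed in int
  // and the result is truncated back to short by the conversions below.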
  // Load/convert the LHS.
  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

  llvm::PHINode *atomicPHI = nullptr;
  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
    QualType type = atomicTy->getValueType();
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
      llvm::Instruction::BinaryOps Op;
      switch (OpInfo.Opcode) {
      // We don't have atomicrmw operands for *, %, /, <<, >>
      case BO_MulAssign: case BO_DivAssign:
      case BO_RemAssign:
      case BO_ShlAssign:
      case BO_ShrAssign:
        break;
      case BO_AddAssign:
        AtomicOp = llvm::AtomicRMWInst::Add;
        Op = llvm::Instruction::Add;
        break;
      case BO_SubAssign:
        AtomicOp = llvm::AtomicRMWInst::Sub;
        Op = llvm::Instruction::Sub;
        break;
      case BO_AndAssign:
        AtomicOp = llvm::AtomicRMWInst::And;
        Op = llvm::Instruction::And;
        break;
      case BO_XorAssign:
        AtomicOp = llvm::AtomicRMWInst::Xor;
        Op = llvm::Instruction::Xor;
        break;
      case BO_OrAssign:
        AtomicOp = llvm::AtomicRMWInst::Or;
        Op = llvm::Instruction::Or;
        break;
      default:
        llvm_unreachable("Invalid compound assignment type");
      }
      if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
        llvm::Value *Amt = CGF.EmitToMemory(
            EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
                                 E->getExprLoc()),
            LHSTy);
        Value *OldVal = Builder.CreateAtomicRMW(
            AtomicOp, LHSLV.getPointer(CGF), Amt,
            llvm::AtomicOrdering::SequentiallyConsistent);

        // Since operation is atomic, the result type is guaranteed to be the
        // same as the input in LLVM terms.
        Result = Builder.CreateBinOp(Op, OldVal, Amt);
        return LHSLV;
      }
    }
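    // E.g. in the default configuration, `_Atomic int x; x += 2;` takes the
    // atomicrmw fast path above and is emitted roughly as:
    //
    //   %old = atomicrmw add ptr %x, i32 2 seq_cst
    //   %result = add i32 %old, 2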
    // FIXME: For floating point types, we should be saving and restoring the
    // floating point environment in the loop.
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
    OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
    atomicPHI->addIncoming(OpInfo.LHS, startBB);
    OpInfo.LHS = atomicPHI;
  }
  else
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
  SourceLocation Loc = E->getExprLoc();
  if (!PromotionTypeLHS.isNull())
    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
                                      E->getExprLoc());
  else
    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
                                      E->getComputationLHSType(), Loc);

  // Expand the binary operator.
  Result = (this->*Func)(OpInfo);

  // Convert the result back to the LHS type,
  // potentially with Implicit Conversion sanitizer check.
  Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
                                ScalarConversionOpts(CGF.SanOpts));

  if (atomicPHI) {
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, curBlock);
    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return LHSLV;
  }

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (LHSLV.isBitField())
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
  else
    CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);

  if (CGF.getLangOpts().OpenMP)
    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
                                                                  E->getLHS());
  return LHSLV;
}

Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
                      Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  Value *RHS = nullptr;
  LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;
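  // E.g. in C++, `volatile int v; int x = (v += 1);` cannot simply reuse the
  // computed value: the assignment yields the volatile lvalue, so v is
  // reloaded below after the store.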
  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}

void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
    const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;

  if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
    Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
                                    SanitizerKind::IntegerDivideByZero));
  }

  const auto *BO = cast<BinaryOperator>(Ops.E);
  if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
      Ops.Ty->hasSignedIntegerRepresentation() &&
      !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
      Ops.mayHaveIntegerOverflow()) {
    llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());

    llvm::Value *IntMin =
        Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
    llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);

    llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
    llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
    llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
    Checks.push_back(
        std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
  }

  if (Checks.size() > 0)
    EmitBinOpCheck(Checks, Ops);
}
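// E.g. for `int a, b; a / b` with both sanitizers enabled, the emitted guard
// is roughly the predicate:
//
//   (b != 0) && ((a != INT_MIN) || (b != -1))
//
// covering division by zero and the one signed-division overflow case,
// INT_MIN / -1.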
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
  {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
         CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
        Ops.Ty->isIntegerType() &&
        (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
    } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
               Ops.Ty->isRealFloatingType() &&
               Ops.mayHaveFloatDivisionByZero()) {
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
      EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
                     Ops);
    }
  }

  if (Ops.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    // We need to check the types of the operands of the operator to get the
    // correct matrix dimensions.
    auto *BO = cast<BinaryOperator>(Ops.E);
    (void)BO;
    assert(
        isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
        "first operand must be a matrix");
    assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
           "second operand must be an arithmetic type");
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
    return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
                              Ops.Ty->hasUnsignedIntegerRepresentation());
  }

  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
    llvm::Value *Val;
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
    Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
    if ((CGF.getLangOpts().OpenCL &&
         !CGF.CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
        (CGF.getLangOpts().HIP && CGF.getLangOpts().CUDAIsDevice &&
         !CGF.CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
      // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
      // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
      // build option allows an application to specify that single precision
      // floating-point divide (x/y and 1/x) and sqrt used in the program
      // source are correctly rounded.
      llvm::Type *ValTy = Val->getType();
      if (ValTy->isFloatTy() ||
          (isa<llvm::VectorType>(ValTy) &&
           cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
        CGF.SetFPAccuracy(Val, 2.5);
    }
    return Val;
  }
  else if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);
  else if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
  else
    return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
}

Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
  // Rem in C can't be a floating point type: C99 6.5.5p2.
  if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
       CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
      Ops.Ty->isIntegerType() &&
      (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
    EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
  else
    return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
}
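// The checked paths below are built on the llvm.*.with.overflow intrinsics;
// e.g. a checked signed int addition is emitted roughly as:
//
//   %pair = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %result = extractvalue { i32, i1 } %pair, 0
//   %overflow = extractvalue { i32, i1 } %pair, 1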
Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  unsigned OpID = 0;
  SanitizerHandler OverflowKind;

  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
  switch (Ops.Opcode) {
  case BO_Add:
  case BO_AddAssign:
    OpID = 1;
    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow
                   : llvm::Intrinsic::uadd_with_overflow;
    OverflowKind = SanitizerHandler::AddOverflow;
    break;
  case BO_Sub:
  case BO_SubAssign:
    OpID = 2;
    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow
                   : llvm::Intrinsic::usub_with_overflow;
    OverflowKind = SanitizerHandler::SubOverflow;
    break;
  case BO_Mul:
  case BO_MulAssign:
    OpID = 3;
    IID = isSigned ? llvm::Intrinsic::smul_with_overflow
                   : llvm::Intrinsic::umul_with_overflow;
    OverflowKind = SanitizerHandler::MulOverflow;
    break;
  default:
    llvm_unreachable("Unsupported operation for overflow detection");
  }
  OpID <<= 1;
  if (isSigned)
    OpID |= 1;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);

  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Handle overflow with llvm.trap if no custom handler has been specified.
  const std::string *handlerName =
      &CGF.getLangOpts().OverflowHandler;
  if (handlerName->empty()) {
    // If the signed-integer-overflow sanitizer is enabled, emit a call to its
    // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
    if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
      llvm::Value *NotOverflow = Builder.CreateNot(overflow);
      SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
                                    : SanitizerKind::UnsignedIntegerOverflow;
      EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
    } else
      CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
    return result;
  }

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *continueBB =
      CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // If an overflow handler is set, then we want to call it and then use its
  // result, if it returns.
  Builder.SetInsertPoint(overflowBB);

  // Get the overflow handler.
  llvm::Type *Int8Ty = CGF.Int8Ty;
  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
  llvm::FunctionType *handlerTy =
      llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
  llvm::FunctionCallee handler =
      CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);

  // Sign extend the args to 64-bit, so that we can use the same handler for
  // all types of overflow.
  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);

  // Call the handler with the two arguments, the operation, and the size of
  // the result.
  llvm::Value *handlerArgs[] = {
    lhs,
    rhs,
    Builder.getInt8(OpID),
    Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
  };
  llvm::Value *handlerResult =
      CGF.EmitNounwindRuntimeCall(handler, handlerArgs);

  // Truncate the result back to the desired size.
  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
  Builder.CreateBr(continueBB);

  Builder.SetInsertPoint(continueBB);
  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);

  return phi;
}
/// Emit pointer + index arithmetic.
static Value *emitPointerArithmetic(CodeGenFunction &CGF,
                                    const BinOpInfo &op,
                                    bool isSubtraction) {
  // Must have binary (not unary) expr here. Unary pointer
  // increment/decrement doesn't use this path.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);

  Value *pointer = op.LHS;
  Expr *pointerOperand = expr->getLHS();
  Value *index = op.RHS;
  Expr *indexOperand = expr->getRHS();

  // In a subtraction, the LHS is always the pointer.
  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }

  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();

  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
  auto &DL = CGF.CGM.getDataLayout();
  auto PtrTy = cast<llvm::PointerType>(pointer->getType());

  // Some versions of glibc and gcc use idioms (particularly in their malloc
  // routines) that add a pointer-sized integer (known to be a pointer value)
  // to a null pointer in order to cast the value back to an integer or as
  // part of a pointer alignment algorithm. This is undefined behavior, but
  // we'd like to be able to compile programs that use it.
  //
  // Normally, we'd generate a GEP with a null-pointer base here in response
  // to that code, but it's also UB to dereference a pointer created that
  // way. Instead (as an acknowledged hack to tolerate the idiom) we will
  // generate a direct cast of the integer value to a pointer.
  //
  // The idiom (p = nullptr + N) is not met if any of the following are true:
  //
  //   The operation is subtraction.
  //   The index is not pointer-sized.
  //   The pointer type is not byte-sized.
  //
  if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
                                                       op.Opcode,
                                                       expr->getLHS(),
                                                       expr->getRHS()))
    return CGF.Builder.CreateIntToPtr(index, pointer->getType());
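  // E.g. `(char *)0 + n` (with pointer-sized n) is emitted as a plain
  //
  //   %p = inttoptr i64 %n to ptr
  //
  // rather than as a GEP on a null base.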
  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
    // Zero-extend or sign-extend the pointer value according to
    // whether the index is signed or not.
    index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
                                      "idx.ext");
  }

  // If this is subtraction, negate the index.
  if (isSubtraction)
    index = CGF.Builder.CreateNeg(index, "idx.neg");

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
                        /*Accessed*/ false);

  const PointerType *pointerType
    = pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    QualType objectType = pointerOperand->getType()
                              ->castAs<ObjCObjectPointerType>()
                              ->getPointeeType();
    llvm::Value *objectSize
      = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));

    index = CGF.Builder.CreateMul(index, objectSize);

    Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
    result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  QualType elementType = pointerType->getPointeeType();
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = CGF.getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
    if (CGF.getLangOpts().isSignedOverflowDefined()) {
      index = CGF.Builder.CreateMul(index, numElements, "vla.index");
      pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
    } else {
      index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
      pointer = CGF.EmitCheckedInBoundsGEP(
          elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
          "add.ptr");
    }
    return pointer;
  }

  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
  // future proof.
  if (elementType->isVoidType() || elementType->isFunctionType()) {
    Value *result = CGF.EmitCastToVoidPtr(pointer);
    result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  llvm::Type *elemTy = CGF.ConvertTypeForMem(elementType);
  if (CGF.getLangOpts().isSignedOverflowDefined())
    return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");

  return CGF.EmitCheckedInBoundsGEP(
      elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
      "add.ptr");
}

// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
// Addend. Use negMul and negAdd to negate the first operand of the Mul or
// the add operand respectively. This allows fmuladd to represent a*b-c, or
// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
// efficient operations.
static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
                           const CodeGenFunction &CGF, CGBuilderTy &Builder,
                           bool negMul, bool negAdd) {
  assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");

  Value *MulOp0 = MulOp->getOperand(0);
  Value *MulOp1 = MulOp->getOperand(1);
  if (negMul)
    MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
  if (negAdd)
    Addend = Builder.CreateFNeg(Addend, "neg");

  Value *FMulAdd = nullptr;
  if (Builder.getIsFPConstrained()) {
    assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
           "Only constrained operation should be created when Builder is in FP "
           "constrained mode");
    FMulAdd = Builder.CreateConstrainedFPCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
                             Addend->getType()),
        {MulOp0, MulOp1, Addend});
  } else {
    FMulAdd = Builder.CreateCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
        {MulOp0, MulOp1, Addend});
  }
  MulOp->eraseFromParent();

  return FMulAdd;
}
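// E.g. under the default -ffp-contract=on, `a * b + c` on doubles (where the
// multiply has no other use) becomes roughly:
//
//   %0 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)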
// Check whether it would be legal to emit an fmuladd intrinsic call to
// represent op and if so, build the fmuladd.
//
// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
// Does NOT check the type of the operation - it's assumed that this function
// will be called from contexts where it's known that the type is contractable.
static Value* tryEmitFMulAdd(const BinOpInfo &op,
                             const CodeGenFunction &CGF, CGBuilderTy &Builder,
                             bool isSub = false) {

  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
         "Only fadd/fsub can be the root of an fmuladd.");

  // Check whether this op is marked as fusable.
  if (!op.FPFeatures.allowFPContractWithinStatement())
    return nullptr;

  // We have a potentially fusable op. Look for a mul on one of the operands.
  // Also, make sure that the mul result isn't used directly. In that case,
  // there's no point creating a muladd operation.
  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        LHSBinOp->use_empty())
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
  }
  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        RHSBinOp->use_empty())
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
  }

  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) {
    if (LHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        LHSBinOp->use_empty())
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
  }
  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) {
    if (RHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        RHSBinOp->use_empty())
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
  }

  return nullptr;
}

Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
  if (op.LHS->getType()->isPointerTy() ||
      op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);

  if (op.Ty->isSignedIntegerOrEnumerationType()) {
    switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      return Builder.CreateAdd(op.LHS, op.RHS, "add");
    case LangOptions::SOB_Undefined:
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
    case LangOptions::SOB_Trapping:
      if (CanElideOverflowCheck(CGF.getContext(), op))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      return EmitOverflowCheckedBinOp(op);
    }
  }

  if (op.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return MB.CreateAdd(op.LHS, op.RHS);
  }

  if (op.Ty->isUnsignedIntegerType() &&
      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
      !CanElideOverflowCheck(CGF.getContext(), op))
    return EmitOverflowCheckedBinOp(op);

  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    // Try to form an fmuladd.
    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
      return FMulAdd;

    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
  }

  if (op.isFixedPointOp())
    return EmitFixedPointBinOp(op);

  return Builder.CreateAdd(op.LHS, op.RHS, "add");
}
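// The signed integer `+` above thus lowers differently per overflow mode,
// roughly:
//
//   -fwrapv (SOB_Defined):   add i32 %a, %b
//   default (SOB_Undefined): add nsw i32 %a, %b
//   -ftrapv / -fsanitize=signed-integer-overflow: the checked path in
//   EmitOverflowCheckedBinOp above.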
/// The resulting value must be calculated with exact precision, so the operands
/// may not be the same type.
Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
  using llvm::APSInt;
  using llvm::ConstantInt;

  // This is either a binary operation where at least one of the operands is
  // a fixed-point type, or a unary operation where the operand is a fixed-point
  // type. The result type of a binary operation is determined by
  // Sema::handleFixedPointConversions().
  QualType ResultTy = op.Ty;
  QualType LHSTy, RHSTy;
  if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
    RHSTy = BinOp->getRHS()->getType();
    if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
      // For compound assignment, the effective type of the LHS at this point
      // is the computation LHS type, not the actual LHS type, and the final
      // result type is not the type of the expression but rather the
      // computation result type.
      LHSTy = CAO->getComputationLHSType();
      ResultTy = CAO->getComputationResultType();
    } else
      LHSTy = BinOp->getLHS()->getType();
  } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
    LHSTy = UnOp->getSubExpr()->getType();
    RHSTy = UnOp->getSubExpr()->getType();
  }
  ASTContext &Ctx = CGF.getContext();
  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
  auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
  auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
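  // E.g. for `_Accum a; _Fract f; a + f`, the common semantics are wide
  // enough to hold both operand formats exactly; the operation is performed
  // there and the result is converted to the result type at the end of this
  // function.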
  // Perform the actual operation.
  Value *Result;
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  switch (op.Opcode) {
  case BO_AddAssign:
  case BO_Add:
    Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_SubAssign:
  case BO_Sub:
    Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_MulAssign:
  case BO_Mul:
    Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_DivAssign:
  case BO_Div:
    Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_ShlAssign:
  case BO_Shl:
    Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
    break;
  case BO_ShrAssign:
  case BO_Shr:
    Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
    break;
  case BO_LT:
    return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GT:
    return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_LE:
    return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GE:
    return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_EQ:
    // For equality operations, we assume any padding bits on unsigned types are
    // zero'd out. They could be overwritten through non-saturating operations
    // that cause overflow, but this leads to undefined behavior.
    return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_NE:
    return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_Cmp:
  case BO_LAnd:
  case BO_LOr:
    llvm_unreachable("Found unimplemented fixed point binary operation");
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Rem:
  case BO_Xor:
  case BO_And:
  case BO_Or:
  case BO_Assign:
  case BO_RemAssign:
  case BO_AndAssign:
  case BO_XorAssign:
  case BO_OrAssign:
  case BO_Comma:
    llvm_unreachable("Found unsupported binary operation for fixed point types.");
  }

  bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
                 BinaryOperator::isShiftAssignOp(op.Opcode);
  // Convert to the result type.
  return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
                                                      : CommonFixedSema,
                                      ResultFixedSema);
}

Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
  // The LHS is always a pointer if either side is.
  if (!op.LHS->getType()->isPointerTy()) {
    if (op.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateSub(op.LHS, op.RHS, "sub");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), op))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        return EmitOverflowCheckedBinOp(op);
      }
    }

    if (op.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return MB.CreateSub(op.LHS, op.RHS);
    }

    if (op.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), op))
      return EmitOverflowCheckedBinOp(op);

    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      // Try to form an fmuladd.
      if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
        return FMulAdd;
      return Builder.CreateFSub(op.LHS, op.RHS, "sub");
    }

    if (op.isFixedPointOp())
      return EmitFixedPointBinOp(op);

    return Builder.CreateSub(op.LHS, op.RHS, "sub");
  }

  // If the RHS is not a pointer, then we have normal pointer
  // arithmetic.
  if (!op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);

  // Otherwise, this is a pointer subtraction.

  // Do the raw subtraction part.
  llvm::Value *LHS
    = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
  llvm::Value *RHS
    = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
  Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");

  // Okay, figure out the element size.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
  QualType elementType = expr->getLHS()->getType()->getPointeeType();

  llvm::Value *divisor = nullptr;

  // For a variable-length array, this is going to be non-constant.
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    auto VlaSize = CGF.getVLASize(vla);
    elementType = VlaSize.Type;
    divisor = VlaSize.NumElts;

    // Scale the number of non-VLA elements by the non-VLA element size.
    CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
    if (!eltSize.isOne())
      divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);

  // For everything else, we can just compute it, safe in the
  // assumption that Sema won't let anything through that we can't
  // safely compute the size of.
  } else {
    CharUnits elementSize;
    // Handle GCC extension for pointer arithmetic on void* and
    // function pointer types.
    if (elementType->isVoidType() || elementType->isFunctionType())
      elementSize = CharUnits::One();
    else
      elementSize = CGF.getContext().getTypeSizeInChars(elementType);

    // Don't even emit the divide for element size of 1.
    if (elementSize.isOne())
      return diffInChars;

    divisor = CGF.CGM.getSize(elementSize);
  }

  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
  // pointer difference in C is only defined in the case where both operands
  // are pointing to elements of an array.
  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}

Value *ScalarExprEmitter::GetWidthMinusOneValue(Value *LHS, Value *RHS) {
  llvm::IntegerType *Ty;
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());
  return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
}

Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
                                              const Twine &Name) {
  llvm::IntegerType *Ty;
  if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());

  if (llvm::isPowerOf2_64(Ty->getBitWidth()))
    return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name);

  return Builder.CreateURem(
      RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
}
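// E.g. for a 32-bit LHS under OpenCL, `x << s` is emitted with the shift
// amount masked to the bit width, roughly:
//
//   %shl.mask = and i32 %s, 31
//   %shl = shl i32 %x, %shl.mask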
Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
                            Ops.Ty->hasSignedIntegerRepresentation() &&
                            !CGF.getLangOpts().isSignedOverflowDefined() &&
                            !CGF.getLangOpts().CPlusPlus20;
  bool SanitizeUnsignedBase =
      CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
      Ops.Ty->hasUnsignedIntegerRepresentation();
  bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
  else if ((SanitizeBase || SanitizeExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
    llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
    llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);

    if (SanitizeExponent) {
      Checks.push_back(
          std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
    }

    if (SanitizeBase) {
      // Check whether we are shifting any non-zero bits off the top of the
      // integer. We only emit this check if exponent is valid - otherwise
      // instructions below will have undefined behavior themselves.
      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
      llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
      llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
      Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
      llvm::Value *PromotedWidthMinusOne =
          (RHS == Ops.RHS) ? WidthMinusOne
                           : GetWidthMinusOneValue(Ops.LHS, RHS);
      CGF.EmitBlock(CheckShiftBase);
      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
          Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
                                     /*NUW*/ true, /*NSW*/ true),
          "shl.check");
      if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
        // In C99, we are not permitted to shift a 1 bit into the sign bit.
        // Under C++11's rules, shifting a 1 bit into the sign bit is
        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
        // define signed left shifts, so we use the C99 and C++11 rules there).
        // Unsigned shifts can always shift into the top bit.
        llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
        BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
      }
      llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
      llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
      CGF.EmitBlock(Cont);
      llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
      BaseCheck->addIncoming(Builder.getTrue(), Orig);
      BaseCheck->addIncoming(ValidBase, CheckShiftBase);
      Checks.push_back(std::make_pair(
          BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase
                                        : SanitizerKind::UnsignedShiftBase));
    }

    assert(!Checks.empty());
    EmitBinOpCheck(Checks, Ops);
  }

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}
Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
  else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *Valid =
        Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
    EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateLShr(Ops.LHS, RHS, "shr");
  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
}

enum IntrinsicType { VCMPEQ, VCMPGT };
// Return the corresponding comparison intrinsic for the given vector type.
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
                                        BuiltinType::Kind ElemKind) {
  switch (ElemKind) {
  default: llvm_unreachable("unexpected element type");
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
  case BuiltinType::UShort:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
  case BuiltinType::Short:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
  case BuiltinType::UInt:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
  case BuiltinType::Int:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
  case BuiltinType::ULong:
  case BuiltinType::ULongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
  case BuiltinType::Long:
  case BuiltinType::LongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
  case BuiltinType::Float:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
  case BuiltinType::Double:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p
                          : llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
  case BuiltinType::UInt128:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
  case BuiltinType::Int128:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
  }
}
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      llvm::CmpInst::Predicate UICmpOpc,
                                      llvm::CmpInst::Predicate SICmpOpc,
                                      llvm::CmpInst::Predicate FCmpOpc,
                                      bool IsSignaling) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    assert(E->getOpcode() == BO_EQ ||
           E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
    Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    BinOpInfo BOInfo = EmitBinOps(E);
    Value *LHS = BOInfo.LHS;
    Value *RHS = BOInfo.RHS;

    // If AltiVec, the comparison results in a numeric type, so we use
    // intrinsics that compare vectors and return 0 or 1 as a result.
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // Constants for mapping CR6 register bits to the predicate result.
      enum { CR6_EQ = 0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // In several cases the vector argument order will be reversed.
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();

      switch(E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of the intrinsic may not be the same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing; in that case, leaving ResultTy wider than i1 would cause
      // a crash later.
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                  E->getExprLoc());
    }

    if (BOInfo.isFixedPointOp()) {
      Result = EmitFixedPointBinOp(BOInfo);
    } else if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
      if (!IsSignaling)
        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
      else
        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
          !isa<llvm::ConstantPointerNull>(LHS) &&
          !isa<llvm::ConstantPointerNull>(RHS)) {

        // Dynamic information is required to be stripped for comparisons,
        // because it could leak the dynamic information. Based on comparisons
        // of pointers to dynamic objects, the optimizer can replace one pointer
        // with another, which might be incorrect in presence of invariant
        // groups. Comparison with null is safe because null does not carry any
        // dynamic information.
        if (LHSTy.mayBeDynamicClass())
          LHS = Builder.CreateStripInvariantGroup(LHS);
        if (RHSTy.mayBeDynamicClass())
          RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the appropriate
    // vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    CodeGenFunction::ComplexPairTy LHS, RHS;
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      // As complex comparisons can only be equality comparisons, they
      // are never signaling comparisons.
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BO_EQ) {
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}

Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  switch (E->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_Strong:
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

  case Qualifiers::OCL_Autoreleasing:
    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

  case Qualifiers::OCL_ExplicitNone:
    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

  case Qualifiers::OCL_Weak:
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
    break;

  case Qualifiers::OCL_None:
    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the
  // assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}

Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
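  // For vector operands this emits, roughly (illustrative value names):
  //   %lhs.ne = icmp ne <N x iK> %lhs, zeroinitializer
  //   %rhs.ne = icmp ne <N x iK> %rhs, zeroinitializer
  //   %and    = and <N x i1> %lhs.ne, %rhs.ne
  //   %sext   = sext <N x i1> %and to <N x iK>
  // (fcmp une is used instead for floating-point elements).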
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS; if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.
      CGF.incrementProfileCounter(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch
      // condition coverage. In this case, use "FBlock" as both the final
      // "TrueBlock" and "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
        Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.incrementProfileCounter(E->getRHS());
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      }

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::Constant::getNullValue(ResTy);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false.
  // Start setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
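  // (Evaluating the RHS can itself emit control flow, e.g. for a nested
  // `?:` or `&&`, leaving the builder positioned in a different block.)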
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track
  // branch condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit a line number for an unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  // Artificial location to preserve the scope information.
  {
    auto NL = ApplyDebugLocation::CreateArtificial(CGF);
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}

Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS; if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.
      CGF.incrementProfileCounter(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch
      // condition coverage. In this case, use "FBlock" as both the final
      // "TrueBlock" and "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
        Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.incrementProfileCounter(E->getRHS());
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      }

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::ConstantInt::get(ResTy, 1);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                           CGF.getCurrentProfileCount() -
                               CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be true.
  // Start setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track
  // branch condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
    Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock. Insert an
  // entry into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}

Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  CGF.EnsureInsertPoint();
  return Visit(E->getRHS());
}

//===----------------------------------------------------------------------===//
// Other Operators
//===----------------------------------------------------------------------===//

/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally.
/// This is used to convert control flow into selects in some cases.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
                                                   CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the
  // scope outside the lambda, that function may have returned already.
  // Reading its locals is a bad idea. Also, these reads may introduce races
  // that didn't exist in the source-level program.
}

Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the live part.
    if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool)
        CGF.incrementProfileCounter(E);
      Value *Result = Visit(live);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
  if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
      condExpr->getType()->isExtVectorType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *condType = ConvertType(condExpr->getType());
    auto *vecTy = cast<llvm::FixedVectorType>(condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(
        TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
    llvm::Value *tmp2 = Builder.CreateNot(tmp);

    // Cast float to int to perform ANDs if necessary.
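    // The net effect below is a bitwise select:
    //   result = (LHS & mask) | (RHS & ~mask)
    // where mask = sext(Cond < 0) replicates each element's sign bit.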
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }

    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }

  if (condExpr->getType()->isVectorType() ||
      condExpr->getType()->isVLSTBuiltinType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *CondType = ConvertType(condExpr->getType());
    auto *VecTy = cast<llvm::VectorType>(CondType);
    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);

    CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
    return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap
  // and safe to evaluate the LHS and RHS unconditionally.
  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
      isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);

    CGF.incrementProfileCounter(E, StepV);

    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);
    if (!LHS) {
      // If the conditional has void type, make sure we return a null Value*.
      assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }
    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
                           CGF.getProfileCount(lhsExpr));

  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  eval.begin(CGF);
  Value *LHS = Visit(lhsExpr);
  eval.end(CGF);

  LHSBlock = Builder.GetInsertBlock();
  Builder.CreateBr(ContBlock);

  CGF.EmitBlock(RHSBlock);
  eval.begin(CGF);
  Value *RHS = Visit(rhsExpr);
  eval.end(CGF);

  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBlock(ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node for the result of the conditional.
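  // Each incoming value is paired with the final block of its arm, as
  // reacquired from GetInsertBlock above, since visiting an arm may have
  // emitted additional blocks.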
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);
  return PN;
}

Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
  return Visit(E->getChosenSubExpr());
}

Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  QualType Ty = VE->getType();

  if (Ty->isVariablyModifiedType())
    CGF.EmitVariablyModifiedType(Ty);

  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  llvm::Type *ArgTy = ConvertType(VE->getType());

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "va_arg expression");
    return llvm::UndefValue::get(ArgTy);
  }

  // FIXME Volatility.
  llvm::Value *Val = Builder.CreateLoad(ArgPtr);

  // If EmitVAArg promoted the type, we must truncate it.
  if (ArgTy != Val->getType()) {
    if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
      Val = Builder.CreateIntToPtr(Val, ArgTy);
    else
      Val = Builder.CreateTrunc(Val, ArgTy);
  }

  return Val;
}

Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
  return CGF.EmitBlockLiteral(block);
}

// Convert a vec3 to vec4, or vice versa.
static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                 Value *Src, unsigned NumElementsDst) {
  static constexpr int Mask[] = {0, 1, 2, -1};
  return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
}

// Create cast instructions for converting LLVM value \p Src to LLVM type \p
// DstTy. \p Src has the same size as \p DstTy. Both are single value types
// but could be scalars or vectors of different lengths, and either can be a
// pointer.
// There are 4 cases:
// 1. non-pointer -> non-pointer  : needs 1 bitcast
// 2. pointer -> pointer          : needs 1 bitcast or addrspacecast
// 3. pointer -> non-pointer
//   a) pointer -> intptr_t       : needs 1 ptrtoint
//   b) pointer -> non-intptr_t   : needs 1 ptrtoint then 1 bitcast
// 4. non-pointer -> pointer
//   a) intptr_t -> pointer       : needs 1 inttoptr
//   b) non-intptr_t -> pointer   : needs 1 bitcast then 1 inttoptr
// Note: for cases 3b and 4b two casts are required since LLVM casts do not
// allow casting directly between pointer types and non-integer non-pointer
// types.
static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
                                           const llvm::DataLayout &DL,
                                           Value *Src, llvm::Type *DstTy,
                                           StringRef Name = "") {
  auto SrcTy = Src->getType();

  // Case 1.
  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
    return Builder.CreateBitCast(Src, DstTy, Name);

  // Case 2.
  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
    return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);

  // Case 3.
  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
    // Case 3b.
    if (!DstTy->isIntegerTy())
      Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
    // Cases 3a and 3b.
    return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
  }

  // Case 4b.
  if (!SrcTy->isIntegerTy())
    Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
  // Cases 4a and 4b.
  return Builder.CreateIntToPtr(Src, DstTy, Name);
}

Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
  llvm::Type *DstTy = ConvertType(E->getType());

  llvm::Type *SrcTy = Src->getType();
  unsigned NumElementsSrc =
      isa<llvm::VectorType>(SrcTy)
          ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
          : 0;
  unsigned NumElementsDst =
      isa<llvm::VectorType>(DstTy)
          ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
          : 0;

  // Use bit vector expansion for ext_vector_type boolean vectors.
  if (E->getType()->isExtVectorBoolType())
    return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");

  // Going from vec3 to non-vec3 is a special case and requires a shuffle
  // vector to get a vec4, then a bitcast if the target type is different.
  if (NumElementsSrc == 3 && NumElementsDst != 3) {
    Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       DstTy);

    Src->setName("astype");
    return Src;
  }

  // Going from non-vec3 to vec3 is a special case and requires a bitcast
  // to vec4 if the original type is not vec4, then a shuffle vector to
  // get a vec3.
  if (NumElementsSrc != 3 && NumElementsDst == 3) {
    auto *Vec4Ty = llvm::FixedVectorType::get(
        cast<llvm::VectorType>(DstTy)->getElementType(), 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       Vec4Ty);

    Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
    Src->setName("astype");
    return Src;
  }

  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
                                      Src, DstTy, "astype");
}

Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
  return CGF.EmitAtomicExpr(E).getScalarVal();
}

//===----------------------------------------------------------------------===//
// Entry Point into this File
//===----------------------------------------------------------------------===//

/// Emit the computation of the specified expression of scalar type and
/// return the result as an LLVM value.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
  assert(E && hasScalarEvaluationKind(E->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, IgnoreResultAssign)
      .Visit(const_cast<Expr *>(E));
}

/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy,
                                             SourceLocation Loc) {
  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
         "Invalid scalar expression to emit");
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
}

/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy,
                                                      SourceLocation Loc) {
  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this)
      .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
}

Value *
CodeGenFunction::EmitPromotedScalarExpr(const Expr *E,
                                        QualType PromotionType) {
  if (!PromotionType.isNull())
    return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
  else
    return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
}

llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
}

LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
  // object->isa or (*object).isa
  // Generate code as for: *(Class*)object

  Expr *BaseExpr = E->getBase();
  Address Addr = Address::invalid();
  if (BaseExpr->isPRValue()) {
    llvm::Type *BaseTy =
        ConvertTypeForMem(BaseExpr->getType()->getPointeeType());
    Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
  } else {
    Addr = EmitLValue(BaseExpr).getAddress(*this);
  }

  // Cast the address to Class*.
  Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
  return MakeAddrLValue(Addr, E->getType());
}

LValue CodeGenFunction::EmitCompoundAssignmentLValue(
    const CompoundAssignOperator *E) {
  ScalarExprEmitter Scalar(*this);
  Value *Result = nullptr;
  switch (E->getOpcode()) {
#define COMPOUND_OP(Op)                                                        \
  case BO_##Op##Assign:                                                        \
    return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op,    \
                                           Result)
  COMPOUND_OP(Mul);
  COMPOUND_OP(Div);
  COMPOUND_OP(Rem);
  COMPOUND_OP(Add);
  COMPOUND_OP(Sub);
  COMPOUND_OP(Shl);
  COMPOUND_OP(Shr);
  COMPOUND_OP(And);
  COMPOUND_OP(Xor);
  COMPOUND_OP(Or);
#undef COMPOUND_OP

  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }

  llvm_unreachable("Unhandled compound assignment operator");
}

struct GEPOffsetAndOverflow {
  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset;
  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows;
};

/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
/// and compute the total offset it applies from its base pointer BasePtr.
/// Returns the offset in bytes and a boolean flag indicating whether an
/// overflow happened during evaluation.
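///
/// For example (an illustrative sketch, value names invented): for
///   %gep = getelementptr inbounds i32, ptr %base, i64 %idx
/// the offset is `%idx * 4`, computed via smul.with.overflow so that the
/// flag is set if the multiplication wraps.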
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
                                                 llvm::LLVMContext &VMContext,
                                                 CodeGenModule &CGM,
                                                 CGBuilderTy &Builder) {
  const auto &DL = CGM.getDataLayout();

  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset = nullptr;

  // Was the GEP already reduced to a constant?
  if (isa<llvm::Constant>(GEPVal)) {
    // Compute the offset by casting both pointers to integers and subtracting:
    // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
    Value *BasePtr_int =
        Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
    Value *GEPVal_int =
        Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
    TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
    return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
  }

  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
  assert(GEP->getPointerOperand() == BasePtr &&
         "BasePtr must be the base of the GEP.");
  assert(GEP->isInBounds() && "Expected inbounds GEP");

  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());

  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
  auto *SAddIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
  auto *SMulIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);

  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows = Builder.getFalse();

  /// Return the result of the given binary operation.
  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
                  llvm::Value *RHS) -> llvm::Value * {
    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");

    // If the operands are constants, return a constant result.
    if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
      if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
        llvm::APInt N;
        bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
                                                  /*Signed=*/true, N);
        if (HasOverflow)
          OffsetOverflows = Builder.getTrue();
        return llvm::ConstantInt::get(VMContext, N);
      }
    }

    // Otherwise, compute the result with checked arithmetic.
    auto *ResultAndOverflow = Builder.CreateCall(
        (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
    OffsetOverflows = Builder.CreateOr(
        Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
    return Builder.CreateExtractValue(ResultAndOverflow, 0);
  };

  // Determine the total byte offset by looking at each GEP operand.
  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    llvm::Value *LocalOffset;
    auto *Index = GTI.getOperand();
    // Compute the local offset contributed by this indexing step:
    if (auto *STy = GTI.getStructTypeOrNull()) {
      // For struct indexing, the local offset is the byte position of the
      // specified field.
      unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
      LocalOffset = llvm::ConstantInt::get(
          IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
    } else {
      // Otherwise this is array-like indexing. The local offset is the index
      // multiplied by the element size.
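      // The index is first sign-extended to intptr_t, since GEP indices are
      // treated as signed and a negative index must stay negative here.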
      auto *ElementSize = llvm::ConstantInt::get(
          IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
      auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
    }

    // If this is the first offset, set it as the total offset. Otherwise,
    // add the local offset into the running total.
    if (!TotalOffset || TotalOffset == Zero)
      TotalOffset = LocalOffset;
    else
      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
  }

  return {TotalOffset, OffsetOverflows};
}

Value *
CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
                                        ArrayRef<Value *> IdxList,
                                        bool SignedIndices, bool IsSubtraction,
                                        SourceLocation Loc, const Twine &Name) {
  llvm::Type *PtrTy = Ptr->getType();
  Value *GEPVal = Builder.CreateInBoundsGEP(ElemTy, Ptr, IdxList, Name);

  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;

  // Perform the nullptr-and-offset check unless the nullptr is defined.
  bool PerformNullCheck = !NullPointerIsDefined(
      Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
  // Check for overflows unless the GEP got constant-folded, and only in the
  // default address space.
  bool PerformOverflowCheck =
      !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;

  if (!(PerformNullCheck || PerformOverflowCheck))
    return GEPVal;

  const auto &DL = CGM.getDataLayout();

  SanitizerScope SanScope(this);
  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  GEPOffsetAndOverflow EvaluatedGEP =
      EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);

  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
         "If the offset got constant-folded, we don't expect that there was "
         "an overflow.");

  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);

  // Common case: if the total offset is zero, and we are using C++ semantics,
  // where nullptr+0 is defined, don't emit a check.
  if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
    return GEPVal;

  // Now that we've computed the total offset, add it to the base pointer
  // (with wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);

  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;

  if (PerformNullCheck) {
    // In C++, if the base pointer evaluates to a null pointer value,
    // the only valid pointer this inbounds GEP can produce is also
    // a null pointer, so the offset must also evaluate to zero.
    // Likewise, if we have a non-zero base pointer, we cannot get a null
    // pointer as a result, so the offset cannot be -intptr_t(BasePtr).
    // In other words, both pointers are either null, or both are non-null,
    // or the behaviour is undefined.
    //
    // C, however, is more strict in this regard, and gives more
    // optimization opportunities: in C, additionally, nullptr+0 is undefined.
    // So both the input to the 'gep inbounds' AND the output must not be null.
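    // Concretely: in C++ the check is `(base != null) == (result != null)`,
    // while in C both must be non-null, i.e. `(base != null) & (result != null)`.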
    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
    auto *Valid =
        CGM.getLangOpts().CPlusPlus
            ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
            : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
    Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
  }

  if (PerformOverflowCheck) {
    // The GEP is valid if:
    // 1) The total offset doesn't overflow, and
    // 2) The sign of the difference between the computed address and the base
    //    pointer matches the sign of the total offset.
    llvm::Value *ValidGEP;
    auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
    if (SignedIndices) {
      // The GEP is computed as `unsigned base + signed offset`, therefore:
      // * If the offset was positive, then the computed pointer cannot be
      //   [unsigned] less than the base pointer, unless it overflowed.
      // * If the offset was negative, then the computed pointer cannot be
      //   [unsigned] greater than the base pointer, unless it overflowed.
      auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
      auto *PosOrZeroOffset =
          Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
      llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
      ValidGEP =
          Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
    } else if (!IsSubtraction) {
      // The GEP is computed as `unsigned base + unsigned offset`, therefore
      // the computed pointer cannot be [unsigned] less than the base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
      ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    } else {
      // The GEP is computed as `unsigned base - unsigned offset`, therefore
      // the computed pointer cannot be [unsigned] greater than the base
      // pointer, unless there was an overflow.
      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
      ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
    }
    ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
    Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
  }

  assert(!Checks.empty() && "Should have produced some checks.");

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);

  return GEPVal;
}