//===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

static cl::opt<unsigned> MinimumJumpTableEntries
  ("min-jump-table-entries", cl::init(4), cl::Hidden,
   cl::desc("Set minimum number of entries to use a jump table."));

static cl::opt<unsigned> MaximumJumpTableSize
  ("max-jump-table-size", cl::init(UINT_MAX), cl::Hidden,
   cl::desc("Set maximum size of jump tables."));

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
    JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                     cl::desc("Minimum density for building a jump table in "
                              "a normal function"));

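// Illustrative note: these density knobs gate jump-table formation in
// isSuitableForJumpTable() further down, via the check
//   NumCases * 100 >= Range * MinDensity.
// For example, a switch with 10 cases spread over a case-value range of 200
// has a density of 5%, which fails the default 10% threshold above (and, a
// fortiori, the 40% optsize threshold below), so no jump table is built.
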
/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned> OptsizeJumpTableDensity(
    "optsize-jump-table-density", cl::init(40), cl::Hidden,
    cl::desc("Minimum density for building a jump table in "
             "an optsize function"));

// FIXME: This option exists only to test that strict fp operations are
// processed correctly, by preventing the mutation of strict fp operations
// into normal fp operations during development. Once the backends support
// strict float operations, this option will become meaningless.
static cl::opt<bool> DisableStrictNodeMutation("disable-strictnode-mutation",
       cl::desc("Don't mutate strict-float nodes to legal nodes"),
       cl::init(false), cl::Hidden);

static bool darwinHasSinCos(const Triple &TT) {
  assert(TT.isOSDarwin() && "should be called with darwin triple");
  // Don't bother with 32 bit x86.
  if (TT.getArch() == Triple::x86)
    return false;
  // macOS < 10.9 has no sincos_stret.
  if (TT.isMacOSX())
    return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
  // iOS < 7.0 has no sincos_stret.
  if (TT.isiOS())
    return !TT.isOSVersionLT(7, 0);
  // Any other darwin such as WatchOS/TvOS is new enough.
  return true;
}

void TargetLoweringBase::InitLibcalls(const Triple &TT) {
#define HANDLE_LIBCALL(code, name)                                             \
  setLibcallName(RTLIB::code, name);
#include "llvm/IR/RuntimeLibcalls.def"
#undef HANDLE_LIBCALL
  // Initialize calling conventions to their default.
  for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
    setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);

  // For IEEE quad-precision libcall names, PPC uses "kf" instead of "tf".
  if (TT.isPPC()) {
    setLibcallName(RTLIB::ADD_F128, "__addkf3");
    setLibcallName(RTLIB::SUB_F128, "__subkf3");
    setLibcallName(RTLIB::MUL_F128, "__mulkf3");
    setLibcallName(RTLIB::DIV_F128, "__divkf3");
    setLibcallName(RTLIB::POWI_F128, "__powikf2");
    setLibcallName(RTLIB::FPEXT_F32_F128, "__extendsfkf2");
    setLibcallName(RTLIB::FPEXT_F64_F128, "__extenddfkf2");
    setLibcallName(RTLIB::FPROUND_F128_F32, "__trunckfsf2");
    setLibcallName(RTLIB::FPROUND_F128_F64, "__trunckfdf2");
    setLibcallName(RTLIB::FPTOSINT_F128_I32, "__fixkfsi");
    setLibcallName(RTLIB::FPTOSINT_F128_I64, "__fixkfdi");
    setLibcallName(RTLIB::FPTOSINT_F128_I128, "__fixkfti");
    setLibcallName(RTLIB::FPTOUINT_F128_I32, "__fixunskfsi");
    setLibcallName(RTLIB::FPTOUINT_F128_I64, "__fixunskfdi");
    setLibcallName(RTLIB::FPTOUINT_F128_I128, "__fixunskfti");
    setLibcallName(RTLIB::SINTTOFP_I32_F128, "__floatsikf");
    setLibcallName(RTLIB::SINTTOFP_I64_F128, "__floatdikf");
    setLibcallName(RTLIB::SINTTOFP_I128_F128, "__floattikf");
    setLibcallName(RTLIB::UINTTOFP_I32_F128, "__floatunsikf");
    setLibcallName(RTLIB::UINTTOFP_I64_F128, "__floatundikf");
    setLibcallName(RTLIB::UINTTOFP_I128_F128, "__floatuntikf");
    setLibcallName(RTLIB::OEQ_F128, "__eqkf2");
    setLibcallName(RTLIB::UNE_F128, "__nekf2");
    setLibcallName(RTLIB::OGE_F128, "__gekf2");
    setLibcallName(RTLIB::OLT_F128, "__ltkf2");
    setLibcallName(RTLIB::OLE_F128, "__lekf2");
    setLibcallName(RTLIB::OGT_F128, "__gtkf2");
    setLibcallName(RTLIB::UO_F128, "__unordkf2");
  }

  // A few names are different on particular architectures or environments.
  if (TT.isOSDarwin()) {
    // For f16/f32 conversions, Darwin uses the standard naming scheme,
    // instead of the gnueabi-style __gnu_*_ieee.
    // FIXME: What about other targets?
    setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

    // Some darwins have an optimized __bzero/bzero function.
    switch (TT.getArch()) {
    case Triple::x86:
    case Triple::x86_64:
      if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
        setLibcallName(RTLIB::BZERO, "__bzero");
      break;
    case Triple::aarch64:
    case Triple::aarch64_32:
      setLibcallName(RTLIB::BZERO, "bzero");
      break;
    default:
      break;
    }

    if (darwinHasSinCos(TT)) {
      setLibcallName(RTLIB::SINCOS_STRET_F32, "__sincosf_stret");
      setLibcallName(RTLIB::SINCOS_STRET_F64, "__sincos_stret");
      if (TT.isWatchABI()) {
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F32,
                              CallingConv::ARM_AAPCS_VFP);
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F64,
                              CallingConv::ARM_AAPCS_VFP);
      }
    }
  } else {
    setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
  }

  if (TT.isGNUEnvironment() || TT.isOSFuchsia() ||
      (TT.isAndroid() && !TT.isAndroidVersionLT(9))) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    setLibcallName(RTLIB::SINCOS_F80, "sincosl");
    setLibcallName(RTLIB::SINCOS_F128, "sincosl");
    setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
  }

  if (TT.isPS()) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
  }

  if (TT.isOSOpenBSD()) {
    setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
  }

  if (TT.isOSWindows() && !TT.isOSCygMing()) {
    setLibcallName(RTLIB::LDEXP_F32, nullptr);
    setLibcallName(RTLIB::LDEXP_F80, nullptr);
    setLibcallName(RTLIB::LDEXP_F128, nullptr);
    setLibcallName(RTLIB::LDEXP_PPCF128, nullptr);

    setLibcallName(RTLIB::FREXP_F32, nullptr);
    setLibcallName(RTLIB::FREXP_F80, nullptr);
    setLibcallName(RTLIB::FREXP_F128, nullptr);
    setLibcallName(RTLIB::FREXP_PPCF128, nullptr);
  }
}

/// GetFPLibCall - Helper to return the right libcall for the given floating
/// point type, or UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPLibCall(EVT VT,
                                   RTLIB::Libcall Call_F32,
                                   RTLIB::Libcall Call_F64,
                                   RTLIB::Libcall Call_F80,
                                   RTLIB::Libcall Call_F128,
                                   RTLIB::Libcall Call_PPCF128) {
  return
    VT == MVT::f32 ? Call_F32 :
    VT == MVT::f64 ? Call_F64 :
    VT == MVT::f80 ? Call_F80 :
    VT == MVT::f128 ? Call_F128 :
    VT == MVT::ppcf128 ? Call_PPCF128 :
    RTLIB::UNKNOWN_LIBCALL;
}

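// Illustrative usage: the getPOWI/getLDEXP/getFREXP helpers further down
// resolve to one of the five per-type enumerators through getFPLibCall();
// e.g. getPOWI(MVT::f64) yields RTLIB::POWI_F64, while any type outside the
// five handled here yields UNKNOWN_LIBCALL.
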
/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
    if (RetVT == MVT::f64)
      return FPEXT_F16_F64;
    if (RetVT == MVT::f80)
      return FPEXT_F16_F80;
    if (RetVT == MVT::f128)
      return FPEXT_F16_F128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F32_PPCF128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
    else if (RetVT == MVT::ppcf128)
      return FPEXT_F64_PPCF128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::f128)
      return FPEXT_F80_F128;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::bf16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_BF16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_BF16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  } else if (RetVT == MVT::f80) {
    if (OpVT == MVT::f128)
      return FPROUND_F128_F80;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

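// Naming note (illustrative): these enumerators follow an OP_<SRC>_<DST>
// scheme and are bound by RuntimeLibcalls.def to the usual compiler-rt/libgcc
// conversion routines; e.g. FPTOSINT_F64_I32 typically names "__fixdfsi" and
// UINTTOFP_I64_F64 "__floatundidf".
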
/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

RTLIB::Libcall RTLIB::getPOWI(EVT RetVT) {
  return getFPLibCall(RetVT, POWI_F32, POWI_F64, POWI_F80, POWI_F128,
                      POWI_PPCF128);
}

RTLIB::Libcall RTLIB::getLDEXP(EVT RetVT) {
  return getFPLibCall(RetVT, LDEXP_F32, LDEXP_F64, LDEXP_F80, LDEXP_F128,
                      LDEXP_PPCF128);
}

RTLIB::Libcall RTLIB::getFREXP(EVT RetVT) {
  return getFPLibCall(RetVT, FREXP_F32, FREXP_F64, FREXP_F80, FREXP_F128,
                      FREXP_PPCF128);
}

RTLIB::Libcall RTLIB::getOutlineAtomicHelper(const Libcall (&LC)[5][4],
                                             AtomicOrdering Order,
                                             uint64_t MemSize) {
  unsigned ModeN, ModelN;
  switch (MemSize) {
  case 1:
    ModeN = 0;
    break;
  case 2:
    ModeN = 1;
    break;
  case 4:
    ModeN = 2;
    break;
  case 8:
    ModeN = 3;
    break;
  case 16:
    ModeN = 4;
    break;
  default:
    return RTLIB::UNKNOWN_LIBCALL;
  }

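  // Illustrative: the helper indexes a 5x4 table, with ModeN selecting the
  // access size (1/2/4/8/16 bytes) and ModelN, computed below, selecting the
  // memory ordering (relaxed/acquire/release/acq_rel). For example, on
  // AArch64 an outlined 4-byte atomic add with acquire ordering resolves to
  // OUTLINE_ATOMIC_LDADD4_ACQ, i.e. the "__aarch64_ldadd4_acq" runtime
  // helper.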
  switch (Order) {
  case AtomicOrdering::Monotonic:
    ModelN = 0;
    break;
  case AtomicOrdering::Acquire:
    ModelN = 1;
    break;
  case AtomicOrdering::Release:
    ModelN = 2;
    break;
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    ModelN = 3;
    break;
  default:
    return UNKNOWN_LIBCALL;
  }

  return LC[ModeN][ModelN];
}

RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
                                        MVT VT) {
  if (!VT.isScalarInteger())
    return UNKNOWN_LIBCALL;
  uint64_t MemSize = VT.getScalarSizeInBits() / 8;

#define LCALLS(A, B)                                                           \
  { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
#define LCALL5(A)                                                              \
  LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
  switch (Opc) {
  case ISD::ATOMIC_CMP_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_CAS)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_SWP)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_ADD: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDADD)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_OR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDSET)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_CLR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDCLR)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_XOR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDEOR)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  default:
    return UNKNOWN_LIBCALL;
  }
#undef LCALLS
#undef LCALL5
}

RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum)                                              \
  case Name:                                                                   \
    switch (VT.SimpleTy) {                                                     \
    default:                                                                   \
      return UNKNOWN_LIBCALL;                                                  \
    case MVT::i8:                                                              \
      return Enum##_1;                                                         \
    case MVT::i16:                                                             \
      return Enum##_2;                                                         \
    case MVT::i32:                                                             \
      return Enum##_4;                                                         \
    case MVT::i64:                                                             \
      return Enum##_8;                                                         \
    case MVT::i128:                                                            \
      return Enum##_16;                                                        \
    }

  switch (Opc) {
  OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
  OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}

RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

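// Illustrative: these element-wise atomic memory intrinsics lower to runtime
// calls whose names encode the element size; e.g. a memcpy with 4-byte
// unordered-atomic elements becomes "__llvm_memcpy_element_unordered_atomic_4".
// The memmove and memset variants below follow the same pattern.
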
RTLIB::Libcall
RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

/// InitCmpLibcallCCs - Set default comparison libcall CC.
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  std::fill(CCs, CCs + RTLIB::UNKNOWN_LIBCALL, ISD::SETCC_INVALID);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::UNE_F128] = ISD::SETNE;
  CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OGE_F128] = ISD::SETGE;
  CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLT_F128] = ISD::SETLT;
  CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OLE_F128] = ISD::SETLE;
  CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::OGT_F128] = ISD::SETGT;
  CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::UO_F128] = ISD::SETNE;
  CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
}

/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
      MaxLoadsPerMemcmp = 8;
  MaxGluedStoresPerMemcpy = 0;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  EnableExtLdPromotion = false;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  GatherAllAliasesMaxDepth = 18;
  IsStrictFPEnabled = DisableStrictNodeMutation;
  MaxBytesForAlignment = 0;
  // TODO: the default will be switched to 0 in the next commit, along
  // with the Target-specific changes necessary.
  MaxAtomicSizeInBitsSupported = 1024;

  // Assume that even with libcalls, no target supports wider than 128 bit
  // division.
  MaxDivRemBitWidthSupported = 128;

  MaxLargeFPConvertBitWidthSupported = llvm::IntegerType::MAX_INT_BITS;

  MinCmpXchgSizeInBits = 0;
  SupportsUnalignedAtomics = false;

  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames),
            nullptr);

  InitLibcalls(TM.getTargetTriple());
  InitCmpLibcallCCs(CmpLibcallCCs);
}

void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
  std::fill(std::begin(TargetDAGCombineArray),
            std::end(TargetDAGCombineArray), 0);

  // We're somewhat special casing MVT::i2 and MVT::i4. Ideally we want to
  // remove this and targets should individually set these types if not legal.
  for (ISD::NodeType NT : enum_seq(ISD::DELETED_NODE, ISD::BUILTIN_OP_END,
                                   force_iteration_on_noniterable_enum)) {
    for (MVT VT : {MVT::i2, MVT::i4})
      OpActions[(unsigned)VT.SimpleTy][NT] = Expand;
  }
  for (MVT AVT : MVT::all_valuetypes()) {
    for (MVT VT : {MVT::i2, MVT::i4, MVT::v128i2, MVT::v64i4}) {
      setTruncStoreAction(AVT, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, AVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, AVT, VT, Expand);
    }
  }
  for (unsigned IM = (unsigned)ISD::PRE_INC;
       IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
    for (MVT VT : {MVT::i2, MVT::i4}) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }
  }

  for (MVT VT : MVT::fp_valuetypes()) {
    MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits());
    if (IntVT.isValid()) {
      setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
      AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
    }
  }

  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node which just returns the value
    // loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

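    // A note on the Expand action used throughout initActions()
    // (illustrative): Expand asks the SelectionDAG legalizer to rewrite the
    // node in terms of other operations rather than select it directly; e.g.
    // the ATOMIC_CMP_SWAP_WITH_SUCCESS default above is rewritten into a
    // plain ATOMIC_CMP_SWAP followed by a comparison of the loaded value.
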
    // These operations default to expand.
    setOperationAction({ISD::FGETSIGN, ISD::CONCAT_VECTORS,
                        ISD::FMINNUM, ISD::FMAXNUM,
                        ISD::FMINNUM_IEEE, ISD::FMAXNUM_IEEE,
                        ISD::FMINIMUM, ISD::FMAXIMUM,
                        ISD::FMAD, ISD::SMIN,
                        ISD::SMAX, ISD::UMIN,
                        ISD::UMAX, ISD::ABS,
                        ISD::FSHL, ISD::FSHR,
                        ISD::SADDSAT, ISD::UADDSAT,
                        ISD::SSUBSAT, ISD::USUBSAT,
                        ISD::SSHLSAT, ISD::USHLSAT,
                        ISD::SMULFIX, ISD::SMULFIXSAT,
                        ISD::UMULFIX, ISD::UMULFIXSAT,
                        ISD::SDIVFIX, ISD::SDIVFIXSAT,
                        ISD::UDIVFIX, ISD::UDIVFIXSAT,
                        ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
                        ISD::IS_FPCLASS},
                       VT, Expand);

    // Overflow operations default to expand.
    setOperationAction({ISD::SADDO, ISD::SSUBO, ISD::UADDO, ISD::USUBO,
                        ISD::SMULO, ISD::UMULO},
                       VT, Expand);

    // Carry-using overflow operations default to expand.
    setOperationAction({ISD::UADDO_CARRY, ISD::USUBO_CARRY, ISD::SETCCCARRY,
                        ISD::SADDO_CARRY, ISD::SSUBO_CARRY},
                       VT, Expand);

    // ADDC/ADDE/SUBC/SUBE default to expand.
    setOperationAction({ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}, VT,
                       Expand);

    // Halving adds
    setOperationAction(
        {ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS, ISD::AVGCEILU}, VT,
        Expand);

    // Absolute difference
    setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Expand);

    // These default to Expand so they will be expanded to CTLZ/CTTZ by
    // default.
    setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
                       Expand);

    setOperationAction({ISD::BITREVERSE, ISD::PARITY}, VT, Expand);

    // These library functions default to expand.
    setOperationAction({ISD::FROUND, ISD::FPOWI, ISD::FLDEXP, ISD::FFREXP}, VT,
                       Expand);

    // These operations default to expand for vector types.
    if (VT.isVector())
      setOperationAction(
          {ISD::FCOPYSIGN, ISD::SIGN_EXTEND_INREG, ISD::ANY_EXTEND_VECTOR_INREG,
           ISD::SIGN_EXTEND_VECTOR_INREG, ISD::ZERO_EXTEND_VECTOR_INREG,
           ISD::SPLAT_VECTOR, ISD::LRINT, ISD::LLRINT},
          VT, Expand);

    // Constrained floating-point operations default to expand.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
#include "llvm/IR/ConstrainedOps.def"

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);

    // Vector reduction default to expand.
    setOperationAction(
        {ISD::VECREDUCE_FADD, ISD::VECREDUCE_FMUL, ISD::VECREDUCE_ADD,
         ISD::VECREDUCE_MUL, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR,
         ISD::VECREDUCE_XOR, ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
         ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN, ISD::VECREDUCE_FMAX,
         ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAXIMUM, ISD::VECREDUCE_FMINIMUM,
         ISD::VECREDUCE_SEQ_FADD, ISD::VECREDUCE_SEQ_FMUL},
        VT, Expand);

    // Named vector shuffles default to expand.
    setOperationAction(ISD::VECTOR_SPLICE, VT, Expand);

    // VP operations default to expand.
#define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...)                                   \
  setOperationAction(ISD::SDOPC, VT, Expand);
#include "llvm/IR/VPIntrinsics.def"

    // FP environment operations default to expand.
    setOperationAction(ISD::GET_FPENV, VT, Expand);
    setOperationAction(ISD::SET_FPENV, VT, Expand);
    setOperationAction(ISD::RESET_FPENV, VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);

  // ConstantFP nodes default to expand. Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP,
                     {MVT::bf16, MVT::f16, MVT::f32, MVT::f64, MVT::f80,
                      MVT::f128},
                     Expand);

  // These library functions default to expand.
  setOperationAction({ISD::FCBRT, ISD::FLOG, ISD::FLOG2, ISD::FLOG10, ISD::FEXP,
                      ISD::FEXP2, ISD::FEXP10, ISD::FFLOOR, ISD::FNEARBYINT,
                      ISD::FCEIL, ISD::FRINT, ISD::FTRUNC, ISD::LROUND,
                      ISD::LLROUND, ISD::LRINT, ISD::LLRINT, ISD::FROUNDEVEN},
                     {MVT::f32, MVT::f64, MVT::f128}, Expand);

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  // here is to inform the DAG Legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);

  setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);

  setOperationAction(ISD::GET_FPENV_MEM, MVT::Other, Expand);
  setOperationAction(ISD::SET_FPENV_MEM, MVT::Other, Expand);

  for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
    setOperationAction(ISD::GET_FPMODE, VT, Expand);
    setOperationAction(ISD::SET_FPMODE, VT, Expand);
  }
  setOperationAction(ISD::RESET_FPMODE, MVT::Other, Expand);
}

MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
}

EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                                         bool LegalTypes) const {
  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  MVT ShiftVT =
      LegalTypes ? getScalarShiftAmountTy(DL, LHSTy) : getPointerTy(DL);
  // If any possible shift value won't fit in the preferred type, just use
  // something safe. Assume it will be legalized when the shift is expanded.
  if (ShiftVT.getSizeInBits() < Log2_32_Ceil(LHSTy.getSizeInBits()))
    ShiftVT = MVT::i32;
  assert(ShiftVT.getSizeInBits() >= Log2_32_Ceil(LHSTy.getSizeInBits()) &&
         "ShiftVT is still too small!");
  return ShiftVT;
}

bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

bool TargetLoweringBase::isFreeAddrSpaceCast(unsigned SrcAS,
                                             unsigned DestAS) const {
  return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
}

void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}

TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
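  // Overview (illustrative, assuming a hypothetical target whose only legal
  // scalar integer type is i32): getTypeConversion(i8) would return
  // (TypePromoteInteger, i32), getTypeConversion(i64) would return
  // (TypeExpandInteger, i32), and <3 x i32> would first widen to <4 x i32>,
  // which is then legalized in turn.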
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < std::size(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            LA == TypeSoftPromoteHalf ||
            (NVT.isVector() ||
             ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA, EVT(SVT).getHalfNumVectorElementsVT(Context));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle Extended Scalar Types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }

  // Handle vector types.
  ElementCount NumElts = VT.getVectorElementCount();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
  if (NumElts.isScalar())
    return LegalizeKind(TypeScalarizeVector, EltVT);

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = NumElts.coefficientNextPowerOf2();
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If the type is to be expanded, split the vector.
    //  <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger) {
      if (VT.getVectorElementCount().isScalable())
        return LegalizeKind(TypeScalarizeScalableVector, EltVT);
      return LegalizeKind(TypeSplitVector,
                          VT.getHalfNumVectorElementsVT(Context));
    }

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fall back to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (true) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: on 32-bit systems, X86 XMM registers hold 64-bit
      // elements.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (true) {
    // Round up to the next power of 2.
    NumElts = NumElts.coefficientNextPowerOf2();

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type. Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen odd vectors to next power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  if (VT.getVectorElementCount() == ElementCount::getScalable(1))
    return LegalizeKind(TypeScalarizeScalableVector, EltVT);

  // Vectors with illegal element types are expanded.
  EVT NVT = EVT::getVectorVT(Context, EltVT,
                             VT.getVectorElementCount().divideCoefficientBy(2));
  return LegalizeKind(TypeSplitVector, NVT);
}

static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  ElementCount EC = VT.getVectorElementCount();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so splitting or widening is
  // required.
  if (VT.isScalableVector() && !isPowerOf2_32(EC.getKnownMinValue()))
    llvm_unreachable(
        "Splitting or widening of non-power-of-2 MVTs is not implemented.");

  // FIXME: We don't support non-power-of-2-sized vectors for now.
  // Ideally we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EC.getKnownMinValue())) {
    // Split EC to unit size (scalable property is preserved).
    NumVectorRegs = EC.getKnownMinValue();
    EC = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will
  // always end up with an EC that represents a scalar or a scalable
  // scalar.
  while (EC.getKnownMinValue() > 1 &&
         !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
    EC = EC.divideCoefficientBy(2);
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, EC);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned LaneSizeInBits = NewVT.getScalarSizeInBits();

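  // Worked example (illustrative, assuming a 32-bit target with no legal
  // vector types): MVT::v3i64 is first split into 3 unit-sized parts, none of
  // which forms a legal vector, so IntermediateVT becomes i64 and
  // NumIntermediates 3; with i64 expanded into two i32 registers, the
  // function returns 3 * (64 / 32) = 6 registers.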
  // Convert sizes such as i33 to i64.
  LaneSizeInBits = llvm::bit_ceil(LaneSizeInBits);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs * (LaneSizeInBits / DestVT.getScalarSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass &RC) const {
  for (const auto *I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
    if (isTypeLegal(*I))
      return true;
  return false;
}

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock *
TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
                                   MachineBasicBlock *MBB) const {
  MachineInstr *MI = &InitialMI;
  MachineFunction &MF = *MI->getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through is handled already (the live-through ones are
  // all stack slots), but we need to handle the different types of stackmap
  // operands and memory effects here.

  if (llvm::none_of(MI->operands(),
                    [](MachineOperand &Operand) { return Operand.isFI(); }))
    return MBB;

  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

  // Inherit previous memory operands.
  MIB.cloneMemRefs(*MI);

  for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isFI()) {
      // Index of the Def operand this Use is tied to.
      // Since Defs come before Uses, if a Use is tied, the index of its Def
      // must be smaller than the index of that Use.
      // Also, Defs preserve their position in the new MI.
      unsigned TiedTo = i;
      if (MO.isReg() && MO.isTied())
        TiedTo = MI->findTiedOperandIdx(i);
      MIB.add(MO);
      if (TiedTo < i)
        MIB->tieOperands(TiedTo, MIB->getNumOperands() - 1);
      continue;
    }

    // foldMemoryOperand builds a new MI after replacing a single FI operand
    // with the canonical set of five x86 addressing-mode operands.
    int FI = MO.getIndex();

    // Add frame index operands recognized by stackmaps.cpp
    if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
      // indirect-mem-ref tag, size, #FI, offset.
      // Used for spills inserted by StatepointLowering. This codepath is not
      // used for patchpoints/stackmaps at all; for those, spilling is done via
      // the foldMemoryOperand callback only.
      assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(MFI.getObjectSize(FI));
      MIB.add(MO);
      MIB.addImm(0);
    } else {
      // direct-mem-ref tag, #FI, offset.
      // Used by patchpoint, and direct alloca arguments to statepoints
      MIB.addImm(StackMaps::DirectMemRefOp);
      MIB.add(MO);
      MIB.addImm(0);
    }

    assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");

    // Add a new memory operand for this FI.
    assert(MFI.getObjectOffset(FI) != -1);

    // Note: STATEPOINT MMOs are added during SelectionDAG. STACKMAP and
    // PATCHPOINT should be updated to do the same (TODO).
    if (MI->getOpcode() != TargetOpcode::STATEPOINT) {
      auto Flags = MachineMemOperand::MOLoad;
      MachineMemOperand *MMO = MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FI), Flags,
          MF.getDataLayout().getPointerSize(), MFI.getObjectAlign(FI));
      MIB->addMemOperand(MF, MMO);
    }
  }
  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI->eraseFromParent();
  return MBB;
}

/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT which would
// need to be moved to TargetRegisterInfo and would necessitate moving
// isTypeLegal over as well - a massive change that would just require
// TargetLowering having a TargetRegisterInfo class member that it would use.
std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                            MVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);

  // Compute the set of all super-register classes.
  BitVector SuperRegRC(TRI->getNumRegClasses());
  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
    SuperRegRC.setBitsInMask(RCI.getMask());

  // Find the first legal register class with the largest spill size.
  const TargetRegisterClass *BestRC = RC;
  for (unsigned i : SuperRegRC.set_bits()) {
    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
    // We want the largest possible spill size.
    if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
      continue;
    if (!isLegalRC(*TRI, *SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLoweringBase::computeRegisterProperties(
    const TargetRegisterInfo *TRI) {
  static_assert(MVT::VALUETYPE_SIZE <= MVT::MAX_ALLOWED_VALUETYPE,
                "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

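  // Illustrative: on a 32-bit target whose widest legal integer register is
  // i32, the loops below record that i64 needs 2 registers and i128 needs 4,
  // with each type expanding in halves (i128 -> i64 -> i32), while i8 and i16
  // are marked for promotion to i32.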
  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1;
       ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
    NumRegistersForVT[ExpandedReg] = 2 * NumRegistersForVT[ExpandedReg - 1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
                                   TypeExpandInteger);
  }

  // Inspect all of the ValueTypes smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
          (MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
    }
  }

  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::ppcf128] = 2 * NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
      TransformToType[MVT::ppcf128] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
    } else {
      NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
      RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
      TransformToType[MVT::ppcf128] = MVT::i128;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
    }
  }

  // Decide how to handle f128. If the target does not have native f128
  // support, expand it to i128 and we will be generating soft float library
  // calls.
  if (!isTypeLegal(MVT::f128)) {
    NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
    RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
    TransformToType[MVT::f128] = MVT::i128;
    ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
  }

  // Decide how to handle f80. If the target does not have native f80
  // support, expand it to i96 and we will be generating soft float library
  // calls.
  if (!isTypeLegal(MVT::f80)) {
    NumRegistersForVT[MVT::f80] = 3 * NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f80] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f80] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f80, TypeSoftenFloat);
  }

  // Decide how to handle f64. If the target does not have native f64
  // support, expand it to i64 and we will be generating soft float library
  // calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  }

  // Decide how to handle f32. If the target does not have native f32
  // support, expand it to i32 and we will be generating soft float library
  // calls.
  if (!isTypeLegal(MVT::f32)) {
    NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f32] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
  }

  // Decide how to handle f16. If the target does not have native f16 support,
  // promote it to f32, because there are no f16 library calls (except for
  // conversions).
  if (!isTypeLegal(MVT::f16)) {
    // Allow targets to control how we legalize half.
    if (softPromoteHalfType()) {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
      TransformToType[MVT::f16] = MVT::f32;
      ValueTypeActions.setTypeAction(MVT::f16, TypeSoftPromoteHalf);
    } else {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
      TransformToType[MVT::f16] = MVT::f32;
      ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
    }
  }

  // Decide how to handle bf16. If the target does not have native bf16
  // support, promote it to f32, because there are no bf16 library calls
  // (except for converting from f32 to bf16).
  if (!isTypeLegal(MVT::bf16)) {
    NumRegistersForVT[MVT::bf16] = NumRegistersForVT[MVT::f32];
    RegisterTypeForVT[MVT::bf16] = RegisterTypeForVT[MVT::f32];
    TransformToType[MVT::bf16] = MVT::f32;
    ValueTypeActions.setTypeAction(MVT::bf16, TypeSoftPromoteHalf);
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;
    if (isTypeLegal(VT))
      continue;

    MVT EltVT = VT.getVectorElementType();
    ElementCount EC = VT.getVectorElementCount();
    bool IsLegalWiderType = false;
    bool IsScalable = VT.isScalableVector();
    LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
    switch (PreferredAction) {
    case TypePromoteInteger: {
      MVT::SimpleValueType EndVT =
          IsScalable ? MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE
                     : MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE;
      // Try to promote the elements of integer vectors. If no legal
      // promotion was found, fall through to the widen-vector method.
      for (unsigned nVT = i + 1;
           (MVT::SimpleValueType)nVT <= EndVT; ++nVT) {
        MVT SVT = (MVT::SimpleValueType)nVT;
        // Promote vectors of integers to vectors with the same number
        // of elements, with a wider element type.
        if (SVT.getScalarSizeInBits() > EltVT.getFixedSizeInBits() &&
            SVT.getVectorElementCount() == EC && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      [[fallthrough]];
    }

    case TypeWidenVector:
      if (isPowerOf2_32(EC.getKnownMinValue())) {
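        // Illustrative: this is how, e.g., v2f32 becomes the legal v4f32 on
        // an SSE2-capable x86 target: same element type, a larger
        // (power-of-two) element count.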
        // Try to widen the vector.
        for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
          MVT SVT = (MVT::SimpleValueType)nVT;
          if (SVT.getVectorElementType() == EltVT &&
              SVT.isScalableVector() == IsScalable &&
              SVT.getVectorElementCount().getKnownMinValue() >
                  EC.getKnownMinValue() &&
              isTypeLegal(SVT)) {
            TransformToType[i] = SVT;
            RegisterTypeForVT[i] = SVT;
            NumRegistersForVT[i] = 1;
            ValueTypeActions.setTypeAction(VT, TypeWidenVector);
            IsLegalWiderType = true;
            break;
          }
        }
        if (IsLegalWiderType)
          break;
      } else {
        // Only widen to the next power of 2 to keep consistency with EVT.
        MVT NVT = VT.getPow2VectorType();
        if (isTypeLegal(NVT)) {
          TransformToType[i] = NVT;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          RegisterTypeForVT[i] = NVT;
          NumRegistersForVT[i] = 1;
          break;
        }
      }
      [[fallthrough]];

    case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
      unsigned NumRegisters = getVectorTypeBreakdownMVT(
          VT, IntermediateVT, NumIntermediates, RegisterVT, this);
      NumRegistersForVT[i] = NumRegisters;
      assert(NumRegistersForVT[i] == NumRegisters &&
             "NumRegistersForVT size cannot represent NumRegisters!");
      RegisterTypeForVT[i] = RegisterVT;

      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Type is already a power of 2. The default action is to split.
        TransformToType[i] = MVT::Other;
        if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else if (EC.getKnownMinValue() > 1)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          ValueTypeActions.setTypeAction(VT, EC.isScalable()
                                                 ? TypeScalarizeScalableVector
                                                 : TypeScalarizeVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest legal register class for
  // a group of value types (i.e. one that is not a sub-register class). For
  // example, on i386 the representative class for i8, i16, and i32 would be
  // GR32; on x86_64 it would be GR64.
  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
    const TargetRegisterClass *RRC;
    uint8_t Cost;
    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}

EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
  assert(!VT.isVector() && "No default SetCC type for vectors!");
  return getPointerTy(DL).SimpleTy;
}

MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}

1594 /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
1595 ///
1596 /// This method returns the number of registers needed, and the VT for each
1597 /// register.  It also returns the VT and quantity of the intermediate values
1598 /// before they are promoted/expanded.
1599 unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context,
1600                                                     EVT VT, EVT &IntermediateVT,
1601                                                     unsigned &NumIntermediates,
1602                                                     MVT &RegisterVT) const {
1603   ElementCount EltCnt = VT.getVectorElementCount();
1604
1605   // If there is a wider vector type with the same element type as this one,
1606   // or a promoted vector type with the same number of wider elements, then
1607   // we should convert to that legal vector type.
1608   // This handles things like <2 x float> -> <4 x float> and
1609   // <4 x i1> -> <4 x i32>.
1610   LegalizeTypeAction TA = getTypeAction(Context, VT);
1611   if (!EltCnt.isScalar() &&
1612       (TA == TypeWidenVector || TA == TypePromoteInteger)) {
1613     EVT RegisterEVT = getTypeToTransformTo(Context, VT);
1614     if (isTypeLegal(RegisterEVT)) {
1615       IntermediateVT = RegisterEVT;
1616       RegisterVT = RegisterEVT.getSimpleVT();
1617       NumIntermediates = 1;
1618       return 1;
1619     }
1620   }
1621
1622   // Figure out the right, legal destination register to copy into.
1623   EVT EltTy = VT.getVectorElementType();
1624
1625   unsigned NumVectorRegs = 1;
1626
1627   // Scalable vectors cannot be scalarized, so handle the legalization of
1628   // these types as is done elsewhere in SelectionDAG.
1629   if (EltCnt.isScalable()) {
1630     LegalizeKind LK;
1631     EVT PartVT = VT;
1632     do {
1633       // Iterate until we've found a legal (part) type to hold VT.
1634       LK = getTypeConversion(Context, PartVT);
1635       PartVT = LK.second;
1636     } while (LK.first != TypeLegal);
1637
1638     if (!PartVT.isVector()) {
1639       report_fatal_error(
1640           "Don't know how to legalize this scalable vector type");
1641     }
1642
1643     NumIntermediates =
1644         divideCeil(VT.getVectorElementCount().getKnownMinValue(),
1645                    PartVT.getVectorElementCount().getKnownMinValue());
1646     IntermediateVT = PartVT;
1647     RegisterVT = getRegisterType(Context, IntermediateVT);
1648     return NumIntermediates;
1649   }
1650
1651   // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally
1652   // we could break down into LHS/RHS like LegalizeDAG does.
1653   if (!isPowerOf2_32(EltCnt.getKnownMinValue())) {
1654     NumVectorRegs = EltCnt.getKnownMinValue();
1655     EltCnt = ElementCount::getFixed(1);
1656   }
1657
1658   // Divide the input until we get to a supported size.  This will always
1659   // end with a scalar if the target doesn't support vectors.
1660   while (EltCnt.getKnownMinValue() > 1 &&
1661          !isTypeLegal(EVT::getVectorVT(Context, EltTy, EltCnt))) {
1662     EltCnt = EltCnt.divideCoefficientBy(2);
1663     NumVectorRegs <<= 1;
1664   }
1665
1666   NumIntermediates = NumVectorRegs;
1667
1668   EVT NewVT = EVT::getVectorVT(Context, EltTy, EltCnt);
1669   if (!isTypeLegal(NewVT))
1670     NewVT = EltTy;
1671   IntermediateVT = NewVT;
1672
1673   MVT DestVT = getRegisterType(Context, NewVT);
1674   RegisterVT = DestVT;
1675
1676   if (EVT(DestVT).bitsLT(NewVT)) { // Value is expanded, e.g. i64 -> i16.
1677     TypeSize NewVTSize = NewVT.getSizeInBits();
1678     // Convert sizes such as i33 to i64.
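    // Worked example (hedged; assumes a hypothetical target whose widest legal
    // integer register is i16): if NewVT ends up as i64, DestVT below becomes
    // i16, the value is expanded into 64/16 = 4 parts, and the function returns
    // NumVectorRegs * 4. An odd size such as i33 is first rounded up to 64 bits
    // before the division.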
1679     if (!llvm::has_single_bit<uint32_t>(NewVTSize.getKnownMinValue()))
1680       NewVTSize = NewVTSize.coefficientNextPowerOf2();
1681     return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
1682   }
1683
1684   // Otherwise, promotion or legal types use the same number of registers as
1685   // the vector decimated to the appropriate level.
1686   return NumVectorRegs;
1687 }
1688
1689 bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI,
1690                                                 uint64_t NumCases,
1691                                                 uint64_t Range,
1692                                                 ProfileSummaryInfo *PSI,
1693                                                 BlockFrequencyInfo *BFI) const {
1694   // FIXME: This function checks the maximum table size and density, but not
1695   // the minimum size. It would be nice if the minimum size check were also
1696   // folded into this function. Currently, it is performed in findJumpTable()
1697   // in SelectionDAGBuilder and in getEstimatedNumberOfCaseClusters() in
1698   // BasicTTIImpl.
1699   const bool OptForSize =
1700       SI->getParent()->getParent()->hasOptSize() ||
1701       llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI);
1702   const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
1703   const unsigned MaxJumpTableSize = getMaximumJumpTableSize();
1704
1705   // Check whether the number of cases is small enough and the range is dense
1706   // enough (e.g. at the default 10% density, a range of 100 needs >= 10 cases).
1707   return (OptForSize || Range <= MaxJumpTableSize) &&
1708          (NumCases * 100 >= Range * MinDensity);
1709 }
1710
1711 MVT TargetLoweringBase::getPreferredSwitchConditionType(LLVMContext &Context,
1712                                                         EVT ConditionVT) const {
1713   return getRegisterType(Context, ConditionVT);
1714 }
1715
1716 /// Get the EVTs and ArgFlags collections that represent the legalized return
1717 /// type of the given function.  This does not require a DAG or a return value,
1718 /// and is suitable for use before any DAGs for the function are constructed.
1719 /// TODO: Move this out of TargetLowering.cpp.
1720 void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
1721                          AttributeList attr,
1722                          SmallVectorImpl<ISD::OutputArg> &Outs,
1723                          const TargetLowering &TLI, const DataLayout &DL) {
1724   SmallVector<EVT, 4> ValueVTs;
1725   ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
1726   unsigned NumValues = ValueVTs.size();
1727   if (NumValues == 0) return;
1728
1729   for (unsigned j = 0, f = NumValues; j != f; ++j) {
1730     EVT VT = ValueVTs[j];
1731     ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1732
1733     if (attr.hasRetAttr(Attribute::SExt))
1734       ExtendKind = ISD::SIGN_EXTEND;
1735     else if (attr.hasRetAttr(Attribute::ZExt))
1736       ExtendKind = ISD::ZERO_EXTEND;
1737
1738     // FIXME: C calling convention requires the return type to be promoted to
1739     // at least 32-bit. But this is not necessary for non-C calling
1740     // conventions. The frontend should mark functions whose return values
1741     // require promoting with signext or zeroext attributes.
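    // For example, an i8 return value carrying the signext attribute would be
    // widened here to TLI.getRegisterType(MVT::i32) (i32 on most targets)
    // before being split into parts below.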
1742 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) { 1743 MVT MinVT = TLI.getRegisterType(MVT::i32); 1744 if (VT.bitsLT(MinVT)) 1745 VT = MinVT; 1746 } 1747 1748 unsigned NumParts = 1749 TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT); 1750 MVT PartVT = 1751 TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT); 1752 1753 // 'inreg' on function refers to return value 1754 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy(); 1755 if (attr.hasRetAttr(Attribute::InReg)) 1756 Flags.setInReg(); 1757 1758 // Propagate extension type if any 1759 if (attr.hasRetAttr(Attribute::SExt)) 1760 Flags.setSExt(); 1761 else if (attr.hasRetAttr(Attribute::ZExt)) 1762 Flags.setZExt(); 1763 1764 for (unsigned i = 0; i < NumParts; ++i) 1765 Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isfixed=*/true, 0, 0)); 1766 } 1767 } 1768 1769 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 1770 /// function arguments in the caller parameter area. This is the actual 1771 /// alignment, not its logarithm. 1772 uint64_t TargetLoweringBase::getByValTypeAlignment(Type *Ty, 1773 const DataLayout &DL) const { 1774 return DL.getABITypeAlign(Ty).value(); 1775 } 1776 1777 bool TargetLoweringBase::allowsMemoryAccessForAlignment( 1778 LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace, 1779 Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const { 1780 // Check if the specified alignment is sufficient based on the data layout. 1781 // TODO: While using the data layout works in practice, a better solution 1782 // would be to implement this check directly (make this a virtual function). 1783 // For example, the ABI alignment may change based on software platform while 1784 // this function should only be affected by hardware implementation. 1785 Type *Ty = VT.getTypeForEVT(Context); 1786 if (VT.isZeroSized() || Alignment >= DL.getABITypeAlign(Ty)) { 1787 // Assume that an access that meets the ABI-specified alignment is fast. 1788 if (Fast != nullptr) 1789 *Fast = 1; 1790 return true; 1791 } 1792 1793 // This is a misaligned access. 
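  // For instance, an i64 access with Align(4) on a target whose ABI alignment
  // for i64 is 8 would reach this point; whether it is allowed (and how fast
  // it is) is then entirely up to the target hook below.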
1794 return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast); 1795 } 1796 1797 bool TargetLoweringBase::allowsMemoryAccessForAlignment( 1798 LLVMContext &Context, const DataLayout &DL, EVT VT, 1799 const MachineMemOperand &MMO, unsigned *Fast) const { 1800 return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(), 1801 MMO.getAlign(), MMO.getFlags(), Fast); 1802 } 1803 1804 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context, 1805 const DataLayout &DL, EVT VT, 1806 unsigned AddrSpace, Align Alignment, 1807 MachineMemOperand::Flags Flags, 1808 unsigned *Fast) const { 1809 return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment, 1810 Flags, Fast); 1811 } 1812 1813 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context, 1814 const DataLayout &DL, EVT VT, 1815 const MachineMemOperand &MMO, 1816 unsigned *Fast) const { 1817 return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(), 1818 MMO.getFlags(), Fast); 1819 } 1820 1821 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context, 1822 const DataLayout &DL, LLT Ty, 1823 const MachineMemOperand &MMO, 1824 unsigned *Fast) const { 1825 EVT VT = getApproximateEVTForLLT(Ty, DL, Context); 1826 return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(), 1827 MMO.getFlags(), Fast); 1828 } 1829 1830 //===----------------------------------------------------------------------===// 1831 // TargetTransformInfo Helpers 1832 //===----------------------------------------------------------------------===// 1833 1834 int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const { 1835 enum InstructionOpcodes { 1836 #define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM, 1837 #define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM 1838 #include "llvm/IR/Instruction.def" 1839 }; 1840 switch (static_cast<InstructionOpcodes>(Opcode)) { 1841 case Ret: return 0; 1842 case Br: return 0; 1843 case Switch: return 0; 1844 case IndirectBr: return 0; 1845 case Invoke: return 0; 1846 case CallBr: return 0; 1847 case Resume: return 0; 1848 case Unreachable: return 0; 1849 case CleanupRet: return 0; 1850 case CatchRet: return 0; 1851 case CatchPad: return 0; 1852 case CatchSwitch: return 0; 1853 case CleanupPad: return 0; 1854 case FNeg: return ISD::FNEG; 1855 case Add: return ISD::ADD; 1856 case FAdd: return ISD::FADD; 1857 case Sub: return ISD::SUB; 1858 case FSub: return ISD::FSUB; 1859 case Mul: return ISD::MUL; 1860 case FMul: return ISD::FMUL; 1861 case UDiv: return ISD::UDIV; 1862 case SDiv: return ISD::SDIV; 1863 case FDiv: return ISD::FDIV; 1864 case URem: return ISD::UREM; 1865 case SRem: return ISD::SREM; 1866 case FRem: return ISD::FREM; 1867 case Shl: return ISD::SHL; 1868 case LShr: return ISD::SRL; 1869 case AShr: return ISD::SRA; 1870 case And: return ISD::AND; 1871 case Or: return ISD::OR; 1872 case Xor: return ISD::XOR; 1873 case Alloca: return 0; 1874 case Load: return ISD::LOAD; 1875 case Store: return ISD::STORE; 1876 case GetElementPtr: return 0; 1877 case Fence: return 0; 1878 case AtomicCmpXchg: return 0; 1879 case AtomicRMW: return 0; 1880 case Trunc: return ISD::TRUNCATE; 1881 case ZExt: return ISD::ZERO_EXTEND; 1882 case SExt: return ISD::SIGN_EXTEND; 1883 case FPToUI: return ISD::FP_TO_UINT; 1884 case FPToSI: return ISD::FP_TO_SINT; 1885 case UIToFP: return ISD::UINT_TO_FP; 1886 case SIToFP: return ISD::SINT_TO_FP; 1887 case FPTrunc: return ISD::FP_ROUND; 1888 case FPExt: return ISD::FP_EXTEND; 1889 case PtrToInt: 
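    // Pointer/integer conversions (here and IntToPtr below) are modeled as
    // bitcasts for this query, on the assumption that the two types share a
    // register representation.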
    return ISD::BITCAST;
1890   case IntToPtr:       return ISD::BITCAST;
1891   case BitCast:        return ISD::BITCAST;
1892   case AddrSpaceCast:  return ISD::ADDRSPACECAST;
1893   case ICmp:           return ISD::SETCC;
1894   case FCmp:           return ISD::SETCC;
1895   case PHI:            return 0;
1896   case Call:           return 0;
1897   case Select:         return ISD::SELECT;
1898   case UserOp1:        return 0;
1899   case UserOp2:        return 0;
1900   case VAArg:          return 0;
1901   case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
1902   case InsertElement:  return ISD::INSERT_VECTOR_ELT;
1903   case ShuffleVector:  return ISD::VECTOR_SHUFFLE;
1904   case ExtractValue:   return ISD::MERGE_VALUES;
1905   case InsertValue:    return ISD::MERGE_VALUES;
1906   case LandingPad:     return 0;
1907   case Freeze:         return ISD::FREEZE;
1908   }
1909
1910   llvm_unreachable("Unknown instruction type encountered!");
1911 }
1912
1913 Value *
1914 TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
1915                                                        bool UseTLS) const {
1916   // compiler-rt provides a variable with a magic name.  Targets that do not
1917   // link with compiler-rt may also provide such a variable.
1918   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1919   const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
1920   auto UnsafeStackPtr =
1921       dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));
1922
1923   Type *StackPtrTy = PointerType::getUnqual(M->getContext());
1924
1925   if (!UnsafeStackPtr) {
1926     auto TLSModel = UseTLS ?
1927         GlobalValue::InitialExecTLSModel :
1928         GlobalValue::NotThreadLocal;
1929     // The global variable is not defined yet, define it ourselves.
1930     // We use the initial-exec TLS model because we do not support the
1931     // variable living anywhere other than in the main executable.
1932     UnsafeStackPtr = new GlobalVariable(
1933         *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
1934         UnsafeStackPtrVar, nullptr, TLSModel);
1935   } else {
1936     // The variable exists, check its type and attributes.
1937     if (UnsafeStackPtr->getValueType() != StackPtrTy)
1938       report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
1939     if (UseTLS != UnsafeStackPtr->isThreadLocal())
1940       report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
1941                          (UseTLS ? "" : "not ") + "be thread-local");
1942   }
1943   return UnsafeStackPtr;
1944 }
1945
1946 Value *
1947 TargetLoweringBase::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
1948   if (!TM.getTargetTriple().isAndroid())
1949     return getDefaultSafeStackPointerLocation(IRB, true);
1950
1951   // Android provides a libc function to retrieve the address of the current
1952   // thread's unsafe stack pointer.
1953   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1954   auto *PtrTy = PointerType::getUnqual(M->getContext());
1955   FunctionCallee Fn =
1956       M->getOrInsertFunction("__safestack_pointer_address", PtrTy);
1957   return IRB.CreateCall(Fn);
1958 }
1959
1960 //===----------------------------------------------------------------------===//
1961 //  Loop Strength Reduction hooks
1962 //===----------------------------------------------------------------------===//
1963
1964 /// isLegalAddressingMode - Return true if the addressing mode represented
1965 /// by AM is legal for this target, for a load/store of the specified type.
1966 bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
1967                                                const AddrMode &AM, Type *Ty,
1968                                                unsigned AS, Instruction *I) const {
1969   // The default implementation supports a conservative, RISC-style r+r and
1970   // r+i addressing mode.
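  // Under these default rules, for example, "r + 4" (Scale == 0) and "r + r"
  // (Scale == 1, no offset) are accepted, while "GV + r", "r + r + 4"
  // (Scale == 1 with base register and offset), and "3 * r" are all rejected.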
1971
1972   // Only allow offsets that fit a sign-extended 16-bit immediate field.
1973   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
1974     return false;
1975
1976   // No global is ever allowed as a base.
1977   if (AM.BaseGV)
1978     return false;
1979
1980   // Only support r+r.
1981   switch (AM.Scale) {
1982   case 0:  // "r+i" or just "i", depending on HasBaseReg.
1983     break;
1984   case 1:
1985     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
1986       return false;
1987     // Otherwise we have r+r or r+i.
1988     break;
1989   case 2:
1990     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
1991       return false;
1992     // Allow 2*r as r+r.
1993     break;
1994   default: // Don't allow n * r
1995     return false;
1996   }
1997
1998   return true;
1999 }
2000
2001 //===----------------------------------------------------------------------===//
2002 //  Stack Protector
2003 //===----------------------------------------------------------------------===//
2004
2005 // For OpenBSD return its special guard variable. Otherwise return nullptr,
2006 // so that SelectionDAG handles SSP.
2007 Value *TargetLoweringBase::getIRStackGuard(IRBuilderBase &IRB) const {
2008   if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
2009     Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
2010     PointerType *PtrTy = PointerType::getUnqual(M.getContext());
2011     Constant *C = M.getOrInsertGlobal("__guard_local", PtrTy);
2012     if (GlobalVariable *G = dyn_cast_or_null<GlobalVariable>(C))
2013       G->setVisibility(GlobalValue::HiddenVisibility);
2014     return C;
2015   }
2016   return nullptr;
2017 }
2018
2019 // Currently only support "standard" __stack_chk_guard.
2020 // TODO: add LOAD_STACK_GUARD support.
2021 void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
2022   if (!M.getNamedValue("__stack_chk_guard")) {
2023     auto *GV = new GlobalVariable(M, PointerType::getUnqual(M.getContext()),
2024                                   false, GlobalVariable::ExternalLinkage,
2025                                   nullptr, "__stack_chk_guard");
2026
2027     // FreeBSD has "__stack_chk_guard" defined externally in libc.so
2028     if (M.getDirectAccessExternalData() &&
2029         !TM.getTargetTriple().isWindowsGNUEnvironment() &&
2030         !(TM.getTargetTriple().isPPC64() && TM.getTargetTriple().isOSFreeBSD()) &&
2031         (!TM.getTargetTriple().isOSDarwin() ||
2032          TM.getRelocationModel() == Reloc::Static))
2033       GV->setDSOLocal(true);
2034   }
2035 }
2036
2037 // Currently only support "standard" __stack_chk_guard.
2038 // TODO: add LOAD_STACK_GUARD support.
2039 Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
2040   return M.getNamedValue("__stack_chk_guard");
2041 }
2042
2043 Function *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
2044   return nullptr;
2045 }
2046
2047 unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
2048   return MinimumJumpTableEntries;
2049 }
2050
2051 void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
2052   MinimumJumpTableEntries = Val;
2053 }
2054
2055 unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
2056   return OptForSize ?
    OptsizeJumpTableDensity : JumpTableDensity;
2057 }
2058
2059 unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
2060   return MaximumJumpTableSize;
2061 }
2062
2063 void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
2064   MaximumJumpTableSize = Val;
2065 }
2066
2067 bool TargetLoweringBase::isJumpTableRelative() const {
2068   return getTargetMachine().isPositionIndependent();
2069 }
2070
2071 Align TargetLoweringBase::getPrefLoopAlignment(MachineLoop *ML) const {
2072   if (TM.Options.LoopAlignment)
2073     return Align(TM.Options.LoopAlignment);
2074   return PrefLoopAlignment;
2075 }
2076
2077 unsigned TargetLoweringBase::getMaxPermittedBytesForAlignment(
2078     MachineBasicBlock *MBB) const {
2079   return MaxBytesForAlignment;
2080 }
2081
2082 //===----------------------------------------------------------------------===//
2083 //  Reciprocal Estimates
2084 //===----------------------------------------------------------------------===//
2085
2086 /// Get the reciprocal estimate attribute string for a function that will
2087 /// override the target defaults.
2088 static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
2089   const Function &F = MF.getFunction();
2090   return F.getFnAttribute("reciprocal-estimates").getValueAsString();
2091 }
2092
2093 /// Construct a string for the given reciprocal operation of the given type.
2094 /// This string should match the corresponding option to the front-end's
2095 /// "-mrecip" flag assuming those strings have been passed through in an
2096 /// attribute string. For example, "vec-divf" for a division of a vXf32.
2097 static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
2098   std::string Name = VT.isVector() ? "vec-" : "";
2099
2100   Name += IsSqrt ? "sqrt" : "div";
2101
2102   // TODO: Handle other float types?
2103   if (VT.getScalarType() == MVT::f64) {
2104     Name += "d";
2105   } else if (VT.getScalarType() == MVT::f16) {
2106     Name += "h";
2107   } else {
2108     assert(VT.getScalarType() == MVT::f32 &&
2109            "Unexpected FP type for reciprocal estimate");
2110     Name += "f";
2111   }
2112
2113   return Name;
2114 }
2115
2116 /// If the input string contains a customized refinement step count (a ':'
2117 /// followed by a single numeric character), return true and report that
2118 /// character's position and value. Otherwise, return false.
2119 static bool parseRefinementStep(StringRef In, size_t &Position,
2120                                 uint8_t &Value) {
2121   const char RefStepToken = ':';
2122   Position = In.find(RefStepToken);
2123   if (Position == StringRef::npos)
2124     return false;
2125
2126   StringRef RefStepString = In.substr(Position + 1);
2127   // Allow exactly one numeric character for the additional refinement
2128   // step parameter.
2129   if (RefStepString.size() == 1) {
2130     char RefStepChar = RefStepString[0];
2131     if (isDigit(RefStepChar)) {
2132       Value = RefStepChar - '0';
2133       return true;
2134     }
2135   }
2136   report_fatal_error("Invalid refinement step for -recip.");
2137 }
2138
2139 /// For the input attribute string, return one of the ReciprocalEstimate enum
2140 /// status values (enabled, disabled, or not specified) for this operation on
2141 /// the specified data type.
2142 static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
2143   if (Override.empty())
2144     return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2145
2146   SmallVector<StringRef, 4> OverrideVector;
2147   Override.split(OverrideVector, ',');
2148   unsigned NumArgs = OverrideVector.size();
2149
2150   // Check if "all", "none", or "default" was specified.
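  // Example attribute strings, mirroring the -mrecip syntax (the values here
  // are illustrative only): "all:2" enables every estimate with two refinement
  // steps, while "!sqrtf,vec-divd:1" disables the scalar f32 square-root
  // estimate and enables the vector f64 division estimate with one step.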
2151   if (NumArgs == 1) {
2152     // Look for an optional setting of the number of refinement steps needed
2153     // for this type of reciprocal operation.
2154     size_t RefPos;
2155     uint8_t RefSteps;
2156     if (parseRefinementStep(Override, RefPos, RefSteps)) {
2157       // Split the string for further processing.
2158       Override = Override.substr(0, RefPos);
2159     }
2160
2161     // All reciprocal types are enabled.
2162     if (Override == "all")
2163       return TargetLoweringBase::ReciprocalEstimate::Enabled;
2164
2165     // All reciprocal types are disabled.
2166     if (Override == "none")
2167       return TargetLoweringBase::ReciprocalEstimate::Disabled;
2168
2169     // Target defaults for enablement are used.
2170     if (Override == "default")
2171       return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2172   }
2173
2174   // The attribute string may omit the size suffix ('f'/'d').
2175   std::string VTName = getReciprocalOpName(IsSqrt, VT);
2176   std::string VTNameNoSize = VTName;
2177   VTNameNoSize.pop_back();
2178   static const char DisabledPrefix = '!';
2179
2180   for (StringRef RecipType : OverrideVector) {
2181     size_t RefPos;
2182     uint8_t RefSteps;
2183     if (parseRefinementStep(RecipType, RefPos, RefSteps))
2184       RecipType = RecipType.substr(0, RefPos);
2185
2186     // Ignore the disablement token for string matching.
2187     bool IsDisabled = RecipType[0] == DisabledPrefix;
2188     if (IsDisabled)
2189       RecipType = RecipType.substr(1);
2190
2191     if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
2192       return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
2193                         : TargetLoweringBase::ReciprocalEstimate::Enabled;
2194   }
2195
2196   return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2197 }
2198
2199 /// For the input attribute string, return the customized refinement step count
2200 /// for this operation on the specified data type. If the step count does not
2201 /// exist, return the ReciprocalEstimate enum value for unspecified.
2202 static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
2203   if (Override.empty())
2204     return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2205
2206   SmallVector<StringRef, 4> OverrideVector;
2207   Override.split(OverrideVector, ',');
2208   unsigned NumArgs = OverrideVector.size();
2209
2210   // Check if "all", "default", or "none" was specified.
2211   if (NumArgs == 1) {
2212     // Look for an optional setting of the number of refinement steps needed
2213     // for this type of reciprocal operation.
2214     size_t RefPos;
2215     uint8_t RefSteps;
2216     if (!parseRefinementStep(Override, RefPos, RefSteps))
2217       return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2218
2219     // Split the string for further processing.
2220     Override = Override.substr(0, RefPos);
2221     assert(Override != "none" &&
2222            "Disabled reciprocals, but specified refinement steps?");
2223
2224     // If this is a general override, return the specified number of steps.
2225     if (Override == "all" || Override == "default")
2226       return RefSteps;
2227   }
2228
2229   // The attribute string may omit the size suffix ('f'/'d').
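  // E.g. a "vec-div" token matches both "vec-divf" and "vec-divd", which is
  // why a size-stripped copy of the name is compared below as well.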
2230   std::string VTName = getReciprocalOpName(IsSqrt, VT);
2231   std::string VTNameNoSize = VTName;
2232   VTNameNoSize.pop_back();
2233
2234   for (StringRef RecipType : OverrideVector) {
2235     size_t RefPos;
2236     uint8_t RefSteps;
2237     if (!parseRefinementStep(RecipType, RefPos, RefSteps))
2238       continue;
2239
2240     RecipType = RecipType.substr(0, RefPos);
2241     if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
2242       return RefSteps;
2243   }
2244
2245   return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2246 }
2247
2248 int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
2249                                                     MachineFunction &MF) const {
2250   return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
2251 }
2252
2253 int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
2254                                                    MachineFunction &MF) const {
2255   return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
2256 }
2257
2258 int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
2259                                                MachineFunction &MF) const {
2260   return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
2261 }
2262
2263 int TargetLoweringBase::getDivRefinementSteps(EVT VT,
2264                                               MachineFunction &MF) const {
2265   return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
2266 }
2267
2268 bool TargetLoweringBase::isLoadBitCastBeneficial(
2269     EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG,
2270     const MachineMemOperand &MMO) const {
2271   // Single-element vectors are scalarized, so we should generally avoid having
2272   // any memory operations on such types, as they would get scalarized too.
2273   if (LoadVT.isFixedLengthVector() && BitcastVT.isFixedLengthVector() &&
2274       BitcastVT.getVectorNumElements() == 1)
2275     return false;
2276
2277   // Don't do this if we could do an indexed load on the original type, but
2278   // not on the new one.
2279   if (!LoadVT.isSimple() || !BitcastVT.isSimple())
2280     return true;
2281
2282   MVT LoadMVT = LoadVT.getSimpleVT();
2283
2284   // Don't bother doing this if it's just going to be promoted again later, as
2285   // doing so might interfere with other combines.
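  // E.g. if loads of LoadMVT are Promoted directly to BitcastVT, operation
  // legalization would perform the equivalent conversion anyway, so forming
  // the bitcast here would be redundant at best.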
2286 if (getOperationAction(ISD::LOAD, LoadMVT) == Promote && 2287 getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT()) 2288 return false; 2289 2290 unsigned Fast = 0; 2291 return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), BitcastVT, 2292 MMO, &Fast) && 2293 Fast; 2294 } 2295 2296 void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const { 2297 MF.getRegInfo().freezeReservedRegs(MF); 2298 } 2299 2300 MachineMemOperand::Flags TargetLoweringBase::getLoadMemOperandFlags( 2301 const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC, 2302 const TargetLibraryInfo *LibInfo) const { 2303 MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad; 2304 if (LI.isVolatile()) 2305 Flags |= MachineMemOperand::MOVolatile; 2306 2307 if (LI.hasMetadata(LLVMContext::MD_nontemporal)) 2308 Flags |= MachineMemOperand::MONonTemporal; 2309 2310 if (LI.hasMetadata(LLVMContext::MD_invariant_load)) 2311 Flags |= MachineMemOperand::MOInvariant; 2312 2313 if (isDereferenceableAndAlignedPointer(LI.getPointerOperand(), LI.getType(), 2314 LI.getAlign(), DL, &LI, AC, 2315 /*DT=*/nullptr, LibInfo)) 2316 Flags |= MachineMemOperand::MODereferenceable; 2317 2318 Flags |= getTargetMMOFlags(LI); 2319 return Flags; 2320 } 2321 2322 MachineMemOperand::Flags 2323 TargetLoweringBase::getStoreMemOperandFlags(const StoreInst &SI, 2324 const DataLayout &DL) const { 2325 MachineMemOperand::Flags Flags = MachineMemOperand::MOStore; 2326 2327 if (SI.isVolatile()) 2328 Flags |= MachineMemOperand::MOVolatile; 2329 2330 if (SI.hasMetadata(LLVMContext::MD_nontemporal)) 2331 Flags |= MachineMemOperand::MONonTemporal; 2332 2333 // FIXME: Not preserving dereferenceable 2334 Flags |= getTargetMMOFlags(SI); 2335 return Flags; 2336 } 2337 2338 MachineMemOperand::Flags 2339 TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI, 2340 const DataLayout &DL) const { 2341 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 2342 2343 if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&AI)) { 2344 if (RMW->isVolatile()) 2345 Flags |= MachineMemOperand::MOVolatile; 2346 } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(&AI)) { 2347 if (CmpX->isVolatile()) 2348 Flags |= MachineMemOperand::MOVolatile; 2349 } else 2350 llvm_unreachable("not an atomic instruction"); 2351 2352 // FIXME: Not preserving dereferenceable 2353 Flags |= getTargetMMOFlags(AI); 2354 return Flags; 2355 } 2356 2357 Instruction *TargetLoweringBase::emitLeadingFence(IRBuilderBase &Builder, 2358 Instruction *Inst, 2359 AtomicOrdering Ord) const { 2360 if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore()) 2361 return Builder.CreateFence(Ord); 2362 else 2363 return nullptr; 2364 } 2365 2366 Instruction *TargetLoweringBase::emitTrailingFence(IRBuilderBase &Builder, 2367 Instruction *Inst, 2368 AtomicOrdering Ord) const { 2369 if (isAcquireOrStronger(Ord)) 2370 return Builder.CreateFence(Ord); 2371 else 2372 return nullptr; 2373 } 2374 2375 //===----------------------------------------------------------------------===// 2376 // GlobalISel Hooks 2377 //===----------------------------------------------------------------------===// 2378 2379 bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI, 2380 const TargetTransformInfo *TTI) const { 2381 auto &MF = *MI.getMF(); 2382 auto &MRI = MF.getRegInfo(); 2383 // Assuming a spill and reload of a value has a cost of 1 instruction each, 2384 // this helper function computes the maximum number of uses we should consider 2385 // for remat. 
E.g. on arm64 global addresses take 2 insts to materialize. We
2386   // break even in terms of code size when the original MI has 2 users vs
2387   // choosing to potentially spill. With any more than 2 users we have a net
2388   // code size increase. This doesn't take register pressure into account.
2389   auto maxUses = [](unsigned RematCost) {
2390     // A cost of 1 means remats are basically free.
2391     if (RematCost == 1)
2392       return std::numeric_limits<unsigned>::max();
2393     if (RematCost == 2)
2394       return 2U;
2395
2396     // Remat is too expensive, only sink if there's one user.
2397     if (RematCost > 2)
2398       return 1U;
2399     llvm_unreachable("Unexpected remat cost");
2400   };
2401
2402   switch (MI.getOpcode()) {
2403   default:
2404     return false;
2405   // Constant-like instructions should be close to their users.
2406   // We don't want long live-ranges for them.
2407   case TargetOpcode::G_CONSTANT:
2408   case TargetOpcode::G_FCONSTANT:
2409   case TargetOpcode::G_FRAME_INDEX:
2410   case TargetOpcode::G_INTTOPTR:
2411     return true;
2412   case TargetOpcode::G_GLOBAL_VALUE: {
2413     unsigned RematCost = TTI->getGISelRematGlobalCost();
2414     Register Reg = MI.getOperand(0).getReg();
2415     unsigned MaxUses = maxUses(RematCost);
2416     if (MaxUses == UINT_MAX)
2417       return true; // Remats are "free" so always localize.
2418     return MRI.hasAtMostUserInstrs(Reg, MaxUses);
2419   }
2420   }
2421 }
2422
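// Illustrative summary (hedged): with TTI->getGISelRematGlobalCost() == 2
// (the arm64 case described above), a G_GLOBAL_VALUE is localized only when
// it has at most two user instructions; a cost of 1 localizes unconditionally,
// and any higher cost localizes only single-use values.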