//===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

static cl::opt<unsigned> MinimumJumpTableEntries
  ("min-jump-table-entries", cl::init(4), cl::Hidden,
   cl::desc("Set minimum number of entries to use a jump table."));

static cl::opt<unsigned> MaximumJumpTableSize
  ("max-jump-table-size", cl::init(UINT_MAX), cl::Hidden,
   cl::desc("Set maximum size of jump tables."));

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
    JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                     cl::desc("Minimum density for building a jump table in "
                              "a normal function"));

/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned> OptsizeJumpTableDensity(
    "optsize-jump-table-density", cl::init(40), cl::Hidden,
    cl::desc("Minimum density for building a jump table in "
             "an optsize function"));

// FIXME: This option exists only so that, during development, strict FP
// operations can be tested without being mutated into normal FP operations.
// Once the backend supports strict FP operations, this option will be
// meaningless.
static cl::opt<bool> DisableStrictNodeMutation("disable-strictnode-mutation",
       cl::desc("Don't mutate strict-float nodes into normal nodes during "
                "legalization"),
       cl::init(false), cl::Hidden);

static bool darwinHasSinCos(const Triple &TT) {
  assert(TT.isOSDarwin() && "should be called with darwin triple");
  // Don't bother with 32-bit x86.
  if (TT.getArch() == Triple::x86)
    return false;
  // macOS < 10.9 has no sincos_stret.
  if (TT.isMacOSX())
    return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
  // iOS < 7.0 has no sincos_stret.
  if (TT.isiOS())
    return !TT.isOSVersionLT(7, 0);
  // Any other Darwin OS, such as watchOS or tvOS, is new enough.
  return true;
}

void TargetLoweringBase::InitLibcalls(const Triple &TT) {
#define HANDLE_LIBCALL(code, name)                                             \
  setLibcallName(RTLIB::code, name);
#include "llvm/IR/RuntimeLibcalls.def"
#undef HANDLE_LIBCALL
  // Initialize calling conventions to their default.
  for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
    setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);

  // For IEEE quad-precision libcall names, PPC uses "kf" instead of "tf".
  if (TT.isPPC()) {
    setLibcallName(RTLIB::ADD_F128, "__addkf3");
    setLibcallName(RTLIB::SUB_F128, "__subkf3");
    setLibcallName(RTLIB::MUL_F128, "__mulkf3");
    setLibcallName(RTLIB::DIV_F128, "__divkf3");
    setLibcallName(RTLIB::POWI_F128, "__powikf2");
    setLibcallName(RTLIB::FPEXT_F32_F128, "__extendsfkf2");
    setLibcallName(RTLIB::FPEXT_F64_F128, "__extenddfkf2");
    setLibcallName(RTLIB::FPROUND_F128_F32, "__trunckfsf2");
    setLibcallName(RTLIB::FPROUND_F128_F64, "__trunckfdf2");
    setLibcallName(RTLIB::FPTOSINT_F128_I32, "__fixkfsi");
    setLibcallName(RTLIB::FPTOSINT_F128_I64, "__fixkfdi");
    setLibcallName(RTLIB::FPTOSINT_F128_I128, "__fixkfti");
    setLibcallName(RTLIB::FPTOUINT_F128_I32, "__fixunskfsi");
    setLibcallName(RTLIB::FPTOUINT_F128_I64, "__fixunskfdi");
    setLibcallName(RTLIB::FPTOUINT_F128_I128, "__fixunskfti");
    setLibcallName(RTLIB::SINTTOFP_I32_F128, "__floatsikf");
    setLibcallName(RTLIB::SINTTOFP_I64_F128, "__floatdikf");
    setLibcallName(RTLIB::SINTTOFP_I128_F128, "__floattikf");
    setLibcallName(RTLIB::UINTTOFP_I32_F128, "__floatunsikf");
    setLibcallName(RTLIB::UINTTOFP_I64_F128, "__floatundikf");
    setLibcallName(RTLIB::UINTTOFP_I128_F128, "__floatuntikf");
    setLibcallName(RTLIB::OEQ_F128, "__eqkf2");
    setLibcallName(RTLIB::UNE_F128, "__nekf2");
    setLibcallName(RTLIB::OGE_F128, "__gekf2");
    setLibcallName(RTLIB::OLT_F128, "__ltkf2");
    setLibcallName(RTLIB::OLE_F128, "__lekf2");
    setLibcallName(RTLIB::OGT_F128, "__gtkf2");
    setLibcallName(RTLIB::UO_F128, "__unordkf2");
  }

  // A few names are different on particular architectures or environments.
  if (TT.isOSDarwin()) {
    // For f16/f32 conversions, Darwin uses the standard naming scheme,
    // instead of the gnueabi-style __gnu_*_ieee.
    // FIXME: What about other targets?
    setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

    // Some Darwin versions have an optimized __bzero/bzero function.
    switch (TT.getArch()) {
    case Triple::x86:
    case Triple::x86_64:
      if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
        setLibcallName(RTLIB::BZERO, "__bzero");
      break;
    case Triple::aarch64:
    case Triple::aarch64_32:
      setLibcallName(RTLIB::BZERO, "bzero");
      break;
    default:
      break;
    }

    if (darwinHasSinCos(TT)) {
      setLibcallName(RTLIB::SINCOS_STRET_F32, "__sincosf_stret");
      setLibcallName(RTLIB::SINCOS_STRET_F64, "__sincos_stret");
      if (TT.isWatchABI()) {
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F32,
                              CallingConv::ARM_AAPCS_VFP);
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F64,
                              CallingConv::ARM_AAPCS_VFP);
      }
    }
  } else {
    setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
  }

  if (TT.isGNUEnvironment() || TT.isOSFuchsia() ||
      (TT.isAndroid() && !TT.isAndroidVersionLT(9))) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    setLibcallName(RTLIB::SINCOS_F80, "sincosl");
    setLibcallName(RTLIB::SINCOS_F128, "sincosl");
    setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
  }

  if (TT.isPS()) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
  }

  if (TT.isOSOpenBSD()) {
    setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
  }
}

/// GetFPLibCall - Helper to return the right libcall for the given floating
/// point type, or UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPLibCall(EVT VT,
                                   RTLIB::Libcall Call_F32,
                                   RTLIB::Libcall Call_F64,
                                   RTLIB::Libcall Call_F80,
                                   RTLIB::Libcall Call_F128,
                                   RTLIB::Libcall Call_PPCF128) {
  return
    VT == MVT::f32 ? Call_F32 :
    VT == MVT::f64 ? Call_F64 :
    VT == MVT::f80 ? Call_F80 :
    VT == MVT::f128 ? Call_F128 :
    VT == MVT::ppcf128 ? Call_PPCF128 :
    RTLIB::UNKNOWN_LIBCALL;
}

/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
    if (RetVT == MVT::f64)
      return FPEXT_F16_F64;
    if (RetVT == MVT::f80)
      return FPEXT_F16_F80;
    if (RetVT == MVT::f128)
      return FPEXT_F16_F128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F32_PPCF128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F64_PPCF128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::f128)
      return FPEXT_F80_F128;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::bf16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_BF16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_BF16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  } else if (RetVT == MVT::f80) {
    if (OpVT == MVT::f128)
      return FPROUND_F128_F80;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

RTLIB::Libcall RTLIB::getPOWI(EVT RetVT) {
  return getFPLibCall(RetVT, POWI_F32, POWI_F64, POWI_F80, POWI_F128,
                      POWI_PPCF128);
}

RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
                                        MVT VT) {
  unsigned ModeN, ModelN;
  switch (VT.SimpleTy) {
  case MVT::i8:
    ModeN = 0;
    break;
  case MVT::i16:
    ModeN = 1;
    break;
  case MVT::i32:
    ModeN = 2;
    break;
  case MVT::i64:
    ModeN = 3;
    break;
  case MVT::i128:
    ModeN = 4;
    break;
  default:
    return UNKNOWN_LIBCALL;
  }

  switch (Order) {
  case AtomicOrdering::Monotonic:
    ModelN = 0;
    break;
  case AtomicOrdering::Acquire:
    ModelN = 1;
    break;
  case AtomicOrdering::Release:
    ModelN = 2;
    break;
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    ModelN = 3;
    break;
  default:
    return UNKNOWN_LIBCALL;
  }

#define LCALLS(A, B)                                                           \
  { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
#define LCALL5(A)                                                              \
  LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
  switch (Opc) {
  case ISD::ATOMIC_CMP_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_CAS)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_SWP)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_LOAD_ADD: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDADD)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_LOAD_OR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDSET)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_LOAD_CLR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDCLR)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_LOAD_XOR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDEOR)};
    return LC[ModeN][ModelN];
  }
  default:
    return UNKNOWN_LIBCALL;
  }
#undef LCALLS
#undef LCALL5
}

RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum)                                              \
  case Name:                                                                   \
    switch (VT.SimpleTy) {                                                     \
    default:                                                                   \
      return UNKNOWN_LIBCALL;                                                  \
    case MVT::i8:                                                              \
      return Enum##_1;                                                         \
    case MVT::i16:                                                             \
      return Enum##_2;                                                         \
    case MVT::i32:                                                             \
      return Enum##_4;                                                         \
    case MVT::i64:                                                             \
      return Enum##_8;                                                         \
    case MVT::i128:                                                            \
      return Enum##_16;                                                        \
    }

  switch (Opc) {
    OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
    OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}

RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

/// InitCmpLibcallCCs - Set default comparison libcall CC.
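/// Each ordered-comparison libcall (e.g. __gekf2) returns an int that callers
/// compare against zero using the CondCode recorded here; the UO_* (unordered)
/// calls return nonzero iff either operand is NaN, hence SETNE.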
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  std::fill(CCs, CCs + RTLIB::UNKNOWN_LIBCALL, ISD::SETCC_INVALID);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::UNE_F128] = ISD::SETNE;
  CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OGE_F128] = ISD::SETGE;
  CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLT_F128] = ISD::SETLT;
  CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OLE_F128] = ISD::SETLE;
  CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::OGT_F128] = ISD::SETGT;
  CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::UO_F128] = ISD::SETNE;
  CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
}

/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
      MaxLoadsPerMemcmp = 8;
  MaxGluedStoresPerMemcpy = 0;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  EnableExtLdPromotion = false;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  GatherAllAliasesMaxDepth = 18;
  IsStrictFPEnabled = DisableStrictNodeMutation;
  MaxBytesForAlignment = 0;
  // TODO: the default will be switched to 0 in the next commit, along
  // with the Target-specific changes necessary.
  MaxAtomicSizeInBitsSupported = 1024;

  MinCmpXchgSizeInBits = 0;
  SupportsUnalignedAtomics = false;

  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames),
            nullptr);

  InitLibcalls(TM.getTargetTriple());
  InitCmpLibcallCCs(CmpLibcallCCs);
}

void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
  std::fill(std::begin(TargetDAGCombineArray),
            std::end(TargetDAGCombineArray), 0);

  // We're somewhat special casing MVT::i2 and MVT::i4. Ideally we want to
  // remove this and targets should individually set these types if not legal.
  for (ISD::NodeType NT : enum_seq(ISD::DELETED_NODE, ISD::BUILTIN_OP_END,
                                   force_iteration_on_noniterable_enum)) {
    for (MVT VT : {MVT::i2, MVT::i4})
      OpActions[(unsigned)VT.SimpleTy][NT] = Expand;
  }
  for (MVT AVT : MVT::all_valuetypes()) {
    for (MVT VT : {MVT::i2, MVT::i4, MVT::v128i2, MVT::v64i4}) {
      setTruncStoreAction(AVT, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, AVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, AVT, VT, Expand);
    }
  }
  for (unsigned IM = (unsigned)ISD::PRE_INC;
       IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
    for (MVT VT : {MVT::i2, MVT::i4}) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }
  }

  for (MVT VT : MVT::fp_valuetypes()) {
    MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits());
    if (IntVT.isValid()) {
      setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
      AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
    }
  }

  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node which just returns the value loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

    // These operations default to expand.
    setOperationAction({ISD::FGETSIGN, ISD::CONCAT_VECTORS,
                        ISD::FMINNUM, ISD::FMAXNUM,
                        ISD::FMINNUM_IEEE, ISD::FMAXNUM_IEEE,
                        ISD::FMINIMUM, ISD::FMAXIMUM,
                        ISD::FMAD, ISD::SMIN,
                        ISD::SMAX, ISD::UMIN,
                        ISD::UMAX, ISD::ABS,
                        ISD::FSHL, ISD::FSHR,
                        ISD::SADDSAT, ISD::UADDSAT,
                        ISD::SSUBSAT, ISD::USUBSAT,
                        ISD::SSHLSAT, ISD::USHLSAT,
                        ISD::SMULFIX, ISD::SMULFIXSAT,
                        ISD::UMULFIX, ISD::UMULFIXSAT,
                        ISD::SDIVFIX, ISD::SDIVFIXSAT,
                        ISD::UDIVFIX, ISD::UDIVFIXSAT,
                        ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
                        ISD::IS_FPCLASS},
                       VT, Expand);

    // Overflow operations default to expand.
    setOperationAction({ISD::SADDO, ISD::SSUBO, ISD::UADDO, ISD::USUBO,
                        ISD::SMULO, ISD::UMULO},
                       VT, Expand);

    // ADDCARRY operations default to expand.
    setOperationAction({ISD::ADDCARRY, ISD::SUBCARRY, ISD::SETCCCARRY,
                        ISD::SADDO_CARRY, ISD::SSUBO_CARRY},
                       VT, Expand);

    // ADDC/ADDE/SUBC/SUBE default to expand.
    setOperationAction({ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}, VT,
                       Expand);

    // Halving adds.
    setOperationAction(
        {ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS, ISD::AVGCEILU}, VT,
        Expand);

    // Absolute difference.
    setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Expand);

    // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
    setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
                       Expand);

    setOperationAction({ISD::BITREVERSE, ISD::PARITY}, VT, Expand);

    // These library functions default to expand.
    setOperationAction({ISD::FROUND, ISD::FROUNDEVEN, ISD::FPOWI}, VT, Expand);

    // These operations default to expand for vector types.
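    // (The scalar forms of FCOPYSIGN and SIGN_EXTEND_INREG keep their default
    // action from the memset above; only the vector forms are expanded here.)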
    if (VT.isVector())
      setOperationAction({ISD::FCOPYSIGN, ISD::SIGN_EXTEND_INREG,
                          ISD::ANY_EXTEND_VECTOR_INREG,
                          ISD::SIGN_EXTEND_VECTOR_INREG,
                          ISD::ZERO_EXTEND_VECTOR_INREG, ISD::SPLAT_VECTOR},
                         VT, Expand);

    // Constrained floating-point operations default to expand.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
    setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
#include "llvm/IR/ConstrainedOps.def"

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);

    // Vector reductions default to expand.
    setOperationAction(
        {ISD::VECREDUCE_FADD, ISD::VECREDUCE_FMUL, ISD::VECREDUCE_ADD,
         ISD::VECREDUCE_MUL, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR,
         ISD::VECREDUCE_XOR, ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
         ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN, ISD::VECREDUCE_FMAX,
         ISD::VECREDUCE_FMIN, ISD::VECREDUCE_SEQ_FADD, ISD::VECREDUCE_SEQ_FMUL},
        VT, Expand);

    // Named vector shuffles default to expand.
    setOperationAction(ISD::VECTOR_SPLICE, VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);

  // ConstantFP nodes default to expand. Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP,
                     {MVT::f16, MVT::f32, MVT::f64, MVT::f80, MVT::f128},
                     Expand);

  // These library functions default to expand.
  setOperationAction({ISD::FCBRT, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
                      ISD::FEXP, ISD::FEXP2, ISD::FFLOOR, ISD::FNEARBYINT,
                      ISD::FCEIL, ISD::FRINT, ISD::FTRUNC, ISD::LROUND,
                      ISD::LLROUND, ISD::LRINT, ISD::LLRINT},
                     {MVT::f32, MVT::f64, MVT::f128}, Expand);

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  // here is to inform the DAG Legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);

  setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);
}

MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
}

EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                                         bool LegalTypes) const {
  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  MVT ShiftVT =
      LegalTypes ? getScalarShiftAmountTy(DL, LHSTy) : getPointerTy(DL);
  // If any possible shift value won't fit in the preferred type, just use
  // something safe. Assume it will be legalized when the shift is expanded.
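  // For example, shifting an i4096 value needs a 12-bit shift amount
  // (Log2_32_Ceil(4096) == 12), which would not fit in an i8 shift type.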
  if (ShiftVT.getSizeInBits() < Log2_32_Ceil(LHSTy.getSizeInBits()))
    ShiftVT = MVT::i32;
  assert(ShiftVT.getSizeInBits() >= Log2_32_Ceil(LHSTy.getSizeInBits()) &&
         "ShiftVT is still too small!");
  return ShiftVT;
}

bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

bool TargetLoweringBase::isFreeAddrSpaceCast(unsigned SrcAS,
                                             unsigned DestAS) const {
  return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
}

void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}

TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            LA == TypeSoftPromoteHalf ||
            (NVT.isVector() ||
             ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA, EVT(SVT).getHalfNumVectorElementsVT(Context));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle extended scalar types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }

  // Handle vector types.
  ElementCount NumElts = VT.getVectorElementCount();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
  if (NumElts.isScalar())
    return LegalizeKind(TypeScalarizeVector, EltVT);

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = NumElts.coefficientNextPowerOf2();
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If type is to be expanded, split the vector.
    // <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger) {
      if (VT.getVectorElementCount().isScalable())
        return LegalizeKind(TypeScalarizeScalableVector, EltVT);
      return LegalizeKind(TypeSplitVector,
                          VT.getHalfNumVectorElementsVT(Context));
    }

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fall back to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (true) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: X86 XMM registers hold 64-bit elements on 32-bit
      // systems.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (true) {
    // Round up to the next power of 2.
    NumElts = NumElts.coefficientNextPowerOf2();

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type. Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen odd vectors to next power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  if (VT.getVectorElementCount() == ElementCount::getScalable(1))
    return LegalizeKind(TypeScalarizeScalableVector, EltVT);

  // Vectors with illegal element types are expanded.
  EVT NVT = EVT::getVectorVT(Context, EltVT,
                             VT.getVectorElementCount().divideCoefficientBy(2));
  return LegalizeKind(TypeSplitVector, NVT);
}

static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  ElementCount EC = VT.getVectorElementCount();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so splitting or widening is
  // required.
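  // e.g. a <vscale x 3 x i32> has a non-power-of-2 minimum element count and
  // is not handled; it trips the llvm_unreachable below.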
  if (VT.isScalableVector() && !isPowerOf2_32(EC.getKnownMinValue()))
    llvm_unreachable(
        "Splitting or widening of non-power-of-2 MVTs is not implemented.");

  // FIXME: We don't support non-power-of-2-sized vectors for now.
  // Ideally we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EC.getKnownMinValue())) {
    // Split EC to unit size (scalable property is preserved).
    NumVectorRegs = EC.getKnownMinValue();
    EC = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will
  // always end up with an EC that represents a scalar or a scalable
  // scalar.
  while (EC.getKnownMinValue() > 1 &&
         !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
    EC = EC.divideCoefficientBy(2);
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, EC);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned LaneSizeInBits = NewVT.getScalarSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(LaneSizeInBits))
    LaneSizeInBits = NextPowerOf2(LaneSizeInBits);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs * (LaneSizeInBits / DestVT.getScalarSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

/// isLegalRC - Return true if any of the value types that can be represented
/// by the specified register class is legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass &RC) const {
  for (const auto *I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
    if (isTypeLegal(*I))
      return true;
  return false;
}

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock *
TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
                                   MachineBasicBlock *MBB) const {
  MachineInstr *MI = &InitialMI;
  MachineFunction &MF = *MI->getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through distinction is handled already (the
  // live-through operands are all stack slots), but we need to handle the
  // different types of stackmap operands and memory effects here.

  if (llvm::none_of(MI->operands(),
                    [](MachineOperand &Operand) { return Operand.isFI(); }))
    return MBB;

  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

  // Inherit previous memory operands.
  MIB.cloneMemRefs(*MI);

  for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isFI()) {
      // Index of the Def operand this Use is tied to.
      // Since Defs come before Uses, if this Use is tied, the index of the
      // Def must be smaller than the index of the Use.
      // Also, Defs preserve their position in the new MI.
      unsigned TiedTo = i;
      if (MO.isReg() && MO.isTied())
        TiedTo = MI->findTiedOperandIdx(i);
      MIB.add(MO);
      if (TiedTo < i)
        MIB->tieOperands(TiedTo, MIB->getNumOperands() - 1);
      continue;
    }

    // foldMemoryOperand builds a new MI after replacing a single FI operand
    // with the canonical set of five x86 addressing-mode operands.
    int FI = MO.getIndex();

    // Add frame index operands recognized by stackmaps.cpp
    if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
      // indirect-mem-ref tag, size, #FI, offset.
      // Used for spills inserted by StatepointLowering. This codepath is not
      // used for patchpoints/stackmaps at all; for those, spilling is done
      // via the foldMemoryOperand callback only.
      assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(MFI.getObjectSize(FI));
      MIB.add(MO);
      MIB.addImm(0);
    } else {
      // direct-mem-ref tag, #FI, offset.
      // Used by patchpoint, and direct alloca arguments to statepoints.
      MIB.addImm(StackMaps::DirectMemRefOp);
      MIB.add(MO);
      MIB.addImm(0);
    }

    assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");

    // Add a new memory operand for this FI.
    assert(MFI.getObjectOffset(FI) != -1);

    // Note: STATEPOINT MMOs are added during SelectionDAG. STACKMAP, and
    // PATCHPOINT should be updated to do the same. (TODO)
    if (MI->getOpcode() != TargetOpcode::STATEPOINT) {
      auto Flags = MachineMemOperand::MOLoad;
      MachineMemOperand *MMO = MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FI), Flags,
          MF.getDataLayout().getPointerSize(), MFI.getObjectAlign(FI));
      MIB->addMemOperand(MF, MMO);
    }
  }
  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI->eraseFromParent();
  return MBB;
}

/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT which would
// need to be moved to TargetRegisterInfo and would necessitate moving
// isTypeLegal over as well - a massive change that would just require
// TargetLowering having a TargetRegisterInfo class member that it would use.
std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                            MVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);

  // Compute the set of all super-register classes.
  BitVector SuperRegRC(TRI->getNumRegClasses());
  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
    SuperRegRC.setBitsInMask(RCI.getMask());

  // Find the first legal register class with the largest spill size.
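  // For example, on x86-64 this walk starting from GR32 settles on GR64, as
  // described in the representative-class comment in
  // computeRegisterProperties below.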
  const TargetRegisterClass *BestRC = RC;
  for (unsigned i : SuperRegRC.set_bits()) {
    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
    // We want the largest possible spill size.
    if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
      continue;
    if (!isLegalRC(*TRI, *SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLoweringBase::computeRegisterProperties(
    const TargetRegisterInfo *TRI) {
  static_assert(MVT::VALUETYPE_SIZE <= MVT::MAX_ALLOWED_VALUETYPE,
                "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1;
       ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
    NumRegistersForVT[ExpandedReg] = 2 * NumRegistersForVT[ExpandedReg - 1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
                                   TypeExpandInteger);
  }

  // Inspect all of the ValueTypes smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
          (MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
    }
  }

  // The ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::ppcf128] = 2 * NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
      TransformToType[MVT::ppcf128] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
    } else {
      NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
      RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
      TransformToType[MVT::ppcf128] = MVT::i128;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
    }
  }

  // Decide how to handle f128. If the target does not have native f128
  // support, expand it to i128 and we will be generating soft float library
  // calls.
  if (!isTypeLegal(MVT::f128)) {
    NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
    RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
    TransformToType[MVT::f128] = MVT::i128;
    ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
  }

  // Decide how to handle f64. If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  }

  // Decide how to handle f32. If the target does not have native f32 support,
  // expand it to i32 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f32)) {
    NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f32] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
  }

  // Decide how to handle f16. If the target does not have native f16 support,
  // promote it to f32, because there are no f16 library calls (except for
  // conversions).
  if (!isTypeLegal(MVT::f16)) {
    // Allow targets to control how we legalize half.
    if (softPromoteHalfType()) {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
      TransformToType[MVT::f16] = MVT::f32;
      ValueTypeActions.setTypeAction(MVT::f16, TypeSoftPromoteHalf);
    } else {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
      TransformToType[MVT::f16] = MVT::f32;
      ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
    }
  }

  // Decide how to handle bf16. If the target does not have native bf16
  // support, promote it to f32, because there are no bf16 library calls
  // (except for converting from f32 to bf16).
  if (!isTypeLegal(MVT::bf16)) {
    NumRegistersForVT[MVT::bf16] = NumRegistersForVT[MVT::f32];
    RegisterTypeForVT[MVT::bf16] = RegisterTypeForVT[MVT::f32];
    TransformToType[MVT::bf16] = MVT::f32;
    ValueTypeActions.setTypeAction(MVT::bf16, TypePromoteFloat);
  }

  // Loop over all of the vector value types to see which need transformation.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;
    if (isTypeLegal(VT))
      continue;

    MVT EltVT = VT.getVectorElementType();
    ElementCount EC = VT.getVectorElementCount();
    bool IsLegalWiderType = false;
    bool IsScalable = VT.isScalableVector();
    LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
    switch (PreferredAction) {
    case TypePromoteInteger: {
      MVT::SimpleValueType EndVT =
          IsScalable ? MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE
                     : MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE;
      // Try to promote the elements of integer vectors. If no legal
      // promotion was found, fall through to the widen-vector method.
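      // e.g. <4 x i1> is tried as <4 x i8>, <4 x i16>, ... until a legal
      // vector type with the same element count is found.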
      for (unsigned nVT = i + 1;
           (MVT::SimpleValueType)nVT <= EndVT; ++nVT) {
        MVT SVT = (MVT::SimpleValueType)nVT;
        // Promote vectors of integers to vectors with the same number
        // of elements, with a wider element type.
        if (SVT.getScalarSizeInBits() > EltVT.getFixedSizeInBits() &&
            SVT.getVectorElementCount() == EC && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      LLVM_FALLTHROUGH;
    }

    case TypeWidenVector:
      if (isPowerOf2_32(EC.getKnownMinValue())) {
        // Try to widen the vector.
        for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
          MVT SVT = (MVT::SimpleValueType)nVT;
          if (SVT.getVectorElementType() == EltVT &&
              SVT.isScalableVector() == IsScalable &&
              SVT.getVectorElementCount().getKnownMinValue() >
                  EC.getKnownMinValue() &&
              isTypeLegal(SVT)) {
            TransformToType[i] = SVT;
            RegisterTypeForVT[i] = SVT;
            NumRegistersForVT[i] = 1;
            ValueTypeActions.setTypeAction(VT, TypeWidenVector);
            IsLegalWiderType = true;
            break;
          }
        }
        if (IsLegalWiderType)
          break;
      } else {
        // Only widen to the next power of 2 to keep consistency with EVT.
        MVT NVT = VT.getPow2VectorType();
        if (isTypeLegal(NVT)) {
          TransformToType[i] = NVT;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          RegisterTypeForVT[i] = NVT;
          NumRegistersForVT[i] = 1;
          break;
        }
      }
      LLVM_FALLTHROUGH;

    case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
      unsigned NumRegisters = getVectorTypeBreakdownMVT(
          VT, IntermediateVT, NumIntermediates, RegisterVT, this);
      NumRegistersForVT[i] = NumRegisters;
      assert(NumRegistersForVT[i] == NumRegisters &&
             "NumRegistersForVT size cannot represent NumRegisters!");
      RegisterTypeForVT[i] = RegisterVT;

      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Type is already a power of 2. The default action is to split.
        TransformToType[i] = MVT::Other;
        if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else if (EC.getKnownMinValue() > 1)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          ValueTypeActions.setTypeAction(VT, EC.isScalable()
                                                 ? TypeScalarizeScalableVector
                                                 : TypeScalarizeVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest (meaning one which is
  // not a sub-register class / subreg register class) legal register class
  // for a group of value types. For example, on i386 the representative
  // class for i8, i16, and i32 would be GR32; on x86_64 it would be GR64.
  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
    const TargetRegisterClass *RRC;
    uint8_t Cost;
    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}

EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
  assert(!VT.isVector() && "No default SetCC type for vectors!");
  return getPointerTy(DL).SimpleTy;
}

MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}

/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register. It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context,
                                                    EVT VT, EVT &IntermediateVT,
                                                    unsigned &NumIntermediates,
                                                    MVT &RegisterVT) const {
  ElementCount EltCnt = VT.getVectorElementCount();

  // If there is a wider vector type with the same element type as this one,
  // or a promoted vector type that has the same number of elements which
  // are wider, then we should convert to that legal vector type.
  // This handles things like <2 x float> -> <4 x float> and
  // <4 x i1> -> <4 x i32>.
  LegalizeTypeAction TA = getTypeAction(Context, VT);
  if (!EltCnt.isScalar() &&
      (TA == TypeWidenVector || TA == TypePromoteInteger)) {
    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
    if (isTypeLegal(RegisterEVT)) {
      IntermediateVT = RegisterEVT;
      RegisterVT = RegisterEVT.getSimpleVT();
      NumIntermediates = 1;
      return 1;
    }
  }

  // Figure out the right, legal destination reg to copy into.
  EVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so handle the legalisation of the
  // types like done elsewhere in SelectionDAG.
  if (EltCnt.isScalable()) {
    LegalizeKind LK;
    EVT PartVT = VT;
    do {
      // Iterate until we've found a legal (part) type to hold VT.
      LK = getTypeConversion(Context, PartVT);
      PartVT = LK.second;
    } while (LK.first != TypeLegal);

    if (!PartVT.isVector()) {
      report_fatal_error(
          "Don't know how to legalize this scalable vector type");
    }

    NumIntermediates =
        divideCeil(VT.getVectorElementCount().getKnownMinValue(),
                   PartVT.getVectorElementCount().getKnownMinValue());
    IntermediateVT = PartVT;
    RegisterVT = getRegisterType(Context, IntermediateVT);
    return NumIntermediates;
  }

  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally
  // we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EltCnt.getKnownMinValue())) {
    NumVectorRegs = EltCnt.getKnownMinValue();
    EltCnt = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will always
  // end with a scalar if the target doesn't support vectors.
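  // For example, on a target whose widest legal vector is v4i32, a v16i32
  // input is halved twice (v16i32 -> v8i32 -> v4i32), giving NumVectorRegs = 4.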

bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI,
                                                uint64_t NumCases,
                                                uint64_t Range,
                                                ProfileSummaryInfo *PSI,
                                                BlockFrequencyInfo *BFI) const {
  // FIXME: This function checks the maximum table size and density, but the
  // minimum size is not checked. It would be nice if the minimum size check
  // were also combined into this function. Currently, the minimum size check
  // is performed in findJumpTable() in SelectionDAGBuilder and
  // getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
  const bool OptForSize =
      SI->getParent()->getParent()->hasOptSize() ||
      llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI);
  const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
  const unsigned MaxJumpTableSize = getMaximumJumpTableSize();

  // Check whether the number of cases is small enough and
  // the range is dense enough for a jump table.
  return (OptForSize || Range <= MaxJumpTableSize) &&
         (NumCases * 100 >= Range * MinDensity);
}
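
// For illustration (hypothetical numbers): if getMinimumJumpTableDensity()
// returns 10 (i.e. 10%), a switch whose cases span a range of 400 values is
// suitable only if it has at least 40 cases, since the density check above
// requires NumCases * 100 >= Range * MinDensity (40 * 100 >= 400 * 10).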

MVT TargetLoweringBase::getPreferredSwitchConditionType(LLVMContext &Context,
                                                        EVT ConditionVT) const {
  return getRegisterType(Context, ConditionVT);
}

/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
                         AttributeList attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI, const DataLayout &DL) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr.hasRetAttr(Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr.hasRetAttr(Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;

    // FIXME: The C calling convention requires the return type to be promoted
    // to at least 32 bits, but this is not necessary for non-C calling
    // conventions. The frontend should mark functions whose return values
    // require promoting with signext or zeroext attributes.
    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
      MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
      if (VT.bitsLT(MinVT))
        VT = MinVT;
    }

    unsigned NumParts =
        TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
    MVT PartVT =
        TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);

    // 'inreg' on the function refers to the return value.
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasRetAttr(Attribute::InReg))
      Flags.setInReg();

    // Propagate the extension type, if any.
    if (attr.hasRetAttr(Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasRetAttr(Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isfixed=*/true, 0, 0));
  }
}
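
// For illustration (a sketch with hypothetical types): for a function
// declared "signext i8 @f()" on a target whose narrowest legal integer
// register type is i32, the loop above widens VT from i8 to i32 because the
// return value is sign-extended, then emits a single ISD::OutputArg with
// PartVT == i32 and the SExt flag set.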

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
uint64_t TargetLoweringBase::getByValTypeAlignment(Type *Ty,
                                                   const DataLayout &DL) const {
  return DL.getABITypeAlign(Ty).value();
}

bool TargetLoweringBase::allowsMemoryAccessForAlignment(
    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
    Align Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
  // Check if the specified alignment is sufficient based on the data layout.
  // TODO: While using the data layout works in practice, a better solution
  // would be to implement this check directly (make this a virtual function).
  // For example, the ABI alignment may change based on the software platform,
  // whereas this function should only be affected by the hardware
  // implementation.
  Type *Ty = VT.getTypeForEVT(Context);
  if (VT.isZeroSized() || Alignment >= DL.getABITypeAlign(Ty)) {
    // Assume that an access that meets the ABI-specified alignment is fast.
    if (Fast != nullptr)
      *Fast = true;
    return true;
  }

  // This is a misaligned access.
  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
}

bool TargetLoweringBase::allowsMemoryAccessForAlignment(
    LLVMContext &Context, const DataLayout &DL, EVT VT,
    const MachineMemOperand &MMO, bool *Fast) const {
  return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
                                        MMO.getAlign(), MMO.getFlags(), Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            unsigned AddrSpace, Align Alignment,
                                            MachineMemOperand::Flags Flags,
                                            bool *Fast) const {
  return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
                                        Flags, Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            const MachineMemOperand &MMO,
                                            bool *Fast) const {
  return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
                            MMO.getFlags(), Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, LLT Ty,
                                            const MachineMemOperand &MMO,
                                            bool *Fast) const {
  EVT VT = getApproximateEVTForLLT(Ty, DL, Context);
  return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
                            MMO.getFlags(), Fast);
}

//===----------------------------------------------------------------------===//
//  TargetTransformInfo Helpers
//===----------------------------------------------------------------------===//

int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
  enum InstructionOpcodes {
#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
#include "llvm/IR/Instruction.def"
  };
  switch (static_cast<InstructionOpcodes>(Opcode)) {
  case Ret:            return 0;
  case Br:             return 0;
  case Switch:         return 0;
  case IndirectBr:     return 0;
  case Invoke:         return 0;
  case CallBr:         return 0;
  case Resume:         return 0;
  case Unreachable:    return 0;
  case CleanupRet:     return 0;
  case CatchRet:       return 0;
  case CatchPad:       return 0;
  case CatchSwitch:    return 0;
  case CleanupPad:     return 0;
  case FNeg:           return ISD::FNEG;
  case Add:            return ISD::ADD;
  case FAdd:           return ISD::FADD;
  case Sub:            return ISD::SUB;
  case FSub:           return ISD::FSUB;
  case Mul:            return ISD::MUL;
  case FMul:           return ISD::FMUL;
  case UDiv:           return ISD::UDIV;
  case SDiv:           return ISD::SDIV;
  case FDiv:           return ISD::FDIV;
  case URem:           return ISD::UREM;
  case SRem:           return ISD::SREM;
  case FRem:           return ISD::FREM;
  case Shl:            return ISD::SHL;
  case LShr:           return ISD::SRL;
  case AShr:           return ISD::SRA;
  case And:            return ISD::AND;
  case Or:             return ISD::OR;
  case Xor:            return ISD::XOR;
  case Alloca:         return 0;
  case Load:           return ISD::LOAD;
  case Store:          return ISD::STORE;
  case GetElementPtr:  return 0;
  case Fence:          return 0;
  case AtomicCmpXchg:  return 0;
  case AtomicRMW:      return 0;
  case Trunc:          return ISD::TRUNCATE;
  case ZExt:           return ISD::ZERO_EXTEND;
  case SExt:           return ISD::SIGN_EXTEND;
  case FPToUI:         return ISD::FP_TO_UINT;
  case FPToSI:         return ISD::FP_TO_SINT;
  case UIToFP:         return ISD::UINT_TO_FP;
  case SIToFP:         return ISD::SINT_TO_FP;
  case FPTrunc:        return ISD::FP_ROUND;
  case FPExt:          return ISD::FP_EXTEND;
  case PtrToInt:       return ISD::BITCAST;
  case IntToPtr:       return ISD::BITCAST;
  case BitCast:        return ISD::BITCAST;
  case AddrSpaceCast:  return ISD::ADDRSPACECAST;
  case ICmp:           return ISD::SETCC;
  case FCmp:           return ISD::SETCC;
  case PHI:            return 0;
  case Call:           return 0;
  case Select:         return ISD::SELECT;
  case UserOp1:        return 0;
  case UserOp2:        return 0;
  case VAArg:          return 0;
  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
  case InsertElement:  return ISD::INSERT_VECTOR_ELT;
  case ShuffleVector:  return ISD::VECTOR_SHUFFLE;
  case ExtractValue:   return ISD::MERGE_VALUES;
  case InsertValue:    return ISD::MERGE_VALUES;
  case LandingPad:     return 0;
  case Freeze:         return ISD::FREEZE;
  }

  llvm_unreachable("Unknown instruction type encountered!");
}

std::pair<InstructionCost, MVT>
TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
                                            Type *Ty) const {
  LLVMContext &C = Ty->getContext();
  EVT MTy = getValueType(DL, Ty);

  InstructionCost Cost = 1;
  // We keep legalizing the type until we find a legal kind. We assume that
  // the only operation that costs anything is the split. After splitting
  // we need to handle two types.
  while (true) {
    LegalizeKind LK = getTypeConversion(C, MTy);

    if (LK.first == TypeScalarizeScalableVector) {
      // Ensure we return a sensible simple VT here, since many callers of
      // this function require it.
      MVT VT = MTy.isSimple() ? MTy.getSimpleVT() : MVT::i64;
      return std::make_pair(InstructionCost::getInvalid(), VT);
    }

    if (LK.first == TypeLegal)
      return std::make_pair(Cost, MTy.getSimpleVT());

    if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
      Cost *= 2;

    // Do not loop with f128 type.
    if (MTy == LK.second)
      return std::make_pair(Cost, MTy.getSimpleVT());

    // Keep legalizing the type.
    MTy = LK.second;
  }
}
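
// Worked example of the legalization-cost loop above (hypothetical target
// where v4i32 is the widest legal vector type): v16i32 is split twice,
// v16i32 -> v8i32 -> v4i32, and the cost doubles on each split, so
// getTypeLegalizationCost() returns {4, MVT::v4i32}.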

Value *
TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
                                                       bool UseTLS) const {
  // compiler-rt provides a variable with a magic name. Targets that do not
  // link with compiler-rt may also provide such a variable.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
  auto UnsafeStackPtr =
      dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));

  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());

  if (!UnsafeStackPtr) {
    auto TLSModel = UseTLS ?
        GlobalValue::InitialExecTLSModel :
        GlobalValue::NotThreadLocal;
    // The global variable is not defined yet, define it ourselves.
    // We use the initial-exec TLS model because we do not support the
    // variable living anywhere other than in the main executable.
    UnsafeStackPtr = new GlobalVariable(
        *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
        UnsafeStackPtrVar, nullptr, TLSModel);
  } else {
    // The variable exists, check its type and attributes.
    if (UnsafeStackPtr->getValueType() != StackPtrTy)
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
    if (UseTLS != UnsafeStackPtr->isThreadLocal())
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
                         (UseTLS ? "" : "not ") + "be thread-local");
  }
  return UnsafeStackPtr;
}

Value *
TargetLoweringBase::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
  if (!TM.getTargetTriple().isAndroid())
    return getDefaultSafeStackPointerLocation(IRB, true);

  // Android provides a libc function to retrieve the address of the current
  // thread's unsafe stack pointer.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
  FunctionCallee Fn = M->getOrInsertFunction("__safestack_pointer_address",
                                             StackPtrTy->getPointerTo(0));
  return IRB.CreateCall(Fn);
}

//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS, Instruction *I) const {
  // The default implementation supports only a conservative RISC-style
  // r+r and r+i addressing mode.

  // Allow only a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r.
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default: // Don't allow n * r.
    return false;
  }

  return true;
}
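
// For illustration (hypothetical AddrMode values): under the default rules
// above, "reg + 42" (Scale == 0, HasBaseReg, BaseOffs == 42) and "reg + reg"
// (Scale == 1, HasBaseReg, no offset) are legal, while "reg + reg + 42"
// (Scale == 1, HasBaseReg, BaseOffs == 42) and "4*reg" (Scale == 4) are not.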

//===----------------------------------------------------------------------===//
//  Stack Protector
//===----------------------------------------------------------------------===//

// For OpenBSD return its special guard variable. Otherwise return nullptr,
// so that SelectionDAG handles SSP.
Value *TargetLoweringBase::getIRStackGuard(IRBuilderBase &IRB) const {
  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
    Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
    PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
    Constant *C = M.getOrInsertGlobal("__guard_local", PtrTy);
    if (GlobalVariable *G = dyn_cast_or_null<GlobalVariable>(C))
      G->setVisibility(GlobalValue::HiddenVisibility);
    return C;
  }
  return nullptr;
}

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
  if (!M.getNamedValue("__stack_chk_guard")) {
    auto *GV = new GlobalVariable(M, Type::getInt8PtrTy(M.getContext()), false,
                                  GlobalVariable::ExternalLinkage, nullptr,
                                  "__stack_chk_guard");

    // FreeBSD has "__stack_chk_guard" defined externally in libc.so.
    if (TM.getRelocationModel() == Reloc::Static &&
        !TM.getTargetTriple().isWindowsGNUEnvironment() &&
        !(TM.getTargetTriple().isPPC64() &&
          TM.getTargetTriple().isOSFreeBSD()))
      GV->setDSOLocal(true);
  }
}

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
  return M.getNamedValue("__stack_chk_guard");
}

Function *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
  return nullptr;
}

unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
  return MinimumJumpTableEntries;
}

void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
  MinimumJumpTableEntries = Val;
}

unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
  return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
}

unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
  return MaximumJumpTableSize;
}

void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
  MaximumJumpTableSize = Val;
}

bool TargetLoweringBase::isJumpTableRelative() const {
  return getTargetMachine().isPositionIndependent();
}

Align TargetLoweringBase::getPrefLoopAlignment(MachineLoop *ML) const {
  if (TM.Options.LoopAlignment)
    return Align(TM.Options.LoopAlignment);
  return PrefLoopAlignment;
}

unsigned TargetLoweringBase::getMaxPermittedBytesForAlignment(
    MachineBasicBlock *MBB) const {
  return MaxBytesForAlignment;
}

//===----------------------------------------------------------------------===//
//  Reciprocal Estimates
//===----------------------------------------------------------------------===//

/// Get the reciprocal estimate attribute string for a function that will
/// override the target defaults.
static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return F.getFnAttribute("reciprocal-estimates").getValueAsString();
}

/// Construct a string for the given reciprocal operation of the given type.
/// This string should match the corresponding option to the front-end's
/// "-mrecip" flag assuming those strings have been passed through in an
/// attribute string. For example, "vec-divf" for a division of a vXf32.
static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
  std::string Name = VT.isVector() ? "vec-" : "";

  Name += IsSqrt ? "sqrt" : "div";

  // TODO: Handle other float types?
  if (VT.getScalarType() == MVT::f64) {
    Name += "d";
  } else if (VT.getScalarType() == MVT::f16) {
    Name += "h";
  } else {
    assert(VT.getScalarType() == MVT::f32 &&
           "Unexpected FP type for reciprocal estimate");
    Name += "f";
  }

  return Name;
}

/// If the input string contains a customized refinement step count, return
/// true and set the character position and value (a single numeric character)
/// of that count. Return false if there is no customized refinement step
/// count.
static bool parseRefinementStep(StringRef In, size_t &Position,
                                uint8_t &Value) {
  const char RefStepToken = ':';
  Position = In.find(RefStepToken);
  if (Position == StringRef::npos)
    return false;

  StringRef RefStepString = In.substr(Position + 1);
  // Allow exactly one numeric character for the additional refinement
  // step parameter.
  if (RefStepString.size() == 1) {
    char RefStepChar = RefStepString[0];
    if (isDigit(RefStepChar)) {
      Value = RefStepChar - '0';
      return true;
    }
  }
  report_fatal_error("Invalid refinement step for -recip.");
}

/// For the input attribute string, return one of the ReciprocalEstimate enum
/// status values (enabled, disabled, or not specified) for this operation on
/// the specified data type.
static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "none", or "default" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(Override, RefPos, RefSteps)) {
      // Split the string for further processing.
      Override = Override.substr(0, RefPos);
    }

    // All reciprocal types are enabled.
    if (Override == "all")
      return TargetLoweringBase::ReciprocalEstimate::Enabled;

    // All reciprocal types are disabled.
    if (Override == "none")
      return TargetLoweringBase::ReciprocalEstimate::Disabled;

    // Target defaults for enablement are used.
    if (Override == "default")
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();
  static const char DisabledPrefix = '!';

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(RecipType, RefPos, RefSteps))
      RecipType = RecipType.substr(0, RefPos);

    // Ignore the disablement token for string matching.
    bool IsDisabled = RecipType[0] == DisabledPrefix;
    if (IsDisabled)
      RecipType = RecipType.substr(1);

    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
                        : TargetLoweringBase::ReciprocalEstimate::Enabled;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}
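
// For illustration (a hypothetical attribute string): given
// "reciprocal-estimates"="!sqrtf,vec-divf:2", getOpEnabled() returns
// Disabled for a scalar f32 square root (the '!' prefix disables it),
// Enabled for a vXf32 division, and Unspecified for every other operation;
// the ":2" suffix is the refinement step count that parseRefinementStep()
// extracts and getOpRefinementSteps() below reports.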

/// For the input attribute string, return the customized refinement step count
/// for this operation on the specified data type. If the step count does not
/// exist, return the ReciprocalEstimate enum value for unspecified.
static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "default", or "none" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(Override, RefPos, RefSteps))
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;

    // Split the string for further processing.
    Override = Override.substr(0, RefPos);
    assert(Override != "none" &&
           "Disabled reciprocals, but specified refinement steps?");

    // If this is a general override, return the specified number of steps.
    if (Override == "all" || Override == "default")
      return RefSteps;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(RecipType, RefPos, RefSteps))
      continue;

    RecipType = RecipType.substr(0, RefPos);
    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return RefSteps;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}

int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
                                                    MachineFunction &MF) const {
  return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
                                                   MachineFunction &MF) const {
  return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
                                               MachineFunction &MF) const {
  return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getDivRefinementSteps(EVT VT,
                                              MachineFunction &MF) const {
  return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
}

void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
  MF.getRegInfo().freezeReservedRegs(MF);
}

MachineMemOperand::Flags
TargetLoweringBase::getLoadMemOperandFlags(const LoadInst &LI,
                                           const DataLayout &DL) const {
  MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
  if (LI.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;

  if (LI.hasMetadata(LLVMContext::MD_nontemporal))
    Flags |= MachineMemOperand::MONonTemporal;

  if (LI.hasMetadata(LLVMContext::MD_invariant_load))
    Flags |= MachineMemOperand::MOInvariant;

  if (isDereferenceablePointer(LI.getPointerOperand(), LI.getType(), DL))
    Flags |= MachineMemOperand::MODereferenceable;

  Flags |= getTargetMMOFlags(LI);
  return Flags;
}

MachineMemOperand::Flags
TargetLoweringBase::getStoreMemOperandFlags(const StoreInst &SI,
                                            const DataLayout &DL) const {
  MachineMemOperand::Flags Flags = MachineMemOperand::MOStore;

  if (SI.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;

  if (SI.hasMetadata(LLVMContext::MD_nontemporal))
    Flags |= MachineMemOperand::MONonTemporal;

  // FIXME: Not preserving dereferenceable
  Flags |= getTargetMMOFlags(SI);
  return Flags;
}

MachineMemOperand::Flags
TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI,
                                             const DataLayout &DL) const {
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&AI)) {
    if (RMW->isVolatile())
      Flags |= MachineMemOperand::MOVolatile;
  } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(&AI)) {
    if (CmpX->isVolatile())
      Flags |= MachineMemOperand::MOVolatile;
  } else
    llvm_unreachable("not an atomic instruction");

  // FIXME: Not preserving dereferenceable
  Flags |= getTargetMMOFlags(AI);
  return Flags;
}

Instruction *TargetLoweringBase::emitLeadingFence(IRBuilderBase &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {
  if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
    return Builder.CreateFence(Ord);
  else
    return nullptr;
}

Instruction *TargetLoweringBase::emitTrailingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst,
                                                   AtomicOrdering Ord) const {
  if (isAcquireOrStronger(Ord))
    return Builder.CreateFence(Ord);
  else
    return nullptr;
}

//===----------------------------------------------------------------------===//
// GlobalISel Hooks
//===----------------------------------------------------------------------===//

bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI,
                                        const TargetTransformInfo *TTI) const {
  auto &MF = *MI.getMF();
  auto &MRI = MF.getRegInfo();
  // Assuming a spill and reload of a value has a cost of 1 instruction each,
  // this helper function computes the maximum number of uses we should
  // consider for remat. E.g. on arm64 global addresses take 2 insts to
  // materialize. We break even in terms of code size when the original MI
  // has 2 users vs. choosing to potentially spill. With more than 2 users
  // we have a net code size increase. This doesn't take register pressure
  // into account, though.
  auto maxUses = [](unsigned RematCost) {
    // A cost of 1 means remats are basically free.
    if (RematCost == 1)
      return UINT_MAX;
    if (RematCost == 2)
      return 2U;

    // Remat is too expensive, only sink if there's one user.
    if (RematCost > 2)
      return 1U;
    llvm_unreachable("Unexpected remat cost");
  };

  // Helper to walk through uses and terminate if we've reached a limit. This
  // saves us from spending time traversing uses if all we want to know is
  // whether it's >= min.
  auto isUsesAtMost = [&](unsigned Reg, unsigned MaxUses) {
    unsigned NumUses = 0;
    auto UI = MRI.use_instr_nodbg_begin(Reg), UE = MRI.use_instr_nodbg_end();
    for (; UI != UE && NumUses < MaxUses; ++UI) {
      NumUses++;
    }
    // If we haven't reached the end yet then there are more than MaxUses
    // users.
    return UI == UE;
  };
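
  // For illustration (hypothetical cost): if the target reports a remat cost
  // of 2 for G_GLOBAL_VALUE, maxUses yields 2 and the instruction is
  // localized only when isUsesAtMost() confirms it has at most two non-debug
  // uses; a cost of 1 makes remat "free", so it is always localized.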

  switch (MI.getOpcode()) {
  default:
    return false;
  // Constant-like instructions should be close to their users.
  // We don't want long live-ranges for them.
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_INTTOPTR:
    return true;
  case TargetOpcode::G_GLOBAL_VALUE: {
    unsigned RematCost = TTI->getGISelRematGlobalCost();
    Register Reg = MI.getOperand(0).getReg();
    unsigned MaxUses = maxUses(RematCost);
    if (MaxUses == UINT_MAX)
      return true; // Remats are "free" so always localize.
    return isUsesAtMost(Reg, MaxUses);
  }
  }
}