//===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

static cl::opt<unsigned> MinimumJumpTableEntries
  ("min-jump-table-entries", cl::init(4), cl::Hidden,
   cl::desc("Set minimum number of entries to use a jump table."));

static cl::opt<unsigned> MaximumJumpTableSize
  ("max-jump-table-size", cl::init(UINT_MAX), cl::Hidden,
   cl::desc("Set maximum size of jump tables."));

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
    JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                     cl::desc("Minimum density for building a jump table in "
                              "a normal function"));
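
// Here "density" means NumCases * 100 / Range, i.e. the percentage of values
// in the switch's case range that actually have a case; see
// isSuitableForJumpTable() below, where these thresholds are applied.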

/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned> OptsizeJumpTableDensity(
    "optsize-jump-table-density", cl::init(40), cl::Hidden,
    cl::desc("Minimum density for building a jump table in "
             "an optsize function"));

// FIXME: This option exists only to test whether strict fp operations are
// processed correctly, by preventing the mutation of strict fp operations
// into normal fp operations during development. Once the backend supports
// strict float operations, this option will be meaningless.
static cl::opt<bool> DisableStrictNodeMutation("disable-strictnode-mutation",
       cl::desc("Don't mutate strict-float nodes to legalized nodes"),
       cl::init(false), cl::Hidden);

static bool darwinHasSinCos(const Triple &TT) {
  assert(TT.isOSDarwin() && "should be called with darwin triple");
  // Don't bother with 32-bit x86.
  if (TT.getArch() == Triple::x86)
    return false;
  // macOS < 10.9 has no sincos_stret.
  if (TT.isMacOSX())
    return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
  // iOS < 7.0 has no sincos_stret.
  if (TT.isiOS())
    return !TT.isOSVersionLT(7, 0);
  // Any other darwin such as WatchOS/TvOS is new enough.
  return true;
}

void TargetLoweringBase::InitLibcalls(const Triple &TT) {
#define HANDLE_LIBCALL(code, name) \
  setLibcallName(RTLIB::code, name);
#include "llvm/IR/RuntimeLibcalls.def"
#undef HANDLE_LIBCALL
  // Initialize calling conventions to their default.
  for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
    setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);

  // For IEEE quad-precision libcall names, PPC uses "kf" instead of "tf".
  if (TT.isPPC()) {
    setLibcallName(RTLIB::ADD_F128, "__addkf3");
    setLibcallName(RTLIB::SUB_F128, "__subkf3");
    setLibcallName(RTLIB::MUL_F128, "__mulkf3");
    setLibcallName(RTLIB::DIV_F128, "__divkf3");
    setLibcallName(RTLIB::POWI_F128, "__powikf2");
    setLibcallName(RTLIB::FPEXT_F32_F128, "__extendsfkf2");
    setLibcallName(RTLIB::FPEXT_F64_F128, "__extenddfkf2");
    setLibcallName(RTLIB::FPROUND_F128_F32, "__trunckfsf2");
    setLibcallName(RTLIB::FPROUND_F128_F64, "__trunckfdf2");
    setLibcallName(RTLIB::FPTOSINT_F128_I32, "__fixkfsi");
    setLibcallName(RTLIB::FPTOSINT_F128_I64, "__fixkfdi");
    setLibcallName(RTLIB::FPTOSINT_F128_I128, "__fixkfti");
    setLibcallName(RTLIB::FPTOUINT_F128_I32, "__fixunskfsi");
    setLibcallName(RTLIB::FPTOUINT_F128_I64, "__fixunskfdi");
    setLibcallName(RTLIB::FPTOUINT_F128_I128, "__fixunskfti");
    setLibcallName(RTLIB::SINTTOFP_I32_F128, "__floatsikf");
    setLibcallName(RTLIB::SINTTOFP_I64_F128, "__floatdikf");
    setLibcallName(RTLIB::SINTTOFP_I128_F128, "__floattikf");
    setLibcallName(RTLIB::UINTTOFP_I32_F128, "__floatunsikf");
    setLibcallName(RTLIB::UINTTOFP_I64_F128, "__floatundikf");
    setLibcallName(RTLIB::UINTTOFP_I128_F128, "__floatuntikf");
    setLibcallName(RTLIB::OEQ_F128, "__eqkf2");
    setLibcallName(RTLIB::UNE_F128, "__nekf2");
    setLibcallName(RTLIB::OGE_F128, "__gekf2");
    setLibcallName(RTLIB::OLT_F128, "__ltkf2");
    setLibcallName(RTLIB::OLE_F128, "__lekf2");
    setLibcallName(RTLIB::OGT_F128, "__gtkf2");
    setLibcallName(RTLIB::UO_F128, "__unordkf2");
  }

  // A few names are different on particular architectures or environments.
  if (TT.isOSDarwin()) {
    // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
    // of the gnueabi-style __gnu_*_ieee.
    // FIXME: What about other targets?
    setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

    // Some darwins have an optimized __bzero/bzero function.
    switch (TT.getArch()) {
    case Triple::x86:
    case Triple::x86_64:
      if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
        setLibcallName(RTLIB::BZERO, "__bzero");
      break;
    case Triple::aarch64:
    case Triple::aarch64_32:
      setLibcallName(RTLIB::BZERO, "bzero");
      break;
    default:
      break;
    }

    if (darwinHasSinCos(TT)) {
      setLibcallName(RTLIB::SINCOS_STRET_F32, "__sincosf_stret");
      setLibcallName(RTLIB::SINCOS_STRET_F64, "__sincos_stret");
      if (TT.isWatchABI()) {
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F32,
                              CallingConv::ARM_AAPCS_VFP);
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F64,
                              CallingConv::ARM_AAPCS_VFP);
      }
    }
  } else {
    setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
  }

  if (TT.isGNUEnvironment() || TT.isOSFuchsia() ||
      (TT.isAndroid() && !TT.isAndroidVersionLT(9))) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    setLibcallName(RTLIB::SINCOS_F80, "sincosl");
    setLibcallName(RTLIB::SINCOS_F128, "sincosl");
    setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
  }

  if (TT.isPS4CPU()) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
  }

  if (TT.isOSOpenBSD()) {
    setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
  }
}

/// GetFPLibCall - Helper to return the right libcall for the given floating
/// point type, or UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPLibCall(EVT VT,
                                   RTLIB::Libcall Call_F32,
                                   RTLIB::Libcall Call_F64,
                                   RTLIB::Libcall Call_F80,
                                   RTLIB::Libcall Call_F128,
                                   RTLIB::Libcall Call_PPCF128) {
  return
    VT == MVT::f32 ? Call_F32 :
    VT == MVT::f64 ? Call_F64 :
    VT == MVT::f80 ? Call_F80 :
    VT == MVT::f128 ? Call_F128 :
    VT == MVT::ppcf128 ? Call_PPCF128 :
    RTLIB::UNKNOWN_LIBCALL;
}

/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
    if (RetVT == MVT::f64)
      return FPEXT_F16_F64;
    if (RetVT == MVT::f80)
      return FPEXT_F16_F80;
    if (RetVT == MVT::f128)
      return FPEXT_F16_F128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F32_PPCF128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
    else if (RetVT == MVT::ppcf128)
      return FPEXT_F64_PPCF128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::f128)
      return FPEXT_F80_F128;
  }

  return UNKNOWN_LIBCALL;
}
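
// For example, getFPEXT(MVT::f32, MVT::f64) returns FPEXT_F32_F64, the
// libcall used to widen a float to a double (__extendsfdf2 by default).
// The getFPROUND / getFPTO*INT / get*INTTOFP helpers below follow the same
// OpVT/RetVT lookup pattern.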

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  } else if (RetVT == MVT::f80) {
    if (OpVT == MVT::f128)
      return FPROUND_F128_F80;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

RTLIB::Libcall RTLIB::getPOWI(EVT RetVT) {
  return getFPLibCall(RetVT, POWI_F32, POWI_F64, POWI_F80, POWI_F128,
                      POWI_PPCF128);
}

RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
                                        MVT VT) {
  unsigned ModeN, ModelN;
  switch (VT.SimpleTy) {
  case MVT::i8:
    ModeN = 0;
    break;
  case MVT::i16:
    ModeN = 1;
    break;
  case MVT::i32:
    ModeN = 2;
    break;
  case MVT::i64:
    ModeN = 3;
    break;
  case MVT::i128:
    ModeN = 4;
    break;
  default:
    return UNKNOWN_LIBCALL;
  }

  switch (Order) {
  case AtomicOrdering::Monotonic:
    ModelN = 0;
    break;
  case AtomicOrdering::Acquire:
    ModelN = 1;
    break;
  case AtomicOrdering::Release:
    ModelN = 2;
    break;
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    ModelN = 3;
    break;
  default:
    return UNKNOWN_LIBCALL;
  }

#define LCALLS(A, B)                                                           \
  { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
#define LCALL5(A)                                                              \
  LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
  switch (Opc) {
  case ISD::ATOMIC_CMP_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_CAS)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_SWP)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_LOAD_ADD: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDADD)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_LOAD_OR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDSET)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_LOAD_CLR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDCLR)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_LOAD_XOR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDEOR)};
    return LC[ModeN][ModelN];
  }
  default:
    return UNKNOWN_LIBCALL;
  }
#undef LCALLS
#undef LCALL5
}
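
// For example, getOUTLINE_ATOMIC(ISD::ATOMIC_LOAD_ADD, AtomicOrdering::Acquire,
// MVT::i32) returns OUTLINE_ATOMIC_LDADD4_ACQ: ModeN = 2 selects the i32 row
// and ModelN = 1 selects the acquire column of the LCALL5 table above.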

RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum)                                              \
  case Name:                                                                   \
    switch (VT.SimpleTy) {                                                     \
    default:                                                                   \
      return UNKNOWN_LIBCALL;                                                  \
    case MVT::i8:                                                              \
      return Enum##_1;                                                         \
    case MVT::i16:                                                             \
      return Enum##_2;                                                         \
    case MVT::i32:                                                             \
      return Enum##_4;                                                         \
    case MVT::i64:                                                             \
      return Enum##_8;                                                         \
    case MVT::i128:                                                            \
      return Enum##_16;                                                        \
    }

  switch (Opc) {
    OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
    OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}
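
// For example, getSYNC(ISD::ATOMIC_LOAD_ADD, MVT::i32) expands through
// OP_TO_LIBCALL to SYNC_FETCH_AND_ADD_4, i.e. the __sync_fetch_and_add_4
// runtime routine.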

RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

/// InitCmpLibcallCCs - Set default comparison libcall CC.
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  std::fill(CCs, CCs + RTLIB::UNKNOWN_LIBCALL, ISD::SETCC_INVALID);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::UNE_F128] = ISD::SETNE;
  CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OGE_F128] = ISD::SETGE;
  CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLT_F128] = ISD::SETLT;
  CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OLE_F128] = ISD::SETLE;
  CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::OGT_F128] = ISD::SETGT;
  CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::UO_F128] = ISD::SETNE;
  CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
}

/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
      MaxLoadsPerMemcmp = 8;
  MaxGluedStoresPerMemcpy = 0;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  EnableExtLdPromotion = false;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  GatherAllAliasesMaxDepth = 18;
  IsStrictFPEnabled = DisableStrictNodeMutation;
  MaxBytesForAlignment = 0;
  // TODO: the default will be switched to 0 in the next commit, along
  // with the Target-specific changes necessary.
  MaxAtomicSizeInBitsSupported = 1024;

  MinCmpXchgSizeInBits = 0;
  SupportsUnalignedAtomics = false;

  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames),
            nullptr);

  InitLibcalls(TM.getTargetTriple());
  InitCmpLibcallCCs(CmpLibcallCCs);
}

void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
  std::fill(std::begin(TargetDAGCombineArray),
            std::end(TargetDAGCombineArray), 0);

  for (MVT VT : MVT::fp_valuetypes()) {
    MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits());
    if (IntVT.isValid()) {
      setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
      AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
    }
  }
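
  // For example (illustrative): the loop above promotes ISD::ATOMIC_SWAP on
  // MVT::f32 to the MVT::i32 form, so floating-point atomic swaps are handled
  // as integer operations of the same width.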

  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node which just returns the value loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

    // These operations default to expand.
    setOperationAction(ISD::FGETSIGN, VT, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FMINNUM_IEEE, VT, Expand);
    setOperationAction(ISD::FMAXNUM_IEEE, VT, Expand);
    setOperationAction(ISD::FMINIMUM, VT, Expand);
    setOperationAction(ISD::FMAXIMUM, VT, Expand);
    setOperationAction(ISD::FMAD, VT, Expand);
    setOperationAction(ISD::SMIN, VT, Expand);
    setOperationAction(ISD::SMAX, VT, Expand);
    setOperationAction(ISD::UMIN, VT, Expand);
    setOperationAction(ISD::UMAX, VT, Expand);
    setOperationAction(ISD::ABS, VT, Expand);
    setOperationAction(ISD::FSHL, VT, Expand);
    setOperationAction(ISD::FSHR, VT, Expand);
    setOperationAction(ISD::SADDSAT, VT, Expand);
    setOperationAction(ISD::UADDSAT, VT, Expand);
    setOperationAction(ISD::SSUBSAT, VT, Expand);
    setOperationAction(ISD::USUBSAT, VT, Expand);
    setOperationAction(ISD::SSHLSAT, VT, Expand);
    setOperationAction(ISD::USHLSAT, VT, Expand);
    setOperationAction(ISD::SMULFIX, VT, Expand);
    setOperationAction(ISD::SMULFIXSAT, VT, Expand);
    setOperationAction(ISD::UMULFIX, VT, Expand);
    setOperationAction(ISD::UMULFIXSAT, VT, Expand);
    setOperationAction(ISD::SDIVFIX, VT, Expand);
    setOperationAction(ISD::SDIVFIXSAT, VT, Expand);
    setOperationAction(ISD::UDIVFIX, VT, Expand);
    setOperationAction(ISD::UDIVFIXSAT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT_SAT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT_SAT, VT, Expand);

    // Overflow operations default to expand
    setOperationAction(ISD::SADDO, VT, Expand);
    setOperationAction(ISD::SSUBO, VT, Expand);
    setOperationAction(ISD::UADDO, VT, Expand);
    setOperationAction(ISD::USUBO, VT, Expand);
    setOperationAction(ISD::SMULO, VT, Expand);
    setOperationAction(ISD::UMULO, VT, Expand);

    // ADDCARRY operations default to expand
    setOperationAction(ISD::ADDCARRY, VT, Expand);
    setOperationAction(ISD::SUBCARRY, VT, Expand);
    setOperationAction(ISD::SETCCCARRY, VT, Expand);
    setOperationAction(ISD::SADDO_CARRY, VT, Expand);
    setOperationAction(ISD::SSUBO_CARRY, VT, Expand);

    // ADDC/ADDE/SUBC/SUBE default to expand.
    setOperationAction(ISD::ADDC, VT, Expand);
    setOperationAction(ISD::ADDE, VT, Expand);
    setOperationAction(ISD::SUBC, VT, Expand);
    setOperationAction(ISD::SUBE, VT, Expand);

    // Absolute difference
    setOperationAction(ISD::ABDS, VT, Expand);
    setOperationAction(ISD::ABDU, VT, Expand);
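
    // Illustrative note: operations left as Expand in this loop get the
    // generic legalizer lowering; e.g. ISD::SADDO is rewritten as an ADD plus
    // an explicit overflow check unless a target marks it Legal or Custom.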

    // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);

    setOperationAction(ISD::BITREVERSE, VT, Expand);
    setOperationAction(ISD::PARITY, VT, Expand);

    // These library functions default to expand.
    setOperationAction(ISD::FROUND, VT, Expand);
    setOperationAction(ISD::FROUNDEVEN, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);

    // These operations default to expand for vector types.
    if (VT.isVector()) {
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::SPLAT_VECTOR, VT, Expand);
    }

    // Constrained floating-point operations default to expand.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
    setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
#include "llvm/IR/ConstrainedOps.def"

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);

    // Vector reductions default to expand.
    setOperationAction(ISD::VECREDUCE_FADD, VT, Expand);
    setOperationAction(ISD::VECREDUCE_FMUL, VT, Expand);
    setOperationAction(ISD::VECREDUCE_ADD, VT, Expand);
    setOperationAction(ISD::VECREDUCE_MUL, VT, Expand);
    setOperationAction(ISD::VECREDUCE_AND, VT, Expand);
    setOperationAction(ISD::VECREDUCE_OR, VT, Expand);
    setOperationAction(ISD::VECREDUCE_XOR, VT, Expand);
    setOperationAction(ISD::VECREDUCE_SMAX, VT, Expand);
    setOperationAction(ISD::VECREDUCE_SMIN, VT, Expand);
    setOperationAction(ISD::VECREDUCE_UMAX, VT, Expand);
    setOperationAction(ISD::VECREDUCE_UMIN, VT, Expand);
    setOperationAction(ISD::VECREDUCE_FMAX, VT, Expand);
    setOperationAction(ISD::VECREDUCE_FMIN, VT, Expand);
    setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Expand);
    setOperationAction(ISD::VECREDUCE_SEQ_FMUL, VT, Expand);

    // Named vector shuffles default to expand.
    setOperationAction(ISD::VECTOR_SPLICE, VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);

  // ConstantFP nodes default to expand. Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f128, Expand);
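
  // Illustrative example for the block below: with ISD::FLOG left as Expand,
  // a log on f64 is ultimately lowered by the legalizer to a call to the
  // log() libm routine (RTLIB::LOG_F64).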

  // These library functions default to expand.
  for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
    setOperationAction(ISD::FCBRT, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::LROUND, VT, Expand);
    setOperationAction(ISD::LLROUND, VT, Expand);
    setOperationAction(ISD::LRINT, VT, Expand);
    setOperationAction(ISD::LLRINT, VT, Expand);
  }

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);

  setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);
}

MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
}

EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                                         bool LegalTypes) const {
  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  MVT ShiftVT =
      LegalTypes ? getScalarShiftAmountTy(DL, LHSTy) : getPointerTy(DL);
  // If any possible shift value won't fit in the preferred type, just use
  // something safe. Assume it will be legalized when the shift is expanded.
  if (ShiftVT.getSizeInBits() < Log2_32_Ceil(LHSTy.getSizeInBits()))
    ShiftVT = MVT::i32;
  assert(ShiftVT.getSizeInBits() >= Log2_32_Ceil(LHSTy.getSizeInBits()) &&
         "ShiftVT is still too small!");
  return ShiftVT;
}

bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

bool TargetLoweringBase::isFreeAddrSpaceCast(unsigned SrcAS,
                                             unsigned DestAS) const {
  return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
}

void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}
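
// Illustrative example for getTypeConversion() below: on a target whose
// widest legal integer type is i32, a simple MVT::i64 yields
// {TypeExpandInteger, i32}, while a non-simple i33 is first rounded up to a
// power of two and returns {TypePromoteInteger, i64}.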

TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            LA == TypeSoftPromoteHalf ||
            (NVT.isVector() ||
             ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA, EVT(SVT).getHalfNumVectorElementsVT(Context));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle Extended Scalar Types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }

  // Handle vector types.
  ElementCount NumElts = VT.getVectorElementCount();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
  if (NumElts.isScalar())
    return LegalizeKind(TypeScalarizeVector, EltVT);

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = NumElts.coefficientNextPowerOf2();
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If type is to be expanded, split the vector.
    // <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger) {
      if (VT.getVectorElementCount().isScalable())
        return LegalizeKind(TypeScalarizeScalableVector, EltVT);
      return LegalizeKind(TypeSplitVector,
                          VT.getHalfNumVectorElementsVT(Context));
    }

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fall back to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (true) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: X86 XMM registers hold 64-bit elements on 32-bit
      // systems.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (true) {
    // Round up to the next power of 2.
    NumElts = NumElts.coefficientNextPowerOf2();

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type. Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen odd vectors to next power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  if (VT.getVectorElementCount() == ElementCount::getScalable(1))
    return LegalizeKind(TypeScalarizeScalableVector, EltVT);

  // Vectors with illegal element types are expanded.
  EVT NVT = EVT::getVectorVT(Context, EltVT,
                             VT.getVectorElementCount().divideCoefficientBy(2));
  return LegalizeKind(TypeSplitVector, NVT);
}

static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  ElementCount EC = VT.getVectorElementCount();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so splitting or widening is
  // required.
  if (VT.isScalableVector() && !isPowerOf2_32(EC.getKnownMinValue()))
    llvm_unreachable(
        "Splitting or widening of non-power-of-2 MVTs is not implemented.");

  // FIXME: We don't support non-power-of-2-sized vectors for now.
  // Ideally we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EC.getKnownMinValue())) {
    // Split EC to unit size (scalable property is preserved).
    NumVectorRegs = EC.getKnownMinValue();
    EC = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will
  // always end up with an EC that represents a scalar or a scalable
  // scalar.
  while (EC.getKnownMinValue() > 1 &&
         !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
    EC = EC.divideCoefficientBy(2);
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, EC);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned LaneSizeInBits = NewVT.getScalarSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(LaneSizeInBits))
    LaneSizeInBits = NextPowerOf2(LaneSizeInBits);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs * (LaneSizeInBits / DestVT.getScalarSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass &RC) const {
  for (auto I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
    if (isTypeLegal(*I))
      return true;
  return false;
}

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock *
TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
                                   MachineBasicBlock *MBB) const {
  MachineInstr *MI = &InitialMI;
  MachineFunction &MF = *MI->getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through is handled already (the live through ones are
  // all stack slots), but we need to handle the different type of stackmap
  // operands and memory effects here.

  if (llvm::none_of(MI->operands(),
                    [](MachineOperand &Operand) { return Operand.isFI(); }))
    return MBB;

  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

  // Inherit previous memory operands.
  MIB.cloneMemRefs(*MI);

  for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isFI()) {
      // Index of the Def operand this Use is tied to.
      // Since Defs come before Uses, if this Use is tied, the index of its
      // Def must be smaller than the index of the Use.
      // Also, Defs preserve their position in the new MI.
      unsigned TiedTo = i;
      if (MO.isReg() && MO.isTied())
        TiedTo = MI->findTiedOperandIdx(i);
      MIB.add(MO);
      if (TiedTo < i)
        MIB->tieOperands(TiedTo, MIB->getNumOperands() - 1);
      continue;
    }

    // foldMemoryOperand builds a new MI after replacing a single FI operand
    // with the canonical set of five x86 addressing-mode operands.
    int FI = MO.getIndex();

    // Add frame index operands recognized by stackmaps.cpp
    if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
      // indirect-mem-ref tag, size, #FI, offset.
      // Used for spills inserted by StatepointLowering. This codepath is not
      // used for patchpoints/stackmaps at all, for these spilling is done via
      // foldMemoryOperand callback only.
      assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(MFI.getObjectSize(FI));
      MIB.add(MO);
      MIB.addImm(0);
    } else {
      // direct-mem-ref tag, #FI, offset.
      // Used by patchpoint, and direct alloca arguments to statepoints
      MIB.addImm(StackMaps::DirectMemRefOp);
      MIB.add(MO);
      MIB.addImm(0);
    }

    assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");

    // Add a new memory operand for this FI.
    assert(MFI.getObjectOffset(FI) != -1);

    // Note: STATEPOINT MMOs are added during SelectionDAG; STACKMAP and
    // PATCHPOINT should be updated to do the same. (TODO)
    if (MI->getOpcode() != TargetOpcode::STATEPOINT) {
      auto Flags = MachineMemOperand::MOLoad;
      MachineMemOperand *MMO = MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FI), Flags,
          MF.getDataLayout().getPointerSize(), MFI.getObjectAlign(FI));
      MIB->addMemOperand(MF, MMO);
    }
  }
  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI->eraseFromParent();
  return MBB;
}

/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT which would
// need to be moved to TargetRegisterInfo and would necessitate moving
// isTypeLegal over as well - a massive change that would just require
// TargetLowering having a TargetRegisterInfo class member that it would use.
std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                            MVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);

  // Compute the set of all super-register classes.
  BitVector SuperRegRC(TRI->getNumRegClasses());
  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
    SuperRegRC.setBitsInMask(RCI.getMask());

  // Find the first legal register class with the largest spill size.
  const TargetRegisterClass *BestRC = RC;
  for (unsigned i : SuperRegRC.set_bits()) {
    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
    // We want the largest possible spill size.
    if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
      continue;
    if (!isLegalRC(*TRI, *SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLoweringBase::computeRegisterProperties(
    const TargetRegisterInfo *TRI) {
  static_assert(MVT::VALUETYPE_SIZE <= MVT::MAX_ALLOWED_VALUETYPE,
                "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;
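
  // Illustrative example: if i32 is the widest integer type with a register
  // class, the loops below record that i64 is expanded to 2 x i32 and i128
  // to 4 x i32, while narrower illegal integer types are promoted to the
  // next wider legal integer type.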

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1;
       ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
                                   TypeExpandInteger);
  }

  // Inspect all of the ValueTypes smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
          (MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
    }
  }

  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
      TransformToType[MVT::ppcf128] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
    } else {
      NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
      RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
      TransformToType[MVT::ppcf128] = MVT::i128;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
    }
  }

  // Decide how to handle f128. If the target does not have native f128
  // support, expand it to i128 and we will be generating soft float library
  // calls.
  if (!isTypeLegal(MVT::f128)) {
    NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
    RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
    TransformToType[MVT::f128] = MVT::i128;
    ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
  }

  // Decide how to handle f64. If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  }

  // Decide how to handle f32. If the target does not have native f32 support,
  // expand it to i32 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f32)) {
    NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f32] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
  }
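
  // For example (illustrative): once f32 is softened to i32 above, a
  // single-precision add is lowered to the __addsf3 runtime call.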

  // Decide how to handle f16. If the target does not have native f16 support,
  // promote it to f32, because there are no f16 library calls (except for
  // conversions).
  if (!isTypeLegal(MVT::f16)) {
    // Allow targets to control how we legalize half.
    if (softPromoteHalfType()) {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
      TransformToType[MVT::f16] = MVT::f32;
      ValueTypeActions.setTypeAction(MVT::f16, TypeSoftPromoteHalf);
    } else {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
      TransformToType[MVT::f16] = MVT::f32;
      ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
    }
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType) i;
    if (isTypeLegal(VT))
      continue;

    MVT EltVT = VT.getVectorElementType();
    ElementCount EC = VT.getVectorElementCount();
    bool IsLegalWiderType = false;
    bool IsScalable = VT.isScalableVector();
    LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
    switch (PreferredAction) {
    case TypePromoteInteger: {
      MVT::SimpleValueType EndVT =
          IsScalable ? MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE
                     : MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE;
      // Try to promote the elements of integer vectors. If no legal
      // promotion was found, fall through to the widen-vector method.
      for (unsigned nVT = i + 1;
           (MVT::SimpleValueType)nVT <= EndVT; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        // Promote vectors of integers to vectors with the same number
        // of elements, with a wider element type.
        if (SVT.getScalarSizeInBits() > EltVT.getFixedSizeInBits() &&
            SVT.getVectorElementCount() == EC && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      LLVM_FALLTHROUGH;
    }

    case TypeWidenVector:
      if (isPowerOf2_32(EC.getKnownMinValue())) {
        // Try to widen the vector.
        for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
          MVT SVT = (MVT::SimpleValueType) nVT;
          if (SVT.getVectorElementType() == EltVT &&
              SVT.isScalableVector() == IsScalable &&
              SVT.getVectorElementCount().getKnownMinValue() >
                  EC.getKnownMinValue() &&
              isTypeLegal(SVT)) {
            TransformToType[i] = SVT;
            RegisterTypeForVT[i] = SVT;
            NumRegistersForVT[i] = 1;
            ValueTypeActions.setTypeAction(VT, TypeWidenVector);
            IsLegalWiderType = true;
            break;
          }
        }
        if (IsLegalWiderType)
          break;
      } else {
        // Only widen to the next power of 2 to keep consistency with EVT.
        MVT NVT = VT.getPow2VectorType();
        if (isTypeLegal(NVT)) {
          TransformToType[i] = NVT;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          RegisterTypeForVT[i] = NVT;
          NumRegistersForVT[i] = 1;
          break;
        }
      }
      LLVM_FALLTHROUGH;

    case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
      unsigned NumRegisters = getVectorTypeBreakdownMVT(VT, IntermediateVT,
          NumIntermediates, RegisterVT, this);
      NumRegistersForVT[i] = NumRegisters;
      assert(NumRegistersForVT[i] == NumRegisters &&
             "NumRegistersForVT size cannot represent NumRegisters!");
      RegisterTypeForVT[i] = RegisterVT;

      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Type is already a power of 2. The default action is to split.
        TransformToType[i] = MVT::Other;
        if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else if (EC.getKnownMinValue() > 1)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          ValueTypeActions.setTypeAction(VT, EC.isScalable()
                                                 ? TypeScalarizeScalableVector
                                                 : TypeScalarizeVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest (meaning one which is
  // not a sub-register class / subreg register class) legal register class for
  // a group of value types. For example, on i386 the representative class for
  // i8, i16, and i32 would be GR32; on x86_64 it would be GR64.
  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
    const TargetRegisterClass* RRC;
    uint8_t Cost;
    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}

EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
  assert(!VT.isVector() && "No default SetCC type for vectors!");
  return getPointerTy(DL).SimpleTy;
}

MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}

/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register. It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context,
                                                    EVT VT, EVT &IntermediateVT,
                                                    unsigned &NumIntermediates,
                                                    MVT &RegisterVT) const {
  ElementCount EltCnt = VT.getVectorElementCount();

  // If there is a wider vector type with the same element type as this one,
  // or a promoted vector type that has the same number of elements which
  // are wider, then we should convert to that legal vector type.
  // This handles things like <2 x float> -> <4 x float> and
  // <4 x i1> -> <4 x i32>.
  LegalizeTypeAction TA = getTypeAction(Context, VT);
  if (!EltCnt.isScalar() &&
      (TA == TypeWidenVector || TA == TypePromoteInteger)) {
    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
    if (isTypeLegal(RegisterEVT)) {
      IntermediateVT = RegisterEVT;
      RegisterVT = RegisterEVT.getSimpleVT();
      NumIntermediates = 1;
      return 1;
    }
  }

  // Figure out the right, legal destination reg to copy into.
  EVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so handle the legalization of
  // these types the same way it is done elsewhere in SelectionDAG.
  if (EltCnt.isScalable()) {
    LegalizeKind LK;
    EVT PartVT = VT;
    do {
      // Iterate until we've found a legal (part) type to hold VT.
      LK = getTypeConversion(Context, PartVT);
      PartVT = LK.second;
    } while (LK.first != TypeLegal);

    if (!PartVT.isVector()) {
      report_fatal_error(
          "Don't know how to legalize this scalable vector type");
    }

    NumIntermediates =
        divideCeil(VT.getVectorElementCount().getKnownMinValue(),
                   PartVT.getVectorElementCount().getKnownMinValue());
    IntermediateVT = PartVT;
    RegisterVT = getRegisterType(Context, IntermediateVT);
    return NumIntermediates;
  }

  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally
  // we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EltCnt.getKnownMinValue())) {
    NumVectorRegs = EltCnt.getKnownMinValue();
    EltCnt = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will always
  // end with a scalar if the target doesn't support vectors.
  while (EltCnt.getKnownMinValue() > 1 &&
         !isTypeLegal(EVT::getVectorVT(Context, EltTy, EltCnt))) {
    EltCnt = EltCnt.divideCoefficientBy(2);
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  EVT NewVT = EVT::getVectorVT(Context, EltTy, EltCnt);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getRegisterType(Context, NewVT);
  RegisterVT = DestVT;

  if (EVT(DestVT).bitsLT(NewVT)) { // Value is expanded, e.g. i64 -> i16.
    TypeSize NewVTSize = NewVT.getSizeInBits();
    // Convert sizes such as i33 to i64.
    if (!isPowerOf2_32(NewVTSize.getKnownMinSize()))
      NewVTSize = NewVTSize.coefficientNextPowerOf2();
    return NumVectorRegs * (NewVTSize / DestVT.getSizeInBits());
  }

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
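  // For example (assuming v2f32 were the first legal type reached above), a
  // v8f32 input halved twice gives NumVectorRegs == 4, one register per
  // v2f32 part.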
  return NumVectorRegs;
}

bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI,
                                                uint64_t NumCases,
                                                uint64_t Range,
                                                ProfileSummaryInfo *PSI,
                                                BlockFrequencyInfo *BFI) const {
  // FIXME: This function checks the maximum table size and density, but not
  // the minimum size. It would be nice if the minimum size check were also
  // folded into this function. Currently, it is performed in findJumpTable()
  // in SelectionDAGBuilder and in getEstimatedNumberOfCaseClusters() in
  // BasicTTIImpl.
  const bool OptForSize =
      SI->getParent()->getParent()->hasOptSize() ||
      llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI);
  const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
  const unsigned MaxJumpTableSize = getMaximumJumpTableSize();

  // Check whether the number of cases is small enough and
  // the range is dense enough for a jump table.
  return (OptForSize || Range <= MaxJumpTableSize) &&
         (NumCases * 100 >= Range * MinDensity);
}

/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
                         AttributeList attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI, const DataLayout &DL) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr.hasRetAttr(Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr.hasRetAttr(Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;

    // FIXME: The C calling convention requires the return type to be promoted
    // to at least 32 bits. But this is not necessary for non-C calling
    // conventions. The frontend should mark functions whose return values
    // require promoting with signext or zeroext attributes.
    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
      MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
      if (VT.bitsLT(MinVT))
        VT = MinVT;
    }

    unsigned NumParts =
        TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
    MVT PartVT =
        TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);

    // 'inreg' on a function refers to the return value.
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasRetAttr(Attribute::InReg))
      Flags.setInReg();

    // Propagate extension type if any.
    if (attr.hasRetAttr(Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasRetAttr(Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isfixed=*/true, 0, 0));
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
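/// For example, a type whose ABI alignment is 8 bytes yields 8 here, where a
/// log2-based interface would instead yield 3.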
uint64_t TargetLoweringBase::getByValTypeAlignment(Type *Ty,
                                                   const DataLayout &DL) const {
  return DL.getABITypeAlign(Ty).value();
}

bool TargetLoweringBase::allowsMemoryAccessForAlignment(
    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
    Align Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
  // Check if the specified alignment is sufficient based on the data layout.
  // TODO: While using the data layout works in practice, a better solution
  // would be to implement this check directly (make this a virtual function).
  // For example, the ABI alignment may change based on software platform while
  // this function should only be affected by hardware implementation.
  Type *Ty = VT.getTypeForEVT(Context);
  if (VT.isZeroSized() || Alignment >= DL.getABITypeAlign(Ty)) {
    // Assume that an access that meets the ABI-specified alignment is fast.
    if (Fast != nullptr)
      *Fast = true;
    return true;
  }

  // This is a misaligned access.
  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
}

bool TargetLoweringBase::allowsMemoryAccessForAlignment(
    LLVMContext &Context, const DataLayout &DL, EVT VT,
    const MachineMemOperand &MMO, bool *Fast) const {
  return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
                                        MMO.getAlign(), MMO.getFlags(), Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            unsigned AddrSpace, Align Alignment,
                                            MachineMemOperand::Flags Flags,
                                            bool *Fast) const {
  return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
                                        Flags, Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            const MachineMemOperand &MMO,
                                            bool *Fast) const {
  return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
                            MMO.getFlags(), Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, LLT Ty,
                                            const MachineMemOperand &MMO,
                                            bool *Fast) const {
  EVT VT = getApproximateEVTForLLT(Ty, DL, Context);
  return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
                            MMO.getFlags(), Fast);
}

//===----------------------------------------------------------------------===//
//  TargetTransformInfo Helpers
//===----------------------------------------------------------------------===//

int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
  enum InstructionOpcodes {
#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
#include "llvm/IR/Instruction.def"
  };
  switch (static_cast<InstructionOpcodes>(Opcode)) {
  case Ret:            return 0;
  case Br:             return 0;
  case Switch:         return 0;
  case IndirectBr:     return 0;
  case Invoke:         return 0;
  case CallBr:         return 0;
  case Resume:         return 0;
  case Unreachable:    return 0;
  case CleanupRet:     return 0;
  case CatchRet:       return 0;
  case CatchPad:       return 0;
  case CatchSwitch:    return 0;
  case CleanupPad:     return 0;
  case FNeg:           return ISD::FNEG;
  case Add:            return ISD::ADD;
  case FAdd:           return ISD::FADD;
  case Sub:            return ISD::SUB;
  case FSub:           return ISD::FSUB;
  case Mul:            return ISD::MUL;
  case FMul:           return ISD::FMUL;
  case UDiv:           return ISD::UDIV;
  case SDiv:           return ISD::SDIV;
  case FDiv:           return ISD::FDIV;
  case URem:           return ISD::UREM;
  case SRem:           return ISD::SREM;
  case FRem:           return ISD::FREM;
  case Shl:            return ISD::SHL;
  case LShr:           return ISD::SRL;
  case AShr:           return ISD::SRA;
  case And:            return ISD::AND;
  case Or:             return ISD::OR;
  case Xor:            return ISD::XOR;
  case Alloca:         return 0;
  case Load:           return ISD::LOAD;
  case Store:          return ISD::STORE;
  case GetElementPtr:  return 0;
  case Fence:          return 0;
  case AtomicCmpXchg:  return 0;
  case AtomicRMW:      return 0;
  case Trunc:          return ISD::TRUNCATE;
  case ZExt:           return ISD::ZERO_EXTEND;
  case SExt:           return ISD::SIGN_EXTEND;
  case FPToUI:         return ISD::FP_TO_UINT;
  case FPToSI:         return ISD::FP_TO_SINT;
  case UIToFP:         return ISD::UINT_TO_FP;
  case SIToFP:         return ISD::SINT_TO_FP;
  case FPTrunc:        return ISD::FP_ROUND;
  case FPExt:          return ISD::FP_EXTEND;
  case PtrToInt:       return ISD::BITCAST;
  case IntToPtr:       return ISD::BITCAST;
  case BitCast:        return ISD::BITCAST;
  case AddrSpaceCast:  return ISD::ADDRSPACECAST;
  case ICmp:           return ISD::SETCC;
  case FCmp:           return ISD::SETCC;
  case PHI:            return 0;
  case Call:           return 0;
  case Select:         return ISD::SELECT;
  case UserOp1:        return 0;
  case UserOp2:        return 0;
  case VAArg:          return 0;
  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
  case InsertElement:  return ISD::INSERT_VECTOR_ELT;
  case ShuffleVector:  return ISD::VECTOR_SHUFFLE;
  case ExtractValue:   return ISD::MERGE_VALUES;
  case InsertValue:    return ISD::MERGE_VALUES;
  case LandingPad:     return 0;
  case Freeze:         return ISD::FREEZE;
  }

  llvm_unreachable("Unknown instruction type encountered!");
}

std::pair<InstructionCost, MVT>
TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
                                            Type *Ty) const {
  LLVMContext &C = Ty->getContext();
  EVT MTy = getValueType(DL, Ty);

  InstructionCost Cost = 1;
  // We keep legalizing the type until we find a legal kind. We assume that
  // the only operation that costs anything is the split. After splitting
  // we need to handle two types.
  while (true) {
    LegalizeKind LK = getTypeConversion(C, MTy);

    if (LK.first == TypeScalarizeScalableVector) {
      // Ensure we return a sensible simple VT here, since many callers of this
      // function require it.
      MVT VT = MTy.isSimple() ? MTy.getSimpleVT() : MVT::i64;
      return std::make_pair(InstructionCost::getInvalid(), VT);
    }

    if (LK.first == TypeLegal)
      return std::make_pair(Cost, MTy.getSimpleVT());

    if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
      Cost *= 2;

    // Do not loop with f128 type.
    if (MTy == LK.second)
      return std::make_pair(Cost, MTy.getSimpleVT());

    // Keep legalizing the type.
    MTy = LK.second;
  }
}

Value *
TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
                                                       bool UseTLS) const {
  // compiler-rt provides a variable with a magic name. Targets that do not
  // link with compiler-rt may also provide such a variable.
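  // A rough sketch of the runtime-provided definition (the actual definition
  // lives in the SafeStack runtime, not here):
  //   __thread void *__safestack_unsafe_stack_ptr;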
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
  auto UnsafeStackPtr =
      dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));

  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());

  if (!UnsafeStackPtr) {
    auto TLSModel = UseTLS ?
        GlobalValue::InitialExecTLSModel :
        GlobalValue::NotThreadLocal;
    // The global variable is not defined yet, define it ourselves.
    // We use the initial-exec TLS model because we do not support the
    // variable living anywhere other than in the main executable.
    UnsafeStackPtr = new GlobalVariable(
        *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
        UnsafeStackPtrVar, nullptr, TLSModel);
  } else {
    // The variable exists, check its type and attributes.
    if (UnsafeStackPtr->getValueType() != StackPtrTy)
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
    if (UseTLS != UnsafeStackPtr->isThreadLocal())
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
                         (UseTLS ? "" : "not ") + "be thread-local");
  }
  return UnsafeStackPtr;
}

Value *
TargetLoweringBase::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
  if (!TM.getTargetTriple().isAndroid())
    return getDefaultSafeStackPointerLocation(IRB, true);

  // Android provides a libc function to retrieve the address of the current
  // thread's unsafe stack pointer.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
  FunctionCallee Fn = M->getOrInsertFunction("__safestack_pointer_address",
                                             StackPtrTy->getPointerTo(0));
  return IRB.CreateCall(Fn);
}

//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS,
                                               Instruction *I) const {
  // The default implementation supports a conservative RISC-style r+r and
  // r+i addressing mode.

  // Allow a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r.
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default: // Don't allow n * r.
    return false;
  }

  return true;
}

//===----------------------------------------------------------------------===//
//  Stack Protector
//===----------------------------------------------------------------------===//

// For OpenBSD return its special guard variable. Otherwise return nullptr,
// so that SelectionDAG handles SSP.
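// On OpenBSD the guard is provided by the system, along the lines of (a
// sketch, not the exact system declaration):
//   long __guard_local __attribute__((visibility("hidden")));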
Value *TargetLoweringBase::getIRStackGuard(IRBuilderBase &IRB) const {
  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
    Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
    PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
    Constant *C = M.getOrInsertGlobal("__guard_local", PtrTy);
    if (GlobalVariable *G = dyn_cast_or_null<GlobalVariable>(C))
      G->setVisibility(GlobalValue::HiddenVisibility);
    return C;
  }
  return nullptr;
}

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
  if (!M.getNamedValue("__stack_chk_guard")) {
    auto *GV = new GlobalVariable(M, Type::getInt8PtrTy(M.getContext()), false,
                                  GlobalVariable::ExternalLinkage, nullptr,
                                  "__stack_chk_guard");

    // FreeBSD has "__stack_chk_guard" defined externally in libc.so.
    if (TM.getRelocationModel() == Reloc::Static &&
        !TM.getTargetTriple().isWindowsGNUEnvironment() &&
        !(TM.getTargetTriple().isPPC64() && TM.getTargetTriple().isOSFreeBSD()))
      GV->setDSOLocal(true);
  }
}

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
  return M.getNamedValue("__stack_chk_guard");
}

Function *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
  return nullptr;
}

unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
  return MinimumJumpTableEntries;
}

void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
  MinimumJumpTableEntries = Val;
}

unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
  return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
}

unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
  return MaximumJumpTableSize;
}

void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
  MaximumJumpTableSize = Val;
}

bool TargetLoweringBase::isJumpTableRelative() const {
  return getTargetMachine().isPositionIndependent();
}

Align TargetLoweringBase::getPrefLoopAlignment(MachineLoop *ML) const {
  if (TM.Options.LoopAlignment)
    return Align(TM.Options.LoopAlignment);
  return PrefLoopAlignment;
}

unsigned TargetLoweringBase::getMaxPermittedBytesForAlignment(
    MachineBasicBlock *MBB) const {
  return MaxBytesForAlignment;
}

//===----------------------------------------------------------------------===//
//  Reciprocal Estimates
//===----------------------------------------------------------------------===//

/// Get the reciprocal estimate attribute string for a function that will
/// override the target defaults.
static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return F.getFnAttribute("reciprocal-estimates").getValueAsString();
}

/// Construct a string for the given reciprocal operation of the given type.
/// This string should match the corresponding option to the front-end's
/// "-mrecip" flag assuming those strings have been passed through in an
/// attribute string. For example, "vec-divf" for a division of a vXf32.
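///
/// As a concrete illustration of the mapping implemented below:
/// IsSqrt=false with MVT::v4f32 produces "vec-divf", and IsSqrt=true with
/// MVT::f64 produces "sqrtd".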
static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
  std::string Name = VT.isVector() ? "vec-" : "";

  Name += IsSqrt ? "sqrt" : "div";

  // TODO: Handle "half" or other float types?
  if (VT.getScalarType() == MVT::f64) {
    Name += "d";
  } else {
    assert(VT.getScalarType() == MVT::f32 &&
           "Unexpected FP type for reciprocal estimate");
    Name += "f";
  }

  return Name;
}

/// Return the character position and value (a single numeric character) of a
/// customized refinement operation in the input string if it exists. Return
/// false if there is no customized refinement step count.
static bool parseRefinementStep(StringRef In, size_t &Position,
                                uint8_t &Value) {
  const char RefStepToken = ':';
  Position = In.find(RefStepToken);
  if (Position == StringRef::npos)
    return false;

  StringRef RefStepString = In.substr(Position + 1);
  // Allow exactly one numeric character for the additional refinement
  // step parameter.
  if (RefStepString.size() == 1) {
    char RefStepChar = RefStepString[0];
    if (isDigit(RefStepChar)) {
      Value = RefStepChar - '0';
      return true;
    }
  }
  report_fatal_error("Invalid refinement step for -recip.");
}

/// For the input attribute string, return one of the ReciprocalEstimate enum
/// status values (enabled, disabled, or not specified) for this operation on
/// the specified data type.
static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "none", or "default" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(Override, RefPos, RefSteps)) {
      // Split the string for further processing.
      Override = Override.substr(0, RefPos);
    }

    // All reciprocal types are enabled.
    if (Override == "all")
      return TargetLoweringBase::ReciprocalEstimate::Enabled;

    // All reciprocal types are disabled.
    if (Override == "none")
      return TargetLoweringBase::ReciprocalEstimate::Disabled;

    // Target defaults for enablement are used.
    if (Override == "default")
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();
  static const char DisabledPrefix = '!';

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(RecipType, RefPos, RefSteps))
      RecipType = RecipType.substr(0, RefPos);

    // Ignore the disablement token for string matching.
    bool IsDisabled = RecipType[0] == DisabledPrefix;
    if (IsDisabled)
      RecipType = RecipType.substr(1);

    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
                        : TargetLoweringBase::ReciprocalEstimate::Enabled;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}

/// For the input attribute string, return the customized refinement step count
/// for this operation on the specified data type. If the step count does not
/// exist, return the ReciprocalEstimate enum value for unspecified.
static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "default", or "none" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(Override, RefPos, RefSteps))
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;

    // Split the string for further processing.
    Override = Override.substr(0, RefPos);
    assert(Override != "none" &&
           "Disabled reciprocals, but specified refinement steps?");

    // If this is a general override, return the specified number of steps.
    if (Override == "all" || Override == "default")
      return RefSteps;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(RecipType, RefPos, RefSteps))
      continue;

    RecipType = RecipType.substr(0, RefPos);
    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return RefSteps;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}

int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
                                                    MachineFunction &MF) const {
  return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
                                                   MachineFunction &MF) const {
  return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
                                               MachineFunction &MF) const {
  return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getDivRefinementSteps(EVT VT,
                                              MachineFunction &MF) const {
  return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
}

void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
  MF.getRegInfo().freezeReservedRegs(MF);
}

MachineMemOperand::Flags
TargetLoweringBase::getLoadMemOperandFlags(const LoadInst &LI,
                                           const DataLayout &DL) const {
  MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
  if (LI.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;

  if (LI.hasMetadata(LLVMContext::MD_nontemporal))
    Flags |= MachineMemOperand::MONonTemporal;

  if (LI.hasMetadata(LLVMContext::MD_invariant_load))
    Flags |= MachineMemOperand::MOInvariant;

  if (isDereferenceablePointer(LI.getPointerOperand(), LI.getType(), DL))
    Flags |= MachineMemOperand::MODereferenceable;
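
  // For illustration: a volatile load tagged with !nontemporal metadata whose
  // pointer is provably dereferenceable ends up with
  // MOLoad | MOVolatile | MONonTemporal | MODereferenceable, before any
  // target-specific flags are OR'd in below.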

  Flags |= getTargetMMOFlags(LI);
  return Flags;
}

MachineMemOperand::Flags
TargetLoweringBase::getStoreMemOperandFlags(const StoreInst &SI,
                                            const DataLayout &DL) const {
  MachineMemOperand::Flags Flags = MachineMemOperand::MOStore;

  if (SI.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;

  if (SI.hasMetadata(LLVMContext::MD_nontemporal))
    Flags |= MachineMemOperand::MONonTemporal;

  // FIXME: Not preserving dereferenceable.
  Flags |= getTargetMMOFlags(SI);
  return Flags;
}

MachineMemOperand::Flags
TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI,
                                             const DataLayout &DL) const {
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&AI)) {
    if (RMW->isVolatile())
      Flags |= MachineMemOperand::MOVolatile;
  } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(&AI)) {
    if (CmpX->isVolatile())
      Flags |= MachineMemOperand::MOVolatile;
  } else
    llvm_unreachable("not an atomic instruction");

  // FIXME: Not preserving dereferenceable.
  Flags |= getTargetMMOFlags(AI);
  return Flags;
}

Instruction *TargetLoweringBase::emitLeadingFence(IRBuilderBase &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {
  if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
    return Builder.CreateFence(Ord);
  else
    return nullptr;
}

Instruction *TargetLoweringBase::emitTrailingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst,
                                                   AtomicOrdering Ord) const {
  if (isAcquireOrStronger(Ord))
    return Builder.CreateFence(Ord);
  else
    return nullptr;
}

//===----------------------------------------------------------------------===//
//  GlobalISel Hooks
//===----------------------------------------------------------------------===//

bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI,
                                        const TargetTransformInfo *TTI) const {
  auto &MF = *MI.getMF();
  auto &MRI = MF.getRegInfo();
  // Assuming a spill and reload of a value has a cost of 1 instruction each,
  // this helper function computes the maximum number of uses we should
  // consider for remat. E.g. on arm64 global addresses take 2 insts to
  // materialize. We break even in terms of code size when the original MI has
  // 2 users vs choosing to potentially spill. With any more than 2 users we
  // have a net code size increase. This doesn't take into account register
  // pressure though.
  auto maxUses = [](unsigned RematCost) {
    // A cost of 1 means remats are basically free.
    if (RematCost == 1)
      return UINT_MAX;
    if (RematCost == 2)
      return 2U;

    // Remat is too expensive, only sink if there's one user.
    if (RematCost > 2)
      return 1U;
    llvm_unreachable("Unexpected remat cost");
  };

  // Helper to walk through uses and terminate if we've reached a limit. Saves
  // us spending time traversing uses if all we want to know is whether the
  // use count is at least MaxUses.
  auto isUsesAtMost = [&](unsigned Reg, unsigned MaxUses) {
    unsigned NumUses = 0;
    auto UI = MRI.use_instr_nodbg_begin(Reg), UE = MRI.use_instr_nodbg_end();
    for (; UI != UE && NumUses < MaxUses; ++UI) {
      NumUses++;
    }
    // If we haven't reached the end yet then there are more than MaxUses users.
    return UI == UE;
  };

  switch (MI.getOpcode()) {
  default:
    return false;
  // Constant-like instructions should be close to their users.
  // We don't want long live-ranges for them.
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_INTTOPTR:
    return true;
  case TargetOpcode::G_GLOBAL_VALUE: {
    unsigned RematCost = TTI->getGISelRematGlobalCost();
    Register Reg = MI.getOperand(0).getReg();
    unsigned MaxUses = maxUses(RematCost);
    if (MaxUses == UINT_MAX)
      return true; // Remats are "free" so always localize.
    return isUsesAtMost(Reg, MaxUses);
  }
  }
}