//===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

static cl::opt<unsigned> MinimumJumpTableEntries
  ("min-jump-table-entries", cl::init(4), cl::Hidden,
   cl::desc("Set minimum number of entries to use a jump table."));

static cl::opt<unsigned> MaximumJumpTableSize
  ("max-jump-table-size", cl::init(UINT_MAX), cl::Hidden,
   cl::desc("Set maximum size of jump tables."));

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
    JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                     cl::desc("Minimum density for building a jump table in "
                              "a normal function"));

/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned> OptsizeJumpTableDensity(
    "optsize-jump-table-density", cl::init(40), cl::Hidden,
    cl::desc("Minimum density for building a jump table in "
             "an optsize function"));

// FIXME: This option exists only to test, during development, that strict FP
// operations are processed correctly, by preventing strict FP operations from
// being mutated into normal FP operations. Once backends support strict float
// operations, this option will be meaningless.
static cl::opt<bool> DisableStrictNodeMutation("disable-strictnode-mutation",
       cl::desc("Don't mutate strict-float node to a legalize node"),
       cl::init(false), cl::Hidden);

/// GetFPLibCall - Helper to return the right libcall for the given floating
/// point type, or UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPLibCall(EVT VT,
                                   RTLIB::Libcall Call_F32,
                                   RTLIB::Libcall Call_F64,
                                   RTLIB::Libcall Call_F80,
                                   RTLIB::Libcall Call_F128,
                                   RTLIB::Libcall Call_PPCF128) {
  return
    VT == MVT::f32 ? Call_F32 :
    VT == MVT::f64 ? Call_F64 :
    VT == MVT::f80 ? Call_F80 :
    VT == MVT::f128 ? Call_F128 :
    VT == MVT::ppcf128 ? Call_PPCF128 :
    RTLIB::UNKNOWN_LIBCALL;
}

/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
    if (RetVT == MVT::f64)
      return FPEXT_F16_F64;
    if (RetVT == MVT::f80)
      return FPEXT_F16_F80;
    if (RetVT == MVT::f128)
      return FPEXT_F16_F128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F32_PPCF128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
    else if (RetVT == MVT::ppcf128)
      return FPEXT_F64_PPCF128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::f128)
      return FPEXT_F80_F128;
  } else if (OpVT == MVT::bf16) {
    if (RetVT == MVT::f32)
      return FPEXT_BF16_F32;
  }

  return UNKNOWN_LIBCALL;
}
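// Illustrative sketch (not part of the file proper): how a client such as the
// DAG legalizer uses getFPEXT when softening an extension. The enum mapping
// follows directly from the code above; the runtime symbol mentioned is the
// usual compiler-rt name and is an assumption here, not something this file
// guarantees.
//
//   RTLIB::Libcall LC = RTLIB::getFPEXT(MVT::f16, MVT::f32);
//   assert(LC == RTLIB::FPEXT_F16_F32); // typically emitted as __extendhfsf2
//   // An unsupported pairing (e.g. a narrowing "extension") yields
//   // UNKNOWN_LIBCALL:
//   assert(RTLIB::getFPEXT(MVT::f64, MVT::f32) == RTLIB::UNKNOWN_LIBCALL);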
/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::bf16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_BF16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_BF16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_BF16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_BF16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  } else if (RetVT == MVT::f80) {
    if (OpVT == MVT::f128)
      return FPROUND_F128_F80;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::bf16)
      return SINTTOFP_I64_BF16;
    if (RetVT == MVT::f16)
      return SINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}
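// Illustrative sketch: the conversion helpers above are keyed on both the
// operand and the result type, so a 64-bit signed conversion from double
// selects the F64/I64 entries. The enum results follow directly from the
// code; the compiler-rt symbol names in the comments are assumptions for
// illustration only.
//
//   assert(RTLIB::getFPTOSINT(MVT::f64, MVT::i64) ==
//          RTLIB::FPTOSINT_F64_I64);                 // usually __fixdfdi
//   assert(RTLIB::getSINTTOFP(MVT::i64, MVT::f64) ==
//          RTLIB::SINTTOFP_I64_F64);                 // usually __floatdidf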
/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::bf16)
      return UINTTOFP_I64_BF16;
    if (RetVT == MVT::f16)
      return UINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

RTLIB::Libcall RTLIB::getPOWI(EVT RetVT) {
  return getFPLibCall(RetVT, POWI_F32, POWI_F64, POWI_F80, POWI_F128,
                      POWI_PPCF128);
}

RTLIB::Libcall RTLIB::getPOW(EVT RetVT) {
  return getFPLibCall(RetVT, POW_F32, POW_F64, POW_F80, POW_F128, POW_PPCF128);
}

RTLIB::Libcall RTLIB::getLDEXP(EVT RetVT) {
  return getFPLibCall(RetVT, LDEXP_F32, LDEXP_F64, LDEXP_F80, LDEXP_F128,
                      LDEXP_PPCF128);
}

RTLIB::Libcall RTLIB::getFREXP(EVT RetVT) {
  return getFPLibCall(RetVT, FREXP_F32, FREXP_F64, FREXP_F80, FREXP_F128,
                      FREXP_PPCF128);
}

RTLIB::Libcall RTLIB::getSIN(EVT RetVT) {
  return getFPLibCall(RetVT, SIN_F32, SIN_F64, SIN_F80, SIN_F128, SIN_PPCF128);
}

RTLIB::Libcall RTLIB::getCOS(EVT RetVT) {
  return getFPLibCall(RetVT, COS_F32, COS_F64, COS_F80, COS_F128, COS_PPCF128);
}

RTLIB::Libcall RTLIB::getSINCOS(EVT RetVT) {
  return getFPLibCall(RetVT, SINCOS_F32, SINCOS_F64, SINCOS_F80, SINCOS_F128,
                      SINCOS_PPCF128);
}

RTLIB::Libcall RTLIB::getSINCOSPI(EVT RetVT) {
  return getFPLibCall(RetVT, SINCOSPI_F32, SINCOSPI_F64, SINCOSPI_F80,
                      SINCOSPI_F128, SINCOSPI_PPCF128);
}

RTLIB::Libcall RTLIB::getMODF(EVT RetVT) {
  return getFPLibCall(RetVT, MODF_F32, MODF_F64, MODF_F80, MODF_F128,
                      MODF_PPCF128);
}

RTLIB::Libcall RTLIB::getOutlineAtomicHelper(const Libcall (&LC)[5][4],
                                             AtomicOrdering Order,
                                             uint64_t MemSize) {
  unsigned ModeN, ModelN;
  switch (MemSize) {
  case 1:
    ModeN = 0;
    break;
  case 2:
    ModeN = 1;
    break;
  case 4:
    ModeN = 2;
    break;
  case 8:
    ModeN = 3;
    break;
  case 16:
    ModeN = 4;
    break;
  default:
    return RTLIB::UNKNOWN_LIBCALL;
  }

  switch (Order) {
  case AtomicOrdering::Monotonic:
    ModelN = 0;
    break;
  case AtomicOrdering::Acquire:
    ModelN = 1;
    break;
  case AtomicOrdering::Release:
    ModelN = 2;
    break;
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    ModelN = 3;
    break;
  default:
    return UNKNOWN_LIBCALL;
  }

  return LC[ModeN][ModelN];
}

RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
                                        MVT VT) {
  if (!VT.isScalarInteger())
    return UNKNOWN_LIBCALL;
  uint64_t MemSize = VT.getScalarSizeInBits() / 8;

#define LCALLS(A, B)                                                           \
  { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
#define LCALL5(A)                                                              \
  LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
  switch (Opc) {
  case ISD::ATOMIC_CMP_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_CAS)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_SWP)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_ADD: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDADD)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_OR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDSET)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_CLR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDCLR)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_XOR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDEOR)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  default:
    return UNKNOWN_LIBCALL;
  }
#undef LCALLS
#undef LCALL5
}

RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum)                                             \
  case Name:                                                                  \
    switch (VT.SimpleTy) {                                                    \
    default:                                                                  \
      return UNKNOWN_LIBCALL;                                                 \
    case MVT::i8:                                                             \
      return Enum##_1;                                                        \
    case MVT::i16:                                                            \
      return Enum##_2;                                                        \
    case MVT::i32:                                                            \
      return Enum##_4;                                                        \
    case MVT::i64:                                                            \
      return Enum##_8;                                                        \
    case MVT::i128:                                                           \
      return Enum##_16;                                                       \
    }

  switch (Opc) {
    OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
    OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}

RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

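// Illustrative sketch: the atomic helpers above key purely on the memory
// width. An i32 __sync-style fetch-and-add selects SYNC_FETCH_AND_ADD_4
// (the __sync_fetch_and_add_4 runtime helper), and in the outline-atomics
// table a 4-byte acquire RMW indexes ModeN == 2, ModelN == 1:
//
//   assert(RTLIB::getSYNC(ISD::ATOMIC_LOAD_ADD, MVT::i32) ==
//          RTLIB::SYNC_FETCH_AND_ADD_4);
//   assert(RTLIB::getOUTLINE_ATOMIC(ISD::ATOMIC_LOAD_ADD,
//                                   AtomicOrdering::Acquire, MVT::i32) ==
//          RTLIB::OUTLINE_ATOMIC_LDADD4_ACQ);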
RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

ISD::CondCode TargetLoweringBase::getSoftFloatCmpLibcallPredicate(
    RTLIB::LibcallImpl Impl) const {
  switch (Impl) {
  case RTLIB::__aeabi_dcmpeq__une:
  case RTLIB::__aeabi_fcmpeq__une:
    // The cmpeq libcall is used here for the une predicate, so the comparison
    // has to be inverted: test the result for equality with zero.
    return ISD::SETEQ;
  case RTLIB::__aeabi_dcmpeq__oeq:
  case RTLIB::__aeabi_fcmpeq__oeq:
    // Normal comparison to a boolean result.
    return ISD::SETNE;
  case RTLIB::__aeabi_dcmplt:
  case RTLIB::__aeabi_dcmple:
  case RTLIB::__aeabi_dcmpge:
  case RTLIB::__aeabi_dcmpgt:
  case RTLIB::__aeabi_dcmpun:
  case RTLIB::__aeabi_fcmplt:
  case RTLIB::__aeabi_fcmple:
  case RTLIB::__aeabi_fcmpge:
  case RTLIB::__aeabi_fcmpgt:
    // The AEABI versions return a typical boolean value, so we can compare
    // against the integer result as simply != 0.
    return ISD::SETNE;
  default:
    break;
  }

  // Assume libgcc/compiler-rt behavior. Most of the cases are really aliases
  // of each other, and return a 3-way comparison style result of -1, 0, or 1
  // depending on lt/eq/gt.
  //
  // FIXME: It would be cleaner to directly express this as a 3-way comparison
  // soft FP libcall instead of individual compares.
  RTLIB::Libcall LC = RTLIB::RuntimeLibcallsInfo::getLibcallFromImpl(Impl);
  switch (LC) {
  case RTLIB::OEQ_F32:
  case RTLIB::OEQ_F64:
  case RTLIB::OEQ_F128:
  case RTLIB::OEQ_PPCF128:
    return ISD::SETEQ;
  case RTLIB::UNE_F32:
  case RTLIB::UNE_F64:
  case RTLIB::UNE_F128:
  case RTLIB::UNE_PPCF128:
    return ISD::SETNE;
  case RTLIB::OGE_F32:
  case RTLIB::OGE_F64:
  case RTLIB::OGE_F128:
  case RTLIB::OGE_PPCF128:
    return ISD::SETGE;
  case RTLIB::OLT_F32:
  case RTLIB::OLT_F64:
  case RTLIB::OLT_F128:
  case RTLIB::OLT_PPCF128:
    return ISD::SETLT;
  case RTLIB::OLE_F32:
  case RTLIB::OLE_F64:
  case RTLIB::OLE_F128:
  case RTLIB::OLE_PPCF128:
    return ISD::SETLE;
  case RTLIB::OGT_F32:
  case RTLIB::OGT_F64:
  case RTLIB::OGT_F128:
  case RTLIB::OGT_PPCF128:
    return ISD::SETGT;
  case RTLIB::UO_F32:
  case RTLIB::UO_F64:
  case RTLIB::UO_F128:
  case RTLIB::UO_PPCF128:
    return ISD::SETNE;
  default:
    llvm_unreachable("not a compare libcall");
  }
}
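// Illustrative sketch: when softening a comparison such as `fcmp ole double`,
// the legalizer emits the corresponding libcall and then compares the integer
// result against zero using the predicate returned above. The DAG shape and
// the libgcc symbol name below are assumptions for illustration:
//
//   // OLE_F64 maps to the __ledf2-style 3-way compare; predicate is SETLE.
//   // Resulting DAG, roughly: setcc (call @__ledf2, %a, %b), 0, setle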
/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm)
    : TM(tm), Libcalls(TM.getTargetTriple(), TM.Options.ExceptionModel,
                       TM.Options.FloatABIType, TM.Options.EABIVersion,
                       TM.Options.MCOptions.getABIName()) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
      MaxLoadsPerMemcmp = 8;
  MaxGluedStoresPerMemcpy = 0;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  EnableExtLdPromotion = false;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  GatherAllAliasesMaxDepth = 18;
  IsStrictFPEnabled = DisableStrictNodeMutation;
  MaxBytesForAlignment = 0;
  MaxAtomicSizeInBitsSupported = 0;

  // Assume that even with libcalls, no target supports wider than 128 bit
  // division.
  MaxDivRemBitWidthSupported = 128;

  MaxLargeFPConvertBitWidthSupported = llvm::IntegerType::MAX_INT_BITS;

  MinCmpXchgSizeInBits = 0;
  SupportsUnalignedAtomics = false;
}

// Define the virtual destructor out-of-line to act as a key method to anchor
// debug info (see coding standards).
TargetLoweringBase::~TargetLoweringBase() = default;

void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  llvm::fill(RegClassForVT, nullptr);
  llvm::fill(TargetDAGCombineArray, 0);

  // Let extending atomic loads be unsupported by default.
  for (MVT ValVT : MVT::all_valuetypes())
    for (MVT MemVT : MVT::all_valuetypes())
      setAtomicLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD}, ValVT, MemVT,
                             Expand);

  // We are special-casing MVT::i2 and MVT::i4 here. Ideally this will be
  // removed, and targets will individually set actions for these types when
  // they are not legal.
  for (ISD::NodeType NT : enum_seq(ISD::DELETED_NODE, ISD::BUILTIN_OP_END,
                                   force_iteration_on_noniterable_enum)) {
    for (MVT VT : {MVT::i2, MVT::i4})
      OpActions[(unsigned)VT.SimpleTy][NT] = Expand;
  }
  for (MVT AVT : MVT::all_valuetypes()) {
    for (MVT VT : {MVT::i2, MVT::i4, MVT::v128i2, MVT::v64i4}) {
      setTruncStoreAction(AVT, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, AVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, AVT, VT, Expand);
    }
  }
  for (unsigned IM = (unsigned)ISD::PRE_INC;
       IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
    for (MVT VT : {MVT::i2, MVT::i4}) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }
  }

  for (MVT VT : MVT::fp_valuetypes()) {
    MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits());
    if (IntVT.isValid()) {
      setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
      AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
    }
  }
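  // Illustrative sketch (hypothetical target code, not part of this file): a
  // backend that disagrees with any default established in this function
  // simply overrides it in its own TargetLowering constructor, which runs
  // after the base-class constructor has called initActions(), e.g.:
  //
  //   setOperationAction(ISD::SMIN, MVT::i32, Legal);     // native smin
  //   setOperationAction(ISD::FSINCOS, MVT::f32, Custom); // custom lowering
  //   setMaxAtomicSizeInBitsSupported(64);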
  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node which just returns the value loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

    // These operations default to expand.
    setOperationAction({ISD::FGETSIGN, ISD::CONCAT_VECTORS,
                        ISD::FMINNUM, ISD::FMAXNUM,
                        ISD::FMINNUM_IEEE, ISD::FMAXNUM_IEEE,
                        ISD::FMINIMUM, ISD::FMAXIMUM,
                        ISD::FMINIMUMNUM, ISD::FMAXIMUMNUM,
                        ISD::FMAD, ISD::SMIN,
                        ISD::SMAX, ISD::UMIN,
                        ISD::UMAX, ISD::ABS,
                        ISD::FSHL, ISD::FSHR,
                        ISD::SADDSAT, ISD::UADDSAT,
                        ISD::SSUBSAT, ISD::USUBSAT,
                        ISD::SSHLSAT, ISD::USHLSAT,
                        ISD::SMULFIX, ISD::SMULFIXSAT,
                        ISD::UMULFIX, ISD::UMULFIXSAT,
                        ISD::SDIVFIX, ISD::SDIVFIXSAT,
                        ISD::UDIVFIX, ISD::UDIVFIXSAT,
                        ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
                        ISD::IS_FPCLASS},
                       VT, Expand);

    // Overflow operations default to expand.
    setOperationAction({ISD::SADDO, ISD::SSUBO, ISD::UADDO, ISD::USUBO,
                        ISD::SMULO, ISD::UMULO},
                       VT, Expand);

    // Carry-using overflow operations default to expand.
    setOperationAction({ISD::UADDO_CARRY, ISD::USUBO_CARRY, ISD::SETCCCARRY,
                        ISD::SADDO_CARRY, ISD::SSUBO_CARRY},
                       VT, Expand);

    // ADDC/ADDE/SUBC/SUBE default to expand.
    setOperationAction({ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}, VT,
                       Expand);

    // [US]CMP default to expand.
    setOperationAction({ISD::UCMP, ISD::SCMP}, VT, Expand);

    // Halving adds.
    setOperationAction(
        {ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS, ISD::AVGCEILU}, VT,
        Expand);

    // Absolute difference.
    setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Expand);

    // Saturated trunc.
    setOperationAction(ISD::TRUNCATE_SSAT_S, VT, Expand);
    setOperationAction(ISD::TRUNCATE_SSAT_U, VT, Expand);
    setOperationAction(ISD::TRUNCATE_USAT_U, VT, Expand);

    // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
    setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
                       Expand);

    setOperationAction({ISD::BITREVERSE, ISD::PARITY}, VT, Expand);

    // These library functions default to expand.
    setOperationAction({ISD::FROUND, ISD::FPOWI, ISD::FLDEXP, ISD::FFREXP,
                        ISD::FSINCOS, ISD::FSINCOSPI, ISD::FMODF},
                       VT, Expand);

    // These operations default to expand for vector types.
    if (VT.isVector())
      setOperationAction(
          {ISD::FCOPYSIGN, ISD::SIGN_EXTEND_INREG, ISD::ANY_EXTEND_VECTOR_INREG,
           ISD::SIGN_EXTEND_VECTOR_INREG, ISD::ZERO_EXTEND_VECTOR_INREG,
           ISD::SPLAT_VECTOR, ISD::LRINT, ISD::LLRINT, ISD::LROUND,
           ISD::LLROUND, ISD::FTAN, ISD::FACOS, ISD::FASIN, ISD::FATAN,
           ISD::FCOSH, ISD::FSINH, ISD::FTANH, ISD::FATAN2},
          VT, Expand);

    // Constrained floating-point operations default to expand.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
    setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
#include "llvm/IR/ConstrainedOps.def"

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);
    // Vector reduction operations default to expand.
    setOperationAction(
        {ISD::VECREDUCE_FADD, ISD::VECREDUCE_FMUL, ISD::VECREDUCE_ADD,
         ISD::VECREDUCE_MUL, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR,
         ISD::VECREDUCE_XOR, ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
         ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN, ISD::VECREDUCE_FMAX,
         ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAXIMUM, ISD::VECREDUCE_FMINIMUM,
         ISD::VECREDUCE_SEQ_FADD, ISD::VECREDUCE_SEQ_FMUL},
        VT, Expand);

    // Named vector shuffles default to expand.
    setOperationAction(ISD::VECTOR_SPLICE, VT, Expand);

    // Only some targets support this vector operation. Most need to expand it.
    setOperationAction(ISD::VECTOR_COMPRESS, VT, Expand);

    // VP operations default to expand.
#define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...)                                   \
    setOperationAction(ISD::SDOPC, VT, Expand);
#include "llvm/IR/VPIntrinsics.def"

    // Masked vector extracts default to expand.
    setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Expand);

    // FP environment operations default to expand.
    setOperationAction(ISD::GET_FPENV, VT, Expand);
    setOperationAction(ISD::SET_FPENV, VT, Expand);
    setOperationAction(ISD::RESET_FPENV, VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);

  // Most targets also ignore the @llvm.readsteadycounter intrinsic.
  setOperationAction(ISD::READSTEADYCOUNTER, MVT::i64, Expand);

  // ConstantFP nodes default to expand. Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP,
                     {MVT::bf16, MVT::f16, MVT::f32, MVT::f64, MVT::f80,
                      MVT::f128},
                     Expand);

  // These library functions default to expand.
  setOperationAction({ISD::FCBRT, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
                      ISD::FEXP, ISD::FEXP2, ISD::FEXP10, ISD::FFLOOR,
                      ISD::FNEARBYINT, ISD::FCEIL, ISD::FRINT, ISD::FTRUNC,
                      ISD::FROUNDEVEN, ISD::FTAN, ISD::FACOS, ISD::FASIN,
                      ISD::FATAN, ISD::FCOSH, ISD::FSINH, ISD::FTANH,
                      ISD::FATAN2},
                     {MVT::f32, MVT::f64, MVT::f128}, Expand);

  // Insert custom handling default for llvm.canonicalize.*.
  setOperationAction(ISD::FCANONICALIZE,
                     {MVT::f16, MVT::f32, MVT::f64, MVT::f128}, Expand);

  // FIXME: Query RuntimeLibCalls to make the decision.
  setOperationAction({ISD::LRINT, ISD::LLRINT, ISD::LROUND, ISD::LLROUND},
                     {MVT::f32, MVT::f64, MVT::f128}, LibCall);

  setOperationAction({ISD::FTAN, ISD::FACOS, ISD::FASIN, ISD::FATAN, ISD::FCOSH,
                      ISD::FSINH, ISD::FTANH, ISD::FATAN2},
                     MVT::f16, Promote);
  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);
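  // Illustrative sketch: because ISD::ConstantFP defaults to Expand above, a
  // target with an FP move-immediate instruction typically keeps the Expand
  // action but reports which values it can materialize cheaply via an
  // isFPImmLegal override in its own subclass (hypothetical target code):
  //
  //   bool MyTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
  //                                       bool ForCodeSize) const {
  //     return Imm.isPosZero(); // only +0.0 is free to materialize
  //   }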
  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);

  setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);

  setOperationAction(ISD::GET_FPENV_MEM, MVT::Other, Expand);
  setOperationAction(ISD::SET_FPENV_MEM, MVT::Other, Expand);

  for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
    setOperationAction(ISD::GET_FPMODE, VT, Expand);
    setOperationAction(ISD::SET_FPMODE, VT, Expand);
  }
  setOperationAction(ISD::RESET_FPMODE, MVT::Other, Expand);

  // This one by default will call __clear_cache unless the target
  // wants something different.
  setOperationAction(ISD::CLEAR_CACHE, MVT::Other, LibCall);
}

MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
}

EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
                                         const DataLayout &DL) const {
  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  MVT ShiftVT = getScalarShiftAmountTy(DL, LHSTy);
  // If any possible shift value won't fit in the preferred type, just use
  // something safe. Assume it will be legalized when the shift is expanded.
  if (ShiftVT.getSizeInBits() < Log2_32_Ceil(LHSTy.getSizeInBits()))
    ShiftVT = MVT::i32;
  assert(ShiftVT.getSizeInBits() >= Log2_32_Ceil(LHSTy.getSizeInBits()) &&
         "ShiftVT is still too small!");
  return ShiftVT;
}

bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

bool TargetLoweringBase::isFreeAddrSpaceCast(unsigned SrcAS,
                                             unsigned DestAS) const {
  return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
}

unsigned TargetLoweringBase::getBitWidthForCttzElements(
    Type *RetTy, ElementCount EC, bool ZeroIsPoison,
    const ConstantRange *VScaleRange) const {
  // Find the smallest "sensible" element type to use for the expansion.
  ConstantRange CR(APInt(64, EC.getKnownMinValue()));
  if (EC.isScalable())
    CR = CR.umul_sat(*VScaleRange);

  if (ZeroIsPoison)
    CR = CR.subtract(APInt(64, 1));

  unsigned EltWidth = RetTy->getScalarSizeInBits();
  EltWidth = std::min(EltWidth, (unsigned)CR.getActiveBits());
  EltWidth = std::max(llvm::bit_ceil(EltWidth), (unsigned)8);

  return EltWidth;
}

void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}
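// Illustrative worked example for getBitWidthForCttzElements above: for a
// fixed <8 x i1> input with ZeroIsPoison, the element-count range [8,8]
// becomes [7,7] after the subtraction, which has 3 active bits; bit_ceil of
// min(32, 3) is 4, and the clamp to a minimum of 8 yields an 8-bit element
// type for the expansion:
//
//   // assuming RetTy is i32 and the element count is fixed at 8:
//   // getBitWidthForCttzElements(RetTy, ElementCount::getFixed(8),
//   //                            /*ZeroIsPoison=*/true, nullptr) == 8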
TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < std::size(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            LA == TypeSoftPromoteHalf ||
            (NVT.isVector() ||
             ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA, EVT(SVT).getHalfNumVectorElementsVT(Context));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle Extended Scalar Types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }

  // Handle vector types.
  ElementCount NumElts = VT.getVectorElementCount();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
  if (NumElts.isScalar())
    return LegalizeKind(TypeScalarizeVector, EltVT);

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = NumElts.coefficientNextPowerOf2();
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If type is to be expanded, split the vector.
    // <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger) {
      if (NumElts.isScalable() && NumElts.getKnownMinValue() == 1)
        return LegalizeKind(TypeScalarizeScalableVector, EltVT);
      return LegalizeKind(TypeSplitVector,
                          VT.getHalfNumVectorElementsVT(Context));
    }

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fallback to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (true) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: X86 XMM registers hold 64-bit elements on 32-bit
      // systems.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (true) {
    // Round up to the next power of 2.
    NumElts = NumElts.coefficientNextPowerOf2();

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type. Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen odd vectors to next power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  if (VT.getVectorElementCount() == ElementCount::getScalable(1))
    return LegalizeKind(TypeScalarizeScalableVector, EltVT);

  // Vectors with illegal element types are expanded.
  EVT NVT = EVT::getVectorVT(Context, EltVT,
                             VT.getVectorElementCount().divideCoefficientBy(2));
  return LegalizeKind(TypeSplitVector, NVT);
}
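// Illustrative worked example of getTypeConversion above: on a target where
// v4i32 is the first legal wider type, <3 x i8> legalizes in steps
//   <3 x i8>  --TypeWidenVector-->   <4 x i8>
//   <4 x i8>  --TypePromoteInteger--> <4 x i32>
// while a scalar i33 is first rounded up to i64 (TypePromoteInteger) and an
// i256 is expanded to two i128 halves (TypeExpandInteger).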
static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  ElementCount EC = VT.getVectorElementCount();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so splitting or widening is
  // required.
  if (VT.isScalableVector() && !isPowerOf2_32(EC.getKnownMinValue()))
    llvm_unreachable(
        "Splitting or widening of non-power-of-2 MVTs is not implemented.");

  // FIXME: We don't support non-power-of-2-sized vectors for now.
  // Ideally we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EC.getKnownMinValue())) {
    // Split EC to unit size (scalable property is preserved).
    NumVectorRegs = EC.getKnownMinValue();
    EC = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will
  // always end up with an EC that represent a scalar or a scalable
  // scalar.
  while (EC.getKnownMinValue() > 1 &&
         !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
    EC = EC.divideCoefficientBy(2);
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, EC);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned LaneSizeInBits = NewVT.getScalarSizeInBits();

  // Convert sizes such as i33 to i64.
  LaneSizeInBits = llvm::bit_ceil(LaneSizeInBits);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs * (LaneSizeInBits / DestVT.getScalarSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass &RC) const {
  for (const auto *I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
    if (isTypeLegal(*I))
      return true;
  return false;
}

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock *
TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
                                   MachineBasicBlock *MBB) const {
  MachineInstr *MI = &InitialMI;
  MachineFunction &MF = *MI->getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through is handled already (the live through ones are
  // all stack slots), but we need to handle the different type of stackmap
  // operands and memory effects here.

  if (llvm::none_of(MI->operands(),
                    [](MachineOperand &Operand) { return Operand.isFI(); }))
    return MBB;

  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

  // Inherit previous memory operands.
  MIB.cloneMemRefs(*MI);

  for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isFI()) {
      // Index of the Def operand this Use is tied to.
      // Since Defs come before Uses, if a Use is tied, then the
      // index of the Def must be smaller than the index of that Use.
      // Also, Defs preserve their position in the new MI.
      unsigned TiedTo = i;
      if (MO.isReg() && MO.isTied())
        TiedTo = MI->findTiedOperandIdx(i);
      MIB.add(MO);
      if (TiedTo < i)
        MIB->tieOperands(TiedTo, MIB->getNumOperands() - 1);
      continue;
    }

    // foldMemoryOperand builds a new MI after replacing a single FI operand
    // with the canonical set of five x86 addressing-mode operands.
    int FI = MO.getIndex();

    // Add frame index operands recognized by stackmaps.cpp
    if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
      // indirect-mem-ref tag, size, #FI, offset.
      // Used for spills inserted by StatepointLowering. This codepath is not
      // used for patchpoints/stackmaps at all, for these spilling is done via
      // foldMemoryOperand callback only.
      assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(MFI.getObjectSize(FI));
      MIB.add(MO);
      MIB.addImm(0);
    } else {
      // direct-mem-ref tag, #FI, offset.
      // Used by patchpoint, and direct alloca arguments to statepoints
      MIB.addImm(StackMaps::DirectMemRefOp);
      MIB.add(MO);
      MIB.addImm(0);
    }

    assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");

    // Add a new memory operand for this FI.
    assert(MFI.getObjectOffset(FI) != -1);

    // Note: STATEPOINT MMOs are added during SelectionDAG. STACKMAP, and
    // PATCHPOINT should be updated to do the same. (TODO)
    if (MI->getOpcode() != TargetOpcode::STATEPOINT) {
      auto Flags = MachineMemOperand::MOLoad;
      MachineMemOperand *MMO = MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FI), Flags,
          MF.getDataLayout().getPointerSize(), MFI.getObjectAlign(FI));
      MIB->addMemOperand(MF, MMO);
    }
  }
  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI->eraseFromParent();
  return MBB;
}

/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT which would
// need to be moved to TargetRegisterInfo and would necessitate moving
// isTypeLegal over as well - a massive change that would just require
// TargetLowering having a TargetRegisterInfo class member that it would use.
std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                            MVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);

  // Compute the set of all super-register classes.
  BitVector SuperRegRC(TRI->getNumRegClasses());
  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
    SuperRegRC.setBitsInMask(RCI.getMask());

  // Find the first legal register class with the largest spill size.
  const TargetRegisterClass *BestRC = RC;
  for (unsigned i : SuperRegRC.set_bits()) {
    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
    // We want the largest possible spill size.
    if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
      continue;
    if (!isLegalRC(*TRI, *SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}
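// Illustrative sketch for findRepresentativeClass above: on x86, the class
// for MVT::i8 is GR8, and its super-register classes include GR16, GR32, and
// GR64. Scanning for the largest legal spill size therefore picks GR32 in
// 32-bit mode and GR64 in 64-bit mode, so i8, i16, and i32 all share one
// representative class (the register class names here are x86 specifics used
// only as an example).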
/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLoweringBase::computeRegisterProperties(
    const TargetRegisterInfo *TRI) {
  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1;
       ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
                                   TypeExpandInteger);
  }

  // Inspect all of the ValueType's smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
          (MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
    }
  }

  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
      TransformToType[MVT::ppcf128] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
    } else {
      NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
      RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
      TransformToType[MVT::ppcf128] = MVT::i128;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
    }
  }

  // Decide how to handle f128. If the target does not have native f128
  // support, expand it to i128 and we will be generating soft float library
  // calls.
  if (!isTypeLegal(MVT::f128)) {
    NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
    RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
    TransformToType[MVT::f128] = MVT::i128;
    ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
  }

  // Decide how to handle f80. If the target does not have native f80 support,
  // expand it to i96 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f80)) {
    NumRegistersForVT[MVT::f80] = 3*NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f80] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f80] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f80, TypeSoftenFloat);
  }

  // Decide how to handle f64. If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  }

  // Decide how to handle f32. If the target does not have native f32 support,
  // expand it to i32 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f32)) {
    NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f32] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
  }

  // Decide how to handle f16. If the target does not have native f16 support,
  // promote it to f32, because there are no f16 library calls (except for
  // conversions).
  if (!isTypeLegal(MVT::f16)) {
    // Allow targets to control how we legalize half.
    bool SoftPromoteHalfType = softPromoteHalfType();
    bool UseFPRegsForHalfType = !SoftPromoteHalfType || useFPRegsForHalfType();

    if (!UseFPRegsForHalfType) {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
    } else {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
    }
    TransformToType[MVT::f16] = MVT::f32;
    if (SoftPromoteHalfType) {
      ValueTypeActions.setTypeAction(MVT::f16, TypeSoftPromoteHalf);
    } else {
      ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
    }
  }

  // Decide how to handle bf16. If the target does not have native bf16
  // support, promote it to f32, because there are no bf16 library calls
  // (except for converting from f32 to bf16).
  if (!isTypeLegal(MVT::bf16)) {
    NumRegistersForVT[MVT::bf16] = NumRegistersForVT[MVT::f32];
    RegisterTypeForVT[MVT::bf16] = RegisterTypeForVT[MVT::f32];
    TransformToType[MVT::bf16] = MVT::f32;
    ValueTypeActions.setTypeAction(MVT::bf16, TypeSoftPromoteHalf);
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType) i;
    if (isTypeLegal(VT))
      continue;

    MVT EltVT = VT.getVectorElementType();
    ElementCount EC = VT.getVectorElementCount();
    bool IsLegalWiderType = false;
    bool IsScalable = VT.isScalableVector();
    LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
    switch (PreferredAction) {
    case TypePromoteInteger: {
      MVT::SimpleValueType EndVT = IsScalable ?
                                   MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE :
                                   MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE;
      // Try to promote the elements of integer vectors. If no legal
      // promotion was found, fall through to the widen-vector method.
      for (unsigned nVT = i + 1;
           (MVT::SimpleValueType)nVT <= EndVT; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        // Promote vectors of integers to vectors with the same number
        // of elements, with a wider element type.
        if (SVT.getScalarSizeInBits() > EltVT.getFixedSizeInBits() &&
            SVT.getVectorElementCount() == EC && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      [[fallthrough]];
    }

    case TypeWidenVector:
      if (isPowerOf2_32(EC.getKnownMinValue())) {
        // Try to widen the vector.
        for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
          MVT SVT = (MVT::SimpleValueType) nVT;
          if (SVT.getVectorElementType() == EltVT &&
              SVT.isScalableVector() == IsScalable &&
              SVT.getVectorElementCount().getKnownMinValue() >
                  EC.getKnownMinValue() &&
              isTypeLegal(SVT)) {
            TransformToType[i] = SVT;
            RegisterTypeForVT[i] = SVT;
            NumRegistersForVT[i] = 1;
            ValueTypeActions.setTypeAction(VT, TypeWidenVector);
            IsLegalWiderType = true;
            break;
          }
        }
        if (IsLegalWiderType)
          break;
      } else {
        // Only widen to the next power of 2 to keep consistency with EVT.
        MVT NVT = VT.getPow2VectorType();
        if (isTypeLegal(NVT)) {
          TransformToType[i] = NVT;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          RegisterTypeForVT[i] = NVT;
          NumRegistersForVT[i] = 1;
          break;
        }
      }
      [[fallthrough]];

    case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
      unsigned NumRegisters = getVectorTypeBreakdownMVT(VT, IntermediateVT,
          NumIntermediates, RegisterVT, this);
      NumRegistersForVT[i] = NumRegisters;
      assert(NumRegistersForVT[i] == NumRegisters &&
             "NumRegistersForVT size cannot represent NumRegisters!");
      RegisterTypeForVT[i] = RegisterVT;

      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Type is already a power of 2. The default action is to split.
        TransformToType[i] = MVT::Other;
        if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else if (EC.getKnownMinValue() > 1)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          ValueTypeActions.setTypeAction(VT, EC.isScalable()
                                                 ? TypeScalarizeScalableVector
                                                 : TypeScalarizeVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest (meaning one which is
  // not a sub-register class / subreg register class) legal register class for
  // a group of value types. For example, on i386, the representative for i8,
  // i16, and i32 would be GR32; while on x86_64 it's GR64.
  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
    const TargetRegisterClass* RRC;
    uint8_t Cost;
    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}

EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
  assert(!VT.isVector() && "No default SetCC type for vectors!");
  return getPointerTy(DL).SimpleTy;
}

MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}
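// Illustrative worked example of computeRegisterProperties above: on a
// 32-bit target whose largest legal integer type is i32, the expansion loop
// marks i64 as TypeExpandInteger with NumRegistersForVT[i64] == 2 and
// RegisterTypeForVT[i64] == i32, and i128 correspondingly takes 4 registers.
// If f32 is also not legal, it is softened to i32, so an f32 value travels
// in one i32 register.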
EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
  assert(!VT.isVector() && "No default SetCC type for vectors!");
  return getPointerTy(DL).SimpleTy;
}

MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}

/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register. It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context,
                                                    EVT VT, EVT &IntermediateVT,
                                                    unsigned &NumIntermediates,
                                                    MVT &RegisterVT) const {
  ElementCount EltCnt = VT.getVectorElementCount();

  // If there is a wider vector type with the same element type as this one,
  // or a promoted vector type that has the same number of elements which
  // are wider, then we should convert to that legal vector type.
  // This handles things like <2 x float> -> <4 x float> and
  // <4 x i1> -> <4 x i32>.
  LegalizeTypeAction TA = getTypeAction(Context, VT);
  if (!EltCnt.isScalar() &&
      (TA == TypeWidenVector || TA == TypePromoteInteger)) {
    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
    if (isTypeLegal(RegisterEVT)) {
      IntermediateVT = RegisterEVT;
      RegisterVT = RegisterEVT.getSimpleVT();
      NumIntermediates = 1;
      return 1;
    }
  }

  // Figure out the right, legal destination reg to copy into.
  EVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so handle the legalization of
  // such types as is done elsewhere in SelectionDAG.
  if (EltCnt.isScalable()) {
    LegalizeKind LK;
    EVT PartVT = VT;
    do {
      // Iterate until we've found a legal (part) type to hold VT.
      LK = getTypeConversion(Context, PartVT);
      PartVT = LK.second;
    } while (LK.first != TypeLegal);

    if (!PartVT.isVector()) {
      report_fatal_error(
          "Don't know how to legalize this scalable vector type");
    }

    NumIntermediates =
        divideCeil(VT.getVectorElementCount().getKnownMinValue(),
                   PartVT.getVectorElementCount().getKnownMinValue());
    IntermediateVT = PartVT;
    RegisterVT = getRegisterType(Context, IntermediateVT);
    return NumIntermediates;
  }

  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally
  // we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EltCnt.getKnownMinValue())) {
    NumVectorRegs = EltCnt.getKnownMinValue();
    EltCnt = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will always
  // end with a scalar if the target doesn't support vectors.
  while (EltCnt.getKnownMinValue() > 1 &&
         !isTypeLegal(EVT::getVectorVT(Context, EltTy, EltCnt))) {
    EltCnt = EltCnt.divideCoefficientBy(2);
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  EVT NewVT = EVT::getVectorVT(Context, EltTy, EltCnt);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getRegisterType(Context, NewVT);
  RegisterVT = DestVT;

  if (EVT(DestVT).bitsLT(NewVT)) { // Value is expanded, e.g. i64 -> i16.
    TypeSize NewVTSize = NewVT.getSizeInBits();
    // Convert sizes such as i33 to i64.
    if (!llvm::has_single_bit<uint32_t>(NewVTSize.getKnownMinValue()))
      NewVTSize = NewVTSize.coefficientNextPowerOf2();
    return NumVectorRegs * (NewVTSize / DestVT.getSizeInBits());
  }

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}
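
// Usage sketch for the breakdown above (illustrative; the results depend on
// which types the target makes legal):
//
//   EVT IntermediateVT;
//   MVT RegisterVT;
//   unsigned NumIntermediates;
//   unsigned NumRegs = TLI.getVectorTypeBreakdown(
//       Context, MVT::v8f32, IntermediateVT, NumIntermediates, RegisterVT);
//   // With only v4f32 legal (e.g. SSE1): NumRegs == 2,
//   // NumIntermediates == 2, IntermediateVT == v4f32, RegisterVT == v4f32.
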
bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI,
                                                uint64_t NumCases,
                                                uint64_t Range,
                                                ProfileSummaryInfo *PSI,
                                                BlockFrequencyInfo *BFI) const {
  // FIXME: This function checks the maximum table size and density, but not
  // the minimum size. It would be nice if the minimum size check were also
  // combined into this function. Currently, the minimum size check is
  // performed in findJumpTable() in SelectionDAGBuilder and
  // getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
  const bool OptForSize =
      llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI);
  const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
  const unsigned MaxJumpTableSize = getMaximumJumpTableSize();

  // Check whether the number of cases is small enough and
  // the range is dense enough for a jump table.
  return (OptForSize || Range <= MaxJumpTableSize) &&
         (NumCases * 100 >= Range * MinDensity);
}
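
// Worked example with the default thresholds: MinDensity is 10 (40 when
// optimizing for size), so a switch with NumCases == 20 spread over
// Range == 150 satisfies 20 * 100 >= 150 * 10 and is dense enough, while
// the same cases spread over Range == 2500 (20 * 100 < 2500 * 10) are not.
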
MVT TargetLoweringBase::getPreferredSwitchConditionType(LLVMContext &Context,
                                                        EVT ConditionVT) const {
  return getRegisterType(Context, ConditionVT);
}

/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
                         AttributeList attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI, const DataLayout &DL) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr.hasRetAttr(Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr.hasRetAttr(Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;

    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
      VT = TLI.getTypeForExtReturn(ReturnType->getContext(), VT, ExtendKind);

    unsigned NumParts =
        TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
    MVT PartVT =
        TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);

    // 'inreg' on the function refers to the return value.
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasRetAttr(Attribute::InReg))
      Flags.setInReg();

    // Propagate the extension type, if any.
    if (attr.hasRetAttr(Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasRetAttr(Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isfixed=*/true, 0, 0));
  }
}
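
// Usage sketch (illustrative): for a function declared "zeroext i8 @f()" on
// a typical target with 32-bit registers, the default getTypeForExtReturn
// widens the i8 return value to i32, and a single OutputArg with the ZExt
// flag set is recorded:
//
//   SmallVector<ISD::OutputArg, 4> Outs;
//   GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
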
Align TargetLoweringBase::getByValTypeAlignment(Type *Ty,
                                                const DataLayout &DL) const {
  return DL.getABITypeAlign(Ty);
}

bool TargetLoweringBase::allowsMemoryAccessForAlignment(
    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
    Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const {
  // Check if the specified alignment is sufficient based on the data layout.
  // TODO: While using the data layout works in practice, a better solution
  // would be to implement this check directly (make this a virtual function).
  // For example, the ABI alignment may change based on software platform while
  // this function should only be affected by hardware implementation.
  Type *Ty = VT.getTypeForEVT(Context);
  if (VT.isZeroSized() || Alignment >= DL.getABITypeAlign(Ty)) {
    // Assume that an access that meets the ABI-specified alignment is fast.
    if (Fast != nullptr)
      *Fast = 1;
    return true;
  }

  // This is a misaligned access.
  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
}

bool TargetLoweringBase::allowsMemoryAccessForAlignment(
    LLVMContext &Context, const DataLayout &DL, EVT VT,
    const MachineMemOperand &MMO, unsigned *Fast) const {
  return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
                                        MMO.getAlign(), MMO.getFlags(), Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            unsigned AddrSpace, Align Alignment,
                                            MachineMemOperand::Flags Flags,
                                            unsigned *Fast) const {
  return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
                                        Flags, Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            const MachineMemOperand &MMO,
                                            unsigned *Fast) const {
  return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
                            MMO.getFlags(), Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, LLT Ty,
                                            const MachineMemOperand &MMO,
                                            unsigned *Fast) const {
  EVT VT = getApproximateEVTForLLT(Ty, Context);
  return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
                            MMO.getFlags(), Fast);
}
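
// For example (assuming a data layout where i32 has 4-byte ABI alignment),
// a naturally aligned i32 access is accepted immediately with *Fast == 1,
// and only under-aligned accesses are referred to the target's
// allowsMisalignedMemoryAccesses hook:
//
//   unsigned Fast = 0;
//   bool OK = TLI.allowsMemoryAccess(Context, DL, MVT::i32, /*AddrSpace=*/0,
//                                    Align(4), MachineMemOperand::MONone,
//                                    &Fast);
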
//===----------------------------------------------------------------------===//
// TargetTransformInfo Helpers
//===----------------------------------------------------------------------===//

int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
  enum InstructionOpcodes {
#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
#include "llvm/IR/Instruction.def"
  };
  switch (static_cast<InstructionOpcodes>(Opcode)) {
  case Ret:            return 0;
  case Br:             return 0;
  case Switch:         return 0;
  case IndirectBr:     return 0;
  case Invoke:         return 0;
  case CallBr:         return 0;
  case Resume:         return 0;
  case Unreachable:    return 0;
  case CleanupRet:     return 0;
  case CatchRet:       return 0;
  case CatchPad:       return 0;
  case CatchSwitch:    return 0;
  case CleanupPad:     return 0;
  case FNeg:           return ISD::FNEG;
  case Add:            return ISD::ADD;
  case FAdd:           return ISD::FADD;
  case Sub:            return ISD::SUB;
  case FSub:           return ISD::FSUB;
  case Mul:            return ISD::MUL;
  case FMul:           return ISD::FMUL;
  case UDiv:           return ISD::UDIV;
  case SDiv:           return ISD::SDIV;
  case FDiv:           return ISD::FDIV;
  case URem:           return ISD::UREM;
  case SRem:           return ISD::SREM;
  case FRem:           return ISD::FREM;
  case Shl:            return ISD::SHL;
  case LShr:           return ISD::SRL;
  case AShr:           return ISD::SRA;
  case And:            return ISD::AND;
  case Or:             return ISD::OR;
  case Xor:            return ISD::XOR;
  case Alloca:         return 0;
  case Load:           return ISD::LOAD;
  case Store:          return ISD::STORE;
  case GetElementPtr:  return 0;
  case Fence:          return 0;
  case AtomicCmpXchg:  return 0;
  case AtomicRMW:      return 0;
  case Trunc:          return ISD::TRUNCATE;
  case ZExt:           return ISD::ZERO_EXTEND;
  case SExt:           return ISD::SIGN_EXTEND;
  case FPToUI:         return ISD::FP_TO_UINT;
  case FPToSI:         return ISD::FP_TO_SINT;
  case UIToFP:         return ISD::UINT_TO_FP;
  case SIToFP:         return ISD::SINT_TO_FP;
  case FPTrunc:        return ISD::FP_ROUND;
  case FPExt:          return ISD::FP_EXTEND;
  case PtrToInt:       return ISD::BITCAST;
  case IntToPtr:       return ISD::BITCAST;
  case BitCast:        return ISD::BITCAST;
  case AddrSpaceCast:  return ISD::ADDRSPACECAST;
  case ICmp:           return ISD::SETCC;
  case FCmp:           return ISD::SETCC;
  case PHI:            return 0;
  case Call:           return 0;
  case Select:         return ISD::SELECT;
  case UserOp1:        return 0;
  case UserOp2:        return 0;
  case VAArg:          return 0;
  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
  case InsertElement:  return ISD::INSERT_VECTOR_ELT;
  case ShuffleVector:  return ISD::VECTOR_SHUFFLE;
  case ExtractValue:   return ISD::MERGE_VALUES;
  case InsertValue:    return ISD::MERGE_VALUES;
  case LandingPad:     return 0;
  case Freeze:         return ISD::FREEZE;
  }

  llvm_unreachable("Unknown instruction type encountered!");
}

int TargetLoweringBase::IntrinsicIDToISD(Intrinsic::ID ID) const {
  switch (ID) {
  case Intrinsic::exp:
    return ISD::FEXP;
  case Intrinsic::exp2:
    return ISD::FEXP2;
  default:
    return ISD::DELETED_NODE;
  }
}
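
// Usage sketch: these mappings let cost models query ISD-level information
// for IR constructs, e.g.
//
//   int ISDOpc = TLI.InstructionOpcodeToISD(Instruction::Add); // ISD::ADD
//
// Opcodes with no ISD equivalent (e.g. PHI or Call) map to 0.
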
Value *
TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
                                                       bool UseTLS) const {
  // compiler-rt provides a variable with a magic name. Targets that do not
  // link with compiler-rt may also provide such a variable.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
  auto UnsafeStackPtr =
      dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));

  const DataLayout &DL = M->getDataLayout();
  PointerType *StackPtrTy = DL.getAllocaPtrType(M->getContext());

  if (!UnsafeStackPtr) {
    auto TLSModel = UseTLS ? GlobalValue::InitialExecTLSModel
                           : GlobalValue::NotThreadLocal;
    // The global variable is not defined yet, define it ourselves.
    // We use the initial-exec TLS model because we do not support the
    // variable living anywhere other than in the main executable.
    UnsafeStackPtr = new GlobalVariable(
        *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
        UnsafeStackPtrVar, nullptr, TLSModel);
  } else {
    // The variable exists, check its type and attributes.
    //
    // FIXME: Move to IR verifier.
    if (UnsafeStackPtr->getValueType() != StackPtrTy)
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
    if (UseTLS != UnsafeStackPtr->isThreadLocal())
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
                         (UseTLS ? "" : "not ") + "be thread-local");
  }
  return UnsafeStackPtr;
}

Value *
TargetLoweringBase::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
  if (!TM.getTargetTriple().isAndroid())
    return getDefaultSafeStackPointerLocation(IRB, true);

  // Android provides a libc function to retrieve the address of the current
  // thread's unsafe stack pointer.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  auto *PtrTy = PointerType::getUnqual(M->getContext());
  FunctionCallee Fn =
      M->getOrInsertFunction("__safestack_pointer_address", PtrTy);
  return IRB.CreateCall(Fn);
}

//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS,
                                               Instruction *I) const {
  // The default implementation supports a conservative RISC-style r+r and
  // r+i addressing mode.

  // Scalable offsets are not supported.
  if (AM.ScalableOffset)
    return false;

  // Allow a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16) - 1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r.
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default: // Don't allow n * r.
    return false;
  }

  return true;
}
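
// Worked examples against the default rules above: "reg + 1024" (Scale == 0,
// BaseOffs == 1024) and "reg + reg" (Scale == 1) are accepted; "2*reg + reg"
// (Scale == 2 with a base register) and any mode using a global as the base
// are rejected.
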
//===----------------------------------------------------------------------===//
//  Stack Protector
//===----------------------------------------------------------------------===//

// For OpenBSD return its special guard variable. Otherwise return nullptr,
// so that SelectionDAG can handle SSP.
Value *TargetLoweringBase::getIRStackGuard(IRBuilderBase &IRB) const {
  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
    Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
    const DataLayout &DL = M.getDataLayout();
    PointerType *PtrTy =
        PointerType::get(M.getContext(), DL.getDefaultGlobalsAddressSpace());
    GlobalVariable *G = M.getOrInsertGlobal("__guard_local", PtrTy);
    G->setVisibility(GlobalValue::HiddenVisibility);
    return G;
  }
  return nullptr;
}

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
  if (!M.getNamedValue("__stack_chk_guard")) {
    auto *GV = new GlobalVariable(M, PointerType::getUnqual(M.getContext()),
                                  false, GlobalVariable::ExternalLinkage,
                                  nullptr, "__stack_chk_guard");

    // FreeBSD has "__stack_chk_guard" defined externally in libc.so.
    if (M.getDirectAccessExternalData() &&
        !TM.getTargetTriple().isWindowsGNUEnvironment() &&
        !(TM.getTargetTriple().isPPC64() &&
          TM.getTargetTriple().isOSFreeBSD()) &&
        (!TM.getTargetTriple().isOSDarwin() ||
         TM.getRelocationModel() == Reloc::Static))
      GV->setDSOLocal(true);
  }
}

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
    return M.getNamedValue("__guard_local");
  }
  return M.getNamedValue("__stack_chk_guard");
}

Function *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
  return nullptr;
}

unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
  return MinimumJumpTableEntries;
}

void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
  MinimumJumpTableEntries = Val;
}

unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
  return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
}

unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
  return MaximumJumpTableSize;
}

void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
  MaximumJumpTableSize = Val;
}

bool TargetLoweringBase::isJumpTableRelative() const {
  return getTargetMachine().isPositionIndependent();
}

Align TargetLoweringBase::getPrefLoopAlignment(MachineLoop *ML) const {
  if (TM.Options.LoopAlignment)
    return Align(TM.Options.LoopAlignment);
  return PrefLoopAlignment;
}

unsigned TargetLoweringBase::getMaxPermittedBytesForAlignment(
    MachineBasicBlock *MBB) const {
  return MaxBytesForAlignment;
}

//===----------------------------------------------------------------------===//
//  Reciprocal Estimates
//===----------------------------------------------------------------------===//

/// Get the reciprocal estimate attribute string for a function that will
/// override the target defaults.
static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return F.getFnAttribute("reciprocal-estimates").getValueAsString();
}

/// Construct a string for the given reciprocal operation of the given type.
/// This string should match the corresponding option to the front-end's
/// "-mrecip" flag assuming those strings have been passed through in an
/// attribute string. For example, "vec-divf" for a division of a vXf32.
static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
  std::string Name = VT.isVector() ? "vec-" : "";

  Name += IsSqrt ? "sqrt" : "div";

  // TODO: Handle other float types?
  if (VT.getScalarType() == MVT::f64) {
    Name += "d";
  } else if (VT.getScalarType() == MVT::f16) {
    Name += "h";
  } else {
    assert(VT.getScalarType() == MVT::f32 &&
           "Unexpected FP type for reciprocal estimate");
    Name += "f";
  }

  return Name;
}
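
// For example, getReciprocalOpName(/*IsSqrt=*/false, MVT::v4f32) yields
// "vec-divf" and getReciprocalOpName(/*IsSqrt=*/true, MVT::f64) yields
// "sqrtd", matching the operation tokens accepted by -mrecip.
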
/// Return the character position and value (a single numeric character) of a
/// customized refinement operation in the input string if it exists. Return
/// false if there is no customized refinement step count.
static bool parseRefinementStep(StringRef In, size_t &Position,
                                uint8_t &Value) {
  const char RefStepToken = ':';
  Position = In.find(RefStepToken);
  if (Position == StringRef::npos)
    return false;

  StringRef RefStepString = In.substr(Position + 1);
  // Allow exactly one numeric character for the additional refinement
  // step parameter.
  if (RefStepString.size() == 1) {
    char RefStepChar = RefStepString[0];
    if (isDigit(RefStepChar)) {
      Value = RefStepChar - '0';
      return true;
    }
  }
  report_fatal_error("Invalid refinement step for -recip.");
}

/// For the input attribute string, return one of the ReciprocalEstimate enum
/// status values (enabled, disabled, or not specified) for this operation on
/// the specified data type.
static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "none", or "default" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(Override, RefPos, RefSteps)) {
      // Split the string for further processing.
      Override = Override.substr(0, RefPos);
    }

    // All reciprocal types are enabled.
    if (Override == "all")
      return TargetLoweringBase::ReciprocalEstimate::Enabled;

    // All reciprocal types are disabled.
    if (Override == "none")
      return TargetLoweringBase::ReciprocalEstimate::Disabled;

    // Target defaults for enablement are used.
    if (Override == "default")
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();
  static const char DisabledPrefix = '!';

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(RecipType, RefPos, RefSteps))
      RecipType = RecipType.substr(0, RefPos);

    // Ignore the disablement token for string matching.
    bool IsDisabled = RecipType[0] == DisabledPrefix;
    if (IsDisabled)
      RecipType = RecipType.substr(1);

    if (RecipType == VTName || RecipType == VTNameNoSize)
      return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
                        : TargetLoweringBase::ReciprocalEstimate::Enabled;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}
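
// Worked example: given the attribute string "vec-divf:2,!sqrtf",
// getOpEnabled(false, v4f32, ...) returns Enabled (the ":2" refinement
// suffix is stripped before matching), getOpEnabled(true, f32, ...) returns
// Disabled because of the '!' prefix, and any other operation/type pair
// returns Unspecified.
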
/// For the input attribute string, return the customized refinement step count
/// for this operation on the specified data type. If the step count does not
/// exist, return the ReciprocalEstimate enum value for unspecified.
static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "default", or "none" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(Override, RefPos, RefSteps))
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;

    // Split the string for further processing.
    Override = Override.substr(0, RefPos);
    assert(Override != "none" &&
           "Disabled reciprocals, but specified refinement steps?");

    // If this is a general override, return the specified number of steps.
    if (Override == "all" || Override == "default")
      return RefSteps;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(RecipType, RefPos, RefSteps))
      continue;

    RecipType = RecipType.substr(0, RefPos);
    if (RecipType == VTName || RecipType == VTNameNoSize)
      return RefSteps;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}

int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
                                                    MachineFunction &MF) const {
  return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
                                                   MachineFunction &MF) const {
  return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
                                               MachineFunction &MF) const {
  return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getDivRefinementSteps(EVT VT,
                                              MachineFunction &MF) const {
  return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
}
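
// Putting the hooks together (illustrative): a function carrying the
// attribute "reciprocal-estimates"="sqrtf:1" reports
// getRecipEstimateSqrtEnabled(MVT::f32, MF) == Enabled and
// getSqrtRefinementSteps(MVT::f32, MF) == 1, while the div queries return
// Unspecified and fall back to the target defaults.
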
bool TargetLoweringBase::isLoadBitCastBeneficial(
    EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG,
    const MachineMemOperand &MMO) const {
  // Single-element vectors are scalarized, so we should generally avoid having
  // any memory operations on such types, as they would get scalarized too.
  if (LoadVT.isFixedLengthVector() && BitcastVT.isFixedLengthVector() &&
      BitcastVT.getVectorNumElements() == 1)
    return false;

  // Don't do this if we could do an indexed load on the original type but not
  // on the new one.
  if (!LoadVT.isSimple() || !BitcastVT.isSimple())
    return true;

  MVT LoadMVT = LoadVT.getSimpleVT();

  // Don't bother doing this if it's just going to be promoted again later, as
  // doing so might interfere with other combines.
  if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
      getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
    return false;

  unsigned Fast = 0;
  return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), BitcastVT,
                            MMO, &Fast) &&
         Fast;
}

void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
  MF.getRegInfo().freezeReservedRegs();
}

MachineMemOperand::Flags TargetLoweringBase::getLoadMemOperandFlags(
    const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC,
    const TargetLibraryInfo *LibInfo) const {
  MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
  if (LI.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;

  if (LI.hasMetadata(LLVMContext::MD_nontemporal))
    Flags |= MachineMemOperand::MONonTemporal;

  if (LI.hasMetadata(LLVMContext::MD_invariant_load))
    Flags |= MachineMemOperand::MOInvariant;

  if (isDereferenceableAndAlignedPointer(LI.getPointerOperand(), LI.getType(),
                                         LI.getAlign(), DL, &LI, AC,
                                         /*DT=*/nullptr, LibInfo))
    Flags |= MachineMemOperand::MODereferenceable;

  Flags |= getTargetMMOFlags(LI);
  return Flags;
}

MachineMemOperand::Flags
TargetLoweringBase::getStoreMemOperandFlags(const StoreInst &SI,
                                            const DataLayout &DL) const {
  MachineMemOperand::Flags Flags = MachineMemOperand::MOStore;

  if (SI.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;

  if (SI.hasMetadata(LLVMContext::MD_nontemporal))
    Flags |= MachineMemOperand::MONonTemporal;

  // FIXME: Not preserving dereferenceable.
  Flags |= getTargetMMOFlags(SI);
  return Flags;
}

MachineMemOperand::Flags
TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI,
                                             const DataLayout &DL) const {
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&AI)) {
    if (RMW->isVolatile())
      Flags |= MachineMemOperand::MOVolatile;
  } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(&AI)) {
    if (CmpX->isVolatile())
      Flags |= MachineMemOperand::MOVolatile;
  } else
    llvm_unreachable("not an atomic instruction");

  // FIXME: Not preserving dereferenceable.
  Flags |= getTargetMMOFlags(AI);
  return Flags;
}

Instruction *TargetLoweringBase::emitLeadingFence(IRBuilderBase &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {
  if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
    return Builder.CreateFence(Ord);
  else
    return nullptr;
}

Instruction *TargetLoweringBase::emitTrailingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst,
                                                   AtomicOrdering Ord) const {
  if (isAcquireOrStronger(Ord))
    return Builder.CreateFence(Ord);
  else
    return nullptr;
}
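
// For example, on a target that lowers atomics with explicit fences, a
// seq_cst atomic store gets a leading fence (the ordering is release or
// stronger and the instruction stores) and a trailing fence (the ordering
// is acquire or stronger); a monotonic access gets neither, since both
// hooks return nullptr for it.
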
//===----------------------------------------------------------------------===//
// GlobalISel Hooks
//===----------------------------------------------------------------------===//

bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI,
                                        const TargetTransformInfo *TTI) const {
  auto &MF = *MI.getMF();
  auto &MRI = MF.getRegInfo();
  // Assuming a spill and reload of a value has a cost of 1 instruction each,
  // this helper function computes the maximum number of uses we should
  // consider for remat. E.g. on arm64 global addresses take 2 insts to
  // materialize. We break even in terms of code size when the original MI
  // has 2 users vs choosing to potentially spill. With any more than 2 users
  // we have a net code-size increase. This doesn't take register pressure
  // into account, though.
  auto maxUses = [](unsigned RematCost) {
    // A cost of 1 means remats are basically free.
    if (RematCost == 1)
      return std::numeric_limits<unsigned>::max();
    if (RematCost == 2)
      return 2U;

    // Remat is too expensive, only sink if there's one user.
    if (RematCost > 2)
      return 1U;
    llvm_unreachable("Unexpected remat cost");
  };

  switch (MI.getOpcode()) {
  default:
    return false;
  // Constant-like instructions should be close to their users.
  // We don't want long live-ranges for them.
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_INTTOPTR:
    return true;
  case TargetOpcode::G_GLOBAL_VALUE: {
    unsigned RematCost = TTI->getGISelRematGlobalCost();
    Register Reg = MI.getOperand(0).getReg();
    unsigned MaxUses = maxUses(RematCost);
    if (MaxUses == UINT_MAX)
      return true; // Remats are "free" so always localize.
    return MRI.hasAtMostUserInstrs(Reg, MaxUses);
  }
  }
}
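
// Worked example for maxUses: on a target whose getGISelRematGlobalCost()
// returns 2 (e.g. a global address needing two instructions to materialize),
// a G_GLOBAL_VALUE is localized only if it has at most two using
// instructions; a cost of 1 means localizing is always profitable.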