//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the WebAssemblyTargetLowering class.
///
//===----------------------------------------------------------------------===//

#include "WebAssemblyISelLowering.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

#define DEBUG_TYPE "wasm-lower"

WebAssemblyTargetLowering::WebAssemblyTargetLowering(
    const TargetMachine &TM, const WebAssemblySubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;

  // Booleans always contain 0 or 1.
  setBooleanContents(ZeroOrOneBooleanContent);
  // Except in SIMD vectors
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // We don't know the microarchitecture here, so just reduce register pressure.
  setSchedulingPreference(Sched::RegPressure);
  // Tell ISel that we have a stack pointer.
  setStackPointerRegisterToSaveRestore(
      Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
  // Set up the register classes.
  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
  if (Subtarget->hasSIMD128()) {
    addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
  }
  if (Subtarget->hasUnimplementedSIMD128()) {
    addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
  }
  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget->getRegisterInfo());

  setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
  setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
  setOperationAction(ISD::JumpTable, MVTPtr, Custom);
  setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
  setOperationAction(ISD::BRIND, MVT::Other, Custom);

  // Take the default expansion for va_arg, va_copy, and va_end. There is no
  // default action for va_start, so we do that custom.
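  // (Expand lets the legalizer synthesize an operation out of simpler ones;
  // Custom routes it to our LowerOperation hook below.)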
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
    // Don't expand the floating-point types to constant pools.
    setOperationAction(ISD::ConstantFP, T, Legal);
    // Expand floating-point comparisons.
    for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
                    ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
      setCondCodeAction(CC, T, Expand);
    // Expand floating-point library function operators.
    for (auto Op :
         {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
      setOperationAction(Op, T, Expand);
    // Note supported floating-point library function operators that otherwise
    // default to expand.
    for (auto Op :
         {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
      setOperationAction(Op, T, Legal);
    // Support minimum and maximum, which otherwise default to expand.
    setOperationAction(ISD::FMINIMUM, T, Legal);
    setOperationAction(ISD::FMAXIMUM, T, Legal);
    // WebAssembly currently has no builtin f16 support.
    setOperationAction(ISD::FP16_TO_FP, T, Expand);
    setOperationAction(ISD::FP_TO_FP16, T, Expand);
    setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
    setTruncStoreAction(T, MVT::f16, Expand);
  }

  // Expand unavailable integer operations.
  for (auto Op :
       {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
        ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
        ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
    for (auto T : {MVT::i32, MVT::i64})
      setOperationAction(Op, T, Expand);
    if (Subtarget->hasSIMD128())
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Expand);
    if (Subtarget->hasUnimplementedSIMD128())
      setOperationAction(Op, MVT::v2i64, Expand);
  }

  // SIMD-specific configuration
  if (Subtarget->hasSIMD128()) {
    // Support saturating add for i8x16 and i16x8
    for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
      for (auto T : {MVT::v16i8, MVT::v8i16})
        setOperationAction(Op, T, Legal);

    // Custom lower BUILD_VECTORs to minimize number of replace_lanes
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
      setOperationAction(ISD::BUILD_VECTOR, T, Custom);
    if (Subtarget->hasUnimplementedSIMD128())
      for (auto T : {MVT::v2i64, MVT::v2f64})
        setOperationAction(ISD::BUILD_VECTOR, T, Custom);

    // We have custom shuffle lowering to expose the shuffle mask
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
      setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
    if (Subtarget->hasUnimplementedSIMD128())
      for (auto T : {MVT::v2i64, MVT::v2f64})
        setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);

    // Custom lowering since wasm shifts must have a scalar shift amount
    for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL}) {
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Custom);
      if (Subtarget->hasUnimplementedSIMD128())
        setOperationAction(Op, MVT::v2i64, Custom);
    }
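    // (See LowerShift below: non-splat shift vectors are unrolled, and splat
    // shift amounts become a single scalar operand.)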

    // Custom lower lane accesses to expand out variable indices
    for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT}) {
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
        setOperationAction(Op, T, Custom);
      if (Subtarget->hasUnimplementedSIMD128())
        for (auto T : {MVT::v2i64, MVT::v2f64})
          setOperationAction(Op, T, Custom);
    }

    // There is no i64x2.mul instruction
    setOperationAction(ISD::MUL, MVT::v2i64, Expand);

    // There are no vector select instructions
    for (auto Op : {ISD::VSELECT, ISD::SELECT_CC, ISD::SELECT}) {
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
        setOperationAction(Op, T, Expand);
      if (Subtarget->hasUnimplementedSIMD128())
        for (auto T : {MVT::v2i64, MVT::v2f64})
          setOperationAction(Op, T, Expand);
    }

    // Expand integer operations supported for scalars but not SIMD
    for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP, ISD::SDIV, ISD::UDIV,
                    ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR}) {
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Expand);
      if (Subtarget->hasUnimplementedSIMD128())
        setOperationAction(Op, MVT::v2i64, Expand);
    }

    // Expand float operations supported for scalars but not SIMD
    for (auto Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT,
                    ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
                    ISD::FEXP, ISD::FEXP2, ISD::FRINT}) {
      setOperationAction(Op, MVT::v4f32, Expand);
      if (Subtarget->hasUnimplementedSIMD128())
        setOperationAction(Op, MVT::v2f64, Expand);
    }

    // Expand additional SIMD ops that V8 hasn't implemented yet
    if (!Subtarget->hasUnimplementedSIMD128()) {
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
    }
  }

  // As a special case, these operators use the type to mean the type to
  // sign-extend from.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget->hasSignExt()) {
    // Sign extends are legal only when extending a vector extract
    auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
    for (auto T : {MVT::i8, MVT::i16, MVT::i32})
      setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
  }
  for (auto T : MVT::integer_vector_valuetypes())
    setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);

  // Dynamic stack allocation: use the default expansion.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);

  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
  setOperationAction(ISD::CopyToReg, MVT::Other, Custom);

  // Expand these forms; we pattern-match the forms that we can handle in isel.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
    for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
      setOperationAction(Op, T, Expand);

  // We have custom switch handling.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // WebAssembly doesn't have:
  //  - Floating-point extending loads.
  //  - Floating-point truncating stores.
  //  - i1 extending loads.
  //  - extending/truncating SIMD loads/stores
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  for (auto T : MVT::integer_valuetypes())
    for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
      setLoadExtAction(Ext, T, MVT::i1, Promote);
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
                   MVT::v2f64}) {
      for (auto MemT : MVT::vector_valuetypes()) {
        if (MVT(T) != MemT) {
          setTruncStoreAction(T, MemT, Expand);
          for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
            setLoadExtAction(Ext, T, MemT, Expand);
        }
      }
    }
  }

  // Don't do anything clever with build_pairs
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Trap lowers to wasm unreachable
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Exception handling intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  setMaxAtomicSizeInBitsSupported(64);

  if (Subtarget->hasBulkMemory()) {
    // Use memory.copy and friends over multiple loads and stores
    MaxStoresPerMemcpy = 1;
    MaxStoresPerMemcpyOptSize = 1;
    MaxStoresPerMemmove = 1;
    MaxStoresPerMemmoveOptSize = 1;
    MaxStoresPerMemset = 1;
    MaxStoresPerMemsetOptSize = 1;
  }

  // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
  // consistent with the f64 and f128 names.
  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

  // Define the emscripten name for return address helper.
  // TODO: when implementing other WASM backends, make this generic or only do
  // this on emscripten depending on what they end up doing.
  setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");

  // Always convert switches to br_tables unless there is only one case, which
  // is equivalent to a simple branch. This reduces code size for wasm, and we
  // defer possible jump table optimizations to the VM.
  setMinimumJumpTableEntries(2);
}

TargetLowering::AtomicExpansionKind
WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // We have wasm instructions for these
  switch (AI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::Xchg:
    return AtomicExpansionKind::None;
  default:
    break;
  }
  return AtomicExpansionKind::CmpXChg;
}

FastISel *WebAssemblyTargetLowering::createFastISel(
    FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
  return WebAssembly::createFastISel(FuncInfo, LibInfo);
}

MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
                                                      EVT VT) const {
  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
  if (BitWidth > 1 && BitWidth < 8)
    BitWidth = 8;

  if (BitWidth > 64) {
    // The shift will be lowered to a libcall, and compiler-rt libcalls expect
    // the count to be an i32.
    BitWidth = 32;
    assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
           "32-bit shift counts ought to be enough for anyone");
  }

  MVT Result = MVT::getIntegerVT(BitWidth);
  assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
         "Unable to represent scalar shift amount type");
  return Result;
}

// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
// undefined result on invalid/overflow, to the WebAssembly opcode, which
// traps on invalid/overflow.
static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
                                       MachineBasicBlock *BB,
                                       const TargetInstrInfo &TII,
                                       bool IsUnsigned, bool Int64,
                                       bool Float64, unsigned LoweredOpcode) {
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  unsigned OutReg = MI.getOperand(0).getReg();
  unsigned InReg = MI.getOperand(1).getReg();

  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
  unsigned Eqz = WebAssembly::EQZ_I32;
  unsigned And = WebAssembly::AND_I32;
  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
  int64_t Substitute = IsUnsigned ? 0 : Limit;
  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
  auto &Context = BB->getParent()->getFunction().getContext();
  Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);

  const BasicBlock *LLVMBB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);

  MachineFunction::iterator It = ++BB->getIterator();
  F->insert(It, FalseMBB);
  F->insert(It, TrueMBB);
  F->insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(TrueMBB);
  BB->addSuccessor(FalseMBB);
  TrueMBB->addSuccessor(DoneMBB);
  FalseMBB->addSuccessor(DoneMBB);

  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));

  MI.eraseFromParent();
  // For signed numbers, we can do a single comparison to determine whether
  // fabs(x) is within range.
  if (IsUnsigned) {
    Tmp0 = InReg;
  } else {
    BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
  }
  BuildMI(BB, DL, TII.get(FConst), Tmp1)
      .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
  BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);

  // For unsigned numbers, we have to do a separate comparison with zero.
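  // (The input is in range only when 0.0 <= x < CmpVal; the code below
  // computes (x < CmpVal) & (x >= 0.0).)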
  if (IsUnsigned) {
    Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
    unsigned SecondCmpReg =
        MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    unsigned AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    BuildMI(BB, DL, TII.get(FConst), Tmp1)
        .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
    BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
    BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
    CmpReg = AndReg;
  }

  BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);

  // Create the CFG diamond to select between doing the conversion or using
  // the substitute value.
  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
  BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
  BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
      .addReg(FalseReg)
      .addMBB(FalseMBB)
      .addReg(TrueReg)
      .addMBB(TrueMBB);

  return DoneMBB;
}

MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case WebAssembly::FP_TO_SINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, false, false,
                        WebAssembly::I32_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, false, false,
                        WebAssembly::I32_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, true, false,
                        WebAssembly::I64_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, true, false,
                        WebAssembly::I64_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, false, true,
                        WebAssembly::I32_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, false, true,
                        WebAssembly::I32_TRUNC_U_F64);
  case WebAssembly::FP_TO_SINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, true, true,
                        WebAssembly::I64_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, true, true,
                        WebAssembly::I64_TRUNC_U_F64);
    llvm_unreachable("Unexpected instruction to emit with custom inserter");
  }
}

const char *
WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
  case WebAssemblyISD::FIRST_NUMBER:
    break;
#define HANDLE_NODETYPE(NODE)                                                  \
  case WebAssemblyISD::NODE:                                                   \
    return "WebAssemblyISD::" #NODE;
#include "WebAssemblyISD.def"
#undef HANDLE_NODETYPE
  }
  return nullptr;
}

std::pair<unsigned, const TargetRegisterClass *>
WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // WebAssembly register class.
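  // (For example, 'r' with an i64 operand maps to I64RegClass, and any 128-bit
  // vector type maps to V128RegClass.)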
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      assert(VT != MVT::iPTR && "Pointer MVT not expected here");
      if (Subtarget->hasSIMD128() && VT.isVector()) {
        if (VT.getSizeInBits() == 128)
          return std::make_pair(0U, &WebAssembly::V128RegClass);
      }
      if (VT.isInteger() && !VT.isVector()) {
        if (VT.getSizeInBits() <= 32)
          return std::make_pair(0U, &WebAssembly::I32RegClass);
        if (VT.getSizeInBits() <= 64)
          return std::make_pair(0U, &WebAssembly::I64RegClass);
      }
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
  // Assume ctz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
  // Assume clz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                      const AddrMode &AM,
                                                      Type *Ty, unsigned AS,
                                                      Instruction *I) const {
  // WebAssembly offsets are added as unsigned without wrapping. The
  // isLegalAddressingMode gives us no way to determine if wrapping could be
  // happening, so we approximate this by accepting only non-negative offsets.
  if (AM.BaseOffs < 0)
    return false;

  // WebAssembly has no scale register operands.
  if (AM.Scale != 0)
    return false;

  // Everything else is legal.
  return true;
}

bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/,
    MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
  // WebAssembly supports unaligned accesses, though it should be declared
  // with the p2align attribute on loads and stores which do so, and there
  // may be a performance impact. We tell LLVM they're "fast" because
  // for the kinds of things that LLVM uses this for (merging adjacent stores
  // of constants, etc.), WebAssembly implementations will either want the
  // unaligned access or they'll split anyway.
  if (Fast)
    *Fast = true;
  return true;
}

bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
                                              AttributeList Attr) const {
  // The current thinking is that wasm engines will perform this optimization,
  // so we can save on code size.
  return true;
}

EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
                                                  LLVMContext &C,
                                                  EVT VT) const {
  if (VT.isVector())
    return VT.changeVectorElementTypeToInteger();

  return TargetLowering::getSetCCResultType(DL, C, VT);
}
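
// Tell ISel which intrinsics touch memory, so that the DAG builder attaches a
// MachineMemOperand with the right type, alignment, and flags to them.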
bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                   const CallInst &I,
                                                   MachineFunction &MF,
                                                   unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::wasm_atomic_notify:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = 4;
    // atomic.notify instruction does not really load the memory specified with
    // this argument, but MachineMemOperand should either be load or store, so
    // we set this to a load.
    // FIXME Volatile isn't really correct, but currently all LLVM atomic
    // instructions are treated as volatiles in the backend, so we should be
    // consistent. The same applies for wasm_atomic_wait intrinsics too.
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_atomic_wait_i32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = 4;
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_atomic_wait_i64:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = 8;
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  default:
    return false;
  }
}

//===----------------------------------------------------------------------===//
// WebAssembly Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}

// Test whether the given calling convention is supported.
static bool callingConvSupported(CallingConv::ID CallConv) {
  // We currently support the language-independent target-independent
  // conventions. We don't yet have a way to annotate calls with properties like
  // "cold", and we don't have any call-clobbered registers, so these are mostly
  // all handled the same.
  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
         CallConv == CallingConv::Cold ||
         CallConv == CallingConv::PreserveMost ||
         CallConv == CallingConv::PreserveAll ||
         CallConv == CallingConv::CXX_FAST_TLS;
}

SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!callingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  // Fail if tail calls are required but not enabled
  if (!Subtarget->hasTailCall()) {
    if ((CallConv == CallingConv::Fast && CLI.IsTailCall &&
         MF.getTarget().Options.GuaranteedTailCallOpt) ||
        (CLI.CS && CLI.CS.isMustTailCall()))
      fail(DL, DAG, "WebAssembly 'tail-call' feature not enabled");
    CLI.IsTailCall = false;
  }

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  if (Ins.size() > 1)
    fail(DL, DAG, "WebAssembly doesn't support more than 1 returned value yet");

  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  unsigned NumFixedArgs = 0;
  for (unsigned I = 0; I < Outs.size(); ++I) {
    const ISD::OutputArg &Out = Outs[I];
    SDValue &OutVal = OutVals[I];
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
    // Count the number of fixed args *after* legalization.
    NumFixedArgs += Out.IsFixed;
  }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      SDValue &Arg = OutVals[I];
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      unsigned Align = std::max(Out.Flags.getOrigAlign(),
                                Layout.getABITypeAlignment(Ty));
      unsigned Offset = CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty),
                                             Align);
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg :
         make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
                       MachinePointerInfo::getFixedStack(MF, FI, Offset), 0));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    FINode = DAG.getIntPtrConstant(0, DL);
  }
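
  // At this point FINode is the base of the caller-allocated vararg buffer (or
  // constant 0 if no buffer bytes were needed); it is passed to the callee as
  // one extra trailing argument below.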

  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is), turn it into a TargetGlobalAddress node so that LowerGlobalAddress
    // doesn't add MO_GOT, which is not needed for direct calls.
    GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Callee);
    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                        getPointerTy(DAG.getDataLayout()),
                                        GA->getOffset());
    Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
                         getPointerTy(DAG.getDataLayout()), Callee);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }

  if (CLI.IsTailCall) {
    // ret_calls do not return values to the current frame
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
  }

  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res =
      DAG.getNode(Ins.empty() ? WebAssemblyISD::CALL0 : WebAssemblyISD::CALL1,
                  DL, InTyList, Ops);
  if (Ins.empty()) {
    Chain = Res;
  } else {
    InVals.push_back(Res);
    Chain = Res.getValue(1);
  }

  return Chain;
}

bool WebAssemblyTargetLowering::CanLowerReturn(
    CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext & /*Context*/) const {
  // WebAssembly can't currently handle returning tuples.
  return Outs.size() <= 1;
}

SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {
  assert(Outs.size() <= 1 && "WebAssembly can only return up to one value");
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  SmallVector<SDValue, 4> RetOps(1, Chain);
  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  // Record the number and types of the return values.
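  // (The loop below only validates flags; the result types themselves are
  // recorded via computeSignatureVTs in LowerFormalArguments.)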
  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
    assert(Out.IsFixed && "non-fixed return value is not valid");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}

SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  // Set up the incoming ARGUMENTS value, which serves to represent the
  // liveness of the incoming values before they're represented by virtual
  // registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  for (const ISD::InputArg &In : Ins) {
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getOrigAlign() because all our arguments are passed in
    // registers.
    InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                                           DAG.getTargetConstant(InVals.size(),
                                                                 DL, MVT::i32))
                             : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
  if (IsVarArg) {
    MVT PtrVT = getPointerTy(MF.getDataLayout());
    unsigned VarargVreg =
        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
    MFI->setVarargBufferVreg(VarargVreg);
    Chain = DAG.getCopyToReg(
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
    MFI->addParam(PtrVT);
  }

  // Record the number and types of arguments and results.
  SmallVector<MVT, 4> Params;
  SmallVector<MVT, 4> Results;
  computeSignatureVTs(MF.getFunction().getFunctionType(), MF.getFunction(),
                      DAG.getTarget(), Params, Results);
  for (MVT VT : Results)
    MFI->addResult(VT);
  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
  // the param logic here with ComputeSignatureVTs
  assert(MFI->getParams().size() == Params.size() &&
         std::equal(MFI->getParams().begin(), MFI->getParams().end(),
                    Params.begin()));

  return Chain;
}

void WebAssemblyTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Do not add any results, signifying that N should not be custom lowered
    // after all. This happens because simd128 turns on custom lowering for
    // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
    // illegal type.
    break;
  default:
    llvm_unreachable(
        "ReplaceNodeResults not implemented for this op for WebAssembly!");
  }
}

//===----------------------------------------------------------------------===//
// Custom lowering hooks.
//===----------------------------------------------------------------------===//

SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unimplemented operation lowering");
    return SDValue();
  case ISD::FrameIndex:
    return LowerFrameIndex(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::BlockAddress:
  case ISD::BRIND:
    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
    return SDValue();
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::CopyToReg:
    return LowerCopyToReg(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
  case ISD::INSERT_VECTOR_ELT:
    return LowerAccessVectorElement(Op, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
    return LowerIntrinsic(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BUILD_VECTOR:
    return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return LowerShift(Op, DAG);
  }
}

SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(2);
  if (isa<FrameIndexSDNode>(Src.getNode())) {
    // CopyToReg nodes don't support FrameIndex operands. Other targets select
    // the FI to some LEA-like instruction, but since we don't have that, we
    // need to insert some kind of instruction that can take an FI operand and
    // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
    // local.copy between Op and its FI operand.
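    // That is, (CopyToReg chain, reg, FI) becomes
    // (CopyToReg chain, reg, (COPY_I32 FI)), or COPY_I64 for 64-bit pointers.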
    SDValue Chain = Op.getOperand(0);
    SDLoc DL(Op);
    unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
    EVT VT = Src.getValueType();
    SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
                                                   : WebAssembly::COPY_I64,
                                    DL, VT, Src),
                 0);
    return Op.getNode()->getNumValues() == 1
               ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
               : DAG.getCopyToReg(Chain, DL, Reg, Copy,
                                  Op.getNumOperands() == 4 ? Op.getOperand(3)
                                                           : SDValue());
  }
  return SDValue();
}

SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
                                                   SelectionDAG &DAG) const {
  int FI = cast<FrameIndexSDNode>(Op)->getIndex();
  return DAG.getTargetFrameIndex(FI, Op.getValueType());
}

SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);

  if (!Subtarget->getTargetTriple().isOSEmscripten()) {
    fail(DL, DAG,
         "Non-Emscripten WebAssembly hasn't implemented "
         "__builtin_return_address");
    return SDValue();
  }

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
                     {DAG.getConstant(Depth, DL, MVT::i32)}, false, DL)
      .first;
}

SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // Non-zero depths are not supported by WebAssembly currently. Use the
  // legalizer's default expansion, which is to return 0 (what this function is
  // documented to do).
  if (Op.getConstantOperandVal(0) > 0)
    return SDValue();

  DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  unsigned FP =
      Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
}

SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *GA = cast<GlobalAddressSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(GA->getTargetFlags() == 0 &&
         "Unexpected target flags on generic GlobalAddressSDNode");
  if (GA->getAddressSpace() != 0)
    fail(DL, DAG, "WebAssembly only expects the 0 address space");

  unsigned OperandFlags = 0;
  if (isPositionIndependent()) {
    const GlobalValue *GV = GA->getGlobal();
    if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
      MachineFunction &MF = DAG.getMachineFunction();
      MVT PtrVT = getPointerTy(MF.getDataLayout());
      const char *BaseName;
      if (GV->getValueType()->isFunctionTy()) {
        BaseName = MF.createExternalSymbolName("__table_base");
        OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
      } else {
        BaseName = MF.createExternalSymbolName("__memory_base");
        OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
      }
      SDValue BaseAddr =
          DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
                      DAG.getTargetExternalSymbol(BaseName, PtrVT));

      SDValue SymAddr = DAG.getNode(
          WebAssemblyISD::WrapperPIC, DL, VT,
          DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
                                     OperandFlags));

      return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
    } else {
      OperandFlags = WebAssemblyII::MO_GOT;
    }
  }
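
  // Fall through: a plain direct address in non-PIC mode, or a GOT access when
  // OperandFlags is MO_GOT.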
  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
                                                GA->getOffset(), OperandFlags));
}

SDValue
WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *ES = cast<ExternalSymbolSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(ES->getTargetFlags() == 0 &&
         "Unexpected target flags on generic ExternalSymbolSDNode");
  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
}

SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // There's no need for a Wrapper node because we always incorporate a jump
  // table operand into a BR_TABLE instruction, rather than ever
  // materializing it in a register.
  const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
                                JT->getTargetFlags());
}

SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
  SDValue Index = Op.getOperand(2);
  assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Index);

  MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
  const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;

  // Add an operand for each case.
  for (auto MBB : MBBs)
    Ops.push_back(DAG.getBasicBlock(MBB));

  // TODO: For now, we just pick something arbitrary for a default case. We
  // really want to sniff out the guard and put in the real default case (and
  // delete the guard).
  Ops.push_back(DAG.getBasicBlock(MBBs[0]));

  return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
}

SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());

  auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
                                    MFI->getVarargBufferVreg(), PtrVT);
  return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
                      MachinePointerInfo(SV), 0);
}

SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned IntNo;
  switch (Op.getOpcode()) {
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN:
    IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    break;
  case ISD::INTRINSIC_WO_CHAIN:
    IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    break;
  default:
    llvm_unreachable("Invalid intrinsic");
  }
  SDLoc DL(Op);

  switch (IntNo) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.
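
  // wasm.lsda returns the address of this function's language-specific data
  // area (its exception-handling table), labeled GCC_except_table<N>.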
  case Intrinsic::wasm_lsda: {
    EVT VT = Op.getValueType();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    auto &Context = MF.getMMI().getContext();
    MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
                                            Twine(MF.getFunctionNumber()));
    return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                       DAG.getMCSymbol(S, PtrVT));
  }

  case Intrinsic::wasm_throw: {
    // We only support C++ exceptions for now
    int Tag = cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
    if (Tag != CPP_EXCEPTION)
      llvm_unreachable("Invalid tag!");
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    const char *SymName = MF.createExternalSymbolName("__cpp_exception");
    SDValue SymNode = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
                                  DAG.getTargetExternalSymbol(SymName, PtrVT));
    return DAG.getNode(WebAssemblyISD::THROW, DL,
                       MVT::Other, // outchain type
                       {
                           Op.getOperand(0), // inchain
                           SymNode,          // exception symbol
                           Op.getOperand(3)  // thrown value
                       });
  }
  }
}

SDValue
WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // If sign extension operations are disabled, allow sext_inreg only if operand
  // is a vector extract. SIMD does not depend on sign extension operations, but
  // allowing sext_inreg in this context lets us have simple patterns to select
  // extract_lane_s instructions. Expanding sext_inreg everywhere would be
  // simpler in this file, but would necessitate large and brittle patterns to
  // undo the expansion and select extract_lane_s instructions.
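  // For example, (sign_extend_inreg (extract_vector_elt (v16i8 V), Idx), i8)
  // can then be selected to a single i8x16.extract_lane_s.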
  assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
  if (Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    const SDValue &Extract = Op.getOperand(0);
    MVT VecT = Extract.getOperand(0).getSimpleValueType();
    MVT ExtractedLaneT = static_cast<VTSDNode *>(Op.getOperand(1).getNode())
                             ->getVT()
                             .getSimpleVT();
    MVT ExtractedVecT =
        MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
    if (ExtractedVecT == VecT)
      return Op;
    // Bitcast vector to appropriate type to ensure ISel pattern coverage
    const SDValue &Index = Extract.getOperand(1);
    unsigned IndexVal =
        static_cast<ConstantSDNode *>(Index.getNode())->getZExtValue();
    unsigned Scale =
        ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
    assert(Scale > 1);
    SDValue NewIndex =
        DAG.getConstant(IndexVal * Scale, DL, Index.getValueType());
    SDValue NewExtract = DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
        DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(),
                       NewExtract, Op.getOperand(1));
  }
  // Otherwise expand
  return SDValue();
}

SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const EVT VecT = Op.getValueType();
  const EVT LaneT = Op.getOperand(0).getValueType();
  const size_t Lanes = Op.getNumOperands();
  auto IsConstant = [](const SDValue &V) {
    return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
  };

  // Find the most common operand, which is approximately the best to splat
  using Entry = std::pair<SDValue, size_t>;
  SmallVector<Entry, 16> ValueCounts;
  size_t NumConst = 0, NumDynamic = 0;
  for (const SDValue &Lane : Op->op_values()) {
    if (Lane.isUndef()) {
      continue;
    } else if (IsConstant(Lane)) {
      NumConst++;
    } else {
      NumDynamic++;
    }
    auto CountIt = std::find_if(ValueCounts.begin(), ValueCounts.end(),
                                [&Lane](Entry A) { return A.first == Lane; });
    if (CountIt == ValueCounts.end()) {
      ValueCounts.emplace_back(Lane, 1);
    } else {
      CountIt->second++;
    }
  }
  auto CommonIt =
      std::max_element(ValueCounts.begin(), ValueCounts.end(),
                       [](Entry A, Entry B) { return A.second < B.second; });
  assert(CommonIt != ValueCounts.end() && "Unexpected all-undef build_vector");
  SDValue SplatValue = CommonIt->first;
  size_t NumCommon = CommonIt->second;
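
  // Worked example of the size accounting below, for v4i32: each
  // {i32,f32}.const costs 5 bytes (1 + max(4, 16/4)), so a v128.const init
  // costs 18 + 3 * NumDynamic bytes, while a constant-splat init costs
  // 7 + 8 * (NumConst - NumCommon) + 3 * NumDynamic bytes; the cheaper
  // encoding wins.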
  // If v128.const is available, consider using it instead of a splat
  if (Subtarget->hasUnimplementedSIMD128()) {
    // {i32,i64,f32,f64}.const opcode, and value
    const size_t ConstBytes = 1 + std::max(size_t(4), 16 / Lanes);
    // SIMD prefix and opcode
    const size_t SplatBytes = 2;
    const size_t SplatConstBytes = SplatBytes + ConstBytes;
    // SIMD prefix, opcode, and lane index
    const size_t ReplaceBytes = 3;
    const size_t ReplaceConstBytes = ReplaceBytes + ConstBytes;
    // SIMD prefix, v128.const opcode, and 128-bit value
    const size_t VecConstBytes = 18;
    // Initial v128.const and a replace_lane for each non-const operand
    const size_t ConstInitBytes = VecConstBytes + NumDynamic * ReplaceBytes;
    // Initial splat and all necessary replace_lanes
    const size_t SplatInitBytes =
        IsConstant(SplatValue)
            // Initial constant splat
            ? (SplatConstBytes +
               // Constant replace_lanes
               (NumConst - NumCommon) * ReplaceConstBytes +
               // Dynamic replace_lanes
               (NumDynamic * ReplaceBytes))
            // Initial dynamic splat
            : (SplatBytes +
               // Constant replace_lanes
               (NumConst * ReplaceConstBytes) +
               // Dynamic replace_lanes
               (NumDynamic - NumCommon) * ReplaceBytes);
    if (ConstInitBytes < SplatInitBytes) {
      // Create build_vector that will lower to initial v128.const
      SmallVector<SDValue, 16> ConstLanes;
      for (const SDValue &Lane : Op->op_values()) {
        if (IsConstant(Lane)) {
          ConstLanes.push_back(Lane);
        } else if (LaneT.isFloatingPoint()) {
          ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
        } else {
          ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
        }
      }
      SDValue Result = DAG.getBuildVector(VecT, DL, ConstLanes);
      // Add replace_lane instructions for non-const lanes
      for (size_t I = 0; I < Lanes; ++I) {
        const SDValue &Lane = Op->getOperand(I);
        if (!Lane.isUndef() && !IsConstant(Lane))
          Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
                               DAG.getConstant(I, DL, MVT::i32));
      }
      return Result;
    }
  }
  // Use a splat for the initial vector
  SDValue Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
  // Add replace_lane instructions for other values
  for (size_t I = 0; I < Lanes; ++I) {
    const SDValue &Lane = Op->getOperand(I);
    if (Lane != SplatValue)
      Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
                           DAG.getConstant(I, DL, MVT::i32));
  }
  return Result;
}

SDValue
WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
  MVT VecType = Op.getOperand(0).getSimpleValueType();
  assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
  size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;

  // Space for two vector args and sixteen mask indices
  SDValue Ops[18];
  size_t OpIdx = 0;
  Ops[OpIdx++] = Op.getOperand(0);
  Ops[OpIdx++] = Op.getOperand(1);

  // Expand mask indices to byte indices and materialize them as operands
  for (int M : Mask) {
    for (size_t J = 0; J < LaneBytes; ++J) {
      // Lower undefs (represented by -1 in mask) to zero
      uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
      Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
    }
  }
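  // For example, a v4i32 mask <0, 4, 1, 5> produces the byte indices 0-3,
  // 16-19, 4-7, and 20-23 (lanes of the second operand start at byte 16).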

  return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
}

SDValue
WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
                                                    SelectionDAG &DAG) const {
  // Allow constant lane indices, expand variable lane indices
  SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
  if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
    return Op;
  else
    // Perform default expansion
    return SDValue();
}

static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
  EVT LaneT = Op.getSimpleValueType().getVectorElementType();
  // 32-bit and 64-bit unrolled shifts will have proper semantics
  if (LaneT.bitsGE(MVT::i32))
    return DAG.UnrollVectorOp(Op.getNode());
  // Otherwise mask the shift value to get proper semantics from 32-bit shift
  SDLoc DL(Op);
  SDValue ShiftVal = Op.getOperand(1);
  uint64_t MaskVal = LaneT.getSizeInBits() - 1;
  SDValue MaskedShiftVal = DAG.getNode(
      ISD::AND,                    // mask opcode
      DL, ShiftVal.getValueType(), // masked value type
      ShiftVal,                    // original shift value operand
      DAG.getConstant(MaskVal, DL, ShiftVal.getValueType()) // mask operand
  );

  return DAG.UnrollVectorOp(
      DAG.getNode(Op.getOpcode(),        // original shift opcode
                  DL, Op.getValueType(), // original return type
                  Op.getOperand(0),      // original vector operand
                  MaskedShiftVal         // new masked shift value operand
                  )
          .getNode());
}

SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);

  // Only manually lower vector shifts
  assert(Op.getSimpleValueType().isVector());

  // Expand all vector shifts until V8 fixes its implementation
  // TODO: remove this once V8 is fixed
  if (!Subtarget->hasUnimplementedSIMD128())
    return unrollVectorShift(Op, DAG);

  // Unroll non-splat vector shifts
  BuildVectorSDNode *ShiftVec;
  SDValue SplatVal;
  if (!(ShiftVec = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode())) ||
      !(SplatVal = ShiftVec->getSplatValue()))
    return unrollVectorShift(Op, DAG);

  // All splats except i64x2 const splats are handled by patterns
  auto *SplatConst = dyn_cast<ConstantSDNode>(SplatVal);
  if (!SplatConst || Op.getSimpleValueType() != MVT::v2i64)
    return Op;

  // i64x2 const splats are custom lowered to avoid unnecessary wraps
  unsigned Opcode;
  switch (Op.getOpcode()) {
  case ISD::SHL:
    Opcode = WebAssemblyISD::VEC_SHL;
    break;
  case ISD::SRA:
    Opcode = WebAssemblyISD::VEC_SHR_S;
    break;
  case ISD::SRL:
    Opcode = WebAssemblyISD::VEC_SHR_U;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }
  APInt Shift = SplatConst->getAPIntValue().zextOrTrunc(32);
  return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0),
                     DAG.getConstant(Shift, DL, MVT::i32));
}

//===----------------------------------------------------------------------===//
// WebAssembly Optimization Hooks
//===----------------------------------------------------------------------===//