//===-- ARMInstrVFP.td - VFP support for ARM ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the ARM VFP instruction set.
//
//===----------------------------------------------------------------------===//

def SDT_CMPFP0  : SDTypeProfile<0, 1, [SDTCisFP<0>]>;
def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
                                       SDTCisSameAs<1, 2>]>;
def SDT_VMOVRRD : SDTypeProfile<2, 1, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>,
                                       SDTCisVT<2, f64>]>;

def SDT_VMOVSR : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisVT<1, i32>]>;

def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInGlue, SDNPOutGlue]>;
def arm_cmpfp  : SDNode<"ARMISD::CMPFP", SDT_ARMCmp, [SDNPOutGlue]>;
def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutGlue]>;
def arm_cmpfpe : SDNode<"ARMISD::CMPFPE", SDT_ARMCmp, [SDNPOutGlue]>;
def arm_cmpfpe0: SDNode<"ARMISD::CMPFPEw0", SDT_CMPFP0, [SDNPOutGlue]>;
def arm_fmdrr  : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
def arm_fmrrd  : SDNode<"ARMISD::VMOVRRD", SDT_VMOVRRD>;
def arm_vmovsr : SDNode<"ARMISD::VMOVSR", SDT_VMOVSR>;

def SDT_VMOVhr : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, i32>] >;
def SDT_VMOVrh : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisFP<1>] >;
def arm_vmovhr : SDNode<"ARMISD::VMOVhr", SDT_VMOVhr>;
def arm_vmovrh : SDNode<"ARMISD::VMOVrh", SDT_VMOVrh>;

//===----------------------------------------------------------------------===//
// Operand Definitions.
//

// 8-bit floating-point immediate encodings.
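// (Note: these are the VFP "modified immediate" constants, nominally values of
// the form +/-(16..31)/16 * 2^n with n in [-3,4]; the ARM_AM::getFP*Imm helpers
// used below return -1 for any constant that has no such encoding.)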
def FPImmOperand : AsmOperandClass {
  let Name = "FPImm";
  let ParserMethod = "parseFPImm";
}

def vfp_f16imm : Operand<f16>,
                 PatLeaf<(f16 fpimm), [{
      return ARM_AM::getFP16Imm(N->getValueAPF()) != -1;
    }], SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP16Imm(InVal);
      return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
    }]>> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
}

def vfp_f32f16imm_xform : SDNodeXForm<fpimm, [{
    APFloat InVal = N->getValueAPF();
    uint32_t enc = ARM_AM::getFP32FP16Imm(InVal);
    return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
  }]>;

def vfp_f32f16imm : PatLeaf<(f32 fpimm), [{
      return ARM_AM::getFP32FP16Imm(N->getValueAPF()) != -1;
    }], vfp_f32f16imm_xform>;

def vfp_f32imm_xform : SDNodeXForm<fpimm, [{
    APFloat InVal = N->getValueAPF();
    uint32_t enc = ARM_AM::getFP32Imm(InVal);
    return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
  }]>;

def gi_vfp_f32imm : GICustomOperandRenderer<"renderVFPF32Imm">,
                    GISDNodeXFormEquiv<vfp_f32imm_xform>;

def vfp_f32imm : Operand<f32>,
                 PatLeaf<(f32 fpimm), [{
      return ARM_AM::getFP32Imm(N->getValueAPF()) != -1;
    }], vfp_f32imm_xform> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
  let GISelPredicateCode = [{
    const auto &MO = MI.getOperand(1);
    if (!MO.isFPImm())
      return false;
    return ARM_AM::getFP32Imm(MO.getFPImm()->getValueAPF()) != -1;
  }];
}

def vfp_f64imm_xform : SDNodeXForm<fpimm, [{
    APFloat InVal = N->getValueAPF();
    uint32_t enc = ARM_AM::getFP64Imm(InVal);
    return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
  }]>;

def gi_vfp_f64imm : GICustomOperandRenderer<"renderVFPF64Imm">,
                    GISDNodeXFormEquiv<vfp_f64imm_xform>;

def vfp_f64imm : Operand<f64>,
                 PatLeaf<(f64 fpimm), [{
      return ARM_AM::getFP64Imm(N->getValueAPF()) != -1;
    }], vfp_f64imm_xform> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
  let GISelPredicateCode = [{
    const auto &MO = MI.getOperand(1);
    if (!MO.isFPImm())
      return false;
    return ARM_AM::getFP64Imm(MO.getFPImm()->getValueAPF()) != -1;
  }];
}

def alignedload16 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 2;
}]>;

def alignedload32 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 4;
}]>;

def alignedstore16 : PatFrag<(ops node:$val, node:$ptr),
                             (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 2;
}]>;

def alignedstore32 : PatFrag<(ops node:$val, node:$ptr),
                             (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 4;
}]>;

// The VCVT to/from fixed-point instructions encode the 'fbits' operand
// (the number of fixed bits) differently than it appears in the assembly
// source. It's encoded as "Size - fbits" where Size is the size of the
// fixed-point representation (32 or 16) and fbits is the value appearing
// in the assembly source, an integer in [0,16] or (0,32], depending on size.
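// For example, "vcvt.s32.f32 s0, s0, #8" (Size = 32, fbits = 8) is encoded
// with the value 32 - 8 = 24 in the fbits field.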
def fbits32_asm_operand : AsmOperandClass { let Name = "FBits32"; }
def fbits32 : Operand<i32> {
  let PrintMethod = "printFBits32";
  let ParserMatchClass = fbits32_asm_operand;
}

def fbits16_asm_operand : AsmOperandClass { let Name = "FBits16"; }
def fbits16 : Operand<i32> {
  let PrintMethod = "printFBits16";
  let ParserMatchClass = fbits16_asm_operand;
}

//===----------------------------------------------------------------------===//
// Load / store Instructions.
//

let canFoldAsLoad = 1, isReMaterializable = 1 in {

def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
                 IIC_fpLoad64, "vldr", "\t$Dd, $addr",
                 [(set DPR:$Dd, (f64 (alignedload32 addrmode5:$addr)))]>,
            Requires<[HasFPRegs]>;

def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
                 IIC_fpLoad32, "vldr", "\t$Sd, $addr",
                 [(set SPR:$Sd, (alignedload32 addrmode5:$addr))]>,
            Requires<[HasFPRegs]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

let isUnpredicable = 1 in
def VLDRH : AHI5<0b1101, 0b01, (outs HPR:$Sd), (ins addrmode5fp16:$addr),
                 IIC_fpLoad16, "vldr", ".16\t$Sd, $addr",
                 [(set HPR:$Sd, (f16 (alignedload16 addrmode5fp16:$addr)))]>,
            Requires<[HasFPRegs16]>;

} // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'

def : Pat<(bf16 (alignedload16 addrmode5fp16:$addr)),
          (VLDRH addrmode5fp16:$addr)> {
  let Predicates = [HasFPRegs16];
}
def : Pat<(bf16 (alignedload16 addrmode3:$addr)),
          (COPY_TO_REGCLASS (LDRH addrmode3:$addr), HPR)> {
  let Predicates = [HasNoFPRegs16, IsARM];
}
def : Pat<(bf16 (alignedload16 t2addrmode_imm12:$addr)),
          (COPY_TO_REGCLASS (t2LDRHi12 t2addrmode_imm12:$addr), HPR)> {
  let Predicates = [HasNoFPRegs16, IsThumb];
}

def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
                 IIC_fpStore64, "vstr", "\t$Dd, $addr",
                 [(alignedstore32 (f64 DPR:$Dd), addrmode5:$addr)]>,
            Requires<[HasFPRegs]>;

def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
                 IIC_fpStore32, "vstr", "\t$Sd, $addr",
                 [(alignedstore32 SPR:$Sd, addrmode5:$addr)]>,
            Requires<[HasFPRegs]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

let isUnpredicable = 1 in
def VSTRH : AHI5<0b1101, 0b00, (outs), (ins HPR:$Sd, addrmode5fp16:$addr),
                 IIC_fpStore16, "vstr", ".16\t$Sd, $addr",
                 [(alignedstore16 (f16 HPR:$Sd), addrmode5fp16:$addr)]>,
            Requires<[HasFPRegs16]>;

def : Pat<(alignedstore16 (bf16 HPR:$Sd), addrmode5fp16:$addr),
          (VSTRH (bf16 HPR:$Sd), addrmode5fp16:$addr)> {
  let Predicates = [HasFPRegs16];
}
def : Pat<(alignedstore16 (bf16 HPR:$Sd), addrmode3:$addr),
          (STRH (COPY_TO_REGCLASS $Sd, GPR), addrmode3:$addr)> {
  let Predicates = [HasNoFPRegs16, IsARM];
}
def : Pat<(alignedstore16 (bf16 HPR:$Sd), t2addrmode_imm12:$addr),
          (t2STRHi12 (COPY_TO_REGCLASS $Sd, GPR), t2addrmode_imm12:$addr)> {
  let Predicates = [HasNoFPRegs16, IsThumb];
}

//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
//

multiclass vfp_ldst_mult<string asm, bit L_bit,
                         InstrItinClass itin, InstrItinClass itin_upd> {
  let Predicates = [HasFPRegs] in {
  // Double Precision
  def DIA :
    AXDI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;  // Increment After
    let Inst{21}    = 0;     // No writeback
    let Inst{20}    = L_bit;
  }
  def DIA_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;  // Increment After
    let Inst{21}    = 1;     // Writeback
    let Inst{20}    = L_bit;
  }
  def DDB_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;  // Decrement Before
    let Inst{21}    = 1;     // Writeback
    let Inst{20}    = L_bit;
  }

  // Single Precision
  def SIA :
    AXSI4<(outs), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;  // Increment After
    let Inst{21}    = 0;     // No writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SIA_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;  // Increment After
    let Inst{21}    = 1;     // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SDB_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;  // Decrement Before
    let Inst{21}    = 1;     // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  }
}

let hasSideEffects = 0 in {

let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;

let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpStore_m, IIC_fpStore_mu>;

} // hasSideEffects

def : MnemonicAlias<"vldm", "vldmia">;
def : MnemonicAlias<"vstm", "vstmia">;


//===----------------------------------------------------------------------===//
// Lazy load / store multiple Instructions
//
def VLLDM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
                  NoItinerary, "vlldm${p}\t$Rn", "", []>,
            Requires<[HasV8MMainline, Has8MSecExt]> {
  let Inst{24-23} = 0b00;
  let Inst{22} = 0;
  let Inst{21} = 1;
  let Inst{20} = 1;
  let Inst{15-12} = 0;
  let Inst{7-0} = 0;
  let mayLoad = 1;
  let Defs = [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, VPR, FPSCR, FPSCR_NZCV];
}

def VLSTM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
                  NoItinerary, "vlstm${p}\t$Rn", "", []>,
            Requires<[HasV8MMainline, Has8MSecExt]> {
  let Inst{24-23} = 0b00;
  let Inst{22} = 0;
  let Inst{21} = 1;
  let Inst{20} = 0;
  let Inst{15-12} = 0;
  let Inst{7-0} = 0;
  let mayStore = 1;
}

def : InstAlias<"vpush${p} $r", (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r), 0>,
      Requires<[HasFPRegs]>;
def : InstAlias<"vpush${p} $r", (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r), 0>,
      Requires<[HasFPRegs]>;
def : InstAlias<"vpop${p} $r",  (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r), 0>,
      Requires<[HasFPRegs]>;
def : InstAlias<"vpop${p} $r",  (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r), 0>,
      Requires<[HasFPRegs]>;
defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
                         (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
                         (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
                         (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
                         (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>;

// FLDMX, FSTMX - Load and store multiple unknown precision registers for
// pre-armv6 cores.
// These instructions are deprecated so we don't want them to get selected.
// However, there is no UAL syntax for them, so we keep them around for
// (dis)assembly only.
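// (For reference, the assembler-only syntax looks like "fldmiax r2, {d0-d3}"
// or "fstmdbx sp!, {d8-d15}"; CodeGen never selects these forms.)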
multiclass vfp_ldstx_mult<string asm, bit L_bit> {
  let Predicates = [HasFPRegs], hasNoSchedulingInfo = 1 in {
  // Unknown precision
  def XIA :
    AXXI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, !strconcat(asm, "iax${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;  // Increment After
    let Inst{21}    = 0;     // No writeback
    let Inst{20}    = L_bit;
  }
  def XIA_UPD :
    AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeUpd, !strconcat(asm, "iax${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;  // Increment After
    let Inst{21}    = 1;     // Writeback
    let Inst{20}    = L_bit;
  }
  def XDB_UPD :
    AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeUpd, !strconcat(asm, "dbx${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;  // Decrement Before
    let Inst{21}    = 1;     // Writeback
    let Inst{20}    = L_bit;
  }
  }
}

defm FLDM : vfp_ldstx_mult<"fldm", 1>;
defm FSTM : vfp_ldstx_mult<"fstm", 0>;

def : VFP2MnemonicAlias<"fldmeax", "fldmdbx">;
def : VFP2MnemonicAlias<"fldmfdx", "fldmiax">;

def : VFP2MnemonicAlias<"fstmeax", "fstmiax">;
def : VFP2MnemonicAlias<"fstmfdx", "fstmdbx">;

//===----------------------------------------------------------------------===//
// FP Binary Operations.
//

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VADDD : ADbI<0b11100, 0b11, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                 IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>,
            Sched<[WriteFPALU64]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VADDS : ASbIn<0b11100, 0b11, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]>,
            Sched<[WriteFPALU32]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VADDH : AHbI<0b11100, 0b11, 0, 0,
                 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                 IIC_fpALU16, "vadd", ".f16\t$Sd, $Sn, $Sm",
                 [(set (f16 HPR:$Sd), (fadd (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
            Sched<[WriteFPALU32]>;

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VSUBD : ADbI<0b11100, 0b11, 1, 0,
                 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                 IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>,
            Sched<[WriteFPALU64]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VSUBS : ASbIn<0b11100, 0b11, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]>,
            Sched<[WriteFPALU32]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VSUBH : AHbI<0b11100, 0b11, 1, 0,
                 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                 IIC_fpALU16, "vsub", ".f16\t$Sd, $Sn, $Sm",
                 [(set (f16 HPR:$Sd), (fsub (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
            Sched<[WriteFPALU32]>;

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VDIVD : ADbI<0b11101, 0b00, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                 IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>,
            Sched<[WriteFPDIV64]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VDIVS : ASbI<0b11101, 0b00, 0, 0,
                 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                 IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
                 [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>,
            Sched<[WriteFPDIV32]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VDIVH : AHbI<0b11101, 0b00, 0, 0,
                 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                 IIC_fpDIV16, "vdiv", ".f16\t$Sd, $Sn, $Sm",
                 [(set (f16 HPR:$Sd), (fdiv (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
            Sched<[WriteFPDIV32]>;

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VMULD : ADbI<0b11100, 0b10, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                 IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>,
            Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VMULS : ASbIn<0b11100, 0b10, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]>,
            Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VMULH : AHbI<0b11100, 0b10, 0, 0,
                 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                 IIC_fpMUL16, "vmul", ".f16\t$Sd, $Sn, $Sm",
                 [(set (f16 HPR:$Sd), (fmul (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
            Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;

def VNMULD : ADbI<0b11100, 0b10, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>,
             Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;

def VNMULS : ASbI<0b11100, 0b10, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]>,
             Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VNMULH : AHbI<0b11100, 0b10, 1, 0,
                  (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                  IIC_fpMUL16, "vnmul", ".f16\t$Sd, $Sn, $Sm",
                  [(set (f16 HPR:$Sd), (fneg (fmul (f16 HPR:$Sn), (f16 HPR:$Sm))))]>,
             Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;

multiclass vsel_inst<string op, bits<2> opc, int CC> {
  let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
      Uses = [CPSR], AddedComplexity = 4, isUnpredicable = 1 in {
    def H : AHbInp<0b11100, opc, 0,
                   (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                   NoItinerary, !strconcat("vsel", op, ".f16\t$Sd, $Sn, $Sm"),
                   [(set (f16 HPR:$Sd), (ARMcmov (f16 HPR:$Sm), (f16 HPR:$Sn), CC))]>,
                   Requires<[HasFullFP16]>;

    def S : ASbInp<0b11100, opc, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   NoItinerary, !strconcat("vsel", op, ".f32\t$Sd, $Sn, $Sm"),
                   [(set SPR:$Sd, (ARMcmov SPR:$Sm, SPR:$Sn, CC))]>,
                   Requires<[HasFPARMv8]>;

    def D : ADbInp<0b11100, opc, 0,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   NoItinerary, !strconcat("vsel", op, ".f64\t$Dd, $Dn, $Dm"),
                   [(set DPR:$Dd, (ARMcmov (f64 DPR:$Dm), (f64 DPR:$Dn), CC))]>,
                   Requires<[HasFPARMv8, HasDPVFP]>;
  }
}

// The CC constants here match ARMCC::CondCodes.
defm VSELGT : vsel_inst<"gt", 0b11, 12>;
defm VSELGE : vsel_inst<"ge", 0b10, 10>;
defm VSELEQ : vsel_inst<"eq", 0b00, 0>;
defm VSELVS : vsel_inst<"vs", 0b01, 6>;

multiclass vmaxmin_inst<string op, bit opc, SDNode SD> {
  let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
      isUnpredicable = 1 in {
    def H : AHbInp<0b11101, 0b00, opc,
                   (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                   NoItinerary, !strconcat(op, ".f16\t$Sd, $Sn, $Sm"),
                   [(set (f16 HPR:$Sd), (SD (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
                   Requires<[HasFullFP16]>;

    def S : ASbInp<0b11101, 0b00, opc,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   NoItinerary, !strconcat(op, ".f32\t$Sd, $Sn, $Sm"),
                   [(set SPR:$Sd, (SD SPR:$Sn, SPR:$Sm))]>,
                   Requires<[HasFPARMv8]>;

    def D : ADbInp<0b11101, 0b00, opc,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   NoItinerary, !strconcat(op, ".f64\t$Dd, $Dn, $Dm"),
                   [(set DPR:$Dd, (f64 (SD (f64 DPR:$Dn), (f64 DPR:$Dm))))]>,
                   Requires<[HasFPARMv8, HasDPVFP]>;
  }
}

defm VFP_VMAXNM : vmaxmin_inst<"vmaxnm", 0, fmaxnum>;
defm VFP_VMINNM : vmaxmin_inst<"vminnm", 1, fminnum>;

// Match reassociated forms only if not sign dependent rounding.
def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
          (VNMULD DPR:$a, DPR:$b)>,
      Requires<[NoHonorSignDependentRounding,HasDPVFP]>;
def : Pat<(fmul (fneg SPR:$a), SPR:$b),
          (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;

// These are encoded as unary instructions.
let Defs = [FPSCR_NZCV] in {
def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins DPR:$Dd, DPR:$Dm),
                  IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
                  [(arm_cmpfpe DPR:$Dd, (f64 DPR:$Dm))]>;

def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins SPR:$Sd, SPR:$Sm),
                  IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm",
                  [(arm_cmpfpe SPR:$Sd, SPR:$Sm)]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPEH : AHuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins HPR:$Sd, HPR:$Sm),
                  IIC_fpCMP16, "vcmpe", ".f16\t$Sd, $Sm",
                  [(arm_cmpfpe (f16 HPR:$Sd), (f16 HPR:$Sm))]>;

def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
                 (outs), (ins DPR:$Dd, DPR:$Dm),
                 IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm",
                 [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm))]>;

def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
                 (outs), (ins SPR:$Sd, SPR:$Sm),
                 IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm",
                 [(arm_cmpfp SPR:$Sd, SPR:$Sm)]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPH : AHuI<0b11101, 0b11, 0b0100, 0b01, 0,
                 (outs), (ins HPR:$Sd, HPR:$Sm),
                 IIC_fpCMP16, "vcmp", ".f16\t$Sd, $Sm",
                 [(arm_cmpfp (f16 HPR:$Sd), (f16 HPR:$Sm))]>;
} // Defs = [FPSCR_NZCV]

//===----------------------------------------------------------------------===//
// FP Unary Operations.
//

def VABSD : ADuI<0b11101, 0b11, 0b0000, 0b11, 0,
                 (outs DPR:$Dd), (ins DPR:$Dm),
                 IIC_fpUNA64, "vabs", ".f64\t$Dd, $Dm",
                 [(set DPR:$Dd, (fabs (f64 DPR:$Dm)))]>;

def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpUNA32, "vabs", ".f32\t$Sd, $Sm",
                  [(set SPR:$Sd, (fabs SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VABSH : AHuI<0b11101, 0b11, 0b0000, 0b11, 0,
                 (outs HPR:$Sd), (ins HPR:$Sm),
                 IIC_fpUNA16, "vabs", ".f16\t$Sd, $Sm",
                 [(set (f16 HPR:$Sd), (fabs (f16 HPR:$Sm)))]>;

let Defs = [FPSCR_NZCV] in {
def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins DPR:$Dd),
                   IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0",
                   [(arm_cmpfpe0 (f64 DPR:$Dd))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins SPR:$Sd),
                   IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0",
                   [(arm_cmpfpe0 SPR:$Sd)]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPEZH : AHuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins HPR:$Sd),
                   IIC_fpCMP16, "vcmpe", ".f16\t$Sd, #0",
                   [(arm_cmpfpe0 (f16 HPR:$Sd))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
                  (outs), (ins DPR:$Dd),
                  IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0",
                  [(arm_cmpfp0 (f64 DPR:$Dd))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
                  (outs), (ins SPR:$Sd),
                  IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0",
                  [(arm_cmpfp0 SPR:$Sd)]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPZH : AHuI<0b11101, 0b11, 0b0101, 0b01, 0,
                  (outs), (ins HPR:$Sd),
                  IIC_fpCMP16, "vcmp", ".f16\t$Sd, #0",
                  [(arm_cmpfp0 (f16 HPR:$Sd))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}
} // Defs = [FPSCR_NZCV]

def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
                  (outs DPR:$Dd), (ins SPR:$Sm),
                  IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm",
                  [(set DPR:$Dd, (fpextend SPR:$Sm))]>,
             Sched<[WriteFPCVT]> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};

  let Predicates = [HasVFP2, HasDPVFP];
  let hasSideEffects = 0;
}

// Special case encoding: bits 11-8 is 0b1011.
def VCVTSD : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
                   IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm",
                   [(set SPR:$Sd, (fpround DPR:$Dm))]>,
             Sched<[WriteFPCVT]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let Inst{27-23} = 0b11101;
  let Inst{21-16} = 0b110111;
  let Inst{11-8}  = 0b1011;
  let Inst{7-6}   = 0b11;
  let Inst{4}     = 0;

  let Predicates = [HasVFP2, HasDPVFP];
  let hasSideEffects = 0;
}

// Between half, single and double-precision.
let hasSideEffects = 0 in
def VCVTBHS: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                  /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$Sd, $Sm",
                  [/* Intentionally left blank, see patterns below */]>,
                  Requires<[HasFP16]>,
                  Sched<[WriteFPCVT]>;

def : FP16Pat<(f32 (fpextend (f16 HPR:$Sm))),
              (VCVTBHS (COPY_TO_REGCLASS (f16 HPR:$Sm), SPR))>;
def : FP16Pat<(f16_to_fp GPR:$a),
              (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;

let hasSideEffects = 0 in
def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                  /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$Sd, $Sm",
                  [/* Intentionally left blank, see patterns below */]>,
                  Requires<[HasFP16]>,
                  Sched<[WriteFPCVT]>;

def : FP16Pat<(f16 (fpround SPR:$Sm)),
              (COPY_TO_REGCLASS (VCVTBSH SPR:$Sm), HPR)>;
def : FP16Pat<(fp_to_f16 SPR:$a),
              (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;
def : FP16Pat<(insertelt (v8f16 MQPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_even:$lane),
              (v8f16 (INSERT_SUBREG (v8f16 MQPR:$src1), (VCVTBSH SPR:$src2),
                                    (SSubReg_f16_reg imm:$lane)))>;
def : FP16Pat<(insertelt (v4f16 DPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_even:$lane),
              (v4f16 (INSERT_SUBREG (v4f16 DPR:$src1), (VCVTBSH SPR:$src2),
                                    (SSubReg_f16_reg imm:$lane)))>;

let hasSideEffects = 0 in
def VCVTTHS: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                  /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$Sd, $Sm",
                  [/* Intentionally left blank, see patterns below */]>,
                  Requires<[HasFP16]>,
                  Sched<[WriteFPCVT]>;

def : FP16Pat<(f32 (fpextend (extractelt (v8f16 MQPR:$src), imm_odd:$lane))),
              (VCVTTHS (EXTRACT_SUBREG MQPR:$src, (SSubReg_f16_reg imm_odd:$lane)))>;
def : FP16Pat<(f32 (fpextend (extractelt (v4f16 DPR:$src), imm_odd:$lane))),
              (VCVTTHS (EXTRACT_SUBREG
                (v2f32 (COPY_TO_REGCLASS (v4f16 DPR:$src), DPR_VFP2)),
                (SSubReg_f16_reg imm_odd:$lane)))>;

let hasSideEffects = 0 in
def VCVTTSH:
    ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
         /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$Sd, $Sm",
         [/* Intentionally left blank, see patterns below */]>,
         Requires<[HasFP16]>,
         Sched<[WriteFPCVT]>;

def : FP16Pat<(insertelt (v8f16 MQPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_odd:$lane),
              (v8f16 (INSERT_SUBREG (v8f16 MQPR:$src1), (VCVTTSH SPR:$src2),
                                    (SSubReg_f16_reg imm:$lane)))>;
def : FP16Pat<(insertelt (v4f16 DPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_odd:$lane),
              (v4f16 (INSERT_SUBREG (v4f16 DPR:$src1), (VCVTTSH SPR:$src2),
                                    (SSubReg_f16_reg imm:$lane)))>;

def VCVTBHD : ADuI<0b11101, 0b11, 0b0010, 0b01, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   NoItinerary, "vcvtb", ".f64.f16\t$Dd, $Sm",
                   [/* Intentionally left blank, see patterns below */]>,
                   Requires<[HasFPARMv8, HasDPVFP]>,
                   Sched<[WriteFPCVT]> {
  // Instruction operands.
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5}   = Sm{0};

  let hasSideEffects = 0;
}

def : FullFP16Pat<(f64 (fpextend (f16 HPR:$Sm))),
                  (VCVTBHD (COPY_TO_REGCLASS (f16 HPR:$Sm), SPR))>,
                  Requires<[HasFPARMv8, HasDPVFP]>;
def : FP16Pat<(f64 (f16_to_fp GPR:$a)),
              (VCVTBHD (COPY_TO_REGCLASS GPR:$a, SPR))>,
              Requires<[HasFPARMv8, HasDPVFP]>;

def VCVTBDH : ADuI<0b11101, 0b11, 0b0011, 0b01, 0,
                   (outs SPR:$Sd), (ins DPR:$Dm),
                   NoItinerary, "vcvtb", ".f16.f64\t$Sd, $Dm",
                   [/* Intentionally left blank, see patterns below */]>,
                   Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let hasSideEffects = 0;
}

def : FullFP16Pat<(f16 (fpround DPR:$Dm)),
                  (COPY_TO_REGCLASS (VCVTBDH DPR:$Dm), HPR)>,
                  Requires<[HasFPARMv8, HasDPVFP]>;
def : FP16Pat<(fp_to_f16 (f64 DPR:$a)),
              (i32 (COPY_TO_REGCLASS (VCVTBDH DPR:$a), GPR))>,
              Requires<[HasFPARMv8, HasDPVFP]>;

def VCVTTHD : ADuI<0b11101, 0b11, 0b0010, 0b11, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   NoItinerary, "vcvtt", ".f64.f16\t$Dd, $Sm",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5}   = Sm{0};

  let hasSideEffects = 0;
}

def VCVTTDH : ADuI<0b11101, 0b11, 0b0011, 0b11, 0,
                   (outs SPR:$Sd), (ins DPR:$Dm),
                   NoItinerary, "vcvtt", ".f16.f64\t$Sd, $Dm",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};

  let hasSideEffects = 0;
}

multiclass vcvt_inst<string opc, bits<2> rm,
                     SDPatternOperator node = null_frag> {
  let PostEncoderMethod = "", DecoderNamespace = "VFPV8", hasSideEffects = 0 in {
    def SH : AHuInp<0b11101, 0b11, 0b1100, 0b11, 0,
                    (outs SPR:$Sd), (ins HPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".s32.f16\t$Sd, $Sm"),
                    []>,
                    Requires<[HasFullFP16]> {
      let Inst{17-16} = rm;
    }

    def UH : AHuInp<0b11101, 0b11, 0b1100, 0b01, 0,
                    (outs SPR:$Sd), (ins HPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".u32.f16\t$Sd, $Sm"),
                    []>,
                    Requires<[HasFullFP16]> {
      let Inst{17-16} = rm;
    }

    def SS : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
                    (outs SPR:$Sd), (ins SPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".s32.f32\t$Sd, $Sm"),
                    []>,
                    Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }

    def US : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
                    (outs SPR:$Sd), (ins SPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".u32.f32\t$Sd, $Sm"),
                    []>,
                    Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }

    def SD : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
                    (outs SPR:$Sd), (ins DPR:$Dm),
                    NoItinerary, !strconcat("vcvt", opc, ".s32.f64\t$Sd, $Dm"),
                    []>,
                    Requires<[HasFPARMv8, HasDPVFP]> {
      bits<5> Dm;

      let Inst{17-16} = rm;

      // Encode instruction operands.
      let Inst{3-0} = Dm{3-0};
      let Inst{5}   = Dm{4};
      let Inst{8}   = 1;
    }

    def UD : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
                    (outs SPR:$Sd), (ins DPR:$Dm),
                    NoItinerary, !strconcat("vcvt", opc, ".u32.f64\t$Sd, $Dm"),
                    []>,
                    Requires<[HasFPARMv8, HasDPVFP]> {
      bits<5> Dm;

      let Inst{17-16} = rm;

      // Encode instruction operands
      let Inst{3-0} = Dm{3-0};
      let Inst{5}   = Dm{4};
      let Inst{8}   = 1;
    }
  }

  let Predicates = [HasFPARMv8] in {
    let Predicates = [HasFullFP16] in {
    def : Pat<(i32 (fp_to_sint (node (f16 HPR:$a)))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"SH") (f16 HPR:$a)),
                GPR)>;

    def : Pat<(i32 (fp_to_uint (node (f16 HPR:$a)))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"UH") (f16 HPR:$a)),
                GPR)>;
    }
    def : Pat<(i32 (fp_to_sint (node SPR:$a))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"SS") SPR:$a),
                GPR)>;
    def : Pat<(i32 (fp_to_uint (node SPR:$a))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"US") SPR:$a),
                GPR)>;
  }
  let Predicates = [HasFPARMv8, HasDPVFP] in {
    def : Pat<(i32 (fp_to_sint (node (f64 DPR:$a)))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"SD") DPR:$a),
                GPR)>;
    def : Pat<(i32 (fp_to_uint (node (f64 DPR:$a)))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"UD") DPR:$a),
                GPR)>;
  }
}

defm VCVTA : vcvt_inst<"a", 0b00, fround>;
defm VCVTN : vcvt_inst<"n", 0b01>;
defm VCVTP : vcvt_inst<"p", 0b10, fceil>;
defm VCVTM : vcvt_inst<"m", 0b11, ffloor>;

def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
                 (outs DPR:$Dd), (ins DPR:$Dm),
                 IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
                 [(set DPR:$Dd, (fneg (f64 DPR:$Dm)))]>;

def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
                  [(set SPR:$Sd, (fneg SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VNEGH : AHuI<0b11101, 0b11, 0b0001, 0b01, 0,
                 (outs HPR:$Sd), (ins HPR:$Sm),
                 IIC_fpUNA16, "vneg", ".f16\t$Sd, $Sm",
                 [(set (f16 HPR:$Sd), (fneg (f16 HPR:$Sm)))]>;

multiclass vrint_inst_zrx<string opc, bit op, bit op2, SDPatternOperator node> {
  def H : AHuI<0b11101, 0b11, 0b0110, 0b11, 0,
               (outs HPR:$Sd), (ins HPR:$Sm),
               NoItinerary, !strconcat("vrint", opc), ".f16\t$Sd, $Sm",
               [(set (f16 HPR:$Sd), (node (f16 HPR:$Sm)))]>,
               Requires<[HasFullFP16]> {
    let Inst{7}  = op2;
    let Inst{16} = op;
  }

  def S : ASuI<0b11101, 0b11, 0b0110, 0b11, 0,
               (outs SPR:$Sd), (ins SPR:$Sm),
               NoItinerary, !strconcat("vrint", opc), ".f32\t$Sd, $Sm",
               [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
               Requires<[HasFPARMv8]> {
    let Inst{7}  = op2;
    let Inst{16} = op;
  }
  def D : ADuI<0b11101, 0b11, 0b0110, 0b11, 0,
               (outs DPR:$Dd), (ins DPR:$Dm),
               NoItinerary, !strconcat("vrint", opc), ".f64\t$Dd, $Dm",
               [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
               Requires<[HasFPARMv8, HasDPVFP]> {
    let Inst{7}  = op2;
    let Inst{16} = op;
  }

  def : InstAlias<!strconcat("vrint", opc, "$p.f16.f16\t$Sd, $Sm"),
                  (!cast<Instruction>(NAME#"H") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
        Requires<[HasFullFP16]>;
  def : InstAlias<!strconcat("vrint", opc, "$p.f32.f32\t$Sd, $Sm"),
                  (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
        Requires<[HasFPARMv8]>;
  def : InstAlias<!strconcat("vrint", opc, "$p.f64.f64\t$Dd, $Dm"),
                  (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm, pred:$p), 0>,
        Requires<[HasFPARMv8,HasDPVFP]>;
}

defm VRINTZ : vrint_inst_zrx<"z", 0, 1, ftrunc>;
defm VRINTR : vrint_inst_zrx<"r", 0, 0, fnearbyint>;
defm VRINTX : vrint_inst_zrx<"x", 1, 0, frint>;

multiclass vrint_inst_anpm<string opc, bits<2> rm,
                           SDPatternOperator node = null_frag> {
  let PostEncoderMethod = "", DecoderNamespace = "VFPV8",
      isUnpredicable = 1 in {
    def H : AHuInp<0b11101, 0b11, 0b1000, 0b01, 0,
                   (outs HPR:$Sd), (ins HPR:$Sm),
                   NoItinerary, !strconcat("vrint", opc, ".f16\t$Sd, $Sm"),
                   [(set (f16 HPR:$Sd), (node (f16 HPR:$Sm)))]>,
                   Requires<[HasFullFP16]> {
      let Inst{17-16} = rm;
    }
    def S : ASuInp<0b11101, 0b11, 0b1000, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   NoItinerary, !strconcat("vrint", opc, ".f32\t$Sd, $Sm"),
                   [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
                   Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }
    def D : ADuInp<0b11101, 0b11, 0b1000, 0b01, 0,
                   (outs DPR:$Dd), (ins DPR:$Dm),
                   NoItinerary, !strconcat("vrint", opc, ".f64\t$Dd, $Dm"),
                   [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
                   Requires<[HasFPARMv8, HasDPVFP]> {
      let Inst{17-16} = rm;
    }
  }

  def : InstAlias<!strconcat("vrint", opc, ".f32.f32\t$Sd, $Sm"),
                  (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm), 0>,
        Requires<[HasFPARMv8]>;
  def : InstAlias<!strconcat("vrint", opc, ".f64.f64\t$Dd, $Dm"),
                  (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm), 0>,
        Requires<[HasFPARMv8,HasDPVFP]>;
}

defm VRINTA : vrint_inst_anpm<"a", 0b00, fround>;
defm VRINTN : vrint_inst_anpm<"n", 0b01, int_arm_neon_vrintn>;
defm VRINTP : vrint_inst_anpm<"p", 0b10, fceil>;
defm VRINTM : vrint_inst_anpm<"m", 0b11, ffloor>;

def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>,
             Sched<[WriteFPSQRT64]>;

def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm",
                  [(set SPR:$Sd, (fsqrt SPR:$Sm))]>,
             Sched<[WriteFPSQRT32]>;

def VSQRTH : AHuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs HPR:$Sd), (ins HPR:$Sm),
                  IIC_fpSQRT16, "vsqrt", ".f16\t$Sd, $Sm",
                  [(set (f16 HPR:$Sd), (fsqrt (f16 HPR:$Sm)))]>;

let hasSideEffects = 0 in {
let isMoveReg = 1 in {
def VMOVD : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
                 (outs DPR:$Dd), (ins DPR:$Dm),
                 IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", []>,
            Requires<[HasFPRegs64]>;

def VMOVS : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
                 (outs SPR:$Sd), (ins SPR:$Sm),
                 IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", []>,
            Requires<[HasFPRegs]>;
} // isMoveReg

let PostEncoderMethod = "", DecoderNamespace = "VFPV8", isUnpredicable = 1 in {
def VMOVH : ASuInp<0b11101, 0b11, 0b0000, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   IIC_fpUNA16, "vmovx.f16\t$Sd, $Sm", []>,
            Requires<[HasFullFP16]>;

def VINSH : ASuInp<0b11101, 0b11, 0b0000, 0b11, 0,
                   (outs SPR:$Sd), (ins SPR:$Sda, SPR:$Sm),
                   IIC_fpUNA16, "vins.f16\t$Sd, $Sm", []>,
            Requires<[HasFullFP16]> {
  let Constraints = "$Sd = $Sda";
}

} // PostEncoderMethod
} // hasSideEffects

//===----------------------------------------------------------------------===//
// FP <-> GPR Copies. Int <-> FP Conversions.
//

let isMoveReg = 1 in {
def VMOVRS : AVConv2I<0b11100001, 0b1010,
                      (outs GPR:$Rt), (ins SPR:$Sn),
                      IIC_fpMOVSI, "vmov", "\t$Rt, $Sn",
                      [(set GPR:$Rt, (bitconvert SPR:$Sn))]>,
             Requires<[HasFPRegs]>,
             Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<4> Rt;
  bits<5> Sn;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

// Bitcast i32 -> f32. NEON prefers to use VMOVDRR.
def VMOVSR : AVConv4I<0b11100000, 0b1010,
                      (outs SPR:$Sn), (ins GPR:$Rt),
                      IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
                      [(set SPR:$Sn, (bitconvert GPR:$Rt))]>,
             Requires<[HasFPRegs, UseVMOVSR]>,
             Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<5> Sn;
  bits<4> Rt;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}
} // isMoveReg
def : Pat<(arm_vmovsr GPR:$Rt), (VMOVSR GPR:$Rt)>, Requires<[HasVFP2, UseVMOVSR]>;

let hasSideEffects = 0 in {
def VMOVRRD : AVConv3I<0b11000101, 0b1011,
                       (outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
                       IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
                       [(set GPR:$Rt, GPR:$Rt2, (arm_fmrrd DPR:$Dm))]>,
              Requires<[HasFPRegs]>,
              Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<5> Dm;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;

  // This instruction is equivalent to
  // $Rt = EXTRACT_SUBREG $Dm, ssub_0
  // $Rt2 = EXTRACT_SUBREG $Dm, ssub_1
  let isExtractSubreg = 1;
}

def VMOVRRS : AVConv3I<0b11000101, 0b1010,
                       (outs GPR:$Rt, GPR:$Rt2), (ins SPR:$src1, SPR:$src2),
                       IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $src1, $src2",
                       [/* For disassembly only; pattern left blank */]>,
              Requires<[HasFPRegs]>,
              Sched<[WriteFPMOV]> {
  bits<5> src1;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = src1{4-1};
  let Inst{5}     = src1{0};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
  let DecoderMethod = "DecodeVMOVRRS";
}
} // hasSideEffects

// FMDHR: GPR -> SPR
// FMDLR: GPR -> SPR

def VMOVDRR : AVConv5I<0b11000100, 0b1011,
                       (outs DPR:$Dm), (ins GPR:$Rt, GPR:$Rt2),
                       IIC_fpMOVID, "vmov", "\t$Dm, $Rt, $Rt2",
                       [(set DPR:$Dm, (arm_fmdrr GPR:$Rt, GPR:$Rt2))]>,
              Requires<[HasFPRegs]>,
              Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<5> Dm;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6}   = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;

  // This instruction is equivalent to
  // $Dm = REG_SEQUENCE $Rt, ssub_0, $Rt2, ssub_1
  let isRegSequence = 1;
}

// Hoist an fabs or a fneg of a value coming from integer registers
// and do the fabs/fneg on the integer value. This is never a loss
// and could enable the conversion to float to be removed completely.
def : Pat<(fabs (arm_fmdrr GPR:$Rl, GPR:$Rh)),
          (VMOVDRR GPR:$Rl, (BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
      Requires<[IsARM, HasV6T2]>;
def : Pat<(fabs (arm_fmdrr GPR:$Rl, GPR:$Rh)),
          (VMOVDRR GPR:$Rl, (t2BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
      Requires<[IsThumb2, HasV6T2]>;
def : Pat<(fneg (arm_fmdrr GPR:$Rl, GPR:$Rh)),
          (VMOVDRR GPR:$Rl, (EORri GPR:$Rh, (i32 0x80000000)))>,
      Requires<[IsARM]>;
def : Pat<(fneg (arm_fmdrr GPR:$Rl, GPR:$Rh)),
          (VMOVDRR GPR:$Rl, (t2EORri GPR:$Rh, (i32 0x80000000)))>,
      Requires<[IsThumb2]>;

let hasSideEffects = 0 in
def VMOVSRR : AVConv5I<0b11000100, 0b1010,
                       (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
                       IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
                       [/* For disassembly only; pattern left blank */]>,
              Requires<[HasFPRegs]>,
              Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<5> dst1;
  bits<4> src1;
  bits<4> src2;

  // Encode instruction operands.
  let Inst{3-0}   = dst1{4-1};
  let Inst{5}     = dst1{0};
  let Inst{15-12} = src1;
  let Inst{19-16} = src2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;

  let DecoderMethod = "DecodeVMOVSRR";
}

// Move H->R, clearing top 16 bits
def VMOVRH : AVConv2I<0b11100001, 0b1001,
                      (outs rGPR:$Rt), (ins HPR:$Sn),
                      IIC_fpMOVSI, "vmov", ".f16\t$Rt, $Sn",
                      []>,
             Requires<[HasFPRegs16]>,
             Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<4> Rt;
  bits<5> Sn;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;

  let isUnpredicable = 1;
}

// Move R->H, clearing top 16 bits
def VMOVHR : AVConv4I<0b11100000, 0b1001,
                      (outs HPR:$Sn), (ins rGPR:$Rt),
                      IIC_fpMOVIS, "vmov", ".f16\t$Sn, $Rt",
                      []>,
             Requires<[HasFPRegs16]>,
             Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<5> Sn;
  bits<4> Rt;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;

  let isUnpredicable = 1;
}

def : FPRegs16Pat<(arm_vmovrh (f16 HPR:$Sn)), (VMOVRH (f16 HPR:$Sn))>;
def : FPRegs16Pat<(arm_vmovrh (bf16 HPR:$Sn)), (VMOVRH (bf16 HPR:$Sn))>;
def : FPRegs16Pat<(f16 (arm_vmovhr rGPR:$Rt)), (VMOVHR rGPR:$Rt)>;
def : FPRegs16Pat<(bf16 (arm_vmovhr rGPR:$Rt)), (VMOVHR rGPR:$Rt)>;

// FMRDH: SPR -> GPR
// FMRDL: SPR -> GPR
// FMRRS: SPR -> GPR
// FMRX:  SPR system reg -> GPR
// FMSRR: GPR -> SPR
// FMXR:  GPR -> VFP system reg


// Int -> FP:

class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};

  let Predicates = [HasVFP2, HasDPVFP];
  let hasSideEffects = 0;
}

class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                         bits<4> opcod4, dag oops, dag iops, InstrItinClass itin,
                         string opc, string asm, list<dag> pattern>
  : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
              pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let hasSideEffects = 0;
}

class AVConv1IHs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let Predicates = [HasFullFP16];
  let hasSideEffects = 0;
}

def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
                               (outs DPR:$Dd), (ins SPR:$Sm),
                               IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
                               []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // s32
}

let Predicates=[HasVFP2, HasDPVFP] in {
  def : VFPPat<(f64 (sint_to_fp GPR:$a)),
               (VSITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;

  def : VFPPat<(f64 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
               (VSITOD (VLDRS addrmode5:$a))>;
}

def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
                                (outs SPR:$Sd), (ins SPR:$Sm),
                                IIC_fpCVTIS, "vcvt", ".f32.s32\t$Sd, $Sm",
                                []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // s32

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : VFPNoNEONPat<(f32 (sint_to_fp GPR:$a)),
                   (VSITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;

def : VFPNoNEONPat<(f32 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
                   (VSITOS (VLDRS addrmode5:$a))>;

def VSITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
                               (outs HPR:$Sd), (ins SPR:$Sm),
                               IIC_fpCVTIH, "vcvt", ".f16.s32\t$Sd, $Sm",
                               []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // s32
  let isUnpredicable = 1;
}

def : VFPNoNEONPat<(f16 (sint_to_fp GPR:$a)),
                   (VSITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;

def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
                               (outs DPR:$Dd), (ins SPR:$Sm),
                               IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
                               []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // u32
}

let Predicates=[HasVFP2, HasDPVFP] in {
  def : VFPPat<(f64 (uint_to_fp GPR:$a)),
               (VUITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;

  def : VFPPat<(f64 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
               (VUITOD (VLDRS addrmode5:$a))>;
}

def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
                                (outs SPR:$Sd), (ins SPR:$Sm),
                                IIC_fpCVTIS, "vcvt", ".f32.u32\t$Sd, $Sm",
                                []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // u32

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : VFPNoNEONPat<(f32 (uint_to_fp GPR:$a)),
                   (VUITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;

def : VFPNoNEONPat<(f32 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
                   (VUITOS (VLDRS addrmode5:$a))>;

def VUITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
                               (outs HPR:$Sd), (ins SPR:$Sm),
                               IIC_fpCVTIH, "vcvt", ".f16.u32\t$Sd, $Sm",
                               []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // u32
  let isUnpredicable = 1;
}

def : VFPNoNEONPat<(f16 (uint_to_fp GPR:$a)),
                   (VUITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;

// FP -> Int:

class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let Predicates = [HasVFP2, HasDPVFP];
  let hasSideEffects = 0;
}

class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                         bits<4> opcod4, dag oops, dag iops,
                         InstrItinClass itin, string opc, string asm,
                         list<dag> pattern>
  : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
              pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let hasSideEffects = 0;
}

class AVConv1IsH_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let Predicates = [HasFullFP16];
  let hasSideEffects = 0;
}

// Always set Z bit in the instruction, i.e. "round towards zero" variants.
def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
                                []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // Z bit
}

let Predicates=[HasVFP2, HasDPVFP] in {
  def : VFPPat<(i32 (fp_to_sint (f64 DPR:$a))),
               (COPY_TO_REGCLASS (VTOSIZD DPR:$a), GPR)>;

  def : VFPPat<(alignedstore32 (i32 (fp_to_sint (f64 DPR:$a))), addrmode5:$ptr),
               (VSTRS (VTOSIZD DPR:$a), addrmode5:$ptr)>;
}

def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
                                 []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // Z bit

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : VFPNoNEONPat<(i32 (fp_to_sint SPR:$a)),
                   (COPY_TO_REGCLASS (VTOSIZS SPR:$a), GPR)>;

def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_sint (f32 SPR:$a))),
                                   addrmode5:$ptr),
                   (VSTRS (VTOSIZS SPR:$a), addrmode5:$ptr)>;

def VTOSIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
                                (outs SPR:$Sd), (ins HPR:$Sm),
                                IIC_fpCVTHI, "vcvt", ".s32.f16\t$Sd, $Sm",
                                []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // Z bit
  let isUnpredicable = 1;
}

def : VFPNoNEONPat<(i32 (fp_to_sint (f16 HPR:$a))),
                   (COPY_TO_REGCLASS (VTOSIZH (f16 HPR:$a)), GPR)>;

def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
                                []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // Z bit
}

let Predicates=[HasVFP2, HasDPVFP] in {
  def : VFPPat<(i32 (fp_to_uint (f64 DPR:$a))),
               (COPY_TO_REGCLASS (VTOUIZD DPR:$a), GPR)>;

  def : VFPPat<(alignedstore32 (i32 (fp_to_uint (f64 DPR:$a))), addrmode5:$ptr),
               (VSTRS (VTOUIZD DPR:$a), addrmode5:$ptr)>;
}

def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
                                 []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // Z bit

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : VFPNoNEONPat<(i32 (fp_to_uint SPR:$a)),
                   (COPY_TO_REGCLASS (VTOUIZS SPR:$a), GPR)>;

def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_uint (f32 SPR:$a))),
                                   addrmode5:$ptr),
                   (VSTRS (VTOUIZS SPR:$a), addrmode5:$ptr)>;

def VTOUIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
                                (outs SPR:$Sd), (ins HPR:$Sm),
                                IIC_fpCVTHI, "vcvt", ".u32.f16\t$Sd, $Sm",
                                []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // Z bit
  let isUnpredicable = 1;
}

def : VFPNoNEONPat<(i32 (fp_to_uint (f16 HPR:$a))),
                   (COPY_TO_REGCLASS (VTOUIZH (f16 HPR:$a)), GPR)>;

// And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
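// (VCVTR honours the current FPSCR rounding mode instead of always truncating,
// which is why these variants list FPSCR in their Uses below.)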
1687let Uses = [FPSCR] in { 1688def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011, 1689 (outs SPR:$Sd), (ins DPR:$Dm), 1690 IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm", 1691 [(set SPR:$Sd, (int_arm_vcvtr (f64 DPR:$Dm)))]>, 1692 Sched<[WriteFPCVT]> { 1693 let Inst{7} = 0; // Z bit 1694} 1695 1696def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010, 1697 (outs SPR:$Sd), (ins SPR:$Sm), 1698 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$Sd, $Sm", 1699 [(set SPR:$Sd, (int_arm_vcvtr SPR:$Sm))]>, 1700 Sched<[WriteFPCVT]> { 1701 let Inst{7} = 0; // Z bit 1702} 1703 1704def VTOSIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001, 1705 (outs SPR:$Sd), (ins SPR:$Sm), 1706 IIC_fpCVTHI, "vcvtr", ".s32.f16\t$Sd, $Sm", 1707 []>, 1708 Sched<[WriteFPCVT]> { 1709 let Inst{7} = 0; // Z bit 1710 let isUnpredicable = 1; 1711} 1712 1713def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011, 1714 (outs SPR:$Sd), (ins DPR:$Dm), 1715 IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm", 1716 [(set SPR:$Sd, (int_arm_vcvtru(f64 DPR:$Dm)))]>, 1717 Sched<[WriteFPCVT]> { 1718 let Inst{7} = 0; // Z bit 1719} 1720 1721def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010, 1722 (outs SPR:$Sd), (ins SPR:$Sm), 1723 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$Sd, $Sm", 1724 [(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]>, 1725 Sched<[WriteFPCVT]> { 1726 let Inst{7} = 0; // Z bit 1727} 1728 1729def VTOUIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001, 1730 (outs SPR:$Sd), (ins SPR:$Sm), 1731 IIC_fpCVTHI, "vcvtr", ".u32.f16\t$Sd, $Sm", 1732 []>, 1733 Sched<[WriteFPCVT]> { 1734 let Inst{7} = 0; // Z bit 1735 let isUnpredicable = 1; 1736} 1737} 1738 1739// v8.3-a Javascript Convert to Signed fixed-point 1740def VJCVT : AVConv1IsD_Encode<0b11101, 0b11, 0b1001, 0b1011, 1741 (outs SPR:$Sd), (ins DPR:$Dm), 1742 IIC_fpCVTDI, "vjcvt", ".s32.f64\t$Sd, $Dm", 1743 []>, 1744 Requires<[HasFPARMv8, HasV8_3a]> { 1745 let Inst{7} = 1; // Z bit 1746} 1747 1748// Convert between floating-point and fixed-point 1749// Data type for fixed-point naming convention: 1750// S16 (U=0, sx=0) -> SH 1751// U16 (U=1, sx=0) -> UH 1752// S32 (U=0, sx=1) -> SL 1753// U32 (U=1, sx=1) -> UL 1754 1755let Constraints = "$a = $dst" in { 1756 1757// FP to Fixed-Point: 1758 1759// Single Precision register 1760class AVConv1XInsS_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4, 1761 bit op5, dag oops, dag iops, InstrItinClass itin, 1762 string opc, string asm, list<dag> pattern> 1763 : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> { 1764 bits<5> dst; 1765 // if dp_operation then UInt(D:Vd) else UInt(Vd:D); 1766 let Inst{22} = dst{0}; 1767 let Inst{15-12} = dst{4-1}; 1768 1769 let hasSideEffects = 0; 1770} 1771 1772// Double Precision register 1773class AVConv1XInsD_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4, 1774 bit op5, dag oops, dag iops, InstrItinClass itin, 1775 string opc, string asm, list<dag> pattern> 1776 : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> { 1777 bits<5> dst; 1778 // if dp_operation then UInt(D:Vd) else UInt(Vd:D); 1779 let Inst{22} = dst{4}; 1780 let Inst{15-12} = dst{3-0}; 1781 1782 let hasSideEffects = 0; 1783 let Predicates = [HasVFP2, HasDPVFP]; 1784} 1785 1786let isUnpredicable = 1 in { 1787 1788def VTOSHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 0, 1789 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits), 1790 IIC_fpCVTHI, "vcvt", ".s16.f16\t$dst, $a, $fbits", []>, 1791 Requires<[HasFullFP16]>, 1792 Sched<[WriteFPCVT]>; 1793 1794def VTOUHH : 
AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 0, 1795 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits), 1796 IIC_fpCVTHI, "vcvt", ".u16.f16\t$dst, $a, $fbits", []>, 1797 Requires<[HasFullFP16]>, 1798 Sched<[WriteFPCVT]>; 1799 1800def VTOSLH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 1, 1801 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits), 1802 IIC_fpCVTHI, "vcvt", ".s32.f16\t$dst, $a, $fbits", []>, 1803 Requires<[HasFullFP16]>, 1804 Sched<[WriteFPCVT]>; 1805 1806def VTOULH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 1, 1807 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits), 1808 IIC_fpCVTHI, "vcvt", ".u32.f16\t$dst, $a, $fbits", []>, 1809 Requires<[HasFullFP16]>, 1810 Sched<[WriteFPCVT]>; 1811 1812} // End of 'let isUnpredicable = 1 in' 1813 1814def VTOSHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 0, 1815 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits), 1816 IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits", []>, 1817 Sched<[WriteFPCVT]> { 1818 // Some single precision VFP instructions may be executed on both NEON and 1819 // VFP pipelines on A8. 1820 let D = VFPNeonA8Domain; 1821} 1822 1823def VTOUHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 0, 1824 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits), 1825 IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits", []>, 1826 Sched<[WriteFPCVT]> { 1827 // Some single precision VFP instructions may be executed on both NEON and 1828 // VFP pipelines on A8. 1829 let D = VFPNeonA8Domain; 1830} 1831 1832def VTOSLS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 1, 1833 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits), 1834 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits", []>, 1835 Sched<[WriteFPCVT]> { 1836 // Some single precision VFP instructions may be executed on both NEON and 1837 // VFP pipelines on A8. 1838 let D = VFPNeonA8Domain; 1839} 1840 1841def VTOULS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 1, 1842 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits), 1843 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits", []>, 1844 Sched<[WriteFPCVT]> { 1845 // Some single precision VFP instructions may be executed on both NEON and 1846 // VFP pipelines on A8. 
1847 let D = VFPNeonA8Domain; 1848} 1849 1850def VTOSHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 0, 1851 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits), 1852 IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits", []>, 1853 Sched<[WriteFPCVT]>; 1854 1855def VTOUHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 0, 1856 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits), 1857 IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits", []>, 1858 Sched<[WriteFPCVT]>; 1859 1860def VTOSLD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 1, 1861 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits), 1862 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits", []>, 1863 Sched<[WriteFPCVT]>; 1864 1865def VTOULD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 1, 1866 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits), 1867 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits", []>, 1868 Sched<[WriteFPCVT]>; 1869 1870// Fixed-Point to FP: 1871 1872let isUnpredicable = 1 in { 1873 1874def VSHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 0, 1875 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits), 1876 IIC_fpCVTIH, "vcvt", ".f16.s16\t$dst, $a, $fbits", []>, 1877 Requires<[HasFullFP16]>, 1878 Sched<[WriteFPCVT]>; 1879 1880def VUHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 0, 1881 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits), 1882 IIC_fpCVTIH, "vcvt", ".f16.u16\t$dst, $a, $fbits", []>, 1883 Requires<[HasFullFP16]>, 1884 Sched<[WriteFPCVT]>; 1885 1886def VSLTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 1, 1887 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits), 1888 IIC_fpCVTIH, "vcvt", ".f16.s32\t$dst, $a, $fbits", []>, 1889 Requires<[HasFullFP16]>, 1890 Sched<[WriteFPCVT]>; 1891 1892def VULTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 1, 1893 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits), 1894 IIC_fpCVTIH, "vcvt", ".f16.u32\t$dst, $a, $fbits", []>, 1895 Requires<[HasFullFP16]>, 1896 Sched<[WriteFPCVT]>; 1897 1898} // End of 'let isUnpredicable = 1 in' 1899 1900def VSHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 0, 1901 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits), 1902 IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits", []>, 1903 Sched<[WriteFPCVT]> { 1904 // Some single precision VFP instructions may be executed on both NEON and 1905 // VFP pipelines on A8. 1906 let D = VFPNeonA8Domain; 1907} 1908 1909def VUHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 0, 1910 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits), 1911 IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits", []>, 1912 Sched<[WriteFPCVT]> { 1913 // Some single precision VFP instructions may be executed on both NEON and 1914 // VFP pipelines on A8. 1915 let D = VFPNeonA8Domain; 1916} 1917 1918def VSLTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 1, 1919 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits), 1920 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits", []>, 1921 Sched<[WriteFPCVT]> { 1922 // Some single precision VFP instructions may be executed on both NEON and 1923 // VFP pipelines on A8. 1924 let D = VFPNeonA8Domain; 1925} 1926 1927def VULTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 1, 1928 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits), 1929 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits", []>, 1930 Sched<[WriteFPCVT]> { 1931 // Some single precision VFP instructions may be executed on both NEON and 1932 // VFP pipelines on A8. 
1933 let D = VFPNeonA8Domain; 1934} 1935 1936def VSHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 0, 1937 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits), 1938 IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits", []>, 1939 Sched<[WriteFPCVT]>; 1940 1941def VUHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 0, 1942 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits), 1943 IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits", []>, 1944 Sched<[WriteFPCVT]>; 1945 1946def VSLTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 1, 1947 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits), 1948 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits", []>, 1949 Sched<[WriteFPCVT]>; 1950 1951def VULTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 1, 1952 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits), 1953 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits", []>, 1954 Sched<[WriteFPCVT]>; 1955 1956} // End of 'let Constraints = "$a = $dst" in' 1957 1958// BFloat16 - Single precision, unary, predicated 1959class BF16_VCVT<string opc, bits<2> op7_6> 1960 : VFPAI<(outs SPR:$Sd), (ins SPR:$dst, SPR:$Sm), 1961 VFPUnaryFrm, NoItinerary, 1962 opc, ".bf16.f32\t$Sd, $Sm", []>, 1963 RegConstraint<"$dst = $Sd">, 1964 Requires<[HasBF16]>, 1965 Sched<[]> { 1966 bits<5> Sd; 1967 bits<5> Sm; 1968 1969 // Encode instruction operands. 1970 let Inst{3-0} = Sm{4-1}; 1971 let Inst{5} = Sm{0}; 1972 let Inst{15-12} = Sd{4-1}; 1973 let Inst{22} = Sd{0}; 1974 1975 let Inst{27-23} = 0b11101; // opcode1 1976 let Inst{21-20} = 0b11; // opcode2 1977 let Inst{19-16} = 0b0011; // opcode3 1978 let Inst{11-8} = 0b1001; 1979 let Inst{7-6} = op7_6; 1980 let Inst{4} = 0; 1981 1982 let DecoderNamespace = "VFPV8"; 1983 let hasSideEffects = 0; 1984} 1985 1986def BF16_VCVTB : BF16_VCVT<"vcvtb", 0b01>; 1987def BF16_VCVTT : BF16_VCVT<"vcvtt", 0b11>; 1988 1989//===----------------------------------------------------------------------===// 1990// FP Multiply-Accumulate Operations. 1991// 1992 1993def VMLAD : ADbI<0b11100, 0b00, 0, 0, 1994 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm), 1995 IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm", 1996 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm), 1997 (f64 DPR:$Ddin)))]>, 1998 RegConstraint<"$Ddin = $Dd">, 1999 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>, 2000 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2001 2002def VMLAS : ASbIn<0b11100, 0b00, 0, 0, 2003 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm), 2004 IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm", 2005 [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm), 2006 SPR:$Sdin))]>, 2007 RegConstraint<"$Sdin = $Sd">, 2008 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>, 2009 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> { 2010 // Some single precision VFP instructions may be executed on both NEON and 2011 // VFP pipelines on A8. 
2012 let D = VFPNeonA8Domain; 2013} 2014 2015def VMLAH : AHbI<0b11100, 0b00, 0, 0, 2016 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm), 2017 IIC_fpMAC16, "vmla", ".f16\t$Sd, $Sn, $Sm", 2018 [(set (f16 HPR:$Sd), (fadd_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)), 2019 (f16 HPR:$Sdin)))]>, 2020 RegConstraint<"$Sdin = $Sd">, 2021 Requires<[HasFullFP16,UseFPVMLx]>; 2022 2023def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))), 2024 (VMLAD DPR:$dstin, DPR:$a, DPR:$b)>, 2025 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>; 2026def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)), 2027 (VMLAS SPR:$dstin, SPR:$a, SPR:$b)>, 2028 Requires<[HasVFP2,DontUseNEONForFP, UseFPVMLx]>; 2029def : Pat<(fadd_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)), 2030 (VMLAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>, 2031 Requires<[HasFullFP16,DontUseNEONForFP, UseFPVMLx]>; 2032 2033 2034def VMLSD : ADbI<0b11100, 0b00, 1, 0, 2035 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm), 2036 IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm", 2037 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)), 2038 (f64 DPR:$Ddin)))]>, 2039 RegConstraint<"$Ddin = $Dd">, 2040 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>, 2041 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2042 2043def VMLSS : ASbIn<0b11100, 0b00, 1, 0, 2044 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm), 2045 IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm", 2046 [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)), 2047 SPR:$Sdin))]>, 2048 RegConstraint<"$Sdin = $Sd">, 2049 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>, 2050 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> { 2051 // Some single precision VFP instructions may be executed on both NEON and 2052 // VFP pipelines on A8. 2053 let D = VFPNeonA8Domain; 2054} 2055 2056def VMLSH : AHbI<0b11100, 0b00, 1, 0, 2057 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm), 2058 IIC_fpMAC16, "vmls", ".f16\t$Sd, $Sn, $Sm", 2059 [(set (f16 HPR:$Sd), (fadd_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))), 2060 (f16 HPR:$Sdin)))]>, 2061 RegConstraint<"$Sdin = $Sd">, 2062 Requires<[HasFullFP16,UseFPVMLx]>; 2063 2064def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))), 2065 (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>, 2066 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>; 2067def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)), 2068 (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>, 2069 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>; 2070def : Pat<(fsub_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)), 2071 (VMLSH HPR:$dstin, (f16 HPR:$a), HPR:$b)>, 2072 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>; 2073 2074def VNMLAD : ADbI<0b11100, 0b01, 1, 0, 2075 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm), 2076 IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm", 2077 [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)), 2078 (f64 DPR:$Ddin)))]>, 2079 RegConstraint<"$Ddin = $Dd">, 2080 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>, 2081 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2082 2083def VNMLAS : ASbI<0b11100, 0b01, 1, 0, 2084 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm), 2085 IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm", 2086 [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)), 2087 SPR:$Sdin))]>, 2088 RegConstraint<"$Sdin = $Sd">, 2089 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>, 2090 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> { 2091 // Some single precision VFP instructions may be executed on both NEON and 2092 // VFP pipelines on A8. 
2093 let D = VFPNeonA8Domain; 2094} 2095 2096def VNMLAH : AHbI<0b11100, 0b01, 1, 0, 2097 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm), 2098 IIC_fpMAC16, "vnmla", ".f16\t$Sd, $Sn, $Sm", 2099 [(set (f16 HPR:$Sd), (fsub_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))), 2100 (f16 HPR:$Sdin)))]>, 2101 RegConstraint<"$Sdin = $Sd">, 2102 Requires<[HasFullFP16,UseFPVMLx]>; 2103 2104// (-(a * b) - dst) -> -(dst + (a * b)) 2105def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin), 2106 (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>, 2107 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>; 2108def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin), 2109 (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>, 2110 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>; 2111def : Pat<(fsub_mlx (fneg (fmul_su (f16 HPR:$a), HPR:$b)), HPR:$dstin), 2112 (VNMLAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>, 2113 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>; 2114 2115// (-dst - (a * b)) -> -(dst + (a * b)) 2116def : Pat<(fsub_mlx (fneg DPR:$dstin), (fmul_su DPR:$a, (f64 DPR:$b))), 2117 (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>, 2118 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>; 2119def : Pat<(fsub_mlx (fneg SPR:$dstin), (fmul_su SPR:$a, SPR:$b)), 2120 (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>, 2121 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>; 2122def : Pat<(fsub_mlx (fneg HPR:$dstin), (fmul_su (f16 HPR:$a), HPR:$b)), 2123 (VNMLAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>, 2124 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>; 2125 2126def VNMLSD : ADbI<0b11100, 0b01, 0, 0, 2127 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm), 2128 IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm", 2129 [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm), 2130 (f64 DPR:$Ddin)))]>, 2131 RegConstraint<"$Ddin = $Dd">, 2132 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>, 2133 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2134 2135def VNMLSS : ASbI<0b11100, 0b01, 0, 0, 2136 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm), 2137 IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm", 2138 [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>, 2139 RegConstraint<"$Sdin = $Sd">, 2140 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>, 2141 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> { 2142 // Some single precision VFP instructions may be executed on both NEON and 2143 // VFP pipelines on A8. 2144 let D = VFPNeonA8Domain; 2145} 2146 2147def VNMLSH : AHbI<0b11100, 0b01, 0, 0, 2148 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm), 2149 IIC_fpMAC16, "vnmls", ".f16\t$Sd, $Sn, $Sm", 2150 [(set (f16 HPR:$Sd), (fsub_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)), (f16 HPR:$Sdin)))]>, 2151 RegConstraint<"$Sdin = $Sd">, 2152 Requires<[HasFullFP16,UseFPVMLx]>; 2153 2154def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin), 2155 (VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>, 2156 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>; 2157def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin), 2158 (VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>, 2159 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>; 2160def : Pat<(fsub_mlx (fmul_su (f16 HPR:$a), HPR:$b), HPR:$dstin), 2161 (VNMLSH HPR:$dstin, (f16 HPR:$a), HPR:$b)>, 2162 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>; 2163 2164//===----------------------------------------------------------------------===// 2165// Fused FP Multiply-Accumulate Operations. 
2166// 2167def VFMAD : ADbI<0b11101, 0b10, 0, 0, 2168 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm), 2169 IIC_fpFMAC64, "vfma", ".f64\t$Dd, $Dn, $Dm", 2170 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm), 2171 (f64 DPR:$Ddin)))]>, 2172 RegConstraint<"$Ddin = $Dd">, 2173 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>, 2174 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2175 2176def VFMAS : ASbIn<0b11101, 0b10, 0, 0, 2177 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm), 2178 IIC_fpFMAC32, "vfma", ".f32\t$Sd, $Sn, $Sm", 2179 [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm), 2180 SPR:$Sdin))]>, 2181 RegConstraint<"$Sdin = $Sd">, 2182 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>, 2183 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> { 2184 // Some single precision VFP instructions may be executed on both NEON and 2185 // VFP pipelines. 2186} 2187 2188def VFMAH : AHbI<0b11101, 0b10, 0, 0, 2189 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm), 2190 IIC_fpFMAC16, "vfma", ".f16\t$Sd, $Sn, $Sm", 2191 [(set (f16 HPR:$Sd), (fadd_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)), 2192 (f16 HPR:$Sdin)))]>, 2193 RegConstraint<"$Sdin = $Sd">, 2194 Requires<[HasFullFP16,UseFusedMAC]>, 2195 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2196 2197def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))), 2198 (VFMAD DPR:$dstin, DPR:$a, DPR:$b)>, 2199 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>; 2200def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)), 2201 (VFMAS SPR:$dstin, SPR:$a, SPR:$b)>, 2202 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>; 2203def : Pat<(fadd_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)), 2204 (VFMAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>, 2205 Requires<[HasFullFP16,DontUseNEONForFP,UseFusedMAC]>; 2206 2207// Match @llvm.fma.* intrinsics 2208// (fma x, y, z) -> (vfma z, x, y) 2209def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, DPR:$Ddin)), 2210 (VFMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>, 2211 Requires<[HasVFP4,HasDPVFP]>; 2212def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, SPR:$Sdin)), 2213 (VFMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>, 2214 Requires<[HasVFP4]>; 2215def : Pat<(f16 (fma HPR:$Sn, HPR:$Sm, (f16 HPR:$Sdin))), 2216 (VFMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>, 2217 Requires<[HasFullFP16]>; 2218 2219def VFMSD : ADbI<0b11101, 0b10, 1, 0, 2220 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm), 2221 IIC_fpFMAC64, "vfms", ".f64\t$Dd, $Dn, $Dm", 2222 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)), 2223 (f64 DPR:$Ddin)))]>, 2224 RegConstraint<"$Ddin = $Dd">, 2225 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>, 2226 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2227 2228def VFMSS : ASbIn<0b11101, 0b10, 1, 0, 2229 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm), 2230 IIC_fpFMAC32, "vfms", ".f32\t$Sd, $Sn, $Sm", 2231 [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)), 2232 SPR:$Sdin))]>, 2233 RegConstraint<"$Sdin = $Sd">, 2234 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>, 2235 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> { 2236 // Some single precision VFP instructions may be executed on both NEON and 2237 // VFP pipelines.
2238} 2239 2240def VFMSH : AHbI<0b11101, 0b10, 1, 0, 2241 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm), 2242 IIC_fpFMAC16, "vfms", ".f16\t$Sd, $Sn, $Sm", 2243 [(set (f16 HPR:$Sd), (fadd_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))), 2244 (f16 HPR:$Sdin)))]>, 2245 RegConstraint<"$Sdin = $Sd">, 2246 Requires<[HasFullFP16,UseFusedMAC]>, 2247 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2248 2249def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))), 2250 (VFMSD DPR:$dstin, DPR:$a, DPR:$b)>, 2251 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>; 2252def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)), 2253 (VFMSS SPR:$dstin, SPR:$a, SPR:$b)>, 2254 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>; 2255def : Pat<(fsub_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)), 2256 (VFMSH HPR:$dstin, (f16 HPR:$a), HPR:$b)>, 2257 Requires<[HasFullFP16,DontUseNEONForFP,UseFusedMAC]>; 2258 2259// Match @llvm.fma.* intrinsics 2260// (fma (fneg x), y, z) -> (vfms z, x, y) 2261def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin)), 2262 (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>, 2263 Requires<[HasVFP4,HasDPVFP]>; 2264def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin)), 2265 (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>, 2266 Requires<[HasVFP4]>; 2267def : Pat<(f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (f16 HPR:$Sdin))), 2268 (VFMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>, 2269 Requires<[HasFullFP16]>; 2270 2271def VFNMAD : ADbI<0b11101, 0b01, 1, 0, 2272 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm), 2273 IIC_fpFMAC64, "vfnma", ".f64\t$Dd, $Dn, $Dm", 2274 [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)), 2275 (f64 DPR:$Ddin)))]>, 2276 RegConstraint<"$Ddin = $Dd">, 2277 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>, 2278 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2279 2280def VFNMAS : ASbI<0b11101, 0b01, 1, 0, 2281 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm), 2282 IIC_fpFMAC32, "vfnma", ".f32\t$Sd, $Sn, $Sm", 2283 [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)), 2284 SPR:$Sdin))]>, 2285 RegConstraint<"$Sdin = $Sd">, 2286 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>, 2287 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> { 2288 // Some single precision VFP instructions may be executed on both NEON and 2289 // VFP pipelines. 
2290} 2291 2292def VFNMAH : AHbI<0b11101, 0b01, 1, 0, 2293 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm), 2294 IIC_fpFMAC16, "vfnma", ".f16\t$Sd, $Sn, $Sm", 2295 [(set (f16 HPR:$Sd), (fsub_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))), 2296 (f16 HPR:$Sdin)))]>, 2297 RegConstraint<"$Sdin = $Sd">, 2298 Requires<[HasFullFP16,UseFusedMAC]>, 2299 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2300 2301def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin), 2302 (VFNMAD DPR:$dstin, DPR:$a, DPR:$b)>, 2303 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>; 2304def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin), 2305 (VFNMAS SPR:$dstin, SPR:$a, SPR:$b)>, 2306 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>; 2307 2308// Match @llvm.fma.* intrinsics 2309// (fneg (fma x, y, z)) -> (vfnma z, x, y) 2310def : Pat<(fneg (fma (f64 DPR:$Dn), (f64 DPR:$Dm), (f64 DPR:$Ddin))), 2311 (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>, 2312 Requires<[HasVFP4,HasDPVFP]>; 2313def : Pat<(fneg (fma (f32 SPR:$Sn), (f32 SPR:$Sm), (f32 SPR:$Sdin))), 2314 (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>, 2315 Requires<[HasVFP4]>; 2316def : Pat<(fneg (fma (f16 HPR:$Sn), (f16 HPR:$Sm), (f16 (f16 HPR:$Sdin)))), 2317 (VFNMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>, 2318 Requires<[HasFullFP16]>; 2319// (fma (fneg x), y, (fneg z)) -> (vfnma z, x, y) 2320def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, (fneg DPR:$Ddin))), 2321 (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>, 2322 Requires<[HasVFP4,HasDPVFP]>; 2323def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, (fneg SPR:$Sdin))), 2324 (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>, 2325 Requires<[HasVFP4]>; 2326def : Pat<(f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (fneg (f16 HPR:$Sdin)))), 2327 (VFNMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>, 2328 Requires<[HasFullFP16]>; 2329 2330def VFNMSD : ADbI<0b11101, 0b01, 0, 0, 2331 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm), 2332 IIC_fpFMAC64, "vfnms", ".f64\t$Dd, $Dn, $Dm", 2333 [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm), 2334 (f64 DPR:$Ddin)))]>, 2335 RegConstraint<"$Ddin = $Dd">, 2336 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>, 2337 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2338 2339def VFNMSS : ASbI<0b11101, 0b01, 0, 0, 2340 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm), 2341 IIC_fpFMAC32, "vfnms", ".f32\t$Sd, $Sn, $Sm", 2342 [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>, 2343 RegConstraint<"$Sdin = $Sd">, 2344 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>, 2345 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> { 2346 // Some single precision VFP instructions may be executed on both NEON and 2347 // VFP pipelines. 
2348} 2349 2350def VFNMSH : AHbI<0b11101, 0b01, 0, 0, 2351 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm), 2352 IIC_fpFMAC16, "vfnms", ".f16\t$Sd, $Sn, $Sm", 2353 [(set (f16 HPR:$Sd), (fsub_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)), (f16 HPR:$Sdin)))]>, 2354 RegConstraint<"$Sdin = $Sd">, 2355 Requires<[HasFullFP16,UseFusedMAC]>, 2356 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2357 2358def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin), 2359 (VFNMSD DPR:$dstin, DPR:$a, DPR:$b)>, 2360 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>; 2361def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin), 2362 (VFNMSS SPR:$dstin, SPR:$a, SPR:$b)>, 2363 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>; 2364 2365// Match @llvm.fma.* intrinsics 2366 2367// (fma x, y, (fneg z)) -> (vfnms z, x, y) 2368def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, (fneg DPR:$Ddin))), 2369 (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>, 2370 Requires<[HasVFP4,HasDPVFP]>; 2371def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, (fneg SPR:$Sdin))), 2372 (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>, 2373 Requires<[HasVFP4]>; 2374def : Pat<(f16 (fma (f16 HPR:$Sn), (f16 HPR:$Sm), (fneg (f16 HPR:$Sdin)))), 2375 (VFNMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>, 2376 Requires<[HasFullFP16]>; 2377// (fneg (fma (fneg x), y, z)) -> (vfnms z, x, y) 2378def : Pat<(fneg (f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin))), 2379 (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>, 2380 Requires<[HasVFP4,HasDPVFP]>; 2381def : Pat<(fneg (f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin))), 2382 (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>, 2383 Requires<[HasVFP4]>; 2384def : Pat<(fneg (f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (f16 HPR:$Sdin)))), 2385 (VFNMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>, 2386 Requires<[HasFullFP16]>; 2387 2388//===----------------------------------------------------------------------===// 2389// FP Conditional moves. 2390// 2391 2392let hasSideEffects = 0 in { 2393def VMOVDcc : PseudoInst<(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm, cmovpred:$p), 2394 IIC_fpUNA64, 2395 [(set (f64 DPR:$Dd), 2396 (ARMcmov DPR:$Dn, DPR:$Dm, cmovpred:$p))]>, 2397 RegConstraint<"$Dn = $Dd">, Requires<[HasFPRegs64]>; 2398 2399def VMOVScc : PseudoInst<(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm, cmovpred:$p), 2400 IIC_fpUNA32, 2401 [(set (f32 SPR:$Sd), 2402 (ARMcmov SPR:$Sn, SPR:$Sm, cmovpred:$p))]>, 2403 RegConstraint<"$Sn = $Sd">, Requires<[HasFPRegs]>; 2404 2405def VMOVHcc : PseudoInst<(outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm, cmovpred:$p), 2406 IIC_fpUNA16, 2407 [(set (f16 HPR:$Sd), 2408 (ARMcmov (f16 HPR:$Sn), (f16 HPR:$Sm), cmovpred:$p))]>, 2409 RegConstraint<"$Sd = $Sn">, Requires<[HasFPRegs]>; 2410} // hasSideEffects 2411 2412//===----------------------------------------------------------------------===// 2413// Move from VFP System Register to ARM core register. 2414// 2415 2416class MovFromVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm, 2417 list<dag> pattern>: 2418 VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> { 2419 2420 // Instruction operand. 2421 bits<4> Rt; 2422 2423 let Inst{27-20} = 0b11101111; 2424 let Inst{19-16} = opc19_16; 2425 let Inst{15-12} = Rt; 2426 let Inst{11-8} = 0b1010; 2427 let Inst{7} = 0; 2428 let Inst{6-5} = 0b00; 2429 let Inst{4} = 1; 2430 let Inst{3-0} = 0b0000; 2431 let Unpredictable{7-5} = 0b111; 2432 let Unpredictable{3-0} = 0b1111; 2433} 2434 2435let DecoderMethod = "DecodeForVMRSandVMSR" in { 2436 // APSR is the application level alias of CPSR. This copies the FPSCR N, Z, 2437 // C, V flags to APSR.
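  // A typical use, for illustration: after "vcmp.f64 d0, d1", the sequence
  //   vmrs APSR_nzcv, fpscr   @ FMSTAT
  //   bgt  label
  // branches on the result of the floating-point comparison.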
2438 let Defs = [CPSR], Uses = [FPSCR_NZCV], Predicates = [HasFPRegs], 2439 Rt = 0b1111 /* apsr_nzcv */ in 2440 def FMSTAT : MovFromVFP<0b0001 /* fpscr */, (outs), (ins), 2441 "vmrs", "\tAPSR_nzcv, fpscr", [(arm_fmstat)]>; 2442 2443 // Application level FPSCR -> GPR 2444 let hasSideEffects = 1, Uses = [FPSCR], Predicates = [HasFPRegs] in 2445 def VMRS : MovFromVFP<0b0001 /* fpscr */, (outs GPRnopc:$Rt), (ins), 2446 "vmrs", "\t$Rt, fpscr", 2447 [(set GPRnopc:$Rt, (int_arm_get_fpscr))]>; 2448 2449 // System level FPEXC, FPSID -> GPR 2450 let Uses = [FPSCR] in { 2451 def VMRS_FPEXC : MovFromVFP<0b1000 /* fpexc */, (outs GPRnopc:$Rt), (ins), 2452 "vmrs", "\t$Rt, fpexc", []>; 2453 def VMRS_FPSID : MovFromVFP<0b0000 /* fpsid */, (outs GPRnopc:$Rt), (ins), 2454 "vmrs", "\t$Rt, fpsid", []>; 2455 def VMRS_MVFR0 : MovFromVFP<0b0111 /* mvfr0 */, (outs GPRnopc:$Rt), (ins), 2456 "vmrs", "\t$Rt, mvfr0", []>; 2457 def VMRS_MVFR1 : MovFromVFP<0b0110 /* mvfr1 */, (outs GPRnopc:$Rt), (ins), 2458 "vmrs", "\t$Rt, mvfr1", []>; 2459 let Predicates = [HasFPARMv8] in { 2460 def VMRS_MVFR2 : MovFromVFP<0b0101 /* mvfr2 */, (outs GPRnopc:$Rt), (ins), 2461 "vmrs", "\t$Rt, mvfr2", []>; 2462 } 2463 def VMRS_FPINST : MovFromVFP<0b1001 /* fpinst */, (outs GPRnopc:$Rt), (ins), 2464 "vmrs", "\t$Rt, fpinst", []>; 2465 def VMRS_FPINST2 : MovFromVFP<0b1010 /* fpinst2 */, (outs GPRnopc:$Rt), 2466 (ins), "vmrs", "\t$Rt, fpinst2", []>; 2467 let Predicates = [HasV8_1MMainline, HasFPRegs] in { 2468 // System level FPSCR_NZCVQC -> GPR 2469 def VMRS_FPSCR_NZCVQC 2470 : MovFromVFP<0b0010 /* fpscr_nzcvqc */, 2471 (outs GPR:$Rt), (ins cl_FPSCR_NZCV:$fpscr_in), 2472 "vmrs", "\t$Rt, fpscr_nzcvqc", []>; 2473 } 2474 } 2475 let Predicates = [HasV8_1MMainline, Has8MSecExt] in { 2476 // System level FPSCR -> GPR, with context saving for security extensions 2477 def VMRS_FPCXTNS : MovFromVFP<0b1110 /* fpcxtns */, (outs GPR:$Rt), (ins), 2478 "vmrs", "\t$Rt, fpcxtns", []>; 2479 } 2480 let Predicates = [HasV8_1MMainline, Has8MSecExt] in { 2481 // System level FPSCR -> GPR, with context saving for security extensions 2482 def VMRS_FPCXTS : MovFromVFP<0b1111 /* fpcxts */, (outs GPR:$Rt), (ins), 2483 "vmrs", "\t$Rt, fpcxts", []>; 2484 } 2485 2486 let Predicates = [HasV8_1MMainline, HasMVEInt] in { 2487 // System level VPR/P0 -> GPR 2488 let Uses = [VPR] in 2489 def VMRS_VPR : MovFromVFP<0b1100 /* vpr */, (outs GPR:$Rt), (ins), 2490 "vmrs", "\t$Rt, vpr", []>; 2491 2492 def VMRS_P0 : MovFromVFP<0b1101 /* p0 */, (outs GPR:$Rt), (ins VCCR:$cond), 2493 "vmrs", "\t$Rt, p0", []>; 2494 } 2495} 2496 2497//===----------------------------------------------------------------------===// 2498// Move from ARM core register to VFP System Register. 2499// 2500 2501class MovToVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm, 2502 list<dag> pattern>: 2503 VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> { 2504 2505 // Instruction operand. 
2506 bits<4> Rt; 2507 2508 let Inst{27-20} = 0b11101110; 2509 let Inst{19-16} = opc19_16; 2510 let Inst{15-12} = Rt; 2511 let Inst{11-8} = 0b1010; 2512 let Inst{7} = 0; 2513 let Inst{6-5} = 0b00; 2514 let Inst{4} = 1; 2515 let Inst{3-0} = 0b0000; 2516 let Predicates = [HasVFP2]; 2517 let Unpredictable{7-5} = 0b111; 2518 let Unpredictable{3-0} = 0b1111; 2519} 2520 2521let DecoderMethod = "DecodeForVMRSandVMSR" in { 2522 let Defs = [FPSCR] in { 2523 let Predicates = [HasFPRegs] in 2524 // Application level GPR -> FPSCR 2525 def VMSR : MovToVFP<0b0001 /* fpscr */, (outs), (ins GPRnopc:$Rt), 2526 "vmsr", "\tfpscr, $Rt", 2527 [(int_arm_set_fpscr GPRnopc:$Rt)]>; 2528 // System level GPR -> FPEXC 2529 def VMSR_FPEXC : MovToVFP<0b1000 /* fpexc */, (outs), (ins GPRnopc:$Rt), 2530 "vmsr", "\tfpexc, $Rt", []>; 2531 // System level GPR -> FPSID 2532 def VMSR_FPSID : MovToVFP<0b0000 /* fpsid */, (outs), (ins GPRnopc:$Rt), 2533 "vmsr", "\tfpsid, $Rt", []>; 2534 def VMSR_FPINST : MovToVFP<0b1001 /* fpinst */, (outs), (ins GPRnopc:$Rt), 2535 "vmsr", "\tfpinst, $Rt", []>; 2536 def VMSR_FPINST2 : MovToVFP<0b1010 /* fpinst2 */, (outs), (ins GPRnopc:$Rt), 2537 "vmsr", "\tfpinst2, $Rt", []>; 2538 } 2539 let Predicates = [HasV8_1MMainline, Has8MSecExt] in { 2540 // System level GPR -> FPSCR with context saving for security extensions 2541 def VMSR_FPCXTNS : MovToVFP<0b1110 /* fpcxtns */, (outs), (ins GPR:$Rt), 2542 "vmsr", "\tfpcxtns, $Rt", []>; 2543 } 2544 let Predicates = [HasV8_1MMainline, Has8MSecExt] in { 2545 // System level GPR -> FPSCR with context saving for security extensions 2546 def VMSR_FPCXTS : MovToVFP<0b1111 /* fpcxts */, (outs), (ins GPR:$Rt), 2547 "vmsr", "\tfpcxts, $Rt", []>; 2548 } 2549 let Predicates = [HasV8_1MMainline, HasFPRegs] in { 2550 // System level GPR -> FPSCR_NZCVQC 2551 def VMSR_FPSCR_NZCVQC 2552 : MovToVFP<0b0010 /* fpscr_nzcvqc */, 2553 (outs cl_FPSCR_NZCV:$fpscr_out), (ins GPR:$Rt), 2554 "vmsr", "\tfpscr_nzcvqc, $Rt", []>; 2555 } 2556 2557 let Predicates = [HasV8_1MMainline, HasMVEInt] in { 2558 // System level GPR -> VPR/P0 2559 let Defs = [VPR] in 2560 def VMSR_VPR : MovToVFP<0b1100 /* vpr */, (outs), (ins GPR:$Rt), 2561 "vmsr", "\tvpr, $Rt", []>; 2562 2563 def VMSR_P0 : MovToVFP<0b1101 /* p0 */, (outs VCCR:$cond), (ins GPR:$Rt), 2564 "vmsr", "\tp0, $Rt", []>; 2565 } 2566} 2567 2568//===----------------------------------------------------------------------===// 2569// Misc. 2570// 2571 2572// Materialize FP immediates. VFP3 only. 2573let isReMaterializable = 1 in { 2574def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm), 2575 VFPMiscFrm, IIC_fpUNA64, 2576 "vmov", ".f64\t$Dd, $imm", 2577 [(set DPR:$Dd, vfp_f64imm:$imm)]>, 2578 Requires<[HasVFP3,HasDPVFP]> { 2579 bits<5> Dd; 2580 bits<8> imm; 2581 2582 let Inst{27-23} = 0b11101; 2583 let Inst{22} = Dd{4}; 2584 let Inst{21-20} = 0b11; 2585 let Inst{19-16} = imm{7-4}; 2586 let Inst{15-12} = Dd{3-0}; 2587 let Inst{11-9} = 0b101; 2588 let Inst{8} = 1; // Double precision. 2589 let Inst{7-4} = 0b0000; 2590 let Inst{3-0} = imm{3-0}; 2591} 2592 2593def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm), 2594 VFPMiscFrm, IIC_fpUNA32, 2595 "vmov", ".f32\t$Sd, $imm", 2596 [(set SPR:$Sd, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> { 2597 bits<5> Sd; 2598 bits<8> imm; 2599 2600 let Inst{27-23} = 0b11101; 2601 let Inst{22} = Sd{0}; 2602 let Inst{21-20} = 0b11; 2603 let Inst{19-16} = imm{7-4}; 2604 let Inst{15-12} = Sd{4-1}; 2605 let Inst{11-9} = 0b101; 2606 let Inst{8} = 0; // Single precision. 
2607 let Inst{7-4} = 0b0000; 2608 let Inst{3-0} = imm{3-0}; 2609} 2610 2611def FCONSTH : VFPAI<(outs HPR:$Sd), (ins vfp_f16imm:$imm), 2612 VFPMiscFrm, IIC_fpUNA16, 2613 "vmov", ".f16\t$Sd, $imm", 2614 [(set (f16 HPR:$Sd), vfp_f16imm:$imm)]>, 2615 Requires<[HasFullFP16]> { 2616 bits<5> Sd; 2617 bits<8> imm; 2618 2619 let Inst{27-23} = 0b11101; 2620 let Inst{22} = Sd{0}; 2621 let Inst{21-20} = 0b11; 2622 let Inst{19-16} = imm{7-4}; 2623 let Inst{15-12} = Sd{4-1}; 2624 let Inst{11-8} = 0b1001; // Half precision 2625 let Inst{7-4} = 0b0000; 2626 let Inst{3-0} = imm{3-0}; 2627 2628 let isUnpredicable = 1; 2629} 2630} 2631 2632def : Pat<(f32 (vfp_f32f16imm:$imm)), 2633 (f32 (COPY_TO_REGCLASS (f16 (FCONSTH (vfp_f32f16imm_xform (f32 $imm)))), SPR))> { 2634 let Predicates = [HasFullFP16]; 2635} 2636 2637//===----------------------------------------------------------------------===// 2638// Assembler aliases. 2639// 2640// A few mnemonic aliases for pre-unified syntax. We don't guarantee to 2641// support them all, but supporting at least some of the basics is 2642// good to be friendly. 2643def : VFP2MnemonicAlias<"flds", "vldr">; 2644def : VFP2MnemonicAlias<"fldd", "vldr">; 2645def : VFP2MnemonicAlias<"fmrs", "vmov">; 2646def : VFP2MnemonicAlias<"fmsr", "vmov">; 2647def : VFP2MnemonicAlias<"fsqrts", "vsqrt">; 2648def : VFP2MnemonicAlias<"fsqrtd", "vsqrt">; 2649def : VFP2MnemonicAlias<"fadds", "vadd.f32">; 2650def : VFP2MnemonicAlias<"faddd", "vadd.f64">; 2651def : VFP2MnemonicAlias<"fmrdd", "vmov">; 2652def : VFP2MnemonicAlias<"fmrds", "vmov">; 2653def : VFP2MnemonicAlias<"fmrrd", "vmov">; 2654def : VFP2MnemonicAlias<"fmdrr", "vmov">; 2655def : VFP2MnemonicAlias<"fmuls", "vmul.f32">; 2656def : VFP2MnemonicAlias<"fmuld", "vmul.f64">; 2657def : VFP2MnemonicAlias<"fnegs", "vneg.f32">; 2658def : VFP2MnemonicAlias<"fnegd", "vneg.f64">; 2659def : VFP2MnemonicAlias<"ftosizd", "vcvt.s32.f64">; 2660def : VFP2MnemonicAlias<"ftosid", "vcvtr.s32.f64">; 2661def : VFP2MnemonicAlias<"ftosizs", "vcvt.s32.f32">; 2662def : VFP2MnemonicAlias<"ftosis", "vcvtr.s32.f32">; 2663def : VFP2MnemonicAlias<"ftouizd", "vcvt.u32.f64">; 2664def : VFP2MnemonicAlias<"ftouid", "vcvtr.u32.f64">; 2665def : VFP2MnemonicAlias<"ftouizs", "vcvt.u32.f32">; 2666def : VFP2MnemonicAlias<"ftouis", "vcvtr.u32.f32">; 2667def : VFP2MnemonicAlias<"fsitod", "vcvt.f64.s32">; 2668def : VFP2MnemonicAlias<"fsitos", "vcvt.f32.s32">; 2669def : VFP2MnemonicAlias<"fuitod", "vcvt.f64.u32">; 2670def : VFP2MnemonicAlias<"fuitos", "vcvt.f32.u32">; 2671def : VFP2MnemonicAlias<"fsts", "vstr">; 2672def : VFP2MnemonicAlias<"fstd", "vstr">; 2673def : VFP2MnemonicAlias<"fmacd", "vmla.f64">; 2674def : VFP2MnemonicAlias<"fmacs", "vmla.f32">; 2675def : VFP2MnemonicAlias<"fcpys", "vmov.f32">; 2676def : VFP2MnemonicAlias<"fcpyd", "vmov.f64">; 2677def : VFP2MnemonicAlias<"fcmps", "vcmp.f32">; 2678def : VFP2MnemonicAlias<"fcmpd", "vcmp.f64">; 2679def : VFP2MnemonicAlias<"fdivs", "vdiv.f32">; 2680def : VFP2MnemonicAlias<"fdivd", "vdiv.f64">; 2681def : VFP2MnemonicAlias<"fmrx", "vmrs">; 2682def : VFP2MnemonicAlias<"fmxr", "vmsr">; 2683 2684// Be friendly and accept the old form of zero-compare 2685def : VFP2DPInstAlias<"fcmpzd${p} $val", (VCMPZD DPR:$val, pred:$p)>; 2686def : VFP2InstAlias<"fcmpzs${p} $val", (VCMPZS SPR:$val, pred:$p)>; 2687 2688 2689def : InstAlias<"fmstat${p}", (FMSTAT pred:$p), 0>, Requires<[HasFPRegs]>; 2690def : VFP2InstAlias<"fadds${p} $Sd, $Sn, $Sm", 2691 (VADDS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>; 2692def : VFP2DPInstAlias<"faddd${p} $Dd, $Dn, $Dm",
$Dm", 2693 (VADDD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>; 2694def : VFP2InstAlias<"fsubs${p} $Sd, $Sn, $Sm", 2695 (VSUBS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>; 2696def : VFP2DPInstAlias<"fsubd${p} $Dd, $Dn, $Dm", 2697 (VSUBD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>; 2698 2699// No need for the size suffix on VSQRT. It's implied by the register classes. 2700def : VFP2InstAlias<"vsqrt${p} $Sd, $Sm", (VSQRTS SPR:$Sd, SPR:$Sm, pred:$p)>; 2701def : VFP2DPInstAlias<"vsqrt${p} $Dd, $Dm", (VSQRTD DPR:$Dd, DPR:$Dm, pred:$p)>; 2702 2703// VLDR/VSTR accept an optional type suffix. 2704def : VFP2InstAlias<"vldr${p}.32 $Sd, $addr", 2705 (VLDRS SPR:$Sd, addrmode5:$addr, pred:$p)>; 2706def : VFP2InstAlias<"vstr${p}.32 $Sd, $addr", 2707 (VSTRS SPR:$Sd, addrmode5:$addr, pred:$p)>; 2708def : VFP2InstAlias<"vldr${p}.64 $Dd, $addr", 2709 (VLDRD DPR:$Dd, addrmode5:$addr, pred:$p)>; 2710def : VFP2InstAlias<"vstr${p}.64 $Dd, $addr", 2711 (VSTRD DPR:$Dd, addrmode5:$addr, pred:$p)>; 2712 2713// VMOV can accept optional 32-bit or less data type suffix suffix. 2714def : VFP2InstAlias<"vmov${p}.8 $Rt, $Sn", 2715 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>; 2716def : VFP2InstAlias<"vmov${p}.16 $Rt, $Sn", 2717 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>; 2718def : VFP2InstAlias<"vmov${p}.32 $Rt, $Sn", 2719 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>; 2720def : VFP2InstAlias<"vmov${p}.8 $Sn, $Rt", 2721 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>; 2722def : VFP2InstAlias<"vmov${p}.16 $Sn, $Rt", 2723 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>; 2724def : VFP2InstAlias<"vmov${p}.32 $Sn, $Rt", 2725 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>; 2726 2727def : VFP2InstAlias<"vmov${p}.f64 $Rt, $Rt2, $Dn", 2728 (VMOVRRD GPR:$Rt, GPR:$Rt2, DPR:$Dn, pred:$p)>; 2729def : VFP2InstAlias<"vmov${p}.f64 $Dn, $Rt, $Rt2", 2730 (VMOVDRR DPR:$Dn, GPR:$Rt, GPR:$Rt2, pred:$p)>; 2731 2732// VMOVS doesn't need the .f32 to disambiguate from the NEON encoding the way 2733// VMOVD does. 2734def : VFP2InstAlias<"vmov${p} $Sd, $Sm", 2735 (VMOVS SPR:$Sd, SPR:$Sm, pred:$p)>; 2736 2737// FCONSTD/FCONSTS alias for vmov.f64/vmov.f32 2738// These aliases provide added functionality over vmov.f instructions by 2739// allowing users to write assembly containing encoded floating point constants 2740// (e.g. #0x70 vs #1.0). Without these alises there is no way for the 2741// assembler to accept encoded fp constants (but the equivalent fp-literal is 2742// accepted directly by vmovf). 
2743def : VFP3InstAlias<"fconstd${p} $Dd, $val", 2744 (FCONSTD DPR:$Dd, vfp_f64imm:$val, pred:$p)>; 2745def : VFP3InstAlias<"fconsts${p} $Sd, $val", 2746 (FCONSTS SPR:$Sd, vfp_f32imm:$val, pred:$p)>; 2747 2748def VSCCLRMD : VFPXI<(outs), (ins pred:$p, fp_dreglist_with_vpr:$regs, variable_ops), 2749 AddrModeNone, 4, IndexModeNone, VFPMiscFrm, NoItinerary, 2750 "vscclrm{$p}\t$regs", "", []>, Sched<[]> { 2751 bits<13> regs; 2752 let Inst{31-23} = 0b111011001; 2753 let Inst{22} = regs{12}; 2754 let Inst{21-16} = 0b011111; 2755 let Inst{15-12} = regs{11-8}; 2756 let Inst{11-8} = 0b1011; 2757 let Inst{7-1} = regs{7-1}; 2758 let Inst{0} = 0; 2759 2760 let DecoderMethod = "DecodeVSCCLRM"; 2761 2762 list<Predicate> Predicates = [HasV8_1MMainline, Has8MSecExt]; 2763} 2764 2765def VSCCLRMS : VFPXI<(outs), (ins pred:$p, fp_sreglist_with_vpr:$regs, variable_ops), 2766 AddrModeNone, 4, IndexModeNone, VFPMiscFrm, NoItinerary, 2767 "vscclrm{$p}\t$regs", "", []>, Sched<[]> { 2768 bits<13> regs; 2769 let Inst{31-23} = 0b111011001; 2770 let Inst{22} = regs{8}; 2771 let Inst{21-16} = 0b011111; 2772 let Inst{15-12} = regs{12-9}; 2773 let Inst{11-8} = 0b1010; 2774 let Inst{7-0} = regs{7-0}; 2775 2776 let DecoderMethod = "DecodeVSCCLRM"; 2777 2778 list<Predicate> Predicates = [HasV8_1MMainline, Has8MSecExt]; 2779} 2780 2781//===----------------------------------------------------------------------===// 2782// Store VFP System Register to memory. 2783// 2784 2785class vfp_vstrldr<bit opc, bit P, bit W, bits<4> SysReg, string sysreg, 2786 dag oops, dag iops, IndexMode im, string Dest, string cstr> 2787 : VFPI<oops, iops, AddrModeT2_i7s4, 4, im, VFPLdStFrm, IIC_fpSTAT, 2788 !if(opc,"vldr","vstr"), !strconcat("\t", sysreg, ", ", Dest), cstr, []>, 2789 Sched<[]> { 2790 bits<12> addr; 2791 let Inst{27-25} = 0b110; 2792 let Inst{24} = P; 2793 let Inst{23} = addr{7}; 2794 let Inst{22} = SysReg{3}; 2795 let Inst{21} = W; 2796 let Inst{20} = opc; 2797 let Inst{19-16} = addr{11-8}; 2798 let Inst{15-13} = SysReg{2-0}; 2799 let Inst{12-7} = 0b011111; 2800 let Inst{6-0} = addr{6-0}; 2801 list<Predicate> Predicates = [HasFPRegs, HasV8_1MMainline]; 2802 let mayLoad = opc; 2803 let mayStore = !if(opc, 0b0, 0b1); 2804 let hasSideEffects = 1; 2805} 2806 2807multiclass vfp_vstrldr_sysreg<bit opc, bits<4> SysReg, string sysreg, 2808 dag oops=(outs), dag iops=(ins)> { 2809 def _off : 2810 vfp_vstrldr<opc, 1, 0, SysReg, sysreg, 2811 oops, !con(iops, (ins t2addrmode_imm7s4:$addr)), 2812 IndexModePost, "$addr", "" > { 2813 let DecoderMethod = "DecodeVSTRVLDR_SYSREG<false>"; 2814 } 2815 2816 def _pre : 2817 vfp_vstrldr<opc, 1, 1, SysReg, sysreg, 2818 !con(oops, (outs GPRnopc:$wb)), 2819 !con(iops, (ins t2addrmode_imm7s4_pre:$addr)), 2820 IndexModePre, "$addr!", "$addr.base = $wb"> { 2821 let DecoderMethod = "DecodeVSTRVLDR_SYSREG<true>"; 2822 } 2823 2824 def _post : 2825 vfp_vstrldr<opc, 0, 1, SysReg, sysreg, 2826 !con(oops, (outs GPRnopc:$wb)), 2827 !con(iops, (ins t2_addr_offset_none:$Rn, 2828 t2am_imm7s4_offset:$addr)), 2829 IndexModePost, "$Rn$addr", "$Rn.base = $wb"> { 2830 bits<4> Rn; 2831 let Inst{19-16} = Rn{3-0}; 2832 let DecoderMethod = "DecodeVSTRVLDR_SYSREG<true>"; 2833 } 2834} 2835 2836let Defs = [FPSCR] in { 2837 defm VSTR_FPSCR : vfp_vstrldr_sysreg<0b0,0b0001, "fpscr">; 2838 defm VSTR_FPSCR_NZCVQC : vfp_vstrldr_sysreg<0b0,0b0010, "fpscr_nzcvqc">; 2839 2840 let Predicates = [HasV8_1MMainline, Has8MSecExt] in { 2841 defm VSTR_FPCXTNS : vfp_vstrldr_sysreg<0b0,0b1110, "fpcxtns">; 2842 defm VSTR_FPCXTS : 
vfp_vstrldr_sysreg<0b0,0b1111, "fpcxts">; 2843 } 2844} 2845 2846let Predicates = [HasV8_1MMainline, HasMVEInt] in { 2847 let Uses = [VPR] in { 2848 defm VSTR_VPR : vfp_vstrldr_sysreg<0b0,0b1100, "vpr">; 2849 } 2850 defm VSTR_P0 : vfp_vstrldr_sysreg<0b0,0b1101, "p0", 2851 (outs), (ins VCCR:$P0)>; 2852 2853 let Defs = [VPR] in { 2854 defm VLDR_VPR : vfp_vstrldr_sysreg<0b1,0b1100, "vpr">; 2855 } 2856 defm VLDR_P0 : vfp_vstrldr_sysreg<0b1,0b1101, "p0", 2857 (outs VCCR:$P0), (ins)>; 2858} 2859 2860let Uses = [FPSCR] in { 2861 defm VLDR_FPSCR : vfp_vstrldr_sysreg<0b1,0b0001, "fpscr">; 2862 defm VLDR_FPSCR_NZCVQC : vfp_vstrldr_sysreg<0b1,0b0010, "fpscr_nzcvqc">; 2863 2864 let Predicates = [HasV8_1MMainline, Has8MSecExt] in { 2865 defm VLDR_FPCXTNS : vfp_vstrldr_sysreg<0b1,0b1110, "fpcxtns">; 2866 defm VLDR_FPCXTS : vfp_vstrldr_sysreg<0b1,0b1111, "fpcxts">; 2867 } 2868} 2869
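// Usage sketch for the system-register load/store forms above (Armv8.1-M
// Mainline): "vstr fpscr, [r0, #8]" stores FPSCR to memory, "vldr fpscr,
// [r0, #8]" reloads it, and the _pre/_post variants additionally write the
// updated address back to the base register.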