//===-- ARMInstrVFP.td - VFP support for ARM ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the ARM VFP instruction set.
//
//===----------------------------------------------------------------------===//

def SDT_CMPFP0  : SDTypeProfile<0, 1, [SDTCisFP<0>]>;
def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
                                       SDTCisSameAs<1, 2>]>;
def SDT_VMOVRRD : SDTypeProfile<2, 1, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>,
                                       SDTCisVT<2, f64>]>;

def SDT_VMOVSR : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisVT<1, i32>]>;

def arm_fmstat : SDNode<"ARMISD::FMSTAT",  SDTNone, [SDNPInGlue, SDNPOutGlue]>;
def arm_cmpfp  : SDNode<"ARMISD::CMPFP",   SDT_ARMCmp, [SDNPOutGlue]>;
def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutGlue]>;
def arm_cmpfpe : SDNode<"ARMISD::CMPFPE",  SDT_ARMCmp, [SDNPOutGlue]>;
def arm_cmpfpe0: SDNode<"ARMISD::CMPFPEw0",SDT_CMPFP0, [SDNPOutGlue]>;
def arm_fmdrr  : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
def arm_fmrrd  : SDNode<"ARMISD::VMOVRRD", SDT_VMOVRRD>;
def arm_vmovsr : SDNode<"ARMISD::VMOVSR",  SDT_VMOVSR>;

def SDT_VMOVhr : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, i32>] >;
def SDT_VMOVrh : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisFP<1>] >;
def arm_vmovhr : SDNode<"ARMISD::VMOVhr", SDT_VMOVhr>;
def arm_vmovrh : SDNode<"ARMISD::VMOVrh", SDT_VMOVrh>;

//===----------------------------------------------------------------------===//
// Operand Definitions.
//

// 8-bit floating-point immediate encodings.
def FPImmOperand : AsmOperandClass {
  let Name = "FPImm";
  let ParserMethod = "parseFPImm";
}

def vfp_f16imm : Operand<f16>,
                 PatLeaf<(f16 fpimm), [{
      return ARM_AM::getFP16Imm(N->getValueAPF()) != -1;
    }], SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP16Imm(InVal);
      return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
    }]>> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
}

def vfp_f32f16imm_xform : SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP32FP16Imm(InVal);
      return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
    }]>;

def vfp_f32f16imm : PatLeaf<(f32 fpimm), [{
      return ARM_AM::getFP32FP16Imm(N->getValueAPF()) != -1;
    }], vfp_f32f16imm_xform>;

def vfp_f32imm_xform : SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP32Imm(InVal);
      return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
    }]>;

def gi_vfp_f32imm : GICustomOperandRenderer<"renderVFPF32Imm">,
                    GISDNodeXFormEquiv<vfp_f32imm_xform>;

def vfp_f32imm : Operand<f32>,
                 PatLeaf<(f32 fpimm), [{
      return ARM_AM::getFP32Imm(N->getValueAPF()) != -1;
    }], vfp_f32imm_xform> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
  let GISelPredicateCode = [{
    const auto &MO = MI.getOperand(1);
    if (!MO.isFPImm())
      return false;
    return ARM_AM::getFP32Imm(MO.getFPImm()->getValueAPF()) != -1;
  }];
}

def vfp_f64imm_xform : SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP64Imm(InVal);
      return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
    }]>;

def gi_vfp_f64imm : GICustomOperandRenderer<"renderVFPF64Imm">,
                    GISDNodeXFormEquiv<vfp_f64imm_xform>;

def vfp_f64imm : Operand<f64>,
                 PatLeaf<(f64 fpimm), [{
      return ARM_AM::getFP64Imm(N->getValueAPF()) != -1;
    }], vfp_f64imm_xform> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
  let GISelPredicateCode = [{
    const auto &MO = MI.getOperand(1);
    if (!MO.isFPImm())
      return false;
    return ARM_AM::getFP64Imm(MO.getFPImm()->getValueAPF()) != -1;
  }];
}

def alignedload16 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 2;
}]>;

def alignedload32 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 4;
}]>;

def alignedstore16 : PatFrag<(ops node:$val, node:$ptr),
                             (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 2;
}]>;

def alignedstore32 : PatFrag<(ops node:$val, node:$ptr),
                             (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 4;
}]>;

// The VCVT to/from fixed-point instructions encode the 'fbits' operand
// (the number of fixed bits) differently than it appears in the assembly
// source. It's encoded as "Size - fbits" where Size is the size of the
// fixed-point representation (32 or 16) and fbits is the value appearing
// in the assembly source, an integer in [0,16] or (0,32], depending on size.
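// As a worked illustration of that rule (not an additional constraint):
// "vcvt.s32.f32 s0, s0, #16" has Size = 32 and fbits = 16, so the encoded
// field holds 32 - 16 = 16, while "vcvt.s16.f16 s0, s0, #7" has Size = 16 and
// fbits = 7, so the encoded field holds 16 - 7 = 9.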
def fbits32_asm_operand : AsmOperandClass { let Name = "FBits32"; }
def fbits32 : Operand<i32> {
  let PrintMethod = "printFBits32";
  let ParserMatchClass = fbits32_asm_operand;
}

def fbits16_asm_operand : AsmOperandClass { let Name = "FBits16"; }
def fbits16 : Operand<i32> {
  let PrintMethod = "printFBits16";
  let ParserMatchClass = fbits16_asm_operand;
}

//===----------------------------------------------------------------------===//
// Load / store Instructions.
//

let canFoldAsLoad = 1, isReMaterializable = 1 in {

def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
                 IIC_fpLoad64, "vldr", "\t$Dd, $addr",
                 [(set DPR:$Dd, (f64 (alignedload32 addrmode5:$addr)))]>,
            Requires<[HasFPRegs]>;

def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
                 IIC_fpLoad32, "vldr", "\t$Sd, $addr",
                 [(set SPR:$Sd, (alignedload32 addrmode5:$addr))]>,
            Requires<[HasFPRegs]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

let isUnpredicable = 1 in
def VLDRH : AHI5<0b1101, 0b01, (outs HPR:$Sd), (ins addrmode5fp16:$addr),
                 IIC_fpLoad16, "vldr", ".16\t$Sd, $addr",
                 [(set HPR:$Sd, (f16 (alignedload16 addrmode5fp16:$addr)))]>,
            Requires<[HasFPRegs16]>;

} // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'

def : Pat<(bf16 (alignedload16 addrmode5fp16:$addr)),
          (VLDRH addrmode5fp16:$addr)> {
  let Predicates = [HasFPRegs16];
}
def : Pat<(bf16 (alignedload16 addrmode3:$addr)),
          (COPY_TO_REGCLASS (LDRH addrmode3:$addr), HPR)> {
  let Predicates = [HasNoFPRegs16, IsARM];
}
def : Pat<(bf16 (alignedload16 t2addrmode_imm12:$addr)),
          (COPY_TO_REGCLASS (t2LDRHi12 t2addrmode_imm12:$addr), HPR)> {
  let Predicates = [HasNoFPRegs16, IsThumb];
}

def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
                 IIC_fpStore64, "vstr", "\t$Dd, $addr",
                 [(alignedstore32 (f64 DPR:$Dd), addrmode5:$addr)]>,
            Requires<[HasFPRegs]>;

def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
                 IIC_fpStore32, "vstr", "\t$Sd, $addr",
                 [(alignedstore32 SPR:$Sd, addrmode5:$addr)]>,
            Requires<[HasFPRegs]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

let isUnpredicable = 1 in
def VSTRH : AHI5<0b1101, 0b00, (outs), (ins HPR:$Sd, addrmode5fp16:$addr),
                 IIC_fpStore16, "vstr", ".16\t$Sd, $addr",
                 [(alignedstore16 (f16 HPR:$Sd), addrmode5fp16:$addr)]>,
            Requires<[HasFPRegs16]>;

def : Pat<(alignedstore16 (bf16 HPR:$Sd), addrmode5fp16:$addr),
          (VSTRH (bf16 HPR:$Sd), addrmode5fp16:$addr)> {
  let Predicates = [HasFPRegs16];
}
def : Pat<(alignedstore16 (bf16 HPR:$Sd), addrmode3:$addr),
          (STRH (COPY_TO_REGCLASS $Sd, GPR), addrmode3:$addr)> {
  let Predicates = [HasNoFPRegs16, IsARM];
}
def : Pat<(alignedstore16 (bf16 HPR:$Sd), t2addrmode_imm12:$addr),
          (t2STRHi12 (COPY_TO_REGCLASS $Sd, GPR), t2addrmode_imm12:$addr)> {
  let Predicates = [HasNoFPRegs16, IsThumb];
}

//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
//

multiclass vfp_ldst_mult<string asm, bit L_bit,
                         InstrItinClass itin, InstrItinClass itin_upd> {
  let Predicates = [HasFPRegs] in {
  // Double Precision
  def DIA :
    AXDI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;
  }
  def DIA_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }
  def DDB_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }

  // Single Precision
  def SIA :
    AXSI4<(outs), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SIA_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SDB_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  }
}

let hasSideEffects = 0 in {

let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;

let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpStore_m, IIC_fpStore_mu>;

} // hasSideEffects

def : MnemonicAlias<"vldm", "vldmia">;
def : MnemonicAlias<"vstm", "vstmia">;


//===----------------------------------------------------------------------===//
// Lazy load / store multiple Instructions
//
def VLLDM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
                  NoItinerary, "vlldm${p}\t$Rn", "", []>,
            Requires<[HasV8MMainline, Has8MSecExt]> {
  let Inst{24-23} = 0b00;
  let Inst{22} = 0;
  let Inst{21} = 1;
  let Inst{20} = 1;
  let Inst{15-12} = 0;
  let Inst{7-0} = 0;
  let mayLoad = 1;
  let Defs = [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, VPR, FPSCR, FPSCR_NZCV];
}

def VLSTM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
                  NoItinerary, "vlstm${p}\t$Rn", "", []>,
            Requires<[HasV8MMainline, Has8MSecExt]> {
  let Inst{24-23} = 0b00;
  let Inst{22} = 0;
  let Inst{21} = 1;
  let Inst{20} = 0;
  let Inst{15-12} = 0;
  let Inst{7-0} = 0;
  let mayStore = 1;
}

def : InstAlias<"vpush${p} $r", (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r), 0>,
      Requires<[HasFPRegs]>;
def : InstAlias<"vpush${p} $r", (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r), 0>,
      Requires<[HasFPRegs]>;
def : InstAlias<"vpop${p} $r",  (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r), 0>,
      Requires<[HasFPRegs]>;
def : InstAlias<"vpop${p} $r",  (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r), 0>,
      Requires<[HasFPRegs]>;
defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
                         (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
                         (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
                         (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
                         (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>;

// FLDMX, FSTMX - Load and store multiple unknown precision registers for
// pre-armv6 cores.
// These instructions are deprecated so we don't want them to get selected.
// However, there is no UAL syntax for them, so we keep them around for
// (dis)assembly only.
multiclass vfp_ldstx_mult<string asm, bit L_bit> {
  let Predicates = [HasFPRegs], hasNoSchedulingInfo = 1 in {
  // Unknown precision
  def XIA :
    AXXI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, !strconcat(asm, "iax${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;
  }
  def XIA_UPD :
    AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeUpd, !strconcat(asm, "iax${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }
  def XDB_UPD :
    AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeUpd, !strconcat(asm, "dbx${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }
  }
}

defm FLDM : vfp_ldstx_mult<"fldm", 1>;
defm FSTM : vfp_ldstx_mult<"fstm", 0>;

def : VFP2MnemonicAlias<"fldmeax", "fldmdbx">;
def : VFP2MnemonicAlias<"fldmfdx", "fldmiax">;

def : VFP2MnemonicAlias<"fstmeax", "fstmiax">;
def : VFP2MnemonicAlias<"fstmfdx", "fstmdbx">;

//===----------------------------------------------------------------------===//
// FP Binary Operations.
//

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VADDD  : ADbI<0b11100, 0b11, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>,
             Sched<[WriteFPALU64]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VADDS  : ASbIn<0b11100, 0b11, 0, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]>,
              Sched<[WriteFPALU32]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VADDH  : AHbI<0b11100, 0b11, 0, 0,
                  (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                  IIC_fpALU16, "vadd", ".f16\t$Sd, $Sn, $Sm",
                  [(set (f16 HPR:$Sd), (fadd (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
             Sched<[WriteFPALU32]>;

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VSUBD  : ADbI<0b11100, 0b11, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>,
             Sched<[WriteFPALU64]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VSUBS  : ASbIn<0b11100, 0b11, 1, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]>,
              Sched<[WriteFPALU32]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VSUBH  : AHbI<0b11100, 0b11, 1, 0,
                  (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                  IIC_fpALU16, "vsub", ".f16\t$Sd, $Sn, $Sm",
                  [(set (f16 HPR:$Sd), (fsub (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
             Sched<[WriteFPALU32]>;

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VDIVD  : ADbI<0b11101, 0b00, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>,
             Sched<[WriteFPDIV64]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VDIVS  : ASbI<0b11101, 0b00, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>,
             Sched<[WriteFPDIV32]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VDIVH  : AHbI<0b11101, 0b00, 0, 0,
                  (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                  IIC_fpDIV16, "vdiv", ".f16\t$Sd, $Sn, $Sm",
                  [(set (f16 HPR:$Sd), (fdiv (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
             Sched<[WriteFPDIV32]>;

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VMULD  : ADbI<0b11100, 0b10, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>,
             Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VMULS  : ASbIn<0b11100, 0b10, 0, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]>,
              Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VMULH  : AHbI<0b11100, 0b10, 0, 0,
                  (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                  IIC_fpMUL16, "vmul", ".f16\t$Sd, $Sn, $Sm",
                  [(set (f16 HPR:$Sd), (fmul (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
             Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;

def VNMULD : ADbI<0b11100, 0b10, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>,
             Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;

def VNMULS : ASbI<0b11100, 0b10, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]>,
             Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VNMULH : AHbI<0b11100, 0b10, 1, 0,
                  (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                  IIC_fpMUL16, "vnmul", ".f16\t$Sd, $Sn, $Sm",
                  [(set (f16 HPR:$Sd), (fneg (fmul (f16 HPR:$Sn), (f16 HPR:$Sm))))]>,
             Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;

multiclass vsel_inst<string op, bits<2> opc, int CC> {
  let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
      Uses = [CPSR], AddedComplexity = 4, isUnpredicable = 1 in {
    def H : AHbInp<0b11100, opc, 0,
                   (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                   NoItinerary, !strconcat("vsel", op, ".f16\t$Sd, $Sn, $Sm"),
                   [(set (f16 HPR:$Sd), (ARMcmov (f16 HPR:$Sm), (f16 HPR:$Sn), CC))]>,
                   Requires<[HasFullFP16]>;

    def S : ASbInp<0b11100, opc, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   NoItinerary, !strconcat("vsel", op, ".f32\t$Sd, $Sn, $Sm"),
                   [(set SPR:$Sd, (ARMcmov SPR:$Sm, SPR:$Sn, CC))]>,
                   Requires<[HasFPARMv8]>;

    def D : ADbInp<0b11100, opc, 0,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   NoItinerary, !strconcat("vsel", op, ".f64\t$Dd, $Dn, $Dm"),
                   [(set DPR:$Dd, (ARMcmov (f64 DPR:$Dm), (f64 DPR:$Dn), CC))]>,
                   Requires<[HasFPARMv8, HasDPVFP]>;
  }
}

// The CC constants here match ARMCC::CondCodes.
defm VSELGT : vsel_inst<"gt", 0b11, 12>;
defm VSELGE : vsel_inst<"ge", 0b10, 10>;
defm VSELEQ : vsel_inst<"eq", 0b00, 0>;
defm VSELVS : vsel_inst<"vs", 0b01, 6>;

multiclass vmaxmin_inst<string op, bit opc, SDNode SD> {
  let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
      isUnpredicable = 1 in {
    def H : AHbInp<0b11101, 0b00, opc,
                   (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                   NoItinerary, !strconcat(op, ".f16\t$Sd, $Sn, $Sm"),
                   [(set (f16 HPR:$Sd), (SD (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
                   Requires<[HasFullFP16]>;

    def S : ASbInp<0b11101, 0b00, opc,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   NoItinerary, !strconcat(op, ".f32\t$Sd, $Sn, $Sm"),
                   [(set SPR:$Sd, (SD SPR:$Sn, SPR:$Sm))]>,
                   Requires<[HasFPARMv8]>;

    def D : ADbInp<0b11101, 0b00, opc,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   NoItinerary, !strconcat(op, ".f64\t$Dd, $Dn, $Dm"),
                   [(set DPR:$Dd, (f64 (SD (f64 DPR:$Dn), (f64 DPR:$Dm))))]>,
                   Requires<[HasFPARMv8, HasDPVFP]>;
  }
}

defm VFP_VMAXNM : vmaxmin_inst<"vmaxnm", 0, fmaxnum>;
defm VFP_VMINNM : vmaxmin_inst<"vminnm", 1, fminnum>;

// Match reassociated forms only if not sign dependent rounding.
def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
          (VNMULD DPR:$a, DPR:$b)>,
      Requires<[NoHonorSignDependentRounding,HasDPVFP]>;
def : Pat<(fmul (fneg SPR:$a), SPR:$b),
          (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;

// These are encoded as unary instructions.
let Defs = [FPSCR_NZCV] in {
def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins DPR:$Dd, DPR:$Dm),
                  IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm", "",
                  [(arm_cmpfpe DPR:$Dd, (f64 DPR:$Dm))]>;

def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins SPR:$Sd, SPR:$Sm),
                  IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm", "",
                  [(arm_cmpfpe SPR:$Sd, SPR:$Sm)]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPEH : AHuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins HPR:$Sd, HPR:$Sm),
                  IIC_fpCMP16, "vcmpe", ".f16\t$Sd, $Sm",
                  [(arm_cmpfpe (f16 HPR:$Sd), (f16 HPR:$Sm))]>;

def VCMPD  : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
                  (outs), (ins DPR:$Dd, DPR:$Dm),
                  IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm", "",
                  [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm))]>;

def VCMPS  : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
                  (outs), (ins SPR:$Sd, SPR:$Sm),
                  IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm", "",
                  [(arm_cmpfp SPR:$Sd, SPR:$Sm)]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPH  : AHuI<0b11101, 0b11, 0b0100, 0b01, 0,
                  (outs), (ins HPR:$Sd, HPR:$Sm),
                  IIC_fpCMP16, "vcmp", ".f16\t$Sd, $Sm",
                  [(arm_cmpfp (f16 HPR:$Sd), (f16 HPR:$Sm))]>;
} // Defs = [FPSCR_NZCV]

//===----------------------------------------------------------------------===//
// FP Unary Operations.
//

def VABSD  : ADuI<0b11101, 0b11, 0b0000, 0b11, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vabs", ".f64\t$Dd, $Dm", "",
                  [(set DPR:$Dd, (fabs (f64 DPR:$Dm)))]>;

def VABSS  : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   IIC_fpUNA32, "vabs", ".f32\t$Sd, $Sm",
                   [(set SPR:$Sd, (fabs SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VABSH  : AHuI<0b11101, 0b11, 0b0000, 0b11, 0,
                  (outs HPR:$Sd), (ins HPR:$Sm),
                  IIC_fpUNA16, "vabs", ".f16\t$Sd, $Sm",
                  [(set (f16 HPR:$Sd), (fabs (f16 HPR:$Sm)))]>;

let Defs = [FPSCR_NZCV] in {
def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins DPR:$Dd),
                   IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0", "",
                   [(arm_cmpfpe0 (f64 DPR:$Dd))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins SPR:$Sd),
                   IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0", "",
                   [(arm_cmpfpe0 SPR:$Sd)]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPEZH : AHuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins HPR:$Sd),
                   IIC_fpCMP16, "vcmpe", ".f16\t$Sd, #0",
                   [(arm_cmpfpe0 (f16 HPR:$Sd))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPZD  : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
                   (outs), (ins DPR:$Dd),
                   IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0", "",
                   [(arm_cmpfp0 (f64 DPR:$Dd))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPZS  : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
                   (outs), (ins SPR:$Sd),
                   IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0", "",
                   [(arm_cmpfp0 SPR:$Sd)]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPZH  : AHuI<0b11101, 0b11, 0b0101, 0b01, 0,
                   (outs), (ins HPR:$Sd),
                   IIC_fpCMP16, "vcmp", ".f16\t$Sd, #0",
                   [(arm_cmpfp0 (f16 HPR:$Sd))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}
} // Defs = [FPSCR_NZCV]

def VCVTDS  : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm", "",
                   [(set DPR:$Dd, (fpextend SPR:$Sm))]>,
              Sched<[WriteFPCVT]> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};

  let Predicates = [HasVFP2, HasDPVFP];
  let hasSideEffects = 0;
}

// Special case encoding: bits 11-8 is 0b1011.
def VCVTSD  : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
                    IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm", "",
                    [(set SPR:$Sd, (fpround DPR:$Dm))]>,
              Sched<[WriteFPCVT]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let Inst{27-23} = 0b11101;
  let Inst{21-16} = 0b110111;
  let Inst{11-8}  = 0b1011;
  let Inst{7-6}   = 0b11;
  let Inst{4}     = 0;

  let Predicates = [HasVFP2, HasDPVFP];
  let hasSideEffects = 0;
}

// Between half, single and double-precision.
let hasSideEffects = 0 in
def VCVTBHS: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                  /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$Sd, $Sm", "",
                  [/* Intentionally left blank, see patterns below */]>,
                  Requires<[HasFP16]>,
                  Sched<[WriteFPCVT]>;

def : FP16Pat<(f32 (fpextend (f16 HPR:$Sm))),
              (VCVTBHS (COPY_TO_REGCLASS (f16 HPR:$Sm), SPR))>;
def : FP16Pat<(f16_to_fp GPR:$a),
              (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;

let hasSideEffects = 0 in
def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sda, SPR:$Sm),
                  /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$Sd, $Sm", "$Sd = $Sda",
                  [/* Intentionally left blank, see patterns below */]>,
                  Requires<[HasFP16]>,
                  Sched<[WriteFPCVT]>;

def : FP16Pat<(f16 (fpround SPR:$Sm)),
              (COPY_TO_REGCLASS (VCVTBSH (IMPLICIT_DEF), SPR:$Sm), HPR)>;
def : FP16Pat<(fp_to_f16 SPR:$a),
              (i32 (COPY_TO_REGCLASS (VCVTBSH (IMPLICIT_DEF), SPR:$a), GPR))>;
def : FP16Pat<(insertelt (v8f16 MQPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_even:$lane),
              (v8f16 (INSERT_SUBREG (v8f16 MQPR:$src1),
                         (VCVTBSH (EXTRACT_SUBREG (v8f16 MQPR:$src1), (SSubReg_f16_reg imm:$lane)),
                                  SPR:$src2),
                         (SSubReg_f16_reg imm:$lane)))>;
def : FP16Pat<(insertelt (v4f16 DPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_even:$lane),
              (v4f16 (INSERT_SUBREG (v4f16 DPR:$src1),
                         (VCVTBSH (EXTRACT_SUBREG (v4f16 DPR:$src1), (SSubReg_f16_reg imm:$lane)),
                                  SPR:$src2),
                         (SSubReg_f16_reg imm:$lane)))>;

let hasSideEffects = 0 in
def VCVTTHS: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                  /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$Sd, $Sm", "",
                  [/* Intentionally left blank, see patterns below */]>,
                  Requires<[HasFP16]>,
                  Sched<[WriteFPCVT]>;

def : FP16Pat<(f32 (fpextend (extractelt (v8f16 MQPR:$src), imm_odd:$lane))),
              (VCVTTHS (EXTRACT_SUBREG MQPR:$src, (SSubReg_f16_reg imm_odd:$lane)))>;
def : FP16Pat<(f32 (fpextend (extractelt
                                  (v4f16 DPR:$src), imm_odd:$lane))),
              (VCVTTHS (EXTRACT_SUBREG
                           (v2f32 (COPY_TO_REGCLASS (v4f16 DPR:$src), DPR_VFP2)),
                           (SSubReg_f16_reg imm_odd:$lane)))>;

let hasSideEffects = 0 in
def VCVTTSH: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sda, SPR:$Sm),
                  /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$Sd, $Sm", "$Sd = $Sda",
                  [/* Intentionally left blank, see patterns below */]>,
                  Requires<[HasFP16]>,
                  Sched<[WriteFPCVT]>;

def : FP16Pat<(insertelt (v8f16 MQPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_odd:$lane),
              (v8f16 (INSERT_SUBREG (v8f16 MQPR:$src1),
                         (VCVTTSH (EXTRACT_SUBREG (v8f16 MQPR:$src1), (SSubReg_f16_reg imm:$lane)),
                                  SPR:$src2),
                         (SSubReg_f16_reg imm:$lane)))>;
def : FP16Pat<(insertelt (v4f16 DPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_odd:$lane),
              (v4f16 (INSERT_SUBREG (v4f16 DPR:$src1),
                         (VCVTTSH (EXTRACT_SUBREG (v4f16 DPR:$src1), (SSubReg_f16_reg imm:$lane)),
                                  SPR:$src2),
                         (SSubReg_f16_reg imm:$lane)))>;

def VCVTBHD : ADuI<0b11101, 0b11, 0b0010, 0b01, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   NoItinerary, "vcvtb", ".f64.f16\t$Dd, $Sm", "",
                   [/* Intentionally left blank, see patterns below */]>,
                   Requires<[HasFPARMv8, HasDPVFP]>,
                   Sched<[WriteFPCVT]> {
  // Instruction operands.
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5}   = Sm{0};

  let hasSideEffects = 0;
}

def : FullFP16Pat<(f64 (fpextend (f16 HPR:$Sm))),
                  (VCVTBHD (COPY_TO_REGCLASS (f16 HPR:$Sm), SPR))>,
                  Requires<[HasFPARMv8, HasDPVFP]>;
def : FP16Pat<(f64 (f16_to_fp GPR:$a)),
              (VCVTBHD (COPY_TO_REGCLASS GPR:$a, SPR))>,
              Requires<[HasFPARMv8, HasDPVFP]>;

def VCVTBDH : ADuI<0b11101, 0b11, 0b0011, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sda, DPR:$Dm),
                   NoItinerary, "vcvtb", ".f16.f64\t$Sd, $Dm", "$Sd = $Sda",
                   [/* Intentionally left blank, see patterns below */]>,
                   Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let hasSideEffects = 0;
}

def : FullFP16Pat<(f16 (fpround DPR:$Dm)),
                  (COPY_TO_REGCLASS (VCVTBDH (IMPLICIT_DEF), DPR:$Dm), HPR)>,
                  Requires<[HasFPARMv8, HasDPVFP]>;
def : FP16Pat<(fp_to_f16 (f64 DPR:$a)),
              (i32 (COPY_TO_REGCLASS (VCVTBDH (IMPLICIT_DEF), DPR:$a), GPR))>,
              Requires<[HasFPARMv8, HasDPVFP]>;

def VCVTTHD : ADuI<0b11101, 0b11, 0b0010, 0b11, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   NoItinerary, "vcvtt", ".f64.f16\t$Dd, $Sm", "",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5}   = Sm{0};

  let hasSideEffects = 0;
}

def VCVTTDH : ADuI<0b11101, 0b11, 0b0011, 0b11, 0,
                   (outs SPR:$Sd), (ins SPR:$Sda, DPR:$Dm),
                   NoItinerary, "vcvtt", ".f16.f64\t$Sd, $Dm", "$Sd = $Sda",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};

  let hasSideEffects = 0;
}

multiclass vcvt_inst<string opc, bits<2> rm,
                     SDPatternOperator node = null_frag> {
  let PostEncoderMethod = "", DecoderNamespace = "VFPV8", hasSideEffects = 0 in {
    def SH : AHuInp<0b11101, 0b11, 0b1100, 0b11, 0,
                    (outs SPR:$Sd), (ins HPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".s32.f16\t$Sd, $Sm"),
                    []>,
                    Requires<[HasFullFP16]> {
      let Inst{17-16} = rm;
    }

    def UH : AHuInp<0b11101, 0b11, 0b1100, 0b01, 0,
                    (outs SPR:$Sd), (ins HPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".u32.f16\t$Sd, $Sm"),
                    []>,
                    Requires<[HasFullFP16]> {
      let Inst{17-16} = rm;
    }

    def SS : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
                    (outs SPR:$Sd), (ins SPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".s32.f32\t$Sd, $Sm"),
                    []>,
                    Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }

    def US : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
                    (outs SPR:$Sd), (ins SPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".u32.f32\t$Sd, $Sm"),
                    []>,
                    Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }

    def SD : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
                    (outs SPR:$Sd), (ins DPR:$Dm),
                    NoItinerary, !strconcat("vcvt", opc, ".s32.f64\t$Sd, $Dm"),
                    []>,
                    Requires<[HasFPARMv8, HasDPVFP]> {
      bits<5> Dm;

      let Inst{17-16} = rm;

      // Encode instruction operands.
      let Inst{3-0} = Dm{3-0};
      let Inst{5}   = Dm{4};
      let Inst{8}   = 1;
    }

    def UD : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
                    (outs SPR:$Sd), (ins DPR:$Dm),
                    NoItinerary, !strconcat("vcvt", opc, ".u32.f64\t$Sd, $Dm"),
                    []>,
                    Requires<[HasFPARMv8, HasDPVFP]> {
      bits<5> Dm;

      let Inst{17-16} = rm;

      // Encode instruction operands
      let Inst{3-0} = Dm{3-0};
      let Inst{5}   = Dm{4};
      let Inst{8}   = 1;
    }
  }

  let Predicates = [HasFPARMv8] in {
    let Predicates = [HasFullFP16] in {
    def : Pat<(i32 (fp_to_sint (node (f16 HPR:$a)))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"SH") (f16 HPR:$a)),
                GPR)>;

    def : Pat<(i32 (fp_to_uint (node (f16 HPR:$a)))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"UH") (f16 HPR:$a)),
                GPR)>;
    }
    def : Pat<(i32 (fp_to_sint (node SPR:$a))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"SS") SPR:$a),
                GPR)>;
    def : Pat<(i32 (fp_to_uint (node SPR:$a))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"US") SPR:$a),
                GPR)>;
  }
  let Predicates = [HasFPARMv8, HasDPVFP] in {
    def : Pat<(i32 (fp_to_sint (node (f64 DPR:$a)))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"SD") DPR:$a),
                GPR)>;
    def : Pat<(i32 (fp_to_uint (node (f64 DPR:$a)))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"UD") DPR:$a),
                GPR)>;
  }
}

defm VCVTA : vcvt_inst<"a", 0b00, fround>;
defm VCVTN : vcvt_inst<"n", 0b01>;
defm VCVTP : vcvt_inst<"p", 0b10, fceil>;
defm VCVTM : vcvt_inst<"m", 0b11, ffloor>;

def VNEGD  : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm", "",
                  [(set DPR:$Dd, (fneg (f64 DPR:$Dm)))]>;

def VNEGS  : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
                   [(set SPR:$Sd, (fneg SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VNEGH  : AHuI<0b11101, 0b11, 0b0001, 0b01, 0,
                  (outs HPR:$Sd), (ins HPR:$Sm),
                  IIC_fpUNA16, "vneg", ".f16\t$Sd, $Sm",
                  [(set (f16 HPR:$Sd), (fneg (f16 HPR:$Sm)))]>;

multiclass vrint_inst_zrx<string opc, bit op, bit op2, SDPatternOperator node> {
  def H : AHuI<0b11101, 0b11, 0b0110, 0b11, 0,
               (outs HPR:$Sd), (ins HPR:$Sm),
               NoItinerary, !strconcat("vrint", opc), ".f16\t$Sd, $Sm",
               [(set (f16 HPR:$Sd), (node (f16 HPR:$Sm)))]>,
               Requires<[HasFullFP16]> {
    let Inst{7} = op2;
    let Inst{16} = op;
  }

  def S : ASuI<0b11101, 0b11, 0b0110, 0b11, 0,
               (outs SPR:$Sd), (ins SPR:$Sm),
               NoItinerary, !strconcat("vrint", opc), ".f32\t$Sd, $Sm", "",
               [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
               Requires<[HasFPARMv8]> {
    let Inst{7} = op2;
    let Inst{16} = op;
  }
  def D : ADuI<0b11101, 0b11, 0b0110, 0b11, 0,
               (outs DPR:$Dd), (ins DPR:$Dm),
               NoItinerary, !strconcat("vrint", opc), ".f64\t$Dd, $Dm", "",
               [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
               Requires<[HasFPARMv8, HasDPVFP]> {
    let Inst{7} = op2;
    let Inst{16} = op;
  }

  def : InstAlias<!strconcat("vrint", opc, "$p.f16.f16\t$Sd, $Sm"),
                  (!cast<Instruction>(NAME#"H") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
        Requires<[HasFullFP16]>;
  def : InstAlias<!strconcat("vrint", opc, "$p.f32.f32\t$Sd, $Sm"),
                  (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
        Requires<[HasFPARMv8]>;
  def : InstAlias<!strconcat("vrint", opc, "$p.f64.f64\t$Dd, $Dm"),
                  (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm, pred:$p), 0>,
        Requires<[HasFPARMv8,HasDPVFP]>;
}

defm VRINTZ : vrint_inst_zrx<"z", 0, 1, ftrunc>;
defm VRINTR : vrint_inst_zrx<"r", 0, 0, fnearbyint>;
defm VRINTX : vrint_inst_zrx<"x", 1, 0, frint>;

multiclass vrint_inst_anpm<string opc, bits<2> rm,
                           SDPatternOperator node = null_frag> {
  let PostEncoderMethod = "", DecoderNamespace = "VFPV8",
      isUnpredicable = 1 in {
    def H : AHuInp<0b11101, 0b11, 0b1000, 0b01, 0,
                   (outs HPR:$Sd), (ins HPR:$Sm),
                   NoItinerary, !strconcat("vrint", opc, ".f16\t$Sd, $Sm"),
                   [(set (f16 HPR:$Sd), (node (f16 HPR:$Sm)))]>,
                   Requires<[HasFullFP16]> {
      let Inst{17-16} = rm;
    }
    def S : ASuInp<0b11101, 0b11, 0b1000, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   NoItinerary, !strconcat("vrint", opc, ".f32\t$Sd, $Sm"),
                   [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
                   Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }
    def D : ADuInp<0b11101, 0b11, 0b1000, 0b01, 0,
                   (outs DPR:$Dd), (ins DPR:$Dm),
                   NoItinerary, !strconcat("vrint", opc, ".f64\t$Dd, $Dm"),
                   [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
                   Requires<[HasFPARMv8, HasDPVFP]> {
      let Inst{17-16} = rm;
    }
  }

  def : InstAlias<!strconcat("vrint", opc, ".f16.f16\t$Sd, $Sm"),
                  (!cast<Instruction>(NAME#"H") HPR:$Sd, HPR:$Sm), 0>,
        Requires<[HasFullFP16]>;
  def : InstAlias<!strconcat("vrint", opc, ".f32.f32\t$Sd, $Sm"),
                  (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm), 0>,
        Requires<[HasFPARMv8]>;
  def : InstAlias<!strconcat("vrint", opc, ".f64.f64\t$Dd, $Dm"),
                  (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm), 0>,
        Requires<[HasFPARMv8,HasDPVFP]>;
}

defm VRINTA : vrint_inst_anpm<"a", 0b00, fround>;
defm VRINTN : vrint_inst_anpm<"n", 0b01, int_arm_neon_vrintn>;
defm VRINTP : vrint_inst_anpm<"p", 0b10, fceil>;
defm VRINTM : vrint_inst_anpm<"m", 0b11, ffloor>;

def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm", "",
                  [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>,
             Sched<[WriteFPSQRT64]>;

def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm", "",
                  [(set SPR:$Sd, (fsqrt SPR:$Sm))]>,
             Sched<[WriteFPSQRT32]>;

def VSQRTH : AHuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs HPR:$Sd), (ins HPR:$Sm),
                  IIC_fpSQRT16, "vsqrt", ".f16\t$Sd, $Sm",
                  [(set (f16 HPR:$Sd), (fsqrt (f16 HPR:$Sm)))]>;

let hasSideEffects = 0 in {
let isMoveReg = 1 in {
def VMOVD  : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", "", []>,
             Requires<[HasFPRegs64]>;

def VMOVS  : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", "", []>,
             Requires<[HasFPRegs]>;
} // isMoveReg

let PostEncoderMethod = "", DecoderNamespace = "VFPV8", isUnpredicable = 1 in {
def VMOVH  : ASuInp<0b11101, 0b11, 0b0000, 0b01, 0,
                    (outs SPR:$Sd), (ins SPR:$Sm),
                    IIC_fpUNA16, "vmovx.f16\t$Sd, $Sm", []>,
             Requires<[HasFullFP16]>;

def VINSH  : ASuInp<0b11101, 0b11, 0b0000, 0b11, 0,
                    (outs SPR:$Sd), (ins SPR:$Sda, SPR:$Sm),
                    IIC_fpUNA16, "vins.f16\t$Sd, $Sm", []>,
             Requires<[HasFullFP16]> {
  let Constraints = "$Sd = $Sda";
}

} // PostEncoderMethod
} // hasSideEffects

//===----------------------------------------------------------------------===//
// FP <-> GPR Copies. Int <-> FP Conversions.
//

let isMoveReg = 1 in {
def VMOVRS : AVConv2I<0b11100001, 0b1010,
                      (outs GPR:$Rt), (ins SPR:$Sn),
                      IIC_fpMOVSI, "vmov", "\t$Rt, $Sn",
                      [(set GPR:$Rt, (bitconvert SPR:$Sn))]>,
             Requires<[HasFPRegs]>,
             Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<4> Rt;
  bits<5> Sn;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

// Bitcast i32 -> f32. NEON prefers to use VMOVDRR.
def VMOVSR : AVConv4I<0b11100000, 0b1010,
                      (outs SPR:$Sn), (ins GPR:$Rt),
                      IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
                      [(set SPR:$Sn, (bitconvert GPR:$Rt))]>,
             Requires<[HasFPRegs, UseVMOVSR]>,
             Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<5> Sn;
  bits<4> Rt;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}
} // isMoveReg
def : Pat<(arm_vmovsr GPR:$Rt), (VMOVSR GPR:$Rt)>, Requires<[HasVFP2, UseVMOVSR]>;

let hasSideEffects = 0 in {
def VMOVRRD  : AVConv3I<0b11000101, 0b1011,
                        (outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
                        IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
                        [(set GPR:$Rt, GPR:$Rt2, (arm_fmrrd DPR:$Dm))]>,
               Requires<[HasFPRegs]>,
               Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<5> Dm;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;

  // This instruction is equivalent to
  // $Rt = EXTRACT_SUBREG $Dm, ssub_0
  // $Rt2 = EXTRACT_SUBREG $Dm, ssub_1
  let isExtractSubreg = 1;
}

def VMOVRRS  : AVConv3I<0b11000101, 0b1010,
                        (outs GPR:$Rt, GPR:$Rt2), (ins SPR:$src1, SPR:$src2),
                        IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $src1, $src2",
                        [/* For disassembly only; pattern left blank */]>,
               Requires<[HasFPRegs]>,
               Sched<[WriteFPMOV]> {
  bits<5> src1;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = src1{4-1};
  let Inst{5}     = src1{0};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
  let DecoderMethod = "DecodeVMOVRRS";
}
} // hasSideEffects

// FMDHR: GPR -> SPR
// FMDLR: GPR -> SPR

def VMOVDRR : AVConv5I<0b11000100, 0b1011,
                       (outs DPR:$Dm), (ins GPR:$Rt, GPR:$Rt2),
                       IIC_fpMOVID, "vmov", "\t$Dm, $Rt, $Rt2",
                       [(set DPR:$Dm, (arm_fmdrr GPR:$Rt, GPR:$Rt2))]>,
              Requires<[HasFPRegs]>,
              Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<5> Dm;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6}   = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;

  // This instruction is equivalent to
  // $Dm = REG_SEQUENCE $Rt, ssub_0, $Rt2, ssub_1
  let isRegSequence = 1;
}

// Hoist an fabs or a fneg of a value coming from integer registers
// and do the fabs/fneg on the integer value. This is never a loss
// and could enable the conversion to float to be removed completely.
def : Pat<(fabs (arm_fmdrr GPR:$Rl, GPR:$Rh)),
          (VMOVDRR GPR:$Rl, (BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
      Requires<[IsARM, HasV6T2]>;
def : Pat<(fabs (arm_fmdrr GPR:$Rl, GPR:$Rh)),
          (VMOVDRR GPR:$Rl, (t2BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
      Requires<[IsThumb2, HasV6T2]>;
def : Pat<(fneg (arm_fmdrr GPR:$Rl, GPR:$Rh)),
          (VMOVDRR GPR:$Rl, (EORri GPR:$Rh, (i32 0x80000000)))>,
      Requires<[IsARM]>;
def : Pat<(fneg (arm_fmdrr GPR:$Rl, GPR:$Rh)),
          (VMOVDRR GPR:$Rl, (t2EORri GPR:$Rh, (i32 0x80000000)))>,
      Requires<[IsThumb2]>;

let hasSideEffects = 0 in
def VMOVSRR : AVConv5I<0b11000100, 0b1010,
                       (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
                       IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
                       [/* For disassembly only; pattern left blank */]>,
              Requires<[HasFPRegs]>,
              Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<5> dst1;
  bits<4> src1;
  bits<4> src2;

  // Encode instruction operands.
  let Inst{3-0}   = dst1{4-1};
  let Inst{5}     = dst1{0};
  let Inst{15-12} = src1;
  let Inst{19-16} = src2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;

  let DecoderMethod = "DecodeVMOVSRR";
}

// Move H->R, clearing top 16 bits
def VMOVRH : AVConv2I<0b11100001, 0b1001,
                      (outs rGPR:$Rt), (ins HPR:$Sn),
                      IIC_fpMOVSI, "vmov", ".f16\t$Rt, $Sn",
                      []>,
             Requires<[HasFPRegs16]>,
             Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<4> Rt;
  bits<5> Sn;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;

  let isUnpredicable = 1;
}

// Move R->H, clearing top 16 bits
def VMOVHR : AVConv4I<0b11100000, 0b1001,
                      (outs HPR:$Sn), (ins rGPR:$Rt),
                      IIC_fpMOVIS, "vmov", ".f16\t$Sn, $Rt",
                      []>,
             Requires<[HasFPRegs16]>,
             Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<5> Sn;
  bits<4> Rt;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;

  let isUnpredicable = 1;
}

def : FPRegs16Pat<(arm_vmovrh (f16 HPR:$Sn)), (VMOVRH (f16 HPR:$Sn))>;
def : FPRegs16Pat<(arm_vmovrh (bf16 HPR:$Sn)), (VMOVRH (bf16 HPR:$Sn))>;
def : FPRegs16Pat<(f16 (arm_vmovhr rGPR:$Rt)), (VMOVHR rGPR:$Rt)>;
def : FPRegs16Pat<(bf16 (arm_vmovhr rGPR:$Rt)), (VMOVHR rGPR:$Rt)>;

// FMRDH: SPR -> GPR
// FMRDL: SPR -> GPR
// FMRRS: SPR -> GPR
// FMRX:  SPR system reg -> GPR
// FMSRR: GPR -> SPR
// FMXR:  GPR -> VFP system reg


// Int -> FP:

class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};

  let Predicates = [HasVFP2, HasDPVFP];
  let hasSideEffects = 0;
}

class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                         bits<4> opcod4, dag oops, dag iops, InstrItinClass itin,
                         string opc, string asm, list<dag> pattern>
  : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
              pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let hasSideEffects = 0;
}

class AVConv1IHs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let Predicates = [HasFullFP16];
  let hasSideEffects = 0;
}

def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
                               (outs DPR:$Dd), (ins SPR:$Sm),
                               IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
                               []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // s32
}

let Predicates=[HasVFP2, HasDPVFP] in {
  def : VFPPat<(f64 (sint_to_fp GPR:$a)),
               (VSITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;

  def : VFPPat<(f64 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
               (VSITOD (VLDRS addrmode5:$a))>;
}

def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
                                (outs SPR:$Sd),(ins SPR:$Sm),
                                IIC_fpCVTIS, "vcvt", ".f32.s32\t$Sd, $Sm",
                                []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // s32

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : VFPNoNEONPat<(f32 (sint_to_fp GPR:$a)),
                   (VSITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;

def : VFPNoNEONPat<(f32 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
                   (VSITOS (VLDRS addrmode5:$a))>;

def VSITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
                               (outs HPR:$Sd), (ins SPR:$Sm),
                               IIC_fpCVTIH, "vcvt", ".f16.s32\t$Sd, $Sm",
                               []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // s32
  let isUnpredicable = 1;
}

def : VFPNoNEONPat<(f16 (sint_to_fp GPR:$a)),
                   (VSITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;

def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
                               (outs DPR:$Dd), (ins SPR:$Sm),
                               IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
                               []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // u32
}

let Predicates=[HasVFP2, HasDPVFP] in {
  def : VFPPat<(f64 (uint_to_fp GPR:$a)),
               (VUITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;

  def : VFPPat<(f64 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
               (VUITOD (VLDRS addrmode5:$a))>;
}

def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
                                (outs SPR:$Sd), (ins SPR:$Sm),
                                IIC_fpCVTIS, "vcvt", ".f32.u32\t$Sd, $Sm",
                                []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // u32

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : VFPNoNEONPat<(f32 (uint_to_fp GPR:$a)),
                   (VUITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;

def : VFPNoNEONPat<(f32 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
                   (VUITOS (VLDRS addrmode5:$a))>;

def VUITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
                               (outs HPR:$Sd), (ins SPR:$Sm),
                               IIC_fpCVTIH, "vcvt", ".f16.u32\t$Sd, $Sm",
                               []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // u32
  let isUnpredicable = 1;
}

def : VFPNoNEONPat<(f16 (uint_to_fp GPR:$a)),
                   (VUITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;

// FP -> Int:

class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let Predicates = [HasVFP2, HasDPVFP];
  let hasSideEffects = 0;
}

class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                         bits<4> opcod4, dag oops, dag iops,
                         InstrItinClass itin, string opc, string asm,
                         list<dag> pattern>
  : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
              pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let hasSideEffects = 0;
}

class AVConv1IsH_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let Predicates = [HasFullFP16];
  let hasSideEffects = 0;
}

// Always set Z bit in the instruction, i.e. "round towards zero" variants.
def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
                                []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // Z bit
}

let Predicates=[HasVFP2, HasDPVFP] in {
  def : VFPPat<(i32 (fp_to_sint (f64 DPR:$a))),
               (COPY_TO_REGCLASS (VTOSIZD DPR:$a), GPR)>;
  def : VFPPat<(i32 (fp_to_sint_sat (f64 DPR:$a), i32)),
               (COPY_TO_REGCLASS (VTOSIZD DPR:$a), GPR)>;

  def : VFPPat<(alignedstore32 (i32 (fp_to_sint (f64 DPR:$a))), addrmode5:$ptr),
               (VSTRS (VTOSIZD DPR:$a), addrmode5:$ptr)>;
  def : VFPPat<(alignedstore32 (i32 (fp_to_sint_sat (f64 DPR:$a), i32)), addrmode5:$ptr),
               (VSTRS (VTOSIZD DPR:$a), addrmode5:$ptr)>;
}

def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
                                 []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // Z bit

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : VFPNoNEONPat<(i32 (fp_to_sint SPR:$a)),
                   (COPY_TO_REGCLASS (VTOSIZS SPR:$a), GPR)>;
def : VFPPat<(i32 (fp_to_sint_sat SPR:$a, i32)),
             (COPY_TO_REGCLASS (VTOSIZS SPR:$a), GPR)>;

def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_sint (f32 SPR:$a))),
                                   addrmode5:$ptr),
                   (VSTRS (VTOSIZS SPR:$a), addrmode5:$ptr)>;
def : VFPPat<(alignedstore32 (i32 (fp_to_sint_sat (f32 SPR:$a), i32)),
                             addrmode5:$ptr),
             (VSTRS (VTOSIZS SPR:$a), addrmode5:$ptr)>;

def VTOSIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
                                (outs SPR:$Sd), (ins HPR:$Sm),
                                IIC_fpCVTHI, "vcvt", ".s32.f16\t$Sd, $Sm",
                                []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // Z bit
  let isUnpredicable = 1;
}

def : VFPNoNEONPat<(i32 (fp_to_sint (f16 HPR:$a))),
                   (COPY_TO_REGCLASS (VTOSIZH (f16 HPR:$a)), GPR)>;
def : VFPPat<(i32 (fp_to_sint_sat (f16 HPR:$a), i32)),
             (COPY_TO_REGCLASS (VTOSIZH (f16 HPR:$a)), GPR)>;

def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
                                []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // Z bit
}

let Predicates=[HasVFP2, HasDPVFP] in {
  def : VFPPat<(i32 (fp_to_uint (f64 DPR:$a))),
               (COPY_TO_REGCLASS (VTOUIZD DPR:$a), GPR)>;
  def : VFPPat<(i32 (fp_to_uint_sat (f64 DPR:$a), i32)),
               (COPY_TO_REGCLASS (VTOUIZD DPR:$a), GPR)>;

  def : VFPPat<(alignedstore32 (i32 (fp_to_uint (f64 DPR:$a))), addrmode5:$ptr),
               (VSTRS (VTOUIZD DPR:$a), addrmode5:$ptr)>;
  def : VFPPat<(alignedstore32 (i32 (fp_to_uint_sat (f64 DPR:$a), i32)), addrmode5:$ptr),
               (VSTRS (VTOUIZD DPR:$a), addrmode5:$ptr)>;
}

def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
                                 []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // Z bit

  // Some single precision VFP instructions may be executed on both NEON and
and 1689 // VFP pipelines on A8. 1690 let D = VFPNeonA8Domain; 1691} 1692 1693def : VFPNoNEONPat<(i32 (fp_to_uint SPR:$a)), 1694 (COPY_TO_REGCLASS (VTOUIZS SPR:$a), GPR)>; 1695def : VFPPat<(i32 (fp_to_uint_sat SPR:$a, i32)), 1696 (COPY_TO_REGCLASS (VTOUIZS SPR:$a), GPR)>; 1697 1698def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_uint (f32 SPR:$a))), 1699 addrmode5:$ptr), 1700 (VSTRS (VTOUIZS SPR:$a), addrmode5:$ptr)>; 1701def : VFPPat<(alignedstore32 (i32 (fp_to_uint_sat (f32 SPR:$a), i32)), 1702 addrmode5:$ptr), 1703 (VSTRS (VTOUIZS SPR:$a), addrmode5:$ptr)>; 1704 1705def VTOUIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001, 1706 (outs SPR:$Sd), (ins HPR:$Sm), 1707 IIC_fpCVTHI, "vcvt", ".u32.f16\t$Sd, $Sm", 1708 []>, 1709 Sched<[WriteFPCVT]> { 1710 let Inst{7} = 1; // Z bit 1711 let isUnpredicable = 1; 1712} 1713 1714def : VFPNoNEONPat<(i32 (fp_to_uint (f16 HPR:$a))), 1715 (COPY_TO_REGCLASS (VTOUIZH (f16 HPR:$a)), GPR)>; 1716def : VFPPat<(i32 (fp_to_uint_sat (f16 HPR:$a), i32)), 1717 (COPY_TO_REGCLASS (VTOUIZH (f16 HPR:$a)), GPR)>; 1718 1719// And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR. 1720let Uses = [FPSCR] in { 1721def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011, 1722 (outs SPR:$Sd), (ins DPR:$Dm), 1723 IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm", 1724 [(set SPR:$Sd, (int_arm_vcvtr (f64 DPR:$Dm)))]>, 1725 Sched<[WriteFPCVT]> { 1726 let Inst{7} = 0; // Z bit 1727} 1728 1729def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010, 1730 (outs SPR:$Sd), (ins SPR:$Sm), 1731 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$Sd, $Sm", 1732 [(set SPR:$Sd, (int_arm_vcvtr SPR:$Sm))]>, 1733 Sched<[WriteFPCVT]> { 1734 let Inst{7} = 0; // Z bit 1735} 1736 1737def VTOSIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001, 1738 (outs SPR:$Sd), (ins SPR:$Sm), 1739 IIC_fpCVTHI, "vcvtr", ".s32.f16\t$Sd, $Sm", 1740 []>, 1741 Sched<[WriteFPCVT]> { 1742 let Inst{7} = 0; // Z bit 1743 let isUnpredicable = 1; 1744} 1745 1746def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011, 1747 (outs SPR:$Sd), (ins DPR:$Dm), 1748 IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm", 1749 [(set SPR:$Sd, (int_arm_vcvtru(f64 DPR:$Dm)))]>, 1750 Sched<[WriteFPCVT]> { 1751 let Inst{7} = 0; // Z bit 1752} 1753 1754def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010, 1755 (outs SPR:$Sd), (ins SPR:$Sm), 1756 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$Sd, $Sm", 1757 [(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]>, 1758 Sched<[WriteFPCVT]> { 1759 let Inst{7} = 0; // Z bit 1760} 1761 1762def VTOUIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001, 1763 (outs SPR:$Sd), (ins SPR:$Sm), 1764 IIC_fpCVTHI, "vcvtr", ".u32.f16\t$Sd, $Sm", 1765 []>, 1766 Sched<[WriteFPCVT]> { 1767 let Inst{7} = 0; // Z bit 1768 let isUnpredicable = 1; 1769} 1770} 1771 1772// v8.3-a Javascript Convert to Signed fixed-point 1773def VJCVT : AVConv1IsD_Encode<0b11101, 0b11, 0b1001, 0b1011, 1774 (outs SPR:$Sd), (ins DPR:$Dm), 1775 IIC_fpCVTDI, "vjcvt", ".s32.f64\t$Sd, $Dm", 1776 []>, 1777 Requires<[HasFPARMv8, HasV8_3a]> { 1778 let Inst{7} = 1; // Z bit 1779} 1780 1781// Convert between floating-point and fixed-point 1782// Data type for fixed-point naming convention: 1783// S16 (U=0, sx=0) -> SH 1784// U16 (U=1, sx=0) -> UH 1785// S32 (U=0, sx=1) -> SL 1786// U32 (U=1, sx=1) -> UL 1787 1788let Constraints = "$a = $dst" in { 1789 1790// FP to Fixed-Point: 1791 1792// Single Precision register 1793class AVConv1XInsS_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4, 1794 bit op5, dag oops, dag iops, InstrItinClass itin, 
1795 string opc, string asm, list<dag> pattern> 1796 : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> { 1797 bits<5> dst; 1798 // if dp_operation then UInt(D:Vd) else UInt(Vd:D); 1799 let Inst{22} = dst{0}; 1800 let Inst{15-12} = dst{4-1}; 1801 1802 let hasSideEffects = 0; 1803} 1804 1805// Double Precision register 1806class AVConv1XInsD_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4, 1807 bit op5, dag oops, dag iops, InstrItinClass itin, 1808 string opc, string asm, list<dag> pattern> 1809 : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> { 1810 bits<5> dst; 1811 // if dp_operation then UInt(D:Vd) else UInt(Vd:D); 1812 let Inst{22} = dst{4}; 1813 let Inst{15-12} = dst{3-0}; 1814 1815 let hasSideEffects = 0; 1816 let Predicates = [HasVFP2, HasDPVFP]; 1817} 1818 1819let isUnpredicable = 1 in { 1820 1821def VTOSHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 0, 1822 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits), 1823 IIC_fpCVTHI, "vcvt", ".s16.f16\t$dst, $a, $fbits", []>, 1824 Requires<[HasFullFP16]>, 1825 Sched<[WriteFPCVT]>; 1826 1827def VTOUHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 0, 1828 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits), 1829 IIC_fpCVTHI, "vcvt", ".u16.f16\t$dst, $a, $fbits", []>, 1830 Requires<[HasFullFP16]>, 1831 Sched<[WriteFPCVT]>; 1832 1833def VTOSLH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 1, 1834 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits), 1835 IIC_fpCVTHI, "vcvt", ".s32.f16\t$dst, $a, $fbits", []>, 1836 Requires<[HasFullFP16]>, 1837 Sched<[WriteFPCVT]>; 1838 1839def VTOULH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 1, 1840 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits), 1841 IIC_fpCVTHI, "vcvt", ".u32.f16\t$dst, $a, $fbits", []>, 1842 Requires<[HasFullFP16]>, 1843 Sched<[WriteFPCVT]>; 1844 1845} // End of 'let isUnpredicable = 1 in' 1846 1847def VTOSHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 0, 1848 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits), 1849 IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits", []>, 1850 Sched<[WriteFPCVT]> { 1851 // Some single precision VFP instructions may be executed on both NEON and 1852 // VFP pipelines on A8. 1853 let D = VFPNeonA8Domain; 1854} 1855 1856def VTOUHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 0, 1857 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits), 1858 IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits", []>, 1859 Sched<[WriteFPCVT]> { 1860 // Some single precision VFP instructions may be executed on both NEON and 1861 // VFP pipelines on A8. 1862 let D = VFPNeonA8Domain; 1863} 1864 1865def VTOSLS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 1, 1866 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits), 1867 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits", []>, 1868 Sched<[WriteFPCVT]> { 1869 // Some single precision VFP instructions may be executed on both NEON and 1870 // VFP pipelines on A8. 1871 let D = VFPNeonA8Domain; 1872} 1873 1874def VTOULS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 1, 1875 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits), 1876 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits", []>, 1877 Sched<[WriteFPCVT]> { 1878 // Some single precision VFP instructions may be executed on both NEON and 1879 // VFP pipelines on A8. 
1880 let D = VFPNeonA8Domain; 1881} 1882 1883def VTOSHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 0, 1884 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits), 1885 IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits", []>, 1886 Sched<[WriteFPCVT]>; 1887 1888def VTOUHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 0, 1889 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits), 1890 IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits", []>, 1891 Sched<[WriteFPCVT]>; 1892 1893def VTOSLD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 1, 1894 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits), 1895 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits", []>, 1896 Sched<[WriteFPCVT]>; 1897 1898def VTOULD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 1, 1899 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits), 1900 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits", []>, 1901 Sched<[WriteFPCVT]>; 1902 1903// Fixed-Point to FP: 1904 1905let isUnpredicable = 1 in { 1906 1907def VSHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 0, 1908 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits), 1909 IIC_fpCVTIH, "vcvt", ".f16.s16\t$dst, $a, $fbits", []>, 1910 Requires<[HasFullFP16]>, 1911 Sched<[WriteFPCVT]>; 1912 1913def VUHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 0, 1914 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits), 1915 IIC_fpCVTIH, "vcvt", ".f16.u16\t$dst, $a, $fbits", []>, 1916 Requires<[HasFullFP16]>, 1917 Sched<[WriteFPCVT]>; 1918 1919def VSLTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 1, 1920 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits), 1921 IIC_fpCVTIH, "vcvt", ".f16.s32\t$dst, $a, $fbits", []>, 1922 Requires<[HasFullFP16]>, 1923 Sched<[WriteFPCVT]>; 1924 1925def VULTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 1, 1926 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits), 1927 IIC_fpCVTIH, "vcvt", ".f16.u32\t$dst, $a, $fbits", []>, 1928 Requires<[HasFullFP16]>, 1929 Sched<[WriteFPCVT]>; 1930 1931} // End of 'let isUnpredicable = 1 in' 1932 1933def VSHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 0, 1934 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits), 1935 IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits", []>, 1936 Sched<[WriteFPCVT]> { 1937 // Some single precision VFP instructions may be executed on both NEON and 1938 // VFP pipelines on A8. 1939 let D = VFPNeonA8Domain; 1940} 1941 1942def VUHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 0, 1943 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits), 1944 IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits", []>, 1945 Sched<[WriteFPCVT]> { 1946 // Some single precision VFP instructions may be executed on both NEON and 1947 // VFP pipelines on A8. 1948 let D = VFPNeonA8Domain; 1949} 1950 1951def VSLTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 1, 1952 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits), 1953 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits", []>, 1954 Sched<[WriteFPCVT]> { 1955 // Some single precision VFP instructions may be executed on both NEON and 1956 // VFP pipelines on A8. 1957 let D = VFPNeonA8Domain; 1958} 1959 1960def VULTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 1, 1961 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits), 1962 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits", []>, 1963 Sched<[WriteFPCVT]> { 1964 // Some single precision VFP instructions may be executed on both NEON and 1965 // VFP pipelines on A8. 
1966 let D = VFPNeonA8Domain; 1967} 1968 1969def VSHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 0, 1970 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits), 1971 IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits", []>, 1972 Sched<[WriteFPCVT]>; 1973 1974def VUHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 0, 1975 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits), 1976 IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits", []>, 1977 Sched<[WriteFPCVT]>; 1978 1979def VSLTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 1, 1980 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits), 1981 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits", []>, 1982 Sched<[WriteFPCVT]>; 1983 1984def VULTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 1, 1985 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits), 1986 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits", []>, 1987 Sched<[WriteFPCVT]>; 1988 1989} // End of 'let Constraints = "$a = $dst" in' 1990 1991// BFloat16 - Single precision, unary, predicated 1992class BF16_VCVT<string opc, bits<2> op7_6> 1993 : VFPAI<(outs SPR:$Sd), (ins SPR:$dst, SPR:$Sm), 1994 VFPUnaryFrm, NoItinerary, 1995 opc, ".bf16.f32\t$Sd, $Sm", "", []>, 1996 RegConstraint<"$dst = $Sd">, 1997 Requires<[HasBF16]>, 1998 Sched<[]> { 1999 bits<5> Sd; 2000 bits<5> Sm; 2001 2002 // Encode instruction operands. 2003 let Inst{3-0} = Sm{4-1}; 2004 let Inst{5} = Sm{0}; 2005 let Inst{15-12} = Sd{4-1}; 2006 let Inst{22} = Sd{0}; 2007 2008 let Inst{27-23} = 0b11101; // opcode1 2009 let Inst{21-20} = 0b11; // opcode2 2010 let Inst{19-16} = 0b0011; // opcode3 2011 let Inst{11-8} = 0b1001; 2012 let Inst{7-6} = op7_6; 2013 let Inst{4} = 0; 2014 2015 let DecoderNamespace = "VFPV8"; 2016 let hasSideEffects = 0; 2017} 2018 2019def BF16_VCVTB : BF16_VCVT<"vcvtb", 0b01>; 2020def BF16_VCVTT : BF16_VCVT<"vcvtt", 0b11>; 2021 2022//===----------------------------------------------------------------------===// 2023// FP Multiply-Accumulate Operations. 2024// 2025 2026def VMLAD : ADbI<0b11100, 0b00, 0, 0, 2027 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm), 2028 IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm", 2029 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm), 2030 (f64 DPR:$Ddin)))]>, 2031 RegConstraint<"$Ddin = $Dd">, 2032 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>, 2033 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2034 2035def VMLAS : ASbIn<0b11100, 0b00, 0, 0, 2036 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm), 2037 IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm", 2038 [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm), 2039 SPR:$Sdin))]>, 2040 RegConstraint<"$Sdin = $Sd">, 2041 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>, 2042 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> { 2043 // Some single precision VFP instructions may be executed on both NEON and 2044 // VFP pipelines on A8. 
2045 let D = VFPNeonA8Domain; 2046} 2047 2048def VMLAH : AHbI<0b11100, 0b00, 0, 0, 2049 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm), 2050 IIC_fpMAC16, "vmla", ".f16\t$Sd, $Sn, $Sm", 2051 [(set (f16 HPR:$Sd), (fadd_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)), 2052 (f16 HPR:$Sdin)))]>, 2053 RegConstraint<"$Sdin = $Sd">, 2054 Requires<[HasFullFP16,UseFPVMLx]>; 2055 2056def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))), 2057 (VMLAD DPR:$dstin, DPR:$a, DPR:$b)>, 2058 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>; 2059def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)), 2060 (VMLAS SPR:$dstin, SPR:$a, SPR:$b)>, 2061 Requires<[HasVFP2,DontUseNEONForFP, UseFPVMLx]>; 2062def : Pat<(fadd_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)), 2063 (VMLAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>, 2064 Requires<[HasFullFP16,DontUseNEONForFP, UseFPVMLx]>; 2065 2066 2067def VMLSD : ADbI<0b11100, 0b00, 1, 0, 2068 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm), 2069 IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm", 2070 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)), 2071 (f64 DPR:$Ddin)))]>, 2072 RegConstraint<"$Ddin = $Dd">, 2073 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>, 2074 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2075 2076def VMLSS : ASbIn<0b11100, 0b00, 1, 0, 2077 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm), 2078 IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm", 2079 [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)), 2080 SPR:$Sdin))]>, 2081 RegConstraint<"$Sdin = $Sd">, 2082 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>, 2083 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> { 2084 // Some single precision VFP instructions may be executed on both NEON and 2085 // VFP pipelines on A8. 2086 let D = VFPNeonA8Domain; 2087} 2088 2089def VMLSH : AHbI<0b11100, 0b00, 1, 0, 2090 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm), 2091 IIC_fpMAC16, "vmls", ".f16\t$Sd, $Sn, $Sm", 2092 [(set (f16 HPR:$Sd), (fadd_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))), 2093 (f16 HPR:$Sdin)))]>, 2094 RegConstraint<"$Sdin = $Sd">, 2095 Requires<[HasFullFP16,UseFPVMLx]>; 2096 2097def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))), 2098 (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>, 2099 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>; 2100def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)), 2101 (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>, 2102 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>; 2103def : Pat<(fsub_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)), 2104 (VMLSH HPR:$dstin, (f16 HPR:$a), HPR:$b)>, 2105 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>; 2106 2107def VNMLAD : ADbI<0b11100, 0b01, 1, 0, 2108 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm), 2109 IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm", 2110 [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)), 2111 (f64 DPR:$Ddin)))]>, 2112 RegConstraint<"$Ddin = $Dd">, 2113 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>, 2114 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2115 2116def VNMLAS : ASbI<0b11100, 0b01, 1, 0, 2117 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm), 2118 IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm", 2119 [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)), 2120 SPR:$Sdin))]>, 2121 RegConstraint<"$Sdin = $Sd">, 2122 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>, 2123 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> { 2124 // Some single precision VFP instructions may be executed on both NEON and 2125 // VFP pipelines on A8. 
2126 let D = VFPNeonA8Domain; 2127} 2128 2129def VNMLAH : AHbI<0b11100, 0b01, 1, 0, 2130 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm), 2131 IIC_fpMAC16, "vnmla", ".f16\t$Sd, $Sn, $Sm", 2132 [(set (f16 HPR:$Sd), (fsub_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))), 2133 (f16 HPR:$Sdin)))]>, 2134 RegConstraint<"$Sdin = $Sd">, 2135 Requires<[HasFullFP16,UseFPVMLx]>; 2136 2137// (-(a * b) - dst) -> -(dst + (a * b)) 2138def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin), 2139 (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>, 2140 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>; 2141def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin), 2142 (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>, 2143 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>; 2144def : Pat<(fsub_mlx (fneg (fmul_su (f16 HPR:$a), HPR:$b)), HPR:$dstin), 2145 (VNMLAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>, 2146 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>; 2147 2148// (-dst - (a * b)) -> -(dst + (a * b)) 2149def : Pat<(fsub_mlx (fneg DPR:$dstin), (fmul_su DPR:$a, (f64 DPR:$b))), 2150 (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>, 2151 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>; 2152def : Pat<(fsub_mlx (fneg SPR:$dstin), (fmul_su SPR:$a, SPR:$b)), 2153 (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>, 2154 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>; 2155def : Pat<(fsub_mlx (fneg HPR:$dstin), (fmul_su (f16 HPR:$a), HPR:$b)), 2156 (VNMLAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>, 2157 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>; 2158 2159def VNMLSD : ADbI<0b11100, 0b01, 0, 0, 2160 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm), 2161 IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm", 2162 [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm), 2163 (f64 DPR:$Ddin)))]>, 2164 RegConstraint<"$Ddin = $Dd">, 2165 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>, 2166 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2167 2168def VNMLSS : ASbI<0b11100, 0b01, 0, 0, 2169 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm), 2170 IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm", 2171 [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>, 2172 RegConstraint<"$Sdin = $Sd">, 2173 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>, 2174 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> { 2175 // Some single precision VFP instructions may be executed on both NEON and 2176 // VFP pipelines on A8. 2177 let D = VFPNeonA8Domain; 2178} 2179 2180def VNMLSH : AHbI<0b11100, 0b01, 0, 0, 2181 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm), 2182 IIC_fpMAC16, "vnmls", ".f16\t$Sd, $Sn, $Sm", 2183 [(set (f16 HPR:$Sd), (fsub_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)), (f16 HPR:$Sdin)))]>, 2184 RegConstraint<"$Sdin = $Sd">, 2185 Requires<[HasFullFP16,UseFPVMLx]>; 2186 2187def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin), 2188 (VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>, 2189 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>; 2190def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin), 2191 (VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>, 2192 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>; 2193def : Pat<(fsub_mlx (fmul_su (f16 HPR:$a), HPR:$b), HPR:$dstin), 2194 (VNMLSH HPR:$dstin, (f16 HPR:$a), HPR:$b)>, 2195 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>; 2196 2197//===----------------------------------------------------------------------===// 2198// Fused FP Multiply-Accumulate Operations. 
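// These fused operations perform the multiply and the accumulate with a
// single rounding step, unlike the VMLA/VMLS family above, which rounds the
// intermediate product. Illustrative assembly only (not taken from this file):
//   vfma.f32 s0, s1, s2   @ s0 = s0 + (s1 * s2), one rounding
//   vmla.f32 s0, s1, s2   @ s0 = s0 + round(s1 * s2), two roundings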
2199//
2200def VFMAD : ADbI<0b11101, 0b10, 0, 0,
2201 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2202 IIC_fpFMAC64, "vfma", ".f64\t$Dd, $Dn, $Dm",
2203 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
2204 (f64 DPR:$Ddin)))]>,
2205 RegConstraint<"$Ddin = $Dd">,
2206 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
2207 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2208
2209def VFMAS : ASbIn<0b11101, 0b10, 0, 0,
2210 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2211 IIC_fpFMAC32, "vfma", ".f32\t$Sd, $Sn, $Sm",
2212 [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
2213 SPR:$Sdin))]>,
2214 RegConstraint<"$Sdin = $Sd">,
2215 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2216 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2217 // Some single precision VFP instructions may be executed on both NEON and
2218 // VFP pipelines.
2219}
2220
2221def VFMAH : AHbI<0b11101, 0b10, 0, 0,
2222 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2223 IIC_fpFMAC16, "vfma", ".f16\t$Sd, $Sn, $Sm",
2224 [(set (f16 HPR:$Sd), (fadd_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)),
2225 (f16 HPR:$Sdin)))]>,
2226 RegConstraint<"$Sdin = $Sd">,
2227 Requires<[HasFullFP16,UseFusedMAC]>,
2228 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2229
2230def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
2231 (VFMAD DPR:$dstin, DPR:$a, DPR:$b)>,
2232 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
2233def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
2234 (VFMAS SPR:$dstin, SPR:$a, SPR:$b)>,
2235 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
2236def : Pat<(fadd_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)),
2237 (VFMAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
2238 Requires<[HasFullFP16,DontUseNEONForFP,UseFusedMAC]>;
2239
2240// Match @llvm.fma.* intrinsics
2241// (fma x, y, z) -> (vfma z, x, y)
2242def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, DPR:$Ddin)),
2243 (VFMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2244 Requires<[HasVFP4,HasDPVFP]>;
2245def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, SPR:$Sdin)),
2246 (VFMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2247 Requires<[HasVFP4]>;
2248def : Pat<(f16 (fma HPR:$Sn, HPR:$Sm, (f16 HPR:$Sdin))),
2249 (VFMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
2250 Requires<[HasFullFP16]>;
2251
2252def VFMSD : ADbI<0b11101, 0b10, 1, 0,
2253 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2254 IIC_fpFMAC64, "vfms", ".f64\t$Dd, $Dn, $Dm",
2255 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
2256 (f64 DPR:$Ddin)))]>,
2257 RegConstraint<"$Ddin = $Dd">,
2258 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
2259 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
2260
2261def VFMSS : ASbIn<0b11101, 0b10, 1, 0,
2262 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2263 IIC_fpFMAC32, "vfms", ".f32\t$Sd, $Sn, $Sm",
2264 [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
2265 SPR:$Sdin))]>,
2266 RegConstraint<"$Sdin = $Sd">,
2267 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2268 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2269 // Some single precision VFP instructions may be executed on both NEON and
2270 // VFP pipelines.
2271} 2272 2273def VFMSH : AHbI<0b11101, 0b10, 1, 0, 2274 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm), 2275 IIC_fpFMAC16, "vfms", ".f16\t$Sd, $Sn, $Sm", 2276 [(set (f16 HPR:$Sd), (fadd_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))), 2277 (f16 HPR:$Sdin)))]>, 2278 RegConstraint<"$Sdin = $Sd">, 2279 Requires<[HasFullFP16,UseFusedMAC]>, 2280 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2281 2282def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))), 2283 (VFMSD DPR:$dstin, DPR:$a, DPR:$b)>, 2284 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>; 2285def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)), 2286 (VFMSS SPR:$dstin, SPR:$a, SPR:$b)>, 2287 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>; 2288def : Pat<(fsub_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)), 2289 (VFMSH HPR:$dstin, (f16 HPR:$a), HPR:$b)>, 2290 Requires<[HasFullFP16,DontUseNEONForFP,UseFusedMAC]>; 2291 2292// Match @llvm.fma.* intrinsics 2293// (fma (fneg x), y, z) -> (vfms z, x, y) 2294def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin)), 2295 (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>, 2296 Requires<[HasVFP4,HasDPVFP]>; 2297def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin)), 2298 (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>, 2299 Requires<[HasVFP4]>; 2300def : Pat<(f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (f16 HPR:$Sdin))), 2301 (VFMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>, 2302 Requires<[HasFullFP16]>; 2303 2304def VFNMAD : ADbI<0b11101, 0b01, 1, 0, 2305 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm), 2306 IIC_fpFMAC64, "vfnma", ".f64\t$Dd, $Dn, $Dm", 2307 [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)), 2308 (f64 DPR:$Ddin)))]>, 2309 RegConstraint<"$Ddin = $Dd">, 2310 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>, 2311 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2312 2313def VFNMAS : ASbI<0b11101, 0b01, 1, 0, 2314 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm), 2315 IIC_fpFMAC32, "vfnma", ".f32\t$Sd, $Sn, $Sm", 2316 [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)), 2317 SPR:$Sdin))]>, 2318 RegConstraint<"$Sdin = $Sd">, 2319 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>, 2320 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> { 2321 // Some single precision VFP instructions may be executed on both NEON and 2322 // VFP pipelines. 
2323} 2324 2325def VFNMAH : AHbI<0b11101, 0b01, 1, 0, 2326 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm), 2327 IIC_fpFMAC16, "vfnma", ".f16\t$Sd, $Sn, $Sm", 2328 [(set (f16 HPR:$Sd), (fsub_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))), 2329 (f16 HPR:$Sdin)))]>, 2330 RegConstraint<"$Sdin = $Sd">, 2331 Requires<[HasFullFP16,UseFusedMAC]>, 2332 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2333 2334def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin), 2335 (VFNMAD DPR:$dstin, DPR:$a, DPR:$b)>, 2336 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>; 2337def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin), 2338 (VFNMAS SPR:$dstin, SPR:$a, SPR:$b)>, 2339 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>; 2340 2341// Match @llvm.fma.* intrinsics 2342// (fneg (fma x, y, z)) -> (vfnma z, x, y) 2343def : Pat<(fneg (fma (f64 DPR:$Dn), (f64 DPR:$Dm), (f64 DPR:$Ddin))), 2344 (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>, 2345 Requires<[HasVFP4,HasDPVFP]>; 2346def : Pat<(fneg (fma (f32 SPR:$Sn), (f32 SPR:$Sm), (f32 SPR:$Sdin))), 2347 (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>, 2348 Requires<[HasVFP4]>; 2349def : Pat<(fneg (fma (f16 HPR:$Sn), (f16 HPR:$Sm), (f16 (f16 HPR:$Sdin)))), 2350 (VFNMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>, 2351 Requires<[HasFullFP16]>; 2352// (fma (fneg x), y, (fneg z)) -> (vfnma z, x, y) 2353def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, (fneg DPR:$Ddin))), 2354 (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>, 2355 Requires<[HasVFP4,HasDPVFP]>; 2356def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, (fneg SPR:$Sdin))), 2357 (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>, 2358 Requires<[HasVFP4]>; 2359def : Pat<(f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (fneg (f16 HPR:$Sdin)))), 2360 (VFNMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>, 2361 Requires<[HasFullFP16]>; 2362 2363def VFNMSD : ADbI<0b11101, 0b01, 0, 0, 2364 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm), 2365 IIC_fpFMAC64, "vfnms", ".f64\t$Dd, $Dn, $Dm", 2366 [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm), 2367 (f64 DPR:$Ddin)))]>, 2368 RegConstraint<"$Ddin = $Dd">, 2369 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>, 2370 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2371 2372def VFNMSS : ASbI<0b11101, 0b01, 0, 0, 2373 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm), 2374 IIC_fpFMAC32, "vfnms", ".f32\t$Sd, $Sn, $Sm", 2375 [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>, 2376 RegConstraint<"$Sdin = $Sd">, 2377 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>, 2378 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> { 2379 // Some single precision VFP instructions may be executed on both NEON and 2380 // VFP pipelines. 
2381} 2382 2383def VFNMSH : AHbI<0b11101, 0b01, 0, 0, 2384 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm), 2385 IIC_fpFMAC16, "vfnms", ".f16\t$Sd, $Sn, $Sm", 2386 [(set (f16 HPR:$Sd), (fsub_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)), (f16 HPR:$Sdin)))]>, 2387 RegConstraint<"$Sdin = $Sd">, 2388 Requires<[HasFullFP16,UseFusedMAC]>, 2389 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>; 2390 2391def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin), 2392 (VFNMSD DPR:$dstin, DPR:$a, DPR:$b)>, 2393 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>; 2394def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin), 2395 (VFNMSS SPR:$dstin, SPR:$a, SPR:$b)>, 2396 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>; 2397 2398// Match @llvm.fma.* intrinsics 2399 2400// (fma x, y, (fneg z)) -> (vfnms z, x, y)) 2401def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, (fneg DPR:$Ddin))), 2402 (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>, 2403 Requires<[HasVFP4,HasDPVFP]>; 2404def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, (fneg SPR:$Sdin))), 2405 (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>, 2406 Requires<[HasVFP4]>; 2407def : Pat<(f16 (fma (f16 HPR:$Sn), (f16 HPR:$Sm), (fneg (f16 HPR:$Sdin)))), 2408 (VFNMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>, 2409 Requires<[HasFullFP16]>; 2410// (fneg (fma (fneg x), y, z)) -> (vfnms z, x, y) 2411def : Pat<(fneg (f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin))), 2412 (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>, 2413 Requires<[HasVFP4,HasDPVFP]>; 2414def : Pat<(fneg (f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin))), 2415 (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>, 2416 Requires<[HasVFP4]>; 2417def : Pat<(fneg (f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (f16 HPR:$Sdin)))), 2418 (VFNMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>, 2419 Requires<[HasFullFP16]>; 2420 2421//===----------------------------------------------------------------------===// 2422// FP Conditional moves. 2423// 2424 2425let hasSideEffects = 0 in { 2426def VMOVDcc : PseudoInst<(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm, cmovpred:$p), 2427 IIC_fpUNA64, 2428 [(set (f64 DPR:$Dd), 2429 (ARMcmov DPR:$Dn, DPR:$Dm, cmovpred:$p))]>, 2430 RegConstraint<"$Dn = $Dd">, Requires<[HasFPRegs64]>; 2431 2432def VMOVScc : PseudoInst<(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm, cmovpred:$p), 2433 IIC_fpUNA32, 2434 [(set (f32 SPR:$Sd), 2435 (ARMcmov SPR:$Sn, SPR:$Sm, cmovpred:$p))]>, 2436 RegConstraint<"$Sn = $Sd">, Requires<[HasFPRegs]>; 2437 2438def VMOVHcc : PseudoInst<(outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm, cmovpred:$p), 2439 IIC_fpUNA16, 2440 [(set (f16 HPR:$Sd), 2441 (ARMcmov (f16 HPR:$Sn), (f16 HPR:$Sm), cmovpred:$p))]>, 2442 RegConstraint<"$Sd = $Sn">, Requires<[HasFPRegs]>; 2443} // hasSideEffects 2444 2445//===----------------------------------------------------------------------===// 2446// Move from VFP System Register to ARM core register. 2447// 2448 2449class MovFromVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm, 2450 list<dag> pattern>: 2451 VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, "", pattern> { 2452 2453 // Instruction operand. 2454 bits<4> Rt; 2455 2456 let Inst{27-20} = 0b11101111; 2457 let Inst{19-16} = opc19_16; 2458 let Inst{15-12} = Rt; 2459 let Inst{11-8} = 0b1010; 2460 let Inst{7} = 0; 2461 let Inst{6-5} = 0b00; 2462 let Inst{4} = 1; 2463 let Inst{3-0} = 0b0000; 2464 let Unpredictable{7-5} = 0b111; 2465 let Unpredictable{3-0} = 0b1111; 2466} 2467 2468let DecoderMethod = "DecodeForVMRSandVMSR" in { 2469 // APSR is the application level alias of CPSR. This FPSCR N, Z, C, V flags 2470 // to APSR. 
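// (That is, FMSTAT, i.e. "vmrs APSR_nzcv, fpscr", copies the FPSCR N, Z, C, V
// flags into the core flags.) A typical use, shown purely as an illustration,
// is driving a conditional branch off a VFP compare:
//   vcmp.f64 d0, d1
//   vmrs     APSR_nzcv, fpscr
//   bgt      .Lgreater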
2471 let Defs = [CPSR], Uses = [FPSCR_NZCV], Predicates = [HasFPRegs], 2472 Rt = 0b1111 /* apsr_nzcv */ in 2473 def FMSTAT : MovFromVFP<0b0001 /* fpscr */, (outs), (ins), 2474 "vmrs", "\tAPSR_nzcv, fpscr", [(arm_fmstat)]>; 2475 2476 // Application level FPSCR -> GPR 2477 let hasSideEffects = 1, Uses = [FPSCR], Predicates = [HasFPRegs] in 2478 def VMRS : MovFromVFP<0b0001 /* fpscr */, (outs GPRnopc:$Rt), (ins), 2479 "vmrs", "\t$Rt, fpscr", 2480 [(set GPRnopc:$Rt, (int_arm_get_fpscr))]>; 2481 2482 // System level FPEXC, FPSID -> GPR 2483 let Uses = [FPSCR] in { 2484 def VMRS_FPEXC : MovFromVFP<0b1000 /* fpexc */, (outs GPRnopc:$Rt), (ins), 2485 "vmrs", "\t$Rt, fpexc", []>; 2486 def VMRS_FPSID : MovFromVFP<0b0000 /* fpsid */, (outs GPRnopc:$Rt), (ins), 2487 "vmrs", "\t$Rt, fpsid", []>; 2488 def VMRS_MVFR0 : MovFromVFP<0b0111 /* mvfr0 */, (outs GPRnopc:$Rt), (ins), 2489 "vmrs", "\t$Rt, mvfr0", []>; 2490 def VMRS_MVFR1 : MovFromVFP<0b0110 /* mvfr1 */, (outs GPRnopc:$Rt), (ins), 2491 "vmrs", "\t$Rt, mvfr1", []>; 2492 let Predicates = [HasFPARMv8] in { 2493 def VMRS_MVFR2 : MovFromVFP<0b0101 /* mvfr2 */, (outs GPRnopc:$Rt), (ins), 2494 "vmrs", "\t$Rt, mvfr2", []>; 2495 } 2496 def VMRS_FPINST : MovFromVFP<0b1001 /* fpinst */, (outs GPRnopc:$Rt), (ins), 2497 "vmrs", "\t$Rt, fpinst", []>; 2498 def VMRS_FPINST2 : MovFromVFP<0b1010 /* fpinst2 */, (outs GPRnopc:$Rt), 2499 (ins), "vmrs", "\t$Rt, fpinst2", []>; 2500 let Predicates = [HasV8_1MMainline, HasFPRegs] in { 2501 // System level FPSCR_NZCVQC -> GPR 2502 def VMRS_FPSCR_NZCVQC 2503 : MovFromVFP<0b0010 /* fpscr_nzcvqc */, 2504 (outs GPR:$Rt), (ins cl_FPSCR_NZCV:$fpscr_in), 2505 "vmrs", "\t$Rt, fpscr_nzcvqc", []>; 2506 } 2507 } 2508 let Predicates = [HasV8_1MMainline, Has8MSecExt] in { 2509 // System level FPSCR -> GPR, with context saving for security extensions 2510 def VMRS_FPCXTNS : MovFromVFP<0b1110 /* fpcxtns */, (outs GPR:$Rt), (ins), 2511 "vmrs", "\t$Rt, fpcxtns", []>; 2512 } 2513 let Predicates = [HasV8_1MMainline, Has8MSecExt] in { 2514 // System level FPSCR -> GPR, with context saving for security extensions 2515 def VMRS_FPCXTS : MovFromVFP<0b1111 /* fpcxts */, (outs GPR:$Rt), (ins), 2516 "vmrs", "\t$Rt, fpcxts", []>; 2517 } 2518 2519 let Predicates = [HasV8_1MMainline, HasMVEInt] in { 2520 // System level VPR/P0 -> GPR 2521 let Uses = [VPR] in 2522 def VMRS_VPR : MovFromVFP<0b1100 /* vpr */, (outs GPR:$Rt), (ins), 2523 "vmrs", "\t$Rt, vpr", []>; 2524 2525 def VMRS_P0 : MovFromVFP<0b1101 /* p0 */, (outs GPR:$Rt), (ins VCCR:$cond), 2526 "vmrs", "\t$Rt, p0", []>; 2527 } 2528} 2529 2530//===----------------------------------------------------------------------===// 2531// Move from ARM core register to VFP System Register. 2532// 2533 2534class MovToVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm, 2535 list<dag> pattern>: 2536 VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, "", pattern> { 2537 2538 // Instruction operand. 
2539 bits<4> Rt; 2540 2541 let Inst{27-20} = 0b11101110; 2542 let Inst{19-16} = opc19_16; 2543 let Inst{15-12} = Rt; 2544 let Inst{11-8} = 0b1010; 2545 let Inst{7} = 0; 2546 let Inst{6-5} = 0b00; 2547 let Inst{4} = 1; 2548 let Inst{3-0} = 0b0000; 2549 let Predicates = [HasVFP2]; 2550 let Unpredictable{7-5} = 0b111; 2551 let Unpredictable{3-0} = 0b1111; 2552} 2553 2554let DecoderMethod = "DecodeForVMRSandVMSR" in { 2555 let Defs = [FPSCR] in { 2556 let Predicates = [HasFPRegs] in 2557 // Application level GPR -> FPSCR 2558 def VMSR : MovToVFP<0b0001 /* fpscr */, (outs), (ins GPRnopc:$Rt), 2559 "vmsr", "\tfpscr, $Rt", 2560 [(int_arm_set_fpscr GPRnopc:$Rt)]>; 2561 // System level GPR -> FPEXC 2562 def VMSR_FPEXC : MovToVFP<0b1000 /* fpexc */, (outs), (ins GPRnopc:$Rt), 2563 "vmsr", "\tfpexc, $Rt", []>; 2564 // System level GPR -> FPSID 2565 def VMSR_FPSID : MovToVFP<0b0000 /* fpsid */, (outs), (ins GPRnopc:$Rt), 2566 "vmsr", "\tfpsid, $Rt", []>; 2567 def VMSR_FPINST : MovToVFP<0b1001 /* fpinst */, (outs), (ins GPRnopc:$Rt), 2568 "vmsr", "\tfpinst, $Rt", []>; 2569 def VMSR_FPINST2 : MovToVFP<0b1010 /* fpinst2 */, (outs), (ins GPRnopc:$Rt), 2570 "vmsr", "\tfpinst2, $Rt", []>; 2571 } 2572 let Predicates = [HasV8_1MMainline, Has8MSecExt] in { 2573 // System level GPR -> FPSCR with context saving for security extensions 2574 def VMSR_FPCXTNS : MovToVFP<0b1110 /* fpcxtns */, (outs), (ins GPR:$Rt), 2575 "vmsr", "\tfpcxtns, $Rt", []>; 2576 } 2577 let Predicates = [HasV8_1MMainline, Has8MSecExt] in { 2578 // System level GPR -> FPSCR with context saving for security extensions 2579 def VMSR_FPCXTS : MovToVFP<0b1111 /* fpcxts */, (outs), (ins GPR:$Rt), 2580 "vmsr", "\tfpcxts, $Rt", []>; 2581 } 2582 let Predicates = [HasV8_1MMainline, HasFPRegs] in { 2583 // System level GPR -> FPSCR_NZCVQC 2584 def VMSR_FPSCR_NZCVQC 2585 : MovToVFP<0b0010 /* fpscr_nzcvqc */, 2586 (outs cl_FPSCR_NZCV:$fpscr_out), (ins GPR:$Rt), 2587 "vmsr", "\tfpscr_nzcvqc, $Rt", []>; 2588 } 2589 2590 let Predicates = [HasV8_1MMainline, HasMVEInt] in { 2591 // System level GPR -> VPR/P0 2592 let Defs = [VPR] in 2593 def VMSR_VPR : MovToVFP<0b1100 /* vpr */, (outs), (ins GPR:$Rt), 2594 "vmsr", "\tvpr, $Rt", []>; 2595 2596 def VMSR_P0 : MovToVFP<0b1101 /* p0 */, (outs VCCR:$cond), (ins GPR:$Rt), 2597 "vmsr", "\tp0, $Rt", []>; 2598 } 2599} 2600 2601//===----------------------------------------------------------------------===// 2602// Misc. 2603// 2604 2605// Materialize FP immediates. VFP3 only. 2606let isReMaterializable = 1 in { 2607def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm), 2608 VFPMiscFrm, IIC_fpUNA64, 2609 "vmov", ".f64\t$Dd, $imm", "", 2610 [(set DPR:$Dd, vfp_f64imm:$imm)]>, 2611 Requires<[HasVFP3,HasDPVFP]> { 2612 bits<5> Dd; 2613 bits<8> imm; 2614 2615 let Inst{27-23} = 0b11101; 2616 let Inst{22} = Dd{4}; 2617 let Inst{21-20} = 0b11; 2618 let Inst{19-16} = imm{7-4}; 2619 let Inst{15-12} = Dd{3-0}; 2620 let Inst{11-9} = 0b101; 2621 let Inst{8} = 1; // Double precision. 2622 let Inst{7-4} = 0b0000; 2623 let Inst{3-0} = imm{3-0}; 2624} 2625 2626def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm), 2627 VFPMiscFrm, IIC_fpUNA32, 2628 "vmov", ".f32\t$Sd, $imm", "", 2629 [(set SPR:$Sd, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> { 2630 bits<5> Sd; 2631 bits<8> imm; 2632 2633 let Inst{27-23} = 0b11101; 2634 let Inst{22} = Sd{0}; 2635 let Inst{21-20} = 0b11; 2636 let Inst{19-16} = imm{7-4}; 2637 let Inst{15-12} = Sd{4-1}; 2638 let Inst{11-9} = 0b101; 2639 let Inst{8} = 0; // Single precision. 
2640 let Inst{7-4} = 0b0000;
2641 let Inst{3-0} = imm{3-0};
2642}
2643
2644def FCONSTH : VFPAI<(outs HPR:$Sd), (ins vfp_f16imm:$imm),
2645 VFPMiscFrm, IIC_fpUNA16,
2646 "vmov", ".f16\t$Sd, $imm", "",
2647 [(set (f16 HPR:$Sd), vfp_f16imm:$imm)]>,
2648 Requires<[HasFullFP16]> {
2649 bits<5> Sd;
2650 bits<8> imm;
2651
2652 let Inst{27-23} = 0b11101;
2653 let Inst{22} = Sd{0};
2654 let Inst{21-20} = 0b11;
2655 let Inst{19-16} = imm{7-4};
2656 let Inst{15-12} = Sd{4-1};
2657 let Inst{11-8} = 0b1001; // Half precision
2658 let Inst{7-4} = 0b0000;
2659 let Inst{3-0} = imm{3-0};
2660
2661 let isUnpredicable = 1;
2662}
2663}
2664
2665def : Pat<(f32 (vfp_f32f16imm:$imm)),
2666 (f32 (COPY_TO_REGCLASS (f16 (FCONSTH (vfp_f32f16imm_xform (f32 $imm)))), SPR))> {
2667 let Predicates = [HasFullFP16];
2668}
2669
2670//===----------------------------------------------------------------------===//
2671// Assembler aliases.
2672//
2673// A few mnemonic aliases for pre-unified syntax. We don't guarantee to
2674// support them all, but supporting at least some of the basics is
2675// good to be friendly.
2676def : VFP2MnemonicAlias<"flds", "vldr">;
2677def : VFP2MnemonicAlias<"fldd", "vldr">;
2678def : VFP2MnemonicAlias<"fmrs", "vmov">;
2679def : VFP2MnemonicAlias<"fmsr", "vmov">;
2680def : VFP2MnemonicAlias<"fsqrts", "vsqrt">;
2681def : VFP2MnemonicAlias<"fsqrtd", "vsqrt">;
2682def : VFP2MnemonicAlias<"fadds", "vadd.f32">;
2683def : VFP2MnemonicAlias<"faddd", "vadd.f64">;
2684def : VFP2MnemonicAlias<"fmrdd", "vmov">;
2685def : VFP2MnemonicAlias<"fmrds", "vmov">;
2686def : VFP2MnemonicAlias<"fmrrd", "vmov">;
2687def : VFP2MnemonicAlias<"fmdrr", "vmov">;
2688def : VFP2MnemonicAlias<"fmuls", "vmul.f32">;
2689def : VFP2MnemonicAlias<"fmuld", "vmul.f64">;
2690def : VFP2MnemonicAlias<"fnegs", "vneg.f32">;
2691def : VFP2MnemonicAlias<"fnegd", "vneg.f64">;
2692def : VFP2MnemonicAlias<"ftosizd", "vcvt.s32.f64">;
2693def : VFP2MnemonicAlias<"ftosid", "vcvtr.s32.f64">;
2694def : VFP2MnemonicAlias<"ftosizs", "vcvt.s32.f32">;
2695def : VFP2MnemonicAlias<"ftosis", "vcvtr.s32.f32">;
2696def : VFP2MnemonicAlias<"ftouizd", "vcvt.u32.f64">;
2697def : VFP2MnemonicAlias<"ftouid", "vcvtr.u32.f64">;
2698def : VFP2MnemonicAlias<"ftouizs", "vcvt.u32.f32">;
2699def : VFP2MnemonicAlias<"ftouis", "vcvtr.u32.f32">;
2700def : VFP2MnemonicAlias<"fsitod", "vcvt.f64.s32">;
2701def : VFP2MnemonicAlias<"fsitos", "vcvt.f32.s32">;
2702def : VFP2MnemonicAlias<"fuitod", "vcvt.f64.u32">;
2703def : VFP2MnemonicAlias<"fuitos", "vcvt.f32.u32">;
2704def : VFP2MnemonicAlias<"fsts", "vstr">;
2705def : VFP2MnemonicAlias<"fstd", "vstr">;
2706def : VFP2MnemonicAlias<"fmacd", "vmla.f64">;
2707def : VFP2MnemonicAlias<"fmacs", "vmla.f32">;
2708def : VFP2MnemonicAlias<"fcpys", "vmov.f32">;
2709def : VFP2MnemonicAlias<"fcpyd", "vmov.f64">;
2710def : VFP2MnemonicAlias<"fcmps", "vcmp.f32">;
2711def : VFP2MnemonicAlias<"fcmpd", "vcmp.f64">;
2712def : VFP2MnemonicAlias<"fdivs", "vdiv.f32">;
2713def : VFP2MnemonicAlias<"fdivd", "vdiv.f64">;
2714def : VFP2MnemonicAlias<"fmrx", "vmrs">;
2715def : VFP2MnemonicAlias<"fmxr", "vmsr">;
2716
2717// Be friendly and accept the old form of zero-compare.
2718def : VFP2DPInstAlias<"fcmpzd${p} $val", (VCMPZD DPR:$val, pred:$p)>;
2719def : VFP2InstAlias<"fcmpzs${p} $val", (VCMPZS SPR:$val, pred:$p)>;
2720
2721
2722def : InstAlias<"fmstat${p}", (FMSTAT pred:$p), 0>, Requires<[HasFPRegs]>;
2723def : VFP2InstAlias<"fadds${p} $Sd, $Sn, $Sm",
2724 (VADDS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
2725def : VFP2DPInstAlias<"faddd${p} $Dd, $Dn,
$Dm", 2726 (VADDD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>; 2727def : VFP2InstAlias<"fsubs${p} $Sd, $Sn, $Sm", 2728 (VSUBS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>; 2729def : VFP2DPInstAlias<"fsubd${p} $Dd, $Dn, $Dm", 2730 (VSUBD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>; 2731 2732// No need for the size suffix on VSQRT. It's implied by the register classes. 2733def : VFP2InstAlias<"vsqrt${p} $Sd, $Sm", (VSQRTS SPR:$Sd, SPR:$Sm, pred:$p)>; 2734def : VFP2DPInstAlias<"vsqrt${p} $Dd, $Dm", (VSQRTD DPR:$Dd, DPR:$Dm, pred:$p)>; 2735 2736// VLDR/VSTR accept an optional type suffix. 2737def : VFP2InstAlias<"vldr${p}.32 $Sd, $addr", 2738 (VLDRS SPR:$Sd, addrmode5:$addr, pred:$p)>; 2739def : VFP2InstAlias<"vstr${p}.32 $Sd, $addr", 2740 (VSTRS SPR:$Sd, addrmode5:$addr, pred:$p)>; 2741def : VFP2InstAlias<"vldr${p}.64 $Dd, $addr", 2742 (VLDRD DPR:$Dd, addrmode5:$addr, pred:$p)>; 2743def : VFP2InstAlias<"vstr${p}.64 $Dd, $addr", 2744 (VSTRD DPR:$Dd, addrmode5:$addr, pred:$p)>; 2745 2746// VMOV can accept optional 32-bit or less data type suffix suffix. 2747def : VFP2InstAlias<"vmov${p}.8 $Rt, $Sn", 2748 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>; 2749def : VFP2InstAlias<"vmov${p}.16 $Rt, $Sn", 2750 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>; 2751def : VFP2InstAlias<"vmov${p}.32 $Rt, $Sn", 2752 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>; 2753def : VFP2InstAlias<"vmov${p}.8 $Sn, $Rt", 2754 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>; 2755def : VFP2InstAlias<"vmov${p}.16 $Sn, $Rt", 2756 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>; 2757def : VFP2InstAlias<"vmov${p}.32 $Sn, $Rt", 2758 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>; 2759 2760def : VFP2InstAlias<"vmov${p}.f64 $Rt, $Rt2, $Dn", 2761 (VMOVRRD GPR:$Rt, GPR:$Rt2, DPR:$Dn, pred:$p)>; 2762def : VFP2InstAlias<"vmov${p}.f64 $Dn, $Rt, $Rt2", 2763 (VMOVDRR DPR:$Dn, GPR:$Rt, GPR:$Rt2, pred:$p)>; 2764 2765// VMOVS doesn't need the .f32 to disambiguate from the NEON encoding the way 2766// VMOVD does. 2767def : VFP2InstAlias<"vmov${p} $Sd, $Sm", 2768 (VMOVS SPR:$Sd, SPR:$Sm, pred:$p)>; 2769 2770// FCONSTD/FCONSTS alias for vmov.f64/vmov.f32 2771// These aliases provide added functionality over vmov.f instructions by 2772// allowing users to write assembly containing encoded floating point constants 2773// (e.g. #0x70 vs #1.0). Without these alises there is no way for the 2774// assembler to accept encoded fp constants (but the equivalent fp-literal is 2775// accepted directly by vmovf). 
2776def : VFP3InstAlias<"fconstd${p} $Dd, $val", 2777 (FCONSTD DPR:$Dd, vfp_f64imm:$val, pred:$p)>; 2778def : VFP3InstAlias<"fconsts${p} $Sd, $val", 2779 (FCONSTS SPR:$Sd, vfp_f32imm:$val, pred:$p)>; 2780 2781def VSCCLRMD : VFPXI<(outs), (ins pred:$p, fp_dreglist_with_vpr:$regs, variable_ops), 2782 AddrModeNone, 4, IndexModeNone, VFPMiscFrm, NoItinerary, 2783 "vscclrm{$p}\t$regs", "", []>, Sched<[]> { 2784 bits<13> regs; 2785 let Inst{31-23} = 0b111011001; 2786 let Inst{22} = regs{12}; 2787 let Inst{21-16} = 0b011111; 2788 let Inst{15-12} = regs{11-8}; 2789 let Inst{11-8} = 0b1011; 2790 let Inst{7-1} = regs{7-1}; 2791 let Inst{0} = 0; 2792 2793 let DecoderMethod = "DecodeVSCCLRM"; 2794 2795 list<Predicate> Predicates = [HasV8_1MMainline, Has8MSecExt]; 2796} 2797 2798def VSCCLRMS : VFPXI<(outs), (ins pred:$p, fp_sreglist_with_vpr:$regs, variable_ops), 2799 AddrModeNone, 4, IndexModeNone, VFPMiscFrm, NoItinerary, 2800 "vscclrm{$p}\t$regs", "", []>, Sched<[]> { 2801 bits<13> regs; 2802 let Inst{31-23} = 0b111011001; 2803 let Inst{22} = regs{8}; 2804 let Inst{21-16} = 0b011111; 2805 let Inst{15-12} = regs{12-9}; 2806 let Inst{11-8} = 0b1010; 2807 let Inst{7-0} = regs{7-0}; 2808 2809 let DecoderMethod = "DecodeVSCCLRM"; 2810 2811 list<Predicate> Predicates = [HasV8_1MMainline, Has8MSecExt]; 2812} 2813 2814//===----------------------------------------------------------------------===// 2815// Store VFP System Register to memory. 2816// 2817 2818class vfp_vstrldr<bit opc, bit P, bit W, bits<4> SysReg, string sysreg, 2819 dag oops, dag iops, IndexMode im, string Dest, string cstr> 2820 : VFPI<oops, iops, AddrModeT2_i7s4, 4, im, VFPLdStFrm, IIC_fpSTAT, 2821 !if(opc,"vldr","vstr"), !strconcat("\t", sysreg, ", ", Dest), cstr, []>, 2822 Sched<[]> { 2823 bits<12> addr; 2824 let Inst{27-25} = 0b110; 2825 let Inst{24} = P; 2826 let Inst{23} = addr{7}; 2827 let Inst{22} = SysReg{3}; 2828 let Inst{21} = W; 2829 let Inst{20} = opc; 2830 let Inst{19-16} = addr{11-8}; 2831 let Inst{15-13} = SysReg{2-0}; 2832 let Inst{12-7} = 0b011111; 2833 let Inst{6-0} = addr{6-0}; 2834 list<Predicate> Predicates = [HasFPRegs, HasV8_1MMainline]; 2835 let mayLoad = opc; 2836 let mayStore = !if(opc, 0b0, 0b1); 2837 let hasSideEffects = 1; 2838} 2839 2840multiclass vfp_vstrldr_sysreg<bit opc, bits<4> SysReg, string sysreg, 2841 dag oops=(outs), dag iops=(ins)> { 2842 def _off : 2843 vfp_vstrldr<opc, 1, 0, SysReg, sysreg, 2844 oops, !con(iops, (ins t2addrmode_imm7s4:$addr)), 2845 IndexModePost, "$addr", "" > { 2846 let DecoderMethod = "DecodeVSTRVLDR_SYSREG<false>"; 2847 } 2848 2849 def _pre : 2850 vfp_vstrldr<opc, 1, 1, SysReg, sysreg, 2851 !con(oops, (outs GPRnopc:$wb)), 2852 !con(iops, (ins t2addrmode_imm7s4_pre:$addr)), 2853 IndexModePre, "$addr!", "$addr.base = $wb"> { 2854 let DecoderMethod = "DecodeVSTRVLDR_SYSREG<true>"; 2855 } 2856 2857 def _post : 2858 vfp_vstrldr<opc, 0, 1, SysReg, sysreg, 2859 !con(oops, (outs GPRnopc:$wb)), 2860 !con(iops, (ins t2_addr_offset_none:$Rn, 2861 t2am_imm7s4_offset:$addr)), 2862 IndexModePost, "$Rn$addr", "$Rn.base = $wb"> { 2863 bits<4> Rn; 2864 let Inst{19-16} = Rn{3-0}; 2865 let DecoderMethod = "DecodeVSTRVLDR_SYSREG<true>"; 2866 } 2867} 2868 2869let Defs = [FPSCR] in { 2870 defm VSTR_FPSCR : vfp_vstrldr_sysreg<0b0,0b0001, "fpscr">; 2871 defm VSTR_FPSCR_NZCVQC : vfp_vstrldr_sysreg<0b0,0b0010, "fpscr_nzcvqc">; 2872 2873 let Predicates = [HasV8_1MMainline, Has8MSecExt] in { 2874 defm VSTR_FPCXTNS : vfp_vstrldr_sysreg<0b0,0b1110, "fpcxtns">; 2875 defm VSTR_FPCXTS : 
vfp_vstrldr_sysreg<0b0,0b1111, "fpcxts">; 2876 } 2877} 2878 2879let Predicates = [HasV8_1MMainline, HasMVEInt] in { 2880 let Uses = [VPR] in { 2881 defm VSTR_VPR : vfp_vstrldr_sysreg<0b0,0b1100, "vpr">; 2882 } 2883 defm VSTR_P0 : vfp_vstrldr_sysreg<0b0,0b1101, "p0", 2884 (outs), (ins VCCR:$P0)>; 2885 2886 let Defs = [VPR] in { 2887 defm VLDR_VPR : vfp_vstrldr_sysreg<0b1,0b1100, "vpr">; 2888 } 2889 defm VLDR_P0 : vfp_vstrldr_sysreg<0b1,0b1101, "p0", 2890 (outs VCCR:$P0), (ins)>; 2891} 2892 2893let Uses = [FPSCR] in { 2894 defm VLDR_FPSCR : vfp_vstrldr_sysreg<0b1,0b0001, "fpscr">; 2895 defm VLDR_FPSCR_NZCVQC : vfp_vstrldr_sysreg<0b1,0b0010, "fpscr_nzcvqc">; 2896 2897 let Predicates = [HasV8_1MMainline, Has8MSecExt] in { 2898 defm VLDR_FPCXTNS : vfp_vstrldr_sysreg<0b1,0b1110, "fpcxtns">; 2899 defm VLDR_FPCXTS : vfp_vstrldr_sysreg<0b1,0b1111, "fpcxts">; 2900 } 2901} 2902
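// Note: the _off/_pre/_post variants produced by vfp_vstrldr_sysreg correspond
// to the usual offset, pre-indexed and post-indexed addressing forms. A rough
// illustration (using FPCXTS and SP purely as an example):
//   vstr fpcxts, [sp, #-8]    @ _off : offset form, no writeback
//   vstr fpcxts, [sp, #-8]!   @ _pre : pre-indexed, base register updated
//   vstr fpcxts, [sp], #-8    @ _post: post-indexed, base register updated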