//===- PPCInstrVSX.td - The PowerPC VSX Extension --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the VSX extension to the PowerPC instruction set.
//
//===----------------------------------------------------------------------===//

// *********************************** NOTE ***********************************
// ** For POWER8 Little Endian, the VSX swap optimization relies on knowing  **
// ** which VMX and VSX instructions are lane-sensitive and which are not.   **
// ** A lane-sensitive instruction relies, implicitly or explicitly, on      **
// ** whether lanes are numbered from left to right.  An instruction like    **
// ** VADDFP is not lane-sensitive, because each lane of the result vector   **
// ** relies only on the corresponding lane of the source vectors.  However, **
// ** an instruction like VMULESB is lane-sensitive, because "even" and      **
// ** "odd" lanes are different for big-endian and little-endian numbering.  **
// **                                                                        **
// ** When adding new VMX and VSX instructions, please consider whether they **
// ** are lane-sensitive.  If so, they must be added to a switch statement   **
// ** in PPCVSXSwapRemoval::gatherVectorInstructions().                      **
// ****************************************************************************
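
// As an illustration only (a paraphrased sketch with a hypothetical case;
// see the actual switch in PPCVSXSwapRemoval.cpp for the real handling),
// registering a lane-sensitive but fixable instruction looks roughly like:
//   case PPC::XXPERMDI:
//     SwapVector[VecIdx].IsSwappable = 1;
//     SwapVector[VecIdx].SpecialHandling = SHValues::SH_XXPERMDI;
//     break;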

// *********************************** NOTE ***********************************
// ** When adding new anonymous patterns to this file, please add them to    **
// ** the section titled Anonymous Patterns. Chances are that the existing   **
// ** predicate blocks already contain a combination of features that you    **
// ** are after. There is a list of blocks at the top of the section. If     **
// ** you definitely need a new combination of predicates, please add that   **
// ** combination to the list.                                               **
// ** File Structure:                                                        **
// ** - Custom PPCISD node definitions                                       **
// ** - Predicate definitions: predicates to specify the subtargets for      **
// **   which an instruction or pattern can be emitted.                      **
// ** - Instruction formats: classes instantiated by the instructions.       **
// **   These generally correspond to instruction formats in section 1.6 of  **
// **   the ISA document.                                                    **
// ** - Instruction definitions: the actual definitions of the instructions, **
// **   often including input patterns that they match.                      **
// ** - Helper DAG definitions: We define a number of dag objects to use as  **
// **   input or output patterns for conciseness of the code.                **
// ** - Anonymous patterns: input patterns that an instruction matches can   **
// **   often not be specified as part of the instruction definition, so an  **
// **   anonymous pattern must be specified mapping an input pattern to an   **
// **   output pattern. These are generally guarded by subtarget predicates. **
// ** - Instruction aliases: used to define extended mnemonics for assembly  **
// **   printing (for example: xxswapd for xxpermdi with 0x2 as the imm).    **
// ****************************************************************************

def SDT_PPCldvsxlh : SDTypeProfile<1, 1, [
  SDTCisVT<0, v4f32>, SDTCisPtrTy<1>
]>;

def SDT_PPCfpexth : SDTypeProfile<1, 2, [
  SDTCisVT<0, v2f64>, SDTCisVT<1, v4f32>, SDTCisPtrTy<2>
]>;

def SDT_PPCldsplat : SDTypeProfile<1, 1, [
  SDTCisVec<0>, SDTCisPtrTy<1>
]>;

// Little-endian-specific nodes.
def SDT_PPClxvd2x : SDTypeProfile<1, 1, [
  SDTCisVT<0, v2f64>, SDTCisPtrTy<1>
]>;
def SDT_PPCstxvd2x : SDTypeProfile<0, 2, [
  SDTCisVT<0, v2f64>, SDTCisPtrTy<1>
]>;
def SDT_PPCxxswapd : SDTypeProfile<1, 1, [
  SDTCisSameAs<0, 1>
]>;
def SDTVecConv : SDTypeProfile<1, 2, [
  SDTCisVec<0>, SDTCisVec<1>, SDTCisPtrTy<2>
]>;
def SDT_PPCld_vec_be : SDTypeProfile<1, 1, [
  SDTCisVec<0>, SDTCisPtrTy<1>
]>;
def SDT_PPCst_vec_be : SDTypeProfile<0, 2, [
  SDTCisVec<0>, SDTCisPtrTy<1>
]>;

def SDT_PPCxxperm : SDTypeProfile<1, 3, [
  SDTCisVT<0, v2f64>, SDTCisVT<1, v2f64>,
  SDTCisVT<2, v2f64>, SDTCisVT<3, v4i32>]>;
//--------------------------- Custom PPC nodes -------------------------------//
def PPClxvd2x  : SDNode<"PPCISD::LXVD2X", SDT_PPClxvd2x,
                        [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def PPCstxvd2x : SDNode<"PPCISD::STXVD2X", SDT_PPCstxvd2x,
                        [SDNPHasChain, SDNPMayStore]>;
def PPCld_vec_be : SDNode<"PPCISD::LOAD_VEC_BE", SDT_PPCld_vec_be,
                          [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def PPCst_vec_be : SDNode<"PPCISD::STORE_VEC_BE", SDT_PPCst_vec_be,
                          [SDNPHasChain, SDNPMayStore]>;
def PPCxxswapd : SDNode<"PPCISD::XXSWAPD", SDT_PPCxxswapd, [SDNPHasChain]>;
def PPCmfvsr : SDNode<"PPCISD::MFVSR", SDTUnaryOp, []>;
def PPCmtvsra : SDNode<"PPCISD::MTVSRA", SDTUnaryOp, []>;
def PPCmtvsrz : SDNode<"PPCISD::MTVSRZ", SDTUnaryOp, []>;
def PPCsvec2fp : SDNode<"PPCISD::SINT_VEC_TO_FP", SDTVecConv, []>;
def PPCuvec2fp: SDNode<"PPCISD::UINT_VEC_TO_FP", SDTVecConv, []>;
def PPCswapNoChain : SDNode<"PPCISD::SWAP_NO_CHAIN", SDT_PPCxxswapd>;

def PPCfpexth : SDNode<"PPCISD::FP_EXTEND_HALF", SDT_PPCfpexth, []>;
def PPCldvsxlh : SDNode<"PPCISD::LD_VSX_LH", SDT_PPCldvsxlh,
                        [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def PPCldsplat : SDNode<"PPCISD::LD_SPLAT", SDT_PPCldsplat,
                        [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def PPCzextldsplat : SDNode<"PPCISD::ZEXT_LD_SPLAT", SDT_PPCldsplat,
                            [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def PPCsextldsplat : SDNode<"PPCISD::SEXT_LD_SPLAT", SDT_PPCldsplat,
                            [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def PPCSToV : SDNode<"PPCISD::SCALAR_TO_VECTOR_PERMUTED",
                     SDTypeProfile<1, 1, []>, []>;

def PPCxxperm : SDNode<"PPCISD::XXPERM", SDT_PPCxxperm, []>;
//-------------------------- Predicate definitions ---------------------------//
def HasVSX : Predicate<"Subtarget->hasVSX()">;
def IsLittleEndian : Predicate<"Subtarget->isLittleEndian()">;
def IsBigEndian : Predicate<"!Subtarget->isLittleEndian()">;
def IsPPC64 : Predicate<"Subtarget->isPPC64()">;
def HasOnlySwappingMemOps : Predicate<"!Subtarget->hasP9Vector()">;
def NoP8Vector : Predicate<"!Subtarget->hasP8Vector()">;
def HasP8Vector : Predicate<"Subtarget->hasP8Vector()">;
def HasDirectMove : Predicate<"Subtarget->hasDirectMove()">;
def NoP9Vector : Predicate<"!Subtarget->hasP9Vector()">;
def HasP9Vector : Predicate<"Subtarget->hasP9Vector()">;
def NoP9Altivec : Predicate<"!Subtarget->hasP9Altivec()">;
def NoP10Vector: Predicate<"!Subtarget->hasP10Vector()">;
def HasP10Vector: Predicate<"Subtarget->hasP10Vector()">;

def PPCldsplatAlign16 : PatFrag<(ops node:$ptr), (PPCldsplat node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getAlign() >= Align(16) &&
         isOffsetMultipleOf(N, 16);
}]>;
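
// Usage sketch for the fragment above (a hypothetical pattern, for
// illustration only): the fragment only matches PPCldsplat nodes whose memory
// operand is 16-byte aligned at an offset that is a multiple of 16, so a
// pattern such as
//   def : Pat<(v4i32 (PPCldsplatAlign16 ForceXForm:$A)),
//             (v4i32 (LXVWSX ForceXForm:$A))>;
// would only fire for such accesses.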
Predicate<"!Subtarget->hasP10Vector()">; 132def HasP10Vector: Predicate<"Subtarget->hasP10Vector()">; 133 134def PPCldsplatAlign16 : PatFrag<(ops node:$ptr), (PPCldsplat node:$ptr), [{ 135 return cast<MemIntrinsicSDNode>(N)->getAlign() >= Align(16) && 136 isOffsetMultipleOf(N, 16); 137}]>; 138 139//--------------------- VSX-specific instruction formats ---------------------// 140// By default, all VSX instructions are to be selected over their Altivec 141// counter parts and they do not have unmodeled sideeffects. 142let AddedComplexity = 400, hasSideEffects = 0 in { 143multiclass XX3Form_Rcr<bits<6> opcode, bits<7> xo, string asmbase, 144 string asmstr, InstrItinClass itin, Intrinsic Int, 145 ValueType OutTy, ValueType InTy> { 146 let BaseName = asmbase in { 147 def NAME : XX3Form_Rc<opcode, xo, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), 148 !strconcat(asmbase, !strconcat(" ", asmstr)), itin, 149 [(set OutTy:$XT, (Int InTy:$XA, InTy:$XB))]>; 150 let Defs = [CR6] in 151 def _rec : XX3Form_Rc<opcode, xo, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), 152 !strconcat(asmbase, !strconcat(". ", asmstr)), itin, 153 [(set InTy:$XT, 154 (InTy (PPCvcmp_rec InTy:$XA, InTy:$XB, xo)))]>, 155 isRecordForm; 156 } 157} 158 159// Instruction form with a single input register for instructions such as 160// XXPERMDI. The reason for defining this is that specifying multiple chained 161// operands (such as loads) to an instruction will perform both chained 162// operations rather than coalescing them into a single register - even though 163// the source memory location is the same. This simply forces the instruction 164// to use the same register for both inputs. 165// For example, an output DAG such as this: 166// (XXPERMDI (LXSIBZX xoaddr:$src), (LXSIBZX xoaddr:$src ), 0)) 167// would result in two load instructions emitted and used as separate inputs 168// to the XXPERMDI instruction. 

// Instruction form with a single input register for instructions such as
// XXPERMDI. The reason for defining this is that specifying multiple chained
// operands (such as loads) to an instruction will perform both chained
// operations rather than coalescing them into a single register - even though
// the source memory location is the same. This simply forces the instruction
// to use the same register for both inputs.
// For example, an output DAG such as this:
//   (XXPERMDI (LXSIBZX xoaddr:$src), (LXSIBZX xoaddr:$src), 0)
// would result in two load instructions emitted and used as separate inputs
// to the XXPERMDI instruction.
class XX3Form_2s<bits<6> opcode, bits<5> xo, dag OOL, dag IOL, string asmstr,
                 InstrItinClass itin, list<dag> pattern>
  : XX3Form_2<opcode, xo, OOL, IOL, asmstr, itin, pattern> {
  let XB = XA;
}
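
// With the tied form above, the same input instead selects to a single load
// feeding both operands of the permute, i.e. (sketch):
//   (XXPERMDIs (LXSIBZX xoaddr:$src), 0)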

let Predicates = [HasVSX, HasP9Vector] in {
class X_VT5_XO5_VB5<bits<6> opcode, bits<5> xo2, bits<10> xo, string opc,
                    list<dag> pattern>
  : X_RD5_XO5_RS5<opcode, xo2, xo, (outs vrrc:$RST), (ins vrrc:$RB),
                  !strconcat(opc, " $RST, $RB"), IIC_VecFP, pattern>;

// [PO VRT XO VRB XO RO], Round to Odd version of [PO VRT XO VRB XO /]
class X_VT5_XO5_VB5_Ro<bits<6> opcode, bits<5> xo2, bits<10> xo, string opc,
                       list<dag> pattern>
  : X_VT5_XO5_VB5<opcode, xo2, xo, opc, pattern>, isRecordForm;

// [PO VRT XO VRB XO /], but only the left 64 bits (or fewer) of VRB are used,
// so we use a different operand class for VRB.
class X_VT5_XO5_VB5_TyVB<bits<6> opcode, bits<5> xo2, bits<10> xo, string opc,
                         RegisterOperand vbtype, list<dag> pattern>
  : X_RD5_XO5_RS5<opcode, xo2, xo, (outs vrrc:$RST), (ins vbtype:$RB),
                  !strconcat(opc, " $RST, $RB"), IIC_VecFP, pattern>;

// [PO VRT XO VRB XO /]
class X_VT5_XO5_VB5_VSFR<bits<6> opcode, bits<5> xo2, bits<10> xo, string opc,
                         list<dag> pattern>
  : X_RD5_XO5_RS5<opcode, xo2, xo, (outs vfrc:$RST), (ins vrrc:$RB),
                  !strconcat(opc, " $RST, $RB"), IIC_VecFP, pattern>;

// [PO VRT XO VRB XO RO], Round to Odd version of [PO VRT XO VRB XO /]
class X_VT5_XO5_VB5_VSFR_Ro<bits<6> opcode, bits<5> xo2, bits<10> xo, string opc,
                            list<dag> pattern>
  : X_VT5_XO5_VB5_VSFR<opcode, xo2, xo, opc, pattern>, isRecordForm;

// [PO T XO B XO BX /]
class XX2_RT5_XO5_XB6<bits<6> opcode, bits<5> xo2, bits<9> xo, string opc,
                      list<dag> pattern>
  : XX2_RD5_XO5_RS6<opcode, xo2, xo, (outs g8rc:$RT), (ins vsfrc:$XB),
                    !strconcat(opc, " $RT, $XB"), IIC_VecFP, pattern>;

// [PO T XO B XO BX TX]
class XX2_XT6_XO5_XB6<bits<6> opcode, bits<5> xo2, bits<9> xo, string opc,
                      RegisterOperand vtype, list<dag> pattern>
  : XX2_RD6_XO5_RS6<opcode, xo2, xo, (outs vtype:$XT), (ins vtype:$XB),
                    !strconcat(opc, " $XT, $XB"), IIC_VecFP, pattern>;

// [PO T A B XO AX BX TX], src and dest register use different operand class
class XX3_XT5_XA5_XB5<bits<6> opcode, bits<8> xo, string opc,
                      RegisterOperand xty, RegisterOperand aty,
                      RegisterOperand bty, InstrItinClass itin,
                      list<dag> pattern>
  : XX3Form<opcode, xo, (outs xty:$XT), (ins aty:$XA, bty:$XB),
            !strconcat(opc, " $XT, $XA, $XB"), itin, pattern>;

// [PO VRT VRA VRB XO /]
class X_VT5_VA5_VB5<bits<6> opcode, bits<10> xo, string opc,
                    list<dag> pattern>
  : XForm_1<opcode, xo, (outs vrrc:$RST), (ins vrrc:$RA, vrrc:$RB),
            !strconcat(opc, " $RST, $RA, $RB"), IIC_VecFP, pattern>;

// [PO VRT VRA VRB XO RO], Round to Odd version of [PO VRT VRA VRB XO /]
class X_VT5_VA5_VB5_Ro<bits<6> opcode, bits<10> xo, string opc,
                       list<dag> pattern>
  : X_VT5_VA5_VB5<opcode, xo, opc, pattern>, isRecordForm;

// [PO VRT VRA VRB XO /]
class X_VT5_VA5_VB5_FMA<bits<6> opcode, bits<10> xo, string opc,
                        list<dag> pattern>
  : XForm_1<opcode, xo, (outs vrrc:$RST), (ins vrrc:$RSTi, vrrc:$RA, vrrc:$RB),
            !strconcat(opc, " $RST, $RA, $RB"), IIC_VecFP, pattern>,
    RegConstraint<"$RSTi = $RST">, NoEncode<"$RSTi">;

// [PO VRT VRA VRB XO RO], Round to Odd version of [PO VRT VRA VRB XO /]
class X_VT5_VA5_VB5_FMA_Ro<bits<6> opcode, bits<10> xo, string opc,
                           list<dag> pattern>
  : X_VT5_VA5_VB5_FMA<opcode, xo, opc, pattern>, isRecordForm;

class Z23_VT5_R1_VB5_RMC2_EX1<bits<6> opcode, bits<8> xo, bit ex, string opc,
                              list<dag> pattern>
  : Z23Form_8<opcode, xo,
              (outs vrrc:$VRT), (ins u1imm:$R, vrrc:$VRB, u2imm:$idx),
              !strconcat(opc, " $R, $VRT, $VRB, $idx"), IIC_VecFP, pattern> {
  let RC = ex;
}

// [PO BF // VRA VRB XO /]
class X_BF3_VA5_VB5<bits<6> opcode, bits<10> xo, string opc,
                    list<dag> pattern>
  : XForm_17<opcode, xo, (outs crrc:$BF), (ins vrrc:$RA, vrrc:$RB),
             !strconcat(opc, " $BF, $RA, $RB"), IIC_FPCompare> {
  let Pattern = pattern;
}

// [PO T RA RB XO TX] almost equal to [PO S RA RB XO SX], but has different
// "out" and "in" dag
class X_XT6_RA5_RB5<bits<6> opcode, bits<10> xo, string opc,
                    RegisterOperand vtype, list<dag> pattern>
  : XX1Form_memOp<opcode, xo, (outs vtype:$XT), (ins (memrr $RA, $RB):$addr),
                  !strconcat(opc, " $XT, $addr"), IIC_LdStLFD, pattern>;

// [PO S RA RB XO SX]
class X_XS6_RA5_RB5<bits<6> opcode, bits<10> xo, string opc,
                    RegisterOperand vtype, list<dag> pattern>
  : XX1Form_memOp<opcode, xo, (outs), (ins vtype:$XT, (memrr $RA, $RB):$addr),
                  !strconcat(opc, " $XT, $addr"), IIC_LdStSTFD, pattern>;
} // Predicates = HasP9Vector
} // AddedComplexity = 400, hasSideEffects = 0

multiclass ScalToVecWPermute<ValueType Ty, dag In, dag NonPermOut, dag PermOut> {
  def : Pat<(Ty (scalar_to_vector In)), (Ty NonPermOut)>;
  def : Pat<(Ty (PPCSToV In)), (Ty PermOut)>;
}
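
// Instantiation sketch (hypothetical operands, for illustration only):
//   defm : ScalToVecWPermute<
//     v4i32, (i32 (load ForceXForm:$src)),
//     (SUBREG_TO_REG (i64 1), (LIWZX ForceXForm:$src), sub_64),
//     (XXPERMDIs (LIWZX ForceXForm:$src), 2)>;
// Both Pats produce the same value; the PPCSToV variant gives the
// little-endian lowering a handle on the permuted element position.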

//-------------------------- Instruction definitions -------------------------//
// VSX instructions require the VSX feature, they are to be selected over
// equivalent Altivec patterns (as they address a larger register set) and
// they do not have unmodeled side effects.
let Predicates = [HasVSX], AddedComplexity = 400 in {
let hasSideEffects = 0 in {

  // Load indexed instructions
  let mayLoad = 1, mayStore = 0 in {
    let CodeSize = 3 in
    def LXSDX : XX1Form_memOp<31, 588,
                              (outs vsfrc:$XT), (ins (memrr $RA, $RB):$addr),
                              "lxsdx $XT, $addr", IIC_LdStLFD,
                              []>;

    // Pseudo instruction XFLOADf64 will be expanded to LXSDX or LFDX later
    let CodeSize = 3 in
    def XFLOADf64 : PseudoXFormMemOp<(outs vsfrc:$XT),
                                     (ins (memrr $RA, $RB):$addr),
                                     "#XFLOADf64",
                                     [(set f64:$XT, (load XForm:$addr))]>;

    let Predicates = [HasVSX, HasOnlySwappingMemOps] in
    def LXVD2X : XX1Form_memOp<31, 844,
                               (outs vsrc:$XT), (ins (memrr $RA, $RB):$addr),
                               "lxvd2x $XT, $addr", IIC_LdStLFD,
                               []>;

    def LXVDSX : XX1Form_memOp<31, 332,
                               (outs vsrc:$XT), (ins (memrr $RA, $RB):$addr),
                               "lxvdsx $XT, $addr", IIC_LdStLFD, []>;

    let Predicates = [HasVSX, HasOnlySwappingMemOps] in
    def LXVW4X : XX1Form_memOp<31, 780,
                               (outs vsrc:$XT), (ins (memrr $RA, $RB):$addr),
                               "lxvw4x $XT, $addr", IIC_LdStLFD,
                               []>;
  } // mayLoad

  // Store indexed instructions
  let mayStore = 1, mayLoad = 0 in {
    let CodeSize = 3 in
    def STXSDX : XX1Form_memOp<31, 716,
                               (outs), (ins vsfrc:$XT, (memrr $RA, $RB):$addr),
                               "stxsdx $XT, $addr", IIC_LdStSTFD,
                               []>;

    // Pseudo instruction XFSTOREf64 will be expanded to STXSDX or STFDX later
    let CodeSize = 3 in
    def XFSTOREf64 : PseudoXFormMemOp<(outs),
                                      (ins vsfrc:$XT, (memrr $RA, $RB):$addr),
                                      "#XFSTOREf64",
                                      [(store f64:$XT, XForm:$addr)]>;

    let Predicates = [HasVSX, HasOnlySwappingMemOps] in {
    // The behaviour of this instruction is endianness-specific so we provide no
    // pattern to match it without considering endianness.
    def STXVD2X : XX1Form_memOp<31, 972,
                                (outs), (ins vsrc:$XT, (memrr $RA, $RB):$addr),
                                "stxvd2x $XT, $addr", IIC_LdStSTFD,
                                []>;

    def STXVW4X : XX1Form_memOp<31, 908,
                                (outs), (ins vsrc:$XT, (memrr $RA, $RB):$addr),
                                "stxvw4x $XT, $addr", IIC_LdStSTFD,
                                []>;
    }
  } // mayStore

  let mayRaiseFPException = 1 in {
  let Uses = [RM] in {
  // Add/Mul Instructions
  let isCommutable = 1 in {
    def XSADDDP : XX3Form<60, 32,
                          (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
                          "xsadddp $XT, $XA, $XB", IIC_VecFP,
                          [(set f64:$XT, (any_fadd f64:$XA, f64:$XB))]>;
    def XSMULDP : XX3Form<60, 48,
                          (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
                          "xsmuldp $XT, $XA, $XB", IIC_VecFP,
                          [(set f64:$XT, (any_fmul f64:$XA, f64:$XB))]>;

    def XVADDDP : XX3Form<60, 96,
                          (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                          "xvadddp $XT, $XA, $XB", IIC_VecFP,
                          [(set v2f64:$XT, (any_fadd v2f64:$XA, v2f64:$XB))]>;

    def XVADDSP : XX3Form<60, 64,
                          (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                          "xvaddsp $XT, $XA, $XB", IIC_VecFP,
                          [(set v4f32:$XT, (any_fadd v4f32:$XA, v4f32:$XB))]>;

    def XVMULDP : XX3Form<60, 112,
                          (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                          "xvmuldp $XT, $XA, $XB", IIC_VecFP,
                          [(set v2f64:$XT, (any_fmul v2f64:$XA, v2f64:$XB))]>;

    def XVMULSP : XX3Form<60, 80,
                          (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                          "xvmulsp $XT, $XA, $XB", IIC_VecFP,
                          [(set v4f32:$XT, (any_fmul v4f32:$XA, v4f32:$XB))]>;
  }

  // Subtract Instructions
  def XSSUBDP : XX3Form<60, 40,
                        (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
                        "xssubdp $XT, $XA, $XB", IIC_VecFP,
                        [(set f64:$XT, (any_fsub f64:$XA, f64:$XB))]>;

  def XVSUBDP : XX3Form<60, 104,
                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                        "xvsubdp $XT, $XA, $XB", IIC_VecFP,
                        [(set v2f64:$XT, (any_fsub v2f64:$XA, v2f64:$XB))]>;
  def XVSUBSP : XX3Form<60, 72,
                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                        "xvsubsp $XT, $XA, $XB", IIC_VecFP,
                        [(set v4f32:$XT, (any_fsub v4f32:$XA, v4f32:$XB))]>;

  // FMA Instructions
  let BaseName = "XSMADDADP" in {
  let isCommutable = 1 in
  def XSMADDADP : XX3Form<60, 33,
                          (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
                          "xsmaddadp $XT, $XA, $XB", IIC_VecFP,
                          [(set f64:$XT, (any_fma f64:$XA, f64:$XB, f64:$XTi))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XSMADDMDP : XX3Form<60, 41,
                          (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
                          "xsmaddmdp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }
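
  // A note on the FMA groups in this file: each BaseName block pairs an
  // A-form (e.g. XSMADDADP, which overwrites the addend) with its M-form
  // twin (XSMADDMDP, which overwrites a multiplicand) through AltVSXFMARel.
  // Only the A-form carries a selection pattern; the M-form is marked
  // IsVSXFMAAlt and may be substituted later (by the VSX FMA mutation pass)
  // when that removes a register copy.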

  let BaseName = "XSMSUBADP" in {
  let isCommutable = 1 in
  def XSMSUBADP : XX3Form<60, 49,
                          (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
                          "xsmsubadp $XT, $XA, $XB", IIC_VecFP,
                          [(set f64:$XT, (any_fma f64:$XA, f64:$XB, (fneg f64:$XTi)))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XSMSUBMDP : XX3Form<60, 57,
                          (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
                          "xsmsubmdp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XSNMADDADP" in {
  let isCommutable = 1 in
  def XSNMADDADP : XX3Form<60, 161,
                           (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
                           "xsnmaddadp $XT, $XA, $XB", IIC_VecFP,
                           [(set f64:$XT, (fneg (any_fma f64:$XA, f64:$XB, f64:$XTi)))]>,
                           RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                           AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XSNMADDMDP : XX3Form<60, 169,
                           (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
                           "xsnmaddmdp $XT, $XA, $XB", IIC_VecFP, []>,
                           RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                           AltVSXFMARel;
  }

  let BaseName = "XSNMSUBADP" in {
  let isCommutable = 1 in
  def XSNMSUBADP : XX3Form<60, 177,
                           (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
                           "xsnmsubadp $XT, $XA, $XB", IIC_VecFP,
                           [(set f64:$XT, (fneg (any_fma f64:$XA, f64:$XB, (fneg f64:$XTi))))]>,
                           RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                           AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XSNMSUBMDP : XX3Form<60, 185,
                           (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
                           "xsnmsubmdp $XT, $XA, $XB", IIC_VecFP, []>,
                           RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                           AltVSXFMARel;
  }

  let BaseName = "XVMADDADP" in {
  let isCommutable = 1 in
  def XVMADDADP : XX3Form<60, 97,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvmaddadp $XT, $XA, $XB", IIC_VecFP,
                          [(set v2f64:$XT, (any_fma v2f64:$XA, v2f64:$XB, v2f64:$XTi))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XVMADDMDP : XX3Form<60, 105,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvmaddmdp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XVMADDASP" in {
  let isCommutable = 1 in
  def XVMADDASP : XX3Form<60, 65,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvmaddasp $XT, $XA, $XB", IIC_VecFP,
                          [(set v4f32:$XT, (any_fma v4f32:$XA, v4f32:$XB, v4f32:$XTi))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XVMADDMSP : XX3Form<60, 73,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvmaddmsp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XVMSUBADP" in {
  let isCommutable = 1 in
  def XVMSUBADP : XX3Form<60, 113,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvmsubadp $XT, $XA, $XB", IIC_VecFP,
                          [(set v2f64:$XT, (any_fma v2f64:$XA, v2f64:$XB, (fneg v2f64:$XTi)))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XVMSUBMDP : XX3Form<60, 121,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvmsubmdp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XVMSUBASP" in {
  let isCommutable = 1 in
  def XVMSUBASP : XX3Form<60, 81,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvmsubasp $XT, $XA, $XB", IIC_VecFP,
                          [(set v4f32:$XT, (any_fma v4f32:$XA, v4f32:$XB, (fneg v4f32:$XTi)))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XVMSUBMSP : XX3Form<60, 89,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvmsubmsp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XVNMADDADP" in {
  let isCommutable = 1 in
  def XVNMADDADP : XX3Form<60, 225,
                           (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                           "xvnmaddadp $XT, $XA, $XB", IIC_VecFP,
                           [(set v2f64:$XT, (fneg (any_fma v2f64:$XA, v2f64:$XB, v2f64:$XTi)))]>,
                           RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                           AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XVNMADDMDP : XX3Form<60, 233,
                           (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                           "xvnmaddmdp $XT, $XA, $XB", IIC_VecFP, []>,
                           RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                           AltVSXFMARel;
  }

  let BaseName = "XVNMADDASP" in {
  let isCommutable = 1 in
  def XVNMADDASP : XX3Form<60, 193,
                           (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                           "xvnmaddasp $XT, $XA, $XB", IIC_VecFP,
                           [(set v4f32:$XT, (fneg (fma v4f32:$XA, v4f32:$XB, v4f32:$XTi)))]>,
                           RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                           AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XVNMADDMSP : XX3Form<60, 201,
                           (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                           "xvnmaddmsp $XT, $XA, $XB", IIC_VecFP, []>,
                           RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                           AltVSXFMARel;
  }

  let BaseName = "XVNMSUBADP" in {
  let isCommutable = 1 in
  def XVNMSUBADP : XX3Form<60, 241,
                           (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                           "xvnmsubadp $XT, $XA, $XB", IIC_VecFP,
                           [(set v2f64:$XT, (fneg (any_fma v2f64:$XA, v2f64:$XB, (fneg v2f64:$XTi))))]>,
                           RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                           AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XVNMSUBMDP : XX3Form<60, 249,
                           (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                           "xvnmsubmdp $XT, $XA, $XB", IIC_VecFP, []>,
                           RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                           AltVSXFMARel;
  }

  let BaseName = "XVNMSUBASP" in {
  let isCommutable = 1 in
  def XVNMSUBASP : XX3Form<60, 209,
                           (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                           "xvnmsubasp $XT, $XA, $XB", IIC_VecFP,
                           [(set v4f32:$XT, (fneg (any_fma v4f32:$XA, v4f32:$XB, (fneg v4f32:$XTi))))]>,
                           RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                           AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XVNMSUBMSP : XX3Form<60, 217,
                           (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                           "xvnmsubmsp $XT, $XA, $XB", IIC_VecFP, []>,
                           RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                           AltVSXFMARel;
  }

  // Division Instructions
  def XSDIVDP : XX3Form<60, 56,
                        (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
                        "xsdivdp $XT, $XA, $XB", IIC_FPDivD,
                        [(set f64:$XT, (any_fdiv f64:$XA, f64:$XB))]>;
  def XSSQRTDP : XX2Form<60, 75,
                         (outs vsfrc:$XT), (ins vsfrc:$XB),
                         "xssqrtdp $XT, $XB", IIC_FPSqrtD,
                         [(set f64:$XT, (any_fsqrt f64:$XB))]>;

  def XSREDP : XX2Form<60, 90,
                       (outs vsfrc:$XT), (ins vsfrc:$XB),
                       "xsredp $XT, $XB", IIC_VecFP,
                       [(set f64:$XT, (PPCfre f64:$XB))]>;
  def XSRSQRTEDP : XX2Form<60, 74,
                           (outs vsfrc:$XT), (ins vsfrc:$XB),
                           "xsrsqrtedp $XT, $XB", IIC_VecFP,
                           [(set f64:$XT, (PPCfrsqrte f64:$XB))]>;

  let mayRaiseFPException = 0 in {
  def XSTDIVDP : XX3Form_1<60, 61,
                           (outs crrc:$CR), (ins vsfrc:$XA, vsfrc:$XB),
                           "xstdivdp $CR, $XA, $XB", IIC_FPCompare, []>;
  def XSTSQRTDP : XX2Form_1<60, 106,
                            (outs crrc:$CR), (ins vsfrc:$XB),
                            "xstsqrtdp $CR, $XB", IIC_FPCompare,
                            [(set i32:$CR, (PPCftsqrt f64:$XB))]>;
  def XVTDIVDP : XX3Form_1<60, 125,
                           (outs crrc:$CR), (ins vsrc:$XA, vsrc:$XB),
                           "xvtdivdp $CR, $XA, $XB", IIC_FPCompare, []>;
  def XVTDIVSP : XX3Form_1<60, 93,
                           (outs crrc:$CR), (ins vsrc:$XA, vsrc:$XB),
                           "xvtdivsp $CR, $XA, $XB", IIC_FPCompare, []>;

  def XVTSQRTDP : XX2Form_1<60, 234,
                            (outs crrc:$CR), (ins vsrc:$XB),
                            "xvtsqrtdp $CR, $XB", IIC_FPCompare,
                            [(set i32:$CR, (PPCftsqrt v2f64:$XB))]>;
  def XVTSQRTSP : XX2Form_1<60, 170,
                            (outs crrc:$CR), (ins vsrc:$XB),
                            "xvtsqrtsp $CR, $XB", IIC_FPCompare,
                            [(set i32:$CR, (PPCftsqrt v4f32:$XB))]>;
  }

  def XVDIVDP : XX3Form<60, 120,
                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                        "xvdivdp $XT, $XA, $XB", IIC_FPDivD,
                        [(set v2f64:$XT, (any_fdiv v2f64:$XA, v2f64:$XB))]>;
  def XVDIVSP : XX3Form<60, 88,
                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                        "xvdivsp $XT, $XA, $XB", IIC_FPDivS,
                        [(set v4f32:$XT, (any_fdiv v4f32:$XA, v4f32:$XB))]>;

  def XVSQRTDP : XX2Form<60, 203,
                         (outs vsrc:$XT), (ins vsrc:$XB),
                         "xvsqrtdp $XT, $XB", IIC_FPSqrtD,
                         [(set v2f64:$XT, (any_fsqrt v2f64:$XB))]>;
  def XVSQRTSP : XX2Form<60, 139,
                         (outs vsrc:$XT), (ins vsrc:$XB),
                         "xvsqrtsp $XT, $XB", IIC_FPSqrtS,
                         [(set v4f32:$XT, (any_fsqrt v4f32:$XB))]>;

  def XVREDP : XX2Form<60, 218,
                       (outs vsrc:$XT), (ins vsrc:$XB),
                       "xvredp $XT, $XB", IIC_VecFP,
                       [(set v2f64:$XT, (PPCfre v2f64:$XB))]>;
  def XVRESP : XX2Form<60, 154,
                       (outs vsrc:$XT), (ins vsrc:$XB),
                       "xvresp $XT, $XB", IIC_VecFP,
                       [(set v4f32:$XT, (PPCfre v4f32:$XB))]>;

  def XVRSQRTEDP : XX2Form<60, 202,
                           (outs vsrc:$XT), (ins vsrc:$XB),
                           "xvrsqrtedp $XT, $XB", IIC_VecFP,
                           [(set v2f64:$XT, (PPCfrsqrte v2f64:$XB))]>;
  def XVRSQRTESP : XX2Form<60, 138,
                           (outs vsrc:$XT), (ins vsrc:$XB),
                           "xvrsqrtesp $XT, $XB", IIC_VecFP,
                           [(set v4f32:$XT, (PPCfrsqrte v4f32:$XB))]>;

  // Compare Instructions
  def XSCMPODP : XX3Form_1<60, 43,
                           (outs crrc:$CR), (ins vsfrc:$XA, vsfrc:$XB),
                           "xscmpodp $CR, $XA, $XB", IIC_FPCompare, []>;
  def XSCMPUDP : XX3Form_1<60, 35,
                           (outs crrc:$CR), (ins vsfrc:$XA, vsfrc:$XB),
                           "xscmpudp $CR, $XA, $XB", IIC_FPCompare, []>;

  defm XVCMPEQDP : XX3Form_Rcr<60, 99,
                               "xvcmpeqdp", "$XT, $XA, $XB", IIC_VecFPCompare,
                               int_ppc_vsx_xvcmpeqdp, v2i64, v2f64>;
  defm XVCMPEQSP : XX3Form_Rcr<60, 67,
                               "xvcmpeqsp", "$XT, $XA, $XB", IIC_VecFPCompare,
                               int_ppc_vsx_xvcmpeqsp, v4i32, v4f32>;
  defm XVCMPGEDP : XX3Form_Rcr<60, 115,
                               "xvcmpgedp", "$XT, $XA, $XB", IIC_VecFPCompare,
                               int_ppc_vsx_xvcmpgedp, v2i64, v2f64>;
  defm XVCMPGESP : XX3Form_Rcr<60, 83,
                               "xvcmpgesp", "$XT, $XA, $XB", IIC_VecFPCompare,
                               int_ppc_vsx_xvcmpgesp, v4i32, v4f32>;
  defm XVCMPGTDP : XX3Form_Rcr<60, 107,
                               "xvcmpgtdp", "$XT, $XA, $XB", IIC_VecFPCompare,
                               int_ppc_vsx_xvcmpgtdp, v2i64, v2f64>;
  defm XVCMPGTSP : XX3Form_Rcr<60, 75,
                               "xvcmpgtsp", "$XT, $XA, $XB", IIC_VecFPCompare,
                               int_ppc_vsx_xvcmpgtsp, v4i32, v4f32>;

  // Move Instructions
  let mayRaiseFPException = 0 in {
  def XSABSDP : XX2Form<60, 345,
                        (outs vsfrc:$XT), (ins vsfrc:$XB),
                        "xsabsdp $XT, $XB", IIC_VecFP,
                        [(set f64:$XT, (fabs f64:$XB))]>;
  def XSNABSDP : XX2Form<60, 361,
                         (outs vsfrc:$XT), (ins vsfrc:$XB),
                         "xsnabsdp $XT, $XB", IIC_VecFP,
                         [(set f64:$XT, (fneg (fabs f64:$XB)))]>;
  let isCodeGenOnly = 1 in
  def XSNABSDPs : XX2Form<60, 361,
                          (outs vssrc:$XT), (ins vssrc:$XB),
                          "xsnabsdp $XT, $XB", IIC_VecFP,
                          [(set f32:$XT, (fneg (fabs f32:$XB)))]>;
  def XSNEGDP : XX2Form<60, 377,
                        (outs vsfrc:$XT), (ins vsfrc:$XB),
                        "xsnegdp $XT, $XB", IIC_VecFP,
                        [(set f64:$XT, (fneg f64:$XB))]>;
  def XSCPSGNDP : XX3Form<60, 176,
                          (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
                          "xscpsgndp $XT, $XA, $XB", IIC_VecFP,
                          [(set f64:$XT, (fcopysign f64:$XB, f64:$XA))]>;

  def XVABSDP : XX2Form<60, 473,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvabsdp $XT, $XB", IIC_VecFP,
                        [(set v2f64:$XT, (fabs v2f64:$XB))]>;

  def XVABSSP : XX2Form<60, 409,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvabssp $XT, $XB", IIC_VecFP,
                        [(set v4f32:$XT, (fabs v4f32:$XB))]>;

  def XVCPSGNDP : XX3Form<60, 240,
                          (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                          "xvcpsgndp $XT, $XA, $XB", IIC_VecFP,
                          [(set v2f64:$XT, (fcopysign v2f64:$XB, v2f64:$XA))]>;
  def XVCPSGNSP : XX3Form<60, 208,
                          (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                          "xvcpsgnsp $XT, $XA, $XB", IIC_VecFP,
                          [(set v4f32:$XT, (fcopysign v4f32:$XB, v4f32:$XA))]>;

  def XVNABSDP : XX2Form<60, 489,
                         (outs vsrc:$XT), (ins vsrc:$XB),
                         "xvnabsdp $XT, $XB", IIC_VecFP,
                         [(set v2f64:$XT, (fneg (fabs v2f64:$XB)))]>;
  def XVNABSSP : XX2Form<60, 425,
                         (outs vsrc:$XT), (ins vsrc:$XB),
                         "xvnabssp $XT, $XB", IIC_VecFP,
                         [(set v4f32:$XT, (fneg (fabs v4f32:$XB)))]>;

  def XVNEGDP : XX2Form<60, 505,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvnegdp $XT, $XB", IIC_VecFP,
                        [(set v2f64:$XT, (fneg v2f64:$XB))]>;
  def XVNEGSP : XX2Form<60, 441,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvnegsp $XT, $XB", IIC_VecFP,
                        [(set v4f32:$XT, (fneg v4f32:$XB))]>;
  }

  // Conversion Instructions
  def XSCVDPSP : XX2Form<60, 265,
                         (outs vsfrc:$XT), (ins vsfrc:$XB),
                         "xscvdpsp $XT, $XB", IIC_VecFP, []>;
  def XSCVDPSXDS : XX2Form<60, 344,
                           (outs vsfrc:$XT), (ins vsfrc:$XB),
                           "xscvdpsxds $XT, $XB", IIC_VecFP,
                           [(set f64:$XT, (PPCany_fctidz f64:$XB))]>;
  let isCodeGenOnly = 1 in
  def XSCVDPSXDSs : XX2Form<60, 344,
                            (outs vssrc:$XT), (ins vssrc:$XB),
                            "xscvdpsxds $XT, $XB", IIC_VecFP,
                            [(set f32:$XT, (PPCany_fctidz f32:$XB))]>;
  def XSCVDPSXWS : XX2Form<60, 88,
                           (outs vsfrc:$XT), (ins vsfrc:$XB),
                           "xscvdpsxws $XT, $XB", IIC_VecFP,
                           [(set f64:$XT, (PPCany_fctiwz f64:$XB))]>;
  let isCodeGenOnly = 1 in
  def XSCVDPSXWSs : XX2Form<60, 88,
                            (outs vssrc:$XT), (ins vssrc:$XB),
                            "xscvdpsxws $XT, $XB", IIC_VecFP,
                            [(set f32:$XT, (PPCany_fctiwz f32:$XB))]>;
  def XSCVDPUXDS : XX2Form<60, 328,
                           (outs vsfrc:$XT), (ins vsfrc:$XB),
                           "xscvdpuxds $XT, $XB", IIC_VecFP,
                           [(set f64:$XT, (PPCany_fctiduz f64:$XB))]>;
  let isCodeGenOnly = 1 in
  def XSCVDPUXDSs : XX2Form<60, 328,
                            (outs vssrc:$XT), (ins vssrc:$XB),
                            "xscvdpuxds $XT, $XB", IIC_VecFP,
                            [(set f32:$XT, (PPCany_fctiduz f32:$XB))]>;
  def XSCVDPUXWS : XX2Form<60, 72,
                           (outs vsfrc:$XT), (ins vsfrc:$XB),
                           "xscvdpuxws $XT, $XB", IIC_VecFP,
                           [(set f64:$XT, (PPCany_fctiwuz f64:$XB))]>;
  let isCodeGenOnly = 1 in
  def XSCVDPUXWSs : XX2Form<60, 72,
                            (outs vssrc:$XT), (ins vssrc:$XB),
                            "xscvdpuxws $XT, $XB", IIC_VecFP,
                            [(set f32:$XT, (PPCany_fctiwuz f32:$XB))]>;
  def XSCVSPDP : XX2Form<60, 329,
                         (outs vsfrc:$XT), (ins vsfrc:$XB),
                         "xscvspdp $XT, $XB", IIC_VecFP, []>;
  def XSCVSXDDP : XX2Form<60, 376,
                          (outs vsfrc:$XT), (ins vsfrc:$XB),
                          "xscvsxddp $XT, $XB", IIC_VecFP,
                          [(set f64:$XT, (PPCany_fcfid f64:$XB))]>;
  def XSCVUXDDP : XX2Form<60, 360,
                          (outs vsfrc:$XT), (ins vsfrc:$XB),
                          "xscvuxddp $XT, $XB", IIC_VecFP,
                          [(set f64:$XT, (PPCany_fcfidu f64:$XB))]>;

  def XVCVDPSP : XX2Form<60, 393,
                         (outs vsrc:$XT), (ins vsrc:$XB),
                         "xvcvdpsp $XT, $XB", IIC_VecFP,
                         [(set v4f32:$XT, (int_ppc_vsx_xvcvdpsp v2f64:$XB))]>;
  def XVCVDPSXDS : XX2Form<60, 472,
                           (outs vsrc:$XT), (ins vsrc:$XB),
                           "xvcvdpsxds $XT, $XB", IIC_VecFP,
                           [(set v2i64:$XT, (any_fp_to_sint v2f64:$XB))]>;
$XB", IIC_VecFP, 819 [(set v2i64:$XT, (any_fp_to_sint v2f64:$XB))]>; 820 def XVCVDPSXWS : XX2Form<60, 216, 821 (outs vsrc:$XT), (ins vsrc:$XB), 822 "xvcvdpsxws $XT, $XB", IIC_VecFP, 823 [(set v4i32:$XT, (int_ppc_vsx_xvcvdpsxws v2f64:$XB))]>; 824 def XVCVDPUXDS : XX2Form<60, 456, 825 (outs vsrc:$XT), (ins vsrc:$XB), 826 "xvcvdpuxds $XT, $XB", IIC_VecFP, 827 [(set v2i64:$XT, (any_fp_to_uint v2f64:$XB))]>; 828 def XVCVDPUXWS : XX2Form<60, 200, 829 (outs vsrc:$XT), (ins vsrc:$XB), 830 "xvcvdpuxws $XT, $XB", IIC_VecFP, 831 [(set v4i32:$XT, (int_ppc_vsx_xvcvdpuxws v2f64:$XB))]>; 832 833 def XVCVSPDP : XX2Form<60, 457, 834 (outs vsrc:$XT), (ins vsrc:$XB), 835 "xvcvspdp $XT, $XB", IIC_VecFP, 836 [(set v2f64:$XT, (int_ppc_vsx_xvcvspdp v4f32:$XB))]>; 837 def XVCVSPSXDS : XX2Form<60, 408, 838 (outs vsrc:$XT), (ins vsrc:$XB), 839 "xvcvspsxds $XT, $XB", IIC_VecFP, 840 [(set v2i64:$XT, (int_ppc_vsx_xvcvspsxds v4f32:$XB))]>; 841 def XVCVSPSXWS : XX2Form<60, 152, 842 (outs vsrc:$XT), (ins vsrc:$XB), 843 "xvcvspsxws $XT, $XB", IIC_VecFP, 844 [(set v4i32:$XT, (any_fp_to_sint v4f32:$XB))]>; 845 def XVCVSPUXDS : XX2Form<60, 392, 846 (outs vsrc:$XT), (ins vsrc:$XB), 847 "xvcvspuxds $XT, $XB", IIC_VecFP, 848 [(set v2i64:$XT, (int_ppc_vsx_xvcvspuxds v4f32:$XB))]>; 849 def XVCVSPUXWS : XX2Form<60, 136, 850 (outs vsrc:$XT), (ins vsrc:$XB), 851 "xvcvspuxws $XT, $XB", IIC_VecFP, 852 [(set v4i32:$XT, (any_fp_to_uint v4f32:$XB))]>; 853 def XVCVSXDDP : XX2Form<60, 504, 854 (outs vsrc:$XT), (ins vsrc:$XB), 855 "xvcvsxddp $XT, $XB", IIC_VecFP, 856 [(set v2f64:$XT, (any_sint_to_fp v2i64:$XB))]>; 857 def XVCVSXDSP : XX2Form<60, 440, 858 (outs vsrc:$XT), (ins vsrc:$XB), 859 "xvcvsxdsp $XT, $XB", IIC_VecFP, 860 [(set v4f32:$XT, (int_ppc_vsx_xvcvsxdsp v2i64:$XB))]>; 861 def XVCVSXWSP : XX2Form<60, 184, 862 (outs vsrc:$XT), (ins vsrc:$XB), 863 "xvcvsxwsp $XT, $XB", IIC_VecFP, 864 [(set v4f32:$XT, (any_sint_to_fp v4i32:$XB))]>; 865 def XVCVUXDDP : XX2Form<60, 488, 866 (outs vsrc:$XT), (ins vsrc:$XB), 867 "xvcvuxddp $XT, $XB", IIC_VecFP, 868 [(set v2f64:$XT, (any_uint_to_fp v2i64:$XB))]>; 869 def XVCVUXDSP : XX2Form<60, 424, 870 (outs vsrc:$XT), (ins vsrc:$XB), 871 "xvcvuxdsp $XT, $XB", IIC_VecFP, 872 [(set v4f32:$XT, (int_ppc_vsx_xvcvuxdsp v2i64:$XB))]>; 873 def XVCVUXWSP : XX2Form<60, 168, 874 (outs vsrc:$XT), (ins vsrc:$XB), 875 "xvcvuxwsp $XT, $XB", IIC_VecFP, 876 [(set v4f32:$XT, (any_uint_to_fp v4i32:$XB))]>; 877 878 let mayRaiseFPException = 0 in { 879 def XVCVSXWDP : XX2Form<60, 248, 880 (outs vsrc:$XT), (ins vsrc:$XB), 881 "xvcvsxwdp $XT, $XB", IIC_VecFP, 882 [(set v2f64:$XT, (int_ppc_vsx_xvcvsxwdp v4i32:$XB))]>; 883 def XVCVUXWDP : XX2Form<60, 232, 884 (outs vsrc:$XT), (ins vsrc:$XB), 885 "xvcvuxwdp $XT, $XB", IIC_VecFP, 886 [(set v2f64:$XT, (int_ppc_vsx_xvcvuxwdp v4i32:$XB))]>; 887 } 888 889 // Rounding Instructions respecting current rounding mode 890 def XSRDPIC : XX2Form<60, 107, 891 (outs vsfrc:$XT), (ins vsfrc:$XB), 892 "xsrdpic $XT, $XB", IIC_VecFP, []>; 893 def XVRDPIC : XX2Form<60, 235, 894 (outs vsrc:$XT), (ins vsrc:$XB), 895 "xvrdpic $XT, $XB", IIC_VecFP, []>; 896 def XVRSPIC : XX2Form<60, 171, 897 (outs vsrc:$XT), (ins vsrc:$XB), 898 "xvrspic $XT, $XB", IIC_VecFP, []>; 899 // Max/Min Instructions 900 let isCommutable = 1 in { 901 def XSMAXDP : XX3Form<60, 160, 902 (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB), 903 "xsmaxdp $XT, $XA, $XB", IIC_VecFP, 904 [(set vsfrc:$XT, 905 (int_ppc_vsx_xsmaxdp vsfrc:$XA, vsfrc:$XB))]>; 906 def XSMINDP : XX3Form<60, 168, 907 (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB), 908 
"xsmindp $XT, $XA, $XB", IIC_VecFP, 909 [(set vsfrc:$XT, 910 (int_ppc_vsx_xsmindp vsfrc:$XA, vsfrc:$XB))]>; 911 912 def XVMAXDP : XX3Form<60, 224, 913 (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), 914 "xvmaxdp $XT, $XA, $XB", IIC_VecFP, 915 [(set vsrc:$XT, 916 (int_ppc_vsx_xvmaxdp vsrc:$XA, vsrc:$XB))]>; 917 def XVMINDP : XX3Form<60, 232, 918 (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), 919 "xvmindp $XT, $XA, $XB", IIC_VecFP, 920 [(set vsrc:$XT, 921 (int_ppc_vsx_xvmindp vsrc:$XA, vsrc:$XB))]>; 922 923 def XVMAXSP : XX3Form<60, 192, 924 (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), 925 "xvmaxsp $XT, $XA, $XB", IIC_VecFP, 926 [(set vsrc:$XT, 927 (int_ppc_vsx_xvmaxsp vsrc:$XA, vsrc:$XB))]>; 928 def XVMINSP : XX3Form<60, 200, 929 (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), 930 "xvminsp $XT, $XA, $XB", IIC_VecFP, 931 [(set vsrc:$XT, 932 (int_ppc_vsx_xvminsp vsrc:$XA, vsrc:$XB))]>; 933 } // isCommutable 934 } // Uses = [RM] 935 936 // Rounding Instructions with static direction. 937 def XSRDPI : XX2Form<60, 73, 938 (outs vsfrc:$XT), (ins vsfrc:$XB), 939 "xsrdpi $XT, $XB", IIC_VecFP, 940 [(set f64:$XT, (any_fround f64:$XB))]>; 941 def XSRDPIM : XX2Form<60, 121, 942 (outs vsfrc:$XT), (ins vsfrc:$XB), 943 "xsrdpim $XT, $XB", IIC_VecFP, 944 [(set f64:$XT, (any_ffloor f64:$XB))]>; 945 def XSRDPIP : XX2Form<60, 105, 946 (outs vsfrc:$XT), (ins vsfrc:$XB), 947 "xsrdpip $XT, $XB", IIC_VecFP, 948 [(set f64:$XT, (any_fceil f64:$XB))]>; 949 def XSRDPIZ : XX2Form<60, 89, 950 (outs vsfrc:$XT), (ins vsfrc:$XB), 951 "xsrdpiz $XT, $XB", IIC_VecFP, 952 [(set f64:$XT, (any_ftrunc f64:$XB))]>; 953 954 def XVRDPI : XX2Form<60, 201, 955 (outs vsrc:$XT), (ins vsrc:$XB), 956 "xvrdpi $XT, $XB", IIC_VecFP, 957 [(set v2f64:$XT, (any_fround v2f64:$XB))]>; 958 def XVRDPIM : XX2Form<60, 249, 959 (outs vsrc:$XT), (ins vsrc:$XB), 960 "xvrdpim $XT, $XB", IIC_VecFP, 961 [(set v2f64:$XT, (any_ffloor v2f64:$XB))]>; 962 def XVRDPIP : XX2Form<60, 233, 963 (outs vsrc:$XT), (ins vsrc:$XB), 964 "xvrdpip $XT, $XB", IIC_VecFP, 965 [(set v2f64:$XT, (any_fceil v2f64:$XB))]>; 966 def XVRDPIZ : XX2Form<60, 217, 967 (outs vsrc:$XT), (ins vsrc:$XB), 968 "xvrdpiz $XT, $XB", IIC_VecFP, 969 [(set v2f64:$XT, (any_ftrunc v2f64:$XB))]>; 970 971 def XVRSPI : XX2Form<60, 137, 972 (outs vsrc:$XT), (ins vsrc:$XB), 973 "xvrspi $XT, $XB", IIC_VecFP, 974 [(set v4f32:$XT, (any_fround v4f32:$XB))]>; 975 def XVRSPIM : XX2Form<60, 185, 976 (outs vsrc:$XT), (ins vsrc:$XB), 977 "xvrspim $XT, $XB", IIC_VecFP, 978 [(set v4f32:$XT, (any_ffloor v4f32:$XB))]>; 979 def XVRSPIP : XX2Form<60, 169, 980 (outs vsrc:$XT), (ins vsrc:$XB), 981 "xvrspip $XT, $XB", IIC_VecFP, 982 [(set v4f32:$XT, (any_fceil v4f32:$XB))]>; 983 def XVRSPIZ : XX2Form<60, 153, 984 (outs vsrc:$XT), (ins vsrc:$XB), 985 "xvrspiz $XT, $XB", IIC_VecFP, 986 [(set v4f32:$XT, (any_ftrunc v4f32:$XB))]>; 987 } // mayRaiseFPException 988 989 // Logical Instructions 990 let isCommutable = 1 in 991 def XXLAND : XX3Form<60, 130, 992 (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), 993 "xxland $XT, $XA, $XB", IIC_VecGeneral, 994 [(set v4i32:$XT, (and v4i32:$XA, v4i32:$XB))]>; 995 def XXLANDC : XX3Form<60, 138, 996 (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), 997 "xxlandc $XT, $XA, $XB", IIC_VecGeneral, 998 [(set v4i32:$XT, (and v4i32:$XA, 999 (vnot v4i32:$XB)))]>; 1000 let isCommutable = 1 in { 1001 def XXLNOR : XX3Form<60, 162, 1002 (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), 1003 "xxlnor $XT, $XA, $XB", IIC_VecGeneral, 1004 [(set v4i32:$XT, (vnot (or v4i32:$XA, 1005 v4i32:$XB)))]>; 1006 def XXLOR : XX3Form<60, 146, 1007 (outs 

  // Permutation Instructions
  def XXMRGHW : XX3Form<60, 18,
                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                        "xxmrghw $XT, $XA, $XB", IIC_VecPerm, []>;
  def XXMRGLW : XX3Form<60, 50,
                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                        "xxmrglw $XT, $XA, $XB", IIC_VecPerm, []>;

  def XXPERMDI : XX3Form_2<60, 10,
                           (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB, u2imm:$D),
                           "xxpermdi $XT, $XA, $XB, $D", IIC_VecPerm,
                           [(set v2i64:$XT, (PPCxxpermdi v2i64:$XA, v2i64:$XB,
                                                         imm32SExt16:$D))]>;
  let isCodeGenOnly = 1 in
  // Note that the input register class for `$XA` of XXPERMDIs is `vsfrc` which
  // is not the same as the input register class (`vsrc`) of the XXPERMDI
  // instruction. We did this on purpose because:
  // 1: The input is primarily for loads that load a partial vector (LFIWZX,
  //    etc.), so there is no need for SUBREG_TO_REG.
  // 2: With the `vsfrc` register class, in the final assembly, float registers
  //    like `f0` are used instead of vector scalar registers like `vs0`. This
  //    helps readability.
  def XXPERMDIs : XX3Form_2s<60, 10, (outs vsrc:$XT), (ins vsfrc:$XA, u2imm:$D),
                             "xxpermdi $XT, $XA, $XA, $D", IIC_VecPerm, []>;
  def XXSEL : XX4Form<60, 3,
                      (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB, vsrc:$XC),
                      "xxsel $XT, $XA, $XB, $XC", IIC_VecPerm, []>;

  def XXSLDWI : XX3Form_2<60, 2,
                          (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB, u2imm:$D),
                          "xxsldwi $XT, $XA, $XB, $D", IIC_VecPerm,
                          [(set v4i32:$XT, (PPCvecshl v4i32:$XA, v4i32:$XB,
                                                      imm32SExt16:$D))]>;

  let isCodeGenOnly = 1 in
  def XXSLDWIs : XX3Form_2s<60, 2,
                            (outs vsrc:$XT), (ins vsfrc:$XA, u2imm:$D),
                            "xxsldwi $XT, $XA, $XA, $D", IIC_VecPerm, []>;

  def XXSPLTW : XX2Form_2<60, 164,
                          (outs vsrc:$XT), (ins vsrc:$XB, u2imm:$D),
                          "xxspltw $XT, $XB, $D", IIC_VecPerm,
                          [(set v4i32:$XT,
                                (PPCxxsplt v4i32:$XB, imm32SExt16:$D))]>;
  let isCodeGenOnly = 1 in
  def XXSPLTWs : XX2Form_2<60, 164,
                           (outs vsrc:$XT), (ins vsfrc:$XB, u2imm:$D),
                           "xxspltw $XT, $XB, $D", IIC_VecPerm, []>;

// The following VSX instructions were introduced in Power ISA 2.07
let Predicates = [HasVSX, HasP8Vector] in {
  let isCommutable = 1 in {
    def XXLEQV : XX3Form<60, 186,
                         (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                         "xxleqv $XT, $XA, $XB", IIC_VecGeneral,
                         [(set v4i32:$XT, (vnot (xor v4i32:$XA, v4i32:$XB)))]>;
    def XXLNAND : XX3Form<60, 178,
                          (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                          "xxlnand $XT, $XA, $XB", IIC_VecGeneral,
                          [(set v4i32:$XT, (vnot (and v4i32:$XA, v4i32:$XB)))]>;
  } // isCommutable

  let isCodeGenOnly = 1, isMoveImm = 1, isAsCheapAsAMove = 1,
      isReMaterializable = 1 in {
    def XXLEQVOnes : XX3Form_SameOp<60, 186, (outs vsrc:$XT), (ins),
                                    "xxleqv $XT, $XT, $XT", IIC_VecGeneral,
                                    [(set v4i32:$XT, (bitconvert (v16i8 immAllOnesV)))]>;
  }

  def XXLORC : XX3Form<60, 170,
                       (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                       "xxlorc $XT, $XA, $XB", IIC_VecGeneral,
                       [(set v4i32:$XT, (or v4i32:$XA, (vnot v4i32:$XB)))]>;

  // VSX scalar loads introduced in ISA 2.07
  let mayLoad = 1, mayStore = 0 in {
    let CodeSize = 3 in
    def LXSSPX : XX1Form_memOp<31, 524, (outs vssrc:$XT),
                               (ins (memrr $RA, $RB):$addr),
                               "lxsspx $XT, $addr", IIC_LdStLFD, []>;
    def LXSIWAX : XX1Form_memOp<31, 76, (outs vsfrc:$XT),
                                (ins (memrr $RA, $RB):$addr),
                                "lxsiwax $XT, $addr", IIC_LdStLFD, []>;
    def LXSIWZX : XX1Form_memOp<31, 12, (outs vsfrc:$XT),
                                (ins (memrr $RA, $RB):$addr),
                                "lxsiwzx $XT, $addr", IIC_LdStLFD, []>;

    // Pseudo instruction XFLOADf32 will be expanded to LXSSPX or LFSX later
    let CodeSize = 3 in
    def XFLOADf32 : PseudoXFormMemOp<(outs vssrc:$XT), (ins memrr:$src),
                                     "#XFLOADf32",
                                     [(set f32:$XT, (load XForm:$src))]>;
    // Pseudo instruction LIWAX will be expanded to LXSIWAX or LFIWAX later
    def LIWAX : PseudoXFormMemOp<(outs vsfrc:$XT), (ins memrr:$src),
                                 "#LIWAX",
                                 [(set f64:$XT, (PPClfiwax ForceXForm:$src))]>;
    // Pseudo instruction LIWZX will be expanded to LXSIWZX or LFIWZX later
    def LIWZX : PseudoXFormMemOp<(outs vsfrc:$XT), (ins memrr:$src),
                                 "#LIWZX",
                                 [(set f64:$XT, (PPClfiwzx ForceXForm:$src))]>;
  } // mayLoad

  // VSX scalar stores introduced in ISA 2.07
  let mayStore = 1, mayLoad = 0 in {
    let CodeSize = 3 in
    def STXSSPX : XX1Form_memOp<31, 652, (outs),
                                (ins vssrc:$XT, (memrr $RA, $RB):$addr),
                                "stxsspx $XT, $addr", IIC_LdStSTFD, []>;
    def STXSIWX : XX1Form_memOp<31, 140, (outs),
                                (ins vsfrc:$XT, (memrr $RA, $RB):$addr),
                                "stxsiwx $XT, $addr", IIC_LdStSTFD, []>;

    // Pseudo instruction XFSTOREf32 will be expanded to STXSSPX or STFSX later
    let CodeSize = 3 in
    def XFSTOREf32 : PseudoXFormMemOp<(outs), (ins vssrc:$XT, memrr:$dst),
                                      "#XFSTOREf32",
                                      [(store f32:$XT, XForm:$dst)]>;
    // Pseudo instruction STIWX will be expanded to STXSIWX or STFIWX later
    def STIWX : PseudoXFormMemOp<(outs), (ins vsfrc:$XT, memrr:$dst),
                                 "#STIWX",
                                 [(PPCstfiwx f64:$XT, ForceXForm:$dst)]>;
  } // mayStore

  // VSX Elementary Scalar FP arithmetic (SP)
  let mayRaiseFPException = 1 in {
  let isCommutable = 1 in {
    def XSADDSP : XX3Form<60, 0,
                          (outs vssrc:$XT), (ins vssrc:$XA, vssrc:$XB),
                          "xsaddsp $XT, $XA, $XB", IIC_VecFP,
                          [(set f32:$XT, (any_fadd f32:$XA, f32:$XB))]>;
    def XSMULSP : XX3Form<60, 16,
                          (outs vssrc:$XT), (ins vssrc:$XA, vssrc:$XB),
                          "xsmulsp $XT, $XA, $XB", IIC_VecFP,
                          [(set f32:$XT, (any_fmul f32:$XA, f32:$XB))]>;
  } // isCommutable

  def XSSUBSP : XX3Form<60, 8,
                        (outs vssrc:$XT), (ins vssrc:$XA, vssrc:$XB),
                        "xssubsp $XT, $XA, $XB", IIC_VecFP,
                        [(set f32:$XT, (any_fsub f32:$XA, f32:$XB))]>;
  def XSDIVSP : XX3Form<60, 24,
                        (outs vssrc:$XT), (ins vssrc:$XA, vssrc:$XB),
                        "xsdivsp $XT, $XA, $XB", IIC_FPDivS,
                        [(set f32:$XT, (any_fdiv f32:$XA, f32:$XB))]>;

  def XSRESP : XX2Form<60, 26,
                       (outs vssrc:$XT), (ins vssrc:$XB),
                       "xsresp $XT, $XB", IIC_VecFP,
                       [(set f32:$XT, (PPCfre f32:$XB))]>;
  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let hasSideEffects = 1 in
  def XSRSP : XX2Form<60, 281,
                      (outs vssrc:$XT), (ins vsfrc:$XB),
                      "xsrsp $XT, $XB", IIC_VecFP,
                      [(set f32:$XT, (any_fpround f64:$XB))]>;
  def XSSQRTSP : XX2Form<60, 11,
                         (outs vssrc:$XT), (ins vssrc:$XB),
                         "xssqrtsp $XT, $XB", IIC_FPSqrtS,
                         [(set f32:$XT, (any_fsqrt f32:$XB))]>;
  def XSRSQRTESP : XX2Form<60, 10,
                           (outs vssrc:$XT), (ins vssrc:$XB),
                           "xsrsqrtesp $XT, $XB", IIC_VecFP,
                           [(set f32:$XT, (PPCfrsqrte f32:$XB))]>;

  // FMA Instructions
  let BaseName = "XSMADDASP" in {
  let isCommutable = 1 in
  def XSMADDASP : XX3Form<60, 1,
                          (outs vssrc:$XT),
                          (ins vssrc:$XTi, vssrc:$XA, vssrc:$XB),
                          "xsmaddasp $XT, $XA, $XB", IIC_VecFP,
                          [(set f32:$XT, (any_fma f32:$XA, f32:$XB, f32:$XTi))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let IsVSXFMAAlt = 1, hasSideEffects = 1 in
  def XSMADDMSP : XX3Form<60, 9,
                          (outs vssrc:$XT),
                          (ins vssrc:$XTi, vssrc:$XA, vssrc:$XB),
                          "xsmaddmsp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XSMSUBASP" in {
  let isCommutable = 1 in
  def XSMSUBASP : XX3Form<60, 17,
                          (outs vssrc:$XT),
                          (ins vssrc:$XTi, vssrc:$XA, vssrc:$XB),
                          "xsmsubasp $XT, $XA, $XB", IIC_VecFP,
                          [(set f32:$XT, (any_fma f32:$XA, f32:$XB,
                                                  (fneg f32:$XTi)))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let IsVSXFMAAlt = 1, hasSideEffects = 1 in
  def XSMSUBMSP : XX3Form<60, 25,
                          (outs vssrc:$XT),
                          (ins vssrc:$XTi, vssrc:$XA, vssrc:$XB),
                          "xsmsubmsp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XSNMADDASP" in {
  let isCommutable = 1 in
  def XSNMADDASP : XX3Form<60, 129,
                           (outs vssrc:$XT),
                           (ins vssrc:$XTi, vssrc:$XA, vssrc:$XB),
                           "xsnmaddasp $XT, $XA, $XB", IIC_VecFP,
                           [(set f32:$XT, (fneg (any_fma f32:$XA, f32:$XB,
                                                         f32:$XTi)))]>,
                           RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                           AltVSXFMARel;
  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let IsVSXFMAAlt = 1, hasSideEffects = 1 in
  def XSNMADDMSP : XX3Form<60, 137,
                           (outs vssrc:$XT),
                           (ins vssrc:$XTi, vssrc:$XA, vssrc:$XB),
                           "xsnmaddmsp $XT, $XA, $XB", IIC_VecFP, []>,
                           RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                           AltVSXFMARel;
  }

  let BaseName = "XSNMSUBASP" in {
  let isCommutable = 1 in
  def XSNMSUBASP : XX3Form<60, 145,
                           (outs vssrc:$XT),
                           (ins vssrc:$XTi, vssrc:$XA, vssrc:$XB),
                           "xsnmsubasp $XT, $XA, $XB", IIC_VecFP,
                           [(set f32:$XT, (fneg (any_fma f32:$XA, f32:$XB,
                                                         (fneg f32:$XTi))))]>,
                           RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                           AltVSXFMARel;
  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let IsVSXFMAAlt = 1, hasSideEffects = 1 in
  def XSNMSUBMSP : XX3Form<60, 153,
                           (outs vssrc:$XT),
                           (ins vssrc:$XTi, vssrc:$XA, vssrc:$XB),
                           "xsnmsubmsp $XT, $XA, $XB", IIC_VecFP, []>,
                           RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                           AltVSXFMARel;
  }

  // Single Precision Conversions (FP <-> INT)
  def XSCVSXDSP : XX2Form<60, 312,
                          (outs vssrc:$XT), (ins vsfrc:$XB),
                          "xscvsxdsp $XT, $XB", IIC_VecFP,
                          [(set f32:$XT, (PPCany_fcfids f64:$XB))]>;
  def XSCVUXDSP : XX2Form<60, 296,
                          (outs vssrc:$XT), (ins vsfrc:$XB),
                          "xscvuxdsp $XT, $XB", IIC_VecFP,
                          [(set f32:$XT, (PPCany_fcfidus f64:$XB))]>;
  } // mayRaiseFPException

  // Conversions between vector and scalar single precision
  def XSCVDPSPN : XX2Form<60, 267, (outs vsrc:$XT), (ins vssrc:$XB),
                          "xscvdpspn $XT, $XB", IIC_VecFP, []>;
  def XSCVSPDPN : XX2Form<60, 331, (outs vssrc:$XT), (ins vsrc:$XB),
                          "xscvspdpn $XT, $XB", IIC_VecFP, []>;

  let Predicates = [HasVSX, HasDirectMove] in {
  // VSX direct move instructions
  def MFVSRD : XX1_RS6_RD5_XO<31, 51, (outs g8rc:$RA), (ins vsfrc:$XT),
                              "mfvsrd $RA, $XT", IIC_VecGeneral,
                              [(set i64:$RA, (PPCmfvsr f64:$XT))]>,
      Requires<[In64BitMode]>;
  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let isCodeGenOnly = 1, hasSideEffects = 1 in
  def MFVRD : XX1_RS6_RD5_XO<31, 51, (outs g8rc:$RA), (ins vsrc:$XT),
                             "mfvsrd $RA, $XT", IIC_VecGeneral,
                             []>,
      Requires<[In64BitMode]>;
  def MFVSRWZ : XX1_RS6_RD5_XO<31, 115, (outs gprc:$RA), (ins vsfrc:$XT),
                               "mfvsrwz $RA, $XT", IIC_VecGeneral,
                               [(set i32:$RA, (PPCmfvsr f64:$XT))]>, ZExt32To64;
  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let isCodeGenOnly = 1, hasSideEffects = 1 in
  def MFVRWZ : XX1_RS6_RD5_XO<31, 115, (outs gprc:$RA), (ins vsrc:$XT),
                              "mfvsrwz $RA, $XT", IIC_VecGeneral,
                              []>;
  def MTVSRD : XX1_RS6_RD5_XO<31, 179, (outs vsfrc:$XT), (ins g8rc:$RA),
                              "mtvsrd $XT, $RA", IIC_VecGeneral,
                              [(set f64:$XT, (PPCmtvsra i64:$RA))]>,
      Requires<[In64BitMode]>;
  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let isCodeGenOnly = 1, hasSideEffects = 1 in
  def MTVRD : XX1_RS6_RD5_XO<31, 179, (outs vsrc:$XT), (ins g8rc:$RA),
                             "mtvsrd $XT, $RA", IIC_VecGeneral,
                             []>,
      Requires<[In64BitMode]>;
  def MTVSRWA : XX1_RS6_RD5_XO<31, 211, (outs vsfrc:$XT), (ins gprc:$RA),
                               "mtvsrwa $XT, $RA", IIC_VecGeneral,
                               [(set f64:$XT, (PPCmtvsra i32:$RA))]>;
  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let isCodeGenOnly = 1, hasSideEffects = 1 in
  def MTVRWA : XX1_RS6_RD5_XO<31, 211, (outs vsrc:$XT), (ins gprc:$RA),
                              "mtvsrwa $XT, $RA", IIC_VecGeneral,
                              []>;
  def MTVSRWZ : XX1_RS6_RD5_XO<31, 243, (outs vsfrc:$XT), (ins gprc:$RA),
                               "mtvsrwz $XT, $RA", IIC_VecGeneral,
                               [(set f64:$XT, (PPCmtvsrz i32:$RA))]>;
  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let isCodeGenOnly = 1, hasSideEffects = 1 in
  def MTVRWZ : XX1_RS6_RD5_XO<31, 243, (outs vsrc:$XT), (ins gprc:$RA),
                              "mtvsrwz $XT, $RA", IIC_VecGeneral,
                              []>;
  } // HasDirectMove

} // HasVSX, HasP8Vector

let Predicates = [HasVSX, IsISA3_0, HasDirectMove] in {
def MTVSRWS: XX1_RS6_RD5_XO<31, 403, (outs vsrc:$XT), (ins gprc:$RA),
                            "mtvsrws $XT, $RA", IIC_VecGeneral, []>;

def MTVSRDD: XX1Form<31, 435, (outs vsrc:$XT), (ins g8rc_nox0:$RA, g8rc:$RB),
                     "mtvsrdd $XT, $RA, $RB", IIC_VecGeneral,
                     []>, Requires<[In64BitMode]>;

def MFVSRLD: XX1_RS6_RD5_XO<31, 307, (outs g8rc:$RA), (ins vsrc:$XT),
                            "mfvsrld $RA, $XT", IIC_VecGeneral,
                            []>, Requires<[In64BitMode]>;

} // HasVSX, IsISA3_0, HasDirectMove

let Predicates = [HasVSX, HasP9Vector] in {
  // Quad-Precision Scalar Move Instructions:
  // Copy Sign
  def XSCPSGNQP : X_VT5_VA5_VB5<63, 100, "xscpsgnqp",
                                [(set f128:$RST,
                                      (fcopysign f128:$RB, f128:$RA))]>;

  // Absolute/Negative-Absolute/Negate
  def XSABSQP : X_VT5_XO5_VB5<63, 0, 804, "xsabsqp",
                              [(set f128:$RST, (fabs f128:$RB))]>;
  def XSNABSQP : X_VT5_XO5_VB5<63, 8, 804, "xsnabsqp",
                               [(set f128:$RST, (fneg (fabs f128:$RB)))]>;
  def XSNEGQP : X_VT5_XO5_VB5<63, 16, 804, "xsnegqp",
                              [(set f128:$RST, (fneg f128:$RB))]>;

  //===--------------------------------------------------------------------===//
  // Quad-Precision Scalar Floating-Point Arithmetic Instructions:

  // Add/Divide/Multiply/Subtract
  let mayRaiseFPException = 1 in {
  let isCommutable = 1 in {
  def XSADDQP : X_VT5_VA5_VB5<63, 4, "xsaddqp",
                              [(set f128:$RST, (any_fadd f128:$RA, f128:$RB))]>;
  def XSMULQP : X_VT5_VA5_VB5<63, 36, "xsmulqp",
                              [(set f128:$RST, (any_fmul f128:$RA, f128:$RB))]>;
  }
  def XSSUBQP : X_VT5_VA5_VB5<63, 516, "xssubqp",
                              [(set f128:$RST, (any_fsub f128:$RA, f128:$RB))]>;
  def XSDIVQP : X_VT5_VA5_VB5<63, 548, "xsdivqp",
                              [(set f128:$RST, (any_fdiv f128:$RA, f128:$RB))]>;
  // Square-Root
  def XSSQRTQP : X_VT5_XO5_VB5<63, 27, 804, "xssqrtqp",
                               [(set f128:$RST, (any_fsqrt f128:$RB))]>;
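
  // IR-level view (a sketch, assuming a P9 subtarget): plain fp128 arithmetic
  // such as
  //   %r = fmul fp128 %a, %b
  // selects directly to xsmulqp through the any_fmul pattern above.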
  // (Negative) Multiply-{Add/Subtract}
  def XSMADDQP : X_VT5_VA5_VB5_FMA<63, 388, "xsmaddqp",
                                   [(set f128:$RST,
                                         (any_fma f128:$RA, f128:$RB, f128:$RSTi))]>;
  def XSMSUBQP : X_VT5_VA5_VB5_FMA<63, 420, "xsmsubqp",
                                   [(set f128:$RST,
                                         (any_fma f128:$RA, f128:$RB,
                                                  (fneg f128:$RSTi)))]>;
  def XSNMADDQP : X_VT5_VA5_VB5_FMA<63, 452, "xsnmaddqp",
                                    [(set f128:$RST,
                                          (fneg (any_fma f128:$RA, f128:$RB,
                                                         f128:$RSTi)))]>;
  def XSNMSUBQP : X_VT5_VA5_VB5_FMA<63, 484, "xsnmsubqp",
                                    [(set f128:$RST,
                                          (fneg (any_fma f128:$RA, f128:$RB,
                                                         (fneg f128:$RSTi))))]>;

  let isCommutable = 1 in {
  def XSADDQPO : X_VT5_VA5_VB5_Ro<63, 4, "xsaddqpo",
                                  [(set f128:$RST,
                                        (int_ppc_addf128_round_to_odd
                                            f128:$RA, f128:$RB))]>;
  def XSMULQPO : X_VT5_VA5_VB5_Ro<63, 36, "xsmulqpo",
                                  [(set f128:$RST,
                                        (int_ppc_mulf128_round_to_odd
                                            f128:$RA, f128:$RB))]>;
  }
  def XSSUBQPO : X_VT5_VA5_VB5_Ro<63, 516, "xssubqpo",
                                  [(set f128:$RST,
                                        (int_ppc_subf128_round_to_odd
                                            f128:$RA, f128:$RB))]>;
  def XSDIVQPO : X_VT5_VA5_VB5_Ro<63, 548, "xsdivqpo",
                                  [(set f128:$RST,
                                        (int_ppc_divf128_round_to_odd
                                            f128:$RA, f128:$RB))]>;
  def XSSQRTQPO : X_VT5_XO5_VB5_Ro<63, 27, 804, "xssqrtqpo",
                                   [(set f128:$RST,
                                         (int_ppc_sqrtf128_round_to_odd f128:$RB))]>;


  def XSMADDQPO : X_VT5_VA5_VB5_FMA_Ro<63, 388, "xsmaddqpo",
                                       [(set f128:$RST,
                                             (int_ppc_fmaf128_round_to_odd
                                                 f128:$RA, f128:$RB, f128:$RSTi))]>;

  def XSMSUBQPO : X_VT5_VA5_VB5_FMA_Ro<63, 420, "xsmsubqpo",
                                       [(set f128:$RST,
                                             (int_ppc_fmaf128_round_to_odd
                                                 f128:$RA, f128:$RB, (fneg f128:$RSTi)))]>;
  def XSNMADDQPO: X_VT5_VA5_VB5_FMA_Ro<63, 452, "xsnmaddqpo",
                                       [(set f128:$RST,
                                             (fneg (int_ppc_fmaf128_round_to_odd
                                                 f128:$RA, f128:$RB, f128:$RSTi)))]>;
  def XSNMSUBQPO: X_VT5_VA5_VB5_FMA_Ro<63, 484, "xsnmsubqpo",
                                       [(set f128:$RST,
                                             (fneg (int_ppc_fmaf128_round_to_odd
                                                 f128:$RA, f128:$RB, (fneg f128:$RSTi))))]>;
  } // mayRaiseFPException
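
  // These round-to-odd operations are reachable from C via the corresponding
  // Clang builtins (e.g. __builtin_addf128_round_to_odd and
  // __builtin_fmaf128_round_to_odd, assuming a target with quad-precision
  // support), which lower to the int_ppc_*_round_to_odd intrinsics used above.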

  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  // QP Compare Ordered/Unordered
  let hasSideEffects = 1 in {
    // DP/QP Compare Exponents
    def XSCMPEXPDP : XX3Form_1<60, 59,
                               (outs crrc:$CR), (ins vsfrc:$XA, vsfrc:$XB),
                               "xscmpexpdp $CR, $XA, $XB", IIC_FPCompare, []>;
    def XSCMPEXPQP : X_BF3_VA5_VB5<63, 164, "xscmpexpqp", []>;

    let mayRaiseFPException = 1 in {
    def XSCMPOQP : X_BF3_VA5_VB5<63, 132, "xscmpoqp", []>;
    def XSCMPUQP : X_BF3_VA5_VB5<63, 644, "xscmpuqp", []>;

    // DP Compare ==, >=, >, !=
    // Use vsrc for XT, because the entire register of XT is set.
    // XT.dword[1] = 0x0000_0000_0000_0000
    def XSCMPEQDP : XX3_XT5_XA5_XB5<60, 3, "xscmpeqdp", vsrc, vsfrc, vsfrc,
                                    IIC_FPCompare, []>;
    def XSCMPGEDP : XX3_XT5_XA5_XB5<60, 19, "xscmpgedp", vsrc, vsfrc, vsfrc,
                                    IIC_FPCompare, []>;
    def XSCMPGTDP : XX3_XT5_XA5_XB5<60, 11, "xscmpgtdp", vsrc, vsfrc, vsfrc,
                                    IIC_FPCompare, []>;
    }
  }

  //===--------------------------------------------------------------------===//
  // Quad-Precision Floating-Point Conversion Instructions:

  let mayRaiseFPException = 1 in {
    // Convert DP -> QP
    def XSCVDPQP : X_VT5_XO5_VB5_TyVB<63, 22, 836, "xscvdpqp", vfrc,
                                      [(set f128:$RST,
                                            (any_fpextend f64:$RB))]>;

    // Round & Convert QP -> DP (dword[1] is set to zero)
    def XSCVQPDP : X_VT5_XO5_VB5_VSFR<63, 20, 836, "xscvqpdp", []>;
    def XSCVQPDPO : X_VT5_XO5_VB5_VSFR_Ro<63, 20, 836, "xscvqpdpo",
                                          [(set f64:$RST,
                                                (int_ppc_truncf128_round_to_odd
                                                    f128:$RB))]>;
  }

  // Truncate & Convert QP -> (Un)Signed (D)Word (dword[1] is set to zero)
  let mayRaiseFPException = 1 in {
    def XSCVQPSDZ : X_VT5_XO5_VB5<63, 25, 836, "xscvqpsdz",
                                  [(set f128:$RST, (PPCany_fctidz f128:$RB))]>;
    def XSCVQPSWZ : X_VT5_XO5_VB5<63, 9, 836, "xscvqpswz",
                                  [(set f128:$RST, (PPCany_fctiwz f128:$RB))]>;
    def XSCVQPUDZ : X_VT5_XO5_VB5<63, 17, 836, "xscvqpudz",
                                  [(set f128:$RST, (PPCany_fctiduz f128:$RB))]>;
    def XSCVQPUWZ : X_VT5_XO5_VB5<63, 1, 836, "xscvqpuwz",
                                  [(set f128:$RST, (PPCany_fctiwuz f128:$RB))]>;
  }

  // Convert (Un)Signed DWord -> QP.
  def XSCVSDQP : X_VT5_XO5_VB5_TyVB<63, 10, 836, "xscvsdqp", vfrc, []>;
  def XSCVUDQP : X_VT5_XO5_VB5_TyVB<63, 2, 836, "xscvudqp", vfrc, []>;

  // (Round &) Convert DP <-> HP
  // Note! xscvdphp's src and dest registers both use the left 64 bits, so we
  // use vsfrc for the src and dest register. xscvhpdp's src only uses the
  // left 16 bits, but we still use vsfrc for it.
  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let hasSideEffects = 1, mayRaiseFPException = 1 in {
    def XSCVDPHP : XX2_XT6_XO5_XB6<60, 17, 347, "xscvdphp", vsfrc, []>;
    def XSCVHPDP : XX2_XT6_XO5_XB6<60, 16, 347, "xscvhpdp", vsfrc, []>;
  }

  let mayRaiseFPException = 1 in {
    // Vector HP -> SP
    // FIXME: Setting the hasSideEffects flag here to match current behaviour.
    let hasSideEffects = 1 in
    def XVCVHPSP : XX2_XT6_XO5_XB6<60, 24, 475, "xvcvhpsp", vsrc, []>;
    def XVCVSPHP : XX2_XT6_XO5_XB6<60, 25, 475, "xvcvsphp", vsrc,
                                   [(set v4f32:$XT,
                                         (int_ppc_vsx_xvcvsphp v4f32:$XB))]>;

    // Round to Quad-Precision Integer [with Inexact]
    def XSRQPI : Z23_VT5_R1_VB5_RMC2_EX1<63, 5, 0, "xsrqpi", []>;
    def XSRQPIX : Z23_VT5_R1_VB5_RMC2_EX1<63, 5, 1, "xsrqpix", []>;

    // Round Quad-Precision to Double-Extended Precision (fp80)
    // FIXME: Setting the hasSideEffects flag here to match current behaviour.
    let hasSideEffects = 1 in
    def XSRQPXP : Z23_VT5_R1_VB5_RMC2_EX1<63, 37, 0, "xsrqpxp", []>;
  }

  //===--------------------------------------------------------------------===//
  // Insert/Extract Instructions

  // Insert Exponent DP/QP
  // XT NOTE: XT.dword[1] = 0xUUUU_UUUU_UUUU_UUUU
  def XSIEXPDP : XX1Form<60, 918, (outs vsrc:$XT), (ins g8rc:$RA, g8rc:$RB),
                         "xsiexpdp $XT, $RA, $RB", IIC_VecFP, []>;
  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let hasSideEffects = 1 in {
    // vB NOTE: only vB.dword[0] is used, that's why we don't use
    // X_VT5_VA5_VB5 form
    def XSIEXPQP : XForm_18<63, 868, (outs vrrc:$FRT),
                            (ins vrrc:$FRA, vsfrc:$FRB),
                            "xsiexpqp $FRT, $FRA, $FRB", IIC_VecFP, []>;
  }

  // Extract Exponent/Significand DP/QP
  def XSXEXPDP : XX2_RT5_XO5_XB6<60, 0, 347, "xsxexpdp", []>;
  def XSXSIGDP : XX2_RT5_XO5_XB6<60, 1, 347, "xsxsigdp", []>;

  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let hasSideEffects = 1 in {
    def XSXEXPQP : X_VT5_XO5_VB5<63, 2, 804, "xsxexpqp", []>;
    def XSXSIGQP : X_VT5_XO5_VB5<63, 18, 804, "xsxsigqp", []>;
  }

  // Vector Insert Word
  // XB NOTE: Only XB.dword[1] is used, but we use vsrc on XB.
  def XXINSERTW :
    XX2_RD6_UIM5_RS6<60, 181, (outs vsrc:$XT),
                     (ins vsrc:$XTi, vsrc:$XB, u4imm:$UIM5),
                     "xxinsertw $XT, $XB, $UIM5", IIC_VecFP,
                     [(set v4i32:$XT, (PPCvecinsert v4i32:$XTi, v4i32:$XB,
                                                    imm32SExt16:$UIM5))]>,
    RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">;

  // Vector Extract Unsigned Word
  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let hasSideEffects = 1 in
  def XXEXTRACTUW : XX2_RD6_UIM5_RS6<60, 165,
                                     (outs vsfrc:$XT),
                                     (ins vsrc:$XB, u4imm:$UIM5),
                                     "xxextractuw $XT, $XB, $UIM5",
                                     IIC_VecFP, []>;

  // Vector Insert Exponent DP/SP
  def XVIEXPDP : XX3_XT5_XA5_XB5<60, 248, "xviexpdp", vsrc, vsrc, vsrc,
                                 IIC_VecFP,
                                 [(set v2f64:$XT,
                                       (int_ppc_vsx_xviexpdp v2i64:$XA,
                                                             v2i64:$XB))]>;
  def XVIEXPSP : XX3_XT5_XA5_XB5<60, 216, "xviexpsp", vsrc, vsrc, vsrc,
                                 IIC_VecFP,
                                 [(set v4f32:$XT,
                                       (int_ppc_vsx_xviexpsp v4i32:$XA,
                                                             v4i32:$XB))]>;

  // Vector Extract Exponent/Significand DP/SP
  def XVXEXPDP : XX2_XT6_XO5_XB6<60, 0, 475, "xvxexpdp", vsrc,
                                 [(set v2i64:$XT,
                                       (int_ppc_vsx_xvxexpdp v2f64:$XB))]>;
  def XVXEXPSP : XX2_XT6_XO5_XB6<60, 8, 475, "xvxexpsp", vsrc,
                                 [(set v4i32:$XT,
                                       (int_ppc_vsx_xvxexpsp v4f32:$XB))]>;
  def XVXSIGDP : XX2_XT6_XO5_XB6<60, 1, 475, "xvxsigdp", vsrc,
                                 [(set v2i64:$XT,
                                       (int_ppc_vsx_xvxsigdp v2f64:$XB))]>;
  def XVXSIGSP : XX2_XT6_XO5_XB6<60, 9, 475, "xvxsigsp", vsrc,
                                 [(set v4i32:$XT,
                                       (int_ppc_vsx_xvxsigsp v4f32:$XB))]>;

  // Test Data Class SP/DP/QP
  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let hasSideEffects = 1 in {
    def XSTSTDCSP : XX2_BF3_DCMX7_RS6<60, 298,
                                      (outs crrc:$BF),
                                      (ins u7imm:$DCMX, vssrc:$XB),
                                      "xststdcsp $BF, $XB, $DCMX",
                                      IIC_VecFP, []>;
    def XSTSTDCDP : XX2_BF3_DCMX7_RS6<60, 362,
                                      (outs crrc:$BF),
                                      (ins u7imm:$DCMX, vsfrc:$XB),
                                      "xststdcdp $BF, $XB, $DCMX",
                                      IIC_VecFP, []>;
    def XSTSTDCQP : X_BF3_DCMX7_RS5<63, 708,
                                    (outs crrc:$BF),
                                    (ins u7imm:$DCMX, vrrc:$VB),
                                    "xststdcqp $BF, $VB, $DCMX",
                                    IIC_VecFP, []>;
  }

  // Vector Test Data Class SP/DP
  def XVTSTDCSP : XX2_RD6_DCMX7_RS6<60, 13, 5,
                                    (outs vsrc:$XT),
                                    (ins u7imm:$DCMX, vsrc:$XB),
                                    "xvtstdcsp $XT, $XB, $DCMX", IIC_VecFP,
                                    [(set v4i32:$XT,
                                          (int_ppc_vsx_xvtstdcsp v4f32:$XB,
                                                                 timm:$DCMX))]>;
  def XVTSTDCDP : XX2_RD6_DCMX7_RS6<60, 15, 5,
                                    (outs vsrc:$XT),
                                    (ins u7imm:$DCMX, vsrc:$XB),
                                    "xvtstdcdp $XT, $XB, $DCMX", IIC_VecFP,
                                    [(set v2i64:$XT,
                                          (int_ppc_vsx_xvtstdcdp v2f64:$XB,
                                                                 timm:$DCMX))]>;

  // Maximum/Minimum Type-C/Type-J DP
  let mayRaiseFPException = 1 in {
    def XSMAXCDP : XX3_XT5_XA5_XB5<60, 128, "xsmaxcdp", vsfrc, vsfrc, vsfrc,
                                   IIC_VecFP,
                                   [(set f64:$XT,
                                         (PPCxsmaxc f64:$XA, f64:$XB))]>;
    def XSMINCDP : XX3_XT5_XA5_XB5<60, 136, "xsmincdp", vsfrc, vsfrc, vsfrc,
                                   IIC_VecFP,
                                   [(set f64:$XT,
                                         (PPCxsminc f64:$XA, f64:$XB))]>;

    // FIXME: Setting the hasSideEffects flag here to match current behaviour.
    let hasSideEffects = 1 in {
      def XSMAXJDP : XX3_XT5_XA5_XB5<60, 144, "xsmaxjdp", vsrc, vsfrc, vsfrc,
                                     IIC_VecFP, []>;
      def XSMINJDP : XX3_XT5_XA5_XB5<60, 152, "xsminjdp", vsrc, vsfrc, vsfrc,
                                     IIC_VecFP, []>;
    }
  }

  // Vector Byte-Reverse H/W/D/Q Word
  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let hasSideEffects = 1 in
  def XXBRH : XX2_XT6_XO5_XB6<60, 7, 475, "xxbrh", vsrc, []>;
  def XXBRW : XX2_XT6_XO5_XB6<60, 15, 475, "xxbrw", vsrc,
                              [(set v4i32:$XT, (bswap v4i32:$XB))]>;
  def XXBRD : XX2_XT6_XO5_XB6<60, 23, 475, "xxbrd", vsrc,
                              [(set v2i64:$XT, (bswap v2i64:$XB))]>;
  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let hasSideEffects = 1 in
  def XXBRQ : XX2_XT6_XO5_XB6<60, 31, 475, "xxbrq", vsrc, []>;

  // Vector Permute
  def XXPERM : XX3Form<60, 26, (outs vsrc:$XT),
                       (ins vsrc:$XA, vsrc:$XTi, vsrc:$XB),
                       "xxperm $XT, $XA, $XB", IIC_VecPerm, []>,
               RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">;
  def XXPERMR : XX3Form<60, 58, (outs vsrc:$XT),
                        (ins vsrc:$XA, vsrc:$XTi, vsrc:$XB),
                        "xxpermr $XT, $XA, $XB", IIC_VecPerm, []>,
                RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">;
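  // Note: xxperm/xxpermr read XT as one half of the 32-byte permute source
  // (XB supplies the byte selectors), so XT is both an input and the output.
  // The tied $XTi operand with the "$XTi = $XT" constraint models this, and
  // NoEncode keeps the tied copy out of the encoding.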

  // Vector Splat Immediate Byte
  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
  let hasSideEffects = 1 in
  def XXSPLTIB : X_RD6_IMM8<60, 360, (outs vsrc:$XT), (ins u8imm:$IMM8),
                            "xxspltib $XT, $IMM8", IIC_VecPerm, []>;

  // When adding new D-Form loads/stores, be sure to update the ImmToIdxMap in
  // PPCRegisterInfo::PPCRegisterInfo and maybe save yourself some debugging.
  let mayLoad = 1, mayStore = 0 in {
  // Load Vector
  def LXV : DQ_RD6_RS5_DQ12<61, 1, (outs vsrc:$XT),
                            (ins (memrix16 $DQ, $RA):$addr),
                            "lxv $XT, $addr", IIC_LdStLFD, []>;
  // Load DWord
  def LXSD : DSForm_1<57, 2, (outs vfrc:$RST), (ins (memrix $D, $RA):$addr),
                      "lxsd $RST, $addr", IIC_LdStLFD, []>;
  // Load SP from src, convert it to DP, and place in dword[0]
  def LXSSP : DSForm_1<57, 3, (outs vfrc:$RST), (ins (memrix $D, $RA):$addr),
                       "lxssp $RST, $addr", IIC_LdStLFD, []>;

  // Load as Integer Byte/Halfword & Zero Indexed
  def LXSIBZX : X_XT6_RA5_RB5<31, 781, "lxsibzx", vsfrc,
                              [(set f64:$XT, (PPClxsizx ForceXForm:$addr, 1))]>;
  def LXSIHZX : X_XT6_RA5_RB5<31, 813, "lxsihzx", vsfrc,
                              [(set f64:$XT, (PPClxsizx ForceXForm:$addr, 2))]>;

  // Load Vector Halfword*8/Byte*16 Indexed
  def LXVH8X  : X_XT6_RA5_RB5<31, 812, "lxvh8x", vsrc, []>;
  def LXVB16X : X_XT6_RA5_RB5<31, 876, "lxvb16x", vsrc, []>;

  // Load Vector Indexed
  def LXVX : X_XT6_RA5_RB5<31, 268, "lxvx", vsrc,
                           [(set v2f64:$XT, (load XForm:$addr))]>;
  // Load Vector (Left-justified) with Length
  def LXVL : XX1Form_memOp<31, 269, (outs vsrc:$XT),
                           (ins (memr $RA):$addr, g8rc:$RB),
                           "lxvl $XT, $addr, $RB", IIC_LdStLoad,
                           [(set v4i32:$XT,
                                 (int_ppc_vsx_lxvl addr:$addr, i64:$RB))]>;
  def LXVLL : XX1Form_memOp<31, 301, (outs vsrc:$XT),
                            (ins (memr $RA):$addr, g8rc:$RB),
                            "lxvll $XT, $addr, $RB", IIC_LdStLoad,
                            [(set v4i32:$XT,
                                  (int_ppc_vsx_lxvll addr:$addr, i64:$RB))]>;

  // Load Vector Word & Splat Indexed
  def LXVWSX : X_XT6_RA5_RB5<31, 364, "lxvwsx", vsrc, []>;
  } // mayLoad

  // When adding new D-Form loads/stores, be sure to update the ImmToIdxMap in
  // PPCRegisterInfo::PPCRegisterInfo and maybe save yourself some debugging.
  let mayStore = 1, mayLoad = 0 in {
  // Store Vector
  def STXV : DQ_RD6_RS5_DQ12<61, 5, (outs),
                             (ins vsrc:$XT, (memrix16 $DQ, $RA):$addr),
                             "stxv $XT, $addr", IIC_LdStSTFD, []>;
  // Store DWord
  def STXSD : DSForm_1<61, 2, (outs), (ins vfrc:$RST, (memrix $D, $RA):$addr),
                       "stxsd $RST, $addr", IIC_LdStSTFD, []>;
  // Convert DP of dword[0] to SP, and Store to dst
  def STXSSP : DSForm_1<61, 3, (outs), (ins vfrc:$RST, (memrix $D, $RA):$addr),
                        "stxssp $RST, $addr", IIC_LdStSTFD, []>;

  // Store as Integer Byte/Halfword Indexed
  def STXSIBX : X_XS6_RA5_RB5<31, 909, "stxsibx", vsfrc,
                              [(PPCstxsix f64:$XT, ForceXForm:$addr, 1)]>;
  def STXSIHX : X_XS6_RA5_RB5<31, 941, "stxsihx", vsfrc,
                              [(PPCstxsix f64:$XT, ForceXForm:$addr, 2)]>;
  let isCodeGenOnly = 1 in {
    def STXSIBXv : X_XS6_RA5_RB5<31, 909, "stxsibx", vsrc, []>;
    def STXSIHXv : X_XS6_RA5_RB5<31, 941, "stxsihx", vsrc, []>;
  }

  // Store Vector Halfword*8/Byte*16 Indexed
  def STXVH8X  : X_XS6_RA5_RB5<31, 940, "stxvh8x", vsrc, []>;
  def STXVB16X : X_XS6_RA5_RB5<31, 1004, "stxvb16x", vsrc, []>;

  // Store Vector Indexed
  def STXVX : X_XS6_RA5_RB5<31, 396, "stxvx", vsrc,
                            [(store v2f64:$XT, XForm:$addr)]>;

  // Store Vector (Left-justified) with Length
  def STXVL : XX1Form_memOp<31, 397, (outs),
                            (ins vsrc:$XT, (memr $RA):$addr, g8rc:$RB),
                            "stxvl $XT, $addr, $RB", IIC_LdStLoad,
                            [(int_ppc_vsx_stxvl v4i32:$XT, addr:$addr,
                                                i64:$RB)]>;
  def STXVLL : XX1Form_memOp<31, 429, (outs),
                             (ins vsrc:$XT, (memr $RA):$addr, g8rc:$RB),
                             "stxvll $XT, $addr, $RB", IIC_LdStLoad,
                             [(int_ppc_vsx_stxvll v4i32:$XT, addr:$addr,
                                                  i64:$RB)]>;
  } // mayStore

  def DFLOADf32 : PPCPostRAExpPseudo<(outs vssrc:$XT), (ins memrix:$src),
                                     "#DFLOADf32",
                                     [(set f32:$XT, (load DSForm:$src))]>;
  def DFLOADf64 : PPCPostRAExpPseudo<(outs vsfrc:$XT), (ins memrix:$src),
                                     "#DFLOADf64",
                                     [(set f64:$XT, (load DSForm:$src))]>;
  def DFSTOREf32 : PPCPostRAExpPseudo<(outs), (ins vssrc:$XT, memrix:$dst),
                                      "#DFSTOREf32",
                                      [(store f32:$XT, DSForm:$dst)]>;
  def DFSTOREf64 : PPCPostRAExpPseudo<(outs), (ins vsfrc:$XT, memrix:$dst),
                                      "#DFSTOREf64",
                                      [(store f64:$XT, DSForm:$dst)]>;
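  // The DFLOAD/DFSTORE pseudos above are expanded after register allocation;
  // the expansion presumably picks either the classic FP D-form memory op or
  // the P9 DS-form VSX memory op (e.g. lfd vs. lxsd) depending on which
  // register the value ends up in.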

  let mayStore = 1 in {
    def SPILLTOVSR_STX : PseudoXFormMemOp<(outs),
                                          (ins spilltovsrrc:$XT, memrr:$dst),
                                          "#SPILLTOVSR_STX", []>;
    def SPILLTOVSR_ST : PPCPostRAExpPseudo<(outs),
                                           (ins spilltovsrrc:$XT, memrix:$dst),
                                           "#SPILLTOVSR_ST", []>;
  }
  let mayLoad = 1 in {
    def SPILLTOVSR_LDX : PseudoXFormMemOp<(outs spilltovsrrc:$XT),
                                          (ins memrr:$src),
                                          "#SPILLTOVSR_LDX", []>;
    def SPILLTOVSR_LD : PPCPostRAExpPseudo<(outs spilltovsrrc:$XT),
                                           (ins memrix:$src),
                                           "#SPILLTOVSR_LD", []>;
  }
  } // HasP9Vector
} // hasSideEffects = 0

let PPC970_Single = 1, AddedComplexity = 400 in {

  def SELECT_CC_VSRC: PPCCustomInserterPseudo<(outs vsrc:$dst),
                        (ins crrc:$cond, vsrc:$T, vsrc:$F, i32imm:$BROPC),
                        "#SELECT_CC_VSRC",
                        []>;
  def SELECT_VSRC: PPCCustomInserterPseudo<(outs vsrc:$dst),
                     (ins crbitrc:$cond, vsrc:$T, vsrc:$F),
                     "#SELECT_VSRC",
                     [(set v2f64:$dst,
                           (select i1:$cond, v2f64:$T, v2f64:$F))]>;
  def SELECT_CC_VSFRC: PPCCustomInserterPseudo<(outs f8rc:$dst),
                         (ins crrc:$cond, f8rc:$T, f8rc:$F,
                          i32imm:$BROPC), "#SELECT_CC_VSFRC",
                         []>;
  def SELECT_VSFRC: PPCCustomInserterPseudo<(outs f8rc:$dst),
                      (ins crbitrc:$cond, f8rc:$T, f8rc:$F),
                      "#SELECT_VSFRC",
                      [(set f64:$dst,
                            (select i1:$cond, f64:$T, f64:$F))]>;
  def SELECT_CC_VSSRC: PPCCustomInserterPseudo<(outs f4rc:$dst),
                         (ins crrc:$cond, f4rc:$T, f4rc:$F,
                          i32imm:$BROPC), "#SELECT_CC_VSSRC",
                         []>;
  def SELECT_VSSRC: PPCCustomInserterPseudo<(outs f4rc:$dst),
                      (ins crbitrc:$cond, f4rc:$T, f4rc:$F),
                      "#SELECT_VSSRC",
                      [(set f32:$dst,
                            (select i1:$cond, f32:$T, f32:$F))]>;
}
}

//----------------------------- DAG Definitions ------------------------------//

// Output dag used to bitcast f32 to i32 and f64 to i64
def Bitcast {
  dag FltToInt = (i32 (MFVSRWZ (EXTRACT_SUBREG (XSCVDPSPN $A), sub_64)));
  dag DblToLong = (i64 (MFVSRD $A));
}

def FpMinMax {
  dag F32Min = (COPY_TO_REGCLASS (XSMINDP (COPY_TO_REGCLASS $A, VSFRC),
                                          (COPY_TO_REGCLASS $B, VSFRC)),
                                 VSSRC);
  dag F32Max = (COPY_TO_REGCLASS (XSMAXDP (COPY_TO_REGCLASS $A, VSFRC),
                                          (COPY_TO_REGCLASS $B, VSFRC)),
                                 VSSRC);
}
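// Note: there is no single-precision scalar min/max instruction. Every f32
// value is exactly representable as an f64, so F32Min/F32Max view the inputs
// as doubles, use xsmindp/xsmaxdp, and view the result as single precision
// again; the result is exact for single-precision inputs.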

def ScalarLoads {
  dag Li8 =       (i32 (extloadi8 ForceXForm:$src));
  dag ZELi8 =     (i32 (zextloadi8 ForceXForm:$src));
  dag ZELi8i64 =  (i64 (zextloadi8 ForceXForm:$src));
  dag SELi8 =     (i32 (sext_inreg (extloadi8 ForceXForm:$src), i8));
  dag SELi8i64 =  (i64 (sext_inreg (extloadi8 ForceXForm:$src), i8));

  dag Li16 =      (i32 (extloadi16 ForceXForm:$src));
  dag ZELi16 =    (i32 (zextloadi16 ForceXForm:$src));
  dag ZELi16i64 = (i64 (zextloadi16 ForceXForm:$src));
  dag SELi16 =    (i32 (sextloadi16 ForceXForm:$src));
  dag SELi16i64 = (i64 (sextloadi16 ForceXForm:$src));

  dag Li32 = (i32 (load ForceXForm:$src));
}

def DWToSPExtractConv {
  dag El0US1 = (f32 (PPCfcfidus
                      (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S1, 0))))));
  dag El1US1 = (f32 (PPCfcfidus
                      (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S1, 1))))));
  dag El0US2 = (f32 (PPCfcfidus
                      (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S2, 0))))));
  dag El1US2 = (f32 (PPCfcfidus
                      (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S2, 1))))));
  dag El0SS1 = (f32 (PPCfcfids
                      (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S1, 0))))));
  dag El1SS1 = (f32 (PPCfcfids
                      (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S1, 1))))));
  dag El0SS2 = (f32 (PPCfcfids
                      (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S2, 0))))));
  dag El1SS2 = (f32 (PPCfcfids
                      (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S2, 1))))));
  dag BVU = (v4f32 (build_vector El0US1, El1US1, El0US2, El1US2));
  dag BVS = (v4f32 (build_vector El0SS1, El1SS1, El0SS2, El1SS2));
}

def WToDPExtractConv {
  dag El0S = (f64 (PPCfcfid (PPCmtvsra (extractelt v4i32:$A, 0))));
  dag El1S = (f64 (PPCfcfid (PPCmtvsra (extractelt v4i32:$A, 1))));
  dag El2S = (f64 (PPCfcfid (PPCmtvsra (extractelt v4i32:$A, 2))));
  dag El3S = (f64 (PPCfcfid (PPCmtvsra (extractelt v4i32:$A, 3))));
  dag El0U = (f64 (PPCfcfidu (PPCmtvsrz (extractelt v4i32:$A, 0))));
  dag El1U = (f64 (PPCfcfidu (PPCmtvsrz (extractelt v4i32:$A, 1))));
  dag El2U = (f64 (PPCfcfidu (PPCmtvsrz (extractelt v4i32:$A, 2))));
  dag El3U = (f64 (PPCfcfidu (PPCmtvsrz (extractelt v4i32:$A, 3))));
  dag BV02S = (v2f64 (build_vector El0S, El2S));
  dag BV13S = (v2f64 (build_vector El1S, El3S));
  dag BV02U = (v2f64 (build_vector El0U, El2U));
  dag BV13U = (v2f64 (build_vector El1U, El3U));
}

/* Direct moves of various widths from GPRs into VSRs. Each move lines
   the value up into element 0 (both BE and LE). Namely, entities smaller than
   a doubleword are shifted left and moved for BE. For LE, they're moved,
   then swapped to go into the least significant element of the VSR.
*/
def MovesToVSR {
  dag BE_BYTE_0 =
    (MTVSRD
      (RLDICR
        (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $A, sub_32), 56, 7));
  dag BE_HALF_0 =
    (MTVSRD
      (RLDICR
        (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $A, sub_32), 48, 15));
  dag BE_WORD_0 =
    (MTVSRD
      (RLDICR
        (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $A, sub_32), 32, 31));
  dag BE_DWORD_0 = (MTVSRD $A);

  dag LE_MTVSRW = (MTVSRD (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $A, sub_32));
  dag LE_WORD_1 = (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                                        LE_MTVSRW, sub_64));
  dag LE_WORD_0 = (XXPERMDI LE_WORD_1, LE_WORD_1, 2);
  dag LE_DWORD_1 = (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                                         BE_DWORD_0, sub_64));
  dag LE_DWORD_0 = (XXPERMDI LE_DWORD_1, LE_DWORD_1, 2);
}
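// An illustrative walk-through of BE_BYTE_0 above: the 32-bit value is first
// widened to 64 bits, then RLDICR rotates it left by 56 and clears bits 8-63,
// leaving the original low byte in bits 0-7 (the most significant byte of
// the doubleword). MTVSRD then places that doubleword in element 0 of the
// VSR, which for BE is exactly where byte element 0 lives.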

/* Patterns for extracting elements out of vectors. Integer elements are
   extracted using direct move operations. Patterns for extracting elements
   whose indices are not available at compile time are also provided with
   various _VARIABLE_ patterns.
   The numbering for the DAGs is for LE, but when used on BE, the correct
   LE element can just be used (i.e. LE_BYTE_2 == BE_BYTE_13).
*/
def VectorExtractions {
  // Doubleword extraction
  dag LE_DWORD_0 =
    (MFVSRD
      (EXTRACT_SUBREG
        (XXPERMDI (COPY_TO_REGCLASS $S, VSRC),
                  (COPY_TO_REGCLASS $S, VSRC), 2), sub_64));
  dag LE_DWORD_1 = (MFVSRD
                     (EXTRACT_SUBREG
                       (v2i64 (COPY_TO_REGCLASS $S, VSRC)), sub_64));

  // Word extraction
  dag LE_WORD_0 = (MFVSRWZ (EXTRACT_SUBREG (XXPERMDI $S, $S, 2), sub_64));
  dag LE_WORD_1 = (MFVSRWZ (EXTRACT_SUBREG (XXSLDWI $S, $S, 1), sub_64));
  dag LE_WORD_2 = (MFVSRWZ (EXTRACT_SUBREG
                             (v2i64 (COPY_TO_REGCLASS $S, VSRC)), sub_64));
  dag LE_WORD_3 = (MFVSRWZ (EXTRACT_SUBREG (XXSLDWI $S, $S, 3), sub_64));

  // Halfword extraction
  dag LE_HALF_0 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 0, 48), sub_32));
  dag LE_HALF_1 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 48, 48), sub_32));
  dag LE_HALF_2 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 32, 48), sub_32));
  dag LE_HALF_3 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 16, 48), sub_32));
  dag LE_HALF_4 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 0, 48), sub_32));
  dag LE_HALF_5 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 48, 48), sub_32));
  dag LE_HALF_6 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 32, 48), sub_32));
  dag LE_HALF_7 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 16, 48), sub_32));

  // Byte extraction
  dag LE_BYTE_0 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 0, 56), sub_32));
  dag LE_BYTE_1 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 56, 56), sub_32));
  dag LE_BYTE_2 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 48, 56), sub_32));
  dag LE_BYTE_3 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 40, 56), sub_32));
  dag LE_BYTE_4 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 32, 56), sub_32));
  dag LE_BYTE_5 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 24, 56), sub_32));
  dag LE_BYTE_6 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 16, 56), sub_32));
  dag LE_BYTE_7 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 8, 56), sub_32));
  dag LE_BYTE_8 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 0, 56), sub_32));
  dag LE_BYTE_9 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 56, 56), sub_32));
  dag LE_BYTE_10 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 48, 56), sub_32));
  dag LE_BYTE_11 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 40, 56), sub_32));
  dag LE_BYTE_12 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 32, 56), sub_32));
  dag LE_BYTE_13 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 24, 56), sub_32));
  dag LE_BYTE_14 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 16, 56), sub_32));
  dag LE_BYTE_15 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 8, 56), sub_32));

  /* Variable element number (BE and LE patterns must be specified separately)
     This is a rather involved process.

     Conceptually, this is how the move is accomplished:
     1. Identify which doubleword contains the element
     2. Shift in the VMX register so that the correct doubleword is correctly
        lined up for the MFVSRD
     3. Perform the move so that the element (along with some extra stuff)
        is in the GPR
     4. Right shift within the GPR so that the element is right-justified

     Of course, the index is an element number which has a different meaning
     on LE/BE so the patterns have to be specified separately.

     Note: The final result will be the element right-justified with high
           order bits being arbitrarily defined (namely, whatever was in the
           vector register to the left of the value originally).
  */

  /* LE variable byte
     Number 1. above:
     - For elements 0-7, we shift left by 8 bytes since they're on the right
     - For elements 8-15, we need not shift (shift left by zero bytes)
     This is accomplished by inverting the bits of the index and AND-ing
     with 0x8 (i.e. clearing all bits of the index and inverting bit 60).
  */
  dag LE_VBYTE_PERM_VEC = (v16i8 (LVSL ZERO8, (ANDC8 (LI8 8), $Idx)));

  // Number 2. above:
  // - Now that we set up the shift amount, we shift in the VMX register
  dag LE_VBYTE_PERMUTE = (v16i8 (VPERM $S, $S, LE_VBYTE_PERM_VEC));

  // Number 3. above:
  // - The doubleword containing our element is moved to a GPR
  dag LE_MV_VBYTE = (MFVSRD
                      (EXTRACT_SUBREG
                        (v2i64 (COPY_TO_REGCLASS LE_VBYTE_PERMUTE, VSRC)),
                        sub_64));

  /* Number 4. above:
     - Truncate the element number to the range 0-7 (8-15 are symmetrical
       and out of range values are truncated accordingly)
     - Multiply by 8 as we need to shift right by the number of bits, not bytes
     - Shift right in the GPR by the calculated value
  */
  dag LE_VBYTE_SHIFT = (EXTRACT_SUBREG (RLDICR (AND8 (LI8 7), $Idx), 3, 60),
                                       sub_32);
  dag LE_VARIABLE_BYTE = (EXTRACT_SUBREG (SRD LE_MV_VBYTE, LE_VBYTE_SHIFT),
                                         sub_32);
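  // Worked example (illustrative): extracting LE byte 5. Step 1 computes
  // 8 & ~5 = 8, so LVSL/VPERM rotate the vector left by 8 bytes, bringing
  // the doubleword that holds byte 5 into the position MFVSRD extracts.
  // Step 4 computes (5 & 7) * 8 = 40, so SRD shifts the moved doubleword
  // right by 40 bits, leaving byte 5 right-justified in the GPR.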

  /* LE variable halfword
     Number 1. above:
     - For elements 0-3, we shift left by 8 since they're on the right
     - For elements 4-7, we need not shift (shift left by zero bytes)
     Similarly to the byte pattern, we invert the bits of the index, but we
     AND with 0x4 (i.e. clear all bits of the index and invert bit 61).
     Of course, the shift is still by 8 bytes, so we must multiply by 2.
  */
  dag LE_VHALF_PERM_VEC =
    (v16i8 (LVSL ZERO8, (RLDICR (ANDC8 (LI8 4), $Idx), 1, 62)));

  // Number 2. above:
  // - Now that we set up the shift amount, we shift in the VMX register
  dag LE_VHALF_PERMUTE = (v16i8 (VPERM $S, $S, LE_VHALF_PERM_VEC));

  // Number 3. above:
  // - The doubleword containing our element is moved to a GPR
  dag LE_MV_VHALF = (MFVSRD
                      (EXTRACT_SUBREG
                        (v2i64 (COPY_TO_REGCLASS LE_VHALF_PERMUTE, VSRC)),
                        sub_64));

  /* Number 4. above:
     - Truncate the element number to the range 0-3 (4-7 are symmetrical
       and out of range values are truncated accordingly)
     - Multiply by 16 as we need to shift right by the number of bits
     - Shift right in the GPR by the calculated value
  */
  dag LE_VHALF_SHIFT = (EXTRACT_SUBREG (RLDICR (AND8 (LI8 3), $Idx), 4, 59),
                                       sub_32);
  dag LE_VARIABLE_HALF = (EXTRACT_SUBREG (SRD LE_MV_VHALF, LE_VHALF_SHIFT),
                                         sub_32);

  /* LE variable word
     Number 1. above:
     - For elements 0-1, we shift left by 8 since they're on the right
     - For elements 2-3, we need not shift
  */
  dag LE_VWORD_PERM_VEC = (v16i8 (LVSL ZERO8,
                                       (RLDICR (ANDC8 (LI8 2), $Idx), 2, 61)));

  // Number 2. above:
  // - Now that we set up the shift amount, we shift in the VMX register
  dag LE_VWORD_PERMUTE = (v16i8 (VPERM $S, $S, LE_VWORD_PERM_VEC));

  // Number 3. above:
  // - The doubleword containing our element is moved to a GPR
  dag LE_MV_VWORD = (MFVSRD
                      (EXTRACT_SUBREG
                        (v2i64 (COPY_TO_REGCLASS LE_VWORD_PERMUTE, VSRC)),
                        sub_64));

  /* Number 4. above:
     - Truncate the element number to the range 0-1 (2-3 are symmetrical
       and out of range values are truncated accordingly)
     - Multiply by 32 as we need to shift right by the number of bits
     - Shift right in the GPR by the calculated value
  */
  dag LE_VWORD_SHIFT = (EXTRACT_SUBREG (RLDICR (AND8 (LI8 1), $Idx), 5, 58),
                                       sub_32);
  dag LE_VARIABLE_WORD = (EXTRACT_SUBREG (SRD LE_MV_VWORD, LE_VWORD_SHIFT),
                                         sub_32);

  /* LE variable doubleword
     Number 1. above:
     - For element 0, we shift left by 8 since it's on the right
     - For element 1, we need not shift
  */
  dag LE_VDWORD_PERM_VEC = (v16i8 (LVSL ZERO8,
                                        (RLDICR (ANDC8 (LI8 1), $Idx), 3, 60)));

  // Number 2. above:
  // - Now that we set up the shift amount, we shift in the VMX register
  dag LE_VDWORD_PERMUTE = (v16i8 (VPERM $S, $S, LE_VDWORD_PERM_VEC));

  // Number 3. above:
  // - The doubleword containing our element is moved to a GPR
  // - Number 4. is not needed for the doubleword as the value is 64 bits
  dag LE_VARIABLE_DWORD =
    (MFVSRD (EXTRACT_SUBREG
              (v2i64 (COPY_TO_REGCLASS LE_VDWORD_PERMUTE, VSRC)),
              sub_64));

  /* LE variable float
     - Shift the vector to line up the desired element to BE Word 0
     - Convert the 32-bit float to 64-bit double precision
  */
  dag LE_VFLOAT_PERM_VEC = (v16i8 (LVSL ZERO8,
                                        (RLDICR (XOR8 (LI8 3), $Idx), 2, 61)));
  dag LE_VFLOAT_PERMUTE = (VPERM $S, $S, LE_VFLOAT_PERM_VEC);
  dag LE_VARIABLE_FLOAT = (XSCVSPDPN LE_VFLOAT_PERMUTE);
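  // Note: the float case computes the index with an XOR rather than AND/ANDC:
  // XOR with 3 converts an LE word index i in 0-3 to the BE word index 3 - i,
  // and the RLDICR multiplies it by 4 to get a byte shift. For example, LE
  // word 1 becomes BE word 2, i.e. a left shift of 8 bytes, which lines the
  // element up in BE word 0 for XSCVSPDPN.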

  /* LE variable double
     Same as the LE doubleword except there is no move.
  */
  dag LE_VDOUBLE_PERMUTE = (v16i8 (VPERM (v16i8 (COPY_TO_REGCLASS $S, VRRC)),
                                         (v16i8 (COPY_TO_REGCLASS $S, VRRC)),
                                         LE_VDWORD_PERM_VEC));
  dag LE_VARIABLE_DOUBLE = (COPY_TO_REGCLASS LE_VDOUBLE_PERMUTE, VSRC);

  /* BE variable byte
     The algorithm here is the same as the LE variable byte except:
     - The shift in the VMX register is by 0/8 for opposite element numbers so
       we simply AND the element number with 0x8
     - The order of elements after the move to GPR is reversed, so we invert
       the bits of the index prior to truncating to the range 0-7
  */
  dag BE_VBYTE_PERM_VEC = (v16i8 (LVSL ZERO8, (ANDI8_rec $Idx, 8)));
  dag BE_VBYTE_PERMUTE = (v16i8 (VPERM $S, $S, BE_VBYTE_PERM_VEC));
  dag BE_MV_VBYTE = (MFVSRD
                      (EXTRACT_SUBREG
                        (v2i64 (COPY_TO_REGCLASS BE_VBYTE_PERMUTE, VSRC)),
                        sub_64));
  dag BE_VBYTE_SHIFT = (EXTRACT_SUBREG (RLDICR (ANDC8 (LI8 7), $Idx), 3, 60),
                                       sub_32);
  dag BE_VARIABLE_BYTE = (EXTRACT_SUBREG (SRD BE_MV_VBYTE, BE_VBYTE_SHIFT),
                                         sub_32);
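  // Note: ANDI8_rec is the record-form andi.; it is used here only because
  // there is no non-recording and-immediate on PowerPC. The CR0 result it
  // defines is simply dead.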

  /* BE variable halfword
     The algorithm here is the same as the LE variable halfword except:
     - The shift in the VMX register is by 0/8 for opposite element numbers so
       we simply AND the element number with 0x4 and multiply by 2
     - The order of elements after the move to GPR is reversed, so we invert
       the bits of the index prior to truncating to the range 0-3
  */
  dag BE_VHALF_PERM_VEC = (v16i8 (LVSL ZERO8,
                                       (RLDICR (ANDI8_rec $Idx, 4), 1, 62)));
  dag BE_VHALF_PERMUTE = (v16i8 (VPERM $S, $S, BE_VHALF_PERM_VEC));
  dag BE_MV_VHALF = (MFVSRD
                      (EXTRACT_SUBREG
                        (v2i64 (COPY_TO_REGCLASS BE_VHALF_PERMUTE, VSRC)),
                        sub_64));
  dag BE_VHALF_SHIFT = (EXTRACT_SUBREG (RLDICR (ANDC8 (LI8 3), $Idx), 4, 59),
                                       sub_32);
  dag BE_VARIABLE_HALF = (EXTRACT_SUBREG (SRD BE_MV_VHALF, BE_VHALF_SHIFT),
                                         sub_32);

  /* BE variable word
     The algorithm is the same as the LE variable word except:
     - The shift in the VMX register happens for opposite element numbers
     - The order of elements after the move to GPR is reversed, so we invert
       the bits of the index prior to truncating to the range 0-1
  */
  dag BE_VWORD_PERM_VEC = (v16i8 (LVSL ZERO8,
                                       (RLDICR (ANDI8_rec $Idx, 2), 2, 61)));
  dag BE_VWORD_PERMUTE = (v16i8 (VPERM $S, $S, BE_VWORD_PERM_VEC));
  dag BE_MV_VWORD = (MFVSRD
                      (EXTRACT_SUBREG
                        (v2i64 (COPY_TO_REGCLASS BE_VWORD_PERMUTE, VSRC)),
                        sub_64));
  dag BE_VWORD_SHIFT = (EXTRACT_SUBREG (RLDICR (ANDC8 (LI8 1), $Idx), 5, 58),
                                       sub_32);
  dag BE_VARIABLE_WORD = (EXTRACT_SUBREG (SRD BE_MV_VWORD, BE_VWORD_SHIFT),
                                         sub_32);

  /* BE variable doubleword
     Same as the LE doubleword except we shift in the VMX register for opposite
     element indices.
  */
  dag BE_VDWORD_PERM_VEC = (v16i8 (LVSL ZERO8,
                                        (RLDICR (ANDI8_rec $Idx, 1), 3, 60)));
  dag BE_VDWORD_PERMUTE = (v16i8 (VPERM $S, $S, BE_VDWORD_PERM_VEC));
  dag BE_VARIABLE_DWORD =
    (MFVSRD (EXTRACT_SUBREG
              (v2i64 (COPY_TO_REGCLASS BE_VDWORD_PERMUTE, VSRC)),
              sub_64));

  /* BE variable float
     - Shift the vector to line up the desired element to BE Word 0
     - Convert the 32-bit float to 64-bit double precision
  */
  dag BE_VFLOAT_PERM_VEC = (v16i8 (LVSL ZERO8, (RLDICR $Idx, 2, 61)));
  dag BE_VFLOAT_PERMUTE = (VPERM $S, $S, BE_VFLOAT_PERM_VEC);
  dag BE_VARIABLE_FLOAT = (XSCVSPDPN BE_VFLOAT_PERMUTE);

  // BE variable float, 32-bit version
  dag BE_32B_VFLOAT_PERM_VEC = (v16i8 (LVSL (i32 ZERO),
                                            (RLWINM $Idx, 2, 0, 29)));
  dag BE_32B_VFLOAT_PERMUTE = (VPERM $S, $S, BE_32B_VFLOAT_PERM_VEC);
  dag BE_32B_VARIABLE_FLOAT = (XSCVSPDPN BE_32B_VFLOAT_PERMUTE);

  /* BE variable double
     Same as the BE doubleword except there is no move.
  */
  dag BE_VDOUBLE_PERMUTE = (v16i8 (VPERM (v16i8 (COPY_TO_REGCLASS $S, VRRC)),
                                         (v16i8 (COPY_TO_REGCLASS $S, VRRC)),
                                         BE_VDWORD_PERM_VEC));
  dag BE_VARIABLE_DOUBLE = (COPY_TO_REGCLASS BE_VDOUBLE_PERMUTE, VSRC);

  // BE variable double, 32-bit version
  dag BE_32B_VDWORD_PERM_VEC = (v16i8 (LVSL (i32 ZERO),
                                            (RLWINM (ANDI_rec $Idx, 1),
                                                    3, 0, 28)));
  dag BE_32B_VDOUBLE_PERMUTE =
    (v16i8 (VPERM (v16i8 (COPY_TO_REGCLASS $S, VRRC)),
                  (v16i8 (COPY_TO_REGCLASS $S, VRRC)),
                  BE_32B_VDWORD_PERM_VEC));
  dag BE_32B_VARIABLE_DOUBLE = (COPY_TO_REGCLASS BE_32B_VDOUBLE_PERMUTE, VSRC);
}
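// Note: the BE_32B_* dags above are the 32-bit subtarget versions: they form
// the byte shift with 32-bit RLWINM/ANDI_rec and use a 32-bit zero for the
// LVSL base, avoiding 64-bit-only operations such as LI8 and RLDICR. Only
// the float and double cases have 32-bit versions since they are the only
// ones that do not require the 64-bit MFVSRD move to a GPR.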

def AlignValues {
  dag F32_TO_BE_WORD1 = (v4f32 (XSCVDPSPN $B));
  dag I32_TO_BE_WORD1 = (SUBREG_TO_REG (i64 1), (MTVSRWZ $B), sub_64);
}

// Integer extend helper dags, 32 -> 64
def AnyExts {
  dag A = (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $A, sub_32);
  dag B = (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $B, sub_32);
  dag C = (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $C, sub_32);
  dag D = (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $D, sub_32);
}

def DblToFlt {
  dag A0 = (f32 (any_fpround (f64 (extractelt v2f64:$A, 0))));
  dag A1 = (f32 (any_fpround (f64 (extractelt v2f64:$A, 1))));
  dag B0 = (f32 (any_fpround (f64 (extractelt v2f64:$B, 0))));
  dag B1 = (f32 (any_fpround (f64 (extractelt v2f64:$B, 1))));
}

def ExtDbl {
  dag A0S = (i32 (PPCmfvsr (f64 (PPCfctiwz (f64 (extractelt v2f64:$A, 0))))));
  dag A1S = (i32 (PPCmfvsr (f64 (PPCfctiwz (f64 (extractelt v2f64:$A, 1))))));
  dag B0S = (i32 (PPCmfvsr (f64 (PPCfctiwz (f64 (extractelt v2f64:$B, 0))))));
  dag B1S = (i32 (PPCmfvsr (f64 (PPCfctiwz (f64 (extractelt v2f64:$B, 1))))));
  dag A0U = (i32 (PPCmfvsr (f64 (PPCfctiwuz (f64 (extractelt v2f64:$A, 0))))));
  dag A1U = (i32 (PPCmfvsr (f64 (PPCfctiwuz (f64 (extractelt v2f64:$A, 1))))));
  dag B0U = (i32 (PPCmfvsr (f64 (PPCfctiwuz (f64 (extractelt v2f64:$B, 0))))));
  dag B1U = (i32 (PPCmfvsr (f64 (PPCfctiwuz (f64 (extractelt v2f64:$B, 1))))));
}

def ByteToWord {
  dag LE_A0 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 0)), i8));
  dag LE_A1 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 4)), i8));
  dag LE_A2 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 8)), i8));
  dag LE_A3 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 12)), i8));
  dag BE_A0 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 3)), i8));
  dag BE_A1 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 7)), i8));
  dag BE_A2 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 11)), i8));
  dag BE_A3 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 15)), i8));
}

def ByteToDWord {
  dag LE_A0 = (i64 (sext_inreg
                     (i64 (anyext (i32 (vector_extract v16i8:$A, 0)))), i8));
  dag LE_A1 = (i64 (sext_inreg
                     (i64 (anyext (i32 (vector_extract v16i8:$A, 8)))), i8));
  dag BE_A0 = (i64 (sext_inreg
                     (i64 (anyext (i32 (vector_extract v16i8:$A, 7)))), i8));
  dag BE_A1 = (i64 (sext_inreg
                     (i64 (anyext (i32 (vector_extract v16i8:$A, 15)))), i8));
}

def HWordToWord {
  dag LE_A0 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 0)), i16));
  dag LE_A1 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 2)), i16));
  dag LE_A2 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 4)), i16));
  dag LE_A3 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 6)), i16));
  dag BE_A0 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 1)), i16));
  dag BE_A1 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 3)), i16));
  dag BE_A2 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 5)), i16));
  dag BE_A3 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 7)), i16));
}

def HWordToDWord {
  dag LE_A0 = (i64 (sext_inreg
                     (i64 (anyext (i32 (vector_extract v8i16:$A, 0)))), i16));
  dag LE_A1 = (i64 (sext_inreg
                     (i64 (anyext (i32 (vector_extract v8i16:$A, 4)))), i16));
  dag BE_A0 = (i64 (sext_inreg
                     (i64 (anyext (i32 (vector_extract v8i16:$A, 3)))), i16));
  dag BE_A1 = (i64 (sext_inreg
                     (i64 (anyext (i32 (vector_extract v8i16:$A, 7)))), i16));
}

def WordToDWord {
  dag LE_A0 = (i64 (sext (i32 (vector_extract v4i32:$A, 0))));
  dag LE_A1 = (i64 (sext (i32 (vector_extract v4i32:$A, 2))));
  dag BE_A0 = (i64 (sext (i32 (vector_extract v4i32:$A, 1))));
  dag BE_A1 = (i64 (sext (i32 (vector_extract v4i32:$A, 3))));
}
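// Note on the dags above: vector_extract of a sub-word element produces an
// i32 whose high bits are not guaranteed to be sign bits, so sign extension
// is expressed as sext_inreg from the element type (wrapped in an anyext
// when the destination is i64), e.g. (sext_inreg (anyext ...), i8) for a
// byte extracted into an i64.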

def FltToIntLoad {
  dag A = (i32 (PPCmfvsr (PPCfctiwz (f64 (extloadf32 ForceXForm:$A)))));
}
def FltToUIntLoad {
  dag A = (i32 (PPCmfvsr (PPCfctiwuz (f64 (extloadf32 ForceXForm:$A)))));
}
def FltToLongLoad {
  dag A = (i64 (PPCmfvsr (PPCfctidz (f64 (extloadf32 ForceXForm:$A)))));
}
def FltToLongLoadP9 {
  dag A = (i64 (PPCmfvsr (PPCfctidz (f64 (extloadf32 DSForm:$A)))));
}
def FltToULongLoad {
  dag A = (i64 (PPCmfvsr (PPCfctiduz (f64 (extloadf32 ForceXForm:$A)))));
}
def FltToULongLoadP9 {
  dag A = (i64 (PPCmfvsr (PPCfctiduz (f64 (extloadf32 DSForm:$A)))));
}
def FltToLong {
  dag A = (i64 (PPCmfvsr (f64 (PPCfctidz (fpextend f32:$A)))));
}
def FltToULong {
  dag A = (i64 (PPCmfvsr (f64 (PPCfctiduz (fpextend f32:$A)))));
}
def DblToInt {
  dag A = (i32 (PPCmfvsr (f64 (PPCfctiwz f64:$A))));
  dag B = (i32 (PPCmfvsr (f64 (PPCfctiwz f64:$B))));
  dag C = (i32 (PPCmfvsr (f64 (PPCfctiwz f64:$C))));
  dag D = (i32 (PPCmfvsr (f64 (PPCfctiwz f64:$D))));
}
def DblToUInt {
  dag A = (i32 (PPCmfvsr (f64 (PPCfctiwuz f64:$A))));
  dag B = (i32 (PPCmfvsr (f64 (PPCfctiwuz f64:$B))));
  dag C = (i32 (PPCmfvsr (f64 (PPCfctiwuz f64:$C))));
  dag D = (i32 (PPCmfvsr (f64 (PPCfctiwuz f64:$D))));
}
def DblToLong {
  dag A = (i64 (PPCmfvsr (f64 (PPCfctidz f64:$A))));
}
def DblToULong {
  dag A = (i64 (PPCmfvsr (f64 (PPCfctiduz f64:$A))));
}
def DblToIntLoad {
  dag A = (i32 (PPCmfvsr (PPCfctiwz (f64 (load ForceXForm:$A)))));
}
def DblToIntLoadP9 {
  dag A = (i32 (PPCmfvsr (PPCfctiwz (f64 (load DSForm:$A)))));
}
def DblToUIntLoad {
  dag A = (i32 (PPCmfvsr (PPCfctiwuz (f64 (load ForceXForm:$A)))));
}
def DblToUIntLoadP9 {
  dag A = (i32 (PPCmfvsr (PPCfctiwuz (f64 (load DSForm:$A)))));
}
def DblToLongLoad {
  dag A = (i64 (PPCmfvsr (PPCfctidz (f64 (load ForceXForm:$A)))));
}
def DblToULongLoad {
  dag A = (i64 (PPCmfvsr (PPCfctiduz (f64 (load ForceXForm:$A)))));
}

// FP load dags (for f32 -> v4f32)
def LoadFP {
  dag A = (f32 (load ForceXForm:$A));
  dag B = (f32 (load ForceXForm:$B));
  dag C = (f32 (load ForceXForm:$C));
  dag D = (f32 (load ForceXForm:$D));
}

// FP merge dags (for f32 -> v4f32)
def MrgFP {
  dag LD32A = (SUBREG_TO_REG (i64 1), (LIWZX ForceXForm:$A), sub_64);
  dag LD32B = (SUBREG_TO_REG (i64 1), (LIWZX ForceXForm:$B), sub_64);
  dag LD32C = (SUBREG_TO_REG (i64 1), (LIWZX ForceXForm:$C), sub_64);
  dag LD32D = (SUBREG_TO_REG (i64 1), (LIWZX ForceXForm:$D), sub_64);
  dag AC = (XVCVDPSP (XXPERMDI (SUBREG_TO_REG (i64 1), $A, sub_64),
                               (SUBREG_TO_REG (i64 1), $C, sub_64), 0));
  dag BD = (XVCVDPSP (XXPERMDI (SUBREG_TO_REG (i64 1), $B, sub_64),
                               (SUBREG_TO_REG (i64 1), $D, sub_64), 0));
  dag ABhToFlt = (XVCVDPSP (XXPERMDI $A, $B, 0));
  dag ABlToFlt = (XVCVDPSP (XXPERMDI $A, $B, 3));
  dag BAhToFlt = (XVCVDPSP (XXPERMDI $B, $A, 0));
  dag BAlToFlt = (XVCVDPSP (XXPERMDI $B, $A, 3));
}
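// Note: AC and BD above deliberately pair the scalars as (A,C) and (B,D).
// Each XVCVDPSP turns a doubleword pair into two converted singles (placed
// in words 0 and 2), so a subsequent even-word merge of AC with BD can
// reassemble the four results in (A,B,C,D) order for the v4f32 build_vector
// patterns that consume these dags.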

// Word-element merge dags - conversions from f64 to i32 merged into vectors.
def MrgWords {
  // For big endian, we merge low and hi doublewords (A, B).
  dag A0B0 = (v2f64 (XXPERMDI v2f64:$A, v2f64:$B, 0));
  dag A1B1 = (v2f64 (XXPERMDI v2f64:$A, v2f64:$B, 3));
  dag CVA1B1S = (v4i32 (XVCVDPSXWS A1B1));
  dag CVA0B0S = (v4i32 (XVCVDPSXWS A0B0));
  dag CVA1B1U = (v4i32 (XVCVDPUXWS A1B1));
  dag CVA0B0U = (v4i32 (XVCVDPUXWS A0B0));

  // For little endian, we merge low and hi doublewords (B, A).
  dag B1A1 = (v2f64 (XXPERMDI v2f64:$B, v2f64:$A, 0));
  dag B0A0 = (v2f64 (XXPERMDI v2f64:$B, v2f64:$A, 3));
  dag CVB1A1S = (v4i32 (XVCVDPSXWS B1A1));
  dag CVB0A0S = (v4i32 (XVCVDPSXWS B0A0));
  dag CVB1A1U = (v4i32 (XVCVDPUXWS B1A1));
  dag CVB0A0U = (v4i32 (XVCVDPUXWS B0A0));

  // For big endian, we merge hi doublewords of (A, C) and (B, D), convert
  // then merge.
  dag AC = (v2f64 (XXPERMDI (SUBREG_TO_REG (i64 1), f64:$A, sub_64),
                            (SUBREG_TO_REG (i64 1), f64:$C, sub_64), 0));
  dag BD = (v2f64 (XXPERMDI (SUBREG_TO_REG (i64 1), f64:$B, sub_64),
                            (SUBREG_TO_REG (i64 1), f64:$D, sub_64), 0));
  dag CVACS = (v4i32 (XVCVDPSXWS AC));
  dag CVBDS = (v4i32 (XVCVDPSXWS BD));
  dag CVACU = (v4i32 (XVCVDPUXWS AC));
  dag CVBDU = (v4i32 (XVCVDPUXWS BD));

  // For little endian, we merge hi doublewords of (D, B) and (C, A), convert
  // then merge.
  dag DB = (v2f64 (XXPERMDI (SUBREG_TO_REG (i64 1), f64:$D, sub_64),
                            (SUBREG_TO_REG (i64 1), f64:$B, sub_64), 0));
  dag CA = (v2f64 (XXPERMDI (SUBREG_TO_REG (i64 1), f64:$C, sub_64),
                            (SUBREG_TO_REG (i64 1), f64:$A, sub_64), 0));
  dag CVDBS = (v4i32 (XVCVDPSXWS DB));
  dag CVCAS = (v4i32 (XVCVDPSXWS CA));
  dag CVDBU = (v4i32 (XVCVDPUXWS DB));
  dag CVCAU = (v4i32 (XVCVDPUXWS CA));
}

def DblwdCmp {
  dag SGTW = (v2i64 (v2i64 (VCMPGTSW v2i64:$vA, v2i64:$vB)));
  dag UGTW = (v2i64 (v2i64 (VCMPGTUW v2i64:$vA, v2i64:$vB)));
  dag EQW = (v2i64 (v2i64 (VCMPEQUW v2i64:$vA, v2i64:$vB)));
  dag UGTWSHAND = (v2i64 (XXLAND (v2i64 (XXSLDWI UGTW, UGTW, 1)), EQW));
  dag EQWSHAND = (v2i64 (XXLAND (v2i64 (XXSLDWI EQW, EQW, 1)), EQW));
  dag SGTWOR = (v2i64 (XXLOR SGTW, UGTWSHAND));
  dag UGTWOR = (v2i64 (XXLOR UGTW, UGTWSHAND));
  dag MRGSGT = (v2i64 (XXPERMDI (v2i64 (XXSPLTW SGTWOR, 0)),
                                (v2i64 (XXSPLTW SGTWOR, 2)), 0));
  dag MRGUGT = (v2i64 (XXPERMDI (v2i64 (XXSPLTW UGTWOR, 0)),
                                (v2i64 (XXSPLTW UGTWOR, 2)), 0));
  dag MRGEQ = (v2i64 (XXPERMDI (v2i64 (XXSPLTW EQWSHAND, 0)),
                               (v2i64 (XXSPLTW EQWSHAND, 2)), 0));
}
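// How DblwdCmp synthesizes doubleword compares from word compares: within
// each doubleword, a > b holds iff hi(a) > hi(b), or hi(a) == hi(b) and
// lo(a) >u lo(b) (low halves always compare unsigned). XXSLDWI aligns the
// low-word compare result with the high-word position, XXLAND combines it
// with high-word equality, and XXLOR folds in the high-word greater-than.
// XXSPLTW of words 0 and 2 then propagates each doubleword's high-word
// verdict across that doubleword, and XXPERMDI reassembles the two
// doublewords into the final all-ones/all-zeros mask.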

//---------------------------- Anonymous Patterns ----------------------------//
// Predicate combinations are kept in roughly chronological order in terms of
// instruction availability in the architecture. For example, VSX came in with
// ISA 2.06 (Power7). There have since been additions in ISA 2.07 (Power8) and
// ISA 3.0 (Power9). However, the granularity of features on later subtargets
// is finer for various reasons. For example, we have Power8Vector,
// Power8Altivec, DirectMove that all came in with ISA 2.07. The situation is
// similar with ISA 3.0 with Power9Vector, Power9Altivec, IsISA3_0. Then there
// are orthogonal predicates such as endianness for which the order was
// arbitrarily chosen to be Big, Little.
//
// Predicate combinations available:
// [HasVSX, IsLittleEndian, HasP8Altivec] Altivec patterns using VSX instr.
// [HasVSX, IsBigEndian, HasP8Altivec] Altivec patterns using VSX instr.
// [HasVSX]
// [HasVSX, IsBigEndian]
// [HasVSX, IsLittleEndian]
// [HasVSX, NoP9Vector]
// [HasVSX, NoP9Vector, IsLittleEndian]
// [HasVSX, NoP9Vector, IsBigEndian]
// [HasVSX, HasOnlySwappingMemOps]
// [HasVSX, HasOnlySwappingMemOps, IsBigEndian]
// [HasVSX, NoP8Vector]
// [HasVSX, HasP8Vector]
// [HasVSX, HasP8Vector, IsBigEndian]
// [HasVSX, HasP8Vector, IsBigEndian, IsPPC64]
// [HasVSX, HasP8Vector, IsLittleEndian]
// [HasVSX, HasP8Vector, NoP9Vector, IsBigEndian, IsPPC64]
// [HasVSX, HasP8Vector, NoP9Vector, IsLittleEndian]
// [HasVSX, HasP8Altivec]
// [HasVSX, HasDirectMove]
// [HasVSX, HasDirectMove, IsBigEndian]
// [HasVSX, HasDirectMove, IsLittleEndian]
// [HasVSX, HasDirectMove, NoP9Altivec, IsBigEndian, IsPPC64]
// [HasVSX, HasDirectMove, NoP9Vector, IsBigEndian, IsPPC64]
// [HasVSX, HasDirectMove, NoP9Altivec, IsLittleEndian]
// [HasVSX, HasDirectMove, NoP9Vector, IsLittleEndian]
// [HasVSX, HasP9Vector]
// [HasVSX, HasP9Vector, NoP10Vector]
// [HasVSX, HasP9Vector, IsBigEndian]
// [HasVSX, HasP9Vector, IsBigEndian, IsPPC64]
// [HasVSX, HasP9Vector, IsLittleEndian]
// [HasVSX, HasP9Altivec]
// [HasVSX, HasP9Altivec, IsBigEndian, IsPPC64]
// [HasVSX, HasP9Altivec, IsLittleEndian]
// [HasVSX, IsISA3_0, HasDirectMove, IsBigEndian, IsPPC64]
// [HasVSX, IsISA3_0, HasDirectMove, IsLittleEndian]

// These Altivec patterns are here because we need a VSX instruction to match
// the intrinsic (but only on little-endian systems).
let Predicates = [HasVSX, IsLittleEndian, HasP8Altivec] in
  def : Pat<(v16i8 (int_ppc_altivec_crypto_vpermxor v16i8:$a,
                                                    v16i8:$b, v16i8:$c)),
            (v16i8 (VPERMXOR $a, $b, (XXLNOR (COPY_TO_REGCLASS $c, VSRC),
                                             (COPY_TO_REGCLASS $c, VSRC))))>;
let Predicates = [HasVSX, IsBigEndian, HasP8Altivec] in
  def : Pat<(v16i8 (int_ppc_altivec_crypto_vpermxor v16i8:$a,
                                                    v16i8:$b, v16i8:$c)),
            (v16i8 (VPERMXOR $a, $b, $c))>;
let Predicates = [HasVSX, HasP8Altivec] in
  def : Pat<(v16i8 (int_ppc_altivec_crypto_vpermxor_be v16i8:$a,
                                                       v16i8:$b, v16i8:$c)),
            (v16i8 (VPERMXOR $a, $b, $c))>;
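// The XXLNOR in the little-endian pattern complements the control vector:
// vpermxor selects bytes using big-endian numbering, so on LE subtargets the
// intrinsic's semantics are presumably recovered by inverting each selector,
// which is the VSX operation that makes these patterns VSX-dependent.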

let AddedComplexity = 400 in {
// Valid for any VSX subtarget, regardless of endianness.
let Predicates = [HasVSX] in {
def : Pat<(v4i32 (vnot v4i32:$A)),
          (v4i32 (XXLNOR $A, $A))>;
def : Pat<(v4i32 (or (and (vnot v4i32:$C), v4i32:$A),
                     (and v4i32:$B, v4i32:$C))),
          (v4i32 (XXSEL $A, $B, $C))>;

def : Pat<(f64 (fpimm0neg)),
          (f64 (XSNEGDP (XXLXORdpz)))>;

def : Pat<(f64 (nzFPImmExactInti5:$A)),
          (COPY_TO_REGCLASS (XVCVSXWDP (COPY_TO_REGCLASS
            (VSPLTISW (getFPAs5BitExactInt fpimm:$A)), VSRC)), VSFRC)>;

// Additional fnmsub patterns for the PPC-specific ISD opcode
def : Pat<(PPCfnmsub f64:$A, f64:$B, f64:$C),
          (XSNMSUBADP $C, $A, $B)>;
def : Pat<(fneg (PPCfnmsub f64:$A, f64:$B, f64:$C)),
          (XSMSUBADP $C, $A, $B)>;
def : Pat<(PPCfnmsub f64:$A, f64:$B, (fneg f64:$C)),
          (XSNMADDADP $C, $A, $B)>;

def : Pat<(PPCfnmsub v2f64:$A, v2f64:$B, v2f64:$C),
          (XVNMSUBADP $C, $A, $B)>;
def : Pat<(fneg (PPCfnmsub v2f64:$A, v2f64:$B, v2f64:$C)),
          (XVMSUBADP $C, $A, $B)>;
def : Pat<(PPCfnmsub v2f64:$A, v2f64:$B, (fneg v2f64:$C)),
          (XVNMADDADP $C, $A, $B)>;

def : Pat<(PPCfnmsub v4f32:$A, v4f32:$B, v4f32:$C),
          (XVNMSUBASP $C, $A, $B)>;
def : Pat<(fneg (PPCfnmsub v4f32:$A, v4f32:$B, v4f32:$C)),
          (XVMSUBASP $C, $A, $B)>;
def : Pat<(PPCfnmsub v4f32:$A, v4f32:$B, (fneg v4f32:$C)),
          (XVNMADDASP $C, $A, $B)>;
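// These rewrites rely on simple sign algebra: PPCfnmsub computes
// -(A*B - C), so negating it yields A*B - C (the msub form), and feeding it
// a negated addend yields -(A*B + C) (the nmadd form). Selecting the fused
// forms directly avoids materializing the intermediate negations.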

def : Pat<(PPCfsqrt f64:$frA), (XSSQRTDP $frA)>;
def : Pat<(PPCfsqrt v2f64:$frA), (XVSQRTDP $frA)>;
def : Pat<(PPCfsqrt v4f32:$frA), (XVSQRTSP $frA)>;

def : Pat<(v2f64 (bitconvert v4f32:$A)),
          (COPY_TO_REGCLASS $A, VSRC)>;
def : Pat<(v2f64 (bitconvert v4i32:$A)),
          (COPY_TO_REGCLASS $A, VSRC)>;
def : Pat<(v2f64 (bitconvert v8i16:$A)),
          (COPY_TO_REGCLASS $A, VSRC)>;
def : Pat<(v2f64 (bitconvert v16i8:$A)),
          (COPY_TO_REGCLASS $A, VSRC)>;

def : Pat<(v4f32 (bitconvert v2f64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v4i32 (bitconvert v2f64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v8i16 (bitconvert v2f64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v16i8 (bitconvert v2f64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;

def : Pat<(v2i64 (bitconvert v4f32:$A)),
          (COPY_TO_REGCLASS $A, VSRC)>;
def : Pat<(v2i64 (bitconvert v4i32:$A)),
          (COPY_TO_REGCLASS $A, VSRC)>;
def : Pat<(v2i64 (bitconvert v8i16:$A)),
          (COPY_TO_REGCLASS $A, VSRC)>;
def : Pat<(v2i64 (bitconvert v16i8:$A)),
          (COPY_TO_REGCLASS $A, VSRC)>;

def : Pat<(v4f32 (bitconvert v2i64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v4i32 (bitconvert v2i64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v8i16 (bitconvert v2i64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v16i8 (bitconvert v2i64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;

def : Pat<(v2f64 (bitconvert v2i64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v2i64 (bitconvert v2f64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;

def : Pat<(v2f64 (bitconvert v1i128:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v1i128 (bitconvert v2f64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;

def : Pat<(v2i64 (bitconvert f128:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v4i32 (bitconvert f128:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v8i16 (bitconvert f128:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v16i8 (bitconvert f128:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;

def : Pat<(v2f64 (PPCsvec2fp v4i32:$C, 0)),
          (v2f64 (XVCVSXWDP (v2i64 (XXMRGHW $C, $C))))>;
def : Pat<(v2f64 (PPCsvec2fp v4i32:$C, 1)),
          (v2f64 (XVCVSXWDP (v2i64 (XXMRGLW $C, $C))))>;

def : Pat<(v2f64 (PPCuvec2fp v4i32:$C, 0)),
          (v2f64 (XVCVUXWDP (v2i64 (XXMRGHW $C, $C))))>;
def : Pat<(v2f64 (PPCuvec2fp v4i32:$C, 1)),
          (v2f64 (XVCVUXWDP (v2i64 (XXMRGLW $C, $C))))>;

def : Pat<(v2f64 (PPCfpexth v4f32:$C, 0)), (XVCVSPDP (XXMRGHW $C, $C))>;
def : Pat<(v2f64 (PPCfpexth v4f32:$C, 1)), (XVCVSPDP (XXMRGLW $C, $C))>;

// Permutes.
def : Pat<(v2f64 (PPCxxswapd v2f64:$src)), (XXPERMDI $src, $src, 2)>;
def : Pat<(v2i64 (PPCxxswapd v2i64:$src)), (XXPERMDI $src, $src, 2)>;
def : Pat<(v4f32 (PPCxxswapd v4f32:$src)), (XXPERMDI $src, $src, 2)>;
def : Pat<(v4i32 (PPCxxswapd v4i32:$src)), (XXPERMDI $src, $src, 2)>;
def : Pat<(v2f64 (PPCswapNoChain v2f64:$src)), (XXPERMDI $src, $src, 2)>;

// PPCvecshl XT, XA, XA, 2 can be selected as either XXSLDWI XT,XA,XA,2 or
// XXSWAPD XT,XA (i.e. XXPERMDI XT,XA,XA,2); the latter is more profitable.
def : Pat<(v4i32 (PPCvecshl v4i32:$src, v4i32:$src, 2)),
          (XXPERMDI $src, $src, 2)>;

// Selects.
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETLT)),
          (SELECT_VSRC (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETULT)),
          (SELECT_VSRC (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETLE)),
          (SELECT_VSRC (CRORC $lhs, $rhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETULE)),
          (SELECT_VSRC (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETEQ)),
          (SELECT_VSRC (CREQV $lhs, $rhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETGE)),
          (SELECT_VSRC (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETUGE)),
          (SELECT_VSRC (CRORC $lhs, $rhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETGT)),
          (SELECT_VSRC (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETUGT)),
          (SELECT_VSRC (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETNE)),
          (SELECT_VSRC (CRXOR $lhs, $rhs), $tval, $fval)>;

def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETLT)),
          (SELECT_VSFRC (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETULT)),
          (SELECT_VSFRC (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETLE)),
          (SELECT_VSFRC (CRORC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETULE)),
          (SELECT_VSFRC (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETEQ)),
          (SELECT_VSFRC (CREQV $lhs, $rhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETGE)),
          (SELECT_VSFRC (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETUGE)),
          (SELECT_VSFRC (CRORC $lhs, $rhs), $tval, $fval)>;
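// For i1 operands, selectcc reduces to CR-bit logic on the two condition
// bits: for example, signed "lt" is true exactly when $lhs is set and $rhs
// is clear (i1 true sign-extends to -1), hence the CRANDC operand orders in
// these patterns; the unsigned comparisons swap the roles of the two bits.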
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETGT)),
          (SELECT_VSFRC (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETUGT)),
          (SELECT_VSFRC (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETNE)),
          (SELECT_VSFRC (CRXOR $lhs, $rhs), $tval, $fval)>;

// Divides.
def : Pat<(int_ppc_vsx_xvdivsp v4f32:$A, v4f32:$B),
          (XVDIVSP $A, $B)>;
def : Pat<(int_ppc_vsx_xvdivdp v2f64:$A, v2f64:$B),
          (XVDIVDP $A, $B)>;

// Vector tests for software divide and sqrt.
def : Pat<(i32 (int_ppc_vsx_xvtdivdp v2f64:$A, v2f64:$B)),
          (COPY_TO_REGCLASS (XVTDIVDP $A, $B), GPRC)>;
def : Pat<(i32 (int_ppc_vsx_xvtdivsp v4f32:$A, v4f32:$B)),
          (COPY_TO_REGCLASS (XVTDIVSP $A, $B), GPRC)>;
def : Pat<(i32 (int_ppc_vsx_xvtsqrtdp v2f64:$A)),
          (COPY_TO_REGCLASS (XVTSQRTDP $A), GPRC)>;
def : Pat<(i32 (int_ppc_vsx_xvtsqrtsp v4f32:$A)),
          (COPY_TO_REGCLASS (XVTSQRTSP $A), GPRC)>;

// Reciprocal estimate
def : Pat<(int_ppc_vsx_xvresp v4f32:$A),
          (XVRESP $A)>;
def : Pat<(int_ppc_vsx_xvredp v2f64:$A),
          (XVREDP $A)>;

// Reciprocal square root estimate
def : Pat<(int_ppc_vsx_xvrsqrtesp v4f32:$A),
          (XVRSQRTESP $A)>;
def : Pat<(int_ppc_vsx_xvrsqrtedp v2f64:$A),
          (XVRSQRTEDP $A)>;

// Vector selection
def : Pat<(v16i8 (vselect v16i8:$vA, v16i8:$vB, v16i8:$vC)),
          (COPY_TO_REGCLASS
            (XXSEL (COPY_TO_REGCLASS $vC, VSRC),
                   (COPY_TO_REGCLASS $vB, VSRC),
                   (COPY_TO_REGCLASS $vA, VSRC)), VRRC)>;
def : Pat<(v8i16 (vselect v8i16:$vA, v8i16:$vB, v8i16:$vC)),
          (COPY_TO_REGCLASS
            (XXSEL (COPY_TO_REGCLASS $vC, VSRC),
                   (COPY_TO_REGCLASS $vB, VSRC),
                   (COPY_TO_REGCLASS $vA, VSRC)), VRRC)>;
def : Pat<(vselect v4i32:$vA, v4i32:$vB, v4i32:$vC),
          (XXSEL $vC, $vB, $vA)>;
def : Pat<(vselect v2i64:$vA, v2i64:$vB, v2i64:$vC),
          (XXSEL $vC, $vB, $vA)>;
def : Pat<(vselect v4i32:$vA, v4f32:$vB, v4f32:$vC),
          (XXSEL $vC, $vB, $vA)>;
def : Pat<(vselect v2i64:$vA, v2f64:$vB, v2f64:$vC),
          (XXSEL $vC, $vB, $vA)>;
def : Pat<(v1i128 (vselect v1i128:$vA, v1i128:$vB, v1i128:$vC)),
          (COPY_TO_REGCLASS
            (XXSEL (COPY_TO_REGCLASS $vC, VSRC),
                   (COPY_TO_REGCLASS $vB, VSRC),
                   (COPY_TO_REGCLASS $vA, VSRC)), VRRC)>;

def : Pat<(v4f32 (any_fmaxnum v4f32:$src1, v4f32:$src2)),
          (v4f32 (XVMAXSP $src1, $src2))>;
def : Pat<(v4f32 (any_fminnum v4f32:$src1, v4f32:$src2)),
          (v4f32 (XVMINSP $src1, $src2))>;
def : Pat<(v2f64 (any_fmaxnum v2f64:$src1, v2f64:$src2)),
          (v2f64 (XVMAXDP $src1, $src2))>;
def : Pat<(v2f64 (any_fminnum v2f64:$src1, v2f64:$src2)),
          (v2f64 (XVMINDP $src1, $src2))>;

// f32 abs
def : Pat<(f32 (fabs f32:$S)),
          (f32 (COPY_TO_REGCLASS (XSABSDP
                                   (COPY_TO_REGCLASS $S, VSFRC)), VSSRC))>;

// f32 nabs
def : Pat<(f32 (fneg (fabs f32:$S))),
          (f32 (COPY_TO_REGCLASS (XSNABSDP
                                   (COPY_TO_REGCLASS $S, VSFRC)), VSSRC))>;

// f32 Min.
def : Pat<(f32 (fminnum_ieee f32:$A, f32:$B)),
          (f32 FpMinMax.F32Min)>;
def : Pat<(f32 (fminnum_ieee (fcanonicalize f32:$A), f32:$B)),
          (f32 FpMinMax.F32Min)>;
def : Pat<(f32 (fminnum_ieee f32:$A, (fcanonicalize f32:$B))),
          (f32 FpMinMax.F32Min)>;
def : Pat<(f32 (fminnum_ieee (fcanonicalize f32:$A), (fcanonicalize f32:$B))),
          (f32 FpMinMax.F32Min)>;
// f32 Max.
def : Pat<(f32 (fmaxnum_ieee f32:$A, f32:$B)),
          (f32 FpMinMax.F32Max)>;
def : Pat<(f32 (fmaxnum_ieee (fcanonicalize f32:$A), f32:$B)),
          (f32 FpMinMax.F32Max)>;
def : Pat<(f32 (fmaxnum_ieee f32:$A, (fcanonicalize f32:$B))),
          (f32 FpMinMax.F32Max)>;
def : Pat<(f32 (fmaxnum_ieee (fcanonicalize f32:$A), (fcanonicalize f32:$B))),
          (f32 FpMinMax.F32Max)>;

// f64 Min.
def : Pat<(f64 (fminnum_ieee f64:$A, f64:$B)),
          (f64 (XSMINDP $A, $B))>;
def : Pat<(f64 (fminnum_ieee (fcanonicalize f64:$A), f64:$B)),
          (f64 (XSMINDP $A, $B))>;
def : Pat<(f64 (fminnum_ieee f64:$A, (fcanonicalize f64:$B))),
          (f64 (XSMINDP $A, $B))>;
def : Pat<(f64 (fminnum_ieee (fcanonicalize f64:$A), (fcanonicalize f64:$B))),
          (f64 (XSMINDP $A, $B))>;
// f64 Max.
def : Pat<(f64 (fmaxnum_ieee f64:$A, f64:$B)),
          (f64 (XSMAXDP $A, $B))>;
def : Pat<(f64 (fmaxnum_ieee (fcanonicalize f64:$A), f64:$B)),
          (f64 (XSMAXDP $A, $B))>;
def : Pat<(f64 (fmaxnum_ieee f64:$A, (fcanonicalize f64:$B))),
          (f64 (XSMAXDP $A, $B))>;
def : Pat<(f64 (fmaxnum_ieee (fcanonicalize f64:$A), (fcanonicalize f64:$B))),
          (f64 (XSMAXDP $A, $B))>;

def : Pat<(int_ppc_vsx_stxvd2x_be v2f64:$rS, ForceXForm:$dst),
          (STXVD2X $rS, ForceXForm:$dst)>;
def : Pat<(int_ppc_vsx_stxvw4x_be v4i32:$rS, ForceXForm:$dst),
          (STXVW4X $rS, ForceXForm:$dst)>;
def : Pat<(v4i32 (int_ppc_vsx_lxvw4x_be ForceXForm:$src)),
          (LXVW4X ForceXForm:$src)>;
def : Pat<(v2f64 (int_ppc_vsx_lxvd2x_be ForceXForm:$src)),
          (LXVD2X ForceXForm:$src)>;

// Rounding for single precision.
def : Pat<(f32 (any_fround f32:$S)),
          (f32 (COPY_TO_REGCLASS (XSRDPI
                                   (COPY_TO_REGCLASS $S, VSFRC)), VSSRC))>;
def : Pat<(f32 (any_ffloor f32:$S)),
          (f32 (COPY_TO_REGCLASS (XSRDPIM
                                   (COPY_TO_REGCLASS $S, VSFRC)), VSSRC))>;
def : Pat<(f32 (any_fceil f32:$S)),
          (f32 (COPY_TO_REGCLASS (XSRDPIP
                                   (COPY_TO_REGCLASS $S, VSFRC)), VSSRC))>;
def : Pat<(f32 (any_ftrunc f32:$S)),
          (f32 (COPY_TO_REGCLASS (XSRDPIZ
                                   (COPY_TO_REGCLASS $S, VSFRC)), VSSRC))>;
def : Pat<(f32 (any_frint f32:$S)),
          (f32 (COPY_TO_REGCLASS (XSRDPIC
                                   (COPY_TO_REGCLASS $S, VSFRC)), VSSRC))>;
def : Pat<(v4f32 (any_frint v4f32:$S)), (v4f32 (XVRSPIC $S))>;

// Rounding for double precision.
def : Pat<(f64 (any_frint f64:$S)), (f64 (XSRDPIC $S))>;
def : Pat<(v2f64 (any_frint v2f64:$S)), (v2f64 (XVRDPIC $S))>;

// Rounding without exceptions (nearbyint). Due to strange tblgen behaviour,
// these need to be defined after the any_frint versions so ISEL will correctly
// add the chain to the strict versions.
def : Pat<(f32 (fnearbyint f32:$S)),
          (f32 (COPY_TO_REGCLASS (XSRDPIC
                                   (COPY_TO_REGCLASS $S, VSFRC)), VSSRC))>;
def : Pat<(f64 (fnearbyint f64:$S)),
          (f64 (XSRDPIC $S))>;
def : Pat<(v2f64 (fnearbyint v2f64:$S)),
          (v2f64 (XVRDPIC $S))>;
def : Pat<(v4f32 (fnearbyint v4f32:$S)),
          (v4f32 (XVRSPIC $S))>;
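// Note: rounding an f32 through the double-precision xsrdpi* forms above is
// exact: every single-precision value converts exactly to double, and the
// integral result of the rounding is always representable in single
// precision, so no double rounding occurs.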
// Materialize a zero-vector of long long
def : Pat<(v2i64 immAllZerosV),
          (v2i64 (XXLXORz))>;

// Build vectors of floating point converted to i32.
def : Pat<(v4i32 (build_vector DblToInt.A, DblToInt.A,
                               DblToInt.A, DblToInt.A)),
          (v4i32 (XXSPLTW (SUBREG_TO_REG (i64 1), (XSCVDPSXWS $A), sub_64), 1))>;
def : Pat<(v4i32 (build_vector DblToUInt.A, DblToUInt.A,
                               DblToUInt.A, DblToUInt.A)),
          (v4i32 (XXSPLTW (SUBREG_TO_REG (i64 1), (XSCVDPUXWS $A), sub_64), 1))>;
def : Pat<(v2i64 (build_vector DblToLong.A, DblToLong.A)),
          (v2i64 (XXPERMDI (SUBREG_TO_REG (i64 1), (XSCVDPSXDS $A), sub_64),
                           (SUBREG_TO_REG (i64 1), (XSCVDPSXDS $A), sub_64), 0))>;
def : Pat<(v2i64 (build_vector DblToULong.A, DblToULong.A)),
          (v2i64 (XXPERMDI (SUBREG_TO_REG (i64 1), (XSCVDPUXDS $A), sub_64),
                           (SUBREG_TO_REG (i64 1), (XSCVDPUXDS $A), sub_64), 0))>;
def : Pat<(v4i32 (PPCSToV DblToInt.A)),
          (v4i32 (SUBREG_TO_REG (i64 1), (XSCVDPSXWS f64:$A), sub_64))>;
def : Pat<(v4i32 (PPCSToV DblToUInt.A)),
          (v4i32 (SUBREG_TO_REG (i64 1), (XSCVDPUXWS f64:$A), sub_64))>;
defm : ScalToVecWPermute<
  v4i32, FltToIntLoad.A,
  (XXSPLTW (SUBREG_TO_REG (i64 1), (XSCVDPSXWSs (XFLOADf32 ForceXForm:$A)), sub_64), 1),
  (SUBREG_TO_REG (i64 1), (XSCVDPSXWSs (XFLOADf32 ForceXForm:$A)), sub_64)>;
defm : ScalToVecWPermute<
  v4i32, FltToUIntLoad.A,
  (XXSPLTW (SUBREG_TO_REG (i64 1), (XSCVDPUXWSs (XFLOADf32 ForceXForm:$A)), sub_64), 1),
  (SUBREG_TO_REG (i64 1), (XSCVDPUXWSs (XFLOADf32 ForceXForm:$A)), sub_64)>;
def : Pat<(v4f32 (build_vector (f32 (fpround f64:$A)), (f32 (fpround f64:$A)),
                               (f32 (fpround f64:$A)), (f32 (fpround f64:$A)))),
          (v4f32 (XXSPLTW (SUBREG_TO_REG (i64 1), (XSCVDPSP f64:$A), sub_64), 0))>;

def : Pat<(v4f32 (build_vector f32:$A, f32:$A, f32:$A, f32:$A)),
          (v4f32 (XXSPLTW (v4f32 (XSCVDPSPN $A)), 0))>;

// Splat loads.
def : Pat<(v2f64 (PPCldsplat ForceXForm:$A)),
          (v2f64 (LXVDSX ForceXForm:$A))>;
def : Pat<(v4f32 (PPCldsplat ForceXForm:$A)),
          (v4f32 (XXSPLTW (SUBREG_TO_REG (i64 1), (LFIWZX ForceXForm:$A), sub_64), 1))>;
def : Pat<(v2i64 (PPCldsplat ForceXForm:$A)),
          (v2i64 (LXVDSX ForceXForm:$A))>;
def : Pat<(v4i32 (PPCldsplat ForceXForm:$A)),
          (v4i32 (XXSPLTW (SUBREG_TO_REG (i64 1), (LFIWZX ForceXForm:$A), sub_64), 1))>;
def : Pat<(v2i64 (PPCzextldsplat ForceXForm:$A)),
          (v2i64 (XXPERMDIs (LFIWZX ForceXForm:$A), 0))>;
def : Pat<(v2i64 (PPCsextldsplat ForceXForm:$A)),
          (v2i64 (XXPERMDIs (LFIWAX ForceXForm:$A), 0))>;

// Build vectors of floating point converted to i64.
def : Pat<(v2i64 (build_vector FltToLong.A, FltToLong.A)),
          (v2i64 (XXPERMDIs
                 (COPY_TO_REGCLASS (XSCVDPSXDSs $A), VSFRC), 0))>;
def : Pat<(v2i64 (build_vector FltToULong.A, FltToULong.A)),
          (v2i64 (XXPERMDIs
                 (COPY_TO_REGCLASS (XSCVDPUXDSs $A), VSFRC), 0))>;
defm : ScalToVecWPermute<
  v2i64, DblToLongLoad.A,
  (XVCVDPSXDS (LXVDSX ForceXForm:$A)), (XVCVDPSXDS (LXVDSX ForceXForm:$A))>;
defm : ScalToVecWPermute<
  v2i64, DblToULongLoad.A,
  (XVCVDPUXDS (LXVDSX ForceXForm:$A)), (XVCVDPUXDS (LXVDSX ForceXForm:$A))>;
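// Hedged illustration of the two defms above: a splat of (fptosi (load
// double)) to <2 x i64>, i.e. IR shaped like
//   %d = load double, ptr %p
//   %i = fptosi double %d to i64
// splatted into both lanes, is expected to become lxvdsx + xvcvdpsxds, so
// the value is loaded once and converted in both lanes with no separate
// splat instruction.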
// Doubleword vector predicate comparisons without Power8.
let AddedComplexity = 0 in {
def : Pat<(v2i64 (PPCvcmp_rec v2i64:$vA, v2i64:$vB, 967)),
          (VCMPGTUB_rec DblwdCmp.MRGSGT, (v2i64 (XXLXORz)))>;
def : Pat<(v2i64 (PPCvcmp_rec v2i64:$vA, v2i64:$vB, 711)),
          (VCMPGTUB_rec DblwdCmp.MRGUGT, (v2i64 (XXLXORz)))>;
def : Pat<(v2i64 (PPCvcmp_rec v2i64:$vA, v2i64:$vB, 199)),
          (VCMPGTUB_rec DblwdCmp.MRGEQ, (v2i64 (XXLXORz)))>;
} // AddedComplexity = 0

// XL Compat builtins.
def : Pat<(int_ppc_fmsub f64:$A, f64:$B, f64:$C), (XSMSUBMDP $A, $B, $C)>;
def : Pat<(int_ppc_fnmadd f64:$A, f64:$B, f64:$C), (XSNMADDMDP $A, $B, $C)>;
def : Pat<(int_ppc_fre f64:$A), (XSREDP $A)>;
def : Pat<(int_ppc_frsqrte vsfrc:$XB), (XSRSQRTEDP $XB)>;
def : Pat<(int_ppc_fnabs f64:$A), (XSNABSDP $A)>;
def : Pat<(int_ppc_fnabss f32:$A), (XSNABSDPs $A)>;

// XXMRG[LH]W is a direct replacement for VMRG[LH]W respectively.
// Prefer the VSX form for greater register range.
def : Pat<(vmrglw_unary_shuffle v16i8:$vA, undef),
          (COPY_TO_REGCLASS (XXMRGLW (COPY_TO_REGCLASS $vA, VSRC),
                                     (COPY_TO_REGCLASS $vA, VSRC)), VRRC)>;
def : Pat<(vmrghw_unary_shuffle v16i8:$vA, undef),
          (COPY_TO_REGCLASS (XXMRGHW (COPY_TO_REGCLASS $vA, VSRC),
                                     (COPY_TO_REGCLASS $vA, VSRC)), VRRC)>;
def : Pat<(vmrglw_shuffle v16i8:$vA, v16i8:$vB),
          (COPY_TO_REGCLASS (XXMRGLW (COPY_TO_REGCLASS $vA, VSRC),
                                     (COPY_TO_REGCLASS $vB, VSRC)), VRRC)>;
def : Pat<(vmrghw_shuffle v16i8:$vA, v16i8:$vB),
          (COPY_TO_REGCLASS (XXMRGHW (COPY_TO_REGCLASS $vA, VSRC),
                                     (COPY_TO_REGCLASS $vB, VSRC)), VRRC)>;
def : Pat<(vmrglw_swapped_shuffle v16i8:$vA, v16i8:$vB),
          (COPY_TO_REGCLASS (XXMRGLW (COPY_TO_REGCLASS $vB, VSRC),
                                     (COPY_TO_REGCLASS $vA, VSRC)), VRRC)>;
def : Pat<(vmrghw_swapped_shuffle v16i8:$vA, v16i8:$vB),
          (COPY_TO_REGCLASS (XXMRGHW (COPY_TO_REGCLASS $vB, VSRC),
                                     (COPY_TO_REGCLASS $vA, VSRC)), VRRC)>;
def : Pat<(PPCstore_scal_int_from_vsr f64:$src, XForm:$dst, 8),
          (STXSDX $src, XForm:$dst)>;
def : Pat<(PPCstore_scal_int_from_vsr f128:$src, XForm:$dst, 8),
          (STXSDX (COPY_TO_REGCLASS $src, VSFRC), XForm:$dst)>;
} // HasVSX

// Any big endian VSX subtarget.
let Predicates = [HasVSX, IsBigEndian] in {
def : Pat<(v2f64 (scalar_to_vector f64:$A)),
          (v2f64 (SUBREG_TO_REG (i64 1), $A, sub_64))>;

def : Pat<(f64 (extractelt v2f64:$S, 0)),
          (f64 (EXTRACT_SUBREG $S, sub_64))>;
def : Pat<(f64 (extractelt v2f64:$S, 1)),
          (f64 (EXTRACT_SUBREG (XXPERMDI $S, $S, 2), sub_64))>;
def : Pat<(f64 (PPCfcfid (PPCmtvsra (i64 (vector_extract v2i64:$S, 0))))),
          (f64 (XSCVSXDDP (COPY_TO_REGCLASS $S, VSFRC)))>;
def : Pat<(f64 (PPCfcfid (PPCmtvsra (i64 (vector_extract v2i64:$S, 1))))),
          (f64 (XSCVSXDDP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
def : Pat<(f64 (PPCfcfidu (PPCmtvsra (i64 (vector_extract v2i64:$S, 0))))),
          (f64 (XSCVUXDDP (COPY_TO_REGCLASS $S, VSFRC)))>;
def : Pat<(f64 (PPCfcfidu (PPCmtvsra (i64 (vector_extract v2i64:$S, 1))))),
          (f64 (XSCVUXDDP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;

def : Pat<(f64 (vector_extract v2f64:$S, i64:$Idx)),
          (f64 VectorExtractions.BE_VARIABLE_DOUBLE)>;

def : Pat<(v2f64 (build_vector f64:$A, f64:$B)),
          (v2f64 (XXPERMDI
                    (SUBREG_TO_REG (i64 1), $A, sub_64),
                    (SUBREG_TO_REG (i64 1), $B, sub_64), 0))>;
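// For example, assembling <2 x double> from scalars $A and $B above is a
// single xxpermdi with immediate 0 (the xxmrghd extended mnemonic); no
// merge-word sequence is needed because each f64 already occupies a full
// doubleword of its VSX register.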
// Using VMRGEW to assemble the final vector would be a lower latency
// solution. However, we choose to go with the slightly higher latency
// XXPERMDI for 2 reasons:
// 1. This is likely to occur in unrolled loops where register pressure is
//    high, so we want to use the latter as it has access to all 64 VSX
//    registers.
// 2. Using Altivec instructions in this sequence would likely cause the
//    allocation of Altivec registers even for the loads which in turn would
//    force the use of LXSIWZX for the loads, adding a cycle of latency to
//    each of the loads which would otherwise be able to use LFIWZX.
def : Pat<(v4f32 (build_vector LoadFP.A, LoadFP.B, LoadFP.C, LoadFP.D)),
          (v4f32 (XXPERMDI (XXMRGHW MrgFP.LD32A, MrgFP.LD32B),
                           (XXMRGHW MrgFP.LD32C, MrgFP.LD32D), 3))>;
def : Pat<(v4f32 (build_vector f32:$A, f32:$B, f32:$C, f32:$D)),
          (VMRGEW MrgFP.AC, MrgFP.BD)>;
def : Pat<(v4f32 (build_vector DblToFlt.A0, DblToFlt.A1,
                               DblToFlt.B0, DblToFlt.B1)),
          (v4f32 (VMRGEW MrgFP.ABhToFlt, MrgFP.ABlToFlt))>;

// Convert 4 doubles to a vector of ints.
def : Pat<(v4i32 (build_vector DblToInt.A, DblToInt.B,
                               DblToInt.C, DblToInt.D)),
          (v4i32 (VMRGEW MrgWords.CVACS, MrgWords.CVBDS))>;
def : Pat<(v4i32 (build_vector DblToUInt.A, DblToUInt.B,
                               DblToUInt.C, DblToUInt.D)),
          (v4i32 (VMRGEW MrgWords.CVACU, MrgWords.CVBDU))>;
def : Pat<(v4i32 (build_vector ExtDbl.A0S, ExtDbl.A1S,
                               ExtDbl.B0S, ExtDbl.B1S)),
          (v4i32 (VMRGEW MrgWords.CVA0B0S, MrgWords.CVA1B1S))>;
def : Pat<(v4i32 (build_vector ExtDbl.A0U, ExtDbl.A1U,
                               ExtDbl.B0U, ExtDbl.B1U)),
          (v4i32 (VMRGEW MrgWords.CVA0B0U, MrgWords.CVA1B1U))>;
def : Pat<(v2f64 (build_vector (f64 (fpextend (extractelt v4f32:$A, 0))),
                               (f64 (fpextend (extractelt v4f32:$A, 1))))),
          (v2f64 (XVCVSPDP (XXMRGHW $A, $A)))>;
def : Pat<(v2f64 (build_vector (f64 (fpextend (extractelt v4f32:$A, 1))),
                               (f64 (fpextend (extractelt v4f32:$A, 0))))),
          (v2f64 (XXPERMDI (XVCVSPDP (XXMRGHW $A, $A)),
                           (XVCVSPDP (XXMRGHW $A, $A)), 2))>;
def : Pat<(v2f64 (build_vector (f64 (fpextend (extractelt v4f32:$A, 0))),
                               (f64 (fpextend (extractelt v4f32:$A, 2))))),
          (v2f64 (XVCVSPDP $A))>;
def : Pat<(v2f64 (build_vector (f64 (fpextend (extractelt v4f32:$A, 1))),
                               (f64 (fpextend (extractelt v4f32:$A, 3))))),
          (v2f64 (XVCVSPDP (XXSLDWI $A, $A, 1)))>;
def : Pat<(v2f64 (build_vector (f64 (fpextend (extractelt v4f32:$A, 2))),
                               (f64 (fpextend (extractelt v4f32:$A, 3))))),
          (v2f64 (XVCVSPDP (XXMRGLW $A, $A)))>;
def : Pat<(v2f64 (build_vector (f64 (fpextend (extractelt v4f32:$A, 3))),
                               (f64 (fpextend (extractelt v4f32:$A, 2))))),
          (v2f64 (XXPERMDI (XVCVSPDP (XXMRGLW $A, $A)),
                           (XVCVSPDP (XXMRGLW $A, $A)), 2))>;
def : Pat<(v2f64 (build_vector (f64 (fpextend (extractelt v4f32:$A, 0))),
                               (f64 (fpextend (extractelt v4f32:$B, 0))))),
          (v2f64 (XVCVSPDP (XXPERMDI $A, $B, 0)))>;
def : Pat<(v2f64 (build_vector (f64 (fpextend (extractelt v4f32:$A, 3))),
                               (f64 (fpextend (extractelt v4f32:$B, 3))))),
          (v2f64 (XVCVSPDP (XXSLDWI (XXPERMDI $A, $B, 3),
                                    (XXPERMDI $A, $B, 3), 1)))>;
def : Pat<(v2i64 (fp_to_sint
                   (build_vector (f64 (fpextend (extractelt v4f32:$A, 0))),
                                 (f64 (fpextend (extractelt v4f32:$A, 2)))))),
          (v2i64 (XVCVSPSXDS $A))>;
def : Pat<(v2i64 (fp_to_uint
                   (build_vector (f64 (fpextend (extractelt v4f32:$A, 0))),
                                 (f64 (fpextend (extractelt v4f32:$A, 2)))))),
          (v2i64 (XVCVSPUXDS $A))>;
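// The element pairs are significant here: on big endian, xvcvspdp and the
// xvcvspsxds/xvcvspuxds forms read the single-precision values in words 0
// and 2 of the source register, so pairs drawn from the odd elements (1, 3)
// are first rotated left one word with xxsldwi, as in the patterns below.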
def : Pat<(v2i64 (fp_to_sint
                   (build_vector (f64 (fpextend (extractelt v4f32:$A, 1))),
                                 (f64 (fpextend (extractelt v4f32:$A, 3)))))),
          (v2i64 (XVCVSPSXDS (XXSLDWI $A, $A, 1)))>;
def : Pat<(v2i64 (fp_to_uint
                   (build_vector (f64 (fpextend (extractelt v4f32:$A, 1))),
                                 (f64 (fpextend (extractelt v4f32:$A, 3)))))),
          (v2i64 (XVCVSPUXDS (XXSLDWI $A, $A, 1)))>;
def : Pat<WToDPExtractConv.BV02S,
          (v2f64 (XVCVSXWDP $A))>;
def : Pat<WToDPExtractConv.BV13S,
          (v2f64 (XVCVSXWDP (XXSLDWI $A, $A, 1)))>;
def : Pat<WToDPExtractConv.BV02U,
          (v2f64 (XVCVUXWDP $A))>;
def : Pat<WToDPExtractConv.BV13U,
          (v2f64 (XVCVUXWDP (XXSLDWI $A, $A, 1)))>;
def : Pat<(v2f64 (insertelt v2f64:$A, f64:$B, 0)),
          (v2f64 (XXPERMDI (SUBREG_TO_REG (i64 1), $B, sub_64), $A, 1))>;
def : Pat<(v2f64 (insertelt v2f64:$A, f64:$B, 1)),
          (v2f64 (XXPERMDI $A, (SUBREG_TO_REG (i64 1), $B, sub_64), 0))>;
} // HasVSX, IsBigEndian

// Any little endian VSX subtarget.
let Predicates = [HasVSX, IsLittleEndian] in {
defm : ScalToVecWPermute<v2f64, (f64 f64:$A),
                         (XXPERMDI (SUBREG_TO_REG (i64 1), $A, sub_64),
                                   (SUBREG_TO_REG (i64 1), $A, sub_64), 0),
                         (SUBREG_TO_REG (i64 1), $A, sub_64)>;

def : Pat<(f64 (extractelt v2f64:$S, 0)),
          (f64 (EXTRACT_SUBREG (XXPERMDI $S, $S, 2), sub_64))>;
def : Pat<(f64 (extractelt v2f64:$S, 1)),
          (f64 (EXTRACT_SUBREG $S, sub_64))>;

def : Pat<(v2f64 (PPCld_vec_be ForceXForm:$src)), (LXVD2X ForceXForm:$src)>;
def : Pat<(PPCst_vec_be v2f64:$rS, ForceXForm:$dst), (STXVD2X $rS, ForceXForm:$dst)>;
def : Pat<(v4f32 (PPCld_vec_be ForceXForm:$src)), (LXVW4X ForceXForm:$src)>;
def : Pat<(PPCst_vec_be v4f32:$rS, ForceXForm:$dst), (STXVW4X $rS, ForceXForm:$dst)>;
def : Pat<(v2i64 (PPCld_vec_be ForceXForm:$src)), (LXVD2X ForceXForm:$src)>;
def : Pat<(PPCst_vec_be v2i64:$rS, ForceXForm:$dst), (STXVD2X $rS, ForceXForm:$dst)>;
def : Pat<(v4i32 (PPCld_vec_be ForceXForm:$src)), (LXVW4X ForceXForm:$src)>;
def : Pat<(PPCst_vec_be v4i32:$rS, ForceXForm:$dst), (STXVW4X $rS, ForceXForm:$dst)>;
def : Pat<(f64 (PPCfcfid (PPCmtvsra (i64 (vector_extract v2i64:$S, 0))))),
          (f64 (XSCVSXDDP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
def : Pat<(f64 (PPCfcfid (PPCmtvsra (i64 (vector_extract v2i64:$S, 1))))),
          (f64 (XSCVSXDDP (COPY_TO_REGCLASS (f64 (COPY_TO_REGCLASS $S, VSRC)), VSFRC)))>;
def : Pat<(f64 (PPCfcfidu (PPCmtvsra (i64 (vector_extract v2i64:$S, 0))))),
          (f64 (XSCVUXDDP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
def : Pat<(f64 (PPCfcfidu (PPCmtvsra (i64 (vector_extract v2i64:$S, 1))))),
          (f64 (XSCVUXDDP (COPY_TO_REGCLASS (f64 (COPY_TO_REGCLASS $S, VSRC)), VSFRC)))>;

def : Pat<(f64 (vector_extract v2f64:$S, i64:$Idx)),
          (f64 VectorExtractions.LE_VARIABLE_DOUBLE)>;

// Little endian, available on all targets with VSX.
def : Pat<(v2f64 (build_vector f64:$A, f64:$B)),
          (v2f64 (XXPERMDI
                    (SUBREG_TO_REG (i64 1), $B, sub_64),
                    (SUBREG_TO_REG (i64 1), $A, sub_64), 0))>;
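// Note the operand swap relative to the big endian pattern: on little
// endian, element 0 of the result is the low-order doubleword, so $B must
// supply the high half of the xxpermdi result and $A the low half.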
// Using VMRGEW to assemble the final vector would be a lower latency
// solution. However, we choose to go with the slightly higher latency
// XXPERMDI for 2 reasons:
// 1. This is likely to occur in unrolled loops where register pressure is
//    high, so we want to use the latter as it has access to all 64 VSX
//    registers.
// 2. Using Altivec instructions in this sequence would likely cause the
//    allocation of Altivec registers even for the loads which in turn would
//    force the use of LXSIWZX for the loads, adding a cycle of latency to
//    each of the loads which would otherwise be able to use LFIWZX.
def : Pat<(v4f32 (build_vector LoadFP.A, LoadFP.B, LoadFP.C, LoadFP.D)),
          (v4f32 (XXPERMDI (XXMRGHW MrgFP.LD32D, MrgFP.LD32C),
                           (XXMRGHW MrgFP.LD32B, MrgFP.LD32A), 3))>;
def : Pat<(v4f32 (build_vector f32:$D, f32:$C, f32:$B, f32:$A)),
          (VMRGEW MrgFP.AC, MrgFP.BD)>;
def : Pat<(v4f32 (build_vector DblToFlt.A0, DblToFlt.A1,
                               DblToFlt.B0, DblToFlt.B1)),
          (v4f32 (VMRGEW MrgFP.BAhToFlt, MrgFP.BAlToFlt))>;

// Convert 4 doubles to a vector of ints.
def : Pat<(v4i32 (build_vector DblToInt.A, DblToInt.B,
                               DblToInt.C, DblToInt.D)),
          (v4i32 (VMRGEW MrgWords.CVDBS, MrgWords.CVCAS))>;
def : Pat<(v4i32 (build_vector DblToUInt.A, DblToUInt.B,
                               DblToUInt.C, DblToUInt.D)),
          (v4i32 (VMRGEW MrgWords.CVDBU, MrgWords.CVCAU))>;
def : Pat<(v4i32 (build_vector ExtDbl.A0S, ExtDbl.A1S,
                               ExtDbl.B0S, ExtDbl.B1S)),
          (v4i32 (VMRGEW MrgWords.CVB1A1S, MrgWords.CVB0A0S))>;
def : Pat<(v4i32 (build_vector ExtDbl.A0U, ExtDbl.A1U,
                               ExtDbl.B0U, ExtDbl.B1U)),
          (v4i32 (VMRGEW MrgWords.CVB1A1U, MrgWords.CVB0A0U))>;
def : Pat<(v2f64 (build_vector (f64 (fpextend (extractelt v4f32:$A, 0))),
                               (f64 (fpextend (extractelt v4f32:$A, 1))))),
          (v2f64 (XVCVSPDP (XXMRGLW $A, $A)))>;
def : Pat<(v2f64 (build_vector (f64 (fpextend (extractelt v4f32:$A, 1))),
                               (f64 (fpextend (extractelt v4f32:$A, 0))))),
          (v2f64 (XXPERMDI (XVCVSPDP (XXMRGLW $A, $A)),
                           (XVCVSPDP (XXMRGLW $A, $A)), 2))>;
def : Pat<(v2f64 (build_vector (f64 (fpextend (extractelt v4f32:$A, 0))),
                               (f64 (fpextend (extractelt v4f32:$A, 2))))),
          (v2f64 (XVCVSPDP (XXSLDWI $A, $A, 1)))>;
def : Pat<(v2f64 (build_vector (f64 (fpextend (extractelt v4f32:$A, 1))),
                               (f64 (fpextend (extractelt v4f32:$A, 3))))),
          (v2f64 (XVCVSPDP $A))>;
def : Pat<(v2f64 (build_vector (f64 (fpextend (extractelt v4f32:$A, 2))),
                               (f64 (fpextend (extractelt v4f32:$A, 3))))),
          (v2f64 (XVCVSPDP (XXMRGHW $A, $A)))>;
def : Pat<(v2f64 (build_vector (f64 (fpextend (extractelt v4f32:$A, 3))),
                               (f64 (fpextend (extractelt v4f32:$A, 2))))),
          (v2f64 (XXPERMDI (XVCVSPDP (XXMRGHW $A, $A)),
                           (XVCVSPDP (XXMRGHW $A, $A)), 2))>;
def : Pat<(v2f64 (build_vector (f64 (fpextend (extractelt v4f32:$A, 0))),
                               (f64 (fpextend (extractelt v4f32:$B, 0))))),
          (v2f64 (XVCVSPDP (XXSLDWI (XXPERMDI $B, $A, 3),
                                    (XXPERMDI $B, $A, 3), 1)))>;
def : Pat<(v2f64 (build_vector (f64 (fpextend (extractelt v4f32:$A, 3))),
                               (f64 (fpextend (extractelt v4f32:$B, 3))))),
          (v2f64 (XVCVSPDP (XXPERMDI $B, $A, 0)))>;
def : Pat<(v2i64 (fp_to_sint
                   (build_vector (f64 (fpextend (extractelt v4f32:$A, 1))),
                                 (f64 (fpextend (extractelt v4f32:$A, 3)))))),
          (v2i64 (XVCVSPSXDS $A))>;
def : Pat<(v2i64 (fp_to_uint
                   (build_vector (f64 (fpextend (extractelt v4f32:$A, 1))),
                                 (f64 (fpextend (extractelt v4f32:$A, 3)))))),
          (v2i64 (XVCVSPUXDS $A))>;
def : Pat<(v2i64 (fp_to_sint
                   (build_vector (f64 (fpextend (extractelt v4f32:$A, 0))),
                                 (f64 (fpextend (extractelt v4f32:$A, 2)))))),
          (v2i64 (XVCVSPSXDS (XXSLDWI $A, $A, 1)))>;
def : Pat<(v2i64 (fp_to_uint
                   (build_vector (f64 (fpextend (extractelt v4f32:$A, 0))),
                                 (f64 (fpextend (extractelt v4f32:$A, 2)))))),
          (v2i64 (XVCVSPUXDS (XXSLDWI $A, $A, 1)))>;
def : Pat<WToDPExtractConv.BV02S,
          (v2f64 (XVCVSXWDP (XXSLDWI $A, $A, 1)))>;
def : Pat<WToDPExtractConv.BV13S,
          (v2f64 (XVCVSXWDP $A))>;
def : Pat<WToDPExtractConv.BV02U,
          (v2f64 (XVCVUXWDP (XXSLDWI $A, $A, 1)))>;
def : Pat<WToDPExtractConv.BV13U,
          (v2f64 (XVCVUXWDP $A))>;
def : Pat<(v2f64 (insertelt v2f64:$A, f64:$B, 0)),
          (v2f64 (XXPERMDI $A, (SUBREG_TO_REG (i64 1), $B, sub_64), 0))>;
def : Pat<(v2f64 (insertelt v2f64:$A, f64:$B, 1)),
          (v2f64 (XXPERMDI (SUBREG_TO_REG (i64 1), $B, sub_64), $A, 1))>;
} // HasVSX, IsLittleEndian

// Any pre-Power9 VSX subtarget.
let Predicates = [HasVSX, NoP9Vector] in {
def : Pat<(PPCstore_scal_int_from_vsr f64:$src, ForceXForm:$dst, 8),
          (STXSDX $src, ForceXForm:$dst)>;
def : Pat<(PPCstore_scal_int_from_vsr f128:$src, ForceXForm:$dst, 8),
          (STXSDX (COPY_TO_REGCLASS $src, VSFRC), ForceXForm:$dst)>;

// Load-and-splat with fp-to-int conversion (using X-Form VSX/FP loads).
defm : ScalToVecWPermute<
  v4i32, DblToIntLoad.A,
  (XXSPLTW (SUBREG_TO_REG (i64 1), (XSCVDPSXWS (XFLOADf64 ForceXForm:$A)), sub_64), 1),
  (SUBREG_TO_REG (i64 1), (XSCVDPSXWS (XFLOADf64 ForceXForm:$A)), sub_64)>;
defm : ScalToVecWPermute<
  v4i32, DblToUIntLoad.A,
  (XXSPLTW (SUBREG_TO_REG (i64 1), (XSCVDPUXWS (XFLOADf64 ForceXForm:$A)), sub_64), 1),
  (SUBREG_TO_REG (i64 1), (XSCVDPUXWS (XFLOADf64 ForceXForm:$A)), sub_64)>;
defm : ScalToVecWPermute<
  v2i64, FltToLongLoad.A,
  (XXPERMDIs (XSCVDPSXDS (COPY_TO_REGCLASS (XFLOADf32 ForceXForm:$A), VSFRC)), 0),
  (SUBREG_TO_REG (i64 1), (XSCVDPSXDS (COPY_TO_REGCLASS (XFLOADf32 ForceXForm:$A),
                                                        VSFRC)), sub_64)>;
defm : ScalToVecWPermute<
  v2i64, FltToULongLoad.A,
  (XXPERMDIs (XSCVDPUXDS (COPY_TO_REGCLASS (XFLOADf32 ForceXForm:$A), VSFRC)), 0),
  (SUBREG_TO_REG (i64 1), (XSCVDPUXDS (COPY_TO_REGCLASS (XFLOADf32 ForceXForm:$A),
                                                        VSFRC)), sub_64)>;
} // HasVSX, NoP9Vector

// Any little endian pre-Power9 VSX subtarget.
let Predicates = [HasVSX, NoP9Vector, IsLittleEndian] in {
// Load-and-splat using only X-Form VSX loads.
defm : ScalToVecWPermute<
  v2i64, (i64 (load ForceXForm:$src)),
  (XXPERMDIs (XFLOADf64 ForceXForm:$src), 2),
  (SUBREG_TO_REG (i64 1), (XFLOADf64 ForceXForm:$src), sub_64)>;
defm : ScalToVecWPermute<
  v2f64, (f64 (load ForceXForm:$src)),
  (XXPERMDIs (XFLOADf64 ForceXForm:$src), 2),
  (SUBREG_TO_REG (i64 1), (XFLOADf64 ForceXForm:$src), sub_64)>;

// Splat loads.
def : Pat<(v8i16 (PPCldsplatAlign16 ForceXForm:$A)),
          (v8i16 (VSPLTH 7, (LVX ForceXForm:$A)))>;
def : Pat<(v16i8 (PPCldsplatAlign16 ForceXForm:$A)),
          (v16i8 (VSPLTB 15, (LVX ForceXForm:$A)))>;
} // HasVSX, NoP9Vector, IsLittleEndian

// Any big endian pre-Power9 VSX subtarget.
let Predicates = [HasVSX, NoP9Vector, IsBigEndian] in {
  def : Pat<(v2f64 (int_ppc_vsx_lxvd2x ForceXForm:$src)),
            (LXVD2X ForceXForm:$src)>;
  def : Pat<(int_ppc_vsx_stxvd2x v2f64:$rS, ForceXForm:$dst),
            (STXVD2X $rS, ForceXForm:$dst)>;

  // Splat loads.
  def : Pat<(v8i16 (PPCldsplatAlign16 ForceXForm:$A)),
            (v8i16 (VSPLTH 0, (LVX ForceXForm:$A)))>;
  def : Pat<(v16i8 (PPCldsplatAlign16 ForceXForm:$A)),
            (v16i8 (VSPLTB 0, (LVX ForceXForm:$A)))>;
} // HasVSX, NoP9Vector, IsBigEndian
// Any VSX subtarget that only has loads and stores that load in big endian
// order regardless of endianness. This is really pre-Power9 subtargets.
let Predicates = [HasVSX, HasOnlySwappingMemOps] in {
  def : Pat<(v2f64 (PPClxvd2x ForceXForm:$src)), (LXVD2X ForceXForm:$src)>;

  // Stores.
  def : Pat<(PPCstxvd2x v2f64:$rS, ForceXForm:$dst), (STXVD2X $rS, ForceXForm:$dst)>;
} // HasVSX, HasOnlySwappingMemOps

// Big endian VSX subtarget that only has loads and stores that always
// load in big endian order. Really big endian pre-Power9 subtargets.
let Predicates = [HasVSX, HasOnlySwappingMemOps, IsBigEndian] in {
  def : Pat<(v2f64 (load ForceXForm:$src)), (LXVD2X ForceXForm:$src)>;
  def : Pat<(v2i64 (load ForceXForm:$src)), (LXVD2X ForceXForm:$src)>;
  def : Pat<(v4i32 (load ForceXForm:$src)), (LXVW4X ForceXForm:$src)>;
  def : Pat<(v4i32 (int_ppc_vsx_lxvw4x ForceXForm:$src)), (LXVW4X ForceXForm:$src)>;
  def : Pat<(store v2f64:$rS, ForceXForm:$dst), (STXVD2X $rS, ForceXForm:$dst)>;
  def : Pat<(store v2i64:$rS, ForceXForm:$dst), (STXVD2X $rS, ForceXForm:$dst)>;
  def : Pat<(store v4i32:$XT, ForceXForm:$dst), (STXVW4X $XT, ForceXForm:$dst)>;
  def : Pat<(int_ppc_vsx_stxvw4x v4i32:$rS, ForceXForm:$dst),
            (STXVW4X $rS, ForceXForm:$dst)>;
  def : Pat<(v2i64 (scalar_to_vector (i64 (load ForceXForm:$src)))),
            (SUBREG_TO_REG (i64 1), (XFLOADf64 ForceXForm:$src), sub_64)>;
} // HasVSX, HasOnlySwappingMemOps, IsBigEndian

// Any pre-Power8 VSX subtarget.
let Predicates = [HasVSX, NoP8Vector] in {
def : Pat<(f32 (fpimm0neg)),
          (f32 (COPY_TO_REGCLASS (XSNEGDP (XXLXORdpz)), F4RC))>;

def : Pat<(f32 (nzFPImmExactInti5:$A)),
          (COPY_TO_REGCLASS (XVCVSXWDP (COPY_TO_REGCLASS
              (VSPLTISW (getFPAs5BitExactInt fpimm:$A)), VSRC)), F4RC)>;

} // HasVSX, NoP8Vector
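// A hedged example of the nzFPImmExactInti5 pattern above: the constant 3.0f
// is a nonzero value that is exactly a signed 5-bit integer, so it can be
// materialized as vspltisw 3 followed by xvcvsxwdp, avoiding a constant-pool
// load entirely.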
// Any Power8 VSX subtarget.
let Predicates = [HasVSX, HasP8Vector] in {
def : Pat<(int_ppc_vsx_xxleqv v4i32:$A, v4i32:$B),
          (XXLEQV $A, $B)>;
def : Pat<(f64 (extloadf32 XForm:$src)),
          (COPY_TO_REGCLASS (XFLOADf32 XForm:$src), VSFRC)>;
def : Pat<(f32 (fpround (f64 (extloadf32 ForceXForm:$src)))),
          (f32 (XFLOADf32 ForceXForm:$src))>;
def : Pat<(f64 (any_fpextend f32:$src)),
          (COPY_TO_REGCLASS $src, VSFRC)>;

def : Pat<(f32 (fpimm0neg)),
          (f32 (COPY_TO_REGCLASS (XSNEGDP (XXLXORdpz)), VSSRC))>;

def : Pat<(f32 (nzFPImmExactInti5:$A)),
          (COPY_TO_REGCLASS (XVCVSXWDP (COPY_TO_REGCLASS
              (VSPLTISW (getFPAs5BitExactInt fpimm:$A)), VSRC)), VSSRC)>;

def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETLT)),
          (SELECT_VSSRC (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETULT)),
          (SELECT_VSSRC (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETLE)),
          (SELECT_VSSRC (CRORC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETULE)),
          (SELECT_VSSRC (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETEQ)),
          (SELECT_VSSRC (CREQV $lhs, $rhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETGE)),
          (SELECT_VSSRC (CRORC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETUGE)),
          (SELECT_VSSRC (CRORC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETGT)),
          (SELECT_VSSRC (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETUGT)),
          (SELECT_VSSRC (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETNE)),
          (SELECT_VSSRC (CRXOR $lhs, $rhs), $tval, $fval)>;

// Additional fnmsub patterns for the PPC-specific ISD opcode.
def : Pat<(PPCfnmsub f32:$A, f32:$B, f32:$C),
          (XSNMSUBASP $C, $A, $B)>;
def : Pat<(fneg (PPCfnmsub f32:$A, f32:$B, f32:$C)),
          (XSMSUBASP $C, $A, $B)>;
def : Pat<(PPCfnmsub f32:$A, f32:$B, (fneg f32:$C)),
          (XSNMADDASP $C, $A, $B)>;

// f32 neg
// Although XSNEGDP is available in P7, we want to select it starting from P8,
// so that FNMSUBS can be selected for the fneg-fmsub pattern on P7. (The VSX
// version, XSNMSUBASP, is only available starting with P8.)
def : Pat<(f32 (fneg f32:$S)),
          (f32 (COPY_TO_REGCLASS (XSNEGDP
               (COPY_TO_REGCLASS $S, VSFRC)), VSSRC))>;
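// For instance, since PPCfnmsub computes -(a * b - c), the fneg of that node
// is just a * b - c, so the second pattern above folds the negation away and
// selects a single xsmsubasp instead of xsnmsubasp plus a separate xsnegdp.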
// Instructions for converting float to i32 feeding a store.
def : Pat<(PPCstore_scal_int_from_vsr f64:$src, ForceXForm:$dst, 4),
          (STIWX $src, ForceXForm:$dst)>;
def : Pat<(PPCstore_scal_int_from_vsr f128:$src, ForceXForm:$dst, 4),
          (STIWX (COPY_TO_REGCLASS $src, VSFRC), ForceXForm:$dst)>;

def : Pat<(PPCstore_scal_int_from_vsr f64:$src, ForceXForm:$dst, 4),
          (STXSIWX $src, ForceXForm:$dst)>;
def : Pat<(PPCstore_scal_int_from_vsr f128:$src, ForceXForm:$dst, 4),
          (STXSIWX (COPY_TO_REGCLASS $src, VSFRC), ForceXForm:$dst)>;

def : Pat<(v2i64 (smax v2i64:$src1, v2i64:$src2)),
          (v2i64 (VMAXSD (COPY_TO_REGCLASS $src1, VRRC),
                         (COPY_TO_REGCLASS $src2, VRRC)))>;
def : Pat<(v2i64 (umax v2i64:$src1, v2i64:$src2)),
          (v2i64 (VMAXUD (COPY_TO_REGCLASS $src1, VRRC),
                         (COPY_TO_REGCLASS $src2, VRRC)))>;
def : Pat<(v2i64 (smin v2i64:$src1, v2i64:$src2)),
          (v2i64 (VMINSD (COPY_TO_REGCLASS $src1, VRRC),
                         (COPY_TO_REGCLASS $src2, VRRC)))>;
def : Pat<(v2i64 (umin v2i64:$src1, v2i64:$src2)),
          (v2i64 (VMINUD (COPY_TO_REGCLASS $src1, VRRC),
                         (COPY_TO_REGCLASS $src2, VRRC)))>;

def : Pat<(v1i128 (bitconvert (v16i8 immAllOnesV))),
          (v1i128 (COPY_TO_REGCLASS (XXLEQVOnes), VSRC))>;
def : Pat<(v2i64 (bitconvert (v16i8 immAllOnesV))),
          (v2i64 (COPY_TO_REGCLASS (XXLEQVOnes), VSRC))>;
def : Pat<(v8i16 (bitconvert (v16i8 immAllOnesV))),
          (v8i16 (COPY_TO_REGCLASS (XXLEQVOnes), VSRC))>;
def : Pat<(v16i8 (bitconvert (v16i8 immAllOnesV))),
          (v16i8 (COPY_TO_REGCLASS (XXLEQVOnes), VSRC))>;

// XL Compat builtins.
def : Pat<(int_ppc_fmsubs f32:$A, f32:$B, f32:$C), (XSMSUBMSP $A, $B, $C)>;
def : Pat<(int_ppc_fnmadds f32:$A, f32:$B, f32:$C), (XSNMADDMSP $A, $B, $C)>;
def : Pat<(int_ppc_fres f32:$A), (XSRESP $A)>;
def : Pat<(i32 (int_ppc_extract_exp f64:$A)),
          (EXTRACT_SUBREG (XSXEXPDP (COPY_TO_REGCLASS $A, VSFRC)), sub_32)>;
def : Pat<(int_ppc_extract_sig f64:$A),
          (XSXSIGDP (COPY_TO_REGCLASS $A, VSFRC))>;
def : Pat<(f64 (int_ppc_insert_exp f64:$A, i64:$B)),
          (COPY_TO_REGCLASS (XSIEXPDP (COPY_TO_REGCLASS $A, G8RC), $B), F8RC)>;

def : Pat<(int_ppc_stfiw ForceXForm:$dst, f64:$XT),
          (STXSIWX f64:$XT, ForceXForm:$dst)>;
def : Pat<(int_ppc_frsqrtes vssrc:$XB), (XSRSQRTESP $XB)>;
} // HasVSX, HasP8Vector

// Any big endian Power8 VSX subtarget.
let Predicates = [HasVSX, HasP8Vector, IsBigEndian] in {
def : Pat<DWToSPExtractConv.El0SS1,
          (f32 (XSCVSXDSP (COPY_TO_REGCLASS $S1, VSFRC)))>;
def : Pat<DWToSPExtractConv.El1SS1,
          (f32 (XSCVSXDSP (COPY_TO_REGCLASS (XXPERMDI $S1, $S1, 2), VSFRC)))>;
def : Pat<DWToSPExtractConv.El0US1,
          (f32 (XSCVUXDSP (COPY_TO_REGCLASS $S1, VSFRC)))>;
def : Pat<DWToSPExtractConv.El1US1,
          (f32 (XSCVUXDSP (COPY_TO_REGCLASS (XXPERMDI $S1, $S1, 2), VSFRC)))>;

// v4f32 scalar <-> vector conversions (BE)
defm : ScalToVecWPermute<v4f32, (f32 f32:$A), (XSCVDPSPN $A), (XSCVDPSPN $A)>;
def : Pat<(f32 (vector_extract v4f32:$S, 0)),
          (f32 (XSCVSPDPN $S))>;
def : Pat<(f32 (vector_extract v4f32:$S, 1)),
          (f32 (XSCVSPDPN (XXSLDWI $S, $S, 1)))>;
def : Pat<(f32 (vector_extract v4f32:$S, 2)),
          (f32 (XSCVSPDPN (XXPERMDI $S, $S, 2)))>;
def : Pat<(f32 (vector_extract v4f32:$S, 3)),
          (f32 (XSCVSPDPN (XXSLDWI $S, $S, 3)))>;

def : Pat<(f32 (PPCfcfids (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 0)))))),
          (f32 (XSCVSPDPN (XVCVSXWSP (XXSPLTW $A, 0))))>;
def : Pat<(f32 (PPCfcfids (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 1)))))),
          (f32 (XSCVSPDPN (XVCVSXWSP (XXSPLTW $A, 1))))>;
def : Pat<(f32 (PPCfcfids (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 2)))))),
          (f32 (XSCVSPDPN (XVCVSXWSP (XXSPLTW $A, 2))))>;
def : Pat<(f32 (PPCfcfids (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 3)))))),
          (f32 (XSCVSPDPN (XVCVSXWSP (XXSPLTW $A, 3))))>;
def : Pat<(f64 (PPCfcfid (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 0)))))),
          (f64 (COPY_TO_REGCLASS (XVCVSXWDP (XXSPLTW $A, 0)), VSFRC))>;
def : Pat<(f64 (PPCfcfid (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 1)))))),
          (f64 (COPY_TO_REGCLASS (XVCVSXWDP (XXSPLTW $A, 1)), VSFRC))>;
def : Pat<(f64 (PPCfcfid (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 2)))))),
          (f64 (COPY_TO_REGCLASS (XVCVSXWDP (XXSPLTW $A, 2)), VSFRC))>;
def : Pat<(f64 (PPCfcfid (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 3)))))),
          (f64 (COPY_TO_REGCLASS (XVCVSXWDP (XXSPLTW $A, 3)), VSFRC))>;

def : Pat<(f32 (vector_extract v4f32:$S, i32:$Idx)),
          (f32 VectorExtractions.BE_32B_VARIABLE_FLOAT)>;

def : Pat<(f64 (vector_extract v2f64:$S, i32:$Idx)),
          (f64 VectorExtractions.BE_32B_VARIABLE_DOUBLE)>;

defm : ScalToVecWPermute<
  v4i32, (i32 (load ForceXForm:$src)),
  (XXSLDWIs (LIWZX ForceXForm:$src), 1),
  (SUBREG_TO_REG (i64 1), (LIWZX ForceXForm:$src), sub_64)>;
defm : ScalToVecWPermute<
  v4f32, (f32 (load ForceXForm:$src)),
  (XXSLDWIs (LIWZX ForceXForm:$src), 1),
  (SUBREG_TO_REG (i64 1), (LIWZX ForceXForm:$src), sub_64)>;
} // HasVSX, HasP8Vector, IsBigEndian

// Big endian Power8 64Bit VSX subtarget.
let Predicates = [HasVSX, HasP8Vector, IsBigEndian, IsPPC64] in {
def : Pat<(f32 (vector_extract v4f32:$S, i64:$Idx)),
          (f32 VectorExtractions.BE_VARIABLE_FLOAT)>;

// LIWAX - This instruction is used for sign extending i32 -> i64.
// LIWZX - This instruction will be emitted for i32, f32, and when
//         zero-extending i32 to i64 (zext i32 -> i64).
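// A minimal illustration (hypothetical IR) of the first pattern below:
//   %w = load i32, ptr %p
//   %e = sext i32 %w to i64
//   %v = insertelement <2 x i64> undef, i64 %e, i64 0
// is expected to select to a single LIWAX (which resolves to lfiwax or
// lxsiwax), so the sign-extended value lands directly in a VSX register
// without visiting a GPR.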
def : Pat<(v2i64 (scalar_to_vector (i64 (sextloadi32 ForceXForm:$src)))),
          (v2i64 (SUBREG_TO_REG (i64 1), (LIWAX ForceXForm:$src), sub_64))>;
def : Pat<(v2i64 (scalar_to_vector (i64 (zextloadi32 ForceXForm:$src)))),
          (v2i64 (SUBREG_TO_REG (i64 1), (LIWZX ForceXForm:$src), sub_64))>;

def : Pat<DWToSPExtractConv.BVU,
          (v4f32 (VPKUDUM (XXSLDWI (XVCVUXDSP $S1), (XVCVUXDSP $S1), 3),
                          (XXSLDWI (XVCVUXDSP $S2), (XVCVUXDSP $S2), 3)))>;
def : Pat<DWToSPExtractConv.BVS,
          (v4f32 (VPKUDUM (XXSLDWI (XVCVSXDSP $S1), (XVCVSXDSP $S1), 3),
                          (XXSLDWI (XVCVSXDSP $S2), (XVCVSXDSP $S2), 3)))>;
def : Pat<(store (i32 (extractelt v4i32:$A, 1)), ForceXForm:$src),
          (STIWX (EXTRACT_SUBREG $A, sub_64), ForceXForm:$src)>;
def : Pat<(store (f32 (extractelt v4f32:$A, 1)), ForceXForm:$src),
          (STIWX (EXTRACT_SUBREG $A, sub_64), ForceXForm:$src)>;

// Elements in a register on a BE system are in order <0, 1, 2, 3>.
// The store instructions store the second word from the left.
// So to align element zero, we need to modulo-left-shift by 3 words.
// Similar logic applies for elements 2 and 3.
foreach Idx = [ [0,3], [2,1], [3,2] ] in {
  def : Pat<(store (i32 (extractelt v4i32:$A, !head(Idx))), ForceXForm:$src),
            (STIWX (EXTRACT_SUBREG (XXSLDWI $A, $A, !head(!tail(Idx))),
                                   sub_64), ForceXForm:$src)>;
  def : Pat<(store (f32 (extractelt v4f32:$A, !head(Idx))), ForceXForm:$src),
            (STIWX (EXTRACT_SUBREG (XXSLDWI $A, $A, !head(!tail(Idx))),
                                   sub_64), ForceXForm:$src)>;
}
} // HasVSX, HasP8Vector, IsBigEndian, IsPPC64

// Little endian Power8 VSX subtarget.
let Predicates = [HasVSX, HasP8Vector, IsLittleEndian] in {
def : Pat<DWToSPExtractConv.El0SS1,
          (f32 (XSCVSXDSP (COPY_TO_REGCLASS (XXPERMDI $S1, $S1, 2), VSFRC)))>;
def : Pat<DWToSPExtractConv.El1SS1,
          (f32 (XSCVSXDSP (COPY_TO_REGCLASS
                            (f64 (COPY_TO_REGCLASS $S1, VSRC)), VSFRC)))>;
def : Pat<DWToSPExtractConv.El0US1,
          (f32 (XSCVUXDSP (COPY_TO_REGCLASS (XXPERMDI $S1, $S1, 2), VSFRC)))>;
def : Pat<DWToSPExtractConv.El1US1,
          (f32 (XSCVUXDSP (COPY_TO_REGCLASS
                            (f64 (COPY_TO_REGCLASS $S1, VSRC)), VSFRC)))>;

// v4f32 scalar <-> vector conversions (LE)
defm : ScalToVecWPermute<v4f32, (f32 f32:$A),
                         (XXSLDWI (XSCVDPSPN $A), (XSCVDPSPN $A), 1),
                         (XSCVDPSPN $A)>;
def : Pat<(f32 (vector_extract v4f32:$S, 0)),
          (f32 (XSCVSPDPN (XXSLDWI $S, $S, 3)))>;
def : Pat<(f32 (vector_extract v4f32:$S, 1)),
          (f32 (XSCVSPDPN (XXPERMDI $S, $S, 2)))>;
def : Pat<(f32 (vector_extract v4f32:$S, 2)),
          (f32 (XSCVSPDPN (XXSLDWI $S, $S, 1)))>;
def : Pat<(f32 (vector_extract v4f32:$S, 3)),
          (f32 (XSCVSPDPN $S))>;
def : Pat<(f32 (vector_extract v4f32:$S, i64:$Idx)),
          (f32 VectorExtractions.LE_VARIABLE_FLOAT)>;

def : Pat<(f32 (PPCfcfids (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 0)))))),
          (f32 (XSCVSPDPN (XVCVSXWSP (XXSPLTW $A, 3))))>;
def : Pat<(f32 (PPCfcfids (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 1)))))),
          (f32 (XSCVSPDPN (XVCVSXWSP (XXSPLTW $A, 2))))>;
def : Pat<(f32 (PPCfcfids (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 2)))))),
          (f32 (XSCVSPDPN (XVCVSXWSP (XXSPLTW $A, 1))))>;
def : Pat<(f32 (PPCfcfids (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 3)))))),
          (f32 (XSCVSPDPN (XVCVSXWSP (XXSPLTW $A, 0))))>;
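// Note the reversed splat immediates relative to the big endian block above:
// on little endian, element N of a v4i32 sits in word (3 - N) of the
// register in the instruction's left-to-right numbering, so extracting
// element 0 splats word 3, element 1 splats word 2, and so on.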
def : Pat<(f64 (PPCfcfid (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 0)))))),
          (f64 (COPY_TO_REGCLASS (XVCVSXWDP (XXSPLTW $A, 3)), VSFRC))>;
def : Pat<(f64 (PPCfcfid (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 1)))))),
          (f64 (COPY_TO_REGCLASS (XVCVSXWDP (XXSPLTW $A, 2)), VSFRC))>;
def : Pat<(f64 (PPCfcfid (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 2)))))),
          (f64 (COPY_TO_REGCLASS (XVCVSXWDP (XXSPLTW $A, 1)), VSFRC))>;
def : Pat<(f64 (PPCfcfid (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 3)))))),
          (f64 (COPY_TO_REGCLASS (XVCVSXWDP (XXSPLTW $A, 0)), VSFRC))>;

// LIWAX - This instruction is used for sign extending i32 -> i64.
// LIWZX - This instruction will be emitted for i32, f32, and when
//         zero-extending i32 to i64 (zext i32 -> i64).
defm : ScalToVecWPermute<
  v2i64, (i64 (sextloadi32 ForceXForm:$src)),
  (XXPERMDIs (LIWAX ForceXForm:$src), 2),
  (SUBREG_TO_REG (i64 1), (LIWAX ForceXForm:$src), sub_64)>;

defm : ScalToVecWPermute<
  v2i64, (i64 (zextloadi32 ForceXForm:$src)),
  (XXPERMDIs (LIWZX ForceXForm:$src), 2),
  (SUBREG_TO_REG (i64 1), (LIWZX ForceXForm:$src), sub_64)>;

defm : ScalToVecWPermute<
  v4i32, (i32 (load ForceXForm:$src)),
  (XXPERMDIs (LIWZX ForceXForm:$src), 2),
  (SUBREG_TO_REG (i64 1), (LIWZX ForceXForm:$src), sub_64)>;

defm : ScalToVecWPermute<
  v4f32, (f32 (load ForceXForm:$src)),
  (XXPERMDIs (LIWZX ForceXForm:$src), 2),
  (SUBREG_TO_REG (i64 1), (LIWZX ForceXForm:$src), sub_64)>;

def : Pat<DWToSPExtractConv.BVU,
          (v4f32 (VPKUDUM (XXSLDWI (XVCVUXDSP $S2), (XVCVUXDSP $S2), 3),
                          (XXSLDWI (XVCVUXDSP $S1), (XVCVUXDSP $S1), 3)))>;
def : Pat<DWToSPExtractConv.BVS,
          (v4f32 (VPKUDUM (XXSLDWI (XVCVSXDSP $S2), (XVCVSXDSP $S2), 3),
                          (XXSLDWI (XVCVSXDSP $S1), (XVCVSXDSP $S1), 3)))>;
def : Pat<(store (i32 (extractelt v4i32:$A, 2)), ForceXForm:$src),
          (STIWX (EXTRACT_SUBREG $A, sub_64), ForceXForm:$src)>;
def : Pat<(store (f32 (extractelt v4f32:$A, 2)), ForceXForm:$src),
          (STIWX (EXTRACT_SUBREG $A, sub_64), ForceXForm:$src)>;

// Elements in a register on a LE system are in order <3, 2, 1, 0>.
// The store instructions store the second word from the left.
// So to align element 3, we need to modulo-left-shift by 3 words.
// Similar logic applies for elements 0 and 1.
foreach Idx = [ [0,2], [1,1], [3,3] ] in {
  def : Pat<(store (i32 (extractelt v4i32:$A, !head(Idx))), ForceXForm:$src),
            (STIWX (EXTRACT_SUBREG (XXSLDWI $A, $A, !head(!tail(Idx))),
                                   sub_64), ForceXForm:$src)>;
  def : Pat<(store (f32 (extractelt v4f32:$A, !head(Idx))), ForceXForm:$src),
            (STIWX (EXTRACT_SUBREG (XXSLDWI $A, $A, !head(!tail(Idx))),
                                   sub_64), ForceXForm:$src)>;
}
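// Worked instance of the index pairs above: element 0 sits in word 3 on
// little endian, and rotating left by 2 words (xxsldwi $A, $A, 2) moves it
// into word 1, the word stiwx stores, hence the [0,2] pair. Element 2 is
// already in word 1, so it is handled by the unshifted patterns above the
// comment.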
} // HasVSX, HasP8Vector, IsLittleEndian

// Big endian pre-Power9 VSX subtarget.
let Predicates = [HasVSX, HasP8Vector, NoP9Vector, IsBigEndian, IsPPC64] in {
def : Pat<(store (i64 (extractelt v2i64:$A, 0)), ForceXForm:$src),
          (XFSTOREf64 (EXTRACT_SUBREG $A, sub_64), ForceXForm:$src)>;
def : Pat<(store (f64 (extractelt v2f64:$A, 0)), ForceXForm:$src),
          (XFSTOREf64 (EXTRACT_SUBREG $A, sub_64), ForceXForm:$src)>;
def : Pat<(store (i64 (extractelt v2i64:$A, 1)), ForceXForm:$src),
          (XFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2), sub_64),
                      ForceXForm:$src)>;
def : Pat<(store (f64 (extractelt v2f64:$A, 1)), ForceXForm:$src),
          (XFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2), sub_64),
                      ForceXForm:$src)>;
} // HasVSX, HasP8Vector, NoP9Vector, IsBigEndian, IsPPC64

// Little endian pre-Power9 VSX subtarget.
let Predicates = [HasVSX, HasP8Vector, NoP9Vector, IsLittleEndian] in {
def : Pat<(store (i64 (extractelt v2i64:$A, 0)), ForceXForm:$src),
          (XFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2), sub_64),
                      ForceXForm:$src)>;
def : Pat<(store (f64 (extractelt v2f64:$A, 0)), ForceXForm:$src),
          (XFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2), sub_64),
                      ForceXForm:$src)>;
def : Pat<(store (i64 (extractelt v2i64:$A, 1)), ForceXForm:$src),
          (XFSTOREf64 (EXTRACT_SUBREG $A, sub_64), ForceXForm:$src)>;
def : Pat<(store (f64 (extractelt v2f64:$A, 1)), ForceXForm:$src),
          (XFSTOREf64 (EXTRACT_SUBREG $A, sub_64), ForceXForm:$src)>;
} // HasVSX, HasP8Vector, NoP9Vector, IsLittleEndian

// Any VSX subtarget with direct moves.
let Predicates = [HasVSX, HasDirectMove] in {
// bitconvert f32 -> i32
// (convert to 32-bit fp single, shift right 1 word, move to GPR)
def : Pat<(i32 (bitconvert f32:$A)), Bitcast.FltToInt>;

// bitconvert i32 -> f32
// (move to FPR, shift left 1 word, convert to 64-bit fp single)
def : Pat<(f32 (bitconvert i32:$A)),
          (f32 (XSCVSPDPN
                 (XXSLDWI MovesToVSR.LE_WORD_1, MovesToVSR.LE_WORD_1, 1)))>;

// bitconvert f64 -> i64
// (move to GPR, nothing else needed)
def : Pat<(i64 (bitconvert f64:$A)), Bitcast.DblToLong>;

// bitconvert i64 -> f64
// (move to FPR, nothing else needed)
def : Pat<(f64 (bitconvert i64:$S)),
          (f64 (MTVSRD $S))>;

// Rounding to integer.
def : Pat<(i64 (lrint f64:$S)),
          (i64 (MFVSRD (FCTID $S)))>;
def : Pat<(i64 (lrint f32:$S)),
          (i64 (MFVSRD (FCTID (COPY_TO_REGCLASS $S, F8RC))))>;
def : Pat<(i64 (llrint f64:$S)),
          (i64 (MFVSRD (FCTID $S)))>;
def : Pat<(i64 (llrint f32:$S)),
          (i64 (MFVSRD (FCTID (COPY_TO_REGCLASS $S, F8RC))))>;
def : Pat<(i64 (lround f64:$S)),
          (i64 (MFVSRD (FCTID (XSRDPI $S))))>;
def : Pat<(i64 (lround f32:$S)),
          (i64 (MFVSRD (FCTID (XSRDPI (COPY_TO_REGCLASS $S, VSFRC)))))>;
def : Pat<(i64 (llround f64:$S)),
          (i64 (MFVSRD (FCTID (XSRDPI $S))))>;
def : Pat<(i64 (llround f32:$S)),
          (i64 (MFVSRD (FCTID (XSRDPI (COPY_TO_REGCLASS $S, VSFRC)))))>;

// Alternate patterns for PPCmtvsrz where the output is v8i16 or v16i8
// instead of f64.
def : Pat<(v8i16 (PPCmtvsrz i32:$A)),
          (v8i16 (SUBREG_TO_REG (i64 1), (MTVSRWZ $A), sub_64))>;
def : Pat<(v16i8 (PPCmtvsrz i32:$A)),
          (v16i8 (SUBREG_TO_REG (i64 1), (MTVSRWZ $A), sub_64))>;

// Endianness-neutral constant splat on P8 and newer targets. The reason
// for this pattern is that on targets with direct moves, we don't expand
// BUILD_VECTOR nodes for v4i32.
def : Pat<(v4i32 (build_vector immSExt5NonZero:$A, immSExt5NonZero:$A,
                               immSExt5NonZero:$A, immSExt5NonZero:$A)),
          (v4i32 (VSPLTISW imm:$A))>;

// Splat loads.
def : Pat<(v8i16 (PPCldsplat ForceXForm:$A)),
          (v8i16 (VSPLTHs 3, (MTVSRWZ (LHZX ForceXForm:$A))))>;
def : Pat<(v16i8 (PPCldsplat ForceXForm:$A)),
          (v16i8 (VSPLTBs 7, (MTVSRWZ (LBZX ForceXForm:$A))))>;
} // HasVSX, HasDirectMove
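// e.g. with direct moves available, the f64 -> i64 bitcast above is a single
// mfvsrd; without them the value would have to round-trip through a stack
// slot.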
// Big endian VSX subtarget with direct moves.
let Predicates = [HasVSX, HasDirectMove, IsBigEndian] in {
// v16i8 scalar <-> vector conversions (BE)
defm : ScalToVecWPermute<
  v16i8, (i32 i32:$A),
  (SUBREG_TO_REG (i64 1), MovesToVSR.BE_BYTE_0, sub_64),
  (SUBREG_TO_REG (i64 1), (MTVSRWZ $A), sub_64)>;
defm : ScalToVecWPermute<
  v8i16, (i32 i32:$A),
  (SUBREG_TO_REG (i64 1), MovesToVSR.BE_HALF_0, sub_64),
  (SUBREG_TO_REG (i64 1), (MTVSRWZ $A), sub_64)>;
defm : ScalToVecWPermute<
  v4i32, (i32 i32:$A),
  (SUBREG_TO_REG (i64 1), MovesToVSR.BE_WORD_0, sub_64),
  (SUBREG_TO_REG (i64 1), (MTVSRWZ $A), sub_64)>;
def : Pat<(v2i64 (scalar_to_vector i64:$A)),
          (v2i64 (SUBREG_TO_REG (i64 1), MovesToVSR.BE_DWORD_0, sub_64))>;

// v2i64 scalar <-> vector conversions (BE)
def : Pat<(i64 (vector_extract v2i64:$S, 0)),
          (i64 VectorExtractions.LE_DWORD_1)>;
def : Pat<(i64 (vector_extract v2i64:$S, 1)),
          (i64 VectorExtractions.LE_DWORD_0)>;
def : Pat<(i64 (vector_extract v2i64:$S, i64:$Idx)),
          (i64 VectorExtractions.BE_VARIABLE_DWORD)>;
} // HasVSX, HasDirectMove, IsBigEndian

// Little endian VSX subtarget with direct moves.
let Predicates = [HasVSX, HasDirectMove, IsLittleEndian] in {
  // v16i8 scalar <-> vector conversions (LE)
  defm : ScalToVecWPermute<v16i8, (i32 i32:$A),
                           (COPY_TO_REGCLASS MovesToVSR.LE_WORD_0, VSRC),
                           (COPY_TO_REGCLASS MovesToVSR.LE_WORD_1, VSRC)>;
  defm : ScalToVecWPermute<v8i16, (i32 i32:$A),
                           (COPY_TO_REGCLASS MovesToVSR.LE_WORD_0, VSRC),
                           (COPY_TO_REGCLASS MovesToVSR.LE_WORD_1, VSRC)>;
  defm : ScalToVecWPermute<v4i32, (i32 i32:$A), MovesToVSR.LE_WORD_0,
                           (SUBREG_TO_REG (i64 1), (MTVSRWZ $A), sub_64)>;
  defm : ScalToVecWPermute<v2i64, (i64 i64:$A), MovesToVSR.LE_DWORD_0,
                           MovesToVSR.LE_DWORD_1>;

  // v2i64 scalar <-> vector conversions (LE)
  def : Pat<(i64 (vector_extract v2i64:$S, 0)),
            (i64 VectorExtractions.LE_DWORD_0)>;
  def : Pat<(i64 (vector_extract v2i64:$S, 1)),
            (i64 VectorExtractions.LE_DWORD_1)>;
  def : Pat<(i64 (vector_extract v2i64:$S, i64:$Idx)),
            (i64 VectorExtractions.LE_VARIABLE_DWORD)>;
} // HasVSX, HasDirectMove, IsLittleEndian

// Big endian pre-Power9 VSX subtarget with direct moves.
let Predicates = [HasVSX, HasDirectMove, NoP9Altivec, IsBigEndian] in {
def : Pat<(i32 (vector_extract v16i8:$S, 0)),
          (i32 VectorExtractions.LE_BYTE_15)>;
def : Pat<(i32 (vector_extract v16i8:$S, 1)),
          (i32 VectorExtractions.LE_BYTE_14)>;
def : Pat<(i32 (vector_extract v16i8:$S, 2)),
          (i32 VectorExtractions.LE_BYTE_13)>;
def : Pat<(i32 (vector_extract v16i8:$S, 3)),
          (i32 VectorExtractions.LE_BYTE_12)>;
def : Pat<(i32 (vector_extract v16i8:$S, 4)),
          (i32 VectorExtractions.LE_BYTE_11)>;
def : Pat<(i32 (vector_extract v16i8:$S, 5)),
          (i32 VectorExtractions.LE_BYTE_10)>;
def : Pat<(i32 (vector_extract v16i8:$S, 6)),
          (i32 VectorExtractions.LE_BYTE_9)>;
def : Pat<(i32 (vector_extract v16i8:$S, 7)),
          (i32 VectorExtractions.LE_BYTE_8)>;
def : Pat<(i32 (vector_extract v16i8:$S, 8)),
          (i32 VectorExtractions.LE_BYTE_7)>;
def : Pat<(i32 (vector_extract v16i8:$S, 9)),
          (i32 VectorExtractions.LE_BYTE_6)>;
def : Pat<(i32 (vector_extract v16i8:$S, 10)),
          (i32 VectorExtractions.LE_BYTE_5)>;
def : Pat<(i32 (vector_extract v16i8:$S, 11)),
          (i32 VectorExtractions.LE_BYTE_4)>;
def : Pat<(i32 (vector_extract v16i8:$S, 12)),
          (i32 VectorExtractions.LE_BYTE_3)>;
def : Pat<(i32 (vector_extract v16i8:$S, 13)),
          (i32 VectorExtractions.LE_BYTE_2)>;
def : Pat<(i32 (vector_extract v16i8:$S, 14)),
          (i32 VectorExtractions.LE_BYTE_1)>;
def : Pat<(i32 (vector_extract v16i8:$S, 15)),
          (i32 VectorExtractions.LE_BYTE_0)>;
def : Pat<(i32 (vector_extract v16i8:$S, i64:$Idx)),
          (i32 VectorExtractions.BE_VARIABLE_BYTE)>;

// v8i16 scalar <-> vector conversions (BE)
def : Pat<(i32 (vector_extract v8i16:$S, 0)),
          (i32 VectorExtractions.LE_HALF_7)>;
def : Pat<(i32 (vector_extract v8i16:$S, 1)),
          (i32 VectorExtractions.LE_HALF_6)>;
def : Pat<(i32 (vector_extract v8i16:$S, 2)),
          (i32 VectorExtractions.LE_HALF_5)>;
def : Pat<(i32 (vector_extract v8i16:$S, 3)),
          (i32 VectorExtractions.LE_HALF_4)>;
def : Pat<(i32 (vector_extract v8i16:$S, 4)),
          (i32 VectorExtractions.LE_HALF_3)>;
def : Pat<(i32 (vector_extract v8i16:$S, 5)),
          (i32 VectorExtractions.LE_HALF_2)>;
def : Pat<(i32 (vector_extract v8i16:$S, 6)),
          (i32 VectorExtractions.LE_HALF_1)>;
def : Pat<(i32 (vector_extract v8i16:$S, 7)),
          (i32 VectorExtractions.LE_HALF_0)>;
def : Pat<(i32 (vector_extract v8i16:$S, i64:$Idx)),
          (i32 VectorExtractions.BE_VARIABLE_HALF)>;

// v4i32 scalar <-> vector conversions (BE)
def : Pat<(i32 (vector_extract v4i32:$S, 0)),
          (i32 VectorExtractions.LE_WORD_3)>;
def : Pat<(i32 (vector_extract v4i32:$S, 1)),
          (i32 VectorExtractions.LE_WORD_2)>;
def : Pat<(i32 (vector_extract v4i32:$S, 2)),
          (i32 VectorExtractions.LE_WORD_1)>;
def : Pat<(i32 (vector_extract v4i32:$S, 3)),
          (i32 VectorExtractions.LE_WORD_0)>;
def : Pat<(i32 (vector_extract v4i32:$S, i64:$Idx)),
          (i32 VectorExtractions.BE_VARIABLE_WORD)>;
} // HasVSX, HasDirectMove, NoP9Altivec, IsBigEndian

// Little endian pre-Power9 VSX subtarget with direct moves.
let Predicates = [HasVSX, HasDirectMove, NoP9Altivec, IsLittleEndian] in {
def : Pat<(i32 (vector_extract v16i8:$S, 0)),
          (i32 VectorExtractions.LE_BYTE_0)>;
def : Pat<(i32 (vector_extract v16i8:$S, 1)),
          (i32 VectorExtractions.LE_BYTE_1)>;
def : Pat<(i32 (vector_extract v16i8:$S, 2)),
          (i32 VectorExtractions.LE_BYTE_2)>;
def : Pat<(i32 (vector_extract v16i8:$S, 3)),
          (i32 VectorExtractions.LE_BYTE_3)>;
def : Pat<(i32 (vector_extract v16i8:$S, 4)),
          (i32 VectorExtractions.LE_BYTE_4)>;
def : Pat<(i32 (vector_extract v16i8:$S, 5)),
          (i32 VectorExtractions.LE_BYTE_5)>;
def : Pat<(i32 (vector_extract v16i8:$S, 6)),
          (i32 VectorExtractions.LE_BYTE_6)>;
def : Pat<(i32 (vector_extract v16i8:$S, 7)),
          (i32 VectorExtractions.LE_BYTE_7)>;
def : Pat<(i32 (vector_extract v16i8:$S, 8)),
          (i32 VectorExtractions.LE_BYTE_8)>;
def : Pat<(i32 (vector_extract v16i8:$S, 9)),
          (i32 VectorExtractions.LE_BYTE_9)>;
def : Pat<(i32 (vector_extract v16i8:$S, 10)),
          (i32 VectorExtractions.LE_BYTE_10)>;
def : Pat<(i32 (vector_extract v16i8:$S, 11)),
          (i32 VectorExtractions.LE_BYTE_11)>;
def : Pat<(i32 (vector_extract v16i8:$S, 12)),
          (i32 VectorExtractions.LE_BYTE_12)>;
def : Pat<(i32 (vector_extract v16i8:$S, 13)),
          (i32 VectorExtractions.LE_BYTE_13)>;
def : Pat<(i32 (vector_extract v16i8:$S, 14)),
          (i32 VectorExtractions.LE_BYTE_14)>;
def : Pat<(i32 (vector_extract v16i8:$S, 15)),
          (i32 VectorExtractions.LE_BYTE_15)>;
def : Pat<(i32 (vector_extract v16i8:$S, i64:$Idx)),
          (i32 VectorExtractions.LE_VARIABLE_BYTE)>;

// v8i16 scalar <-> vector conversions (LE)
def : Pat<(i32 (vector_extract v8i16:$S, 0)),
          (i32 VectorExtractions.LE_HALF_0)>;
def : Pat<(i32 (vector_extract v8i16:$S, 1)),
          (i32 VectorExtractions.LE_HALF_1)>;
def : Pat<(i32 (vector_extract v8i16:$S, 2)),
          (i32 VectorExtractions.LE_HALF_2)>;
def : Pat<(i32 (vector_extract v8i16:$S, 3)),
          (i32 VectorExtractions.LE_HALF_3)>;
def : Pat<(i32 (vector_extract v8i16:$S, 4)),
          (i32 VectorExtractions.LE_HALF_4)>;
def : Pat<(i32 (vector_extract v8i16:$S, 5)),
          (i32 VectorExtractions.LE_HALF_5)>;
def : Pat<(i32 (vector_extract v8i16:$S, 6)),
          (i32 VectorExtractions.LE_HALF_6)>;
def : Pat<(i32 (vector_extract v8i16:$S, 7)),
          (i32 VectorExtractions.LE_HALF_7)>;
def : Pat<(i32 (vector_extract v8i16:$S, i64:$Idx)),
          (i32 VectorExtractions.LE_VARIABLE_HALF)>;

// v4i32 scalar <-> vector conversions (LE)
def : Pat<(i32 (vector_extract v4i32:$S, 0)),
          (i32 VectorExtractions.LE_WORD_0)>;
def : Pat<(i32 (vector_extract v4i32:$S, 1)),
          (i32 VectorExtractions.LE_WORD_1)>;
def : Pat<(i32 (vector_extract v4i32:$S, 2)),
          (i32 VectorExtractions.LE_WORD_2)>;
def : Pat<(i32 (vector_extract v4i32:$S, 3)),
          (i32 VectorExtractions.LE_WORD_3)>;
def : Pat<(i32 (vector_extract v4i32:$S, i64:$Idx)),
          (i32 VectorExtractions.LE_VARIABLE_WORD)>;
} // HasVSX, HasDirectMove, NoP9Altivec, IsLittleEndian

// Big endian pre-Power9 64Bit VSX subtarget that has direct moves.
let Predicates = [HasVSX, HasDirectMove, NoP9Vector, IsBigEndian, IsPPC64] in {
// Big endian integer vectors using direct moves.
def : Pat<(v2i64 (build_vector i64:$A, i64:$B)),
          (v2i64 (XXPERMDI
                    (SUBREG_TO_REG (i64 1), (MTVSRD $A), sub_64),
                    (SUBREG_TO_REG (i64 1), (MTVSRD $B), sub_64), 0))>;
def : Pat<(v4i32 (build_vector i32:$A, i32:$B, i32:$C, i32:$D)),
          (XXPERMDI
            (SUBREG_TO_REG (i64 1),
              (MTVSRD (RLDIMI AnyExts.B, AnyExts.A, 32, 0)), sub_64),
            (SUBREG_TO_REG (i64 1),
              (MTVSRD (RLDIMI AnyExts.D, AnyExts.C, 32, 0)), sub_64), 0)>;
def : Pat<(v4i32 (build_vector i32:$A, i32:$A, i32:$A, i32:$A)),
          (XXSPLTW (SUBREG_TO_REG (i64 1), (MTVSRWZ $A), sub_64), 1)>;
} // HasVSX, HasDirectMove, NoP9Vector, IsBigEndian, IsPPC64

// Little endian pre-Power9 VSX subtarget that has direct moves.
let Predicates = [HasVSX, HasDirectMove, NoP9Vector, IsLittleEndian] in {
// Little endian integer vectors using direct moves.
def : Pat<(v2i64 (build_vector i64:$A, i64:$B)),
          (v2i64 (XXPERMDI
                    (SUBREG_TO_REG (i64 1), (MTVSRD $B), sub_64),
                    (SUBREG_TO_REG (i64 1), (MTVSRD $A), sub_64), 0))>;
def : Pat<(v4i32 (build_vector i32:$A, i32:$B, i32:$C, i32:$D)),
          (XXPERMDI
            (SUBREG_TO_REG (i64 1),
              (MTVSRD (RLDIMI AnyExts.C, AnyExts.D, 32, 0)), sub_64),
            (SUBREG_TO_REG (i64 1),
              (MTVSRD (RLDIMI AnyExts.A, AnyExts.B, 32, 0)), sub_64), 0)>;
def : Pat<(v4i32 (build_vector i32:$A, i32:$A, i32:$A, i32:$A)),
          (XXSPLTW (SUBREG_TO_REG (i64 1), (MTVSRWZ $A), sub_64), 1)>;
} // HasVSX, HasDirectMove, NoP9Vector, IsLittleEndian

// Any Power9 VSX subtarget.
let Predicates = [HasVSX, HasP9Vector] in {
// Additional fnmsub patterns for the PPC-specific ISD opcode.
def : Pat<(PPCfnmsub f128:$A, f128:$B, f128:$C),
          (XSNMSUBQP $C, $A, $B)>;
def : Pat<(fneg (PPCfnmsub f128:$A, f128:$B, f128:$C)),
          (XSMSUBQP $C, $A, $B)>;
def : Pat<(PPCfnmsub f128:$A, f128:$B, (fneg f128:$C)),
          (XSNMADDQP $C, $A, $B)>;

def : Pat<(f128 (any_sint_to_fp i64:$src)),
          (f128 (XSCVSDQP (COPY_TO_REGCLASS $src, VFRC)))>;
def : Pat<(f128 (any_sint_to_fp (i64 (PPCmfvsr f64:$src)))),
          (f128 (XSCVSDQP $src))>;
def : Pat<(f128 (any_sint_to_fp (i32 (PPCmfvsr f64:$src)))),
          (f128 (XSCVSDQP (VEXTSW2Ds $src)))>;
def : Pat<(f128 (any_uint_to_fp i64:$src)),
          (f128 (XSCVUDQP (COPY_TO_REGCLASS $src, VFRC)))>;
def : Pat<(f128 (any_uint_to_fp (i64 (PPCmfvsr f64:$src)))),
          (f128 (XSCVUDQP $src))>;

// Convert (Un)Signed Word -> QP.
def : Pat<(f128 (any_sint_to_fp i32:$src)),
          (f128 (XSCVSDQP (MTVSRWA $src)))>;
def : Pat<(f128 (any_sint_to_fp (i32 (load ForceXForm:$src)))),
          (f128 (XSCVSDQP (LIWAX ForceXForm:$src)))>;
def : Pat<(f128 (any_uint_to_fp i32:$src)),
          (f128 (XSCVUDQP (MTVSRWZ $src)))>;
def : Pat<(f128 (any_uint_to_fp (i32 (load ForceXForm:$src)))),
          (f128 (XSCVUDQP (LIWZX ForceXForm:$src)))>;
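// e.g. converting a signed i32 already in a GPR to f128 above is mtvsrwa +
// xscvsdqp; when the i32 comes straight from memory, the LIWAX form
// (lfiwax/lxsiwax) loads and sign-extends it directly into a VSX register,
// so the value never passes through a GPR.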
// Pattern for matching Vector HP -> Vector SP intrinsic. Defined as a
// separate pattern so that it can convert the input register class from
// VRRC(v8i16) to VSRC.
def : Pat<(v4f32 (int_ppc_vsx_xvcvhpsp v8i16:$A)),
          (v4f32 (XVCVHPSP (COPY_TO_REGCLASS $A, VSRC)))>;

// Use current rounding mode
def : Pat<(f128 (any_fnearbyint f128:$vB)), (f128 (XSRQPI 0, $vB, 3))>;
// Round to nearest, ties away from zero
def : Pat<(f128 (any_fround f128:$vB)), (f128 (XSRQPI 0, $vB, 0))>;
// Round towards Zero
def : Pat<(f128 (any_ftrunc f128:$vB)), (f128 (XSRQPI 1, $vB, 1))>;
// Round towards +Inf
def : Pat<(f128 (any_fceil f128:$vB)), (f128 (XSRQPI 1, $vB, 2))>;
// Round towards -Inf
def : Pat<(f128 (any_ffloor f128:$vB)), (f128 (XSRQPI 1, $vB, 3))>;
// Use current rounding mode, [with Inexact]
def : Pat<(f128 (any_frint f128:$vB)), (f128 (XSRQPIX 0, $vB, 3))>;

def : Pat<(f128 (int_ppc_scalar_insert_exp_qp f128:$vA, i64:$vB)),
          (f128 (XSIEXPQP $vA, (MTVSRD $vB)))>;

def : Pat<(i64 (int_ppc_scalar_extract_expq f128:$vA)),
          (i64 (MFVSRD (EXTRACT_SUBREG
                          (v2i64 (XSXEXPQP $vA)), sub_64)))>;

// Extra patterns expanding to vector Extract Word/Insert Word.
def : Pat<(v4i32 (int_ppc_vsx_xxinsertw v4i32:$A, v2i64:$B, imm:$IMM)),
          (v4i32 (XXINSERTW $A, $B, imm:$IMM))>;
def : Pat<(v2i64 (int_ppc_vsx_xxextractuw v2i64:$A, imm:$IMM)),
          (v2i64 (COPY_TO_REGCLASS (XXEXTRACTUW $A, imm:$IMM), VSRC))>;

// Vector Reverse.
def : Pat<(v8i16 (bswap v8i16:$A)),
          (v8i16 (COPY_TO_REGCLASS (XXBRH (COPY_TO_REGCLASS $A, VSRC)), VRRC))>;
def : Pat<(v1i128 (bswap v1i128:$A)),
          (v1i128 (COPY_TO_REGCLASS (XXBRQ (COPY_TO_REGCLASS $A, VSRC)), VRRC))>;

// D-Form Load/Store.
foreach Ty = [v4i32, v4f32, v2i64, v2f64] in {
  def : Pat<(Ty (load DQForm:$src)), (LXV memrix16:$src)>;
  def : Pat<(Ty (load XForm:$src)), (LXVX XForm:$src)>;
  def : Pat<(store Ty:$rS, DQForm:$dst), (STXV $rS, memrix16:$dst)>;
  def : Pat<(store Ty:$rS, XForm:$dst), (STXVX $rS, XForm:$dst)>;
}

def : Pat<(f128 (load DQForm:$src)),
          (COPY_TO_REGCLASS (LXV memrix16:$src), VRRC)>;
def : Pat<(f128 (load XForm:$src)),
          (COPY_TO_REGCLASS (LXVX XForm:$src), VRRC)>;
def : Pat<(v4i32 (int_ppc_vsx_lxvw4x DQForm:$src)), (LXV memrix16:$src)>;
def : Pat<(v2f64 (int_ppc_vsx_lxvd2x DQForm:$src)), (LXV memrix16:$src)>;
def : Pat<(v4i32 (int_ppc_vsx_lxvw4x XForm:$src)), (LXVX XForm:$src)>;
def : Pat<(v2f64 (int_ppc_vsx_lxvd2x XForm:$src)), (LXVX XForm:$src)>;

def : Pat<(store f128:$rS, DQForm:$dst),
          (STXV (COPY_TO_REGCLASS $rS, VSRC), memrix16:$dst)>;
def : Pat<(store f128:$rS, XForm:$dst),
          (STXVX (COPY_TO_REGCLASS $rS, VSRC), XForm:$dst)>;
def : Pat<(int_ppc_vsx_stxvw4x v4i32:$rS, DQForm:$dst),
          (STXV $rS, memrix16:$dst)>;
def : Pat<(int_ppc_vsx_stxvd2x v2f64:$rS, DQForm:$dst),
          (STXV $rS, memrix16:$dst)>;
def : Pat<(int_ppc_vsx_stxvw4x v4i32:$rS, XForm:$dst),
          (STXVX $rS, XForm:$dst)>;
def : Pat<(int_ppc_vsx_stxvd2x v2f64:$rS, XForm:$dst),
          (STXVX $rS, XForm:$dst)>;

// Build vectors from i8 loads.
defm : ScalToVecWPermute<v8i16, ScalarLoads.ZELi8,
                         (VSPLTHs 3, (LXSIBZX ForceXForm:$src)),
                         (SUBREG_TO_REG (i64 1), (LXSIBZX ForceXForm:$src), sub_64)>;
defm : ScalToVecWPermute<v4i32, ScalarLoads.ZELi8,
                         (XXSPLTWs (LXSIBZX ForceXForm:$src), 1),
                         (SUBREG_TO_REG (i64 1), (LXSIBZX ForceXForm:$src), sub_64)>;
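// Illustrative effect of the two defms above: a zero-extended i8 load fanned
// out to v8i16 or v4i32 becomes a single lxsibzx followed by one splat
// (vsplth or xxspltw); no GPR load-and-extend sequence survives.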
defm : ScalToVecWPermute<v2i64, ScalarLoads.ZELi8i64,
                         (XXPERMDIs (LXSIBZX ForceXForm:$src), 0),
                         (SUBREG_TO_REG (i64 1), (LXSIBZX ForceXForm:$src), sub_64)>;
defm : ScalToVecWPermute<
  v4i32, ScalarLoads.SELi8,
  (XXSPLTWs (VEXTSB2Ws (LXSIBZX ForceXForm:$src)), 1),
  (SUBREG_TO_REG (i64 1), (VEXTSB2Ws (LXSIBZX ForceXForm:$src)), sub_64)>;
defm : ScalToVecWPermute<
  v2i64, ScalarLoads.SELi8i64,
  (XXPERMDIs (VEXTSB2Ds (LXSIBZX ForceXForm:$src)), 0),
  (SUBREG_TO_REG (i64 1), (VEXTSB2Ds (LXSIBZX ForceXForm:$src)), sub_64)>;

// Build vectors from i16 loads.
defm : ScalToVecWPermute<
  v4i32, ScalarLoads.ZELi16,
  (XXSPLTWs (LXSIHZX ForceXForm:$src), 1),
  (SUBREG_TO_REG (i64 1), (LXSIHZX ForceXForm:$src), sub_64)>;
defm : ScalToVecWPermute<
  v2i64, ScalarLoads.ZELi16i64,
  (XXPERMDIs (LXSIHZX ForceXForm:$src), 0),
  (SUBREG_TO_REG (i64 1), (LXSIHZX ForceXForm:$src), sub_64)>;
defm : ScalToVecWPermute<
  v4i32, ScalarLoads.SELi16,
  (XXSPLTWs (VEXTSH2Ws (LXSIHZX ForceXForm:$src)), 1),
  (SUBREG_TO_REG (i64 1), (VEXTSH2Ws (LXSIHZX ForceXForm:$src)), sub_64)>;
defm : ScalToVecWPermute<
  v2i64, ScalarLoads.SELi16i64,
  (XXPERMDIs (VEXTSH2Ds (LXSIHZX ForceXForm:$src)), 0),
  (SUBREG_TO_REG (i64 1), (VEXTSH2Ds (LXSIHZX ForceXForm:$src)), sub_64)>;

// Load/convert and convert/store patterns for f16.
def : Pat<(f64 (extloadf16 ForceXForm:$src)),
          (f64 (XSCVHPDP (LXSIHZX ForceXForm:$src)))>;
def : Pat<(truncstoref16 f64:$src, ForceXForm:$dst),
          (STXSIHX (XSCVDPHP $src), ForceXForm:$dst)>;
def : Pat<(f32 (extloadf16 ForceXForm:$src)),
          (f32 (COPY_TO_REGCLASS (XSCVHPDP (LXSIHZX ForceXForm:$src)), VSSRC))>;
def : Pat<(truncstoref16 f32:$src, ForceXForm:$dst),
          (STXSIHX (XSCVDPHP (COPY_TO_REGCLASS $src, VSFRC)), ForceXForm:$dst)>;
def : Pat<(f64 (f16_to_fp i32:$A)),
          (f64 (XSCVHPDP (MTVSRWZ $A)))>;
def : Pat<(f32 (f16_to_fp i32:$A)),
          (f32 (COPY_TO_REGCLASS (XSCVHPDP (MTVSRWZ $A)), VSSRC))>;
def : Pat<(i32 (fp_to_f16 f32:$A)),
          (i32 (MFVSRWZ (XSCVDPHP (COPY_TO_REGCLASS $A, VSFRC))))>;
def : Pat<(i32 (fp_to_f16 f64:$A)), (i32 (MFVSRWZ (XSCVDPHP $A)))>;

// Vector sign extensions.
def : Pat<(f64 (PPCVexts f64:$A, 1)),
          (f64 (COPY_TO_REGCLASS (VEXTSB2Ds $A), VSFRC))>;
def : Pat<(f64 (PPCVexts f64:$A, 2)),
          (f64 (COPY_TO_REGCLASS (VEXTSH2Ds $A), VSFRC))>;

def : Pat<(f64 (extloadf32 DSForm:$src)),
          (COPY_TO_REGCLASS (DFLOADf32 DSForm:$src), VSFRC)>;
def : Pat<(f32 (fpround (f64 (extloadf32 DSForm:$src)))),
          (f32 (DFLOADf32 DSForm:$src))>;

def : Pat<(v4f32 (PPCldvsxlh XForm:$src)),
          (SUBREG_TO_REG (i64 1), (XFLOADf64 XForm:$src), sub_64)>;
def : Pat<(v4f32 (PPCldvsxlh DSForm:$src)),
          (SUBREG_TO_REG (i64 1), (DFLOADf64 DSForm:$src), sub_64)>;

// Convert (Un)Signed DWord in memory -> QP.
def : Pat<(f128 (sint_to_fp (i64 (load XForm:$src)))),
          (f128 (XSCVSDQP (LXSDX XForm:$src)))>;
def : Pat<(f128 (sint_to_fp (i64 (load DSForm:$src)))),
          (f128 (XSCVSDQP (LXSD DSForm:$src)))>;
def : Pat<(f128 (uint_to_fp (i64 (load XForm:$src)))),
          (f128 (XSCVUDQP (LXSDX XForm:$src)))>;
def : Pat<(f128 (uint_to_fp (i64 (load DSForm:$src)))),
          (f128 (XSCVUDQP (LXSD DSForm:$src)))>;

// Convert Unsigned HWord in memory -> QP.
def : Pat<(f128 (uint_to_fp ScalarLoads.ZELi16)),
          (f128 (XSCVUDQP (LXSIHZX ForceXForm:$src)))>;

// Convert Unsigned Byte in memory -> QP.
def : Pat<(f128 (uint_to_fp ScalarLoads.ZELi8)),
          (f128 (XSCVUDQP (LXSIBZX ForceXForm:$src)))>;
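// e.g. converting an unsigned i8 from memory to f128 above is just lxsibzx
// (which already zeroes the upper bits of the doubleword) followed by
// xscvudqp; no explicit zero-extension instruction is emitted.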
// Truncate & Convert QP -> (Un)Signed (D)Word.
def : Pat<(i64 (any_fp_to_sint f128:$src)), (i64 (MFVRD (XSCVQPSDZ $src)))>;
def : Pat<(i64 (any_fp_to_uint f128:$src)), (i64 (MFVRD (XSCVQPUDZ $src)))>;
def : Pat<(i32 (any_fp_to_sint f128:$src)),
          (i32 (MFVSRWZ (COPY_TO_REGCLASS (XSCVQPSWZ $src), VFRC)))>;
def : Pat<(i32 (any_fp_to_uint f128:$src)),
          (i32 (MFVSRWZ (COPY_TO_REGCLASS (XSCVQPUWZ $src), VFRC)))>;

// Instructions for store(fptosi).
def : Pat<(PPCstore_scal_int_from_vsr f64:$src, DSForm:$dst, 8),
          (STXSD $src, DSForm:$dst)>;
def : Pat<(PPCstore_scal_int_from_vsr f64:$src, ForceXForm:$dst, 2),
          (STXSIHX $src, ForceXForm:$dst)>;
def : Pat<(PPCstore_scal_int_from_vsr f64:$src, ForceXForm:$dst, 1),
          (STXSIBX $src, ForceXForm:$dst)>;

def : Pat<(PPCstore_scal_int_from_vsr f128:$src, DSForm:$dst, 8),
          (STXSD (COPY_TO_REGCLASS $src, VFRC), DSForm:$dst)>;
def : Pat<(PPCstore_scal_int_from_vsr f128:$src, ForceXForm:$dst, 2),
          (STXSIHX (COPY_TO_REGCLASS $src, VSFRC), ForceXForm:$dst)>;
def : Pat<(PPCstore_scal_int_from_vsr f128:$src, ForceXForm:$dst, 1),
          (STXSIBX (COPY_TO_REGCLASS $src, VSFRC), ForceXForm:$dst)>;

// Round & Convert QP -> DP/SP
def : Pat<(f64 (any_fpround f128:$src)), (f64 (XSCVQPDP $src))>;
def : Pat<(f32 (any_fpround f128:$src)), (f32 (XSRSP (XSCVQPDPO $src)))>;

// Convert SP -> QP
def : Pat<(f128 (any_fpextend f32:$src)),
          (f128 (XSCVDPQP (COPY_TO_REGCLASS $src, VFRC)))>;

def : Pat<(f32 (PPCxsmaxc f32:$XA, f32:$XB)),
          (f32 (COPY_TO_REGCLASS (XSMAXCDP (COPY_TO_REGCLASS $XA, VSSRC),
                                           (COPY_TO_REGCLASS $XB, VSSRC)),
                                 VSSRC))>;
def : Pat<(f32 (PPCxsminc f32:$XA, f32:$XB)),
          (f32 (COPY_TO_REGCLASS (XSMINCDP (COPY_TO_REGCLASS $XA, VSSRC),
                                           (COPY_TO_REGCLASS $XB, VSSRC)),
                                 VSSRC))>;

// Endianness-neutral patterns for const splats with ISA 3.0 instructions.
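// (MTVSRWS replicates the source GPR word into all four word elements of the
// VSR, so the variable v4i32 splat below is a single instruction; roughly,
// for something like vec_splats(x) with x in r3:
//   mtvsrws vs34, r3
// where the register choices are illustrative only.)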
defm : ScalToVecWPermute<v4i32, (i32 i32:$A), (MTVSRWS $A),
                         (SUBREG_TO_REG (i64 1), (MTVSRWZ $A), sub_64)>;
def : Pat<(v4i32 (build_vector i32:$A, i32:$A, i32:$A, i32:$A)),
          (v4i32 (MTVSRWS $A))>;
def : Pat<(v16i8 (build_vector immNonAllOneAnyExt8:$A, immNonAllOneAnyExt8:$A,
                               immNonAllOneAnyExt8:$A, immNonAllOneAnyExt8:$A,
                               immNonAllOneAnyExt8:$A, immNonAllOneAnyExt8:$A,
                               immNonAllOneAnyExt8:$A, immNonAllOneAnyExt8:$A,
                               immNonAllOneAnyExt8:$A, immNonAllOneAnyExt8:$A,
                               immNonAllOneAnyExt8:$A, immNonAllOneAnyExt8:$A,
                               immNonAllOneAnyExt8:$A, immNonAllOneAnyExt8:$A,
                               immNonAllOneAnyExt8:$A, immNonAllOneAnyExt8:$A)),
          (v16i8 (COPY_TO_REGCLASS (XXSPLTIB imm:$A), VSRC))>;
defm : ScalToVecWPermute<
  v4i32, FltToIntLoad.A,
  (XVCVSPSXWS (LXVWSX ForceXForm:$A)),
  (XVCVSPSXWS (SUBREG_TO_REG (i64 1), (LIWZX ForceXForm:$A), sub_64))>;
defm : ScalToVecWPermute<
  v4i32, FltToUIntLoad.A,
  (XVCVSPUXWS (LXVWSX ForceXForm:$A)),
  (XVCVSPUXWS (SUBREG_TO_REG (i64 1), (LIWZX ForceXForm:$A), sub_64))>;
defm : ScalToVecWPermute<
  v4i32, DblToIntLoadP9.A,
  (XXSPLTW (SUBREG_TO_REG (i64 1), (XSCVDPSXWS (DFLOADf64 DSForm:$A)), sub_64), 1),
  (SUBREG_TO_REG (i64 1), (XSCVDPSXWS (DFLOADf64 DSForm:$A)), sub_64)>;
defm : ScalToVecWPermute<
  v4i32, DblToUIntLoadP9.A,
  (XXSPLTW (SUBREG_TO_REG (i64 1), (XSCVDPUXWS (DFLOADf64 DSForm:$A)), sub_64), 1),
  (SUBREG_TO_REG (i64 1), (XSCVDPUXWS (DFLOADf64 DSForm:$A)), sub_64)>;
defm : ScalToVecWPermute<
  v2i64, FltToLongLoadP9.A,
  (XXPERMDIs (XSCVDPSXDS (COPY_TO_REGCLASS (DFLOADf32 DSForm:$A), VSFRC)), 0),
  (SUBREG_TO_REG
     (i64 1),
     (XSCVDPSXDS (COPY_TO_REGCLASS (DFLOADf32 DSForm:$A), VSFRC)), sub_64)>;
defm : ScalToVecWPermute<
  v2i64, FltToULongLoadP9.A,
  (XXPERMDIs (XSCVDPUXDS (COPY_TO_REGCLASS (DFLOADf32 DSForm:$A), VSFRC)), 0),
  (SUBREG_TO_REG
     (i64 1),
     (XSCVDPUXDS (COPY_TO_REGCLASS (DFLOADf32 DSForm:$A), VSFRC)), sub_64)>;
def : Pat<(v4f32 (PPCldsplat ForceXForm:$A)),
          (v4f32 (LXVWSX ForceXForm:$A))>;
def : Pat<(v4i32 (PPCldsplat ForceXForm:$A)),
          (v4i32 (LXVWSX ForceXForm:$A))>;
def : Pat<(v8i16 (PPCldsplat ForceXForm:$A)),
          (v8i16 (VSPLTHs 3, (LXSIHZX ForceXForm:$A)))>;
def : Pat<(v16i8 (PPCldsplat ForceXForm:$A)),
          (v16i8 (VSPLTBs 7, (LXSIBZX ForceXForm:$A)))>;
def : Pat<(v2f64 (PPCxxperm v2f64:$XT, v2f64:$XB, v4i32:$C)),
          (XXPERM v2f64:$XT, v2f64:$XB, v4i32:$C)>;
} // HasVSX, HasP9Vector

// Any Power9 VSX subtarget for which the Power10 patterns are of equivalent
// length but better quality.
// Two identical blocks are required due to the slightly different predicates:
// one is for subtargets without the P10 instructions, the other is restricted
// to big endian subtargets that do have them.
let Predicates = [HasVSX, HasP9Vector, NoP10Vector] in {
// Little endian Power10 subtargets produce a shorter pattern but require a
// COPY_TO_REGCLASS. The COPY_TO_REGCLASS makes it appear to need two
// instructions to perform the operation, when only one instruction is
// produced in practice. The NoP10Vector predicate excludes these patterns
// from Power10 VSX subtargets.
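// Build vectors from i8 loads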
defm : ScalToVecWPermute<
  v16i8, ScalarLoads.Li8,
  (VSPLTBs 7, (LXSIBZX ForceXForm:$src)),
  (SUBREG_TO_REG (i64 1), (LXSIBZX ForceXForm:$src), sub_64)>;
// Build vectors from i16 loads
defm : ScalToVecWPermute<
  v8i16, ScalarLoads.Li16,
  (VSPLTHs 3, (LXSIHZX ForceXForm:$src)),
  (SUBREG_TO_REG (i64 1), (LXSIHZX ForceXForm:$src), sub_64)>;
} // HasVSX, HasP9Vector, NoP10Vector

// Any big endian Power9 VSX subtarget
let Predicates = [HasVSX, HasP9Vector, IsBigEndian] in {
// Power10 VSX subtargets produce a shorter pattern for little endian targets,
// but this is still the best pattern for big endian Power9 and Power10 VSX
// subtargets.
// Build vectors from i8 loads
defm : ScalToVecWPermute<
  v16i8, ScalarLoads.Li8,
  (VSPLTBs 7, (LXSIBZX ForceXForm:$src)),
  (SUBREG_TO_REG (i64 1), (LXSIBZX ForceXForm:$src), sub_64)>;
// Build vectors from i16 loads
defm : ScalToVecWPermute<
  v8i16, ScalarLoads.Li16,
  (VSPLTHs 3, (LXSIHZX ForceXForm:$src)),
  (SUBREG_TO_REG (i64 1), (LXSIHZX ForceXForm:$src), sub_64)>;

def : Pat<(f32 (PPCfcfidus (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 0)))))),
          (f32 (XSCVUXDSP (XXEXTRACTUW $A, 0)))>;
def : Pat<(f32 (PPCfcfidus (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 1)))))),
          (f32 (XSCVUXDSP (XXEXTRACTUW $A, 4)))>;
def : Pat<(f32 (PPCfcfidus (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 2)))))),
          (f32 (XSCVUXDSP (XXEXTRACTUW $A, 8)))>;
def : Pat<(f32 (PPCfcfidus (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 3)))))),
          (f32 (XSCVUXDSP (XXEXTRACTUW $A, 12)))>;
def : Pat<(f64 (PPCfcfidu (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 0)))))),
          (f64 (XSCVUXDDP (XXEXTRACTUW $A, 0)))>;
def : Pat<(f64 (PPCfcfidu (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 1)))))),
          (f64 (XSCVUXDDP (XXEXTRACTUW $A, 4)))>;
def : Pat<(f64 (PPCfcfidu (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 2)))))),
          (f64 (XSCVUXDDP (XXEXTRACTUW $A, 8)))>;
def : Pat<(f64 (PPCfcfidu (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 3)))))),
          (f64 (XSCVUXDDP (XXEXTRACTUW $A, 12)))>;
def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 0)),
          (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 0))>;
def : Pat<(v4i32 (insertelt v4i32:$A, DblToInt.B, 0)),
          (v4i32 (XXINSERTW v4i32:$A,
                            (SUBREG_TO_REG (i64 1),
                                           (XSCVDPSXWS f64:$B), sub_64),
                            0))>;
def : Pat<(v4i32 (insertelt v4i32:$A, DblToUInt.B, 0)),
          (v4i32 (XXINSERTW v4i32:$A,
                            (SUBREG_TO_REG (i64 1),
                                           (XSCVDPUXWS f64:$B), sub_64),
                            0))>;
def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 1)),
          (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 4))>;
def : Pat<(v4i32 (insertelt v4i32:$A, DblToInt.B, 1)),
          (v4i32 (XXINSERTW v4i32:$A,
                            (SUBREG_TO_REG (i64 1),
                                           (XSCVDPSXWS f64:$B), sub_64),
                            4))>;
def : Pat<(v4i32 (insertelt v4i32:$A, DblToUInt.B, 1)),
          (v4i32 (XXINSERTW v4i32:$A,
                            (SUBREG_TO_REG (i64 1),
                                           (XSCVDPUXWS f64:$B), sub_64),
                            4))>;
def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 2)),
          (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 8))>;
def : Pat<(v4i32 (insertelt v4i32:$A, DblToInt.B, 2)),
          (v4i32 (XXINSERTW v4i32:$A,
                            (SUBREG_TO_REG (i64 1),
                                           (XSCVDPSXWS f64:$B), sub_64),
                            8))>;
def : Pat<(v4i32 (insertelt v4i32:$A, DblToUInt.B, 2)),
          (v4i32 (XXINSERTW v4i32:$A,
                            (SUBREG_TO_REG (i64 1),
                                           (XSCVDPUXWS f64:$B), sub_64),
                            8))>;
def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 3)),
          (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 12))>;
def : Pat<(v4i32 (insertelt v4i32:$A, DblToInt.B, 3)),
          (v4i32 (XXINSERTW v4i32:$A,
                            (SUBREG_TO_REG (i64 1),
                                           (XSCVDPSXWS f64:$B), sub_64),
                            12))>;
def : Pat<(v4i32 (insertelt v4i32:$A, DblToUInt.B, 3)),
          (v4i32 (XXINSERTW v4i32:$A,
                            (SUBREG_TO_REG (i64 1),
                                           (XSCVDPUXWS f64:$B), sub_64),
                            12))>;
def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 0)),
          (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 0))>;
def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 1)),
          (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 4))>;
def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 2)),
          (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 8))>;
def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 3)),
          (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 12))>;

def : Pat<(v4f32 (insertelt v4f32:$A, (f32 (fpround f64:$B)), 0)),
          (v4f32 (XXINSERTW v4f32:$A,
                  (SUBREG_TO_REG (i64 1), (XSCVDPSP f64:$B), sub_64), 0))>;
def : Pat<(v4f32 (insertelt v4f32:$A, (f32 (fpround f64:$B)), 1)),
          (v4f32 (XXINSERTW v4f32:$A,
                  (SUBREG_TO_REG (i64 1), (XSCVDPSP f64:$B), sub_64), 4))>;
def : Pat<(v4f32 (insertelt v4f32:$A, (f32 (fpround f64:$B)), 2)),
          (v4f32 (XXINSERTW v4f32:$A,
                  (SUBREG_TO_REG (i64 1), (XSCVDPSP f64:$B), sub_64), 8))>;
def : Pat<(v4f32 (insertelt v4f32:$A, (f32 (fpround f64:$B)), 3)),
          (v4f32 (XXINSERTW v4f32:$A,
                  (SUBREG_TO_REG (i64 1), (XSCVDPSP f64:$B), sub_64), 12))>;
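// STXSIBXv/STXSIHXv store byte 7 (resp. halfword 3) of the source vector, so
// the VSLDOI rotations below first move the requested element into that
// position: for big endian byte element N the shift amount is (N + 9) % 16,
// and for halfword element N it is (2N + 10) % 16; elements 7 and 3
// respectively need no rotation.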
// Scalar stores of i8
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 0)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 9)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 1)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 10)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 2)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 11)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 3)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 12)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 4)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 13)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 5)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 14)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 6)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 15)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 7)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS $S, VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 8)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 1)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 9)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 2)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 10)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 3)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 11)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 4)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 12)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 5)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 13)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 6)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 14)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 7)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 15)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 8)), VSRC), ForceXForm:$dst)>;

// Scalar stores of i16
def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 0)), ForceXForm:$dst),
          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 10)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 1)), ForceXForm:$dst),
          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 12)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 2)), ForceXForm:$dst),
          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 14)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 3)), ForceXForm:$dst),
          (STXSIHXv (COPY_TO_REGCLASS $S, VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 4)), ForceXForm:$dst),
          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 2)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 5)), ForceXForm:$dst),
          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 4)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 6)), ForceXForm:$dst),
          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 6)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 7)), ForceXForm:$dst),
          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 8)), VSRC), ForceXForm:$dst)>;
} // HasVSX, HasP9Vector, IsBigEndian
// Big endian 64-bit Power9 VSX subtarget.
let Predicates = [HasVSX, HasP9Vector, IsBigEndian, IsPPC64] in {
def : Pat<(v2i64 (scalar_to_vector (i64 (load DSForm:$src)))),
          (v2i64 (SUBREG_TO_REG (i64 1), (DFLOADf64 DSForm:$src), sub_64))>;
def : Pat<(v2i64 (scalar_to_vector (i64 (load XForm:$src)))),
          (v2i64 (SUBREG_TO_REG (i64 1), (XFLOADf64 XForm:$src), sub_64))>;

def : Pat<(v2f64 (scalar_to_vector (f64 (load DSForm:$src)))),
          (v2f64 (SUBREG_TO_REG (i64 1), (DFLOADf64 DSForm:$src), sub_64))>;
def : Pat<(v2f64 (scalar_to_vector (f64 (load XForm:$src)))),
          (v2f64 (SUBREG_TO_REG (i64 1), (XFLOADf64 XForm:$src), sub_64))>;
def : Pat<(store (i64 (extractelt v2i64:$A, 1)), XForm:$src),
          (XFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2),
                                      sub_64), XForm:$src)>;
def : Pat<(store (f64 (extractelt v2f64:$A, 1)), XForm:$src),
          (XFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2),
                                      sub_64), XForm:$src)>;
def : Pat<(store (i64 (extractelt v2i64:$A, 0)), XForm:$src),
          (XFSTOREf64 (EXTRACT_SUBREG $A, sub_64), XForm:$src)>;
def : Pat<(store (f64 (extractelt v2f64:$A, 0)), XForm:$src),
          (XFSTOREf64 (EXTRACT_SUBREG $A, sub_64), XForm:$src)>;
def : Pat<(store (i64 (extractelt v2i64:$A, 1)), DSForm:$src),
          (DFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2),
                                      sub_64), DSForm:$src)>;
def : Pat<(store (f64 (extractelt v2f64:$A, 1)), DSForm:$src),
          (DFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2),
                                      sub_64), DSForm:$src)>;
def : Pat<(store (i64 (extractelt v2i64:$A, 0)), DSForm:$src),
          (DFSTOREf64 (EXTRACT_SUBREG $A, sub_64), DSForm:$src)>;
def : Pat<(store (f64 (extractelt v2f64:$A, 0)), DSForm:$src),
          (DFSTOREf64 (EXTRACT_SUBREG $A, sub_64), DSForm:$src)>;

// (Un)Signed DWord vector extract -> QP
def : Pat<(f128 (sint_to_fp (i64 (extractelt v2i64:$src, 0)))),
          (f128 (XSCVSDQP (COPY_TO_REGCLASS $src, VFRC)))>;
def : Pat<(f128 (sint_to_fp (i64 (extractelt v2i64:$src, 1)))),
          (f128 (XSCVSDQP
                   (EXTRACT_SUBREG (XXPERMDI $src, $src, 3), sub_64)))>;
def : Pat<(f128 (uint_to_fp (i64 (extractelt v2i64:$src, 0)))),
          (f128 (XSCVUDQP (COPY_TO_REGCLASS $src, VFRC)))>;
def : Pat<(f128 (uint_to_fp (i64 (extractelt v2i64:$src, 1)))),
          (f128 (XSCVUDQP
                   (EXTRACT_SUBREG (XXPERMDI $src, $src, 3), sub_64)))>;

// (Un)Signed Word vector extract -> QP
def : Pat<(f128 (sint_to_fp (i32 (extractelt v4i32:$src, 1)))),
          (f128 (XSCVSDQP (EXTRACT_SUBREG (VEXTSW2D $src), sub_64)))>;
foreach Idx = [0,2,3] in {
  def : Pat<(f128 (sint_to_fp (i32 (extractelt v4i32:$src, Idx)))),
            (f128 (XSCVSDQP (EXTRACT_SUBREG
                              (VEXTSW2D (VSPLTW Idx, $src)), sub_64)))>;
}
foreach Idx = 0-3 in {
  def : Pat<(f128 (uint_to_fp (i32 (extractelt v4i32:$src, Idx)))),
            (f128 (XSCVUDQP (XXEXTRACTUW $src, !shl(Idx, 2))))>;
}
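// VEXTRACTUH takes a byte offset rather than an element index, so halfword
// element Idx below becomes byte offset 2*Idx, written !add(Idx, Idx).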
// (Un)Signed HWord vector extract -> QP/DP/SP
foreach Idx = 0-7 in {
  def : Pat<(f128 (sint_to_fp
                    (i32 (sext_inreg
                           (vector_extract v8i16:$src, Idx), i16)))),
            (f128 (XSCVSDQP (EXTRACT_SUBREG
                              (VEXTSH2D (VEXTRACTUH !add(Idx, Idx), $src)),
                              sub_64)))>;
  // The SDAG adds the `and` since an `i16` is being extracted as an `i32`.
  def : Pat<(f128 (uint_to_fp
                    (and (i32 (vector_extract v8i16:$src, Idx)), 65535))),
            (f128 (XSCVUDQP (EXTRACT_SUBREG
                              (VEXTRACTUH !add(Idx, Idx), $src), sub_64)))>;
  def : Pat<(f32 (PPCfcfidus
                   (f64 (PPCmtvsrz (and (i32 (vector_extract v8i16:$src, Idx)),
                                        65535))))),
            (f32 (XSCVUXDSP (EXTRACT_SUBREG
                              (VEXTRACTUH !add(Idx, Idx), $src), sub_64)))>;
  def : Pat<(f32 (PPCfcfids
                   (f64 (PPCmtvsra
                          (i32 (sext_inreg (vector_extract v8i16:$src, Idx),
                                           i16)))))),
            (f32 (XSCVSXDSP (EXTRACT_SUBREG
                              (VEXTSH2D (VEXTRACTUH !add(Idx, Idx), $src)),
                              sub_64)))>;
  def : Pat<(f64 (PPCfcfidu
                   (f64 (PPCmtvsrz
                          (and (i32 (vector_extract v8i16:$src, Idx)),
                               65535))))),
            (f64 (XSCVUXDDP (EXTRACT_SUBREG
                              (VEXTRACTUH !add(Idx, Idx), $src), sub_64)))>;
  def : Pat<(f64 (PPCfcfid
                   (f64 (PPCmtvsra
                          (i32 (sext_inreg (vector_extract v8i16:$src, Idx),
                                           i16)))))),
            (f64 (XSCVSXDDP (EXTRACT_SUBREG
                              (VEXTSH2D (VEXTRACTUH !add(Idx, Idx), $src)),
                              sub_64)))>;
}

// (Un)Signed Byte vector extract -> QP/DP/SP
// For bytes the element index is itself the byte offset, and the sign
// extension is done with VEXTSB2D.
foreach Idx = 0-15 in {
  def : Pat<(f128 (sint_to_fp
                    (i32 (sext_inreg (vector_extract v16i8:$src, Idx),
                                     i8)))),
            (f128 (XSCVSDQP (EXTRACT_SUBREG
                              (VEXTSB2D (VEXTRACTUB Idx, $src)), sub_64)))>;
  def : Pat<(f128 (uint_to_fp
                    (and (i32 (vector_extract v16i8:$src, Idx)), 255))),
            (f128 (XSCVUDQP
                    (EXTRACT_SUBREG (VEXTRACTUB Idx, $src), sub_64)))>;

  def : Pat<(f32 (PPCfcfidus
                   (f64 (PPCmtvsrz
                          (and (i32 (vector_extract v16i8:$src, Idx)),
                               255))))),
            (f32 (XSCVUXDSP (EXTRACT_SUBREG
                              (VEXTRACTUB Idx, $src), sub_64)))>;
  def : Pat<(f32 (PPCfcfids
                   (f64 (PPCmtvsra
                          (i32 (sext_inreg (vector_extract v16i8:$src, Idx),
                                           i8)))))),
            (f32 (XSCVSXDSP (EXTRACT_SUBREG
                              (VEXTSB2D (VEXTRACTUB Idx, $src)),
                              sub_64)))>;
  def : Pat<(f64 (PPCfcfidu
                   (f64 (PPCmtvsrz
                          (and (i32 (vector_extract v16i8:$src, Idx)),
                               255))))),
            (f64 (XSCVUXDDP (EXTRACT_SUBREG
                              (VEXTRACTUB Idx, $src), sub_64)))>;
  def : Pat<(f64 (PPCfcfid
                   (f64 (PPCmtvsra
                          (i32 (sext_inreg (vector_extract v16i8:$src, Idx),
                                           i8)))))),
            (f64 (XSCVSXDDP (EXTRACT_SUBREG
                              (VEXTSB2D (VEXTRACTUB Idx, $src)),
                              sub_64)))>;
}

// Unsigned int in VSX register -> QP
def : Pat<(f128 (uint_to_fp (i32 (PPCmfvsr f64:$src)))),
          (f128 (XSCVUDQP
                  (XXEXTRACTUW (SUBREG_TO_REG (i64 1), $src, sub_64), 4)))>;
} // HasVSX, HasP9Vector, IsBigEndian, IsPPC64

// Little endian Power9 subtarget.
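// (On little endian subtargets word element N lives at byte offset 4*(3-N),
// so the XXEXTRACTUW immediates below mirror the big endian block above.)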
let Predicates = [HasVSX, HasP9Vector, IsLittleEndian] in {
def : Pat<(f32 (PPCfcfidus (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 0)))))),
          (f32 (XSCVUXDSP (XXEXTRACTUW $A, 12)))>;
def : Pat<(f32 (PPCfcfidus (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 1)))))),
          (f32 (XSCVUXDSP (XXEXTRACTUW $A, 8)))>;
def : Pat<(f32 (PPCfcfidus (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 2)))))),
          (f32 (XSCVUXDSP (XXEXTRACTUW $A, 4)))>;
def : Pat<(f32 (PPCfcfidus (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 3)))))),
          (f32 (XSCVUXDSP (XXEXTRACTUW $A, 0)))>;
def : Pat<(f64 (PPCfcfidu (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 0)))))),
          (f64 (XSCVUXDDP (XXEXTRACTUW $A, 12)))>;
def : Pat<(f64 (PPCfcfidu (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 1)))))),
          (f64 (XSCVUXDDP (XXEXTRACTUW $A, 8)))>;
def : Pat<(f64 (PPCfcfidu (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 2)))))),
          (f64 (XSCVUXDDP (XXEXTRACTUW $A, 4)))>;
def : Pat<(f64 (PPCfcfidu (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 3)))))),
          (f64 (XSCVUXDDP (XXEXTRACTUW $A, 0)))>;
def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 0)),
          (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 12))>;
def : Pat<(v4i32 (insertelt v4i32:$A, DblToInt.B, 0)),
          (v4i32 (XXINSERTW v4i32:$A,
                            (SUBREG_TO_REG (i64 1),
                                           (XSCVDPSXWS f64:$B), sub_64),
                            12))>;
def : Pat<(v4i32 (insertelt v4i32:$A, DblToUInt.B, 0)),
          (v4i32 (XXINSERTW v4i32:$A,
                            (SUBREG_TO_REG (i64 1),
                                           (XSCVDPUXWS f64:$B), sub_64),
                            12))>;
def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 1)),
          (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 8))>;
def : Pat<(v4i32 (insertelt v4i32:$A, DblToInt.B, 1)),
          (v4i32 (XXINSERTW v4i32:$A,
                            (SUBREG_TO_REG (i64 1),
                                           (XSCVDPSXWS f64:$B), sub_64),
                            8))>;
def : Pat<(v4i32 (insertelt v4i32:$A, DblToUInt.B, 1)),
          (v4i32 (XXINSERTW v4i32:$A,
                            (SUBREG_TO_REG (i64 1),
                                           (XSCVDPUXWS f64:$B), sub_64),
                            8))>;
def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 2)),
          (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 4))>;
def : Pat<(v4i32 (insertelt v4i32:$A, DblToInt.B, 2)),
          (v4i32 (XXINSERTW v4i32:$A,
                            (SUBREG_TO_REG (i64 1),
                                           (XSCVDPSXWS f64:$B), sub_64),
                            4))>;
def : Pat<(v4i32 (insertelt v4i32:$A, DblToUInt.B, 2)),
          (v4i32 (XXINSERTW v4i32:$A,
                            (SUBREG_TO_REG (i64 1),
                                           (XSCVDPUXWS f64:$B), sub_64),
                            4))>;
def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 3)),
          (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 0))>;
def : Pat<(v4i32 (insertelt v4i32:$A, DblToInt.B, 3)),
          (v4i32 (XXINSERTW v4i32:$A,
                            (SUBREG_TO_REG (i64 1),
                                           (XSCVDPSXWS f64:$B), sub_64),
                            0))>;
def : Pat<(v4i32 (insertelt v4i32:$A, DblToUInt.B, 3)),
          (v4i32 (XXINSERTW v4i32:$A,
                            (SUBREG_TO_REG (i64 1),
                                           (XSCVDPUXWS f64:$B), sub_64),
                            0))>;
def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 0)),
          (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 12))>;
def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 1)),
          (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 8))>;
def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 2)),
          (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 4))>;
def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 3)),
          (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 0))>;

def : Pat<(v4f32 (insertelt v4f32:$A, (f32 (fpround f64:$B)), 0)),
          (v4f32 (XXINSERTW v4f32:$A,
                  (SUBREG_TO_REG (i64 1), (XSCVDPSP f64:$B), sub_64), 12))>;
def : Pat<(v4f32 (insertelt v4f32:$A, (f32 (fpround f64:$B)), 1)),
          (v4f32 (XXINSERTW v4f32:$A,
                  (SUBREG_TO_REG (i64 1), (XSCVDPSP f64:$B), sub_64), 8))>;
def : Pat<(v4f32 (insertelt v4f32:$A, (f32 (fpround f64:$B)), 2)),
          (v4f32 (XXINSERTW v4f32:$A,
                  (SUBREG_TO_REG (i64 1), (XSCVDPSP f64:$B), sub_64), 4))>;
def : Pat<(v4f32 (insertelt v4f32:$A, (f32 (fpround f64:$B)), 3)),
          (v4f32 (XXINSERTW v4f32:$A,
                  (SUBREG_TO_REG (i64 1), (XSCVDPSP f64:$B), sub_64), 0))>;

def : Pat<(v8i16 (PPCld_vec_be ForceXForm:$src)),
          (COPY_TO_REGCLASS (LXVH8X ForceXForm:$src), VRRC)>;
def : Pat<(PPCst_vec_be v8i16:$rS, ForceXForm:$dst),
          (STXVH8X (COPY_TO_REGCLASS $rS, VSRC), ForceXForm:$dst)>;

def : Pat<(v16i8 (PPCld_vec_be ForceXForm:$src)),
          (COPY_TO_REGCLASS (LXVB16X ForceXForm:$src), VRRC)>;
def : Pat<(PPCst_vec_be v16i8:$rS, ForceXForm:$dst),
          (STXVB16X (COPY_TO_REGCLASS $rS, VSRC), ForceXForm:$dst)>;
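// As in the big endian block, the element to store is first rotated into
// byte 7 (halfword 3 for i16); for little endian element N the VSLDOI shift
// amount is (8 - N) % 16 for bytes and (8 - 2N) % 16 for halfwords.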
// Scalar stores of i8
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 0)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 8)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 1)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 7)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 2)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 6)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 3)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 5)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 4)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 4)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 5)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 3)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 6)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 2)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 7)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 1)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 8)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS $S, VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 9)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 15)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 10)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 14)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 11)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 13)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 12)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 12)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 13)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 11)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 14)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 10)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 15)), ForceXForm:$dst),
          (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 9)), VSRC), ForceXForm:$dst)>;

// Scalar stores of i16
def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 0)), ForceXForm:$dst),
          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 8)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 1)), ForceXForm:$dst),
          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 6)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 2)), ForceXForm:$dst),
          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 4)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 3)), ForceXForm:$dst),
          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 2)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 4)), ForceXForm:$dst),
          (STXSIHXv (COPY_TO_REGCLASS $S, VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 5)), ForceXForm:$dst),
          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 14)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 6)), ForceXForm:$dst),
          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 12)), VSRC), ForceXForm:$dst)>;
def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 7)), ForceXForm:$dst),
          (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 10)), VSRC), ForceXForm:$dst)>;

defm : ScalToVecWPermute<
  v2i64, (i64 (load DSForm:$src)),
  (XXPERMDIs (DFLOADf64 DSForm:$src), 2),
  (SUBREG_TO_REG (i64 1), (DFLOADf64 DSForm:$src), sub_64)>;
defm : ScalToVecWPermute<
  v2i64, (i64 (load XForm:$src)),
  (XXPERMDIs (XFLOADf64 XForm:$src), 2),
  (SUBREG_TO_REG (i64 1), (XFLOADf64 XForm:$src), sub_64)>;
defm : ScalToVecWPermute<
  v2f64, (f64 (load DSForm:$src)),
  (XXPERMDIs (DFLOADf64 DSForm:$src), 2),
  (SUBREG_TO_REG (i64 1), (DFLOADf64 DSForm:$src), sub_64)>;
defm : ScalToVecWPermute<
  v2f64, (f64 (load XForm:$src)),
  (XXPERMDIs (XFLOADf64 XForm:$src), 2),
  (SUBREG_TO_REG (i64 1), (XFLOADf64 XForm:$src), sub_64)>;

def : Pat<(store (i64 (extractelt v2i64:$A, 0)), XForm:$src),
          (XFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2),
                                      sub_64), XForm:$src)>;
def : Pat<(store (f64 (extractelt v2f64:$A, 0)), XForm:$src),
          (XFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2),
                                      sub_64), XForm:$src)>;
def : Pat<(store (i64 (extractelt v2i64:$A, 1)), XForm:$src),
          (XFSTOREf64 (EXTRACT_SUBREG $A, sub_64), XForm:$src)>;
def : Pat<(store (f64 (extractelt v2f64:$A, 1)), XForm:$src),
          (XFSTOREf64 (EXTRACT_SUBREG $A, sub_64), XForm:$src)>;
def : Pat<(store (i64 (extractelt v2i64:$A, 0)), DSForm:$src),
          (DFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2),
                                      sub_64), DSForm:$src)>;
def : Pat<(store (f64 (extractelt v2f64:$A, 0)), DSForm:$src),
          (DFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2), sub_64),
                      DSForm:$src)>;
def : Pat<(store (i64 (extractelt v2i64:$A, 1)), DSForm:$src),
          (DFSTOREf64 (EXTRACT_SUBREG $A, sub_64), DSForm:$src)>;
def : Pat<(store (f64 (extractelt v2f64:$A, 1)), DSForm:$src),
          (DFSTOREf64 (EXTRACT_SUBREG $A, sub_64), DSForm:$src)>;
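// On little endian subtargets doubleword element 1 already occupies the
// register's sub_64 subregister, so it converts directly; element 0 first
// needs the XXPERMDI swap, mirroring the big endian block above.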
// (Un)Signed DWord vector extract -> QP
def : Pat<(f128 (sint_to_fp (i64 (extractelt v2i64:$src, 0)))),
          (f128 (XSCVSDQP
                   (EXTRACT_SUBREG (XXPERMDI $src, $src, 3), sub_64)))>;
def : Pat<(f128 (sint_to_fp (i64 (extractelt v2i64:$src, 1)))),
          (f128 (XSCVSDQP (COPY_TO_REGCLASS $src, VFRC)))>;
def : Pat<(f128 (uint_to_fp (i64 (extractelt v2i64:$src, 0)))),
          (f128 (XSCVUDQP
                   (EXTRACT_SUBREG (XXPERMDI $src, $src, 3), sub_64)))>;
def : Pat<(f128 (uint_to_fp (i64 (extractelt v2i64:$src, 1)))),
          (f128 (XSCVUDQP (COPY_TO_REGCLASS $src, VFRC)))>;

// (Un)Signed Word vector extract -> QP
foreach Idx = [[0,3],[1,2],[3,0]] in {
  def : Pat<(f128 (sint_to_fp (i32 (extractelt v4i32:$src, !head(Idx))))),
            (f128 (XSCVSDQP (EXTRACT_SUBREG
                              (VEXTSW2D (VSPLTW !head(!tail(Idx)), $src)),
                              sub_64)))>;
}
def : Pat<(f128 (sint_to_fp (i32 (extractelt v4i32:$src, 2)))),
          (f128 (XSCVSDQP (EXTRACT_SUBREG (VEXTSW2D $src), sub_64)))>;

foreach Idx = [[0,12],[1,8],[2,4],[3,0]] in {
  def : Pat<(f128 (uint_to_fp (i32 (extractelt v4i32:$src, !head(Idx))))),
            (f128 (XSCVUDQP (XXEXTRACTUW $src, !head(!tail(Idx)))))>;
}

// (Un)Signed HWord vector extract -> QP/DP/SP
// The nested foreach lists identify the vector element and the corresponding
// register byte location.
foreach Idx = [[0,14],[1,12],[2,10],[3,8],[4,6],[5,4],[6,2],[7,0]] in {
  def : Pat<(f128 (sint_to_fp
                    (i32 (sext_inreg
                           (vector_extract v8i16:$src, !head(Idx)), i16)))),
            (f128 (XSCVSDQP
                    (EXTRACT_SUBREG (VEXTSH2D
                                      (VEXTRACTUH !head(!tail(Idx)), $src)),
                                    sub_64)))>;
  def : Pat<(f128 (uint_to_fp
                    (and (i32 (vector_extract v8i16:$src, !head(Idx))),
                         65535))),
            (f128 (XSCVUDQP (EXTRACT_SUBREG
                              (VEXTRACTUH !head(!tail(Idx)), $src), sub_64)))>;
  def : Pat<(f32 (PPCfcfidus
                   (f64 (PPCmtvsrz
                          (and (i32 (vector_extract v8i16:$src, !head(Idx))),
                               65535))))),
            (f32 (XSCVUXDSP (EXTRACT_SUBREG
                              (VEXTRACTUH !head(!tail(Idx)), $src), sub_64)))>;
  def : Pat<(f32 (PPCfcfids
                   (f64 (PPCmtvsra
                          (i32 (sext_inreg (vector_extract v8i16:$src,
                                                           !head(Idx)), i16)))))),
            (f32 (XSCVSXDSP
                    (EXTRACT_SUBREG
                      (VEXTSH2D (VEXTRACTUH !head(!tail(Idx)), $src)),
                      sub_64)))>;
  def : Pat<(f64 (PPCfcfidu
                   (f64 (PPCmtvsrz
                          (and (i32 (vector_extract v8i16:$src, !head(Idx))),
                               65535))))),
            (f64 (XSCVUXDDP (EXTRACT_SUBREG
                              (VEXTRACTUH !head(!tail(Idx)), $src), sub_64)))>;
  def : Pat<(f64 (PPCfcfid
                   (f64 (PPCmtvsra
                          (i32 (sext_inreg
                                 (vector_extract v8i16:$src, !head(Idx)), i16)))))),
            (f64 (XSCVSXDDP
                    (EXTRACT_SUBREG (VEXTSH2D
                                      (VEXTRACTUH !head(!tail(Idx)), $src)),
                                    sub_64)))>;
}
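// As above, the nested lists pair each byte element with its register byte
// offset, which for little endian is 15 - index.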
// (Un)Signed Byte vector extract -> QP/DP/SP
foreach Idx = [[0,15],[1,14],[2,13],[3,12],[4,11],[5,10],[6,9],[7,8],[8,7],
               [9,6],[10,5],[11,4],[12,3],[13,2],[14,1],[15,0]] in {
  def : Pat<(f128 (sint_to_fp
                    (i32 (sext_inreg
                           (vector_extract v16i8:$src, !head(Idx)), i8)))),
            (f128 (XSCVSDQP
                    (EXTRACT_SUBREG
                      (VEXTSB2D (VEXTRACTUB !head(!tail(Idx)), $src)),
                      sub_64)))>;
  def : Pat<(f128 (uint_to_fp
                    (and (i32 (vector_extract v16i8:$src, !head(Idx))),
                         255))),
            (f128 (XSCVUDQP
                    (EXTRACT_SUBREG
                      (VEXTRACTUB !head(!tail(Idx)), $src), sub_64)))>;

  def : Pat<(f32 (PPCfcfidus
                   (f64 (PPCmtvsrz
                          (and (i32 (vector_extract v16i8:$src, !head(Idx))),
                               255))))),
            (f32 (XSCVUXDSP (EXTRACT_SUBREG
                              (VEXTRACTUB !head(!tail(Idx)), $src), sub_64)))>;
  def : Pat<(f32 (PPCfcfids
                   (f64 (PPCmtvsra
                          (i32 (sext_inreg
                                 (vector_extract v16i8:$src, !head(Idx)), i8)))))),
            (f32 (XSCVSXDSP
                    (EXTRACT_SUBREG (VEXTSB2D
                                      (VEXTRACTUB !head(!tail(Idx)), $src)),
                                    sub_64)))>;
  def : Pat<(f64 (PPCfcfidu
                   (f64 (PPCmtvsrz
                          (and (i32
                                 (vector_extract v16i8:$src, !head(Idx))), 255))))),
            (f64 (XSCVUXDDP (EXTRACT_SUBREG
                              (VEXTRACTUB !head(!tail(Idx)), $src), sub_64)))>;
  def : Pat<(f64 (PPCfcfid
                   (f64 (PPCmtvsra
                          (i32 (sext_inreg
                                 (vector_extract v16i8:$src, !head(Idx)), i8)))))),
            (f64 (XSCVSXDDP
                    (EXTRACT_SUBREG (VEXTSB2D
                                      (VEXTRACTUB !head(!tail(Idx)), $src)),
                                    sub_64)))>;
}

// Unsigned int in VSX register -> QP
def : Pat<(f128 (uint_to_fp (i32 (PPCmfvsr f64:$src)))),
          (f128 (XSCVUDQP
                  (XXEXTRACTUW (SUBREG_TO_REG (i64 1), $src, sub_64), 8)))>;
} // HasVSX, HasP9Vector, IsLittleEndian

// Any Power9 VSX subtarget that supports Power9 Altivec.
let Predicates = [HasVSX, HasP9Altivec] in {
// Unsigned absolute-difference.
def : Pat<(v4i32 (abdu v4i32:$A, v4i32:$B)),
          (v4i32 (VABSDUW $A, $B))>;

def : Pat<(v8i16 (abdu v8i16:$A, v8i16:$B)),
          (v8i16 (VABSDUH $A, $B))>;

def : Pat<(v16i8 (abdu v16i8:$A, v16i8:$B)),
          (v16i8 (VABSDUB $A, $B))>;

// Signed absolute-difference.
// The Power9 VABSD* instructions are designed for unsigned integer vectors
// (byte/halfword/word). To use them for signed integer vectors, we have to
// flip the sign bits first. Flipping the sign bits of a byte/halfword vector
// would be inefficient, but for a word vector we can leverage XVNEGSP to do
// it efficiently.
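// (XVNEGSP flips the sign bit of each word, i.e. adds 0x80000000 modulo 2^32,
// which maps signed order onto unsigned order, so VABSDUW on the adjusted
// operands yields the signed absolute difference.)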
def : Pat<(v4i32 (abds v4i32:$A, v4i32:$B)),
          (v4i32 (VABSDUW (XVNEGSP $A), (XVNEGSP $B)))>;
} // HasVSX, HasP9Altivec

// Big endian 64-bit Power9 VSX subtargets with P9 Altivec support.
let Predicates = [HasVSX, HasP9Altivec, IsBigEndian, IsPPC64] in {
def : Pat<(i64 (anyext (i32 (vector_extract v16i8:$S, i64:$Idx)))),
          (VEXTUBLX $Idx, $S)>;

def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, i64:$Idx)))),
          (VEXTUHLX (RLWINM8 $Idx, 1, 28, 30), $S)>;
def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 0)))),
          (VEXTUHLX (LI8 0), $S)>;
def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 1)))),
          (VEXTUHLX (LI8 2), $S)>;
def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 2)))),
          (VEXTUHLX (LI8 4), $S)>;
def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 3)))),
          (VEXTUHLX (LI8 6), $S)>;
def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 4)))),
          (VEXTUHLX (LI8 8), $S)>;
def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 5)))),
          (VEXTUHLX (LI8 10), $S)>;
def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 6)))),
          (VEXTUHLX (LI8 12), $S)>;
def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 7)))),
          (VEXTUHLX (LI8 14), $S)>;

def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, i64:$Idx)))),
          (VEXTUWLX (RLWINM8 $Idx, 2, 28, 29), $S)>;
def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 0)))),
          (VEXTUWLX (LI8 0), $S)>;

// For extracting BE word 1, MFVSRWZ is better than VEXTUWLX
def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 1)))),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
                         (i32 VectorExtractions.LE_WORD_2), sub_32)>;
def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 2)))),
          (VEXTUWLX (LI8 8), $S)>;
def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 3)))),
          (VEXTUWLX (LI8 12), $S)>;

def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, i64:$Idx)))),
          (EXTSW (VEXTUWLX (RLWINM8 $Idx, 2, 28, 29), $S))>;
def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 0)))),
          (EXTSW (VEXTUWLX (LI8 0), $S))>;
// For extracting BE word 1, MFVSRWZ is better than VEXTUWLX
def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 1)))),
          (EXTSW (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
                                (i32 VectorExtractions.LE_WORD_2), sub_32))>;
def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 2)))),
          (EXTSW (VEXTUWLX (LI8 8), $S))>;
def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 3)))),
          (EXTSW (VEXTUWLX (LI8 12), $S))>;

def : Pat<(i32 (vector_extract v16i8:$S, i64:$Idx)),
          (i32 (EXTRACT_SUBREG (VEXTUBLX $Idx, $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 0)),
          (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 0), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 1)),
          (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 1), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 2)),
          (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 2), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 3)),
          (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 3), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 4)),
          (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 4), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 5)),
          (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 5), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 6)),
          (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 6), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 7)),
          (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 7), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 8)),
          (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 8), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 9)),
          (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 9), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 10)),
          (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 10), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 11)),
          (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 11), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 12)),
          (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 12), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 13)),
          (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 13), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 14)),
          (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 14), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 15)),
          (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 15), $S), sub_32))>;

def : Pat<(i32 (vector_extract v8i16:$S, i64:$Idx)),
          (i32 (EXTRACT_SUBREG (VEXTUHLX
                                 (RLWINM8 $Idx, 1, 28, 30), $S), sub_32))>;
def : Pat<(i32 (vector_extract v8i16:$S, 0)),
          (i32 (EXTRACT_SUBREG (VEXTUHLX (LI8 0), $S), sub_32))>;
def : Pat<(i32 (vector_extract v8i16:$S, 1)),
          (i32 (EXTRACT_SUBREG (VEXTUHLX (LI8 2), $S), sub_32))>;
def : Pat<(i32 (vector_extract v8i16:$S, 2)),
          (i32 (EXTRACT_SUBREG (VEXTUHLX (LI8 4), $S), sub_32))>;
def : Pat<(i32 (vector_extract v8i16:$S, 3)),
          (i32 (EXTRACT_SUBREG (VEXTUHLX (LI8 6), $S), sub_32))>;
def : Pat<(i32 (vector_extract v8i16:$S, 4)),
          (i32 (EXTRACT_SUBREG (VEXTUHLX (LI8 8), $S), sub_32))>;
def : Pat<(i32 (vector_extract v8i16:$S, 5)),
          (i32 (EXTRACT_SUBREG (VEXTUHLX (LI8 10), $S), sub_32))>;
def : Pat<(i32 (vector_extract v8i16:$S, 6)),
          (i32 (EXTRACT_SUBREG (VEXTUHLX (LI8 12), $S), sub_32))>;
def : Pat<(i32 (vector_extract v8i16:$S, 7)),
          (i32 (EXTRACT_SUBREG (VEXTUHLX (LI8 14), $S), sub_32))>;

def : Pat<(i32 (vector_extract v4i32:$S, i64:$Idx)),
          (i32 (EXTRACT_SUBREG (VEXTUWLX
                                 (RLWINM8 $Idx, 2, 28, 29), $S), sub_32))>;
def : Pat<(i32 (vector_extract v4i32:$S, 0)),
          (i32 (EXTRACT_SUBREG (VEXTUWLX (LI8 0), $S), sub_32))>;
// For extracting BE word 1, MFVSRWZ is better than VEXTUWLX
def : Pat<(i32 (vector_extract v4i32:$S, 1)),
          (i32 VectorExtractions.LE_WORD_2)>;
def : Pat<(i32 (vector_extract v4i32:$S, 2)),
          (i32 (EXTRACT_SUBREG (VEXTUWLX (LI8 8), $S), sub_32))>;
def : Pat<(i32 (vector_extract v4i32:$S, 3)),
          (i32 (EXTRACT_SUBREG (VEXTUWLX (LI8 12), $S), sub_32))>;

// P9 Altivec instructions that can be used to build vectors.
// They are added to PPCInstrVSX.td rather than PPCInstrAltivec.td so that
// they can compete with the complexities of the existing build vector
// patterns in this file.
def : Pat<(v2i64 (build_vector WordToDWord.BE_A0, WordToDWord.BE_A1)),
          (v2i64 (VEXTSW2D $A))>;
def : Pat<(v2i64 (build_vector HWordToDWord.BE_A0, HWordToDWord.BE_A1)),
          (v2i64 (VEXTSH2D $A))>;
def : Pat<(v4i32 (build_vector HWordToWord.BE_A0, HWordToWord.BE_A1,
                               HWordToWord.BE_A2, HWordToWord.BE_A3)),
          (v4i32 (VEXTSH2W $A))>;
def : Pat<(v4i32 (build_vector ByteToWord.BE_A0, ByteToWord.BE_A1,
                               ByteToWord.BE_A2, ByteToWord.BE_A3)),
          (v4i32 (VEXTSB2W $A))>;
def : Pat<(v2i64 (build_vector ByteToDWord.BE_A0, ByteToDWord.BE_A1)),
          (v2i64 (VEXTSB2D $A))>;
} // HasVSX, HasP9Altivec, IsBigEndian, IsPPC64

// Little endian Power9 VSX subtargets with P9 Altivec support.
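// (This block mirrors the big endian one above, using the right-indexed
// VEXTUBRX/VEXTUHRX/VEXTUWRX forms so that element indices translate
// directly into byte offsets from the right.)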
let Predicates = [HasVSX, HasP9Altivec, IsLittleEndian] in {
def : Pat<(i64 (anyext (i32 (vector_extract v16i8:$S, i64:$Idx)))),
          (VEXTUBRX $Idx, $S)>;

def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, i64:$Idx)))),
          (VEXTUHRX (RLWINM8 $Idx, 1, 28, 30), $S)>;
def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 0)))),
          (VEXTUHRX (LI8 0), $S)>;
def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 1)))),
          (VEXTUHRX (LI8 2), $S)>;
def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 2)))),
          (VEXTUHRX (LI8 4), $S)>;
def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 3)))),
          (VEXTUHRX (LI8 6), $S)>;
def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 4)))),
          (VEXTUHRX (LI8 8), $S)>;
def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 5)))),
          (VEXTUHRX (LI8 10), $S)>;
def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 6)))),
          (VEXTUHRX (LI8 12), $S)>;
def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 7)))),
          (VEXTUHRX (LI8 14), $S)>;

def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, i64:$Idx)))),
          (VEXTUWRX (RLWINM8 $Idx, 2, 28, 29), $S)>;
def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 0)))),
          (VEXTUWRX (LI8 0), $S)>;
def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 1)))),
          (VEXTUWRX (LI8 4), $S)>;
// For extracting LE word 2, MFVSRWZ is better than VEXTUWRX
def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 2)))),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
                         (i32 VectorExtractions.LE_WORD_2), sub_32)>;
def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 3)))),
          (VEXTUWRX (LI8 12), $S)>;

def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, i64:$Idx)))),
          (EXTSW (VEXTUWRX (RLWINM8 $Idx, 2, 28, 29), $S))>;
def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 0)))),
          (EXTSW (VEXTUWRX (LI8 0), $S))>;
def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 1)))),
          (EXTSW (VEXTUWRX (LI8 4), $S))>;
// For extracting LE word 2, MFVSRWZ is better than VEXTUWRX
def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 2)))),
          (EXTSW (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
                                (i32 VectorExtractions.LE_WORD_2), sub_32))>;
def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 3)))),
          (EXTSW (VEXTUWRX (LI8 12), $S))>;

def : Pat<(i32 (vector_extract v16i8:$S, i64:$Idx)),
          (i32 (EXTRACT_SUBREG (VEXTUBRX $Idx, $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 0)),
          (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 0), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 1)),
          (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 1), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 2)),
          (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 2), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 3)),
          (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 3), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 4)),
          (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 4), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 5)),
          (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 5), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 6)),
          (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 6), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 7)),
          (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 7), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 8)),
          (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 8), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 9)),
          (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 9), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 10)),
          (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 10), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 11)),
          (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 11), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 12)),
          (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 12), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 13)),
          (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 13), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 14)),
          (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 14), $S), sub_32))>;
def : Pat<(i32 (vector_extract v16i8:$S, 15)),
          (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 15), $S), sub_32))>;

def : Pat<(i32 (vector_extract v8i16:$S, i64:$Idx)),
          (i32 (EXTRACT_SUBREG (VEXTUHRX
                                 (RLWINM8 $Idx, 1, 28, 30), $S), sub_32))>;
def : Pat<(i32 (vector_extract v8i16:$S, 0)),
          (i32 (EXTRACT_SUBREG (VEXTUHRX (LI8 0), $S), sub_32))>;
def : Pat<(i32 (vector_extract v8i16:$S, 1)),
          (i32 (EXTRACT_SUBREG (VEXTUHRX (LI8 2), $S), sub_32))>;
def : Pat<(i32 (vector_extract v8i16:$S, 2)),
          (i32 (EXTRACT_SUBREG (VEXTUHRX (LI8 4), $S), sub_32))>;
def : Pat<(i32 (vector_extract v8i16:$S, 3)),
          (i32 (EXTRACT_SUBREG (VEXTUHRX (LI8 6), $S), sub_32))>;
def : Pat<(i32 (vector_extract v8i16:$S, 4)),
          (i32 (EXTRACT_SUBREG (VEXTUHRX (LI8 8), $S), sub_32))>;
def : Pat<(i32 (vector_extract v8i16:$S, 5)),
          (i32 (EXTRACT_SUBREG (VEXTUHRX (LI8 10), $S), sub_32))>;
def : Pat<(i32 (vector_extract v8i16:$S, 6)),
          (i32 (EXTRACT_SUBREG (VEXTUHRX (LI8 12), $S), sub_32))>;
def : Pat<(i32 (vector_extract v8i16:$S, 7)),
          (i32 (EXTRACT_SUBREG (VEXTUHRX (LI8 14), $S), sub_32))>;

def : Pat<(i32 (vector_extract v4i32:$S, i64:$Idx)),
          (i32 (EXTRACT_SUBREG (VEXTUWRX
                                 (RLWINM8 $Idx, 2, 28, 29), $S), sub_32))>;
def : Pat<(i32 (vector_extract v4i32:$S, 0)),
          (i32 (EXTRACT_SUBREG (VEXTUWRX (LI8 0), $S), sub_32))>;
def : Pat<(i32 (vector_extract v4i32:$S, 1)),
          (i32 (EXTRACT_SUBREG (VEXTUWRX (LI8 4), $S), sub_32))>;
// For extracting LE word 2, MFVSRWZ is better than VEXTUWRX
def : Pat<(i32 (vector_extract v4i32:$S, 2)),
          (i32 VectorExtractions.LE_WORD_2)>;
def : Pat<(i32 (vector_extract v4i32:$S, 3)),
          (i32 (EXTRACT_SUBREG (VEXTUWRX (LI8 12), $S), sub_32))>;

// P9 Altivec instructions that can be used to build vectors.
// They are added to PPCInstrVSX.td rather than PPCInstrAltivec.td so that
// they can compete with the complexities of the existing build vector
// patterns in this file.
def : Pat<(v2i64 (build_vector WordToDWord.LE_A0, WordToDWord.LE_A1)),
          (v2i64 (VEXTSW2D $A))>;
def : Pat<(v2i64 (build_vector HWordToDWord.LE_A0, HWordToDWord.LE_A1)),
          (v2i64 (VEXTSH2D $A))>;
def : Pat<(v4i32 (build_vector HWordToWord.LE_A0, HWordToWord.LE_A1,
                               HWordToWord.LE_A2, HWordToWord.LE_A3)),
          (v4i32 (VEXTSH2W $A))>;
def : Pat<(v4i32 (build_vector ByteToWord.LE_A0, ByteToWord.LE_A1,
                               ByteToWord.LE_A2, ByteToWord.LE_A3)),
          (v4i32 (VEXTSB2W $A))>;
def : Pat<(v2i64 (build_vector ByteToDWord.LE_A0, ByteToDWord.LE_A1)),
          (v2i64 (VEXTSB2D $A))>;
} // HasVSX, HasP9Altivec, IsLittleEndian

// Big endian 64-bit VSX subtarget that supports additional direct moves from
// ISA 3.0.
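// MTVSRDD concatenates two GPR doublewords into one VSR, and each RLDIMI
// below merges two 32-bit values into a single doubleword (inserting one
// word into the high half of the other), so a variable v4i32 build_vector
// costs two RLDIMIs plus one MTVSRDD.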
let Predicates = [HasVSX, IsISA3_0, HasDirectMove, IsBigEndian, IsPPC64] in {
def : Pat<(i64 (extractelt v2i64:$A, 1)),
          (i64 (MFVSRLD $A))>;
// Better way to build integer vectors if we have MTVSRDD. Big endian.
def : Pat<(v2i64 (build_vector i64:$rB, i64:$rA)),
          (v2i64 (MTVSRDD $rB, $rA))>;
def : Pat<(v4i32 (build_vector i32:$A, i32:$B, i32:$C, i32:$D)),
          (MTVSRDD
             (RLDIMI AnyExts.B, AnyExts.A, 32, 0),
             (RLDIMI AnyExts.D, AnyExts.C, 32, 0))>;

def : Pat<(f128 (PPCbuild_fp128 i64:$rB, i64:$rA)),
          (f128 (COPY_TO_REGCLASS (MTVSRDD $rB, $rA), VRRC))>;
} // HasVSX, IsISA3_0, HasDirectMove, IsBigEndian, IsPPC64

// Little endian VSX subtarget that supports direct moves from ISA 3.0.
let Predicates = [HasVSX, IsISA3_0, HasDirectMove, IsLittleEndian] in {
def : Pat<(i64 (extractelt v2i64:$A, 0)),
          (i64 (MFVSRLD $A))>;
// Better way to build integer vectors if we have MTVSRDD. Little endian.
def : Pat<(v2i64 (build_vector i64:$rA, i64:$rB)),
          (v2i64 (MTVSRDD $rB, $rA))>;
def : Pat<(v4i32 (build_vector i32:$A, i32:$B, i32:$C, i32:$D)),
          (MTVSRDD
             (RLDIMI AnyExts.C, AnyExts.D, 32, 0),
             (RLDIMI AnyExts.A, AnyExts.B, 32, 0))>;

def : Pat<(f128 (PPCbuild_fp128 i64:$rA, i64:$rB)),
          (f128 (COPY_TO_REGCLASS (MTVSRDD $rB, $rA), VRRC))>;
} // HasVSX, IsISA3_0, HasDirectMove, IsLittleEndian
} // AddedComplexity = 400

//---------------------------- Instruction aliases ---------------------------//
def : InstAlias<"xvmovdp $XT, $XB",
                (XVCPSGNDP vsrc:$XT, vsrc:$XB, vsrc:$XB)>;
def : InstAlias<"xvmovsp $XT, $XB",
                (XVCPSGNSP vsrc:$XT, vsrc:$XB, vsrc:$XB)>;

// Certain versions of the AIX assembler may misassemble these mnemonics.
let Predicates = [ModernAs] in {
  def : InstAlias<"xxspltd $XT, $XB, 0",
                  (XXPERMDI vsrc:$XT, vsrc:$XB, vsrc:$XB, 0)>;
  def : InstAlias<"xxspltd $XT, $XB, 1",
                  (XXPERMDI vsrc:$XT, vsrc:$XB, vsrc:$XB, 3)>;
  def : InstAlias<"xxspltd $XT, $XB, 0",
                  (XXPERMDIs vsrc:$XT, vsfrc:$XB, 0)>;
  def : InstAlias<"xxspltd $XT, $XB, 1",
                  (XXPERMDIs vsrc:$XT, vsfrc:$XB, 3)>;
}

def : InstAlias<"xxmrghd $XT, $XA, $XB",
                (XXPERMDI vsrc:$XT, vsrc:$XA, vsrc:$XB, 0)>;
def : InstAlias<"xxmrgld $XT, $XA, $XB",
                (XXPERMDI vsrc:$XT, vsrc:$XA, vsrc:$XB, 3)>;
def : InstAlias<"xxswapd $XT, $XB",
                (XXPERMDI vsrc:$XT, vsrc:$XB, vsrc:$XB, 2)>;
def : InstAlias<"xxswapd $XT, $XB",
                (XXPERMDIs vsrc:$XT, vsfrc:$XB, 2)>;
def : InstAlias<"mfvrd $rA, $XT",
                (MFVRD g8rc:$rA, vrrc:$XT), 0>;
def : InstAlias<"mffprd $rA, $src",
                (MFVSRD g8rc:$rA, f8rc:$src)>;
def : InstAlias<"mtvrd $XT, $rA",
                (MTVRD vrrc:$XT, g8rc:$rA), 0>;
def : InstAlias<"mtfprd $dst, $rA",
                (MTVSRD f8rc:$dst, g8rc:$rA)>;
def : InstAlias<"mfvrwz $rA, $XT",
                (MFVRWZ gprc:$rA, vrrc:$XT), 0>;
def : InstAlias<"mffprwz $rA, $src",
                (MFVSRWZ gprc:$rA, f8rc:$src)>;
def : InstAlias<"mtvrwa $XT, $rA",
                (MTVRWA vrrc:$XT, gprc:$rA), 0>;
def : InstAlias<"mtfprwa $dst, $rA",
                (MTVSRWA f8rc:$dst, gprc:$rA)>;
def : InstAlias<"mtvrwz $XT, $rA",
                (MTVRWZ vrrc:$XT, gprc:$rA), 0>;
def : InstAlias<"mtfprwz $dst, $rA",
                (MTVSRWZ f8rc:$dst, gprc:$rA)>;