//===- PPCInstrVSX.td - The PowerPC VSX Extension --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the VSX extension to the PowerPC instruction set.
//
//===----------------------------------------------------------------------===//

// *********************************** NOTE ***********************************
// ** For POWER8 Little Endian, the VSX swap optimization relies on knowing  **
// ** which VMX and VSX instructions are lane-sensitive and which are not.   **
// ** A lane-sensitive instruction relies, implicitly or explicitly, on      **
// ** whether lanes are numbered from left to right.  An instruction like    **
// ** VADDFP is not lane-sensitive, because each lane of the result vector   **
// ** relies only on the corresponding lane of the source vectors.  However, **
// ** an instruction like VMULESB is lane-sensitive, because "even" and      **
// ** "odd" lanes are different for big-endian and little-endian numbering.  **
// **                                                                        **
// ** When adding new VMX and VSX instructions, please consider whether they **
// ** are lane-sensitive.  If so, they must be added to a switch statement   **
// ** in PPCVSXSwapRemoval::gatherVectorInstructions().                      **
// ****************************************************************************
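// For example, with sixteen byte lanes, big-endian element 0 is the most
// significant byte while little-endian element 0 is the least significant
// byte, so big-endian lane i is little-endian lane 15 - i.  The "even" byte
// lanes VMULESB reads under one numbering are therefore the "odd" lanes under
// the other, which is what makes it lane-sensitive.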

def PPCRegVSRCAsmOperand : AsmOperandClass {
  let Name = "RegVSRC"; let PredicateMethod = "isVSRegNumber";
}
def vsrc : RegisterOperand<VSRC> {
  let ParserMatchClass = PPCRegVSRCAsmOperand;
}

def PPCRegVSFRCAsmOperand : AsmOperandClass {
  let Name = "RegVSFRC"; let PredicateMethod = "isVSRegNumber";
}
def vsfrc : RegisterOperand<VSFRC> {
  let ParserMatchClass = PPCRegVSFRCAsmOperand;
}

def PPCRegVSSRCAsmOperand : AsmOperandClass {
  let Name = "RegVSSRC"; let PredicateMethod = "isVSRegNumber";
}
def vssrc : RegisterOperand<VSSRC> {
  let ParserMatchClass = PPCRegVSSRCAsmOperand;
}

def PPCRegSPILLTOVSRRCAsmOperand : AsmOperandClass {
  let Name = "RegSPILLTOVSRRC"; let PredicateMethod = "isVSRegNumber";
}

def spilltovsrrc : RegisterOperand<SPILLTOVSRRC> {
  let ParserMatchClass = PPCRegSPILLTOVSRRCAsmOperand;
}

def SDT_PPCldvsxlh : SDTypeProfile<1, 1, [
  SDTCisVT<0, v4f32>, SDTCisPtrTy<1>
]>;

def SDT_PPCfpexth : SDTypeProfile<1, 2, [
  SDTCisVT<0, v2f64>, SDTCisVT<1, v4f32>, SDTCisPtrTy<2>
]>;

def SDT_PPCldsplat : SDTypeProfile<1, 1, [
  SDTCisVec<0>, SDTCisPtrTy<1>
]>;

// Little-endian-specific nodes.
def SDT_PPClxvd2x : SDTypeProfile<1, 1, [
  SDTCisVT<0, v2f64>, SDTCisPtrTy<1>
]>;
def SDT_PPCstxvd2x : SDTypeProfile<0, 2, [
  SDTCisVT<0, v2f64>, SDTCisPtrTy<1>
]>;
def SDT_PPCxxswapd : SDTypeProfile<1, 1, [
  SDTCisSameAs<0, 1>
]>;
def SDTVecConv : SDTypeProfile<1, 2, [
  SDTCisVec<0>, SDTCisVec<1>, SDTCisPtrTy<2>
]>;
def SDTVabsd : SDTypeProfile<1, 3, [
  SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<3, i32>
]>;
def SDT_PPCld_vec_be : SDTypeProfile<1, 1, [
  SDTCisVec<0>, SDTCisPtrTy<1>
]>;
def SDT_PPCst_vec_be : SDTypeProfile<0, 2, [
  SDTCisVec<0>, SDTCisPtrTy<1>
]>;

def PPClxvd2x  : SDNode<"PPCISD::LXVD2X", SDT_PPClxvd2x,
                        [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def PPCstxvd2x : SDNode<"PPCISD::STXVD2X", SDT_PPCstxvd2x,
                        [SDNPHasChain, SDNPMayStore]>;
def PPCld_vec_be  : SDNode<"PPCISD::LOAD_VEC_BE", SDT_PPCld_vec_be,
                        [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def PPCst_vec_be : SDNode<"PPCISD::STORE_VEC_BE", SDT_PPCst_vec_be,
                        [SDNPHasChain, SDNPMayStore]>;
def PPCxxswapd : SDNode<"PPCISD::XXSWAPD", SDT_PPCxxswapd, [SDNPHasChain]>;
def PPCmfvsr : SDNode<"PPCISD::MFVSR", SDTUnaryOp, []>;
def PPCmtvsra : SDNode<"PPCISD::MTVSRA", SDTUnaryOp, []>;
def PPCmtvsrz : SDNode<"PPCISD::MTVSRZ", SDTUnaryOp, []>;
def PPCsvec2fp : SDNode<"PPCISD::SINT_VEC_TO_FP", SDTVecConv, []>;
def PPCuvec2fp : SDNode<"PPCISD::UINT_VEC_TO_FP", SDTVecConv, []>;
def PPCswapNoChain : SDNode<"PPCISD::SWAP_NO_CHAIN", SDT_PPCxxswapd>;
def PPCvabsd : SDNode<"PPCISD::VABSD", SDTVabsd, []>;

def PPCfpexth : SDNode<"PPCISD::FP_EXTEND_HALF", SDT_PPCfpexth, []>;
def PPCldvsxlh : SDNode<"PPCISD::LD_VSX_LH", SDT_PPCldvsxlh,
                        [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def PPCldsplat : SDNode<"PPCISD::LD_SPLAT", SDT_PPCldsplat,
                        [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;

multiclass XX3Form_Rcr<bits<6> opcode, bits<7> xo, string asmbase,
                    string asmstr, InstrItinClass itin, Intrinsic Int,
                    ValueType OutTy, ValueType InTy> {
  let BaseName = asmbase in {
    def NAME : XX3Form_Rc<opcode, xo, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                       !strconcat(asmbase, !strconcat(" ", asmstr)), itin,
                       [(set OutTy:$XT, (Int InTy:$XA, InTy:$XB))]>;
    let Defs = [CR6] in
    def o    : XX3Form_Rc<opcode, xo, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                       !strconcat(asmbase, !strconcat(". ", asmstr)), itin,
                       [(set InTy:$XT,
                                (InTy (PPCvcmp_o InTy:$XA, InTy:$XB, xo)))]>,
                       isDOT;
  }
}

// Instruction form with a single input register for instructions such as
// XXPERMDI. The reason for defining this is that specifying two chained
// operands (such as loads) as inputs to an instruction results in both
// chained operations being performed rather than being coalesced into a
// single register - even though the source memory location is the same.
// This form simply forces the instruction to use the same register for both
// inputs.
// For example, an output DAG such as this:
//   (XXPERMDI (LXSIBZX xoaddr:$src), (LXSIBZX xoaddr:$src), 0)
// would result in two load instructions being emitted and used as separate
// inputs to the XXPERMDI instruction.
class XX3Form_2s<bits<6> opcode, bits<5> xo, dag OOL, dag IOL, string asmstr,
                 InstrItinClass itin, list<dag> pattern>
  : XX3Form_2<opcode, xo, OOL, IOL, asmstr, itin, pattern> {
    let XB = XA;
}
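// The concrete users of this form are XXPERMDIs and XXSLDWIs below, each of
// which takes a single vsfrc input and implicitly reuses it for both the XA
// and XB fields of the encoding.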

def HasVSX : Predicate<"PPCSubTarget->hasVSX()">;
def IsLittleEndian : Predicate<"PPCSubTarget->isLittleEndian()">;
def IsBigEndian : Predicate<"!PPCSubTarget->isLittleEndian()">;
def HasOnlySwappingMemOps : Predicate<"!PPCSubTarget->hasP9Vector()">;

let Predicates = [HasVSX] in {
let AddedComplexity = 400 in { // Prefer VSX patterns over non-VSX patterns.
let hasSideEffects = 0 in { // VSX instructions don't have side effects.
let Uses = [RM] in {

  // Load indexed instructions
  let mayLoad = 1, mayStore = 0 in {
    let CodeSize = 3 in
    def LXSDX : XX1Form_memOp<31, 588,
                        (outs vsfrc:$XT), (ins memrr:$src),
                        "lxsdx $XT, $src", IIC_LdStLFD,
                        []>;

    // Pseudo instruction XFLOADf64 will be expanded to LXSDX or LFDX later
    let CodeSize = 3 in
      def XFLOADf64  : PseudoXFormMemOp<(outs vsfrc:$XT), (ins memrr:$src),
                              "#XFLOADf64",
                              [(set f64:$XT, (load xoaddr:$src))]>;

    let Predicates = [HasVSX, HasOnlySwappingMemOps] in
    def LXVD2X : XX1Form_memOp<31, 844,
                         (outs vsrc:$XT), (ins memrr:$src),
                         "lxvd2x $XT, $src", IIC_LdStLFD,
                         [(set v2f64:$XT, (int_ppc_vsx_lxvd2x xoaddr:$src))]>;

    def LXVDSX : XX1Form_memOp<31, 332,
                         (outs vsrc:$XT), (ins memrr:$src),
                         "lxvdsx $XT, $src", IIC_LdStLFD, []>;

    let Predicates = [HasVSX, HasOnlySwappingMemOps] in
    def LXVW4X : XX1Form_memOp<31, 780,
                         (outs vsrc:$XT), (ins memrr:$src),
                         "lxvw4x $XT, $src", IIC_LdStLFD,
                         []>;
  } // mayLoad

  // Store indexed instructions
  let mayStore = 1, mayLoad = 0 in {
    let CodeSize = 3 in
    def STXSDX : XX1Form_memOp<31, 716,
                        (outs), (ins vsfrc:$XT, memrr:$dst),
                        "stxsdx $XT, $dst", IIC_LdStSTFD,
                        []>;

    // Pseudo instruction XFSTOREf64 will be expanded to STXSDX or STFDX later
    let CodeSize = 3 in
      def XFSTOREf64 : PseudoXFormMemOp<(outs), (ins vsfrc:$XT, memrr:$dst),
                              "#XFSTOREf64",
                              [(store f64:$XT, xoaddr:$dst)]>;

    let Predicates = [HasVSX, HasOnlySwappingMemOps] in {
    // The behaviour of this instruction is endianness-specific so we provide no
    // pattern to match it without considering endianness.
    def STXVD2X : XX1Form_memOp<31, 972,
                         (outs), (ins vsrc:$XT, memrr:$dst),
                         "stxvd2x $XT, $dst", IIC_LdStSTFD,
                         []>;

    def STXVW4X : XX1Form_memOp<31, 908,
                         (outs), (ins vsrc:$XT, memrr:$dst),
                         "stxvw4x $XT, $dst", IIC_LdStSTFD,
                         []>;
    }
  } // mayStore

  // Add/Mul Instructions
  let isCommutable = 1 in {
    def XSADDDP : XX3Form<60, 32,
                          (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
                          "xsadddp $XT, $XA, $XB", IIC_VecFP,
                          [(set f64:$XT, (fadd f64:$XA, f64:$XB))]>;
    def XSMULDP : XX3Form<60, 48,
                          (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
                          "xsmuldp $XT, $XA, $XB", IIC_VecFP,
                          [(set f64:$XT, (fmul f64:$XA, f64:$XB))]>;

    def XVADDDP : XX3Form<60, 96,
                          (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                          "xvadddp $XT, $XA, $XB", IIC_VecFP,
                          [(set v2f64:$XT, (fadd v2f64:$XA, v2f64:$XB))]>;

    def XVADDSP : XX3Form<60, 64,
                          (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                          "xvaddsp $XT, $XA, $XB", IIC_VecFP,
                          [(set v4f32:$XT, (fadd v4f32:$XA, v4f32:$XB))]>;

    def XVMULDP : XX3Form<60, 112,
                          (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                          "xvmuldp $XT, $XA, $XB", IIC_VecFP,
                          [(set v2f64:$XT, (fmul v2f64:$XA, v2f64:$XB))]>;

    def XVMULSP : XX3Form<60, 80,
                          (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                          "xvmulsp $XT, $XA, $XB", IIC_VecFP,
                          [(set v4f32:$XT, (fmul v4f32:$XA, v4f32:$XB))]>;
  }

  // Subtract Instructions
  def XSSUBDP : XX3Form<60, 40,
                        (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
                        "xssubdp $XT, $XA, $XB", IIC_VecFP,
                        [(set f64:$XT, (fsub f64:$XA, f64:$XB))]>;

  def XVSUBDP : XX3Form<60, 104,
                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                        "xvsubdp $XT, $XA, $XB", IIC_VecFP,
                        [(set v2f64:$XT, (fsub v2f64:$XA, v2f64:$XB))]>;
  def XVSUBSP : XX3Form<60, 72,
                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                        "xvsubsp $XT, $XA, $XB", IIC_VecFP,
                        [(set v4f32:$XT, (fsub v4f32:$XA, v4f32:$XB))]>;

  // FMA Instructions
  let BaseName = "XSMADDADP" in {
  let isCommutable = 1 in
  def XSMADDADP : XX3Form<60, 33,
                          (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
                          "xsmaddadp $XT, $XA, $XB", IIC_VecFP,
                          [(set f64:$XT, (fma f64:$XA, f64:$XB, f64:$XTi))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XSMADDMDP : XX3Form<60, 41,
                          (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
                          "xsmaddmdp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XSMSUBADP" in {
  let isCommutable = 1 in
  def XSMSUBADP : XX3Form<60, 49,
                          (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
                          "xsmsubadp $XT, $XA, $XB", IIC_VecFP,
                          [(set f64:$XT, (fma f64:$XA, f64:$XB, (fneg f64:$XTi)))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XSMSUBMDP : XX3Form<60, 57,
                          (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
                          "xsmsubmdp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XSNMADDADP" in {
  let isCommutable = 1 in
  def XSNMADDADP : XX3Form<60, 161,
                          (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
                          "xsnmaddadp $XT, $XA, $XB", IIC_VecFP,
                          [(set f64:$XT, (fneg (fma f64:$XA, f64:$XB, f64:$XTi)))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XSNMADDMDP : XX3Form<60, 169,
                          (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
                          "xsnmaddmdp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XSNMSUBADP" in {
  let isCommutable = 1 in
  def XSNMSUBADP : XX3Form<60, 177,
                          (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
                          "xsnmsubadp $XT, $XA, $XB", IIC_VecFP,
                          [(set f64:$XT, (fneg (fma f64:$XA, f64:$XB, (fneg f64:$XTi))))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XSNMSUBMDP : XX3Form<60, 185,
                          (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
                          "xsnmsubmdp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XVMADDADP" in {
  let isCommutable = 1 in
  def XVMADDADP : XX3Form<60, 97,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvmaddadp $XT, $XA, $XB", IIC_VecFP,
                          [(set v2f64:$XT, (fma v2f64:$XA, v2f64:$XB, v2f64:$XTi))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XVMADDMDP : XX3Form<60, 105,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvmaddmdp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XVMADDASP" in {
  let isCommutable = 1 in
  def XVMADDASP : XX3Form<60, 65,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvmaddasp $XT, $XA, $XB", IIC_VecFP,
                          [(set v4f32:$XT, (fma v4f32:$XA, v4f32:$XB, v4f32:$XTi))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XVMADDMSP : XX3Form<60, 73,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvmaddmsp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XVMSUBADP" in {
  let isCommutable = 1 in
  def XVMSUBADP : XX3Form<60, 113,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvmsubadp $XT, $XA, $XB", IIC_VecFP,
                          [(set v2f64:$XT, (fma v2f64:$XA, v2f64:$XB, (fneg v2f64:$XTi)))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XVMSUBMDP : XX3Form<60, 121,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvmsubmdp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XVMSUBASP" in {
  let isCommutable = 1 in
  def XVMSUBASP : XX3Form<60, 81,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvmsubasp $XT, $XA, $XB", IIC_VecFP,
                          [(set v4f32:$XT, (fma v4f32:$XA, v4f32:$XB, (fneg v4f32:$XTi)))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XVMSUBMSP : XX3Form<60, 89,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvmsubmsp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XVNMADDADP" in {
  let isCommutable = 1 in
  def XVNMADDADP : XX3Form<60, 225,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvnmaddadp $XT, $XA, $XB", IIC_VecFP,
                          [(set v2f64:$XT, (fneg (fma v2f64:$XA, v2f64:$XB, v2f64:$XTi)))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XVNMADDMDP : XX3Form<60, 233,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvnmaddmdp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XVNMADDASP" in {
  let isCommutable = 1 in
  def XVNMADDASP : XX3Form<60, 193,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvnmaddasp $XT, $XA, $XB", IIC_VecFP,
                          [(set v4f32:$XT, (fneg (fma v4f32:$XA, v4f32:$XB, v4f32:$XTi)))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XVNMADDMSP : XX3Form<60, 201,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvnmaddmsp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XVNMSUBADP" in {
  let isCommutable = 1 in
  def XVNMSUBADP : XX3Form<60, 241,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvnmsubadp $XT, $XA, $XB", IIC_VecFP,
                          [(set v2f64:$XT, (fneg (fma v2f64:$XA, v2f64:$XB, (fneg v2f64:$XTi))))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XVNMSUBMDP : XX3Form<60, 249,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvnmsubmdp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  let BaseName = "XVNMSUBASP" in {
  let isCommutable = 1 in
  def XVNMSUBASP : XX3Form<60, 209,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvnmsubasp $XT, $XA, $XB", IIC_VecFP,
                          [(set v4f32:$XT, (fneg (fma v4f32:$XA, v4f32:$XB, (fneg v4f32:$XTi))))]>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  let IsVSXFMAAlt = 1 in
  def XVNMSUBMSP : XX3Form<60, 217,
                          (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
                          "xvnmsubmsp $XT, $XA, $XB", IIC_VecFP, []>,
                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
                          AltVSXFMARel;
  }

  // Division Instructions
  def XSDIVDP : XX3Form<60, 56,
                        (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
                        "xsdivdp $XT, $XA, $XB", IIC_FPDivD,
                        [(set f64:$XT, (fdiv f64:$XA, f64:$XB))]>;
  def XSSQRTDP : XX2Form<60, 75,
                        (outs vsfrc:$XT), (ins vsfrc:$XB),
                        "xssqrtdp $XT, $XB", IIC_FPSqrtD,
                        [(set f64:$XT, (fsqrt f64:$XB))]>;

  def XSREDP : XX2Form<60, 90,
                        (outs vsfrc:$XT), (ins vsfrc:$XB),
                        "xsredp $XT, $XB", IIC_VecFP,
                        [(set f64:$XT, (PPCfre f64:$XB))]>;
  def XSRSQRTEDP : XX2Form<60, 74,
                           (outs vsfrc:$XT), (ins vsfrc:$XB),
                           "xsrsqrtedp $XT, $XB", IIC_VecFP,
                           [(set f64:$XT, (PPCfrsqrte f64:$XB))]>;

  def XSTDIVDP : XX3Form_1<60, 61,
                         (outs crrc:$crD), (ins vsfrc:$XA, vsfrc:$XB),
                         "xstdivdp $crD, $XA, $XB", IIC_FPCompare, []>;
  def XSTSQRTDP : XX2Form_1<60, 106,
                          (outs crrc:$crD), (ins vsfrc:$XB),
                          "xstsqrtdp $crD, $XB", IIC_FPCompare, []>;

  def XVDIVDP : XX3Form<60, 120,
                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                        "xvdivdp $XT, $XA, $XB", IIC_FPDivD,
                        [(set v2f64:$XT, (fdiv v2f64:$XA, v2f64:$XB))]>;
  def XVDIVSP : XX3Form<60, 88,
                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                        "xvdivsp $XT, $XA, $XB", IIC_FPDivS,
                        [(set v4f32:$XT, (fdiv v4f32:$XA, v4f32:$XB))]>;

  def XVSQRTDP : XX2Form<60, 203,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvsqrtdp $XT, $XB", IIC_FPSqrtD,
                        [(set v2f64:$XT, (fsqrt v2f64:$XB))]>;
  def XVSQRTSP : XX2Form<60, 139,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvsqrtsp $XT, $XB", IIC_FPSqrtS,
                        [(set v4f32:$XT, (fsqrt v4f32:$XB))]>;

  def XVTDIVDP : XX3Form_1<60, 125,
                         (outs crrc:$crD), (ins vsrc:$XA, vsrc:$XB),
                         "xvtdivdp $crD, $XA, $XB", IIC_FPCompare, []>;
  def XVTDIVSP : XX3Form_1<60, 93,
                         (outs crrc:$crD), (ins vsrc:$XA, vsrc:$XB),
                         "xvtdivsp $crD, $XA, $XB", IIC_FPCompare, []>;

  def XVTSQRTDP : XX2Form_1<60, 234,
                          (outs crrc:$crD), (ins vsrc:$XB),
                          "xvtsqrtdp $crD, $XB", IIC_FPCompare, []>;
  def XVTSQRTSP : XX2Form_1<60, 170,
                          (outs crrc:$crD), (ins vsrc:$XB),
                          "xvtsqrtsp $crD, $XB", IIC_FPCompare, []>;

  def XVREDP : XX2Form<60, 218,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvredp $XT, $XB", IIC_VecFP,
                        [(set v2f64:$XT, (PPCfre v2f64:$XB))]>;
  def XVRESP : XX2Form<60, 154,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvresp $XT, $XB", IIC_VecFP,
                        [(set v4f32:$XT, (PPCfre v4f32:$XB))]>;

  def XVRSQRTEDP : XX2Form<60, 202,
                           (outs vsrc:$XT), (ins vsrc:$XB),
                           "xvrsqrtedp $XT, $XB", IIC_VecFP,
                           [(set v2f64:$XT, (PPCfrsqrte v2f64:$XB))]>;
  def XVRSQRTESP : XX2Form<60, 138,
                           (outs vsrc:$XT), (ins vsrc:$XB),
                           "xvrsqrtesp $XT, $XB", IIC_VecFP,
                           [(set v4f32:$XT, (PPCfrsqrte v4f32:$XB))]>;

  // Compare Instructions
  def XSCMPODP : XX3Form_1<60, 43,
                           (outs crrc:$crD), (ins vsfrc:$XA, vsfrc:$XB),
                           "xscmpodp $crD, $XA, $XB", IIC_FPCompare, []>;
  def XSCMPUDP : XX3Form_1<60, 35,
                           (outs crrc:$crD), (ins vsfrc:$XA, vsfrc:$XB),
                           "xscmpudp $crD, $XA, $XB", IIC_FPCompare, []>;

  defm XVCMPEQDP : XX3Form_Rcr<60, 99,
                             "xvcmpeqdp", "$XT, $XA, $XB", IIC_VecFPCompare,
                             int_ppc_vsx_xvcmpeqdp, v2i64, v2f64>;
  defm XVCMPEQSP : XX3Form_Rcr<60, 67,
                             "xvcmpeqsp", "$XT, $XA, $XB", IIC_VecFPCompare,
                             int_ppc_vsx_xvcmpeqsp, v4i32, v4f32>;
  defm XVCMPGEDP : XX3Form_Rcr<60, 115,
                             "xvcmpgedp", "$XT, $XA, $XB", IIC_VecFPCompare,
                             int_ppc_vsx_xvcmpgedp, v2i64, v2f64>;
  defm XVCMPGESP : XX3Form_Rcr<60, 83,
                             "xvcmpgesp", "$XT, $XA, $XB", IIC_VecFPCompare,
                             int_ppc_vsx_xvcmpgesp, v4i32, v4f32>;
  defm XVCMPGTDP : XX3Form_Rcr<60, 107,
                             "xvcmpgtdp", "$XT, $XA, $XB", IIC_VecFPCompare,
                             int_ppc_vsx_xvcmpgtdp, v2i64, v2f64>;
  defm XVCMPGTSP : XX3Form_Rcr<60, 75,
                             "xvcmpgtsp", "$XT, $XA, $XB", IIC_VecFPCompare,
                             int_ppc_vsx_xvcmpgtsp, v4i32, v4f32>;

  // Move Instructions
  def XSABSDP : XX2Form<60, 345,
                      (outs vsfrc:$XT), (ins vsfrc:$XB),
                      "xsabsdp $XT, $XB", IIC_VecFP,
                      [(set f64:$XT, (fabs f64:$XB))]>;
  def XSNABSDP : XX2Form<60, 361,
                      (outs vsfrc:$XT), (ins vsfrc:$XB),
                      "xsnabsdp $XT, $XB", IIC_VecFP,
                      [(set f64:$XT, (fneg (fabs f64:$XB)))]>;
  def XSNEGDP : XX2Form<60, 377,
                      (outs vsfrc:$XT), (ins vsfrc:$XB),
                      "xsnegdp $XT, $XB", IIC_VecFP,
                      [(set f64:$XT, (fneg f64:$XB))]>;
  def XSCPSGNDP : XX3Form<60, 176,
                      (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
                      "xscpsgndp $XT, $XA, $XB", IIC_VecFP,
                      [(set f64:$XT, (fcopysign f64:$XB, f64:$XA))]>;

  def XVABSDP : XX2Form<60, 473,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvabsdp $XT, $XB", IIC_VecFP,
                      [(set v2f64:$XT, (fabs v2f64:$XB))]>;

  def XVABSSP : XX2Form<60, 409,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvabssp $XT, $XB", IIC_VecFP,
                      [(set v4f32:$XT, (fabs v4f32:$XB))]>;

  def XVCPSGNDP : XX3Form<60, 240,
                      (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                      "xvcpsgndp $XT, $XA, $XB", IIC_VecFP,
                      [(set v2f64:$XT, (fcopysign v2f64:$XB, v2f64:$XA))]>;
  def XVCPSGNSP : XX3Form<60, 208,
                      (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                      "xvcpsgnsp $XT, $XA, $XB", IIC_VecFP,
                      [(set v4f32:$XT, (fcopysign v4f32:$XB, v4f32:$XA))]>;

  def XVNABSDP : XX2Form<60, 489,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvnabsdp $XT, $XB", IIC_VecFP,
                      [(set v2f64:$XT, (fneg (fabs v2f64:$XB)))]>;
  def XVNABSSP : XX2Form<60, 425,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvnabssp $XT, $XB", IIC_VecFP,
                      [(set v4f32:$XT, (fneg (fabs v4f32:$XB)))]>;

  def XVNEGDP : XX2Form<60, 505,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvnegdp $XT, $XB", IIC_VecFP,
                      [(set v2f64:$XT, (fneg v2f64:$XB))]>;
  def XVNEGSP : XX2Form<60, 441,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvnegsp $XT, $XB", IIC_VecFP,
                      [(set v4f32:$XT, (fneg v4f32:$XB))]>;

  // Conversion Instructions
  def XSCVDPSP : XX2Form<60, 265,
                      (outs vsfrc:$XT), (ins vsfrc:$XB),
                      "xscvdpsp $XT, $XB", IIC_VecFP, []>;
  def XSCVDPSXDS : XX2Form<60, 344,
                      (outs vsfrc:$XT), (ins vsfrc:$XB),
                      "xscvdpsxds $XT, $XB", IIC_VecFP,
                      [(set f64:$XT, (PPCfctidz f64:$XB))]>;
  let isCodeGenOnly = 1 in
  def XSCVDPSXDSs : XX2Form<60, 344,
                      (outs vssrc:$XT), (ins vssrc:$XB),
                      "xscvdpsxds $XT, $XB", IIC_VecFP,
                      [(set f32:$XT, (PPCfctidz f32:$XB))]>;
  def XSCVDPSXWS : XX2Form<60, 88,
                      (outs vsfrc:$XT), (ins vsfrc:$XB),
                      "xscvdpsxws $XT, $XB", IIC_VecFP,
                      [(set f64:$XT, (PPCfctiwz f64:$XB))]>;
  let isCodeGenOnly = 1 in
  def XSCVDPSXWSs : XX2Form<60, 88,
                      (outs vssrc:$XT), (ins vssrc:$XB),
                      "xscvdpsxws $XT, $XB", IIC_VecFP,
                      [(set f32:$XT, (PPCfctiwz f32:$XB))]>;
  def XSCVDPUXDS : XX2Form<60, 328,
                      (outs vsfrc:$XT), (ins vsfrc:$XB),
                      "xscvdpuxds $XT, $XB", IIC_VecFP,
                      [(set f64:$XT, (PPCfctiduz f64:$XB))]>;
  let isCodeGenOnly = 1 in
  def XSCVDPUXDSs : XX2Form<60, 328,
                      (outs vssrc:$XT), (ins vssrc:$XB),
                      "xscvdpuxds $XT, $XB", IIC_VecFP,
                      [(set f32:$XT, (PPCfctiduz f32:$XB))]>;
  def XSCVDPUXWS : XX2Form<60, 72,
                      (outs vsfrc:$XT), (ins vsfrc:$XB),
                      "xscvdpuxws $XT, $XB", IIC_VecFP,
                      [(set f64:$XT, (PPCfctiwuz f64:$XB))]>;
  let isCodeGenOnly = 1 in
  def XSCVDPUXWSs : XX2Form<60, 72,
                      (outs vssrc:$XT), (ins vssrc:$XB),
                      "xscvdpuxws $XT, $XB", IIC_VecFP,
                      [(set f32:$XT, (PPCfctiwuz f32:$XB))]>;
  def XSCVSPDP : XX2Form<60, 329,
                      (outs vsfrc:$XT), (ins vsfrc:$XB),
                      "xscvspdp $XT, $XB", IIC_VecFP, []>;
  def XSCVSXDDP : XX2Form<60, 376,
                      (outs vsfrc:$XT), (ins vsfrc:$XB),
                      "xscvsxddp $XT, $XB", IIC_VecFP,
                      [(set f64:$XT, (PPCfcfid f64:$XB))]>;
  def XSCVUXDDP : XX2Form<60, 360,
                      (outs vsfrc:$XT), (ins vsfrc:$XB),
                      "xscvuxddp $XT, $XB", IIC_VecFP,
                      [(set f64:$XT, (PPCfcfidu f64:$XB))]>;

  def XVCVDPSP : XX2Form<60, 393,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvdpsp $XT, $XB", IIC_VecFP,
                      [(set v4f32:$XT, (int_ppc_vsx_xvcvdpsp v2f64:$XB))]>;
  def XVCVDPSXDS : XX2Form<60, 472,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvdpsxds $XT, $XB", IIC_VecFP,
                      [(set v2i64:$XT, (fp_to_sint v2f64:$XB))]>;
  def XVCVDPSXWS : XX2Form<60, 216,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvdpsxws $XT, $XB", IIC_VecFP,
                      [(set v4i32:$XT, (int_ppc_vsx_xvcvdpsxws v2f64:$XB))]>;
  def XVCVDPUXDS : XX2Form<60, 456,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvdpuxds $XT, $XB", IIC_VecFP,
                      [(set v2i64:$XT, (fp_to_uint v2f64:$XB))]>;
  def XVCVDPUXWS : XX2Form<60, 200,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvdpuxws $XT, $XB", IIC_VecFP,
                      [(set v4i32:$XT, (int_ppc_vsx_xvcvdpuxws v2f64:$XB))]>;

  def XVCVSPDP : XX2Form<60, 457,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvspdp $XT, $XB", IIC_VecFP,
                      [(set v2f64:$XT, (int_ppc_vsx_xvcvspdp v4f32:$XB))]>;
  def XVCVSPSXDS : XX2Form<60, 408,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvspsxds $XT, $XB", IIC_VecFP, []>;
  def XVCVSPSXWS : XX2Form<60, 152,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvspsxws $XT, $XB", IIC_VecFP,
                      [(set v4i32:$XT, (fp_to_sint v4f32:$XB))]>;
  def XVCVSPUXDS : XX2Form<60, 392,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvspuxds $XT, $XB", IIC_VecFP, []>;
  def XVCVSPUXWS : XX2Form<60, 136,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvspuxws $XT, $XB", IIC_VecFP,
                      [(set v4i32:$XT, (fp_to_uint v4f32:$XB))]>;
  def XVCVSXDDP : XX2Form<60, 504,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvsxddp $XT, $XB", IIC_VecFP,
                      [(set v2f64:$XT, (sint_to_fp v2i64:$XB))]>;
  def XVCVSXDSP : XX2Form<60, 440,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvsxdsp $XT, $XB", IIC_VecFP,
                      [(set v4f32:$XT, (int_ppc_vsx_xvcvsxdsp v2i64:$XB))]>;
  def XVCVSXWDP : XX2Form<60, 248,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvsxwdp $XT, $XB", IIC_VecFP,
                      [(set v2f64:$XT, (int_ppc_vsx_xvcvsxwdp v4i32:$XB))]>;
  def XVCVSXWSP : XX2Form<60, 184,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvsxwsp $XT, $XB", IIC_VecFP,
                      [(set v4f32:$XT, (sint_to_fp v4i32:$XB))]>;
  def XVCVUXDDP : XX2Form<60, 488,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvuxddp $XT, $XB", IIC_VecFP,
                      [(set v2f64:$XT, (uint_to_fp v2i64:$XB))]>;
  def XVCVUXDSP : XX2Form<60, 424,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvuxdsp $XT, $XB", IIC_VecFP,
                      [(set v4f32:$XT, (int_ppc_vsx_xvcvuxdsp v2i64:$XB))]>;
  def XVCVUXWDP : XX2Form<60, 232,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvuxwdp $XT, $XB", IIC_VecFP,
                      [(set v2f64:$XT, (int_ppc_vsx_xvcvuxwdp v4i32:$XB))]>;
  def XVCVUXWSP : XX2Form<60, 168,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvcvuxwsp $XT, $XB", IIC_VecFP,
                      [(set v4f32:$XT, (uint_to_fp v4i32:$XB))]>;

  // Rounding Instructions
  def XSRDPI : XX2Form<60, 73,
                      (outs vsfrc:$XT), (ins vsfrc:$XB),
                      "xsrdpi $XT, $XB", IIC_VecFP,
                      [(set f64:$XT, (fround f64:$XB))]>;
  def XSRDPIC : XX2Form<60, 107,
                      (outs vsfrc:$XT), (ins vsfrc:$XB),
                      "xsrdpic $XT, $XB", IIC_VecFP,
                      [(set f64:$XT, (fnearbyint f64:$XB))]>;
  def XSRDPIM : XX2Form<60, 121,
                      (outs vsfrc:$XT), (ins vsfrc:$XB),
                      "xsrdpim $XT, $XB", IIC_VecFP,
                      [(set f64:$XT, (ffloor f64:$XB))]>;
  def XSRDPIP : XX2Form<60, 105,
                      (outs vsfrc:$XT), (ins vsfrc:$XB),
                      "xsrdpip $XT, $XB", IIC_VecFP,
                      [(set f64:$XT, (fceil f64:$XB))]>;
  def XSRDPIZ : XX2Form<60, 89,
                      (outs vsfrc:$XT), (ins vsfrc:$XB),
                      "xsrdpiz $XT, $XB", IIC_VecFP,
                      [(set f64:$XT, (ftrunc f64:$XB))]>;

  def XVRDPI : XX2Form<60, 201,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvrdpi $XT, $XB", IIC_VecFP,
                      [(set v2f64:$XT, (fround v2f64:$XB))]>;
  def XVRDPIC : XX2Form<60, 235,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvrdpic $XT, $XB", IIC_VecFP,
                      [(set v2f64:$XT, (fnearbyint v2f64:$XB))]>;
  def XVRDPIM : XX2Form<60, 249,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvrdpim $XT, $XB", IIC_VecFP,
                      [(set v2f64:$XT, (ffloor v2f64:$XB))]>;
  def XVRDPIP : XX2Form<60, 233,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvrdpip $XT, $XB", IIC_VecFP,
                      [(set v2f64:$XT, (fceil v2f64:$XB))]>;
  def XVRDPIZ : XX2Form<60, 217,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvrdpiz $XT, $XB", IIC_VecFP,
                      [(set v2f64:$XT, (ftrunc v2f64:$XB))]>;

  def XVRSPI : XX2Form<60, 137,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvrspi $XT, $XB", IIC_VecFP,
                      [(set v4f32:$XT, (fround v4f32:$XB))]>;
  def XVRSPIC : XX2Form<60, 171,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvrspic $XT, $XB", IIC_VecFP,
                      [(set v4f32:$XT, (fnearbyint v4f32:$XB))]>;
  def XVRSPIM : XX2Form<60, 185,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvrspim $XT, $XB", IIC_VecFP,
                      [(set v4f32:$XT, (ffloor v4f32:$XB))]>;
  def XVRSPIP : XX2Form<60, 169,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvrspip $XT, $XB", IIC_VecFP,
                      [(set v4f32:$XT, (fceil v4f32:$XB))]>;
  def XVRSPIZ : XX2Form<60, 153,
                      (outs vsrc:$XT), (ins vsrc:$XB),
                      "xvrspiz $XT, $XB", IIC_VecFP,
                      [(set v4f32:$XT, (ftrunc v4f32:$XB))]>;

  // Max/Min Instructions
  let isCommutable = 1 in {
  def XSMAXDP : XX3Form<60, 160,
                        (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
                        "xsmaxdp $XT, $XA, $XB", IIC_VecFP,
                        [(set vsfrc:$XT,
                              (int_ppc_vsx_xsmaxdp vsfrc:$XA, vsfrc:$XB))]>;
  def XSMINDP : XX3Form<60, 168,
                        (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
                        "xsmindp $XT, $XA, $XB", IIC_VecFP,
                        [(set vsfrc:$XT,
                              (int_ppc_vsx_xsmindp vsfrc:$XA, vsfrc:$XB))]>;

  def XVMAXDP : XX3Form<60, 224,
                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                        "xvmaxdp $XT, $XA, $XB", IIC_VecFP,
                        [(set vsrc:$XT,
                              (int_ppc_vsx_xvmaxdp vsrc:$XA, vsrc:$XB))]>;
  def XVMINDP : XX3Form<60, 232,
                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                        "xvmindp $XT, $XA, $XB", IIC_VecFP,
                        [(set vsrc:$XT,
                              (int_ppc_vsx_xvmindp vsrc:$XA, vsrc:$XB))]>;

  def XVMAXSP : XX3Form<60, 192,
                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                        "xvmaxsp $XT, $XA, $XB", IIC_VecFP,
                        [(set vsrc:$XT,
                              (int_ppc_vsx_xvmaxsp vsrc:$XA, vsrc:$XB))]>;
  def XVMINSP : XX3Form<60, 200,
                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                        "xvminsp $XT, $XA, $XB", IIC_VecFP,
                        [(set vsrc:$XT,
                              (int_ppc_vsx_xvminsp vsrc:$XA, vsrc:$XB))]>;
  } // isCommutable
} // Uses = [RM]

  // Logical Instructions
  let isCommutable = 1 in
  def XXLAND : XX3Form<60, 130,
                       (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                       "xxland $XT, $XA, $XB", IIC_VecGeneral,
                       [(set v4i32:$XT, (and v4i32:$XA, v4i32:$XB))]>;
  def XXLANDC : XX3Form<60, 138,
                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                        "xxlandc $XT, $XA, $XB", IIC_VecGeneral,
                        [(set v4i32:$XT, (and v4i32:$XA,
                                              (vnot_ppc v4i32:$XB)))]>;
  let isCommutable = 1 in {
  def XXLNOR : XX3Form<60, 162,
                       (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                       "xxlnor $XT, $XA, $XB", IIC_VecGeneral,
                       [(set v4i32:$XT, (vnot_ppc (or v4i32:$XA,
                                                   v4i32:$XB)))]>;
  def XXLOR : XX3Form<60, 146,
                      (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                      "xxlor $XT, $XA, $XB", IIC_VecGeneral,
                      [(set v4i32:$XT, (or v4i32:$XA, v4i32:$XB))]>;
  let isCodeGenOnly = 1 in
  def XXLORf : XX3Form<60, 146,
                      (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
                      "xxlor $XT, $XA, $XB", IIC_VecGeneral, []>;
  def XXLXOR : XX3Form<60, 154,
                       (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                       "xxlxor $XT, $XA, $XB", IIC_VecGeneral,
                       [(set v4i32:$XT, (xor v4i32:$XA, v4i32:$XB))]>;
  } // isCommutable

  let isCodeGenOnly = 1, isMoveImm = 1, isAsCheapAsAMove = 1,
      isReMaterializable = 1 in {
    def XXLXORz : XX3Form_SameOp<60, 154, (outs vsrc:$XT), (ins),
                       "xxlxor $XT, $XT, $XT", IIC_VecGeneral,
                       [(set v4i32:$XT, (v4i32 immAllZerosV))]>;
    def XXLXORdpz : XX3Form_SameOp<60, 154,
                         (outs vsfrc:$XT), (ins),
                         "xxlxor $XT, $XT, $XT", IIC_VecGeneral,
                         [(set f64:$XT, (fpimm0))]>;
    def XXLXORspz : XX3Form_SameOp<60, 154,
                         (outs vssrc:$XT), (ins),
                         "xxlxor $XT, $XT, $XT", IIC_VecGeneral,
                         [(set f32:$XT, (fpimm0))]>;
  }

  // Permutation Instructions
  def XXMRGHW : XX3Form<60, 18,
                       (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                       "xxmrghw $XT, $XA, $XB", IIC_VecPerm, []>;
  def XXMRGLW : XX3Form<60, 50,
                       (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
                       "xxmrglw $XT, $XA, $XB", IIC_VecPerm, []>;

  def XXPERMDI : XX3Form_2<60, 10,
                       (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB, u2imm:$DM),
                       "xxpermdi $XT, $XA, $XB, $DM", IIC_VecPerm,
                       [(set v2i64:$XT, (PPCxxpermdi v2i64:$XA, v2i64:$XB,
                         imm32SExt16:$DM))]>;
  let isCodeGenOnly = 1 in
  def XXPERMDIs : XX3Form_2s<60, 10, (outs vsrc:$XT), (ins vsfrc:$XA, u2imm:$DM),
                             "xxpermdi $XT, $XA, $XA, $DM", IIC_VecPerm, []>;
  def XXSEL : XX4Form<60, 3,
                      (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB, vsrc:$XC),
                      "xxsel $XT, $XA, $XB, $XC", IIC_VecPerm, []>;

  def XXSLDWI : XX3Form_2<60, 2,
                       (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB, u2imm:$SHW),
                       "xxsldwi $XT, $XA, $XB, $SHW", IIC_VecPerm,
                       [(set v4i32:$XT, (PPCvecshl v4i32:$XA, v4i32:$XB,
                                                  imm32SExt16:$SHW))]>;

  let isCodeGenOnly = 1 in
  def XXSLDWIs : XX3Form_2s<60, 2,
                       (outs vsrc:$XT), (ins vsfrc:$XA, u2imm:$SHW),
                       "xxsldwi $XT, $XA, $XA, $SHW", IIC_VecPerm, []>;

  def XXSPLTW : XX2Form_2<60, 164,
                       (outs vsrc:$XT), (ins vsrc:$XB, u2imm:$UIM),
                       "xxspltw $XT, $XB, $UIM", IIC_VecPerm,
                       [(set v4i32:$XT,
                             (PPCxxsplt v4i32:$XB, imm32SExt16:$UIM))]>;
  let isCodeGenOnly = 1 in
  def XXSPLTWs : XX2Form_2<60, 164,
                       (outs vsrc:$XT), (ins vsfrc:$XB, u2imm:$UIM),
                       "xxspltw $XT, $XB, $UIM", IIC_VecPerm, []>;

} // hasSideEffects

// SELECT_CC_* - Used to implement the SELECT_CC DAG operation.  Expanded after
// instruction selection into a branch sequence.
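// Schematically (assuming the usual custom-inserter lowering for these
// pseudos), "$dst = SELECT_VSRC $cond, $T, $F" becomes a conditional branch
// around a copy block, with $dst produced by a PHI of $T and $F in the join
// block.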
let PPC970_Single = 1 in {

  def SELECT_CC_VSRC: PPCCustomInserterPseudo<(outs vsrc:$dst),
                             (ins crrc:$cond, vsrc:$T, vsrc:$F, i32imm:$BROPC),
                             "#SELECT_CC_VSRC",
                             []>;
  def SELECT_VSRC: PPCCustomInserterPseudo<(outs vsrc:$dst),
                          (ins crbitrc:$cond, vsrc:$T, vsrc:$F),
                          "#SELECT_VSRC",
                          [(set v2f64:$dst,
                                (select i1:$cond, v2f64:$T, v2f64:$F))]>;
  def SELECT_CC_VSFRC: PPCCustomInserterPseudo<(outs f8rc:$dst),
                              (ins crrc:$cond, f8rc:$T, f8rc:$F,
                               i32imm:$BROPC), "#SELECT_CC_VSFRC",
                              []>;
  def SELECT_VSFRC: PPCCustomInserterPseudo<(outs f8rc:$dst),
                           (ins crbitrc:$cond, f8rc:$T, f8rc:$F),
                           "#SELECT_VSFRC",
                           [(set f64:$dst,
                                 (select i1:$cond, f64:$T, f64:$F))]>;
  def SELECT_CC_VSSRC: PPCCustomInserterPseudo<(outs f4rc:$dst),
                              (ins crrc:$cond, f4rc:$T, f4rc:$F,
                               i32imm:$BROPC), "#SELECT_CC_VSSRC",
                              []>;
  def SELECT_VSSRC: PPCCustomInserterPseudo<(outs f4rc:$dst),
                           (ins crbitrc:$cond, f4rc:$T, f4rc:$F),
                           "#SELECT_VSSRC",
                           [(set f32:$dst,
                                 (select i1:$cond, f32:$T, f32:$F))]>;
}
} // AddedComplexity

def : InstAlias<"xvmovdp $XT, $XB",
                (XVCPSGNDP vsrc:$XT, vsrc:$XB, vsrc:$XB)>;
def : InstAlias<"xvmovsp $XT, $XB",
                (XVCPSGNSP vsrc:$XT, vsrc:$XB, vsrc:$XB)>;

def : InstAlias<"xxspltd $XT, $XB, 0",
                (XXPERMDI vsrc:$XT, vsrc:$XB, vsrc:$XB, 0)>;
def : InstAlias<"xxspltd $XT, $XB, 1",
                (XXPERMDI vsrc:$XT, vsrc:$XB, vsrc:$XB, 3)>;
def : InstAlias<"xxmrghd $XT, $XA, $XB",
                (XXPERMDI vsrc:$XT, vsrc:$XA, vsrc:$XB, 0)>;
def : InstAlias<"xxmrgld $XT, $XA, $XB",
                (XXPERMDI vsrc:$XT, vsrc:$XA, vsrc:$XB, 3)>;
def : InstAlias<"xxswapd $XT, $XB",
                (XXPERMDI vsrc:$XT, vsrc:$XB, vsrc:$XB, 2)>;
def : InstAlias<"xxspltd $XT, $XB, 0",
                (XXPERMDIs vsrc:$XT, vsfrc:$XB, 0)>;
def : InstAlias<"xxspltd $XT, $XB, 1",
                (XXPERMDIs vsrc:$XT, vsfrc:$XB, 3)>;
def : InstAlias<"xxswapd $XT, $XB",
                (XXPERMDIs vsrc:$XT, vsfrc:$XB, 2)>;

let AddedComplexity = 400 in { // Prefer VSX patterns over non-VSX patterns.

def : Pat<(v4i32 (vnot_ppc v4i32:$A)),
          (v4i32 (XXLNOR $A, $A))>;
def : Pat<(v4i32 (or (and (vnot_ppc v4i32:$C), v4i32:$A),
                     (and v4i32:$B, v4i32:$C))),
          (v4i32 (XXSEL $A, $B, $C))>;

let Predicates = [IsBigEndian] in {
def : Pat<(v2f64 (scalar_to_vector f64:$A)),
          (v2f64 (SUBREG_TO_REG (i64 1), $A, sub_64))>;

def : Pat<(f64 (extractelt v2f64:$S, 0)),
          (f64 (EXTRACT_SUBREG $S, sub_64))>;
def : Pat<(f64 (extractelt v2f64:$S, 1)),
          (f64 (EXTRACT_SUBREG (XXPERMDI $S, $S, 2), sub_64))>;
}

let Predicates = [IsLittleEndian] in {
def : Pat<(v2f64 (scalar_to_vector f64:$A)),
          (v2f64 (XXPERMDI (SUBREG_TO_REG (i64 1), $A, sub_64),
                           (SUBREG_TO_REG (i64 1), $A, sub_64), 0))>;

def : Pat<(f64 (extractelt v2f64:$S, 0)),
          (f64 (EXTRACT_SUBREG (XXPERMDI $S, $S, 2), sub_64))>;
def : Pat<(f64 (extractelt v2f64:$S, 1)),
          (f64 (EXTRACT_SUBREG $S, sub_64))>;
}

// Additional fnmsub patterns: -a*b + c == -(a*b - c)
def : Pat<(fma (fneg f64:$A), f64:$B, f64:$C),
          (XSNMSUBADP $C, $A, $B)>;
def : Pat<(fma f64:$A, (fneg f64:$B), f64:$C),
          (XSNMSUBADP $C, $A, $B)>;

def : Pat<(fma (fneg v2f64:$A), v2f64:$B, v2f64:$C),
          (XVNMSUBADP $C, $A, $B)>;
def : Pat<(fma v2f64:$A, (fneg v2f64:$B), v2f64:$C),
          (XVNMSUBADP $C, $A, $B)>;

def : Pat<(fma (fneg v4f32:$A), v4f32:$B, v4f32:$C),
          (XVNMSUBASP $C, $A, $B)>;
def : Pat<(fma v4f32:$A, (fneg v4f32:$B), v4f32:$C),
          (XVNMSUBASP $C, $A, $B)>;
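// These mappings are valid because xsnmsubadp and the xvnmsub* forms above
// compute -(XA*XB - XTi) = -XA*XB + XTi, so passing the addend C in the tied
// accumulator operand $XTi yields exactly fma(-A, B, C) and, by commutativity
// of the multiply, fma(A, -B, C).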

def : Pat<(v2f64 (bitconvert v4f32:$A)),
          (COPY_TO_REGCLASS $A, VSRC)>;
def : Pat<(v2f64 (bitconvert v4i32:$A)),
          (COPY_TO_REGCLASS $A, VSRC)>;
def : Pat<(v2f64 (bitconvert v8i16:$A)),
          (COPY_TO_REGCLASS $A, VSRC)>;
def : Pat<(v2f64 (bitconvert v16i8:$A)),
          (COPY_TO_REGCLASS $A, VSRC)>;

def : Pat<(v4f32 (bitconvert v2f64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v4i32 (bitconvert v2f64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v8i16 (bitconvert v2f64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v16i8 (bitconvert v2f64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;

def : Pat<(v2i64 (bitconvert v4f32:$A)),
          (COPY_TO_REGCLASS $A, VSRC)>;
def : Pat<(v2i64 (bitconvert v4i32:$A)),
          (COPY_TO_REGCLASS $A, VSRC)>;
def : Pat<(v2i64 (bitconvert v8i16:$A)),
          (COPY_TO_REGCLASS $A, VSRC)>;
def : Pat<(v2i64 (bitconvert v16i8:$A)),
          (COPY_TO_REGCLASS $A, VSRC)>;

def : Pat<(v4f32 (bitconvert v2i64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v4i32 (bitconvert v2i64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v8i16 (bitconvert v2i64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v16i8 (bitconvert v2i64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;

def : Pat<(v2f64 (bitconvert v2i64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v2i64 (bitconvert v2f64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;

def : Pat<(v2f64 (bitconvert v1i128:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v1i128 (bitconvert v2f64:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;

def : Pat<(v2i64 (bitconvert f128:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v4i32 (bitconvert f128:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v8i16 (bitconvert f128:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;
def : Pat<(v16i8 (bitconvert f128:$A)),
          (COPY_TO_REGCLASS $A, VRRC)>;

def : Pat<(v2f64 (PPCsvec2fp v4i32:$C, 0)),
          (v2f64 (XVCVSXWDP (v2i64 (XXMRGHW $C, $C))))>;
def : Pat<(v2f64 (PPCsvec2fp v4i32:$C, 1)),
          (v2f64 (XVCVSXWDP (v2i64 (XXMRGLW $C, $C))))>;

def : Pat<(v2f64 (PPCuvec2fp v4i32:$C, 0)),
          (v2f64 (XVCVUXWDP (v2i64 (XXMRGHW $C, $C))))>;
def : Pat<(v2f64 (PPCuvec2fp v4i32:$C, 1)),
          (v2f64 (XVCVUXWDP (v2i64 (XXMRGLW $C, $C))))>;

def : Pat<(v2f64 (PPCfpexth v4f32:$C, 0)), (XVCVSPDP (XXMRGHW $C, $C))>;
def : Pat<(v2f64 (PPCfpexth v4f32:$C, 1)), (XVCVSPDP (XXMRGLW $C, $C))>;

// Loads.
let Predicates = [HasVSX, HasOnlySwappingMemOps] in {
  def : Pat<(v2f64 (PPClxvd2x xoaddr:$src)), (LXVD2X xoaddr:$src)>;

  // Stores.
  def : Pat<(int_ppc_vsx_stxvd2x v2f64:$rS, xoaddr:$dst),
            (STXVD2X $rS, xoaddr:$dst)>;
  def : Pat<(PPCstxvd2x v2f64:$rS, xoaddr:$dst), (STXVD2X $rS, xoaddr:$dst)>;
}

// Load and store vectors in big-endian element order
let Predicates = [IsLittleEndian, HasVSX] in {
  def : Pat<(v2f64 (PPCld_vec_be xoaddr:$src)), (LXVD2X xoaddr:$src)>;
  def : Pat<(PPCst_vec_be v2f64:$rS, xoaddr:$dst), (STXVD2X $rS, xoaddr:$dst)>;
  def : Pat<(v4f32 (PPCld_vec_be xoaddr:$src)), (LXVW4X xoaddr:$src)>;
  def : Pat<(PPCst_vec_be v4f32:$rS, xoaddr:$dst), (STXVW4X $rS, xoaddr:$dst)>;
  def : Pat<(v2i64 (PPCld_vec_be xoaddr:$src)), (LXVD2X xoaddr:$src)>;
  def : Pat<(PPCst_vec_be v2i64:$rS, xoaddr:$dst), (STXVD2X $rS, xoaddr:$dst)>;
  def : Pat<(v4i32 (PPCld_vec_be xoaddr:$src)), (LXVW4X xoaddr:$src)>;
  def : Pat<(PPCst_vec_be v4i32:$rS, xoaddr:$dst), (STXVW4X $rS, xoaddr:$dst)>;
}

let Predicates = [IsBigEndian, HasVSX, HasOnlySwappingMemOps] in {
  def : Pat<(v2f64 (load xoaddr:$src)), (LXVD2X xoaddr:$src)>;
  def : Pat<(v2i64 (load xoaddr:$src)), (LXVD2X xoaddr:$src)>;
  def : Pat<(v4i32 (load xoaddr:$src)), (LXVW4X xoaddr:$src)>;
  def : Pat<(v4i32 (int_ppc_vsx_lxvw4x xoaddr:$src)), (LXVW4X xoaddr:$src)>;
  def : Pat<(store v2f64:$rS, xoaddr:$dst), (STXVD2X $rS, xoaddr:$dst)>;
  def : Pat<(store v2i64:$rS, xoaddr:$dst), (STXVD2X $rS, xoaddr:$dst)>;
  def : Pat<(store v4i32:$XT, xoaddr:$dst), (STXVW4X $XT, xoaddr:$dst)>;
  def : Pat<(int_ppc_vsx_stxvw4x v4i32:$rS, xoaddr:$dst),
            (STXVW4X $rS, xoaddr:$dst)>;
}

// Permutes.
def : Pat<(v2f64 (PPCxxswapd v2f64:$src)), (XXPERMDI $src, $src, 2)>;
def : Pat<(v2i64 (PPCxxswapd v2i64:$src)), (XXPERMDI $src, $src, 2)>;
def : Pat<(v4f32 (PPCxxswapd v4f32:$src)), (XXPERMDI $src, $src, 2)>;
def : Pat<(v4i32 (PPCxxswapd v4i32:$src)), (XXPERMDI $src, $src, 2)>;
def : Pat<(v2f64 (PPCswapNoChain v2f64:$src)), (XXPERMDI $src, $src, 2)>;

// PPCvecshl XT, XA, XA, 2 can be selected as either XXSLDWI XT,XA,XA,2 or
// XXSWAPD XT,XA (i.e. XXPERMDI XT,XA,XA,2); the latter is more profitable.
def : Pat<(v4i32 (PPCvecshl v4i32:$src, v4i32:$src, 2)), (XXPERMDI $src, $src, 2)>;

// Selects.
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETLT)),
          (SELECT_VSRC (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETULT)),
          (SELECT_VSRC (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETLE)),
          (SELECT_VSRC (CRORC  $lhs, $rhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETULE)),
          (SELECT_VSRC (CRORC  $rhs, $lhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETEQ)),
          (SELECT_VSRC (CREQV $lhs, $rhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETGE)),
          (SELECT_VSRC (CRORC  $rhs, $lhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETUGE)),
          (SELECT_VSRC (CRORC  $lhs, $rhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETGT)),
          (SELECT_VSRC (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETUGT)),
          (SELECT_VSRC (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETNE)),
          (SELECT_VSRC (CRXOR $lhs, $rhs), $tval, $fval)>;

def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETLT)),
          (SELECT_VSFRC (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETULT)),
          (SELECT_VSFRC (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETLE)),
          (SELECT_VSFRC (CRORC  $lhs, $rhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETULE)),
          (SELECT_VSFRC (CRORC  $rhs, $lhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETEQ)),
          (SELECT_VSFRC (CREQV $lhs, $rhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETGE)),
          (SELECT_VSFRC (CRORC  $rhs, $lhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETUGE)),
          (SELECT_VSFRC (CRORC  $lhs, $rhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETGT)),
          (SELECT_VSFRC (CRANDC $rhs, $lhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETUGT)),
          (SELECT_VSFRC (CRANDC $lhs, $rhs), $tval, $fval)>;
def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETNE)),
          (SELECT_VSFRC (CRXOR $lhs, $rhs), $tval, $fval)>;

// Divides.
def : Pat<(int_ppc_vsx_xvdivsp v4f32:$A, v4f32:$B),
          (XVDIVSP $A, $B)>;
def : Pat<(int_ppc_vsx_xvdivdp v2f64:$A, v2f64:$B),
          (XVDIVDP $A, $B)>;

// Reciprocal estimate
def : Pat<(int_ppc_vsx_xvresp v4f32:$A),
          (XVRESP $A)>;
def : Pat<(int_ppc_vsx_xvredp v2f64:$A),
          (XVREDP $A)>;

// Recip. square root estimate
def : Pat<(int_ppc_vsx_xvrsqrtesp v4f32:$A),
          (XVRSQRTESP $A)>;
def : Pat<(int_ppc_vsx_xvrsqrtedp v2f64:$A),
          (XVRSQRTEDP $A)>;

// Vector selection
def : Pat<(v16i8 (vselect v16i8:$vA, v16i8:$vB, v16i8:$vC)),
          (COPY_TO_REGCLASS
                 (XXSEL (COPY_TO_REGCLASS $vC, VSRC),
                        (COPY_TO_REGCLASS $vB, VSRC),
                        (COPY_TO_REGCLASS $vA, VSRC)), VRRC)>;
1210def : Pat<(v8i16 (vselect v8i16:$vA, v8i16:$vB, v8i16:$vC)),
1211          (COPY_TO_REGCLASS
1212                 (XXSEL (COPY_TO_REGCLASS $vC, VSRC),
1213                        (COPY_TO_REGCLASS $vB, VSRC),
1214                        (COPY_TO_REGCLASS $vA, VSRC)), VRRC)>;
1215def : Pat<(vselect v4i32:$vA, v4i32:$vB, v4i32:$vC),
1216          (XXSEL $vC, $vB, $vA)>;
1217def : Pat<(vselect v2i64:$vA, v2i64:$vB, v2i64:$vC),
1218          (XXSEL $vC, $vB, $vA)>;
1219def : Pat<(vselect v4i32:$vA, v4f32:$vB, v4f32:$vC),
1220          (XXSEL $vC, $vB, $vA)>;
1221def : Pat<(vselect v2i64:$vA, v2f64:$vB, v2f64:$vC),
1222          (XXSEL $vC, $vB, $vA)>;
1223
1224def : Pat<(v4f32 (fmaxnum v4f32:$src1, v4f32:$src2)),
1225          (v4f32 (XVMAXSP $src1, $src2))>;
1226def : Pat<(v4f32 (fminnum v4f32:$src1, v4f32:$src2)),
1227          (v4f32 (XVMINSP $src1, $src2))>;
1228def : Pat<(v2f64 (fmaxnum v2f64:$src1, v2f64:$src2)),
1229          (v2f64 (XVMAXDP $src1, $src2))>;
1230def : Pat<(v2f64 (fminnum v2f64:$src1, v2f64:$src2)),
1231          (v2f64 (XVMINDP $src1, $src2))>;
1232
1233let Predicates = [IsLittleEndian] in {
1234def : Pat<(f64 (PPCfcfid (PPCmtvsra (i64 (vector_extract v2i64:$S, 0))))),
1235          (f64 (XSCVSXDDP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
1236def : Pat<(f64 (PPCfcfid (PPCmtvsra (i64 (vector_extract v2i64:$S, 1))))),
1237          (f64 (XSCVSXDDP (COPY_TO_REGCLASS (f64 (COPY_TO_REGCLASS $S, VSRC)), VSFRC)))>;
1238def : Pat<(f64 (PPCfcfidu (PPCmtvsra (i64 (vector_extract v2i64:$S, 0))))),
1239          (f64 (XSCVUXDDP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
1240def : Pat<(f64 (PPCfcfidu (PPCmtvsra (i64 (vector_extract v2i64:$S, 1))))),
1241          (f64 (XSCVUXDDP (COPY_TO_REGCLASS (f64 (COPY_TO_REGCLASS $S, VSRC)), VSFRC)))>;
1242} // IsLittleEndian
1243
1244let Predicates = [IsBigEndian] in {
1245def : Pat<(f64 (PPCfcfid (PPCmtvsra (i64 (vector_extract v2i64:$S, 0))))),
1246          (f64 (XSCVSXDDP (COPY_TO_REGCLASS $S, VSFRC)))>;
1247def : Pat<(f64 (PPCfcfid (PPCmtvsra (i64 (vector_extract v2i64:$S, 1))))),
1248          (f64 (XSCVSXDDP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
1249def : Pat<(f64 (PPCfcfidu (PPCmtvsra (i64 (vector_extract v2i64:$S, 0))))),
1250          (f64 (XSCVUXDDP (COPY_TO_REGCLASS $S, VSFRC)))>;
1251def : Pat<(f64 (PPCfcfidu (PPCmtvsra (i64 (vector_extract v2i64:$S, 1))))),
1252          (f64 (XSCVUXDDP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
1253} // IsBigEndian
1254
1255} // AddedComplexity
1256} // HasVSX
1257
1258def ScalarLoads {
1259  dag Li8 =       (i32 (extloadi8 xoaddr:$src));
1260  dag ZELi8 =     (i32 (zextloadi8 xoaddr:$src));
1261  dag ZELi8i64 =  (i64 (zextloadi8 xoaddr:$src));
1262  dag SELi8 =     (i32 (sext_inreg (extloadi8 xoaddr:$src), i8));
1263  dag SELi8i64 =  (i64 (sext_inreg (extloadi8 xoaddr:$src), i8));
1264
1265  dag Li16 =      (i32 (extloadi16 xoaddr:$src));
1266  dag ZELi16 =    (i32 (zextloadi16 xoaddr:$src));
1267  dag ZELi16i64 = (i64 (zextloadi16 xoaddr:$src));
1268  dag SELi16 =    (i32 (sextloadi16 xoaddr:$src));
1269  dag SELi16i64 = (i64 (sextloadi16 xoaddr:$src));
1270
1271  dag Li32 = (i32 (load xoaddr:$src));
1272}
1273
1274def DWToSPExtractConv {
1275  dag El0US1 = (f32 (PPCfcfidus
1276                    (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S1, 0))))));
1277  dag El1US1 = (f32 (PPCfcfidus
1278                    (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S1, 1))))));
1279  dag El0US2 = (f32 (PPCfcfidus
1280                    (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S2, 0))))));
1281  dag El1US2 = (f32 (PPCfcfidus
1282                    (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S2, 1))))));
1283  dag El0SS1 = (f32 (PPCfcfids
1284                    (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S1, 0))))));
1285  dag El1SS1 = (f32 (PPCfcfids
1286                    (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S1, 1))))));
1287  dag El0SS2 = (f32 (PPCfcfids
1288                    (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S2, 0))))));
1289  dag El1SS2 = (f32 (PPCfcfids
1290                    (f64 (PPCmtvsra (i64 (vector_extract v2i64:$S2, 1))))));
1291  dag BVU = (v4f32 (build_vector El0US1, El1US1, El0US2, El1US2));
1292  dag BVS = (v4f32 (build_vector El0SS1, El1SS1, El0SS2, El1SS2));
1293}
1294
1295// The following VSX instructions were introduced in Power ISA 2.07
1296/* FIXME: If the operands are v2i64, these patterns will not match.
1297   We should define new patterns or otherwise match the same patterns
1298   when the elements are larger than i32.
1299*/
1300def HasP8Vector : Predicate<"PPCSubTarget->hasP8Vector()">;
1301def HasDirectMove : Predicate<"PPCSubTarget->hasDirectMove()">;
1302def NoP9Vector : Predicate<"!PPCSubTarget->hasP9Vector()">;
1303let Predicates = [HasP8Vector] in {
1304let AddedComplexity = 400 in { // Prefer VSX patterns over non-VSX patterns.
1305  let isCommutable = 1 in {
1306    def XXLEQV : XX3Form<60, 186,
1307                         (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
1308                         "xxleqv $XT, $XA, $XB", IIC_VecGeneral,
1309                         [(set v4i32:$XT, (vnot_ppc (xor v4i32:$XA, v4i32:$XB)))]>;
1310    def XXLNAND : XX3Form<60, 178,
1311                          (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
1312                          "xxlnand $XT, $XA, $XB", IIC_VecGeneral,
1313                          [(set v4i32:$XT, (vnot_ppc (and v4i32:$XA,
1314                                                    v4i32:$XB)))]>;
1315  } // isCommutable
1316
1317  def : Pat<(int_ppc_vsx_xxleqv v4i32:$A, v4i32:$B),
1318            (XXLEQV $A, $B)>;
1319
1320  let isCodeGenOnly = 1, isMoveImm = 1, isAsCheapAsAMove = 1,
1321      isReMaterializable = 1 in {
1322    def XXLEQVOnes : XX3Form_SameOp<60, 186, (outs vsrc:$XT), (ins),
1323                         "xxleqv $XT, $XT, $XT", IIC_VecGeneral,
1324                         [(set v4i32:$XT, (bitconvert (v16i8 immAllOnesV)))]>;
1325  }
1326
1327  def XXLORC : XX3Form<60, 170,
1328                       (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
1329                       "xxlorc $XT, $XA, $XB", IIC_VecGeneral,
1330                       [(set v4i32:$XT, (or v4i32:$XA, (vnot_ppc v4i32:$XB)))]>;
1331
1332  // VSX scalar loads introduced in ISA 2.07
1333  let mayLoad = 1, mayStore = 0 in {
1334    let CodeSize = 3 in
1335    def LXSSPX : XX1Form_memOp<31, 524, (outs vssrc:$XT), (ins memrr:$src),
1336                         "lxsspx $XT, $src", IIC_LdStLFD, []>;
1337    def LXSIWAX : XX1Form_memOp<31, 76, (outs vsfrc:$XT), (ins memrr:$src),
1338                          "lxsiwax $XT, $src", IIC_LdStLFD, []>;
1339    def LXSIWZX : XX1Form_memOp<31, 12, (outs vsfrc:$XT), (ins memrr:$src),
1340                          "lxsiwzx $XT, $src", IIC_LdStLFD, []>;
1341
1342    // Pseudo instruction XFLOADf32 will be expanded to LXSSPX or LFSX later
1343    let CodeSize = 3 in
1344    def XFLOADf32  : PseudoXFormMemOp<(outs vssrc:$XT), (ins memrr:$src),
1345                            "#XFLOADf32",
1346                            [(set f32:$XT, (load xoaddr:$src))]>;
1347    // Pseudo instruction LIWAX will be expanded to LXSIWAX or LFIWAX later
1348    def LIWAX : PseudoXFormMemOp<(outs vsfrc:$XT), (ins memrr:$src),
1349                       "#LIWAX",
1350                       [(set f64:$XT, (PPClfiwax xoaddr:$src))]>;
1351    // Pseudo instruction LIWZX will be expanded to LXSIWZX or LFIWZX later
1352    def LIWZX : PseudoXFormMemOp<(outs vsfrc:$XT), (ins memrr:$src),
1353                       "#LIWZX",
1354                       [(set f64:$XT, (PPClfiwzx xoaddr:$src))]>;
1355  } // mayLoad
1356
1357  // VSX scalar stores introduced in ISA 2.07
1358  let mayStore = 1, mayLoad = 0 in {
1359    let CodeSize = 3 in
1360    def STXSSPX : XX1Form_memOp<31, 652, (outs), (ins vssrc:$XT, memrr:$dst),
1361                          "stxsspx $XT, $dst", IIC_LdStSTFD, []>;
1362    def STXSIWX : XX1Form_memOp<31, 140, (outs), (ins vsfrc:$XT, memrr:$dst),
1363                          "stxsiwx $XT, $dst", IIC_LdStSTFD, []>;
1364
1365    // Pseudo instruction XFSTOREf32 will be expanded to STXSSPX or STFSX later
1366    let CodeSize = 3 in
1367    def XFSTOREf32 : PseudoXFormMemOp<(outs), (ins vssrc:$XT, memrr:$dst),
1368                            "#XFSTOREf32",
1369                            [(store f32:$XT, xoaddr:$dst)]>;
1370    // Pseudo instruction STIWX will be expanded to STXSIWX or STFIWX later
1371    def STIWX : PseudoXFormMemOp<(outs), (ins vsfrc:$XT, memrr:$dst),
1372                       "#STIWX",
1373                      [(PPCstfiwx f64:$XT, xoaddr:$dst)]>;
1374  } // mayStore
1375
1376  def : Pat<(f64 (extloadf32 xoaddr:$src)),
1377            (COPY_TO_REGCLASS (XFLOADf32 xoaddr:$src), VSFRC)>;
1378  def : Pat<(f32 (fpround (f64 (extloadf32 xoaddr:$src)))),
1379            (f32 (XFLOADf32 xoaddr:$src))>;
1380  def : Pat<(f64 (fpextend f32:$src)),
1381            (COPY_TO_REGCLASS $src, VSFRC)>;
1382
1383  def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETLT)),
1384            (SELECT_VSSRC (CRANDC $lhs, $rhs), $tval, $fval)>;
1385  def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETULT)),
1386            (SELECT_VSSRC (CRANDC $rhs, $lhs), $tval, $fval)>;
1387  def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETLE)),
1388            (SELECT_VSSRC (CRORC  $lhs, $rhs), $tval, $fval)>;
1389  def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETULE)),
1390            (SELECT_VSSRC (CRORC  $rhs, $lhs), $tval, $fval)>;
1391  def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETEQ)),
1392            (SELECT_VSSRC (CREQV $lhs, $rhs), $tval, $fval)>;
1393  def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETGE)),
1394            (SELECT_VSSRC (CRORC  $rhs, $lhs), $tval, $fval)>;
1395  def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETUGE)),
1396            (SELECT_VSSRC (CRORC  $lhs, $rhs), $tval, $fval)>;
1397  def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETGT)),
1398            (SELECT_VSSRC (CRANDC $rhs, $lhs), $tval, $fval)>;
1399  def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETUGT)),
1400            (SELECT_VSSRC (CRANDC $lhs, $rhs), $tval, $fval)>;
1401  def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETNE)),
1402            (SELECT_VSSRC (CRXOR $lhs, $rhs), $tval, $fval)>;
1403
1404  // VSX Elementary Scalar FP arithmetic (SP)
1405  let isCommutable = 1 in {
1406    def XSADDSP : XX3Form<60, 0,
1407                          (outs vssrc:$XT), (ins vssrc:$XA, vssrc:$XB),
1408                          "xsaddsp $XT, $XA, $XB", IIC_VecFP,
1409                          [(set f32:$XT, (fadd f32:$XA, f32:$XB))]>;
1410    def XSMULSP : XX3Form<60, 16,
1411                          (outs vssrc:$XT), (ins vssrc:$XA, vssrc:$XB),
1412                          "xsmulsp $XT, $XA, $XB", IIC_VecFP,
1413                          [(set f32:$XT, (fmul f32:$XA, f32:$XB))]>;
1414  } // isCommutable
1415  def XSSUBSP : XX3Form<60, 8,
1416                        (outs vssrc:$XT), (ins vssrc:$XA, vssrc:$XB),
1417                        "xssubsp $XT, $XA, $XB", IIC_VecFP,
1418                        [(set f32:$XT, (fsub f32:$XA, f32:$XB))]>;
1419  def XSDIVSP : XX3Form<60, 24,
1420                        (outs vssrc:$XT), (ins vssrc:$XA, vssrc:$XB),
1421                        "xsdivsp $XT, $XA, $XB", IIC_FPDivS,
1422                        [(set f32:$XT, (fdiv f32:$XA, f32:$XB))]>;
1423  def XSRESP : XX2Form<60, 26,
1424                        (outs vssrc:$XT), (ins vssrc:$XB),
1425                        "xsresp $XT, $XB", IIC_VecFP,
1426                        [(set f32:$XT, (PPCfre f32:$XB))]>;
1427  def XSRSP : XX2Form<60, 281,
1428                        (outs vssrc:$XT), (ins vsfrc:$XB),
1429                        "xsrsp $XT, $XB", IIC_VecFP, []>;
1430  def XSSQRTSP : XX2Form<60, 11,
1431                        (outs vssrc:$XT), (ins vssrc:$XB),
1432                        "xssqrtsp $XT, $XB", IIC_FPSqrtS,
1433                        [(set f32:$XT, (fsqrt f32:$XB))]>;
1434  def XSRSQRTESP : XX2Form<60, 10,
1435                           (outs vssrc:$XT), (ins vssrc:$XB),
1436                           "xsrsqrtesp $XT, $XB", IIC_VecFP,
1437                           [(set f32:$XT, (PPCfrsqrte f32:$XB))]>;
1438
1439  // FMA Instructions
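  // Note (added for clarity): each operation below has an A-form (accumulator
  // tied to $XT) and an M-form (multiplicand tied to $XT).  The BaseName,
  // IsVSXFMAAlt and AltVSXFMARel bits tie the two forms together so a later
  // pass can switch between them when that saves a copy, and the
  // RegConstraint/NoEncode on $XTi model the read-modify-write of $XT.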
1440  let BaseName = "XSMADDASP" in {
1441  let isCommutable = 1 in
1442  def XSMADDASP : XX3Form<60, 1,
1443                          (outs vssrc:$XT),
1444                          (ins vssrc:$XTi, vssrc:$XA, vssrc:$XB),
1445                          "xsmaddasp $XT, $XA, $XB", IIC_VecFP,
1446                          [(set f32:$XT, (fma f32:$XA, f32:$XB, f32:$XTi))]>,
1447                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
1448                          AltVSXFMARel;
1449  let IsVSXFMAAlt = 1 in
1450  def XSMADDMSP : XX3Form<60, 9,
1451                          (outs vssrc:$XT),
1452                          (ins vssrc:$XTi, vssrc:$XA, vssrc:$XB),
1453                          "xsmaddmsp $XT, $XA, $XB", IIC_VecFP, []>,
1454                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
1455                          AltVSXFMARel;
1456  }
1457
1458  let BaseName = "XSMSUBASP" in {
1459  let isCommutable = 1 in
1460  def XSMSUBASP : XX3Form<60, 17,
1461                          (outs vssrc:$XT),
1462                          (ins vssrc:$XTi, vssrc:$XA, vssrc:$XB),
1463                          "xsmsubasp $XT, $XA, $XB", IIC_VecFP,
1464                          [(set f32:$XT, (fma f32:$XA, f32:$XB,
1465                                              (fneg f32:$XTi)))]>,
1466                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
1467                          AltVSXFMARel;
1468  let IsVSXFMAAlt = 1 in
1469  def XSMSUBMSP : XX3Form<60, 25,
1470                          (outs vssrc:$XT),
1471                          (ins vssrc:$XTi, vssrc:$XA, vssrc:$XB),
1472                          "xsmsubmsp $XT, $XA, $XB", IIC_VecFP, []>,
1473                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
1474                          AltVSXFMARel;
1475  }
1476
1477  let BaseName = "XSNMADDASP" in {
1478  let isCommutable = 1 in
1479  def XSNMADDASP : XX3Form<60, 129,
1480                          (outs vssrc:$XT),
1481                          (ins vssrc:$XTi, vssrc:$XA, vssrc:$XB),
1482                          "xsnmaddasp $XT, $XA, $XB", IIC_VecFP,
1483                          [(set f32:$XT, (fneg (fma f32:$XA, f32:$XB,
1484                                                    f32:$XTi)))]>,
1485                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
1486                          AltVSXFMARel;
1487  let IsVSXFMAAlt = 1 in
1488  def XSNMADDMSP : XX3Form<60, 137,
1489                          (outs vssrc:$XT),
1490                          (ins vssrc:$XTi, vssrc:$XA, vssrc:$XB),
1491                          "xsnmaddmsp $XT, $XA, $XB", IIC_VecFP, []>,
1492                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
1493                          AltVSXFMARel;
1494  }
1495
1496  let BaseName = "XSNMSUBASP" in {
1497  let isCommutable = 1 in
1498  def XSNMSUBASP : XX3Form<60, 145,
1499                          (outs vssrc:$XT),
1500                          (ins vssrc:$XTi, vssrc:$XA, vssrc:$XB),
1501                          "xsnmsubasp $XT, $XA, $XB", IIC_VecFP,
1502                          [(set f32:$XT, (fneg (fma f32:$XA, f32:$XB,
1503                                                    (fneg f32:$XTi))))]>,
1504                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
1505                          AltVSXFMARel;
1506  let IsVSXFMAAlt = 1 in
1507  def XSNMSUBMSP : XX3Form<60, 153,
1508                          (outs vssrc:$XT),
1509                          (ins vssrc:$XTi, vssrc:$XA, vssrc:$XB),
1510                          "xsnmsubmsp $XT, $XA, $XB", IIC_VecFP, []>,
1511                          RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
1512                          AltVSXFMARel;
1513  }
1514
1515  // Additional xsnmsubasp patterns: -a*b + c == -(a*b - c)
1516  def : Pat<(fma (fneg f32:$A), f32:$B, f32:$C),
1517            (XSNMSUBASP $C, $A, $B)>;
1518  def : Pat<(fma f32:$A, (fneg f32:$B), f32:$C),
1519            (XSNMSUBASP $C, $A, $B)>;
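  // Why this maps to XSNMSUBASP: per the definition above, the instruction
  // computes -(XA*XB - XTi), so with XTi = $C it yields -($A*$B - $C)
  // = (-$A)*$B + $C, which is fma(-A, B, C) (and, by commutativity of the
  // multiply, also fma(A, -B, C)).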
1520
1521  // Single Precision Conversions (FP <-> INT)
1522  def XSCVSXDSP : XX2Form<60, 312,
1523                      (outs vssrc:$XT), (ins vsfrc:$XB),
1524                      "xscvsxdsp $XT, $XB", IIC_VecFP,
1525                      [(set f32:$XT, (PPCfcfids f64:$XB))]>;
1526  def XSCVUXDSP : XX2Form<60, 296,
1527                      (outs vssrc:$XT), (ins vsfrc:$XB),
1528                      "xscvuxdsp $XT, $XB", IIC_VecFP,
1529                      [(set f32:$XT, (PPCfcfidus f64:$XB))]>;
1530
1531  // Conversions between vector and scalar single precision
1532  def XSCVDPSPN : XX2Form<60, 267, (outs vsrc:$XT), (ins vssrc:$XB),
1533                          "xscvdpspn $XT, $XB", IIC_VecFP, []>;
1534  def XSCVSPDPN : XX2Form<60, 331, (outs vssrc:$XT), (ins vsrc:$XB),
1535                          "xscvspdpn $XT, $XB", IIC_VecFP, []>;
1536
1537  let Predicates = [IsLittleEndian] in {
1538  def : Pat<DWToSPExtractConv.El0SS1,
1539            (f32 (XSCVSXDSP (COPY_TO_REGCLASS (XXPERMDI $S1, $S1, 2), VSFRC)))>;
1540  def : Pat<DWToSPExtractConv.El1SS1,
1541            (f32 (XSCVSXDSP (COPY_TO_REGCLASS
1542                              (f64 (COPY_TO_REGCLASS $S1, VSRC)), VSFRC)))>;
1543  def : Pat<DWToSPExtractConv.El0US1,
1544            (f32 (XSCVUXDSP (COPY_TO_REGCLASS (XXPERMDI $S1, $S1, 2), VSFRC)))>;
1545  def : Pat<DWToSPExtractConv.El1US1,
1546            (f32 (XSCVUXDSP (COPY_TO_REGCLASS
1547                              (f64 (COPY_TO_REGCLASS $S1, VSRC)), VSFRC)))>;
1548  }
1549
1550  let Predicates = [IsBigEndian] in {
1551  def : Pat<DWToSPExtractConv.El0SS1,
1552            (f32 (XSCVSXDSP (COPY_TO_REGCLASS $S1, VSFRC)))>;
1553  def : Pat<DWToSPExtractConv.El1SS1,
1554            (f32 (XSCVSXDSP (COPY_TO_REGCLASS (XXPERMDI $S1, $S1, 2), VSFRC)))>;
1555  def : Pat<DWToSPExtractConv.El0US1,
1556            (f32 (XSCVUXDSP (COPY_TO_REGCLASS $S1, VSFRC)))>;
1557  def : Pat<DWToSPExtractConv.El1US1,
1558            (f32 (XSCVUXDSP (COPY_TO_REGCLASS (XXPERMDI $S1, $S1, 2), VSFRC)))>;
1559  }
1560
1561  // Instructions for converting float to i64 feeding a store.
1562  let Predicates = [NoP9Vector] in {
1563  def : Pat<(PPCstore_scal_int_from_vsr
1564              (f64 (PPCcv_fp_to_sint_in_vsr f64:$src)), xoaddr:$dst, 8),
1565            (STXSDX (XSCVDPSXDS f64:$src), xoaddr:$dst)>;
1566  def : Pat<(PPCstore_scal_int_from_vsr
1567              (f64 (PPCcv_fp_to_uint_in_vsr f64:$src)), xoaddr:$dst, 8),
1568            (STXSDX (XSCVDPUXDS f64:$src), xoaddr:$dst)>;
1569  }
1570
1571  // Instructions for converting float to i32 feeding a store.
1572  def : Pat<(PPCstore_scal_int_from_vsr
1573              (f64 (PPCcv_fp_to_sint_in_vsr f64:$src)), xoaddr:$dst, 4),
1574            (STIWX (XSCVDPSXWS f64:$src), xoaddr:$dst)>;
1575  def : Pat<(PPCstore_scal_int_from_vsr
1576              (f64 (PPCcv_fp_to_uint_in_vsr f64:$src)), xoaddr:$dst, 4),
1577            (STIWX (XSCVDPUXWS f64:$src), xoaddr:$dst)>;
1578
1579  def : Pat<(v2i64 (smax v2i64:$src1, v2i64:$src2)),
1580            (v2i64 (VMAXSD (COPY_TO_REGCLASS $src1, VRRC),
1581                           (COPY_TO_REGCLASS $src2, VRRC)))>;
1582  def : Pat<(v2i64 (umax v2i64:$src1, v2i64:$src2)),
1583            (v2i64 (VMAXUD (COPY_TO_REGCLASS $src1, VRRC),
1584                           (COPY_TO_REGCLASS $src2, VRRC)))>;
1585  def : Pat<(v2i64 (smin v2i64:$src1, v2i64:$src2)),
1586            (v2i64 (VMINSD (COPY_TO_REGCLASS $src1, VRRC),
1587                           (COPY_TO_REGCLASS $src2, VRRC)))>;
1588  def : Pat<(v2i64 (umin v2i64:$src1, v2i64:$src2)),
1589            (v2i64 (VMINUD (COPY_TO_REGCLASS $src1, VRRC),
1590                           (COPY_TO_REGCLASS $src2, VRRC)))>;
1591} // AddedComplexity = 400
1592} // HasP8Vector
1593
1594let AddedComplexity = 400 in {
1595let Predicates = [HasDirectMove] in {
1596  // VSX direct move instructions
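  // Note (added for clarity): these moves operate on doubleword 0 of a VSR.
  // MFVSRD reads doubleword 0 and MFVSRWZ reads its low word; MTVSRD,
  // MTVSRWA and MTVSRWZ write doubleword 0 and leave doubleword 1 undefined,
  // which is why the MovesToVSR dags further down shift and/or swap the value
  // into the element that is actually wanted.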
1597  def MFVSRD : XX1_RS6_RD5_XO<31, 51, (outs g8rc:$rA), (ins vsfrc:$XT),
1598                              "mfvsrd $rA, $XT", IIC_VecGeneral,
1599                              [(set i64:$rA, (PPCmfvsr f64:$XT))]>,
1600      Requires<[In64BitMode]>;
1601  let isCodeGenOnly = 1 in
1602  def MFVRD : XX1_RS6_RD5_XO<31, 51, (outs g8rc:$rA), (ins vsrc:$XT),
1603                             "mfvsrd $rA, $XT", IIC_VecGeneral,
1604                             []>,
1605      Requires<[In64BitMode]>;
1606  def MFVSRWZ : XX1_RS6_RD5_XO<31, 115, (outs gprc:$rA), (ins vsfrc:$XT),
1607                               "mfvsrwz $rA, $XT", IIC_VecGeneral,
1608                               [(set i32:$rA, (PPCmfvsr f64:$XT))]>;
1609  let isCodeGenOnly = 1 in
1610  def MFVRWZ : XX1_RS6_RD5_XO<31, 115, (outs gprc:$rA), (ins vsrc:$XT),
1611                               "mfvsrwz $rA, $XT", IIC_VecGeneral,
1612                               []>;
1613  def MTVSRD : XX1_RS6_RD5_XO<31, 179, (outs vsfrc:$XT), (ins g8rc:$rA),
1614                              "mtvsrd $XT, $rA", IIC_VecGeneral,
1615                              [(set f64:$XT, (PPCmtvsra i64:$rA))]>,
1616      Requires<[In64BitMode]>;
1617  let isCodeGenOnly = 1 in
1618  def MTVRD : XX1_RS6_RD5_XO<31, 179, (outs vsrc:$XT), (ins g8rc:$rA),
1619                              "mtvsrd $XT, $rA", IIC_VecGeneral,
1620                              []>,
1621      Requires<[In64BitMode]>;
1622  def MTVSRWA : XX1_RS6_RD5_XO<31, 211, (outs vsfrc:$XT), (ins gprc:$rA),
1623                               "mtvsrwa $XT, $rA", IIC_VecGeneral,
1624                               [(set f64:$XT, (PPCmtvsra i32:$rA))]>;
1625  let isCodeGenOnly = 1 in
1626  def MTVRWA : XX1_RS6_RD5_XO<31, 211, (outs vsrc:$XT), (ins gprc:$rA),
1627                               "mtvsrwa $XT, $rA", IIC_VecGeneral,
1628                               []>;
1629  def MTVSRWZ : XX1_RS6_RD5_XO<31, 243, (outs vsfrc:$XT), (ins gprc:$rA),
1630                               "mtvsrwz $XT, $rA", IIC_VecGeneral,
1631                               [(set f64:$XT, (PPCmtvsrz i32:$rA))]>;
1632  let isCodeGenOnly = 1 in
1633  def MTVRWZ : XX1_RS6_RD5_XO<31, 243, (outs vsrc:$XT), (ins gprc:$rA),
1634                               "mtvsrwz $XT, $rA", IIC_VecGeneral,
1635                               []>;
1636} // HasDirectMove
1637
1638let Predicates = [IsISA3_0, HasDirectMove] in {
1639  def MTVSRWS: XX1_RS6_RD5_XO<31, 403, (outs vsrc:$XT), (ins gprc:$rA),
1640                              "mtvsrws $XT, $rA", IIC_VecGeneral, []>;
1641
1642  def MTVSRDD: XX1Form<31, 435, (outs vsrc:$XT), (ins g8rc_nox0:$rA, g8rc:$rB),
1643                       "mtvsrdd $XT, $rA, $rB", IIC_VecGeneral,
1644                       []>, Requires<[In64BitMode]>;
1645
1646  def MFVSRLD: XX1_RS6_RD5_XO<31, 307, (outs g8rc:$rA), (ins vsrc:$XT),
1647                              "mfvsrld $rA, $XT", IIC_VecGeneral,
1648                              []>, Requires<[In64BitMode]>;
1649
1650} // IsISA3_0, HasDirectMove
1651} // AddedComplexity = 400
1652
1653// We want to parse this from asm, but we don't want the printer to use this
1654// alias since it would print with a VSX register. So leave Emit = 0 here.
1655def : InstAlias<"mfvrd $rA, $XT",
1656                (MFVRD g8rc:$rA, vrrc:$XT), 0>;
1657def : InstAlias<"mffprd $rA, $src",
1658                (MFVSRD g8rc:$rA, f8rc:$src)>;
1659def : InstAlias<"mtvrd $XT, $rA",
1660                (MTVRD vrrc:$XT, g8rc:$rA), 0>;
1661def : InstAlias<"mtfprd $dst, $rA",
1662                (MTVSRD f8rc:$dst, g8rc:$rA)>;
1663def : InstAlias<"mfvrwz $rA, $XT",
1664                (MFVRWZ gprc:$rA, vrrc:$XT), 0>;
1665def : InstAlias<"mffprwz $rA, $src",
1666                (MFVSRWZ gprc:$rA, f8rc:$src)>;
1667def : InstAlias<"mtvrwa $XT, $rA",
1668                (MTVRWA vrrc:$XT, gprc:$rA), 0>;
1669def : InstAlias<"mtfprwa $dst, $rA",
1670                (MTVSRWA f8rc:$dst, gprc:$rA)>;
1671def : InstAlias<"mtvrwz $XT, $rA",
1672                (MTVRWZ vrrc:$XT, gprc:$rA), 0>;
1673def : InstAlias<"mtfprwz $dst, $rA",
1674                (MTVSRWZ f8rc:$dst, gprc:$rA)>;
1675
1676/*  Direct moves of various widths from GPRs into VSRs. Each move lines
1677    the value up into element 0 (both BE and LE). Namely, entities smaller than
1678    a doubleword are shifted left and moved for BE. For LE, they're moved, then
1679    swapped to go into the least significant element of the VSR.
1680*/
1681def MovesToVSR {
1682  dag BE_BYTE_0 =
1683    (MTVSRD
1684      (RLDICR
1685        (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $A, sub_32), 56, 7));
1686  dag BE_HALF_0 =
1687    (MTVSRD
1688      (RLDICR
1689        (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $A, sub_32), 48, 15));
1690  dag BE_WORD_0 =
1691    (MTVSRD
1692      (RLDICR
1693        (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $A, sub_32), 32, 31));
1694  dag BE_DWORD_0 = (MTVSRD $A);
1695
1696  dag LE_MTVSRW = (MTVSRD (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $A, sub_32));
1697  dag LE_WORD_1 = (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
1698                                        LE_MTVSRW, sub_64));
1699  dag LE_WORD_0 = (XXPERMDI LE_WORD_1, LE_WORD_1, 2);
1700  dag LE_DWORD_1 = (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
1701                                         BE_DWORD_0, sub_64));
1702  dag LE_DWORD_0 = (XXPERMDI LE_DWORD_1, LE_DWORD_1, 2);
1703}
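/*  Worked example (illustrative): for BE_WORD_0 above, the 32-bit value is
    inserted into the low word of a 64-bit GPR and RLDICR ..., 32, 31 rotates
    it into bits 0-31 (the high word), so the MTVSRD leaves it in word 0 of
    element 0.  BE_BYTE_0 and BE_HALF_0 do the same with rotates of 56 and 48.
    For LE, LE_DWORD_0 and LE_WORD_0 do the MTVSRD without a shift and then
    XXPERMDI with immediate 2 (a doubleword swap), which places the value in
    the element that LE numbering calls element 0, as described above.
*/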
1704
1705/*  Patterns for extracting elements out of vectors. Integer elements are
1706    extracted using direct move operations. Patterns for extracting elements
1707    whose indices are not available at compile time are also provided with
1708    various _VARIABLE_ patterns.
1709    The numbering for the DAGs is for LE, but when used on BE, the correct
1710    LE element can just be used (i.e. LE_BYTE_2 == BE_BYTE_13).
1711*/
1712def VectorExtractions {
1713  // Doubleword extraction
1714  dag LE_DWORD_0 =
1715    (MFVSRD
1716      (EXTRACT_SUBREG
1717        (XXPERMDI (COPY_TO_REGCLASS $S, VSRC),
1718                  (COPY_TO_REGCLASS $S, VSRC), 2), sub_64));
1719  dag LE_DWORD_1 = (MFVSRD
1720                     (EXTRACT_SUBREG
1721                       (v2i64 (COPY_TO_REGCLASS $S, VSRC)), sub_64));
1722
1723  // Word extraction
1724  dag LE_WORD_0 = (MFVSRWZ (EXTRACT_SUBREG (XXPERMDI $S, $S, 2), sub_64));
1725  dag LE_WORD_1 = (MFVSRWZ (EXTRACT_SUBREG (XXSLDWI $S, $S, 1), sub_64));
1726  dag LE_WORD_2 = (MFVSRWZ (EXTRACT_SUBREG
1727                             (v2i64 (COPY_TO_REGCLASS $S, VSRC)), sub_64));
1728  dag LE_WORD_3 = (MFVSRWZ (EXTRACT_SUBREG (XXSLDWI $S, $S, 3), sub_64));
1729
1730  // Halfword extraction
1731  dag LE_HALF_0 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 0, 48), sub_32));
1732  dag LE_HALF_1 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 48, 48), sub_32));
1733  dag LE_HALF_2 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 32, 48), sub_32));
1734  dag LE_HALF_3 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 16, 48), sub_32));
1735  dag LE_HALF_4 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 0, 48), sub_32));
1736  dag LE_HALF_5 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 48, 48), sub_32));
1737  dag LE_HALF_6 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 32, 48), sub_32));
1738  dag LE_HALF_7 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 16, 48), sub_32));
1739
1740  // Byte extraction
1741  dag LE_BYTE_0 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 0, 56), sub_32));
1742  dag LE_BYTE_1 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 56, 56), sub_32));
1743  dag LE_BYTE_2 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 48, 56), sub_32));
1744  dag LE_BYTE_3 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 40, 56), sub_32));
1745  dag LE_BYTE_4 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 32, 56), sub_32));
1746  dag LE_BYTE_5 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 24, 56), sub_32));
1747  dag LE_BYTE_6 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 16, 56), sub_32));
1748  dag LE_BYTE_7 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_0, 8, 56), sub_32));
1749  dag LE_BYTE_8 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 0, 56), sub_32));
1750  dag LE_BYTE_9 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 56, 56), sub_32));
1751  dag LE_BYTE_10 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 48, 56), sub_32));
1752  dag LE_BYTE_11 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 40, 56), sub_32));
1753  dag LE_BYTE_12 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 32, 56), sub_32));
1754  dag LE_BYTE_13 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 24, 56), sub_32));
1755  dag LE_BYTE_14 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 16, 56), sub_32));
1756  dag LE_BYTE_15 = (i32 (EXTRACT_SUBREG (RLDICL LE_DWORD_1, 8, 56), sub_32));
1757
1758  /* Variable element number (BE and LE patterns must be specified separately)
1759     This is a rather involved process.
1760
1761     Conceptually, this is how the move is accomplished:
1762     1. Identify which doubleword contains the element
1763     2. Shift in the VMX register so that the doubleword containing the
1764        element is lined up for the MFVSRD
1765     3. Perform the move so that the element (along with some extra stuff)
1766        is in the GPR
1767     4. Right shift within the GPR so that the element is right-justified
1768
1769     Of course, the index is an element number which has a different meaning
1770     on LE/BE so the patterns have to be specified separately.
1771
1772     Note: The final result will be the element right-justified with high
1773           order bits being arbitrarily defined (namely, whatever was in the
1774           vector register to the left of the value originally).
1775  */
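  /*  Worked example (illustrative), LE variable byte with $Idx = 5, using the
      LE_VBYTE_* dags below:
      1. (ANDC8 (LI8 8), $Idx) = 8 & ~5 = 8, so LVSL builds a rotate-left-by-8-
         bytes permute control (bytes 0-7 sit on the right, per the comments)
      2. VPERM applies that control so the doubleword holding byte 5 is the one
         MFVSRD will read
      3. MFVSRD moves that doubleword into a GPR
      4. (AND8 (LI8 7), $Idx) = 5 and RLDICR ..., 3, 60 multiplies it by 8, so
         SRD shifts right by 40 bits, leaving the byte right-justified
  */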
1776
1777  /*  LE variable byte
1778      Number 1. above:
1779      - For elements 0-7, we shift left by 8 bytes since they're on the right
1780      - For elements 8-15, we need not shift (shift left by zero bytes)
1781      This is accomplished by inverting the bits of the index and AND-ing
1782      with 0x8 (i.e. clearing all bits of the index and inverting bit 60).
1783  */
1784  dag LE_VBYTE_PERM_VEC = (v16i8 (LVSL ZERO8, (ANDC8 (LI8 8), $Idx)));
1785
1786  //  Number 2. above:
1787  //  - Now that we set up the shift amount, we shift in the VMX register
1788  dag LE_VBYTE_PERMUTE = (v16i8 (VPERM $S, $S, LE_VBYTE_PERM_VEC));
1789
1790  //  Number 3. above:
1791  //  - The doubleword containing our element is moved to a GPR
1792  dag LE_MV_VBYTE = (MFVSRD
1793                      (EXTRACT_SUBREG
1794                        (v2i64 (COPY_TO_REGCLASS LE_VBYTE_PERMUTE, VSRC)),
1795                        sub_64));
1796
1797  /*  Number 4. above:
1798      - Truncate the element number to the range 0-7 (8-15 are symmetrical
1799        and out of range values are truncated accordingly)
1800      - Multiply by 8 as we need to shift right by the number of bits, not bytes
1801      - Shift right in the GPR by the calculated value
1802  */
1803  dag LE_VBYTE_SHIFT = (EXTRACT_SUBREG (RLDICR (AND8 (LI8 7), $Idx), 3, 60),
1804                                       sub_32);
1805  dag LE_VARIABLE_BYTE = (EXTRACT_SUBREG (SRD LE_MV_VBYTE, LE_VBYTE_SHIFT),
1806                                         sub_32);
1807
1808  /*  LE variable halfword
1809      Number 1. above:
1810      - For elements 0-3, we shift left by 8 bytes since they're on the right
1811      - For elements 4-7, we need not shift (shift left by zero bytes)
1812      Similarly to the byte pattern, we invert the bits of the index, but we
1813      AND with 0x4 (i.e. clear all bits of the index and invert bit 61).
1814      Of course, the shift is still by 8 bytes, so we multiply the masked value by 2.
1815  */
1816  dag LE_VHALF_PERM_VEC =
1817    (v16i8 (LVSL ZERO8, (RLDICR (ANDC8 (LI8 4), $Idx), 1, 62)));
1818
1819  //  Number 2. above:
1820  //  - Now that we set up the shift amount, we shift in the VMX register
1821  dag LE_VHALF_PERMUTE = (v16i8 (VPERM $S, $S, LE_VHALF_PERM_VEC));
1822
1823  //  Number 3. above:
1824  //  - The doubleword containing our element is moved to a GPR
1825  dag LE_MV_VHALF = (MFVSRD
1826                      (EXTRACT_SUBREG
1827                        (v2i64 (COPY_TO_REGCLASS LE_VHALF_PERMUTE, VSRC)),
1828                        sub_64));
1829
1830  /*  Number 4. above:
1831      - Truncate the element number to the range 0-3 (4-7 are symmetrical
1832        and out of range values are truncated accordingly)
1833      - Multiply by 16 as we need to shift right by the number of bits
1834      - Shift right in the GPR by the calculated value
1835  */
1836  dag LE_VHALF_SHIFT = (EXTRACT_SUBREG (RLDICR (AND8 (LI8 3), $Idx), 4, 59),
1837                                       sub_32);
1838  dag LE_VARIABLE_HALF = (EXTRACT_SUBREG (SRD LE_MV_VHALF, LE_VHALF_SHIFT),
1839                                         sub_32);
1840
1841  /*  LE variable word
1842      Number 1. above:
1843      - For elements 0-1, we shift left by 8 bytes since they're on the right
1844      - For elements 2-3, we need not shift
1845  */
1846  dag LE_VWORD_PERM_VEC = (v16i8 (LVSL ZERO8,
1847                                       (RLDICR (ANDC8 (LI8 2), $Idx), 2, 61)));
1848
1849  //  Number 2. above:
1850  //  - Now that we set up the shift amount, we shift in the VMX register
1851  dag LE_VWORD_PERMUTE = (v16i8 (VPERM $S, $S, LE_VWORD_PERM_VEC));
1852
1853  //  Number 3. above:
1854  //  - The doubleword containing our element is moved to a GPR
1855  dag LE_MV_VWORD = (MFVSRD
1856                      (EXTRACT_SUBREG
1857                        (v2i64 (COPY_TO_REGCLASS LE_VWORD_PERMUTE, VSRC)),
1858                        sub_64));
1859
1860  /*  Number 4. above:
1861      - Truncate the element number to the range 0-1 (2-3 are symmetrical
1862        and out of range values are truncated accordingly)
1863      - Multiply by 32 as we need to shift right by the number of bits
1864      - Shift right in the GPR by the calculated value
1865  */
1866  dag LE_VWORD_SHIFT = (EXTRACT_SUBREG (RLDICR (AND8 (LI8 1), $Idx), 5, 58),
1867                                       sub_32);
1868  dag LE_VARIABLE_WORD = (EXTRACT_SUBREG (SRD LE_MV_VWORD, LE_VWORD_SHIFT),
1869                                         sub_32);
1870
1871  /*  LE variable doubleword
1872      Number 1. above:
1873      - For element 0, we shift left by 8 bytes since it's on the right
1874      - For element 1, we need not shift
1875  */
1876  dag LE_VDWORD_PERM_VEC = (v16i8 (LVSL ZERO8,
1877                                        (RLDICR (ANDC8 (LI8 1), $Idx), 3, 60)));
1878
1879  //  Number 2. above:
1880  //  - Now that we set up the shift amount, we shift in the VMX register
1881  dag LE_VDWORD_PERMUTE = (v16i8 (VPERM $S, $S, LE_VDWORD_PERM_VEC));
1882
1883  // Number 3. above:
1884  //  - The doubleword containing our element is moved to a GPR
1885  //  - Number 4. is not needed for the doubleword as the value is 64-bits
1886  dag LE_VARIABLE_DWORD =
1887        (MFVSRD (EXTRACT_SUBREG
1888                  (v2i64 (COPY_TO_REGCLASS LE_VDWORD_PERMUTE, VSRC)),
1889                  sub_64));
1890
1891  /*  LE variable float
1892      - Shift the vector to line up the desired element to BE Word 0
1893      - Convert the 32-bit float (now in BE word 0) to double-precision format
1894  */
1895  dag LE_VFLOAT_PERM_VEC = (v16i8 (LVSL ZERO8,
1896                                  (RLDICR (XOR8 (LI8 3), $Idx), 2, 61)));
1897  dag LE_VFLOAT_PERMUTE = (VPERM $S, $S, LE_VFLOAT_PERM_VEC);
1898  dag LE_VARIABLE_FLOAT = (XSCVSPDPN LE_VFLOAT_PERMUTE);
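  //  Illustrative: the permute control above shifts by ($Idx ^ 3) * 4 bytes,
  //  which rotates LE word $Idx up to BE word 0 (e.g. $Idx = 1 gives a shift
  //  of 8 bytes); XSCVSPDPN then converts the single-precision value in
  //  word 0 to double-precision format.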
1899
1900  /*  LE variable double
1901      Same as the LE doubleword except there is no move.
1902  */
1903  dag LE_VDOUBLE_PERMUTE = (v16i8 (VPERM (v16i8 (COPY_TO_REGCLASS $S, VRRC)),
1904                                         (v16i8 (COPY_TO_REGCLASS $S, VRRC)),
1905                                         LE_VDWORD_PERM_VEC));
1906  dag LE_VARIABLE_DOUBLE = (COPY_TO_REGCLASS LE_VDOUBLE_PERMUTE, VSRC);
1907
1908  /*  BE variable byte
1909      The algorithm here is the same as the LE variable byte except:
1910      - The shift in the VMX register is by 0/8 for opposite element numbers so
1911        we simply AND the element number with 0x8
1912      - The order of elements after the move to GPR is reversed, so we invert
1913        the bits of the index prior to truncating to the range 0-7
1914  */
1915  dag BE_VBYTE_PERM_VEC = (v16i8 (LVSL ZERO8, (ANDIo8 $Idx, 8)));
1916  dag BE_VBYTE_PERMUTE = (v16i8 (VPERM $S, $S, BE_VBYTE_PERM_VEC));
1917  dag BE_MV_VBYTE = (MFVSRD
1918                      (EXTRACT_SUBREG
1919                        (v2i64 (COPY_TO_REGCLASS BE_VBYTE_PERMUTE, VSRC)),
1920                        sub_64));
1921  dag BE_VBYTE_SHIFT = (EXTRACT_SUBREG (RLDICR (ANDC8 (LI8 7), $Idx), 3, 60),
1922                                       sub_32);
1923  dag BE_VARIABLE_BYTE = (EXTRACT_SUBREG (SRD BE_MV_VBYTE, BE_VBYTE_SHIFT),
1924                                         sub_32);
1925
1926  /*  BE variable halfword
1927      The algorithm here is the same as the LE variable halfword except:
1928      - The shift in the VMX register is by 0/8 for opposite element numbers so
1929        we simply AND the element number with 0x4 and multiply by 2
1930      - The order of elements after the move to GPR is reversed, so we invert
1931        the bits of the index prior to truncating to the range 0-3
1932  */
1933  dag BE_VHALF_PERM_VEC = (v16i8 (LVSL ZERO8,
1934                                       (RLDICR (ANDIo8 $Idx, 4), 1, 62)));
1935  dag BE_VHALF_PERMUTE = (v16i8 (VPERM $S, $S, BE_VHALF_PERM_VEC));
1936  dag BE_MV_VHALF = (MFVSRD
1937                      (EXTRACT_SUBREG
1938                        (v2i64 (COPY_TO_REGCLASS BE_VHALF_PERMUTE, VSRC)),
1939                        sub_64));
1940  dag BE_VHALF_SHIFT = (EXTRACT_SUBREG (RLDICR (ANDC8 (LI8 3), $Idx), 4, 59),
1941                                       sub_32);
1942  dag BE_VARIABLE_HALF = (EXTRACT_SUBREG (SRD BE_MV_VHALF, BE_VHALF_SHIFT),
1943                                         sub_32);
1944
1945  /*  BE variable word
1946      The algorithm is the same as the LE variable word except:
1947      - The shift in the VMX register happens for opposite element numbers
1948      - The order of elements after the move to GPR is reversed, so we invert
1949        the bits of the index prior to truncating to the range 0-1
1950  */
1951  dag BE_VWORD_PERM_VEC = (v16i8 (LVSL ZERO8,
1952                                       (RLDICR (ANDIo8 $Idx, 2), 2, 61)));
1953  dag BE_VWORD_PERMUTE = (v16i8 (VPERM $S, $S, BE_VWORD_PERM_VEC));
1954  dag BE_MV_VWORD = (MFVSRD
1955                      (EXTRACT_SUBREG
1956                        (v2i64 (COPY_TO_REGCLASS BE_VWORD_PERMUTE, VSRC)),
1957                        sub_64));
1958  dag BE_VWORD_SHIFT = (EXTRACT_SUBREG (RLDICR (ANDC8 (LI8 1), $Idx), 5, 58),
1959                                       sub_32);
1960  dag BE_VARIABLE_WORD = (EXTRACT_SUBREG (SRD BE_MV_VWORD, BE_VWORD_SHIFT),
1961                                         sub_32);
1962
1963  /*  BE variable doubleword
1964      Same as the LE doubleword except we shift in the VMX register for opposite
1965      element indices.
1966  */
1967  dag BE_VDWORD_PERM_VEC = (v16i8 (LVSL ZERO8,
1968                                        (RLDICR (ANDIo8 $Idx, 1), 3, 60)));
1969  dag BE_VDWORD_PERMUTE = (v16i8 (VPERM $S, $S, BE_VDWORD_PERM_VEC));
1970  dag BE_VARIABLE_DWORD =
1971        (MFVSRD (EXTRACT_SUBREG
1972                  (v2i64 (COPY_TO_REGCLASS BE_VDWORD_PERMUTE, VSRC)),
1973                  sub_64));
1974
1975  /*  BE variable float
1976      - Shift the vector to line up the desired element to BE Word 0
1977      - Convert the 32-bit float (now in BE word 0) to double-precision format
1978  */
1979  dag BE_VFLOAT_PERM_VEC = (v16i8 (LVSL ZERO8, (RLDICR $Idx, 2, 61)));
1980  dag BE_VFLOAT_PERMUTE = (VPERM $S, $S, BE_VFLOAT_PERM_VEC);
1981  dag BE_VARIABLE_FLOAT = (XSCVSPDPN BE_VFLOAT_PERMUTE);
1982
1983  /* BE variable double
1984      Same as the BE doubleword except there is no move.
1985  */
1986  dag BE_VDOUBLE_PERMUTE = (v16i8 (VPERM (v16i8 (COPY_TO_REGCLASS $S, VRRC)),
1987                                         (v16i8 (COPY_TO_REGCLASS $S, VRRC)),
1988                                         BE_VDWORD_PERM_VEC));
1989  dag BE_VARIABLE_DOUBLE = (COPY_TO_REGCLASS BE_VDOUBLE_PERMUTE, VSRC);
1990}
1991
1992def NoP9Altivec : Predicate<"!PPCSubTarget->hasP9Altivec()">;
1993let AddedComplexity = 400 in {
1994// v4f32 scalar <-> vector conversions (BE)
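// Note (added for clarity): XSCVSPDPN converts the single-precision value in
// word 0 of its source, so the fixed-index extracts below first rotate the
// requested word into word 0 (XXSLDWI by the element number for words 1 and
// 3, XXPERMDI 2 for word 2) and then convert it.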
1995let Predicates = [IsBigEndian, HasP8Vector] in {
1996  def : Pat<(v4f32 (scalar_to_vector f32:$A)),
1997            (v4f32 (XSCVDPSPN $A))>;
1998  def : Pat<(f32 (vector_extract v4f32:$S, 0)),
1999            (f32 (XSCVSPDPN $S))>;
2000  def : Pat<(f32 (vector_extract v4f32:$S, 1)),
2001            (f32 (XSCVSPDPN (XXSLDWI $S, $S, 1)))>;
2002  def : Pat<(f32 (vector_extract v4f32:$S, 2)),
2003            (f32 (XSCVSPDPN (XXPERMDI $S, $S, 2)))>;
2004  def : Pat<(f32 (vector_extract v4f32:$S, 3)),
2005            (f32 (XSCVSPDPN (XXSLDWI $S, $S, 3)))>;
2006  def : Pat<(f32 (vector_extract v4f32:$S, i64:$Idx)),
2007            (f32 VectorExtractions.BE_VARIABLE_FLOAT)>;
2008} // IsBigEndian, HasP8Vector
2009
2010// Variable index vector_extract for v2f64 does not require P8Vector
2011let Predicates = [IsBigEndian, HasVSX] in
2012  def : Pat<(f64 (vector_extract v2f64:$S, i64:$Idx)),
2013            (f64 VectorExtractions.BE_VARIABLE_DOUBLE)>;
2014
2015let Predicates = [IsBigEndian, HasDirectMove] in {
2016  // v16i8 scalar <-> vector conversions (BE)
2017  def : Pat<(v16i8 (scalar_to_vector i32:$A)),
2018            (v16i8 (SUBREG_TO_REG (i64 1), MovesToVSR.BE_BYTE_0, sub_64))>;
2019  def : Pat<(v8i16 (scalar_to_vector i32:$A)),
2020            (v8i16 (SUBREG_TO_REG (i64 1), MovesToVSR.BE_HALF_0, sub_64))>;
2021  def : Pat<(v4i32 (scalar_to_vector i32:$A)),
2022            (v4i32 (SUBREG_TO_REG (i64 1), MovesToVSR.BE_WORD_0, sub_64))>;
2023  def : Pat<(v2i64 (scalar_to_vector i64:$A)),
2024            (v2i64 (SUBREG_TO_REG (i64 1), MovesToVSR.BE_DWORD_0, sub_64))>;
2025
2026  // v2i64 scalar <-> vector conversions (BE)
2027  def : Pat<(i64 (vector_extract v2i64:$S, 0)),
2028            (i64 VectorExtractions.LE_DWORD_1)>;
2029  def : Pat<(i64 (vector_extract v2i64:$S, 1)),
2030            (i64 VectorExtractions.LE_DWORD_0)>;
2031  def : Pat<(i64 (vector_extract v2i64:$S, i64:$Idx)),
2032            (i64 VectorExtractions.BE_VARIABLE_DWORD)>;
2033} // IsBigEndian, HasDirectMove
2034
2035let Predicates = [IsBigEndian, HasDirectMove, NoP9Altivec] in {
2036  def : Pat<(i32 (vector_extract v16i8:$S, 0)),
2037            (i32 VectorExtractions.LE_BYTE_15)>;
2038  def : Pat<(i32 (vector_extract v16i8:$S, 1)),
2039            (i32 VectorExtractions.LE_BYTE_14)>;
2040  def : Pat<(i32 (vector_extract v16i8:$S, 2)),
2041            (i32 VectorExtractions.LE_BYTE_13)>;
2042  def : Pat<(i32 (vector_extract v16i8:$S, 3)),
2043            (i32 VectorExtractions.LE_BYTE_12)>;
2044  def : Pat<(i32 (vector_extract v16i8:$S, 4)),
2045            (i32 VectorExtractions.LE_BYTE_11)>;
2046  def : Pat<(i32 (vector_extract v16i8:$S, 5)),
2047            (i32 VectorExtractions.LE_BYTE_10)>;
2048  def : Pat<(i32 (vector_extract v16i8:$S, 6)),
2049            (i32 VectorExtractions.LE_BYTE_9)>;
2050  def : Pat<(i32 (vector_extract v16i8:$S, 7)),
2051            (i32 VectorExtractions.LE_BYTE_8)>;
2052  def : Pat<(i32 (vector_extract v16i8:$S, 8)),
2053            (i32 VectorExtractions.LE_BYTE_7)>;
2054  def : Pat<(i32 (vector_extract v16i8:$S, 9)),
2055            (i32 VectorExtractions.LE_BYTE_6)>;
2056  def : Pat<(i32 (vector_extract v16i8:$S, 10)),
2057            (i32 VectorExtractions.LE_BYTE_5)>;
2058  def : Pat<(i32 (vector_extract v16i8:$S, 11)),
2059            (i32 VectorExtractions.LE_BYTE_4)>;
2060  def : Pat<(i32 (vector_extract v16i8:$S, 12)),
2061            (i32 VectorExtractions.LE_BYTE_3)>;
2062  def : Pat<(i32 (vector_extract v16i8:$S, 13)),
2063            (i32 VectorExtractions.LE_BYTE_2)>;
2064  def : Pat<(i32 (vector_extract v16i8:$S, 14)),
2065            (i32 VectorExtractions.LE_BYTE_1)>;
2066  def : Pat<(i32 (vector_extract v16i8:$S, 15)),
2067            (i32 VectorExtractions.LE_BYTE_0)>;
2068  def : Pat<(i32 (vector_extract v16i8:$S, i64:$Idx)),
2069            (i32 VectorExtractions.BE_VARIABLE_BYTE)>;
2070
2071  // v8i16 scalar <-> vector conversions (BE)
2072  def : Pat<(i32 (vector_extract v8i16:$S, 0)),
2073            (i32 VectorExtractions.LE_HALF_7)>;
2074  def : Pat<(i32 (vector_extract v8i16:$S, 1)),
2075            (i32 VectorExtractions.LE_HALF_6)>;
2076  def : Pat<(i32 (vector_extract v8i16:$S, 2)),
2077            (i32 VectorExtractions.LE_HALF_5)>;
2078  def : Pat<(i32 (vector_extract v8i16:$S, 3)),
2079            (i32 VectorExtractions.LE_HALF_4)>;
2080  def : Pat<(i32 (vector_extract v8i16:$S, 4)),
2081            (i32 VectorExtractions.LE_HALF_3)>;
2082  def : Pat<(i32 (vector_extract v8i16:$S, 5)),
2083            (i32 VectorExtractions.LE_HALF_2)>;
2084  def : Pat<(i32 (vector_extract v8i16:$S, 6)),
2085            (i32 VectorExtractions.LE_HALF_1)>;
2086  def : Pat<(i32 (vector_extract v8i16:$S, 7)),
2087            (i32 VectorExtractions.LE_HALF_0)>;
2088  def : Pat<(i32 (vector_extract v8i16:$S, i64:$Idx)),
2089            (i32 VectorExtractions.BE_VARIABLE_HALF)>;
2090
2091  // v4i32 scalar <-> vector conversions (BE)
2092  def : Pat<(i32 (vector_extract v4i32:$S, 0)),
2093            (i32 VectorExtractions.LE_WORD_3)>;
2094  def : Pat<(i32 (vector_extract v4i32:$S, 1)),
2095            (i32 VectorExtractions.LE_WORD_2)>;
2096  def : Pat<(i32 (vector_extract v4i32:$S, 2)),
2097            (i32 VectorExtractions.LE_WORD_1)>;
2098  def : Pat<(i32 (vector_extract v4i32:$S, 3)),
2099            (i32 VectorExtractions.LE_WORD_0)>;
2100  def : Pat<(i32 (vector_extract v4i32:$S, i64:$Idx)),
2101            (i32 VectorExtractions.BE_VARIABLE_WORD)>;
2102} // IsBigEndian, HasDirectMove, NoP9Altivec
2103
2104// v4f32 scalar <-> vector conversions (LE)
2105let Predicates = [IsLittleEndian, HasP8Vector] in {
2106  def : Pat<(v4f32 (scalar_to_vector f32:$A)),
2107            (v4f32 (XXSLDWI (XSCVDPSPN $A), (XSCVDPSPN $A), 1))>;
2108  def : Pat<(f32 (vector_extract v4f32:$S, 0)),
2109            (f32 (XSCVSPDPN (XXSLDWI $S, $S, 3)))>;
2110  def : Pat<(f32 (vector_extract v4f32:$S, 1)),
2111            (f32 (XSCVSPDPN (XXPERMDI $S, $S, 2)))>;
2112  def : Pat<(f32 (vector_extract v4f32:$S, 2)),
2113            (f32 (XSCVSPDPN (XXSLDWI $S, $S, 1)))>;
2114  def : Pat<(f32 (vector_extract v4f32:$S, 3)),
2115            (f32 (XSCVSPDPN $S))>;
2116  def : Pat<(f32 (vector_extract v4f32:$S, i64:$Idx)),
2117            (f32 VectorExtractions.LE_VARIABLE_FLOAT)>;
2118} // IsLittleEndian, HasP8Vector
2119
2120// Variable index vector_extract for v2f64 does not require P8Vector
2121let Predicates = [IsLittleEndian, HasVSX] in
2122  def : Pat<(f64 (vector_extract v2f64:$S, i64:$Idx)),
2123            (f64 VectorExtractions.LE_VARIABLE_DOUBLE)>;
2124
2125def : Pat<(int_ppc_vsx_stxvd2x_be v2f64:$rS, xoaddr:$dst),
2126            (STXVD2X $rS, xoaddr:$dst)>;
2127def : Pat<(int_ppc_vsx_stxvw4x_be v4i32:$rS, xoaddr:$dst),
2128            (STXVW4X $rS, xoaddr:$dst)>;
2129def : Pat<(v4i32 (int_ppc_vsx_lxvw4x_be xoaddr:$src)), (LXVW4X xoaddr:$src)>;
2130def : Pat<(v2f64 (int_ppc_vsx_lxvd2x_be xoaddr:$src)), (LXVD2X xoaddr:$src)>;
2131
2132// Variable index unsigned vector_extract on Power9
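// Note (added for clarity): VEXTUBRX/VEXTUHRX/VEXTUWRX take a byte offset
// counted from the right, so the variable-index patterns scale the element
// index with RLWINM8 (times 2 for halfwords, times 4 for words) and the
// fixed-index patterns pass the pre-scaled offset via LI8.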
2133let Predicates = [HasP9Altivec, IsLittleEndian] in {
2134  def : Pat<(i64 (anyext (i32 (vector_extract v16i8:$S, i64:$Idx)))),
2135            (VEXTUBRX $Idx, $S)>;
2136
2137  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, i64:$Idx)))),
2138            (VEXTUHRX (RLWINM8 $Idx, 1, 28, 30), $S)>;
2139  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 0)))),
2140            (VEXTUHRX (LI8 0), $S)>;
2141  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 1)))),
2142            (VEXTUHRX (LI8 2), $S)>;
2143  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 2)))),
2144            (VEXTUHRX (LI8 4), $S)>;
2145  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 3)))),
2146            (VEXTUHRX (LI8 6), $S)>;
2147  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 4)))),
2148            (VEXTUHRX (LI8 8), $S)>;
2149  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 5)))),
2150            (VEXTUHRX (LI8 10), $S)>;
2151  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 6)))),
2152            (VEXTUHRX (LI8 12), $S)>;
2153  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 7)))),
2154            (VEXTUHRX (LI8 14), $S)>;
2155
2156  def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, i64:$Idx)))),
2157            (VEXTUWRX (RLWINM8 $Idx, 2, 28, 29), $S)>;
2158  def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 0)))),
2159            (VEXTUWRX (LI8 0), $S)>;
2160  def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 1)))),
2161            (VEXTUWRX (LI8 4), $S)>;
2162  // For extracting LE word 2, MFVSRWZ is better than VEXTUWRX
2163  def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 2)))),
2164            (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2165            (i32 VectorExtractions.LE_WORD_2), sub_32)>;
2166  def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 3)))),
2167            (VEXTUWRX (LI8 12), $S)>;
2168
2169  def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, i64:$Idx)))),
2170            (EXTSW (VEXTUWRX (RLWINM8 $Idx, 2, 28, 29), $S))>;
2171  def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 0)))),
2172            (EXTSW (VEXTUWRX (LI8 0), $S))>;
2173  def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 1)))),
2174            (EXTSW (VEXTUWRX (LI8 4), $S))>;
2175  // For extracting LE word 2, MFVSRWZ is better than VEXTUWRX
2176  def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 2)))),
2177            (EXTSW (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2178            (i32 VectorExtractions.LE_WORD_2), sub_32))>;
2179  def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 3)))),
2180            (EXTSW (VEXTUWRX (LI8 12), $S))>;
2181
2182  def : Pat<(i32 (vector_extract v16i8:$S, i64:$Idx)),
2183            (i32 (EXTRACT_SUBREG (VEXTUBRX $Idx, $S), sub_32))>;
2184  def : Pat<(i32 (vector_extract v16i8:$S, 0)),
2185            (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 0), $S), sub_32))>;
2186  def : Pat<(i32 (vector_extract v16i8:$S, 1)),
2187            (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 1), $S), sub_32))>;
2188  def : Pat<(i32 (vector_extract v16i8:$S, 2)),
2189            (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 2), $S), sub_32))>;
2190  def : Pat<(i32 (vector_extract v16i8:$S, 3)),
2191            (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 3), $S), sub_32))>;
2192  def : Pat<(i32 (vector_extract v16i8:$S, 4)),
2193            (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 4), $S), sub_32))>;
2194  def : Pat<(i32 (vector_extract v16i8:$S, 5)),
2195            (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 5), $S), sub_32))>;
2196  def : Pat<(i32 (vector_extract v16i8:$S, 6)),
2197            (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 6), $S), sub_32))>;
2198  def : Pat<(i32 (vector_extract v16i8:$S, 7)),
2199            (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 7), $S), sub_32))>;
2200  def : Pat<(i32 (vector_extract v16i8:$S, 8)),
2201            (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 8), $S), sub_32))>;
2202  def : Pat<(i32 (vector_extract v16i8:$S, 9)),
2203            (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 9), $S), sub_32))>;
2204  def : Pat<(i32 (vector_extract v16i8:$S, 10)),
2205            (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 10), $S), sub_32))>;
2206  def : Pat<(i32 (vector_extract v16i8:$S, 11)),
2207            (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 11), $S), sub_32))>;
2208  def : Pat<(i32 (vector_extract v16i8:$S, 12)),
2209            (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 12), $S), sub_32))>;
2210  def : Pat<(i32 (vector_extract v16i8:$S, 13)),
2211            (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 13), $S), sub_32))>;
2212  def : Pat<(i32 (vector_extract v16i8:$S, 14)),
2213            (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 14), $S), sub_32))>;
2214  def : Pat<(i32 (vector_extract v16i8:$S, 15)),
2215            (i32 (EXTRACT_SUBREG (VEXTUBRX (LI8 15), $S), sub_32))>;
2216
2217  def : Pat<(i32 (vector_extract v8i16:$S, i64:$Idx)),
2218            (i32 (EXTRACT_SUBREG (VEXTUHRX
2219            (RLWINM8 $Idx, 1, 28, 30), $S), sub_32))>;
2220  def : Pat<(i32 (vector_extract v8i16:$S, 0)),
2221            (i32 (EXTRACT_SUBREG (VEXTUHRX (LI8 0), $S), sub_32))>;
2222  def : Pat<(i32 (vector_extract v8i16:$S, 1)),
2223            (i32 (EXTRACT_SUBREG (VEXTUHRX (LI8 2), $S), sub_32))>;
2224  def : Pat<(i32 (vector_extract v8i16:$S, 2)),
2225            (i32 (EXTRACT_SUBREG (VEXTUHRX (LI8 4), $S), sub_32))>;
2226  def : Pat<(i32 (vector_extract v8i16:$S, 3)),
2227            (i32 (EXTRACT_SUBREG (VEXTUHRX (LI8 6), $S), sub_32))>;
2228  def : Pat<(i32 (vector_extract v8i16:$S, 4)),
2229            (i32 (EXTRACT_SUBREG (VEXTUHRX (LI8 8), $S), sub_32))>;
2230  def : Pat<(i32 (vector_extract v8i16:$S, 5)),
2231            (i32 (EXTRACT_SUBREG (VEXTUHRX (LI8 10), $S), sub_32))>;
2232  def : Pat<(i32 (vector_extract v8i16:$S, 6)),
2233            (i32 (EXTRACT_SUBREG (VEXTUHRX (LI8 12), $S), sub_32))>;
2234  def : Pat<(i32 (vector_extract v8i16:$S, 7)),
2235            (i32 (EXTRACT_SUBREG (VEXTUHRX (LI8 14), $S), sub_32))>;
2236
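      // Word extracts (little-endian): RLWINM8 $Idx, 2, 28, 29 computes
      // (Idx << 2) & 0xC, the byte offset of the word within the vector.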
2237  def : Pat<(i32 (vector_extract v4i32:$S, i64:$Idx)),
2238            (i32 (EXTRACT_SUBREG
2239                  (VEXTUWRX (RLWINM8 $Idx, 2, 28, 29), $S), sub_32))>;
2240  def : Pat<(i32 (vector_extract v4i32:$S, 0)),
2241            (i32 (EXTRACT_SUBREG (VEXTUWRX (LI8 0), $S), sub_32))>;
2242  def : Pat<(i32 (vector_extract v4i32:$S, 1)),
2243            (i32 (EXTRACT_SUBREG (VEXTUWRX (LI8 4), $S), sub_32))>;
2244  // For extracting LE word 2, MFVSRWZ is better than VEXTUWRX
2245  def : Pat<(i32 (vector_extract v4i32:$S, 2)),
2246            (i32 VectorExtractions.LE_WORD_2)>;
2247  def : Pat<(i32 (vector_extract v4i32:$S, 3)),
2248            (i32 (EXTRACT_SUBREG (VEXTUWRX (LI8 12), $S), sub_32))>;
2249}
2250
2251let Predicates = [HasP9Altivec, IsBigEndian] in {
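      // Big-endian counterparts of the patterns above, using the left-indexed
      // extract instructions (VEXTUBLX/VEXTUHLX/VEXTUWLX); the index operand
      // is again the element's byte offset within the vector.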
2252  def : Pat<(i64 (anyext (i32 (vector_extract v16i8:$S, i64:$Idx)))),
2253            (VEXTUBLX $Idx, $S)>;
2254
2255  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, i64:$Idx)))),
2256            (VEXTUHLX (RLWINM8 $Idx, 1, 28, 30), $S)>;
2257  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 0)))),
2258            (VEXTUHLX (LI8 0), $S)>;
2259  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 1)))),
2260            (VEXTUHLX (LI8 2), $S)>;
2261  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 2)))),
2262            (VEXTUHLX (LI8 4), $S)>;
2263  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 3)))),
2264            (VEXTUHLX (LI8 6), $S)>;
2265  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 4)))),
2266            (VEXTUHLX (LI8 8), $S)>;
2267  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 5)))),
2268            (VEXTUHLX (LI8 10), $S)>;
2269  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 6)))),
2270            (VEXTUHLX (LI8 12), $S)>;
2271  def : Pat<(i64 (anyext (i32 (vector_extract v8i16:$S, 7)))),
2272            (VEXTUHLX (LI8 14), $S)>;
2273
2274  def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, i64:$Idx)))),
2275            (VEXTUWLX (RLWINM8 $Idx, 2, 28, 29), $S)>;
2276  def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 0)))),
2277            (VEXTUWLX (LI8 0), $S)>;
2278
2279  // For extracting BE word 1, MFVSRWZ is better than VEXTUWLX
2280  def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 1)))),
2281            (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2282                           (i32 VectorExtractions.LE_WORD_2), sub_32)>;
2283  def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 2)))),
2284            (VEXTUWLX (LI8 8), $S)>;
2285  def : Pat<(i64 (zext (i32 (vector_extract v4i32:$S, 3)))),
2286            (VEXTUWLX (LI8 12), $S)>;
2287
2288  def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, i64:$Idx)))),
2289            (EXTSW (VEXTUWLX (RLWINM8 $Idx, 2, 28, 29), $S))>;
2290  def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 0)))),
2291            (EXTSW (VEXTUWLX (LI8 0), $S))>;
2292  // For extracting BE word 1, MFVSRWZ is better than VEXTUWLX
2293  def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 1)))),
2294            (EXTSW (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2295                                  (i32 VectorExtractions.LE_WORD_2), sub_32))>;
2296  def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 2)))),
2297            (EXTSW (VEXTUWLX (LI8 8), $S))>;
2298  def : Pat<(i64 (sext (i32 (vector_extract v4i32:$S, 3)))),
2299            (EXTSW (VEXTUWLX (LI8 12), $S))>;
2300
2301  def : Pat<(i32 (vector_extract v16i8:$S, i64:$Idx)),
2302            (i32 (EXTRACT_SUBREG (VEXTUBLX $Idx, $S), sub_32))>;
2303  def : Pat<(i32 (vector_extract v16i8:$S, 0)),
2304            (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 0), $S), sub_32))>;
2305  def : Pat<(i32 (vector_extract v16i8:$S, 1)),
2306            (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 1), $S), sub_32))>;
2307  def : Pat<(i32 (vector_extract v16i8:$S, 2)),
2308            (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 2), $S), sub_32))>;
2309  def : Pat<(i32 (vector_extract v16i8:$S, 3)),
2310            (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 3), $S), sub_32))>;
2311  def : Pat<(i32 (vector_extract v16i8:$S, 4)),
2312            (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 4), $S), sub_32))>;
2313  def : Pat<(i32 (vector_extract v16i8:$S, 5)),
2314            (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 5), $S), sub_32))>;
2315  def : Pat<(i32 (vector_extract v16i8:$S, 6)),
2316            (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 6), $S), sub_32))>;
2317  def : Pat<(i32 (vector_extract v16i8:$S, 7)),
2318            (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 7), $S), sub_32))>;
2319  def : Pat<(i32 (vector_extract v16i8:$S, 8)),
2320            (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 8), $S), sub_32))>;
2321  def : Pat<(i32 (vector_extract v16i8:$S, 9)),
2322            (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 9), $S), sub_32))>;
2323  def : Pat<(i32 (vector_extract v16i8:$S, 10)),
2324            (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 10), $S), sub_32))>;
2325  def : Pat<(i32 (vector_extract v16i8:$S, 11)),
2326            (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 11), $S), sub_32))>;
2327  def : Pat<(i32 (vector_extract v16i8:$S, 12)),
2328            (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 12), $S), sub_32))>;
2329  def : Pat<(i32 (vector_extract v16i8:$S, 13)),
2330            (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 13), $S), sub_32))>;
2331  def : Pat<(i32 (vector_extract v16i8:$S, 14)),
2332            (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 14), $S), sub_32))>;
2333  def : Pat<(i32 (vector_extract v16i8:$S, 15)),
2334            (i32 (EXTRACT_SUBREG (VEXTUBLX (LI8 15), $S), sub_32))>;
2335
2336  def : Pat<(i32 (vector_extract v8i16:$S, i64:$Idx)),
2337            (i32 (EXTRACT_SUBREG
2338                  (VEXTUHLX (RLWINM8 $Idx, 1, 28, 30), $S), sub_32))>;
2339  def : Pat<(i32 (vector_extract v8i16:$S, 0)),
2340            (i32 (EXTRACT_SUBREG (VEXTUHLX (LI8 0), $S), sub_32))>;
2341  def : Pat<(i32 (vector_extract v8i16:$S, 1)),
2342            (i32 (EXTRACT_SUBREG (VEXTUHLX (LI8 2), $S), sub_32))>;
2343  def : Pat<(i32 (vector_extract v8i16:$S, 2)),
2344            (i32 (EXTRACT_SUBREG (VEXTUHLX (LI8 4), $S), sub_32))>;
2345  def : Pat<(i32 (vector_extract v8i16:$S, 3)),
2346            (i32 (EXTRACT_SUBREG (VEXTUHLX (LI8 6), $S), sub_32))>;
2347  def : Pat<(i32 (vector_extract v8i16:$S, 4)),
2348            (i32 (EXTRACT_SUBREG (VEXTUHLX (LI8 8), $S), sub_32))>;
2349  def : Pat<(i32 (vector_extract v8i16:$S, 5)),
2350            (i32 (EXTRACT_SUBREG (VEXTUHLX (LI8 10), $S), sub_32))>;
2351  def : Pat<(i32 (vector_extract v8i16:$S, 6)),
2352            (i32 (EXTRACT_SUBREG (VEXTUHLX (LI8 12), $S), sub_32))>;
2353  def : Pat<(i32 (vector_extract v8i16:$S, 7)),
2354            (i32 (EXTRACT_SUBREG (VEXTUHLX (LI8 14), $S), sub_32))>;
2355
2356  def : Pat<(i32 (vector_extract v4i32:$S, i64:$Idx)),
2357            (i32 (EXTRACT_SUBREG
2358                  (VEXTUWLX (RLWINM8 $Idx, 2, 28, 29), $S), sub_32))>;
2359  def : Pat<(i32 (vector_extract v4i32:$S, 0)),
2360            (i32 (EXTRACT_SUBREG (VEXTUWLX (LI8 0), $S), sub_32))>;
2361  // For extracting BE word 1, MFVSRWZ is better than VEXTUWLX
2362  def : Pat<(i32 (vector_extract v4i32:$S, 1)),
2363            (i32 VectorExtractions.LE_WORD_2)>;
2364  def : Pat<(i32 (vector_extract v4i32:$S, 2)),
2365            (i32 (EXTRACT_SUBREG (VEXTUWLX (LI8 8), $S), sub_32))>;
2366  def : Pat<(i32 (vector_extract v4i32:$S, 3)),
2367            (i32 (EXTRACT_SUBREG (VEXTUWLX (LI8 12), $S), sub_32))>;
2368}
2369
2370let Predicates = [IsLittleEndian, HasDirectMove] in {
2371  // v16i8 scalar <-> vector conversions (LE)
2372  def : Pat<(v16i8 (scalar_to_vector i32:$A)),
2373            (v16i8 (COPY_TO_REGCLASS MovesToVSR.LE_WORD_0, VSRC))>;
2374  def : Pat<(v8i16 (scalar_to_vector i32:$A)),
2375            (v8i16 (COPY_TO_REGCLASS MovesToVSR.LE_WORD_0, VSRC))>;
2376  def : Pat<(v4i32 (scalar_to_vector i32:$A)),
2377            (v4i32 MovesToVSR.LE_WORD_0)>;
2378  def : Pat<(v2i64 (scalar_to_vector i64:$A)),
2379            (v2i64 MovesToVSR.LE_DWORD_0)>;
2380  // v2i64 scalar <-> vector conversions (LE)
2381  def : Pat<(i64 (vector_extract v2i64:$S, 0)),
2382            (i64 VectorExtractions.LE_DWORD_0)>;
2383  def : Pat<(i64 (vector_extract v2i64:$S, 1)),
2384            (i64 VectorExtractions.LE_DWORD_1)>;
2385  def : Pat<(i64 (vector_extract v2i64:$S, i64:$Idx)),
2386            (i64 VectorExtractions.LE_VARIABLE_DWORD)>;
2387} // IsLittleEndian, HasDirectMove
2388
2389let Predicates = [IsLittleEndian, HasDirectMove, NoP9Altivec] in {
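      // Without the ISA 3.0 (P9) extract instructions, fall back to the
      // expansions provided by the VectorExtractions helper dags.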
2390  def : Pat<(i32 (vector_extract v16i8:$S, 0)),
2391            (i32 VectorExtractions.LE_BYTE_0)>;
2392  def : Pat<(i32 (vector_extract v16i8:$S, 1)),
2393            (i32 VectorExtractions.LE_BYTE_1)>;
2394  def : Pat<(i32 (vector_extract v16i8:$S, 2)),
2395            (i32 VectorExtractions.LE_BYTE_2)>;
2396  def : Pat<(i32 (vector_extract v16i8:$S, 3)),
2397            (i32 VectorExtractions.LE_BYTE_3)>;
2398  def : Pat<(i32 (vector_extract v16i8:$S, 4)),
2399            (i32 VectorExtractions.LE_BYTE_4)>;
2400  def : Pat<(i32 (vector_extract v16i8:$S, 5)),
2401            (i32 VectorExtractions.LE_BYTE_5)>;
2402  def : Pat<(i32 (vector_extract v16i8:$S, 6)),
2403            (i32 VectorExtractions.LE_BYTE_6)>;
2404  def : Pat<(i32 (vector_extract v16i8:$S, 7)),
2405            (i32 VectorExtractions.LE_BYTE_7)>;
2406  def : Pat<(i32 (vector_extract v16i8:$S, 8)),
2407            (i32 VectorExtractions.LE_BYTE_8)>;
2408  def : Pat<(i32 (vector_extract v16i8:$S, 9)),
2409            (i32 VectorExtractions.LE_BYTE_9)>;
2410  def : Pat<(i32 (vector_extract v16i8:$S, 10)),
2411            (i32 VectorExtractions.LE_BYTE_10)>;
2412  def : Pat<(i32 (vector_extract v16i8:$S, 11)),
2413            (i32 VectorExtractions.LE_BYTE_11)>;
2414  def : Pat<(i32 (vector_extract v16i8:$S, 12)),
2415            (i32 VectorExtractions.LE_BYTE_12)>;
2416  def : Pat<(i32 (vector_extract v16i8:$S, 13)),
2417            (i32 VectorExtractions.LE_BYTE_13)>;
2418  def : Pat<(i32 (vector_extract v16i8:$S, 14)),
2419            (i32 VectorExtractions.LE_BYTE_14)>;
2420  def : Pat<(i32 (vector_extract v16i8:$S, 15)),
2421            (i32 VectorExtractions.LE_BYTE_15)>;
2422  def : Pat<(i32 (vector_extract v16i8:$S, i64:$Idx)),
2423            (i32 VectorExtractions.LE_VARIABLE_BYTE)>;
2424
2425  // v8i16 scalar <-> vector conversions (LE)
2426  def : Pat<(i32 (vector_extract v8i16:$S, 0)),
2427            (i32 VectorExtractions.LE_HALF_0)>;
2428  def : Pat<(i32 (vector_extract v8i16:$S, 1)),
2429            (i32 VectorExtractions.LE_HALF_1)>;
2430  def : Pat<(i32 (vector_extract v8i16:$S, 2)),
2431            (i32 VectorExtractions.LE_HALF_2)>;
2432  def : Pat<(i32 (vector_extract v8i16:$S, 3)),
2433            (i32 VectorExtractions.LE_HALF_3)>;
2434  def : Pat<(i32 (vector_extract v8i16:$S, 4)),
2435            (i32 VectorExtractions.LE_HALF_4)>;
2436  def : Pat<(i32 (vector_extract v8i16:$S, 5)),
2437            (i32 VectorExtractions.LE_HALF_5)>;
2438  def : Pat<(i32 (vector_extract v8i16:$S, 6)),
2439            (i32 VectorExtractions.LE_HALF_6)>;
2440  def : Pat<(i32 (vector_extract v8i16:$S, 7)),
2441            (i32 VectorExtractions.LE_HALF_7)>;
2442  def : Pat<(i32 (vector_extract v8i16:$S, i64:$Idx)),
2443            (i32 VectorExtractions.LE_VARIABLE_HALF)>;
2444
2445  // v4i32 scalar <-> vector conversions (LE)
2446  def : Pat<(i32 (vector_extract v4i32:$S, 0)),
2447            (i32 VectorExtractions.LE_WORD_0)>;
2448  def : Pat<(i32 (vector_extract v4i32:$S, 1)),
2449            (i32 VectorExtractions.LE_WORD_1)>;
2450  def : Pat<(i32 (vector_extract v4i32:$S, 2)),
2451            (i32 VectorExtractions.LE_WORD_2)>;
2452  def : Pat<(i32 (vector_extract v4i32:$S, 3)),
2453            (i32 VectorExtractions.LE_WORD_3)>;
2454  def : Pat<(i32 (vector_extract v4i32:$S, i64:$Idx)),
2455            (i32 VectorExtractions.LE_VARIABLE_WORD)>;
2456} // IsLittleEndian, HasDirectMove, NoP9Altivec
2457
2458let Predicates = [HasDirectMove, HasVSX] in {
2459// bitconvert f32 -> i32
2460// (convert to 32-bit fp single, shift right 1 word, move to GPR)
2461def : Pat<(i32 (bitconvert f32:$S)),
2462          (i32 (MFVSRWZ (EXTRACT_SUBREG
2463                          (XXSLDWI (XSCVDPSPN $S), (XSCVDPSPN $S), 3),
2464                          sub_64)))>;
2465// bitconvert i32 -> f32
2466// (move to FPR, shift left 1 word, convert to 64-bit fp single)
2467def : Pat<(f32 (bitconvert i32:$A)),
2468          (f32 (XSCVSPDPN
2469                 (XXSLDWI MovesToVSR.LE_WORD_1, MovesToVSR.LE_WORD_1, 1)))>;
2470
2471// bitconvert f64 -> i64
2472// (move to GPR, nothing else needed)
2473def : Pat<(i64 (bitconvert f64:$S)),
2474          (i64 (MFVSRD $S))>;
2475
2476// bitconvert i64 -> f64
2477// (move to FPR, nothing else needed)
2478def : Pat<(f64 (bitconvert i64:$S)),
2479          (f64 (MTVSRD $S))>;
2480}
2481
2482// Materialize a zero-vector of long long
2483def : Pat<(v2i64 immAllZerosV),
2484          (v2i64 (XXLXORz))>;
2485}
2486
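    // Helper dags used by the insertelt patterns further below: they place the
    // scalar in (big-endian) word 1 of a VSX register, the word that XXINSERTW
    // reads from its source operand.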
2487def AlignValues {
2488  dag F32_TO_BE_WORD1 = (v4f32 (XXSLDWI (XSCVDPSPN $B), (XSCVDPSPN $B), 3));
2489  dag I32_TO_BE_WORD1 = (COPY_TO_REGCLASS (MTVSRWZ $B), VSRC);
2490}
2491
2492// The following VSX instructions were introduced in Power ISA 3.0
2493def HasP9Vector : Predicate<"PPCSubTarget->hasP9Vector()">;
2494let AddedComplexity = 400, Predicates = [HasP9Vector] in {
2495
2496  // [PO VRT XO VRB XO /]
2497  class X_VT5_XO5_VB5<bits<6> opcode, bits<5> xo2, bits<10> xo, string opc,
2498                      list<dag> pattern>
2499    : X_RD5_XO5_RS5<opcode, xo2, xo, (outs vrrc:$vT), (ins vrrc:$vB),
2500                    !strconcat(opc, " $vT, $vB"), IIC_VecFP, pattern>;
2501
2502  // [PO VRT XO VRB XO RO], Round to Odd version of [PO VRT XO VRB XO /]
2503  class X_VT5_XO5_VB5_Ro<bits<6> opcode, bits<5> xo2, bits<10> xo, string opc,
2504                         list<dag> pattern>
2505    : X_VT5_XO5_VB5<opcode, xo2, xo, opc, pattern>, isDOT;
2506
2507  // [PO VRT XO VRB XO /], but only the left 64 bits (or fewer) of VRB are
2508  // used, so we use a different operand class for VRB.
2509  class X_VT5_XO5_VB5_TyVB<bits<6> opcode, bits<5> xo2, bits<10> xo, string opc,
2510                           RegisterOperand vbtype, list<dag> pattern>
2511    : X_RD5_XO5_RS5<opcode, xo2, xo, (outs vrrc:$vT), (ins vbtype:$vB),
2512                    !strconcat(opc, " $vT, $vB"), IIC_VecFP, pattern>;
2513
2514  // [PO VRT XO VRB XO /]
2515  class X_VT5_XO5_VB5_VSFR<bits<6> opcode, bits<5> xo2, bits<10> xo, string opc,
2516                      list<dag> pattern>
2517    : X_RD5_XO5_RS5<opcode, xo2, xo, (outs vfrc:$vT), (ins vrrc:$vB),
2518                    !strconcat(opc, " $vT, $vB"), IIC_VecFP, pattern>;
2519
2520  // [PO VRT XO VRB XO RO], Round to Odd version of [PO VRT XO VRB XO /]
2521  class X_VT5_XO5_VB5_VSFR_Ro<bits<6> opcode, bits<5> xo2, bits<10> xo, string opc,
2522                         list<dag> pattern>
2523    : X_VT5_XO5_VB5_VSFR<opcode, xo2, xo, opc, pattern>, isDOT;
2524
2525  // [PO T XO B XO BX /]
2526  class XX2_RT5_XO5_XB6<bits<6> opcode, bits<5> xo2, bits<9> xo, string opc,
2527                        list<dag> pattern>
2528    : XX2_RD5_XO5_RS6<opcode, xo2, xo, (outs g8rc:$rT), (ins vsfrc:$XB),
2529                      !strconcat(opc, " $rT, $XB"), IIC_VecFP, pattern>;
2530
2531  // [PO T XO B XO BX TX]
2532  class XX2_XT6_XO5_XB6<bits<6> opcode, bits<5> xo2, bits<9> xo, string opc,
2533                        RegisterOperand vtype, list<dag> pattern>
2534    : XX2_RD6_XO5_RS6<opcode, xo2, xo, (outs vtype:$XT), (ins vtype:$XB),
2535                      !strconcat(opc, " $XT, $XB"), IIC_VecFP, pattern>;
2536
2537  // [PO T A B XO AX BX TX], src and dest may use different operand classes
2538  class XX3_XT5_XA5_XB5<bits<6> opcode, bits<8> xo, string opc,
2539                  RegisterOperand xty, RegisterOperand aty, RegisterOperand bty,
2540                  InstrItinClass itin, list<dag> pattern>
2541    : XX3Form<opcode, xo, (outs xty:$XT), (ins aty:$XA, bty:$XB),
2542              !strconcat(opc, " $XT, $XA, $XB"), itin, pattern>;
2543
2544  // [PO VRT VRA VRB XO /]
2545  class X_VT5_VA5_VB5<bits<6> opcode, bits<10> xo, string opc,
2546                      list<dag> pattern>
2547    : XForm_1<opcode, xo, (outs vrrc:$vT), (ins vrrc:$vA, vrrc:$vB),
2548              !strconcat(opc, " $vT, $vA, $vB"), IIC_VecFP, pattern>;
2549
2550  // [PO VRT VRA VRB XO RO], Round to Odd version of [PO VRT VRA VRB XO /]
2551  class X_VT5_VA5_VB5_Ro<bits<6> opcode, bits<10> xo, string opc,
2552                         list<dag> pattern>
2553    : X_VT5_VA5_VB5<opcode, xo, opc, pattern>, isDOT;
2554
2555  // [PO VRT VRA VRB XO /]
2556  class X_VT5_VA5_VB5_FMA<bits<6> opcode, bits<10> xo, string opc,
2557                          list<dag> pattern>
2558    : XForm_1<opcode, xo, (outs vrrc:$vT), (ins vrrc:$vTi, vrrc:$vA, vrrc:$vB),
2559              !strconcat(opc, " $vT, $vA, $vB"), IIC_VecFP, pattern>,
2560              RegConstraint<"$vTi = $vT">, NoEncode<"$vTi">;
2561
2562  // [PO VRT VRA VRB XO RO], Round to Odd version of [PO VRT VRA VRB XO /]
2563  class X_VT5_VA5_VB5_FMA_Ro<bits<6> opcode, bits<10> xo, string opc,
2564                          list<dag> pattern>
2565    : X_VT5_VA5_VB5_FMA<opcode, xo, opc, pattern>, isDOT;
2566
2567  //===--------------------------------------------------------------------===//
2568  // Quad-Precision Scalar Move Instructions:
2569
2570  // Copy Sign
2571  def XSCPSGNQP : X_VT5_VA5_VB5<63, 100, "xscpsgnqp",
2572                                [(set f128:$vT,
2573                                      (fcopysign f128:$vB, f128:$vA))]>;
2574
2575  // Absolute/Negative-Absolute/Negate
2576  def XSABSQP   : X_VT5_XO5_VB5<63,  0, 804, "xsabsqp",
2577                                [(set f128:$vT, (fabs f128:$vB))]>;
2578  def XSNABSQP  : X_VT5_XO5_VB5<63,  8, 804, "xsnabsqp",
2579                                [(set f128:$vT, (fneg (fabs f128:$vB)))]>;
2580  def XSNEGQP   : X_VT5_XO5_VB5<63, 16, 804, "xsnegqp",
2581                                [(set f128:$vT, (fneg f128:$vB))]>;
2582
2583  //===--------------------------------------------------------------------===//
2584  // Quad-Precision Scalar Floating-Point Arithmetic Instructions:
2585
2586  // Add/Divide/Multiply/Subtract
2587  let isCommutable = 1 in {
2588  def XSADDQP   : X_VT5_VA5_VB5   <63,   4, "xsaddqp",
2589                                   [(set f128:$vT, (fadd f128:$vA, f128:$vB))]>;
2590  def XSMULQP   : X_VT5_VA5_VB5   <63,  36, "xsmulqp",
2591                                   [(set f128:$vT, (fmul f128:$vA, f128:$vB))]>;
2592  }
2593  def XSSUBQP   : X_VT5_VA5_VB5   <63, 516, "xssubqp",
2594                                   [(set f128:$vT, (fsub f128:$vA, f128:$vB))]>;
2595  def XSDIVQP   : X_VT5_VA5_VB5   <63, 548, "xsdivqp",
2596                                   [(set f128:$vT, (fdiv f128:$vA, f128:$vB))]>;
2597  // Square-Root
2598  def XSSQRTQP  : X_VT5_XO5_VB5   <63, 27, 804, "xssqrtqp",
2599                                   [(set f128:$vT, (fsqrt f128:$vB))]>;
2600  // (Negative) Multiply-{Add/Subtract}
2601  def XSMADDQP : X_VT5_VA5_VB5_FMA <63, 388, "xsmaddqp",
2602                                    [(set f128:$vT,
2603                                          (fma f128:$vA, f128:$vB,
2604                                               f128:$vTi))]>;
2605  def XSMSUBQP  : X_VT5_VA5_VB5_FMA   <63, 420, "xsmsubqp",
2606                                       [(set f128:$vT,
2607                                             (fma f128:$vA, f128:$vB,
2608                                                  (fneg f128:$vTi)))]>;
2609  def XSNMADDQP : X_VT5_VA5_VB5_FMA <63, 452, "xsnmaddqp",
2610                                     [(set f128:$vT,
2611                                           (fneg (fma f128:$vA, f128:$vB,
2612                                                      f128:$vTi)))]>;
2613  def XSNMSUBQP : X_VT5_VA5_VB5_FMA <63, 484, "xsnmsubqp",
2614                                     [(set f128:$vT,
2615                                           (fneg (fma f128:$vA, f128:$vB,
2616                                                      (fneg f128:$vTi))))]>;
2617
2618  let isCommutable = 1 in {
2619  def XSADDQPO : X_VT5_VA5_VB5_Ro<63, 4, "xsaddqpo",
2620                                  [(set f128:$vT,
2621                                  (int_ppc_addf128_round_to_odd
2622                                  f128:$vA, f128:$vB))]>;
2623  def XSMULQPO : X_VT5_VA5_VB5_Ro<63, 36, "xsmulqpo",
2624                                  [(set f128:$vT,
2625                                  (int_ppc_mulf128_round_to_odd
2626                                  f128:$vA, f128:$vB))]>;
2627  }
2628  def XSSUBQPO : X_VT5_VA5_VB5_Ro<63, 516, "xssubqpo",
2629                                  [(set f128:$vT,
2630                                  (int_ppc_subf128_round_to_odd
2631                                  f128:$vA, f128:$vB))]>;
2632  def XSDIVQPO : X_VT5_VA5_VB5_Ro<63, 548, "xsdivqpo",
2633                                  [(set f128:$vT,
2634                                  (int_ppc_divf128_round_to_odd
2635                                  f128:$vA, f128:$vB))]>;
2636  def XSSQRTQPO : X_VT5_XO5_VB5_Ro<63, 27, 804, "xssqrtqpo",
2637                                  [(set f128:$vT,
2638                                  (int_ppc_sqrtf128_round_to_odd f128:$vB))]>;
2639
2640
2641  def XSMADDQPO : X_VT5_VA5_VB5_FMA_Ro<63, 388, "xsmaddqpo",
2642                                      [(set f128:$vT,
2643                                      (int_ppc_fmaf128_round_to_odd
2644                                      f128:$vA,f128:$vB,f128:$vTi))]>;
2645
2646  def XSMSUBQPO : X_VT5_VA5_VB5_FMA_Ro<63, 420, "xsmsubqpo",
2647                                      [(set f128:$vT,
2648                                      (int_ppc_fmaf128_round_to_odd
2649                                      f128:$vA, f128:$vB, (fneg f128:$vTi)))]>;
2650  def XSNMADDQPO: X_VT5_VA5_VB5_FMA_Ro<63, 452, "xsnmaddqpo",
2651                                      [(set f128:$vT,
2652                                      (fneg (int_ppc_fmaf128_round_to_odd
2653                                      f128:$vA, f128:$vB, f128:$vTi)))]>;
2654  def XSNMSUBQPO: X_VT5_VA5_VB5_FMA_Ro<63, 484, "xsnmsubqpo",
2655                                      [(set f128:$vT,
2656                                      (fneg (int_ppc_fmaf128_round_to_odd
2657                                      f128:$vA, f128:$vB, (fneg f128:$vTi))))]>;
2658
2659  // Additional fnmsub patterns: -a*b + c == -(a*b - c)
2660  def : Pat<(fma (fneg f128:$A), f128:$B, f128:$C), (XSNMSUBQP $C, $A, $B)>;
2661  def : Pat<(fma f128:$A, (fneg f128:$B), f128:$C), (XSNMSUBQP $C, $A, $B)>;
2662
2663  //===--------------------------------------------------------------------===//
2664  // Quad/Double-Precision Compare Instructions:
2665
2666  // [PO BF // VRA VRB XO /]
2667  class X_BF3_VA5_VB5<bits<6> opcode, bits<10> xo, string opc,
2668                      list<dag> pattern>
2669    : XForm_17<opcode, xo, (outs crrc:$crD), (ins vrrc:$VA, vrrc:$VB),
2670               !strconcat(opc, " $crD, $VA, $VB"), IIC_FPCompare> {
2671    let Pattern = pattern;
2672  }
2673
2674  // QP Compare Ordered/Unordered
2675  def XSCMPOQP : X_BF3_VA5_VB5<63, 132, "xscmpoqp", []>;
2676  def XSCMPUQP : X_BF3_VA5_VB5<63, 644, "xscmpuqp", []>;
2677
2678  // DP/QP Compare Exponents
2679  def XSCMPEXPDP : XX3Form_1<60, 59,
2680                             (outs crrc:$crD), (ins vsfrc:$XA, vsfrc:$XB),
2681                             "xscmpexpdp $crD, $XA, $XB", IIC_FPCompare, []>;
2682  def XSCMPEXPQP : X_BF3_VA5_VB5<63, 164, "xscmpexpqp", []>;
2683
2684  // DP Compare ==, >=, >, !=
2685  // Use vsrc for XT, because the entire XT register is set.
2686  // XT.dword[1] = 0x0000_0000_0000_0000
2687  def XSCMPEQDP : XX3_XT5_XA5_XB5<60,  3, "xscmpeqdp", vsrc, vsfrc, vsfrc,
2688                                  IIC_FPCompare, []>;
2689  def XSCMPGEDP : XX3_XT5_XA5_XB5<60, 19, "xscmpgedp", vsrc, vsfrc, vsfrc,
2690                                  IIC_FPCompare, []>;
2691  def XSCMPGTDP : XX3_XT5_XA5_XB5<60, 11, "xscmpgtdp", vsrc, vsfrc, vsfrc,
2692                                  IIC_FPCompare, []>;
2693
2694  //===--------------------------------------------------------------------===//
2695  // Quad-Precision Floating-Point Conversion Instructions:
2696
2697  // Convert DP -> QP
2698  def XSCVDPQP  : X_VT5_XO5_VB5_TyVB<63, 22, 836, "xscvdpqp", vfrc,
2699                                     [(set f128:$vT, (fpextend f64:$vB))]>;
2700
2701  // Round & Convert QP -> DP (dword[1] is set to zero)
2702  def XSCVQPDP  : X_VT5_XO5_VB5_VSFR<63, 20, 836, "xscvqpdp" , []>;
2703  def XSCVQPDPO : X_VT5_XO5_VB5_VSFR_Ro<63, 20, 836, "xscvqpdpo",
2704                                        [(set f64:$vT,
2705                                        (int_ppc_truncf128_round_to_odd
2706                                        f128:$vB))]>;
2707
2708  // Truncate & Convert QP -> (Un)Signed (D)Word (dword[1] is set to zero)
2709  def XSCVQPSDZ : X_VT5_XO5_VB5<63, 25, 836, "xscvqpsdz", []>;
2710  def XSCVQPSWZ : X_VT5_XO5_VB5<63,  9, 836, "xscvqpswz", []>;
2711  def XSCVQPUDZ : X_VT5_XO5_VB5<63, 17, 836, "xscvqpudz", []>;
2712  def XSCVQPUWZ : X_VT5_XO5_VB5<63,  1, 836, "xscvqpuwz", []>;
2713
2714  // Convert (Un)Signed DWord -> QP.
2715  def XSCVSDQP  : X_VT5_XO5_VB5_TyVB<63, 10, 836, "xscvsdqp", vfrc, []>;
2716  def : Pat<(f128 (sint_to_fp i64:$src)),
2717            (f128 (XSCVSDQP (COPY_TO_REGCLASS $src, VFRC)))>;
2718  def : Pat<(f128 (sint_to_fp (i64 (PPCmfvsr f64:$src)))),
2719            (f128 (XSCVSDQP $src))>;
2720  def : Pat<(f128 (sint_to_fp (i32 (PPCmfvsr f64:$src)))),
2721            (f128 (XSCVSDQP (VEXTSW2Ds $src)))>;
2722
2723  def XSCVUDQP  : X_VT5_XO5_VB5_TyVB<63,  2, 836, "xscvudqp", vfrc, []>;
2724  def : Pat<(f128 (uint_to_fp i64:$src)),
2725            (f128 (XSCVUDQP (COPY_TO_REGCLASS $src, VFRC)))>;
2726  def : Pat<(f128 (uint_to_fp (i64 (PPCmfvsr f64:$src)))),
2727            (f128 (XSCVUDQP $src))>;
2728
2729  // Convert (Un)Signed Word -> QP.
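      // There is no direct word-to-QP conversion, so the word is first sign-
      // or zero-extended into a VSR (MTVSRWA/MTVSRWZ, or LIWAX/LIWZX when the
      // value is loaded from memory) and then converted with the doubleword
      // forms above.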
2730  def : Pat<(f128 (sint_to_fp i32:$src)),
2731            (f128 (XSCVSDQP (MTVSRWA $src)))>;
2732  def : Pat<(f128 (sint_to_fp (i32 (load xoaddr:$src)))),
2733            (f128 (XSCVSDQP (LIWAX xoaddr:$src)))>;
2734  def : Pat<(f128 (uint_to_fp i32:$src)),
2735            (f128 (XSCVUDQP (MTVSRWZ $src)))>;
2736  def : Pat<(f128 (uint_to_fp (i32 (load xoaddr:$src)))),
2737            (f128 (XSCVUDQP (LIWZX xoaddr:$src)))>;
2738
2739  //===--------------------------------------------------------------------===//
2740  // Round to Floating-Point Integer Instructions
2741
2742  // (Round &) Convert DP <-> HP
2743  // Note! xscvdphp's src and dest registers both use the left 64 bits, so we
2744  // use vsfrc for both. xscvhpdp's src only uses the left 16 bits, but we
2745  // still use vsfrc for it.
2746  def XSCVDPHP : XX2_XT6_XO5_XB6<60, 17, 347, "xscvdphp", vsfrc, []>;
2747  def XSCVHPDP : XX2_XT6_XO5_XB6<60, 16, 347, "xscvhpdp", vsfrc, []>;
2748
2749  // Vector HP -> SP
2750  def XVCVHPSP : XX2_XT6_XO5_XB6<60, 24, 475, "xvcvhpsp", vsrc, []>;
2751  def XVCVSPHP : XX2_XT6_XO5_XB6<60, 25, 475, "xvcvsphp", vsrc,
2752                                 [(set v4f32:$XT,
2753                                     (int_ppc_vsx_xvcvsphp v4f32:$XB))]>;
2754
2755  // Pattern for matching Vector HP -> Vector SP intrinsic. Defined as a
2756  // separate pattern so that it can convert the input register class from
2757  // VRRC(v8i16) to VSRC.
2758  def : Pat<(v4f32 (int_ppc_vsx_xvcvhpsp v8i16:$A)),
2759            (v4f32 (XVCVHPSP (COPY_TO_REGCLASS $A, VSRC)))>;
2760
2761  class Z23_VT5_R1_VB5_RMC2_EX1<bits<6> opcode, bits<8> xo, bit ex, string opc,
2762                                list<dag> pattern>
2763    : Z23Form_8<opcode, xo,
2764                (outs vrrc:$vT), (ins u1imm:$r, vrrc:$vB, u2imm:$rmc),
2765                !strconcat(opc, " $r, $vT, $vB, $rmc"), IIC_VecFP, pattern> {
2766    let RC = ex;
2767  }
2768
2769  // Round to Quad-Precision Integer [with Inexact]
2770  def XSRQPI   : Z23_VT5_R1_VB5_RMC2_EX1<63,  5, 0, "xsrqpi" , []>;
2771  def XSRQPIX  : Z23_VT5_R1_VB5_RMC2_EX1<63,  5, 1, "xsrqpix", []>;
2772
2773  // Use current rounding mode
2774  def : Pat<(f128 (fnearbyint f128:$vB)), (f128 (XSRQPI 0, $vB, 3))>;
2775  // Round to nearest, ties away from zero
2776  def : Pat<(f128 (fround f128:$vB)), (f128 (XSRQPI 0, $vB, 0))>;
2777  // Round towards Zero
2778  def : Pat<(f128 (ftrunc f128:$vB)), (f128 (XSRQPI 1, $vB, 1))>;
2779  // Round towards +Inf
2780  def : Pat<(f128 (fceil f128:$vB)), (f128 (XSRQPI 1, $vB, 2))>;
2781  // Round towards -Inf
2782  def : Pat<(f128 (ffloor f128:$vB)), (f128 (XSRQPI 1, $vB, 3))>;
2783
2784  // Use current rounding mode, [with Inexact]
2785  def : Pat<(f128 (frint f128:$vB)), (f128 (XSRQPIX 0, $vB, 3))>;
2786
2787  // Round Quad-Precision to Double-Extended Precision (fp80)
2788  def XSRQPXP  : Z23_VT5_R1_VB5_RMC2_EX1<63, 37, 0, "xsrqpxp", []>;
2789
2790  //===--------------------------------------------------------------------===//
2791  // Insert/Extract Instructions
2792
2793  // Insert Exponent DP/QP
2794  // XT NOTE: XT.dword[1] = 0xUUUU_UUUU_UUUU_UUUU
2795  def XSIEXPDP : XX1Form <60, 918, (outs vsrc:$XT), (ins g8rc:$rA, g8rc:$rB),
2796                          "xsiexpdp $XT, $rA, $rB", IIC_VecFP, []>;
2797  // vB NOTE: only vB.dword[0] is used, which is why we don't use the
2798  //          X_VT5_VA5_VB5 form
2799  def XSIEXPQP : XForm_18<63, 868, (outs vrrc:$vT), (ins vrrc:$vA, vsfrc:$vB),
2800                          "xsiexpqp $vT, $vA, $vB", IIC_VecFP, []>;
2801
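      // The exponent argument arrives in a GPR; MTVSRD moves it into
      // vB.dword[0], the only part of vB that XSIEXPQP reads (see note above).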
2802  def : Pat<(f128 (int_ppc_scalar_insert_exp_qp f128:$vA, i64:$vB)),
2803            (f128 (XSIEXPQP $vA, (MTVSRD $vB)))>;
2804
2805  // Extract Exponent/Significand DP/QP
2806  def XSXEXPDP : XX2_RT5_XO5_XB6<60,  0, 347, "xsxexpdp", []>;
2807  def XSXSIGDP : XX2_RT5_XO5_XB6<60,  1, 347, "xsxsigdp", []>;
2808
2809  def XSXEXPQP : X_VT5_XO5_VB5  <63,  2, 804, "xsxexpqp", []>;
2810  def XSXSIGQP : X_VT5_XO5_VB5  <63, 18, 804, "xsxsigqp", []>;
2811
2812  def : Pat<(i64 (int_ppc_scalar_extract_expq  f128:$vA)),
2813            (i64 (MFVSRD (EXTRACT_SUBREG
2814                           (v2i64 (XSXEXPQP $vA)), sub_64)))>;
2815
2816  // Vector Insert Word
2817  // XB NOTE: Only XB.dword[1] is used, but we use vsrc on XB.
2818  def XXINSERTW   :
2819    XX2_RD6_UIM5_RS6<60, 181, (outs vsrc:$XT),
2820                     (ins vsrc:$XTi, vsrc:$XB, u4imm:$UIM),
2821                     "xxinsertw $XT, $XB, $UIM", IIC_VecFP,
2822                     [(set v4i32:$XT, (PPCvecinsert v4i32:$XTi, v4i32:$XB,
2823                                                   imm32SExt16:$UIM))]>,
2824                     RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">;
2825
2826  // Vector Extract Unsigned Word
2827  def XXEXTRACTUW : XX2_RD6_UIM5_RS6<60, 165,
2828                                  (outs vsfrc:$XT), (ins vsrc:$XB, u4imm:$UIMM),
2829                                  "xxextractuw $XT, $XB, $UIMM", IIC_VecFP, []>;
2830
2831  // Vector Insert Exponent DP/SP
2832  def XVIEXPDP : XX3_XT5_XA5_XB5<60, 248, "xviexpdp", vsrc, vsrc, vsrc,
2833    IIC_VecFP, [(set v2f64: $XT,(int_ppc_vsx_xviexpdp v2i64:$XA, v2i64:$XB))]>;
2834  def XVIEXPSP : XX3_XT5_XA5_XB5<60, 216, "xviexpsp", vsrc, vsrc, vsrc,
2835    IIC_VecFP, [(set v4f32: $XT,(int_ppc_vsx_xviexpsp v4i32:$XA, v4i32:$XB))]>;
2836
2837  // Vector Extract Exponent/Significand DP/SP
2838  def XVXEXPDP : XX2_XT6_XO5_XB6<60,  0, 475, "xvxexpdp", vsrc,
2839                                 [(set v2i64: $XT,
2840                                  (int_ppc_vsx_xvxexpdp v2f64:$XB))]>;
2841  def XVXEXPSP : XX2_XT6_XO5_XB6<60,  8, 475, "xvxexpsp", vsrc,
2842                                 [(set v4i32: $XT,
2843                                  (int_ppc_vsx_xvxexpsp v4f32:$XB))]>;
2844  def XVXSIGDP : XX2_XT6_XO5_XB6<60,  1, 475, "xvxsigdp", vsrc,
2845                                 [(set v2i64: $XT,
2846                                  (int_ppc_vsx_xvxsigdp v2f64:$XB))]>;
2847  def XVXSIGSP : XX2_XT6_XO5_XB6<60,  9, 475, "xvxsigsp", vsrc,
2848                                 [(set v4i32: $XT,
2849                                  (int_ppc_vsx_xvxsigsp v4f32:$XB))]>;
2850
2851  let AddedComplexity = 400, Predicates = [HasP9Vector] in {
2852  // Extra patterns expanding to vector Extract Word/Insert Word
2853  def : Pat<(v4i32 (int_ppc_vsx_xxinsertw v4i32:$A, v2i64:$B, imm:$IMM)),
2854            (v4i32 (XXINSERTW $A, $B, imm:$IMM))>;
2855  def : Pat<(v2i64 (int_ppc_vsx_xxextractuw v2i64:$A, imm:$IMM)),
2856            (v2i64 (COPY_TO_REGCLASS (XXEXTRACTUW $A, imm:$IMM), VSRC))>;
2857  } // AddedComplexity = 400, HasP9Vector
2858
2859  //===--------------------------------------------------------------------===//
2860
2861  // Test Data Class SP/DP/QP
2862  def XSTSTDCSP : XX2_BF3_DCMX7_RS6<60, 298,
2863                              (outs crrc:$BF), (ins u7imm:$DCMX, vsfrc:$XB),
2864                              "xststdcsp $BF, $XB, $DCMX", IIC_VecFP, []>;
2865  def XSTSTDCDP : XX2_BF3_DCMX7_RS6<60, 362,
2866                              (outs crrc:$BF), (ins u7imm:$DCMX, vsfrc:$XB),
2867                              "xststdcdp $BF, $XB, $DCMX", IIC_VecFP, []>;
2868  def XSTSTDCQP : X_BF3_DCMX7_RS5  <63, 708,
2869                              (outs crrc:$BF), (ins u7imm:$DCMX, vrrc:$vB),
2870                              "xststdcqp $BF, $vB, $DCMX", IIC_VecFP, []>;
2871
2872  // Vector Test Data Class SP/DP
2873  def XVTSTDCSP : XX2_RD6_DCMX7_RS6<60, 13, 5,
2874                              (outs vsrc:$XT), (ins u7imm:$DCMX, vsrc:$XB),
2875                              "xvtstdcsp $XT, $XB, $DCMX", IIC_VecFP,
2876                              [(set v4i32: $XT,
2877                               (int_ppc_vsx_xvtstdcsp v4f32:$XB, timm:$DCMX))]>;
2878  def XVTSTDCDP : XX2_RD6_DCMX7_RS6<60, 15, 5,
2879                              (outs vsrc:$XT), (ins u7imm:$DCMX, vsrc:$XB),
2880                              "xvtstdcdp $XT, $XB, $DCMX", IIC_VecFP,
2881                              [(set v2i64: $XT,
2882                               (int_ppc_vsx_xvtstdcdp v2f64:$XB, timm:$DCMX))]>;
2883
2884  //===--------------------------------------------------------------------===//
2885
2886  // Maximum/Minimum Type-C/Type-J DP
2887  // XT.dword[1] = 0xUUUU_UUUU_UUUU_UUUU, so we use vsrc for XT
2888  def XSMAXCDP : XX3_XT5_XA5_XB5<60, 128, "xsmaxcdp", vsrc, vsfrc, vsfrc,
2889                                 IIC_VecFP, []>;
2890  def XSMAXJDP : XX3_XT5_XA5_XB5<60, 144, "xsmaxjdp", vsrc, vsfrc, vsfrc,
2891                                 IIC_VecFP, []>;
2892  def XSMINCDP : XX3_XT5_XA5_XB5<60, 136, "xsmincdp", vsrc, vsfrc, vsfrc,
2893                                 IIC_VecFP, []>;
2894  def XSMINJDP : XX3_XT5_XA5_XB5<60, 152, "xsminjdp", vsrc, vsfrc, vsfrc,
2895                                 IIC_VecFP, []>;
2896
2897  //===--------------------------------------------------------------------===//
2898
2899  // Vector Byte-Reverse H/W/D/Q Word
2900  def XXBRH : XX2_XT6_XO5_XB6<60,  7, 475, "xxbrh", vsrc, []>;
2901  def XXBRW : XX2_XT6_XO5_XB6<60, 15, 475, "xxbrw", vsrc, []>;
2902  def XXBRD : XX2_XT6_XO5_XB6<60, 23, 475, "xxbrd", vsrc, []>;
2903  def XXBRQ : XX2_XT6_XO5_XB6<60, 31, 475, "xxbrq", vsrc, []>;
2904
2905  // Vector Reverse
2906  def : Pat<(v8i16 (PPCxxreverse v8i16 :$A)),
2907            (v8i16 (COPY_TO_REGCLASS (XXBRH (COPY_TO_REGCLASS $A, VSRC)), VRRC))>;
2908  def : Pat<(v4i32 (PPCxxreverse v4i32 :$A)),
2909            (v4i32 (XXBRW $A))>;
2910  def : Pat<(v2i64 (PPCxxreverse v2i64 :$A)),
2911            (v2i64 (XXBRD $A))>;
2912  def : Pat<(v1i128 (PPCxxreverse v1i128 :$A)),
2913            (v1i128 (COPY_TO_REGCLASS (XXBRQ (COPY_TO_REGCLASS $A, VSRC)), VRRC))>;
2914
2915  // Vector Permute
2916  def XXPERM  : XX3_XT5_XA5_XB5<60, 26, "xxperm" , vsrc, vsrc, vsrc,
2917                                IIC_VecPerm, []>;
2918  def XXPERMR : XX3_XT5_XA5_XB5<60, 58, "xxpermr", vsrc, vsrc, vsrc,
2919                                IIC_VecPerm, []>;
2920
2921  // Vector Splat Immediate Byte
2922  def XXSPLTIB : X_RD6_IMM8<60, 360, (outs vsrc:$XT), (ins u8imm:$IMM8),
2923                            "xxspltib $XT, $IMM8", IIC_VecPerm, []>;
2924
2925  //===--------------------------------------------------------------------===//
2926  // Vector/Scalar Load/Store Instructions
2927
2928  // When adding new D-Form loads/stores, be sure to update the ImmToIdxMap in
2929  // PPCRegisterInfo::PPCRegisterInfo and maybe save yourself some debugging.
2930  let mayLoad = 1, mayStore = 0 in {
2931  // Load Vector
2932  def LXV : DQ_RD6_RS5_DQ12<61, 1, (outs vsrc:$XT), (ins memrix16:$src),
2933                            "lxv $XT, $src", IIC_LdStLFD, []>;
2934  // Load DWord
2935  def LXSD  : DSForm_1<57, 2, (outs vfrc:$vD), (ins memrix:$src),
2936                       "lxsd $vD, $src", IIC_LdStLFD, []>;
2937  // Load SP from src, convert it to DP, and place in dword[0]
2938  def LXSSP : DSForm_1<57, 3, (outs vfrc:$vD), (ins memrix:$src),
2939                       "lxssp $vD, $src", IIC_LdStLFD, []>;
2940
2941  // [PO T RA RB XO TX] is almost identical to [PO S RA RB XO SX], but has
2942  // different "out" and "in" dags.
2943  class X_XT6_RA5_RB5<bits<6> opcode, bits<10> xo, string opc,
2944                      RegisterOperand vtype, list<dag> pattern>
2945    : XX1Form_memOp<opcode, xo, (outs vtype:$XT), (ins memrr:$src),
2946              !strconcat(opc, " $XT, $src"), IIC_LdStLFD, pattern>;
2947
2948  // Load as Integer Byte/Halfword & Zero Indexed
2949  def LXSIBZX : X_XT6_RA5_RB5<31, 781, "lxsibzx", vsfrc,
2950                              [(set f64:$XT, (PPClxsizx xoaddr:$src, 1))]>;
2951  def LXSIHZX : X_XT6_RA5_RB5<31, 813, "lxsihzx", vsfrc,
2952                              [(set f64:$XT, (PPClxsizx xoaddr:$src, 2))]>;
2953
2954  // Load Vector Halfword*8/Byte*16 Indexed
2955  def LXVH8X  : X_XT6_RA5_RB5<31, 812, "lxvh8x" , vsrc, []>;
2956  def LXVB16X : X_XT6_RA5_RB5<31, 876, "lxvb16x", vsrc, []>;
2957
2958  // Load Vector Indexed
2959  def LXVX    : X_XT6_RA5_RB5<31, 268, "lxvx"   , vsrc,
2960                [(set v2f64:$XT, (load xaddrX16:$src))]>;
2961  // Load Vector (Left-justified) with Length
2962  def LXVL : XX1Form_memOp<31, 269, (outs vsrc:$XT), (ins memr:$src, g8rc:$rB),
2963                   "lxvl $XT, $src, $rB", IIC_LdStLoad,
2964                   [(set v4i32:$XT, (int_ppc_vsx_lxvl addr:$src, i64:$rB))]>;
2965  def LXVLL : XX1Form_memOp<31,301, (outs vsrc:$XT), (ins memr:$src, g8rc:$rB),
2966                   "lxvll $XT, $src, $rB", IIC_LdStLoad,
2967                   [(set v4i32:$XT, (int_ppc_vsx_lxvll addr:$src, i64:$rB))]>;
2968
2969  // Load Vector Word & Splat Indexed
2970  def LXVWSX  : X_XT6_RA5_RB5<31, 364, "lxvwsx" , vsrc, []>;
2971  } // mayLoad
2972
2973  // When adding new D-Form loads/stores, be sure to update the ImmToIdxMap in
2974  // PPCRegisterInfo::PPCRegisterInfo and maybe save yourself some debugging.
2975  let mayStore = 1, mayLoad = 0 in {
2976  // Store Vector
2977  def STXV : DQ_RD6_RS5_DQ12<61, 5, (outs), (ins vsrc:$XT, memrix16:$dst),
2978                             "stxv $XT, $dst", IIC_LdStSTFD, []>;
2979  // Store DWord
2980  def STXSD  : DSForm_1<61, 2, (outs), (ins vfrc:$vS, memrix:$dst),
2981                        "stxsd $vS, $dst", IIC_LdStSTFD, []>;
2982  // Convert DP of dword[0] to SP, and Store to dst
2983  def STXSSP : DSForm_1<61, 3, (outs), (ins vfrc:$vS, memrix:$dst),
2984                        "stxssp $vS, $dst", IIC_LdStSTFD, []>;
2985
2986  // [PO S RA RB XO SX]
2987  class X_XS6_RA5_RB5<bits<6> opcode, bits<10> xo, string opc,
2988                      RegisterOperand vtype, list<dag> pattern>
2989    : XX1Form_memOp<opcode, xo, (outs), (ins vtype:$XT, memrr:$dst),
2990              !strconcat(opc, " $XT, $dst"), IIC_LdStSTFD, pattern>;
2991
2992  // Store as Integer Byte/Halfword Indexed
2993  def STXSIBX  : X_XS6_RA5_RB5<31,  909, "stxsibx" , vsfrc,
2994                               [(PPCstxsix f64:$XT, xoaddr:$dst, 1)]>;
2995  def STXSIHX  : X_XS6_RA5_RB5<31,  941, "stxsihx" , vsfrc,
2996                               [(PPCstxsix f64:$XT, xoaddr:$dst, 2)]>;
2997  let isCodeGenOnly = 1 in {
2998    def STXSIBXv  : X_XS6_RA5_RB5<31,  909, "stxsibx" , vsrc, []>;
2999    def STXSIHXv  : X_XS6_RA5_RB5<31,  941, "stxsihx" , vsrc, []>;
3000  }
3001
3002  // Store Vector Halfword*8/Byte*16 Indexed
3003  def STXVH8X  : X_XS6_RA5_RB5<31,  940, "stxvh8x" , vsrc, []>;
3004  def STXVB16X : X_XS6_RA5_RB5<31, 1004, "stxvb16x", vsrc, []>;
3005
3006  // Store Vector Indexed
3007  def STXVX    : X_XS6_RA5_RB5<31,  396, "stxvx"   , vsrc,
3008                 [(store v2f64:$XT, xaddrX16:$dst)]>;
3009
3010  // Store Vector (Left-justified) with Length
3011  def STXVL : XX1Form_memOp<31, 397, (outs),
3012                            (ins vsrc:$XT, memr:$dst, g8rc:$rB),
3013                            "stxvl $XT, $dst, $rB", IIC_LdStLoad,
3014                            [(int_ppc_vsx_stxvl v4i32:$XT, addr:$dst,
3015                              i64:$rB)]>;
3016  def STXVLL : XX1Form_memOp<31, 429, (outs),
3017                            (ins vsrc:$XT, memr:$dst, g8rc:$rB),
3018                            "stxvll $XT, $dst, $rB", IIC_LdStLoad,
3019                            [(int_ppc_vsx_stxvll v4i32:$XT, addr:$dst,
3020                              i64:$rB)]>;
3021  } // mayStore
3022
3023  let Predicates = [IsLittleEndian] in {
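      // XXSPLTW numbers words from the left (big-endian order), so
      // little-endian element i is splatted with immediate 3 - i.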
3024  def: Pat<(f32 (PPCfcfids (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 0)))))),
3025           (f32 (XSCVSPDPN (XVCVSXWSP (XXSPLTW $A, 3))))>;
3026  def: Pat<(f32 (PPCfcfids (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 1)))))),
3027           (f32 (XSCVSPDPN (XVCVSXWSP (XXSPLTW $A, 2))))>;
3028  def: Pat<(f32 (PPCfcfids (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 2)))))),
3029           (f32 (XSCVSPDPN (XVCVSXWSP (XXSPLTW $A, 1))))>;
3030  def: Pat<(f32 (PPCfcfids (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 3)))))),
3031           (f32 (XSCVSPDPN (XVCVSXWSP (XXSPLTW $A, 0))))>;
3032  def: Pat<(f64 (PPCfcfid (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 0)))))),
3033           (f64 (COPY_TO_REGCLASS (XVCVSXWDP (XXSPLTW $A, 3)), VSFRC))>;
3034  def: Pat<(f64 (PPCfcfid (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 1)))))),
3035           (f64 (COPY_TO_REGCLASS (XVCVSXWDP (XXSPLTW $A, 2)), VSFRC))>;
3036  def: Pat<(f64 (PPCfcfid (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 2)))))),
3037           (f64 (COPY_TO_REGCLASS (XVCVSXWDP (XXSPLTW $A, 1)), VSFRC))>;
3038  def: Pat<(f64 (PPCfcfid (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 3)))))),
3039           (f64 (COPY_TO_REGCLASS (XVCVSXWDP (XXSPLTW $A, 0)), VSFRC))>;
3040  }
3041
3042  let Predicates = [IsBigEndian] in {
3043  def: Pat<(f32 (PPCfcfids (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 0)))))),
3044           (f32 (XSCVSPDPN (XVCVSXWSP (XXSPLTW $A, 0))))>;
3045  def: Pat<(f32 (PPCfcfids (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 1)))))),
3046           (f32 (XSCVSPDPN (XVCVSXWSP (XXSPLTW $A, 1))))>;
3047  def: Pat<(f32 (PPCfcfids (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 2)))))),
3048           (f32 (XSCVSPDPN (XVCVSXWSP (XXSPLTW $A, 2))))>;
3049  def: Pat<(f32 (PPCfcfids (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 3)))))),
3050           (f32 (XSCVSPDPN (XVCVSXWSP (XXSPLTW $A, 3))))>;
3051  def: Pat<(f64 (PPCfcfid (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 0)))))),
3052           (f64 (COPY_TO_REGCLASS (XVCVSXWDP (XXSPLTW $A, 0)), VSFRC))>;
3053  def: Pat<(f64 (PPCfcfid (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 1)))))),
3054           (f64 (COPY_TO_REGCLASS (XVCVSXWDP (XXSPLTW $A, 1)), VSFRC))>;
3055  def: Pat<(f64 (PPCfcfid (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 2)))))),
3056           (f64 (COPY_TO_REGCLASS (XVCVSXWDP (XXSPLTW $A, 2)), VSFRC))>;
3057  def: Pat<(f64 (PPCfcfid (f64 (PPCmtvsra (i32 (extractelt v4i32:$A, 3)))))),
3058           (f64 (COPY_TO_REGCLASS (XVCVSXWDP (XXSPLTW $A, 3)), VSFRC))>;
3059  }
3060
3061  // Alternate patterns for PPCmtvsrz where the output is v8i16 or v16i8 instead
3062  // of f64
3063  def : Pat<(v8i16 (PPCmtvsrz i32:$A)),
3064            (v8i16 (SUBREG_TO_REG (i64 1), (MTVSRWZ $A), sub_64))>;
3065  def : Pat<(v16i8 (PPCmtvsrz i32:$A)),
3066            (v16i8 (SUBREG_TO_REG (i64 1), (MTVSRWZ $A), sub_64))>;
3067
3068  // Patterns for which instructions from ISA 3.0 are a better match
3069  let Predicates = [IsLittleEndian, HasP9Vector] in {
3070  def : Pat<(f32 (PPCfcfidus (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 0)))))),
3071            (f32 (XSCVUXDSP (XXEXTRACTUW $A, 12)))>;
3072  def : Pat<(f32 (PPCfcfidus (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 1)))))),
3073            (f32 (XSCVUXDSP (XXEXTRACTUW $A, 8)))>;
3074  def : Pat<(f32 (PPCfcfidus (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 2)))))),
3075            (f32 (XSCVUXDSP (XXEXTRACTUW $A, 4)))>;
3076  def : Pat<(f32 (PPCfcfidus (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 3)))))),
3077            (f32 (XSCVUXDSP (XXEXTRACTUW $A, 0)))>;
3078  def : Pat<(f64 (PPCfcfidu (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 0)))))),
3079            (f64 (XSCVUXDDP (XXEXTRACTUW $A, 12)))>;
3080  def : Pat<(f64 (PPCfcfidu (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 1)))))),
3081            (f64 (XSCVUXDDP (XXEXTRACTUW $A, 8)))>;
3082  def : Pat<(f64 (PPCfcfidu (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 2)))))),
3083            (f64 (XSCVUXDDP (XXEXTRACTUW $A, 4)))>;
3084  def : Pat<(f64 (PPCfcfidu (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 3)))))),
3085            (f64 (XSCVUXDDP (XXEXTRACTUW $A, 0)))>;
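      // XXINSERTW's immediate is the byte offset from the left end of the
      // target vector, so little-endian element i is inserted at 12 - 4 * i.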
3086  def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 0)),
3087            (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 12))>;
3088  def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 1)),
3089            (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 8))>;
3090  def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 2)),
3091            (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 4))>;
3092  def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 3)),
3093            (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 0))>;
3094  def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 0)),
3095            (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 12))>;
3096  def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 1)),
3097            (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 8))>;
3098  def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 2)),
3099            (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 4))>;
3100  def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 3)),
3101            (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 0))>;
3102
3103  def : Pat<(v8i16 (PPCld_vec_be xoaddr:$src)),
3104            (COPY_TO_REGCLASS (LXVH8X xoaddr:$src), VRRC)>;
3105  def : Pat<(PPCst_vec_be v8i16:$rS, xoaddr:$dst),
3106            (STXVH8X (COPY_TO_REGCLASS $rS, VSRC), xoaddr:$dst)>;
3107
3108  def : Pat<(v16i8 (PPCld_vec_be xoaddr:$src)),
3109            (COPY_TO_REGCLASS (LXVB16X xoaddr:$src), VRRC)>;
3110  def : Pat<(PPCst_vec_be v16i8:$rS, xoaddr:$dst),
3111            (STXVB16X (COPY_TO_REGCLASS $rS, VSRC), xoaddr:$dst)>;
3112  } // IsLittleEndian, HasP9Vector
3113
3114  let Predicates = [IsBigEndian, HasP9Vector] in {
3115  def : Pat<(f32 (PPCfcfidus (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 0)))))),
3116            (f32 (XSCVUXDSP (XXEXTRACTUW $A, 0)))>;
3117  def : Pat<(f32 (PPCfcfidus (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 1)))))),
3118            (f32 (XSCVUXDSP (XXEXTRACTUW $A, 4)))>;
3119  def : Pat<(f32 (PPCfcfidus (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 2)))))),
3120            (f32 (XSCVUXDSP (XXEXTRACTUW $A, 8)))>;
3121  def : Pat<(f32 (PPCfcfidus (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 3)))))),
3122            (f32 (XSCVUXDSP (XXEXTRACTUW $A, 12)))>;
3123  def : Pat<(f64 (PPCfcfidu (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 0)))))),
3124            (f64 (XSCVUXDDP (XXEXTRACTUW $A, 0)))>;
3125  def : Pat<(f64 (PPCfcfidu (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 1)))))),
3126            (f64 (XSCVUXDDP (XXEXTRACTUW $A, 4)))>;
3127  def : Pat<(f64 (PPCfcfidu (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 2)))))),
3128            (f64 (XSCVUXDDP (XXEXTRACTUW $A, 8)))>;
3129  def : Pat<(f64 (PPCfcfidu (f64 (PPCmtvsrz (i32 (extractelt v4i32:$A, 3)))))),
3130            (f64 (XSCVUXDDP (XXEXTRACTUW $A, 12)))>;
3131  def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 0)),
3132            (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 0))>;
3133  def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 1)),
3134            (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 4))>;
3135  def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 2)),
3136            (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 8))>;
3137  def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 3)),
3138            (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 12))>;
3139  def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 0)),
3140            (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 0))>;
3141  def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 1)),
3142            (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 4))>;
3143  def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 2)),
3144            (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 8))>;
3145  def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 3)),
3146            (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 12))>;
3147  } // IsBigEndian, HasP9Vector
3148
3149  // D-Form Load/Store
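      // quadwOffsetLoad/quadwOffsetStore match accesses whose offset suits the
      // DQ-form displacement (a multiple of 16) and so select LXV/STXV;
      // everything else goes through the indexed LXVX/STXVX patterns below.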
3150  def : Pat<(v4i32 (quadwOffsetLoad iaddrX16:$src)), (LXV memrix16:$src)>;
3151  def : Pat<(v4f32 (quadwOffsetLoad iaddrX16:$src)), (LXV memrix16:$src)>;
3152  def : Pat<(v2i64 (quadwOffsetLoad iaddrX16:$src)), (LXV memrix16:$src)>;
3153  def : Pat<(v2f64 (quadwOffsetLoad iaddrX16:$src)), (LXV memrix16:$src)>;
3154  def : Pat<(f128  (quadwOffsetLoad iaddrX16:$src)),
3155            (COPY_TO_REGCLASS (LXV memrix16:$src), VRRC)>;
3156  def : Pat<(v4i32 (int_ppc_vsx_lxvw4x iaddrX16:$src)), (LXV memrix16:$src)>;
3157  def : Pat<(v2f64 (int_ppc_vsx_lxvd2x iaddrX16:$src)), (LXV memrix16:$src)>;
3158
3159  def : Pat<(quadwOffsetStore v4f32:$rS, iaddrX16:$dst), (STXV $rS, memrix16:$dst)>;
3160  def : Pat<(quadwOffsetStore v4i32:$rS, iaddrX16:$dst), (STXV $rS, memrix16:$dst)>;
3161  def : Pat<(quadwOffsetStore v2f64:$rS, iaddrX16:$dst), (STXV $rS, memrix16:$dst)>;
3162  def : Pat<(quadwOffsetStore  f128:$rS, iaddrX16:$dst),
3163            (STXV (COPY_TO_REGCLASS $rS, VSRC), memrix16:$dst)>;
3164  def : Pat<(quadwOffsetStore v2i64:$rS, iaddrX16:$dst), (STXV $rS, memrix16:$dst)>;
3165  def : Pat<(int_ppc_vsx_stxvw4x v4i32:$rS, iaddrX16:$dst),
3166            (STXV $rS, memrix16:$dst)>;
3167  def : Pat<(int_ppc_vsx_stxvd2x v2f64:$rS, iaddrX16:$dst),
3168            (STXV $rS, memrix16:$dst)>;
3169
3170
3171  def : Pat<(v2f64 (nonQuadwOffsetLoad xoaddr:$src)), (LXVX xoaddr:$src)>;
3172  def : Pat<(v2i64 (nonQuadwOffsetLoad xoaddr:$src)), (LXVX xoaddr:$src)>;
3173  def : Pat<(v4f32 (nonQuadwOffsetLoad xoaddr:$src)), (LXVX xoaddr:$src)>;
3174  def : Pat<(v4i32 (nonQuadwOffsetLoad xoaddr:$src)), (LXVX xoaddr:$src)>;
3175  def : Pat<(v4i32 (int_ppc_vsx_lxvw4x xoaddr:$src)), (LXVX xoaddr:$src)>;
3176  def : Pat<(v2f64 (int_ppc_vsx_lxvd2x xoaddr:$src)), (LXVX xoaddr:$src)>;
3177  def : Pat<(f128  (nonQuadwOffsetLoad xoaddr:$src)),
3178            (COPY_TO_REGCLASS (LXVX xoaddr:$src), VRRC)>;
3179  def : Pat<(nonQuadwOffsetStore f128:$rS, xoaddr:$dst),
3180            (STXVX (COPY_TO_REGCLASS $rS, VSRC), xoaddr:$dst)>;
3181  def : Pat<(nonQuadwOffsetStore v2f64:$rS, xoaddr:$dst),
3182            (STXVX $rS, xoaddr:$dst)>;
3183  def : Pat<(nonQuadwOffsetStore v2i64:$rS, xoaddr:$dst),
3184            (STXVX $rS, xoaddr:$dst)>;
3185  def : Pat<(nonQuadwOffsetStore v4f32:$rS, xoaddr:$dst),
3186            (STXVX $rS, xoaddr:$dst)>;
3187  def : Pat<(nonQuadwOffsetStore v4i32:$rS, xoaddr:$dst),
3188            (STXVX $rS, xoaddr:$dst)>;
3189  def : Pat<(int_ppc_vsx_stxvw4x v4i32:$rS, xoaddr:$dst),
3190            (STXVX $rS, xoaddr:$dst)>;
3191  def : Pat<(int_ppc_vsx_stxvd2x v2f64:$rS, xoaddr:$dst),
3192            (STXVX $rS, xoaddr:$dst)>;
3193
3194  let AddedComplexity = 400 in {
3195    // LIWAX - This instruction is used for sign extending i32 -> i64.
3196    // LIWZX - This instruction will be emitted for i32, f32, and when
3197    //         zero-extending i32 to i64 (zext i32 -> i64).
3198    let Predicates = [IsLittleEndian] in {
3199
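          // LIWAX/LIWZX place the loaded word in doubleword 0 of the VSR; on
          // little-endian targets element 0 of the result lives in doubleword
          // 1, hence the XXPERMDI doubleword swap.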
3200      def : Pat<(v2i64 (scalar_to_vector (i64 (sextloadi32 xoaddr:$src)))),
3201                (v2i64 (XXPERMDIs
3202                (COPY_TO_REGCLASS (LIWAX xoaddr:$src), VSRC), 2))>;
3203
3204      def : Pat<(v2i64 (scalar_to_vector (i64 (zextloadi32 xoaddr:$src)))),
3205                (v2i64 (XXPERMDIs
3206                (COPY_TO_REGCLASS (LIWZX xoaddr:$src), VSRC), 2))>;
3207
3208      def : Pat<(v4i32 (scalar_to_vector (i32 (load xoaddr:$src)))),
3209                (v4i32 (XXPERMDIs
3210                (COPY_TO_REGCLASS (LIWZX xoaddr:$src), VSRC), 2))>;
3211
3212      def : Pat<(v4f32 (scalar_to_vector (f32 (load xoaddr:$src)))),
3213                (v4f32 (XXPERMDIs
3214                (COPY_TO_REGCLASS (LIWZX xoaddr:$src), VSRC), 2))>;
3215    }
3216
3217    let Predicates = [IsBigEndian] in {
3218      def : Pat<(v2i64 (scalar_to_vector (i64 (sextloadi32 xoaddr:$src)))),
3219                (v2i64 (COPY_TO_REGCLASS (LIWAX xoaddr:$src), VSRC))>;
3220
3221      def : Pat<(v2i64 (scalar_to_vector (i64 (zextloadi32 xoaddr:$src)))),
3222                (v2i64 (COPY_TO_REGCLASS (LIWZX xoaddr:$src), VSRC))>;
3223
3224      def : Pat<(v4i32 (scalar_to_vector (i32 (load xoaddr:$src)))),
3225                (v4i32 (XXSLDWIs
3226                (COPY_TO_REGCLASS (LIWZX xoaddr:$src), VSRC), 1))>;
3227
3228      def : Pat<(v4f32 (scalar_to_vector (f32 (load xoaddr:$src)))),
3229                (v4f32 (XXSLDWIs
3230                (COPY_TO_REGCLASS (LIWZX xoaddr:$src), VSRC), 1))>;
3231    }
3232
3233  }
3234
3235  // Build vectors from i8 loads
3236  def : Pat<(v16i8 (scalar_to_vector ScalarLoads.Li8)),
3237            (v16i8 (VSPLTBs 7, (LXSIBZX xoaddr:$src)))>;
3238  def : Pat<(v8i16 (scalar_to_vector ScalarLoads.ZELi8)),
3239            (v8i16 (VSPLTHs 3, (LXSIBZX xoaddr:$src)))>;
3240  def : Pat<(v4i32 (scalar_to_vector ScalarLoads.ZELi8)),
3241           (v4i32 (XXSPLTWs (LXSIBZX xoaddr:$src), 1))>;
3242  def : Pat<(v2i64 (scalar_to_vector ScalarLoads.ZELi8i64)),
3243            (v2i64 (XXPERMDIs (LXSIBZX xoaddr:$src), 0))>;
3244  def : Pat<(v4i32 (scalar_to_vector ScalarLoads.SELi8)),
3245            (v4i32 (XXSPLTWs (VEXTSB2Ws (LXSIBZX xoaddr:$src)), 1))>;
3246  def : Pat<(v2i64 (scalar_to_vector ScalarLoads.SELi8i64)),
3247            (v2i64 (XXPERMDIs (VEXTSB2Ds (LXSIBZX xoaddr:$src)), 0))>;
3248
3249  // Build vectors from i16 loads
3250  def : Pat<(v8i16 (scalar_to_vector ScalarLoads.Li16)),
3251            (v8i16 (VSPLTHs 3, (LXSIHZX xoaddr:$src)))>;
3252  def : Pat<(v4i32 (scalar_to_vector ScalarLoads.ZELi16)),
3253            (v4i32 (XXSPLTWs (LXSIHZX xoaddr:$src), 1))>;
3254  def : Pat<(v2i64 (scalar_to_vector ScalarLoads.ZELi16i64)),
3255           (v2i64 (XXPERMDIs (LXSIHZX xoaddr:$src), 0))>;
3256  def : Pat<(v4i32 (scalar_to_vector ScalarLoads.SELi16)),
3257            (v4i32 (XXSPLTWs (VEXTSH2Ws (LXSIHZX xoaddr:$src)), 1))>;
3258  def : Pat<(v2i64 (scalar_to_vector ScalarLoads.SELi16i64)),
3259            (v2i64 (XXPERMDIs (VEXTSH2Ds (LXSIHZX xoaddr:$src)), 0))>;
3260
3261  let Predicates = [IsBigEndian, HasP9Vector] in {
3262  // Scalar stores of i8
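      // STXSIBXv stores vector byte 7 (the low-order byte of doubleword 0), so
      // VSLDOI first rotates the requested element into that position.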
3263  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 0)), xoaddr:$dst),
3264            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 9)), VSRC), xoaddr:$dst)>;
3265  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 1)), xoaddr:$dst),
3266            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 10)), VSRC), xoaddr:$dst)>;
3267  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 2)), xoaddr:$dst),
3268            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 11)), VSRC), xoaddr:$dst)>;
3269  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 3)), xoaddr:$dst),
3270            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 12)), VSRC), xoaddr:$dst)>;
3271  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 4)), xoaddr:$dst),
3272            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 13)), VSRC), xoaddr:$dst)>;
3273  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 5)), xoaddr:$dst),
3274            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 14)), VSRC), xoaddr:$dst)>;
3275  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 6)), xoaddr:$dst),
3276            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 15)), VSRC), xoaddr:$dst)>;
3277  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 7)), xoaddr:$dst),
3278            (STXSIBXv (COPY_TO_REGCLASS $S, VSRC), xoaddr:$dst)>;
3279  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 8)), xoaddr:$dst),
3280            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 1)), VSRC), xoaddr:$dst)>;
3281  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 9)), xoaddr:$dst),
3282            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 2)), VSRC), xoaddr:$dst)>;
3283  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 10)), xoaddr:$dst),
3284            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 3)), VSRC), xoaddr:$dst)>;
3285  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 11)), xoaddr:$dst),
3286            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 4)), VSRC), xoaddr:$dst)>;
3287  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 12)), xoaddr:$dst),
3288            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 5)), VSRC), xoaddr:$dst)>;
3289  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 13)), xoaddr:$dst),
3290            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 6)), VSRC), xoaddr:$dst)>;
3291  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 14)), xoaddr:$dst),
3292            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 7)), VSRC), xoaddr:$dst)>;
3293  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 15)), xoaddr:$dst),
3294            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 8)), VSRC), xoaddr:$dst)>;
3295
3296  // Scalar stores of i16
3297  def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 0)), xoaddr:$dst),
3298            (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 10)), VSRC), xoaddr:$dst)>;
3299  def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 1)), xoaddr:$dst),
3300            (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 12)), VSRC), xoaddr:$dst)>;
3301  def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 2)), xoaddr:$dst),
3302            (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 14)), VSRC), xoaddr:$dst)>;
3303  def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 3)), xoaddr:$dst),
3304            (STXSIHXv (COPY_TO_REGCLASS $S, VSRC), xoaddr:$dst)>;
3305  def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 4)), xoaddr:$dst),
3306            (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 2)), VSRC), xoaddr:$dst)>;
3307  def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 5)), xoaddr:$dst),
3308            (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 4)), VSRC), xoaddr:$dst)>;
3309  def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 6)), xoaddr:$dst),
3310            (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 6)), VSRC), xoaddr:$dst)>;
3311  def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 7)), xoaddr:$dst),
3312            (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 8)), VSRC), xoaddr:$dst)>;
3313  } // IsBigEndian, HasP9Vector
3314
3315  let Predicates = [IsLittleEndian, HasP9Vector] in {
3316  // Scalar stores of i8
3317  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 0)), xoaddr:$dst),
3318            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 8)), VSRC), xoaddr:$dst)>;
3319  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 1)), xoaddr:$dst),
3320            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 7)), VSRC), xoaddr:$dst)>;
3321  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 2)), xoaddr:$dst),
3322            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 6)), VSRC), xoaddr:$dst)>;
3323  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 3)), xoaddr:$dst),
3324            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 5)), VSRC), xoaddr:$dst)>;
3325  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 4)), xoaddr:$dst),
3326            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 4)), VSRC), xoaddr:$dst)>;
3327  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 5)), xoaddr:$dst),
3328            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 3)), VSRC), xoaddr:$dst)>;
3329  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 6)), xoaddr:$dst),
3330            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 2)), VSRC), xoaddr:$dst)>;
3331  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 7)), xoaddr:$dst),
3332            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 1)), VSRC), xoaddr:$dst)>;
3333  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 8)), xoaddr:$dst),
3334            (STXSIBXv (COPY_TO_REGCLASS $S, VSRC), xoaddr:$dst)>;
3335  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 9)), xoaddr:$dst),
3336            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 15)), VSRC), xoaddr:$dst)>;
3337  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 10)), xoaddr:$dst),
3338            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 14)), VSRC), xoaddr:$dst)>;
3339  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 11)), xoaddr:$dst),
3340            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 13)), VSRC), xoaddr:$dst)>;
3341  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 12)), xoaddr:$dst),
3342            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 12)), VSRC), xoaddr:$dst)>;
3343  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 13)), xoaddr:$dst),
3344            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 11)), VSRC), xoaddr:$dst)>;
3345  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 14)), xoaddr:$dst),
3346            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 10)), VSRC), xoaddr:$dst)>;
3347  def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$S, 15)), xoaddr:$dst),
3348            (STXSIBXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 9)), VSRC), xoaddr:$dst)>;
3349
3350  // Scalar stores of i16
3351  def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 0)), xoaddr:$dst),
3352            (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 8)), VSRC), xoaddr:$dst)>;
3353  def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 1)), xoaddr:$dst),
3354            (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 6)), VSRC), xoaddr:$dst)>;
3355  def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 2)), xoaddr:$dst),
3356            (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 4)), VSRC), xoaddr:$dst)>;
3357  def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 3)), xoaddr:$dst),
3358            (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 2)), VSRC), xoaddr:$dst)>;
3359  def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 4)), xoaddr:$dst),
3360            (STXSIHXv (COPY_TO_REGCLASS $S, VSRC), xoaddr:$dst)>;
3361  def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 5)), xoaddr:$dst),
3362            (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 14)), VSRC), xoaddr:$dst)>;
3363  def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 6)), xoaddr:$dst),
3364            (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 12)), VSRC), xoaddr:$dst)>;
3365  def : Pat<(truncstorei16 (i32 (vector_extract v8i16:$S, 7)), xoaddr:$dst),
3366            (STXSIHXv (COPY_TO_REGCLASS (v16i8 (VSLDOI $S, $S, 10)), VSRC), xoaddr:$dst)>;
3367  } // IsLittleEndian, HasP9Vector
3368
3369
3370  // Vector sign extensions
3371  def : Pat<(f64 (PPCVexts f64:$A, 1)),
3372            (f64 (COPY_TO_REGCLASS (VEXTSB2Ds $A), VSFRC))>;
3373  def : Pat<(f64 (PPCVexts f64:$A, 2)),
3374            (f64 (COPY_TO_REGCLASS (VEXTSH2Ds $A), VSFRC))>;
3375
3376  def DFLOADf32  : PPCPostRAExpPseudo<(outs vssrc:$XT), (ins memrix:$src),
3377                          "#DFLOADf32",
3378                          [(set f32:$XT, (load iaddrX4:$src))]>;
3379  def DFLOADf64  : PPCPostRAExpPseudo<(outs vsfrc:$XT), (ins memrix:$src),
3380                          "#DFLOADf64",
3381                          [(set f64:$XT, (load iaddrX4:$src))]>;
3382  def DFSTOREf32 : PPCPostRAExpPseudo<(outs), (ins vssrc:$XT, memrix:$dst),
3383                          "#DFSTOREf32",
3384                          [(store f32:$XT, iaddrX4:$dst)]>;
3385  def DFSTOREf64 : PPCPostRAExpPseudo<(outs), (ins vsfrc:$XT, memrix:$dst),
3386                          "#DFSTOREf64",
3387                          [(store f64:$XT, iaddrX4:$dst)]>;
3388
3389  def : Pat<(f64 (extloadf32 iaddrX4:$src)),
3390            (COPY_TO_REGCLASS (DFLOADf32 iaddrX4:$src), VSFRC)>;
3391  def : Pat<(f32 (fpround (f64 (extloadf32 iaddrX4:$src)))),
3392            (f32 (DFLOADf32 iaddrX4:$src))>;
3393
3394  def : Pat<(v4f32 (PPCldvsxlh xaddr:$src)),
3395            (COPY_TO_REGCLASS (XFLOADf64 xaddr:$src), VSRC)>;
3396  def : Pat<(v4f32 (PPCldvsxlh iaddrX4:$src)),
3397            (COPY_TO_REGCLASS (DFLOADf64 iaddrX4:$src), VSRC)>;
3398
3399  let AddedComplexity = 400 in {
3400  // The following pseudoinstructions are used to ensure the utilization
3401  // of all 64 VSX registers.
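  // Note on the pseudos above: the intent is that, after register allocation,
  // each one is expanded to either the classic FPR D-form memory op or the
  // equivalent VSX D-form op depending on the register that was assigned, so
  // all 64 VSX registers stay usable without introducing extra copies.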
3402    let Predicates = [IsLittleEndian, HasP9Vector] in {
3403      def : Pat<(v2i64 (scalar_to_vector (i64 (load iaddrX4:$src)))),
3404                (v2i64 (XXPERMDIs
3405                (COPY_TO_REGCLASS (DFLOADf64 iaddrX4:$src), VSRC), 2))>;
3406      def : Pat<(v2i64 (scalar_to_vector (i64 (load xaddrX4:$src)))),
3407                (v2i64 (XXPERMDIs
3408                (COPY_TO_REGCLASS (XFLOADf64 xaddrX4:$src), VSRC), 2))>;
3409
3410      def : Pat<(v2f64 (scalar_to_vector (f64 (load iaddrX4:$src)))),
3411                (v2f64 (XXPERMDIs
3412                (COPY_TO_REGCLASS (DFLOADf64 iaddrX4:$src), VSRC), 2))>;
3413      def : Pat<(v2f64 (scalar_to_vector (f64 (load xaddrX4:$src)))),
3414                (v2f64 (XXPERMDIs
3415                (COPY_TO_REGCLASS (XFLOADf64 xaddrX4:$src), VSRC), 2))>;
3416      def : Pat<(store (i64 (extractelt v2i64:$A, 0)), xaddrX4:$src),
3417                (XFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2),
3418                             sub_64), xaddrX4:$src)>;
3419      def : Pat<(store (f64 (extractelt v2f64:$A, 0)), xaddrX4:$src),
3420                (XFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2),
3421                             sub_64), xaddrX4:$src)>;
3422      def : Pat<(store (i64 (extractelt v2i64:$A, 1)), xaddrX4:$src),
3423                (XFSTOREf64 (EXTRACT_SUBREG $A, sub_64), xaddrX4:$src)>;
3424      def : Pat<(store (f64 (extractelt v2f64:$A, 1)), xaddrX4:$src),
3425                (XFSTOREf64 (EXTRACT_SUBREG $A, sub_64), xaddrX4:$src)>;
3426      def : Pat<(store (i64 (extractelt v2i64:$A, 0)), iaddrX4:$src),
3427                (DFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2),
3428                             sub_64), iaddrX4:$src)>;
3429      def : Pat<(store (f64 (extractelt v2f64:$A, 0)), iaddrX4:$src),
3430                (DFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2), sub_64),
3431                            iaddrX4:$src)>;
3432      def : Pat<(store (i64 (extractelt v2i64:$A, 1)), iaddrX4:$src),
3433                (DFSTOREf64 (EXTRACT_SUBREG $A, sub_64), iaddrX4:$src)>;
3434      def : Pat<(store (f64 (extractelt v2f64:$A, 1)), iaddrX4:$src),
3435                (DFSTOREf64 (EXTRACT_SUBREG $A, sub_64), iaddrX4:$src)>;
3436    } // IsLittleEndian, HasP9Vector
3437
3438    let Predicates = [IsBigEndian, HasP9Vector] in {
3439      def : Pat<(v2i64 (scalar_to_vector (i64 (load iaddrX4:$src)))),
3440                (v2i64 (COPY_TO_REGCLASS (DFLOADf64 iaddrX4:$src), VSRC))>;
3441      def : Pat<(v2i64 (scalar_to_vector (i64 (load xaddrX4:$src)))),
3442                (v2i64 (COPY_TO_REGCLASS (XFLOADf64 xaddrX4:$src), VSRC))>;
3443
3444      def : Pat<(v2f64 (scalar_to_vector (f64 (load iaddrX4:$src)))),
3445                (v2f64 (COPY_TO_REGCLASS (DFLOADf64 iaddrX4:$src), VSRC))>;
3446      def : Pat<(v2f64 (scalar_to_vector (f64 (load xaddrX4:$src)))),
3447                (v2f64 (COPY_TO_REGCLASS (XFLOADf64 xaddrX4:$src), VSRC))>;
3448      def : Pat<(store (i64 (extractelt v2i64:$A, 1)), xaddrX4:$src),
3449                (XFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2),
3450                             sub_64), xaddrX4:$src)>;
3451      def : Pat<(store (f64 (extractelt v2f64:$A, 1)), xaddrX4:$src),
3452                (XFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2),
3453                             sub_64), xaddrX4:$src)>;
3454      def : Pat<(store (i64 (extractelt v2i64:$A, 0)), xaddrX4:$src),
3455                (XFSTOREf64 (EXTRACT_SUBREG $A, sub_64), xaddrX4:$src)>;
3456      def : Pat<(store (f64 (extractelt v2f64:$A, 0)), xaddrX4:$src),
3457                (XFSTOREf64 (EXTRACT_SUBREG $A, sub_64), xaddrX4:$src)>;
3458      def : Pat<(store (i64 (extractelt v2i64:$A, 1)), iaddrX4:$src),
3459                (DFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2),
3460                             sub_64), iaddrX4:$src)>;
3461      def : Pat<(store (f64 (extractelt v2f64:$A, 1)), iaddrX4:$src),
3462                (DFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2),
3463                             sub_64), iaddrX4:$src)>;
3464      def : Pat<(store (i64 (extractelt v2i64:$A, 0)), iaddrX4:$src),
3465                (DFSTOREf64 (EXTRACT_SUBREG $A, sub_64), iaddrX4:$src)>;
3466      def : Pat<(store (f64 (extractelt v2f64:$A, 0)), iaddrX4:$src),
3467                (DFSTOREf64 (EXTRACT_SUBREG $A, sub_64), iaddrX4:$src)>;
3468    } // IsBigEndian, HasP9Vector
3469  }
3470
3471  let Predicates = [IsBigEndian, HasP9Vector] in {
3472
3473    // (Un)Signed DWord vector extract -> QP
3474    def : Pat<(f128 (sint_to_fp (i64 (extractelt v2i64:$src, 0)))),
3475              (f128 (XSCVSDQP (COPY_TO_REGCLASS $src, VFRC)))>;
3476    def : Pat<(f128 (sint_to_fp (i64 (extractelt v2i64:$src, 1)))),
3477              (f128 (XSCVSDQP
3478                      (EXTRACT_SUBREG (XXPERMDI $src, $src, 3), sub_64)))>;
3479    def : Pat<(f128 (uint_to_fp (i64 (extractelt v2i64:$src, 0)))),
3480              (f128 (XSCVUDQP (COPY_TO_REGCLASS $src, VFRC)))>;
3481    def : Pat<(f128 (uint_to_fp (i64 (extractelt v2i64:$src, 1)))),
3482              (f128 (XSCVUDQP
3483                      (EXTRACT_SUBREG (XXPERMDI $src, $src, 3), sub_64)))>;
3484
3485    // (Un)Signed Word vector extract -> QP
3486    def : Pat<(f128 (sint_to_fp (i32 (extractelt v4i32:$src, 1)))),
3487              (f128 (XSCVSDQP (EXTRACT_SUBREG (VEXTSW2D $src), sub_64)))>;
3488    foreach Idx = [0,2,3] in {
3489      def : Pat<(f128 (sint_to_fp (i32 (extractelt v4i32:$src, Idx)))),
3490                (f128 (XSCVSDQP (EXTRACT_SUBREG
3491                                (VEXTSW2D (VSPLTW Idx, $src)), sub_64)))>;
3492    }
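    // Word element Idx sits at byte offset 4 * Idx of the register, so
    // !shl(Idx, 2) supplies XXEXTRACTUW with the byte index directly.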
3493    foreach Idx = 0-3 in {
3494      def : Pat<(f128 (uint_to_fp (i32 (extractelt v4i32:$src, Idx)))),
3495                (f128 (XSCVUDQP (XXEXTRACTUW $src, !shl(Idx, 2))))>;
3496    }
3497
3498    // (Un)Signed HWord vector extract -> QP
3499    foreach Idx = 0-7 in {
3500      def : Pat<(f128 (sint_to_fp
3501                        (i32 (sext_inreg
3502                               (vector_extract v8i16:$src, Idx), i16)))),
3503              (f128 (XSCVSDQP (EXTRACT_SUBREG
3504                                (VEXTSH2D (VEXTRACTUH !add(Idx, Idx), $src)),
3505                                sub_64)))>;
3506      // The SDAG adds the `and` since an `i16` is being extracted as an `i32`.
3507      def : Pat<(f128 (uint_to_fp
3508                        (and (i32 (vector_extract v8i16:$src, Idx)), 65535))),
3509                (f128 (XSCVUDQP (EXTRACT_SUBREG
3510                                  (VEXTRACTUH !add(Idx, Idx), $src), sub_64)))>;
3511    }
3512
3513    // (Un)Signed Byte vector extract -> QP
3514    foreach Idx = 0-15 in {
3515      def : Pat<(f128 (sint_to_fp
3516                        (i32 (sext_inreg (vector_extract v16i8:$src, Idx),
3517                                         i8)))),
3518                (f128 (XSCVSDQP (EXTRACT_SUBREG
3519                                  (VEXTSB2D (VEXTRACTUB Idx, $src)), sub_64)))>;
3520      def : Pat<(f128 (uint_to_fp
3521                        (and (i32 (vector_extract v16i8:$src, Idx)), 255))),
3522                (f128 (XSCVUDQP
3523                        (EXTRACT_SUBREG (VEXTRACTUB Idx, $src), sub_64)))>;
3524    }
3525
3526    // Unsigned int in VSX register -> QP
3527    def : Pat<(f128 (uint_to_fp (i32 (PPCmfvsr f64:$src)))),
3528              (f128 (XSCVUDQP
3529                      (XXEXTRACTUW (SUBREG_TO_REG (i64 1), $src, sub_64), 4)))>;
3530  } // IsBigEndian, HasP9Vector
3531
3532  let Predicates = [IsLittleEndian, HasP9Vector] in {
3533
3534    // (Un)Signed DWord vector extract -> QP
3535    def : Pat<(f128 (sint_to_fp (i64 (extractelt v2i64:$src, 0)))),
3536              (f128 (XSCVSDQP
3537                      (EXTRACT_SUBREG (XXPERMDI $src, $src, 3), sub_64)))>;
3538    def : Pat<(f128 (sint_to_fp (i64 (extractelt v2i64:$src, 1)))),
3539              (f128 (XSCVSDQP (COPY_TO_REGCLASS $src, VFRC)))>;
3540    def : Pat<(f128 (uint_to_fp (i64 (extractelt v2i64:$src, 0)))),
3541              (f128 (XSCVUDQP
3542                      (EXTRACT_SUBREG (XXPERMDI $src, $src, 3), sub_64)))>;
3543    def : Pat<(f128 (uint_to_fp (i64 (extractelt v2i64:$src, 1)))),
3544              (f128 (XSCVUDQP (COPY_TO_REGCLASS $src, VFRC)))>;
3545
3546    // (Un)Signed Word vector extract -> QP
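    // In the pairs below, the first number is the vector element index and the
    // second is the word selected by VSPLTW (big-endian word numbering);
    // element 2 needs no splat and is handled by the pattern after the loop.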
3547    foreach Idx = [[0,3],[1,2],[3,0]] in {
3548      def : Pat<(f128 (sint_to_fp (i32 (extractelt v4i32:$src, !head(Idx))))),
3549                (f128 (XSCVSDQP (EXTRACT_SUBREG
3550                                  (VEXTSW2D (VSPLTW !head(!tail(Idx)), $src)),
3551                                  sub_64)))>;
3552    }
3553    def : Pat<(f128 (sint_to_fp (i32 (extractelt v4i32:$src, 2)))),
3554              (f128 (XSCVSDQP (EXTRACT_SUBREG (VEXTSW2D $src), sub_64)))>;
3555
3556    foreach Idx = [[0,12],[1,8],[2,4],[3,0]] in {
3557      def : Pat<(f128 (uint_to_fp (i32 (extractelt v4i32:$src, !head(Idx))))),
3558                (f128 (XSCVUDQP (XXEXTRACTUW $src, !head(!tail(Idx)))))>;
3559    }
3560
3561    // (Un)Signed HWord vector extract -> QP
3562    // The nested foreach lists identify the vector element and the corresponding
3563    // register byte location.
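    // For example, halfword element 0 of a little-endian v8i16 occupies bytes
    // 14-15 in left-to-right byte numbering, giving the [0,14] pair and the
    // (VEXTRACTUH 14, $src) extraction below.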
3564    foreach Idx = [[0,14],[1,12],[2,10],[3,8],[4,6],[5,4],[6,2],[7,0]] in {
3565      def : Pat<(f128 (sint_to_fp
3566                        (i32 (sext_inreg
3567                               (vector_extract v8i16:$src, !head(Idx)), i16)))),
3568                (f128 (XSCVSDQP
3569                        (EXTRACT_SUBREG (VEXTSH2D
3570                                          (VEXTRACTUH !head(!tail(Idx)), $src)),
3571                                        sub_64)))>;
3572      def : Pat<(f128 (uint_to_fp
3573                        (and (i32 (vector_extract v8i16:$src, !head(Idx))),
3574                             65535))),
3575                (f128 (XSCVUDQP (EXTRACT_SUBREG
3576                                  (VEXTRACTUH !head(!tail(Idx)), $src), sub_64)))>;
3577    }
3578
3579    // (Un)Signed Byte vector extract -> QP
3580    foreach Idx = [[0,15],[1,14],[2,13],[3,12],[4,11],[5,10],[6,9],[7,8],[8,7],
3581                   [9,6],[10,5],[11,4],[12,3],[13,2],[14,1],[15,0]] in {
3582      def : Pat<(f128 (sint_to_fp
3583                        (i32 (sext_inreg
3584                               (vector_extract v16i8:$src, !head(Idx)), i8)))),
3585                (f128 (XSCVSDQP
3586                        (EXTRACT_SUBREG
3587                          (VEXTSB2D (VEXTRACTUB !head(!tail(Idx)), $src)),
3588                          sub_64)))>;
3589      def : Pat<(f128 (uint_to_fp
3590                        (and (i32 (vector_extract v16i8:$src, !head(Idx))),
3591                             255))),
3592                (f128 (XSCVUDQP
3593                        (EXTRACT_SUBREG
3594                          (VEXTRACTUB !head(!tail(Idx)), $src), sub_64)))>;
3595    }
3596
3597    // Unsigned int in VSX register -> QP
3598    def : Pat<(f128 (uint_to_fp (i32 (PPCmfvsr f64:$src)))),
3599              (f128 (XSCVUDQP
3600                      (XXEXTRACTUW (SUBREG_TO_REG (i64 1), $src, sub_64), 8)))>;
3601  } // IsLittleEndian, HasP9Vector
3602
3603  // Convert (Un)Signed DWord in memory -> QP
3604  def : Pat<(f128 (sint_to_fp (i64 (load xaddrX4:$src)))),
3605            (f128 (XSCVSDQP (LXSDX xaddrX4:$src)))>;
3606  def : Pat<(f128 (sint_to_fp (i64 (load iaddrX4:$src)))),
3607            (f128 (XSCVSDQP (LXSD iaddrX4:$src)))>;
3608  def : Pat<(f128 (uint_to_fp (i64 (load xaddrX4:$src)))),
3609            (f128 (XSCVUDQP (LXSDX xaddrX4:$src)))>;
3610  def : Pat<(f128 (uint_to_fp (i64 (load iaddrX4:$src)))),
3611            (f128 (XSCVUDQP (LXSD iaddrX4:$src)))>;
3612
3613  // Convert Unsigned HWord in memory -> QP
3614  def : Pat<(f128 (uint_to_fp ScalarLoads.ZELi16)),
3615            (f128 (XSCVUDQP (LXSIHZX xaddr:$src)))>;
3616
3617  // Convert Unsigned Byte in memory -> QP
3618  def : Pat<(f128 (uint_to_fp ScalarLoads.ZELi8)),
3619            (f128 (XSCVUDQP (LXSIBZX xoaddr:$src)))>;
3620
3621  // Truncate & Convert QP -> (Un)Signed (D)Word.
3622  def : Pat<(i64 (fp_to_sint f128:$src)), (i64 (MFVRD (XSCVQPSDZ $src)))>;
3623  def : Pat<(i64 (fp_to_uint f128:$src)), (i64 (MFVRD (XSCVQPUDZ $src)))>;
3624  def : Pat<(i32 (fp_to_sint f128:$src)),
3625            (i32 (MFVSRWZ (COPY_TO_REGCLASS (XSCVQPSWZ $src), VFRC)))>;
3626  def : Pat<(i32 (fp_to_uint f128:$src)),
3627            (i32 (MFVSRWZ (COPY_TO_REGCLASS (XSCVQPUWZ $src), VFRC)))>;
3628
3629  // Instructions for store(fptosi).
3630  // The 8-byte version is repeated here due to availability of D-Form STXSD.
3631  def : Pat<(PPCstore_scal_int_from_vsr
3632              (f64 (PPCcv_fp_to_sint_in_vsr f128:$src)), xaddrX4:$dst, 8),
3633            (STXSDX (COPY_TO_REGCLASS (XSCVQPSDZ f128:$src), VFRC),
3634                    xaddrX4:$dst)>;
3635  def : Pat<(PPCstore_scal_int_from_vsr
3636              (f64 (PPCcv_fp_to_sint_in_vsr f128:$src)), iaddrX4:$dst, 8),
3637            (STXSD (COPY_TO_REGCLASS (XSCVQPSDZ f128:$src), VFRC),
3638                   iaddrX4:$dst)>;
3639  def : Pat<(PPCstore_scal_int_from_vsr
3640              (f64 (PPCcv_fp_to_sint_in_vsr f128:$src)), xoaddr:$dst, 4),
3641            (STXSIWX (COPY_TO_REGCLASS (XSCVQPSWZ $src), VFRC), xoaddr:$dst)>;
3642  def : Pat<(PPCstore_scal_int_from_vsr
3643              (f64 (PPCcv_fp_to_sint_in_vsr f128:$src)), xoaddr:$dst, 2),
3644            (STXSIHX (COPY_TO_REGCLASS (XSCVQPSWZ $src), VFRC), xoaddr:$dst)>;
3645  def : Pat<(PPCstore_scal_int_from_vsr
3646              (f64 (PPCcv_fp_to_sint_in_vsr f128:$src)), xoaddr:$dst, 1),
3647            (STXSIBX (COPY_TO_REGCLASS (XSCVQPSWZ $src), VFRC), xoaddr:$dst)>;
3648  def : Pat<(PPCstore_scal_int_from_vsr
3649              (f64 (PPCcv_fp_to_sint_in_vsr f64:$src)), xaddrX4:$dst, 8),
3650            (STXSDX (XSCVDPSXDS f64:$src), xaddrX4:$dst)>;
3651  def : Pat<(PPCstore_scal_int_from_vsr
3652              (f64 (PPCcv_fp_to_sint_in_vsr f64:$src)), iaddrX4:$dst, 8),
3653            (STXSD (XSCVDPSXDS f64:$src), iaddrX4:$dst)>;
3654  def : Pat<(PPCstore_scal_int_from_vsr
3655              (f64 (PPCcv_fp_to_sint_in_vsr f64:$src)), xoaddr:$dst, 2),
3656            (STXSIHX (XSCVDPSXWS f64:$src), xoaddr:$dst)>;
3657  def : Pat<(PPCstore_scal_int_from_vsr
3658              (f64 (PPCcv_fp_to_sint_in_vsr f64:$src)), xoaddr:$dst, 1),
3659            (STXSIBX (XSCVDPSXWS f64:$src), xoaddr:$dst)>;
3660
3661  // Instructions for store(fptoui).
3662  def : Pat<(PPCstore_scal_int_from_vsr
3663              (f64 (PPCcv_fp_to_uint_in_vsr f128:$src)), xaddrX4:$dst, 8),
3664            (STXSDX (COPY_TO_REGCLASS (XSCVQPUDZ f128:$src), VFRC),
3665                    xaddrX4:$dst)>;
3666  def : Pat<(PPCstore_scal_int_from_vsr
3667              (f64 (PPCcv_fp_to_uint_in_vsr f128:$src)), iaddrX4:$dst, 8),
3668            (STXSD (COPY_TO_REGCLASS (XSCVQPUDZ f128:$src), VFRC),
3669                   iaddrX4:$dst)>;
3670  def : Pat<(PPCstore_scal_int_from_vsr
3671              (f64 (PPCcv_fp_to_uint_in_vsr f128:$src)), xoaddr:$dst, 4),
3672            (STXSIWX (COPY_TO_REGCLASS (XSCVQPUWZ $src), VFRC), xoaddr:$dst)>;
3673  def : Pat<(PPCstore_scal_int_from_vsr
3674              (f64 (PPCcv_fp_to_uint_in_vsr f128:$src)), xoaddr:$dst, 2),
3675            (STXSIHX (COPY_TO_REGCLASS (XSCVQPUWZ $src), VFRC), xoaddr:$dst)>;
3676  def : Pat<(PPCstore_scal_int_from_vsr
3677              (f64 (PPCcv_fp_to_uint_in_vsr f128:$src)), xoaddr:$dst, 1),
3678            (STXSIBX (COPY_TO_REGCLASS (XSCVQPUWZ $src), VFRC), xoaddr:$dst)>;
3679  def : Pat<(PPCstore_scal_int_from_vsr
3680              (f64 (PPCcv_fp_to_uint_in_vsr f64:$src)), xaddrX4:$dst, 8),
3681            (STXSDX (XSCVDPUXDS f64:$src), xaddrX4:$dst)>;
3682  def : Pat<(PPCstore_scal_int_from_vsr
3683              (f64 (PPCcv_fp_to_uint_in_vsr f64:$src)), iaddrX4:$dst, 8),
3684            (STXSD (XSCVDPUXDS f64:$src), iaddrX4:$dst)>;
3685  def : Pat<(PPCstore_scal_int_from_vsr
3686              (f64 (PPCcv_fp_to_uint_in_vsr f64:$src)), xoaddr:$dst, 2),
3687            (STXSIHX (XSCVDPUXWS f64:$src), xoaddr:$dst)>;
3688  def : Pat<(PPCstore_scal_int_from_vsr
3689              (f64 (PPCcv_fp_to_uint_in_vsr f64:$src)), xoaddr:$dst, 1),
3690            (STXSIBX (XSCVDPUXWS f64:$src), xoaddr:$dst)>;
3691
3692  // Round & Convert QP -> DP/SP
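  // For the f32 case, XSCVQPDPO rounds to double precision using round-to-odd
  // so that the final XSRSP to single precision does not double-round.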
3693  def : Pat<(f64 (fpround f128:$src)), (f64 (XSCVQPDP $src))>;
3694  def : Pat<(f32 (fpround f128:$src)), (f32 (XSRSP (XSCVQPDPO $src)))>;
3695
3696  // Convert SP -> QP
3697  def : Pat<(f128 (fpextend f32:$src)),
3698            (f128 (XSCVDPQP (COPY_TO_REGCLASS $src, VFRC)))>;
3699
3700} // end HasP9Vector, AddedComplexity
3701
3702let AddedComplexity = 400 in {
3703  let Predicates = [IsISA3_0, HasP9Vector, HasDirectMove, IsBigEndian] in {
3704    def : Pat<(f128 (PPCbuild_fp128 i64:$rB, i64:$rA)),
3705              (f128 (COPY_TO_REGCLASS (MTVSRDD $rB, $rA), VRRC))>;
3706  }
3707  let Predicates = [IsISA3_0, HasP9Vector, HasDirectMove, IsLittleEndian] in {
3708    def : Pat<(f128 (PPCbuild_fp128 i64:$rA, i64:$rB)),
3709              (f128 (COPY_TO_REGCLASS (MTVSRDD $rB, $rA), VRRC))>;
3710  }
3711}
3712
3713let Predicates = [HasP9Vector] in {
3714  let mayStore = 1 in {
3715    def SPILLTOVSR_STX : PseudoXFormMemOp<(outs),
3716                                          (ins spilltovsrrc:$XT, memrr:$dst),
3717                                          "#SPILLTOVSR_STX", []>;
3718    def SPILLTOVSR_ST : PPCPostRAExpPseudo<(outs), (ins spilltovsrrc:$XT, memrix:$dst),
3719                              "#SPILLTOVSR_ST", []>;
3720  }
3721  let mayLoad = 1 in {
3722    def SPILLTOVSR_LDX : PseudoXFormMemOp<(outs spilltovsrrc:$XT),
3723                                          (ins memrr:$src),
3724                                          "#SPILLTOVSR_LDX", []>;
3725    def SPILLTOVSR_LD : PPCPostRAExpPseudo<(outs spilltovsrrc:$XT), (ins memrix:$src),
3726                              "#SPILLTOVSR_LD", []>;
3727
3728  }
3729}
3730// Integer extend helper dags 32 -> 64
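// Each dag below any-extends a 32-bit operand by inserting it into the low
// half (sub_32) of an undefined 64-bit register, so it can feed 64-bit
// operations such as RLDIMI and MTVSRD in the build-vector patterns below.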
3731def AnyExts {
3732  dag A = (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $A, sub_32);
3733  dag B = (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $B, sub_32);
3734  dag C = (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $C, sub_32);
3735  dag D = (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $D, sub_32);
3736}
3737
3738def DblToFlt {
3739  dag A0 = (f32 (fpround (f64 (extractelt v2f64:$A, 0))));
3740  dag A1 = (f32 (fpround (f64 (extractelt v2f64:$A, 1))));
3741  dag B0 = (f32 (fpround (f64 (extractelt v2f64:$B, 0))));
3742  dag B1 = (f32 (fpround (f64 (extractelt v2f64:$B, 1))));
3743}
3744
3745def ExtDbl {
3746  dag A0S = (i32 (PPCmfvsr (f64 (PPCfctiwz (f64 (extractelt v2f64:$A, 0))))));
3747  dag A1S = (i32 (PPCmfvsr (f64 (PPCfctiwz (f64 (extractelt v2f64:$A, 1))))));
3748  dag B0S = (i32 (PPCmfvsr (f64 (PPCfctiwz (f64 (extractelt v2f64:$B, 0))))));
3749  dag B1S = (i32 (PPCmfvsr (f64 (PPCfctiwz (f64 (extractelt v2f64:$B, 1))))));
3750  dag A0U = (i32 (PPCmfvsr (f64 (PPCfctiwuz (f64 (extractelt v2f64:$A, 0))))));
3751  dag A1U = (i32 (PPCmfvsr (f64 (PPCfctiwuz (f64 (extractelt v2f64:$A, 1))))));
3752  dag B0U = (i32 (PPCmfvsr (f64 (PPCfctiwuz (f64 (extractelt v2f64:$B, 0))))));
3753  dag B1U = (i32 (PPCmfvsr (f64 (PPCfctiwuz (f64 (extractelt v2f64:$B, 1))))));
3754}
3755
3756def ByteToWord {
3757  dag LE_A0 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 0)), i8));
3758  dag LE_A1 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 4)), i8));
3759  dag LE_A2 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 8)), i8));
3760  dag LE_A3 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 12)), i8));
3761  dag BE_A0 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 3)), i8));
3762  dag BE_A1 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 7)), i8));
3763  dag BE_A2 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 11)), i8));
3764  dag BE_A3 = (i32 (sext_inreg (i32 (vector_extract v16i8:$A, 15)), i8));
3765}
3766
3767def ByteToDWord {
3768  dag LE_A0 = (i64 (sext_inreg
3769              (i64 (anyext (i32 (vector_extract v16i8:$A, 0)))), i8));
3770  dag LE_A1 = (i64 (sext_inreg
3771              (i64 (anyext (i32 (vector_extract v16i8:$A, 8)))), i8));
3772  dag BE_A0 = (i64 (sext_inreg
3773              (i64 (anyext (i32 (vector_extract v16i8:$A, 7)))), i8));
3774  dag BE_A1 = (i64 (sext_inreg
3775              (i64 (anyext (i32 (vector_extract v16i8:$A, 15)))), i8));
3776}
3777
3778def HWordToWord {
3779  dag LE_A0 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 0)), i16));
3780  dag LE_A1 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 2)), i16));
3781  dag LE_A2 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 4)), i16));
3782  dag LE_A3 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 6)), i16));
3783  dag BE_A0 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 1)), i16));
3784  dag BE_A1 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 3)), i16));
3785  dag BE_A2 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 5)), i16));
3786  dag BE_A3 = (i32 (sext_inreg (i32 (vector_extract v8i16:$A, 7)), i16));
3787}
3788
3789def HWordToDWord {
3790  dag LE_A0 = (i64 (sext_inreg
3791              (i64 (anyext (i32 (vector_extract v8i16:$A, 0)))), i16));
3792  dag LE_A1 = (i64 (sext_inreg
3793              (i64 (anyext (i32 (vector_extract v8i16:$A, 4)))), i16));
3794  dag BE_A0 = (i64 (sext_inreg
3795              (i64 (anyext (i32 (vector_extract v8i16:$A, 3)))), i16));
3796  dag BE_A1 = (i64 (sext_inreg
3797              (i64 (anyext (i32 (vector_extract v8i16:$A, 7)))), i16));
3798}
3799
3800def WordToDWord {
3801  dag LE_A0 = (i64 (sext (i32 (vector_extract v4i32:$A, 0))));
3802  dag LE_A1 = (i64 (sext (i32 (vector_extract v4i32:$A, 2))));
3803  dag BE_A0 = (i64 (sext (i32 (vector_extract v4i32:$A, 1))));
3804  dag BE_A1 = (i64 (sext (i32 (vector_extract v4i32:$A, 3))));
3805}
3806
3807def FltToIntLoad {
3808  dag A = (i32 (PPCmfvsr (PPCfctiwz (f64 (extloadf32 xoaddr:$A)))));
3809}
3810def FltToUIntLoad {
3811  dag A = (i32 (PPCmfvsr (PPCfctiwuz (f64 (extloadf32 xoaddr:$A)))));
3812}
3813def FltToLongLoad {
3814  dag A = (i64 (PPCmfvsr (PPCfctidz (f64 (extloadf32 xoaddr:$A)))));
3815}
3816def FltToLongLoadP9 {
3817  dag A = (i64 (PPCmfvsr (PPCfctidz (f64 (extloadf32 iaddrX4:$A)))));
3818}
3819def FltToULongLoad {
3820  dag A = (i64 (PPCmfvsr (PPCfctiduz (f64 (extloadf32 xoaddr:$A)))));
3821}
3822def FltToULongLoadP9 {
3823  dag A = (i64 (PPCmfvsr (PPCfctiduz (f64 (extloadf32 iaddrX4:$A)))));
3824}
3825def FltToLong {
3826  dag A = (i64 (PPCmfvsr (f64 (PPCfctidz (fpextend f32:$A)))));
3827}
3828def FltToULong {
3829  dag A = (i64 (PPCmfvsr (f64 (PPCfctiduz (fpextend f32:$A)))));
3830}
3831def DblToInt {
3832  dag A = (i32 (PPCmfvsr (f64 (PPCfctiwz f64:$A))));
3833  dag B = (i32 (PPCmfvsr (f64 (PPCfctiwz f64:$B))));
3834  dag C = (i32 (PPCmfvsr (f64 (PPCfctiwz f64:$C))));
3835  dag D = (i32 (PPCmfvsr (f64 (PPCfctiwz f64:$D))));
3836}
3837def DblToUInt {
3838  dag A = (i32 (PPCmfvsr (f64 (PPCfctiwuz f64:$A))));
3839  dag B = (i32 (PPCmfvsr (f64 (PPCfctiwuz f64:$B))));
3840  dag C = (i32 (PPCmfvsr (f64 (PPCfctiwuz f64:$C))));
3841  dag D = (i32 (PPCmfvsr (f64 (PPCfctiwuz f64:$D))));
3842}
3843def DblToLong {
3844  dag A = (i64 (PPCmfvsr (f64 (PPCfctidz f64:$A))));
3845}
3846def DblToULong {
3847  dag A = (i64 (PPCmfvsr (f64 (PPCfctiduz f64:$A))));
3848}
3849def DblToIntLoad {
3850  dag A = (i32 (PPCmfvsr (PPCfctiwz (f64 (load xoaddr:$A)))));
3851}
3852def DblToIntLoadP9 {
3853  dag A = (i32 (PPCmfvsr (PPCfctiwz (f64 (load iaddrX4:$A)))));
3854}
3855def DblToUIntLoad {
3856  dag A = (i32 (PPCmfvsr (PPCfctiwuz (f64 (load xoaddr:$A)))));
3857}
3858def DblToUIntLoadP9 {
3859  dag A = (i32 (PPCmfvsr (PPCfctiwuz (f64 (load iaddrX4:$A)))));
3860}
3861def DblToLongLoad {
3862  dag A = (i64 (PPCmfvsr (PPCfctidz (f64 (load xoaddr:$A)))));
3863}
3864def DblToULongLoad {
3865  dag A = (i64 (PPCmfvsr (PPCfctiduz (f64 (load xoaddr:$A)))));
3866}
3867
3868// FP merge dags (for f32 -> v4f32)
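// AC pairs the scalars $A and $C (and BD pairs $B and $D) into one register,
// XVCVDPSP converts both lanes to single precision, and VMRGEW in the
// BUILD_VECTOR patterns further down interleaves the two results into a v4f32.
// The *ToFlt dags do the same for the halves of a pair of v2f64 inputs.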
3869def MrgFP {
3870  dag AC = (XVCVDPSP (XXPERMDI (COPY_TO_REGCLASS $A, VSRC),
3871                               (COPY_TO_REGCLASS $C, VSRC), 0));
3872  dag BD = (XVCVDPSP (XXPERMDI (COPY_TO_REGCLASS $B, VSRC),
3873                               (COPY_TO_REGCLASS $D, VSRC), 0));
3874  dag ABhToFlt = (XVCVDPSP (XXPERMDI $A, $B, 0));
3875  dag ABlToFlt = (XVCVDPSP (XXPERMDI $A, $B, 3));
3876  dag BAhToFlt = (XVCVDPSP (XXPERMDI $B, $A, 0));
3877  dag BAlToFlt = (XVCVDPSP (XXPERMDI $B, $A, 3));
3878}
3879
3880// Word-element merge dags - conversions from f64 to i32 merged into vectors.
3881def MrgWords {
3882  // For big endian, we merge the high and low doublewords of (A, B).
3883  dag A0B0 = (v2f64 (XXPERMDI v2f64:$A, v2f64:$B, 0));
3884  dag A1B1 = (v2f64 (XXPERMDI v2f64:$A, v2f64:$B, 3));
3885  dag CVA1B1S = (v4i32 (XVCVDPSXWS A1B1));
3886  dag CVA0B0S = (v4i32 (XVCVDPSXWS A0B0));
3887  dag CVA1B1U = (v4i32 (XVCVDPUXWS A1B1));
3888  dag CVA0B0U = (v4i32 (XVCVDPUXWS A0B0));
3889
3890  // For little endian, we merge the high and low doublewords of (B, A).
3891  dag B1A1 = (v2f64 (XXPERMDI v2f64:$B, v2f64:$A, 0));
3892  dag B0A0 = (v2f64 (XXPERMDI v2f64:$B, v2f64:$A, 3));
3893  dag CVB1A1S = (v4i32 (XVCVDPSXWS B1A1));
3894  dag CVB0A0S = (v4i32 (XVCVDPSXWS B0A0));
3895  dag CVB1A1U = (v4i32 (XVCVDPUXWS B1A1));
3896  dag CVB0A0U = (v4i32 (XVCVDPUXWS B0A0));
3897
3898  // For big endian, we merge the high doublewords of (A, C) and (B, D),
3899  // convert, then merge.
3900  dag AC = (v2f64 (XXPERMDI (COPY_TO_REGCLASS f64:$A, VSRC),
3901                            (COPY_TO_REGCLASS f64:$C, VSRC), 0));
3902  dag BD = (v2f64 (XXPERMDI (COPY_TO_REGCLASS f64:$B, VSRC),
3903                            (COPY_TO_REGCLASS f64:$D, VSRC), 0));
3904  dag CVACS = (v4i32 (XVCVDPSXWS AC));
3905  dag CVBDS = (v4i32 (XVCVDPSXWS BD));
3906  dag CVACU = (v4i32 (XVCVDPUXWS AC));
3907  dag CVBDU = (v4i32 (XVCVDPUXWS BD));
3908
3909  // For little endian, we merge the high doublewords of (D, B) and (C, A),
3910  // convert, then merge.
3911  dag DB = (v2f64 (XXPERMDI (COPY_TO_REGCLASS f64:$D, VSRC),
3912                            (COPY_TO_REGCLASS f64:$B, VSRC), 0));
3913  dag CA = (v2f64 (XXPERMDI (COPY_TO_REGCLASS f64:$C, VSRC),
3914                            (COPY_TO_REGCLASS f64:$A, VSRC), 0));
3915  dag CVDBS = (v4i32 (XVCVDPSXWS DB));
3916  dag CVCAS = (v4i32 (XVCVDPSXWS CA));
3917  dag CVDBU = (v4i32 (XVCVDPUXWS DB));
3918  dag CVCAU = (v4i32 (XVCVDPUXWS CA));
3919}
3920
3921// Patterns for BUILD_VECTOR nodes.
3922let AddedComplexity = 400 in {
3923
3924  let Predicates = [HasVSX] in {
3925    // Build vectors of floating point converted to i32.
3926    def : Pat<(v4i32 (build_vector DblToInt.A, DblToInt.A,
3927                                   DblToInt.A, DblToInt.A)),
3928              (v4i32 (XXSPLTW (COPY_TO_REGCLASS (XSCVDPSXWS $A), VSRC), 1))>;
3929    def : Pat<(v4i32 (build_vector DblToUInt.A, DblToUInt.A,
3930                                   DblToUInt.A, DblToUInt.A)),
3931              (v4i32 (XXSPLTW (COPY_TO_REGCLASS (XSCVDPUXWS $A), VSRC), 1))>;
3932    def : Pat<(v2i64 (build_vector DblToLong.A, DblToLong.A)),
3933              (v2i64 (XXPERMDI (COPY_TO_REGCLASS (XSCVDPSXDS $A), VSRC),
3934                               (COPY_TO_REGCLASS (XSCVDPSXDS $A), VSRC), 0))>;
3935    def : Pat<(v2i64 (build_vector DblToULong.A, DblToULong.A)),
3936              (v2i64 (XXPERMDI (COPY_TO_REGCLASS (XSCVDPUXDS $A), VSRC),
3937                               (COPY_TO_REGCLASS (XSCVDPUXDS $A), VSRC), 0))>;
3938    def : Pat<(v4i32 (scalar_to_vector FltToIntLoad.A)),
3939              (v4i32 (XXSPLTW (COPY_TO_REGCLASS
3940                                (XSCVDPSXWSs (XFLOADf32 xoaddr:$A)), VSRC), 1))>;
3941    def : Pat<(v4i32 (scalar_to_vector FltToUIntLoad.A)),
3942              (v4i32 (XXSPLTW (COPY_TO_REGCLASS
3943                                (XSCVDPUXWSs (XFLOADf32 xoaddr:$A)), VSRC), 1))>;
3944    def : Pat<(v4f32 (build_vector f32:$A, f32:$A, f32:$A, f32:$A)),
3945              (v4f32 (XXSPLTW (v4f32 (XSCVDPSPN $A)), 0))>;
3946    def : Pat<(v2f64 (PPCldsplat xoaddr:$A)),
3947              (v2f64 (LXVDSX xoaddr:$A))>;
3948    def : Pat<(v2i64 (PPCldsplat xoaddr:$A)),
3949              (v2i64 (LXVDSX xoaddr:$A))>;
3950
3951    // Build vectors of floating point converted to i64.
3952    def : Pat<(v2i64 (build_vector FltToLong.A, FltToLong.A)),
3953              (v2i64 (XXPERMDIs
3954                       (COPY_TO_REGCLASS (XSCVDPSXDSs $A), VSFRC), 0))>;
3955    def : Pat<(v2i64 (build_vector FltToULong.A, FltToULong.A)),
3956              (v2i64 (XXPERMDIs
3957                       (COPY_TO_REGCLASS (XSCVDPUXDSs $A), VSFRC), 0))>;
3958    def : Pat<(v2i64 (scalar_to_vector DblToLongLoad.A)),
3959              (v2i64 (XVCVDPSXDS (LXVDSX xoaddr:$A)))>;
3960    def : Pat<(v2i64 (scalar_to_vector DblToULongLoad.A)),
3961              (v2i64 (XVCVDPUXDS (LXVDSX xoaddr:$A)))>;
3962  }
3963
3964  let Predicates = [HasVSX, NoP9Vector] in {
3965    // Load-and-splat with fp-to-int conversion (using X-Form VSX/FP loads).
3966    def : Pat<(v4i32 (scalar_to_vector DblToIntLoad.A)),
3967              (v4i32 (XXSPLTW (COPY_TO_REGCLASS
3968                                (XSCVDPSXWS (XFLOADf64 xoaddr:$A)), VSRC), 1))>;
3969    def : Pat<(v4i32 (scalar_to_vector DblToUIntLoad.A)),
3970              (v4i32 (XXSPLTW (COPY_TO_REGCLASS
3971                                (XSCVDPUXWS (XFLOADf64 xoaddr:$A)), VSRC), 1))>;
3972    def : Pat<(v2i64 (scalar_to_vector FltToLongLoad.A)),
3973              (v2i64 (XXPERMDIs (XSCVDPSXDS (COPY_TO_REGCLASS
3974                                              (XFLOADf32 xoaddr:$A), VSFRC)), 0))>;
3975    def : Pat<(v2i64 (scalar_to_vector FltToULongLoad.A)),
3976              (v2i64 (XXPERMDIs (XSCVDPUXDS (COPY_TO_REGCLASS
3977                                              (XFLOADf32 xoaddr:$A), VSFRC)), 0))>;
3978  }
3979
3980  let Predicates = [IsBigEndian, HasP8Vector] in {
3981    def : Pat<DWToSPExtractConv.BVU,
3982              (v4f32 (VPKUDUM (XXSLDWI (XVCVUXDSP $S1), (XVCVUXDSP $S1), 3),
3983                              (XXSLDWI (XVCVUXDSP $S2), (XVCVUXDSP $S2), 3)))>;
3984    def : Pat<DWToSPExtractConv.BVS,
3985              (v4f32 (VPKUDUM (XXSLDWI (XVCVSXDSP $S1), (XVCVSXDSP $S1), 3),
3986                              (XXSLDWI (XVCVSXDSP $S2), (XVCVSXDSP $S2), 3)))>;
3987    def : Pat<(store (i32 (extractelt v4i32:$A, 1)), xoaddr:$src),
3988              (STIWX (EXTRACT_SUBREG $A, sub_64), xoaddr:$src)>;
3989    def : Pat<(store (f32 (extractelt v4f32:$A, 1)), xoaddr:$src),
3990              (STIWX (EXTRACT_SUBREG $A, sub_64), xoaddr:$src)>;
3991
3992    // Elements in a register on a BE system are in order <0, 1, 2, 3>.
3993    // The store instructions store the second word from the left.
3994    // So to align element zero, we need to modulo-left-shift by 3 words.
3995    // Similar logic applies for elements 2 and 3.
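    // For example, the [0,3] pair below uses (XXSLDWI $A, $A, 3) to rotate
    // element 0 into word position 1, the word that STIWX stores; element 1 is
    // already there and is covered by the patterns above.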
3996    foreach Idx = [ [0,3], [2,1], [3,2] ] in {
3997      def : Pat<(store (i32 (extractelt v4i32:$A, !head(Idx))), xoaddr:$src),
3998                (STIWX (EXTRACT_SUBREG (XXSLDWI $A, $A, !head(!tail(Idx))),
3999                                       sub_64), xoaddr:$src)>;
4000      def : Pat<(store (f32 (extractelt v4f32:$A, !head(Idx))), xoaddr:$src),
4001                (STIWX (EXTRACT_SUBREG (XXSLDWI $A, $A, !head(!tail(Idx))),
4002                                       sub_64), xoaddr:$src)>;
4003    }
4004  }
4005
4006  let Predicates = [HasP8Vector, IsBigEndian, NoP9Vector] in {
4007    def : Pat<(store (i64 (extractelt v2i64:$A, 0)), xoaddr:$src),
4008              (XFSTOREf64 (EXTRACT_SUBREG $A, sub_64), xoaddr:$src)>;
4009    def : Pat<(store (f64 (extractelt v2f64:$A, 0)), xoaddr:$src),
4010              (XFSTOREf64 (EXTRACT_SUBREG $A, sub_64), xoaddr:$src)>;
4011    def : Pat<(store (i64 (extractelt v2i64:$A, 1)), xoaddr:$src),
4012              (XFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2), sub_64),
4013                          xoaddr:$src)>;
4014    def : Pat<(store (f64 (extractelt v2f64:$A, 1)), xoaddr:$src),
4015              (XFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2), sub_64),
4016                          xoaddr:$src)>;
4017  }
4018
4019  // Big endian, available on all targets with VSX
4020  let Predicates = [IsBigEndian, HasVSX] in {
4021    def : Pat<(v2f64 (build_vector f64:$A, f64:$B)),
4022              (v2f64 (XXPERMDI
4023                        (COPY_TO_REGCLASS $A, VSRC),
4024                        (COPY_TO_REGCLASS $B, VSRC), 0))>;
4025
4026    def : Pat<(v4f32 (build_vector f32:$A, f32:$B, f32:$C, f32:$D)),
4027              (VMRGEW MrgFP.AC, MrgFP.BD)>;
4028    def : Pat<(v4f32 (build_vector DblToFlt.A0, DblToFlt.A1,
4029                                   DblToFlt.B0, DblToFlt.B1)),
4030              (v4f32 (VMRGEW MrgFP.ABhToFlt, MrgFP.ABlToFlt))>;
4031
4032    // Convert 4 doubles to a vector of ints.
4033    def : Pat<(v4i32 (build_vector DblToInt.A, DblToInt.B,
4034                                   DblToInt.C, DblToInt.D)),
4035              (v4i32 (VMRGEW MrgWords.CVACS, MrgWords.CVBDS))>;
4036    def : Pat<(v4i32 (build_vector DblToUInt.A, DblToUInt.B,
4037                                   DblToUInt.C, DblToUInt.D)),
4038              (v4i32 (VMRGEW MrgWords.CVACU, MrgWords.CVBDU))>;
4039    def : Pat<(v4i32 (build_vector ExtDbl.A0S, ExtDbl.A1S,
4040                                   ExtDbl.B0S, ExtDbl.B1S)),
4041              (v4i32 (VMRGEW MrgWords.CVA0B0S, MrgWords.CVA1B1S))>;
4042    def : Pat<(v4i32 (build_vector ExtDbl.A0U, ExtDbl.A1U,
4043                                   ExtDbl.B0U, ExtDbl.B1U)),
4044              (v4i32 (VMRGEW MrgWords.CVA0B0U, MrgWords.CVA1B1U))>;
4045  }
4046
4047  let Predicates = [IsLittleEndian, HasP8Vector] in {
4048    def : Pat<DWToSPExtractConv.BVU,
4049              (v4f32 (VPKUDUM (XXSLDWI (XVCVUXDSP $S2), (XVCVUXDSP $S2), 3),
4050                              (XXSLDWI (XVCVUXDSP $S1), (XVCVUXDSP $S1), 3)))>;
4051    def : Pat<DWToSPExtractConv.BVS,
4052              (v4f32 (VPKUDUM (XXSLDWI (XVCVSXDSP $S2), (XVCVSXDSP $S2), 3),
4053                              (XXSLDWI (XVCVSXDSP $S1), (XVCVSXDSP $S1), 3)))>;
4054    def : Pat<(store (i32 (extractelt v4i32:$A, 2)), xoaddr:$src),
4055              (STIWX (EXTRACT_SUBREG $A, sub_64), xoaddr:$src)>;
4056    def : Pat<(store (f32 (extractelt v4f32:$A, 2)), xoaddr:$src),
4057              (STIWX (EXTRACT_SUBREG $A, sub_64), xoaddr:$src)>;
4058
4059    // Elements in a register on a LE system are in order <3, 2, 1, 0>.
4060    // The store instructions store the second word from the left.
4061    // So to align element 3, we need to modulo-left-shift by 3 words.
4062    // Similar logic applies for elements 0 and 1.
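    // For example, the [0,2] pair below uses (XXSLDWI $A, $A, 2) to rotate
    // element 0 (the rightmost word on LE) into the word that STIWX stores;
    // element 2 is already there and is covered by the patterns above.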
4063    foreach Idx = [ [0,2], [1,1], [3,3] ] in {
4064      def : Pat<(store (i32 (extractelt v4i32:$A, !head(Idx))), xoaddr:$src),
4065                (STIWX (EXTRACT_SUBREG (XXSLDWI $A, $A, !head(!tail(Idx))),
4066                                       sub_64), xoaddr:$src)>;
4067      def : Pat<(store (f32 (extractelt v4f32:$A, !head(Idx))), xoaddr:$src),
4068                (STIWX (EXTRACT_SUBREG (XXSLDWI $A, $A, !head(!tail(Idx))),
4069                                       sub_64), xoaddr:$src)>;
4070    }
4071  }
4072
4073  let Predicates = [HasP8Vector, IsLittleEndian, NoP9Vector] in {
4074    def : Pat<(store (i64 (extractelt v2i64:$A, 0)), xoaddr:$src),
4075              (XFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2), sub_64),
4076                          xoaddr:$src)>;
4077    def : Pat<(store (f64 (extractelt v2f64:$A, 0)), xoaddr:$src),
4078              (XFSTOREf64 (EXTRACT_SUBREG (XXPERMDI $A, $A, 2), sub_64),
4079                          xoaddr:$src)>;
4080    def : Pat<(store (i64 (extractelt v2i64:$A, 1)), xoaddr:$src),
4081              (XFSTOREf64 (EXTRACT_SUBREG $A, sub_64), xoaddr:$src)>;
4082    def : Pat<(store (f64 (extractelt v2f64:$A, 1)), xoaddr:$src),
4083              (XFSTOREf64 (EXTRACT_SUBREG $A, sub_64), xoaddr:$src)>;
4084  }
4085
4086  // Little endian, available on all targets with VSX
4087  let Predicates = [IsLittleEndian, HasVSX] in {
4088    def : Pat<(v2f64 (build_vector f64:$A, f64:$B)),
4089              (v2f64 (XXPERMDI
4090                        (COPY_TO_REGCLASS $B, VSRC),
4091                        (COPY_TO_REGCLASS $A, VSRC), 0))>;
4092
4093    def : Pat<(v4f32 (build_vector f32:$D, f32:$C, f32:$B, f32:$A)),
4094              (VMRGEW MrgFP.AC, MrgFP.BD)>;
4095    def : Pat<(v4f32 (build_vector DblToFlt.A0, DblToFlt.A1,
4096                                   DblToFlt.B0, DblToFlt.B1)),
4097              (v4f32 (VMRGEW MrgFP.BAhToFlt, MrgFP.BAlToFlt))>;
4098
4099    // Convert 4 doubles to a vector of ints.
4100    def : Pat<(v4i32 (build_vector DblToInt.A, DblToInt.B,
4101                                   DblToInt.C, DblToInt.D)),
4102              (v4i32 (VMRGEW MrgWords.CVDBS, MrgWords.CVCAS))>;
4103    def : Pat<(v4i32 (build_vector DblToUInt.A, DblToUInt.B,
4104                                   DblToUInt.C, DblToUInt.D)),
4105              (v4i32 (VMRGEW MrgWords.CVDBU, MrgWords.CVCAU))>;
4106    def : Pat<(v4i32 (build_vector ExtDbl.A0S, ExtDbl.A1S,
4107                                   ExtDbl.B0S, ExtDbl.B1S)),
4108              (v4i32 (VMRGEW MrgWords.CVB1A1S, MrgWords.CVB0A0S))>;
4109    def : Pat<(v4i32 (build_vector ExtDbl.A0U, ExtDbl.A1U,
4110                                   ExtDbl.B0U, ExtDbl.B1U)),
4111              (v4i32 (VMRGEW MrgWords.CVB1A1U, MrgWords.CVB0A0U))>;
4112  }
4113
4114  let Predicates = [HasDirectMove] in {
4115    // Endianness-neutral constant splat on P8 and newer targets. The reason
4116    // for this pattern is that on targets with direct moves, we don't expand
4117    // BUILD_VECTOR nodes for v4i32.
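    // For example, (build_vector -5, -5, -5, -5) matches immSExt5NonZero and
    // becomes a single (VSPLTISW -5), which splats the same value regardless
    // of endianness.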
4118    def : Pat<(v4i32 (build_vector immSExt5NonZero:$A, immSExt5NonZero:$A,
4119                                   immSExt5NonZero:$A, immSExt5NonZero:$A)),
4120              (v4i32 (VSPLTISW imm:$A))>;
4121  }
4122
4123  let Predicates = [IsBigEndian, HasDirectMove, NoP9Vector] in {
4124    // Big endian integer vectors using direct moves.
4125    def : Pat<(v2i64 (build_vector i64:$A, i64:$B)),
4126              (v2i64 (XXPERMDI
4127                        (COPY_TO_REGCLASS (MTVSRD $A), VSRC),
4128                        (COPY_TO_REGCLASS (MTVSRD $B), VSRC), 0))>;
4129    def : Pat<(v4i32 (build_vector i32:$A, i32:$B, i32:$C, i32:$D)),
4130              (XXPERMDI
4131                (COPY_TO_REGCLASS
4132                  (MTVSRD (RLDIMI AnyExts.B, AnyExts.A, 32, 0)), VSRC),
4133                (COPY_TO_REGCLASS
4134                  (MTVSRD (RLDIMI AnyExts.D, AnyExts.C, 32, 0)), VSRC), 0)>;
4135    def : Pat<(v4i32 (build_vector i32:$A, i32:$A, i32:$A, i32:$A)),
4136              (XXSPLTW (COPY_TO_REGCLASS (MTVSRWZ $A), VSRC), 1)>;
4137  }
4138
4139  let Predicates = [IsLittleEndian, HasDirectMove, NoP9Vector] in {
4140    // Little endian integer vectors using direct moves.
4141    def : Pat<(v2i64 (build_vector i64:$A, i64:$B)),
4142              (v2i64 (XXPERMDI
4143                        (COPY_TO_REGCLASS (MTVSRD $B), VSRC),
4144                        (COPY_TO_REGCLASS (MTVSRD $A), VSRC), 0))>;
4145    def : Pat<(v4i32 (build_vector i32:$A, i32:$B, i32:$C, i32:$D)),
4146              (XXPERMDI
4147                (COPY_TO_REGCLASS
4148                  (MTVSRD (RLDIMI AnyExts.C, AnyExts.D, 32, 0)), VSRC),
4149                (COPY_TO_REGCLASS
4150                  (MTVSRD (RLDIMI AnyExts.A, AnyExts.B, 32, 0)), VSRC), 0)>;
4151    def : Pat<(v4i32 (build_vector i32:$A, i32:$A, i32:$A, i32:$A)),
4152              (XXSPLTW (COPY_TO_REGCLASS (MTVSRWZ $A), VSRC), 1)>;
4153  }
4154
4155  let Predicates = [HasP8Vector] in {
4156    def : Pat<(v1i128 (bitconvert (v16i8 immAllOnesV))),
4157              (v1i128 (COPY_TO_REGCLASS(XXLEQVOnes), VSRC))>;
4158    def : Pat<(v2i64 (bitconvert (v16i8 immAllOnesV))),
4159              (v2i64 (COPY_TO_REGCLASS(XXLEQVOnes), VSRC))>;
4160    def : Pat<(v8i16 (bitconvert (v16i8 immAllOnesV))),
4161              (v8i16 (COPY_TO_REGCLASS(XXLEQVOnes), VSRC))>;
4162    def : Pat<(v16i8 (bitconvert (v16i8 immAllOnesV))),
4163              (v16i8 (COPY_TO_REGCLASS(XXLEQVOnes), VSRC))>;
4164  }
4165
4166  let Predicates = [HasP9Vector] in {
4167    // Endianness-neutral patterns for const splats with ISA 3.0 instructions.
4168    def : Pat<(v4i32 (scalar_to_vector i32:$A)),
4169              (v4i32 (MTVSRWS $A))>;
4170    def : Pat<(v4i32 (build_vector i32:$A, i32:$A, i32:$A, i32:$A)),
4171              (v4i32 (MTVSRWS $A))>;
4172    def : Pat<(v16i8 (build_vector immNonAllOneAnyExt8:$A, immNonAllOneAnyExt8:$A,
4173                                   immNonAllOneAnyExt8:$A, immNonAllOneAnyExt8:$A,
4174                                   immNonAllOneAnyExt8:$A, immNonAllOneAnyExt8:$A,
4175                                   immNonAllOneAnyExt8:$A, immNonAllOneAnyExt8:$A,
4176                                   immNonAllOneAnyExt8:$A, immNonAllOneAnyExt8:$A,
4177                                   immNonAllOneAnyExt8:$A, immNonAllOneAnyExt8:$A,
4178                                   immNonAllOneAnyExt8:$A, immNonAllOneAnyExt8:$A,
4179                                   immNonAllOneAnyExt8:$A, immNonAllOneAnyExt8:$A)),
4180              (v16i8 (COPY_TO_REGCLASS (XXSPLTIB imm:$A), VSRC))>;
4181    def : Pat<(v4i32 (scalar_to_vector FltToIntLoad.A)),
4182              (v4i32 (XVCVSPSXWS (LXVWSX xoaddr:$A)))>;
4183    def : Pat<(v4i32 (scalar_to_vector FltToUIntLoad.A)),
4184              (v4i32 (XVCVSPUXWS (LXVWSX xoaddr:$A)))>;
4185    def : Pat<(v4i32 (scalar_to_vector DblToIntLoadP9.A)),
4186              (v4i32 (XXSPLTW (COPY_TO_REGCLASS
4187                                (XSCVDPSXWS (DFLOADf64 iaddrX4:$A)), VSRC), 1))>;
4188    def : Pat<(v4i32 (scalar_to_vector DblToUIntLoadP9.A)),
4189              (v4i32 (XXSPLTW (COPY_TO_REGCLASS
4190                                (XSCVDPUXWS (DFLOADf64 iaddrX4:$A)), VSRC), 1))>;
4191    def : Pat<(v2i64 (scalar_to_vector FltToLongLoadP9.A)),
4192              (v2i64 (XXPERMDIs (XSCVDPSXDS (COPY_TO_REGCLASS
4193                                              (DFLOADf32 iaddrX4:$A),
4194                                              VSFRC)), 0))>;
4195    def : Pat<(v2i64 (scalar_to_vector FltToULongLoadP9.A)),
4196              (v2i64 (XXPERMDIs (XSCVDPUXDS (COPY_TO_REGCLASS
4197                                              (DFLOADf32 iaddrX4:$A),
4198                                              VSFRC)), 0))>;
4199    def : Pat<(v4f32 (PPCldsplat xoaddr:$A)),
4200              (v4f32 (LXVWSX xoaddr:$A))>;
4201    def : Pat<(v4i32 (PPCldsplat xoaddr:$A)),
4202              (v4i32 (LXVWSX xoaddr:$A))>;
4203  }
4204
4205  let Predicates = [IsISA3_0, HasDirectMove, IsBigEndian] in {
4206    def : Pat<(i64 (extractelt v2i64:$A, 1)),
4207              (i64 (MFVSRLD $A))>;
4208    // Better way to build integer vectors if we have MTVSRDD. Big endian.
4209    def : Pat<(v2i64 (build_vector i64:$rB, i64:$rA)),
4210              (v2i64 (MTVSRDD $rB, $rA))>;
4211    def : Pat<(v4i32 (build_vector i32:$A, i32:$B, i32:$C, i32:$D)),
4212              (MTVSRDD
4213                (RLDIMI AnyExts.B, AnyExts.A, 32, 0),
4214                (RLDIMI AnyExts.D, AnyExts.C, 32, 0))>;
4215  }
4216
4217  let Predicates = [IsISA3_0, HasDirectMove, IsLittleEndian] in {
4218    def : Pat<(i64 (extractelt v2i64:$A, 0)),
4219              (i64 (MFVSRLD $A))>;
4220    // Better way to build integer vectors if we have MTVSRDD. Little endian.
4221    def : Pat<(v2i64 (build_vector i64:$rA, i64:$rB)),
4222              (v2i64 (MTVSRDD $rB, $rA))>;
4223    def : Pat<(v4i32 (build_vector i32:$A, i32:$B, i32:$C, i32:$D)),
4224              (MTVSRDD
4225                (RLDIMI AnyExts.C, AnyExts.D, 32, 0),
4226                (RLDIMI AnyExts.A, AnyExts.B, 32, 0))>;
4227  }
4228  // P9 Altivec instructions that can be used to build vectors.
4229  // Adding them to PPCInstrVSX.td rather than PPCAltivecVSX.td lets them compete
4230  // with the complexities of the existing build vector patterns in this file.
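  // For example, the little-endian pattern below turns a build_vector of the
  // sign-extended byte elements 0, 4, 8 and 12 of a v16i8 (ByteToWord.LE_A0
  // through LE_A3) into a single VEXTSB2W.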
4231  let Predicates = [HasP9Altivec, IsLittleEndian] in {
4232    def : Pat<(v2i64 (build_vector WordToDWord.LE_A0, WordToDWord.LE_A1)),
4233              (v2i64 (VEXTSW2D $A))>;
4234    def : Pat<(v2i64 (build_vector HWordToDWord.LE_A0, HWordToDWord.LE_A1)),
4235              (v2i64 (VEXTSH2D $A))>;
4236    def : Pat<(v4i32 (build_vector HWordToWord.LE_A0, HWordToWord.LE_A1,
4237                      HWordToWord.LE_A2, HWordToWord.LE_A3)),
4238              (v4i32 (VEXTSH2W $A))>;
4239    def : Pat<(v4i32 (build_vector ByteToWord.LE_A0, ByteToWord.LE_A1,
4240                      ByteToWord.LE_A2, ByteToWord.LE_A3)),
4241              (v4i32 (VEXTSB2W $A))>;
4242    def : Pat<(v2i64 (build_vector ByteToDWord.LE_A0, ByteToDWord.LE_A1)),
4243              (v2i64 (VEXTSB2D $A))>;
4244  }
4245
4246  let Predicates = [HasP9Altivec, IsBigEndian] in {
4247    def : Pat<(v2i64 (build_vector WordToDWord.BE_A0, WordToDWord.BE_A1)),
4248              (v2i64 (VEXTSW2D $A))>;
4249    def : Pat<(v2i64 (build_vector HWordToDWord.BE_A0, HWordToDWord.BE_A1)),
4250              (v2i64 (VEXTSH2D $A))>;
4251    def : Pat<(v4i32 (build_vector HWordToWord.BE_A0, HWordToWord.BE_A1,
4252                      HWordToWord.BE_A2, HWordToWord.BE_A3)),
4253              (v4i32 (VEXTSH2W $A))>;
4254    def : Pat<(v4i32 (build_vector ByteToWord.BE_A0, ByteToWord.BE_A1,
4255                      ByteToWord.BE_A2, ByteToWord.BE_A3)),
4256              (v4i32 (VEXTSB2W $A))>;
4257    def : Pat<(v2i64 (build_vector ByteToDWord.BE_A0, ByteToDWord.BE_A1)),
4258              (v2i64 (VEXTSB2D $A))>;
4259  }
4260
4261  let Predicates = [HasP9Altivec] in {
4262    def: Pat<(v2i64 (PPCSExtVElems v16i8:$A)),
4263              (v2i64 (VEXTSB2D $A))>;
4264    def: Pat<(v2i64 (PPCSExtVElems v8i16:$A)),
4265              (v2i64 (VEXTSH2D $A))>;
4266    def: Pat<(v2i64 (PPCSExtVElems v4i32:$A)),
4267              (v2i64 (VEXTSW2D $A))>;
4268    def: Pat<(v4i32 (PPCSExtVElems v16i8:$A)),
4269              (v4i32 (VEXTSB2W $A))>;
4270    def: Pat<(v4i32 (PPCSExtVElems v8i16:$A)),
4271              (v4i32 (VEXTSH2W $A))>;
4272  }
4273}
4274
4275// Put this P9Altivec-related definition here since it may be selected to the
4276// VSX instruction xvnegsp, which avoids a possible undef.
4277let Predicates = [HasP9Altivec] in {
4278
4279  def : Pat<(v4i32 (PPCvabsd v4i32:$A, v4i32:$B, (i32 0))),
4280            (v4i32 (VABSDUW $A, $B))>;
4281
4282  def : Pat<(v8i16 (PPCvabsd v8i16:$A, v8i16:$B, (i32 0))),
4283            (v8i16 (VABSDUH $A, $B))>;
4284
4285  def : Pat<(v16i8 (PPCvabsd v16i8:$A, v16i8:$B, (i32 0))),
4286            (v16i8 (VABSDUB $A, $B))>;
4287
4288  // As described for PPCVABSD, the last operand indicates whether to flip the
4289  // sign bit.
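  // Flipping the sign bit with XVNEGSP biases signed i32 values into unsigned
  // order, so the unsigned VABSDUW below also yields the signed absolute
  // difference.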
4290  def : Pat<(v4i32 (PPCvabsd v4i32:$A, v4i32:$B, (i32 1))),
4291            (v4i32 (VABSDUW (XVNEGSP $A), (XVNEGSP $B)))>;
4292}
4293