//===-- X86InstrShiftRotate.td - Shift and Rotate Instrs ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the shift and rotate instructions.
//
//===----------------------------------------------------------------------===//

// FIXME: Someone needs to smear multipattern goodness all over this file.

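// Encoding note: the legacy shift/rotate group used throughout this file
// encodes the count three ways -- C0/C1 take an imm8, D0/D1 shift by 1, and
// D2/D3 shift by CL -- while the ModRM reg field (MRM0r/m..MRM7r/m below)
// selects the operation: /0 ROL, /1 ROR, /2 RCL, /3 RCR, /4 SHL, /5 SHR,
// /7 SAR.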
let Defs = [EFLAGS] in {

let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
let Uses = [CL], SchedRW = [WriteShiftCL] in {
def SHL8rCL  : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "shl{b}\t{%cl, $dst|$dst, cl}",
                 [(set GR8:$dst, (shl GR8:$src1, CL))]>;
def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
                 "shl{w}\t{%cl, $dst|$dst, cl}",
                 [(set GR16:$dst, (shl GR16:$src1, CL))]>, OpSize16;
def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
                 "shl{l}\t{%cl, $dst|$dst, cl}",
                 [(set GR32:$dst, (shl GR32:$src1, CL))]>, OpSize32;
def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
                  "shl{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (shl GR64:$src1, CL))]>;
} // Uses = [CL], SchedRW

let isConvertibleToThreeAddress = 1 in {   // Can transform into LEA.
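// For example, "shl $2, %eax" can be re-encoded as "leal (,%eax,4), %eax",
// which frees the tied source register (shifts by 1, 2 or 3 map to LEA
// scales 2, 4 and 8).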
def SHL8ri   : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
                   "shl{b}\t{$src2, $dst|$dst, $src2}",
                   [(set GR8:$dst, (shl GR8:$src1, (i8 imm:$src2)))]>;

def SHL16ri  : Ii8<0xC1, MRM4r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
                   "shl{w}\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (shl GR16:$src1, (i8 imm:$src2)))]>,
                   OpSize16;
def SHL32ri  : Ii8<0xC1, MRM4r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
                   "shl{l}\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (shl GR32:$src1, (i8 imm:$src2)))]>,
                   OpSize32;
def SHL64ri  : RIi8<0xC1, MRM4r, (outs GR64:$dst),
                    (ins GR64:$src1, u8imm:$src2),
                    "shl{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
} // isConvertibleToThreeAddress = 1

// NOTE: We don't include patterns for shifts of a register by one, because
// 'add reg,reg' is cheaper (and we have a Pat pattern for shift-by-one).
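// (In upstream LLVM those shift-by-one Pat patterns live in
// X86InstrCompiler.td, roughly of the shape
//   def : Pat<(shl GR8:$src1, (i8 1)), (ADD8rr GR8:$src1, GR8:$src1)>;
// which is why the SHL*r1 register forms below carry empty pattern lists.)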
let hasSideEffects = 0 in {
def SHL8r1   : I<0xD0, MRM4r, (outs GR8:$dst), (ins GR8:$src1),
                 "shl{b}\t$dst", []>;
def SHL16r1  : I<0xD1, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
                 "shl{w}\t$dst", []>, OpSize16;
def SHL32r1  : I<0xD1, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
                 "shl{l}\t$dst", []>, OpSize32;
def SHL64r1  : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
                 "shl{q}\t$dst", []>;
} // hasSideEffects = 0
} // Constraints = "$src1 = $dst", SchedRW

// FIXME: Why do we need an explicit "Uses = [CL]" when the instr has a pattern
// using CL?
let Uses = [CL], SchedRW = [WriteShiftCLLd, WriteRMW] in {
def SHL8mCL  : I<0xD2, MRM4m, (outs), (ins i8mem :$dst),
                 "shl{b}\t{%cl, $dst|$dst, cl}",
                 [(store (shl (loadi8 addr:$dst), CL), addr:$dst)]>;
def SHL16mCL : I<0xD3, MRM4m, (outs), (ins i16mem:$dst),
                 "shl{w}\t{%cl, $dst|$dst, cl}",
                 [(store (shl (loadi16 addr:$dst), CL), addr:$dst)]>,
                 OpSize16;
def SHL32mCL : I<0xD3, MRM4m, (outs), (ins i32mem:$dst),
                 "shl{l}\t{%cl, $dst|$dst, cl}",
                 [(store (shl (loadi32 addr:$dst), CL), addr:$dst)]>,
                 OpSize32;
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
                  "shl{q}\t{%cl, $dst|$dst, cl}",
                  [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Requires<[In64BitMode]>;
}

let SchedRW = [WriteShiftLd, WriteRMW] in {
def SHL8mi   : Ii8<0xC0, MRM4m, (outs), (ins i8mem :$dst, u8imm:$src),
                   "shl{b}\t{$src, $dst|$dst, $src}",
                [(store (shl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHL16mi  : Ii8<0xC1, MRM4m, (outs), (ins i16mem:$dst, u8imm:$src),
                   "shl{w}\t{$src, $dst|$dst, $src}",
               [(store (shl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
               OpSize16;
def SHL32mi  : Ii8<0xC1, MRM4m, (outs), (ins i32mem:$dst, u8imm:$src),
                   "shl{l}\t{$src, $dst|$dst, $src}",
               [(store (shl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
               OpSize32;
def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, u8imm:$src),
                  "shl{q}\t{$src, $dst|$dst, $src}",
                  [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                  Requires<[In64BitMode]>;

// Shift by 1
def SHL8m1   : I<0xD0, MRM4m, (outs), (ins i8mem :$dst),
                 "shl{b}\t$dst",
                [(store (shl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
def SHL16m1  : I<0xD1, MRM4m, (outs), (ins i16mem:$dst),
                 "shl{w}\t$dst",
                 [(store (shl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
                 OpSize16;
def SHL32m1  : I<0xD1, MRM4m, (outs), (ins i32mem:$dst),
                 "shl{l}\t$dst",
                 [(store (shl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>,
                 OpSize32;
def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
                  "shl{q}\t$dst",
                 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>,
                 Requires<[In64BitMode]>;
} // SchedRW

let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
let Uses = [CL], SchedRW = [WriteShiftCL] in {
def SHR8rCL  : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "shr{b}\t{%cl, $dst|$dst, cl}",
                 [(set GR8:$dst, (srl GR8:$src1, CL))]>;
def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
                 "shr{w}\t{%cl, $dst|$dst, cl}",
                 [(set GR16:$dst, (srl GR16:$src1, CL))]>, OpSize16;
def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
                 "shr{l}\t{%cl, $dst|$dst, cl}",
                 [(set GR32:$dst, (srl GR32:$src1, CL))]>, OpSize32;
def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                  "shr{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (srl GR64:$src1, CL))]>;
}

def SHR8ri   : Ii8<0xC0, MRM5r, (outs GR8:$dst), (ins GR8:$src1, u8imm:$src2),
                   "shr{b}\t{$src2, $dst|$dst, $src2}",
                   [(set GR8:$dst, (srl GR8:$src1, (i8 imm:$src2)))]>;
def SHR16ri  : Ii8<0xC1, MRM5r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
                   "shr{w}\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (srl GR16:$src1, (i8 imm:$src2)))]>,
                   OpSize16;
def SHR32ri  : Ii8<0xC1, MRM5r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
                   "shr{l}\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (srl GR32:$src1, (i8 imm:$src2)))]>,
                   OpSize32;
def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, u8imm:$src2),
                  "shr{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;

// Shift right by 1
def SHR8r1   : I<0xD0, MRM5r, (outs GR8:$dst), (ins GR8:$src1),
                 "shr{b}\t$dst",
                 [(set GR8:$dst, (srl GR8:$src1, (i8 1)))]>;
def SHR16r1  : I<0xD1, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
                 "shr{w}\t$dst",
                 [(set GR16:$dst, (srl GR16:$src1, (i8 1)))]>, OpSize16;
def SHR32r1  : I<0xD1, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
                 "shr{l}\t$dst",
                 [(set GR32:$dst, (srl GR32:$src1, (i8 1)))]>, OpSize32;
def SHR64r1  : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                 "shr{q}\t$dst",
                 [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst", SchedRW


let Uses = [CL], SchedRW = [WriteShiftCLLd, WriteRMW] in {
def SHR8mCL  : I<0xD2, MRM5m, (outs), (ins i8mem :$dst),
                 "shr{b}\t{%cl, $dst|$dst, cl}",
                 [(store (srl (loadi8 addr:$dst), CL), addr:$dst)]>;
def SHR16mCL : I<0xD3, MRM5m, (outs), (ins i16mem:$dst),
                 "shr{w}\t{%cl, $dst|$dst, cl}",
                 [(store (srl (loadi16 addr:$dst), CL), addr:$dst)]>,
                 OpSize16;
def SHR32mCL : I<0xD3, MRM5m, (outs), (ins i32mem:$dst),
                 "shr{l}\t{%cl, $dst|$dst, cl}",
                 [(store (srl (loadi32 addr:$dst), CL), addr:$dst)]>,
                 OpSize32;
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
                  "shr{q}\t{%cl, $dst|$dst, cl}",
                  [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Requires<[In64BitMode]>;
}

let SchedRW = [WriteShiftLd, WriteRMW] in {
def SHR8mi   : Ii8<0xC0, MRM5m, (outs), (ins i8mem :$dst, u8imm:$src),
                   "shr{b}\t{$src, $dst|$dst, $src}",
                [(store (srl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHR16mi  : Ii8<0xC1, MRM5m, (outs), (ins i16mem:$dst, u8imm:$src),
                   "shr{w}\t{$src, $dst|$dst, $src}",
               [(store (srl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
               OpSize16;
def SHR32mi  : Ii8<0xC1, MRM5m, (outs), (ins i32mem:$dst, u8imm:$src),
                   "shr{l}\t{$src, $dst|$dst, $src}",
               [(store (srl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
               OpSize32;
def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, u8imm:$src),
                  "shr{q}\t{$src, $dst|$dst, $src}",
                 [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                 Requires<[In64BitMode]>;

// Shift by 1
def SHR8m1   : I<0xD0, MRM5m, (outs), (ins i8mem :$dst),
                 "shr{b}\t$dst",
                 [(store (srl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
def SHR16m1  : I<0xD1, MRM5m, (outs), (ins i16mem:$dst),
                 "shr{w}\t$dst",
                 [(store (srl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
                 OpSize16;
def SHR32m1  : I<0xD1, MRM5m, (outs), (ins i32mem:$dst),
                 "shr{l}\t$dst",
                 [(store (srl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>,
                 OpSize32;
def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
                  "shr{q}\t$dst",
                 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>,
                 Requires<[In64BitMode]>;
} // SchedRW

let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
let Uses = [CL], SchedRW = [WriteShiftCL] in {
def SAR8rCL  : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "sar{b}\t{%cl, $dst|$dst, cl}",
                 [(set GR8:$dst, (sra GR8:$src1, CL))]>;
def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
                 "sar{w}\t{%cl, $dst|$dst, cl}",
                 [(set GR16:$dst, (sra GR16:$src1, CL))]>,
                 OpSize16;
def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
                 "sar{l}\t{%cl, $dst|$dst, cl}",
                 [(set GR32:$dst, (sra GR32:$src1, CL))]>,
                 OpSize32;
def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                 "sar{q}\t{%cl, $dst|$dst, cl}",
                 [(set GR64:$dst, (sra GR64:$src1, CL))]>;
}

def SAR8ri   : Ii8<0xC0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
                   "sar{b}\t{$src2, $dst|$dst, $src2}",
                   [(set GR8:$dst, (sra GR8:$src1, (i8 imm:$src2)))]>;
def SAR16ri  : Ii8<0xC1, MRM7r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
                   "sar{w}\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (sra GR16:$src1, (i8 imm:$src2)))]>,
                   OpSize16;
def SAR32ri  : Ii8<0xC1, MRM7r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
                   "sar{l}\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (sra GR32:$src1, (i8 imm:$src2)))]>,
                   OpSize32;
def SAR64ri  : RIi8<0xC1, MRM7r, (outs GR64:$dst),
                    (ins GR64:$src1, u8imm:$src2),
                    "sar{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;

// Shift by 1
def SAR8r1   : I<0xD0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "sar{b}\t$dst",
                 [(set GR8:$dst, (sra GR8:$src1, (i8 1)))]>;
def SAR16r1  : I<0xD1, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
                 "sar{w}\t$dst",
                 [(set GR16:$dst, (sra GR16:$src1, (i8 1)))]>, OpSize16;
def SAR32r1  : I<0xD1, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
                 "sar{l}\t$dst",
                 [(set GR32:$dst, (sra GR32:$src1, (i8 1)))]>, OpSize32;
def SAR64r1  : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                  "sar{q}\t$dst",
                  [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst", SchedRW


let Uses = [CL], SchedRW = [WriteShiftCLLd, WriteRMW] in {
def SAR8mCL  : I<0xD2, MRM7m, (outs), (ins i8mem :$dst),
                 "sar{b}\t{%cl, $dst|$dst, cl}",
                 [(store (sra (loadi8 addr:$dst), CL), addr:$dst)]>;
def SAR16mCL : I<0xD3, MRM7m, (outs), (ins i16mem:$dst),
                 "sar{w}\t{%cl, $dst|$dst, cl}",
                 [(store (sra (loadi16 addr:$dst), CL), addr:$dst)]>,
                 OpSize16;
def SAR32mCL : I<0xD3, MRM7m, (outs), (ins i32mem:$dst),
                 "sar{l}\t{%cl, $dst|$dst, cl}",
                 [(store (sra (loadi32 addr:$dst), CL), addr:$dst)]>,
                 OpSize32;
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
                 "sar{q}\t{%cl, $dst|$dst, cl}",
                 [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>,
                 Requires<[In64BitMode]>;
}

let SchedRW = [WriteShiftLd, WriteRMW] in {
def SAR8mi   : Ii8<0xC0, MRM7m, (outs), (ins i8mem :$dst, u8imm:$src),
                   "sar{b}\t{$src, $dst|$dst, $src}",
                [(store (sra (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SAR16mi  : Ii8<0xC1, MRM7m, (outs), (ins i16mem:$dst, u8imm:$src),
                   "sar{w}\t{$src, $dst|$dst, $src}",
               [(store (sra (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
               OpSize16;
def SAR32mi  : Ii8<0xC1, MRM7m, (outs), (ins i32mem:$dst, u8imm:$src),
                   "sar{l}\t{$src, $dst|$dst, $src}",
               [(store (sra (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
               OpSize32;
def SAR64mi  : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, u8imm:$src),
                    "sar{q}\t{$src, $dst|$dst, $src}",
                 [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                 Requires<[In64BitMode]>;

// Shift by 1
def SAR8m1   : I<0xD0, MRM7m, (outs), (ins i8mem :$dst),
                 "sar{b}\t$dst",
                [(store (sra (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
def SAR16m1  : I<0xD1, MRM7m, (outs), (ins i16mem:$dst),
                 "sar{w}\t$dst",
               [(store (sra (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
               OpSize16;
def SAR32m1  : I<0xD1, MRM7m, (outs), (ins i32mem:$dst),
                 "sar{l}\t$dst",
               [(store (sra (loadi32 addr:$dst), (i8 1)), addr:$dst)]>,
               OpSize32;
def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
                  "sar{q}\t$dst",
                 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>,
                 Requires<[In64BitMode]>;
} // SchedRW

//===----------------------------------------------------------------------===//
// Rotate instructions
//===----------------------------------------------------------------------===//

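// RCL/RCR rotate through the carry flag, i.e. CF participates as an extra
// bit of the rotation. That is why every form below both uses and defines
// EFLAGS and carries an empty pattern list: SelectionDAG has no generic
// rotate-through-carry node to match.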
let hasSideEffects = 0 in {
let Constraints = "$src1 = $dst", SchedRW = [WriteRotate] in {

let Uses = [CL, EFLAGS], SchedRW = [WriteRotateCL] in {
def RCL8rCL : I<0xD2, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
                "rcl{b}\t{%cl, $dst|$dst, cl}", []>;
def RCL16rCL : I<0xD3, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
                 "rcl{w}\t{%cl, $dst|$dst, cl}", []>, OpSize16;
def RCL32rCL : I<0xD3, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
                 "rcl{l}\t{%cl, $dst|$dst, cl}", []>, OpSize32;
def RCL64rCL : RI<0xD3, MRM2r, (outs GR64:$dst), (ins GR64:$src1),
                  "rcl{q}\t{%cl, $dst|$dst, cl}", []>;
} // Uses = [CL, EFLAGS]

let Uses = [EFLAGS] in {
def RCL8r1 : I<0xD0, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
               "rcl{b}\t$dst", []>;
def RCL8ri : Ii8<0xC0, MRM2r, (outs GR8:$dst), (ins GR8:$src1, u8imm:$cnt),
                 "rcl{b}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCL16r1 : I<0xD1, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
                "rcl{w}\t$dst", []>, OpSize16;
def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$cnt),
                  "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize16;
def RCL32r1 : I<0xD1, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
                "rcl{l}\t$dst", []>, OpSize32;
def RCL32ri : Ii8<0xC1, MRM2r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$cnt),
                  "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize32;
def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src1),
                 "rcl{q}\t$dst", []>;
def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src1, u8imm:$cnt),
                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;
} // Uses = [EFLAGS]

let Uses = [CL, EFLAGS], SchedRW = [WriteRotateCL] in {
def RCR8rCL : I<0xD2, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
                "rcr{b}\t{%cl, $dst|$dst, cl}", []>;
def RCR16rCL : I<0xD3, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
                 "rcr{w}\t{%cl, $dst|$dst, cl}", []>, OpSize16;
def RCR32rCL : I<0xD3, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
                 "rcr{l}\t{%cl, $dst|$dst, cl}", []>, OpSize32;
def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
                  "rcr{q}\t{%cl, $dst|$dst, cl}", []>;
} // Uses = [CL, EFLAGS]

let Uses = [EFLAGS] in {
def RCR8r1 : I<0xD0, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
               "rcr{b}\t$dst", []>;
def RCR8ri : Ii8<0xC0, MRM3r, (outs GR8:$dst), (ins GR8:$src1, u8imm:$cnt),
                 "rcr{b}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCR16r1 : I<0xD1, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
                "rcr{w}\t$dst", []>, OpSize16;
def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$cnt),
                  "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize16;
def RCR32r1 : I<0xD1, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
                "rcr{l}\t$dst", []>, OpSize32;
def RCR32ri : Ii8<0xC1, MRM3r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$cnt),
                  "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize32;
def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
                 "rcr{q}\t$dst", []>;
def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src1, u8imm:$cnt),
                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;
} // Uses = [EFLAGS]

} // Constraints = "$src1 = $dst"

let SchedRW = [WriteRotateLd, WriteRMW], mayStore = 1 in {
let Uses = [EFLAGS] in {
def RCL8m1 : I<0xD0, MRM2m, (outs), (ins i8mem:$dst),
               "rcl{b}\t$dst", []>;
def RCL8mi : Ii8<0xC0, MRM2m, (outs), (ins i8mem:$dst, u8imm:$cnt),
                 "rcl{b}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCL16m1 : I<0xD1, MRM2m, (outs), (ins i16mem:$dst),
                "rcl{w}\t$dst", []>, OpSize16;
def RCL16mi : Ii8<0xC1, MRM2m, (outs), (ins i16mem:$dst, u8imm:$cnt),
                  "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize16;
def RCL32m1 : I<0xD1, MRM2m, (outs), (ins i32mem:$dst),
                "rcl{l}\t$dst", []>, OpSize32;
def RCL32mi : Ii8<0xC1, MRM2m, (outs), (ins i32mem:$dst, u8imm:$cnt),
                  "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize32;
def RCL64m1 : RI<0xD1, MRM2m, (outs), (ins i64mem:$dst),
                 "rcl{q}\t$dst", []>, Requires<[In64BitMode]>;
def RCL64mi : RIi8<0xC1, MRM2m, (outs), (ins i64mem:$dst, u8imm:$cnt),
                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>,
                   Requires<[In64BitMode]>;

def RCR8m1 : I<0xD0, MRM3m, (outs), (ins i8mem:$dst),
               "rcr{b}\t$dst", []>;
def RCR8mi : Ii8<0xC0, MRM3m, (outs), (ins i8mem:$dst, u8imm:$cnt),
                 "rcr{b}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCR16m1 : I<0xD1, MRM3m, (outs), (ins i16mem:$dst),
                "rcr{w}\t$dst", []>, OpSize16;
def RCR16mi : Ii8<0xC1, MRM3m, (outs), (ins i16mem:$dst, u8imm:$cnt),
                  "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize16;
def RCR32m1 : I<0xD1, MRM3m, (outs), (ins i32mem:$dst),
                "rcr{l}\t$dst", []>, OpSize32;
def RCR32mi : Ii8<0xC1, MRM3m, (outs), (ins i32mem:$dst, u8imm:$cnt),
                  "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize32;
def RCR64m1 : RI<0xD1, MRM3m, (outs), (ins i64mem:$dst),
                 "rcr{q}\t$dst", []>, Requires<[In64BitMode]>;
def RCR64mi : RIi8<0xC1, MRM3m, (outs), (ins i64mem:$dst, u8imm:$cnt),
                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>,
                   Requires<[In64BitMode]>;
} // Uses = [EFLAGS]

let Uses = [CL, EFLAGS], SchedRW = [WriteRotateCLLd, WriteRMW] in {
def RCL8mCL : I<0xD2, MRM2m, (outs), (ins i8mem:$dst),
                "rcl{b}\t{%cl, $dst|$dst, cl}", []>;
def RCL16mCL : I<0xD3, MRM2m, (outs), (ins i16mem:$dst),
                 "rcl{w}\t{%cl, $dst|$dst, cl}", []>, OpSize16;
def RCL32mCL : I<0xD3, MRM2m, (outs), (ins i32mem:$dst),
                 "rcl{l}\t{%cl, $dst|$dst, cl}", []>, OpSize32;
def RCL64mCL : RI<0xD3, MRM2m, (outs), (ins i64mem:$dst),
                  "rcl{q}\t{%cl, $dst|$dst, cl}", []>,
                  Requires<[In64BitMode]>;

def RCR8mCL : I<0xD2, MRM3m, (outs), (ins i8mem:$dst),
                "rcr{b}\t{%cl, $dst|$dst, cl}", []>;
def RCR16mCL : I<0xD3, MRM3m, (outs), (ins i16mem:$dst),
                 "rcr{w}\t{%cl, $dst|$dst, cl}", []>, OpSize16;
def RCR32mCL : I<0xD3, MRM3m, (outs), (ins i32mem:$dst),
                 "rcr{l}\t{%cl, $dst|$dst, cl}", []>, OpSize32;
def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst),
                  "rcr{q}\t{%cl, $dst|$dst, cl}", []>,
                  Requires<[In64BitMode]>;
} // Uses = [CL, EFLAGS]
} // SchedRW
} // hasSideEffects = 0

let Constraints = "$src1 = $dst", SchedRW = [WriteRotate] in {
// FIXME: provide shorter instructions when imm8 == 1
let Uses = [CL], SchedRW = [WriteRotateCL] in {
def ROL8rCL  : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "rol{b}\t{%cl, $dst|$dst, cl}",
                 [(set GR8:$dst, (rotl GR8:$src1, CL))]>;
def ROL16rCL : I<0xD3, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
                 "rol{w}\t{%cl, $dst|$dst, cl}",
                 [(set GR16:$dst, (rotl GR16:$src1, CL))]>, OpSize16;
def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
                 "rol{l}\t{%cl, $dst|$dst, cl}",
                 [(set GR32:$dst, (rotl GR32:$src1, CL))]>, OpSize32;
def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                  "rol{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (rotl GR64:$src1, CL))]>;
}

def ROL8ri   : Ii8<0xC0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
                   "rol{b}\t{$src2, $dst|$dst, $src2}",
                   [(set GR8:$dst, (rotl GR8:$src1, (i8 relocImm:$src2)))]>;
def ROL16ri  : Ii8<0xC1, MRM0r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
                   "rol{w}\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (rotl GR16:$src1, (i8 relocImm:$src2)))]>,
                   OpSize16;
def ROL32ri  : Ii8<0xC1, MRM0r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
                   "rol{l}\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (rotl GR32:$src1, (i8 relocImm:$src2)))]>,
                   OpSize32;
def ROL64ri  : RIi8<0xC1, MRM0r, (outs GR64:$dst),
                    (ins GR64:$src1, u8imm:$src2),
                    "rol{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotl GR64:$src1, (i8 relocImm:$src2)))]>;

// Rotate by 1
def ROL8r1   : I<0xD0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "rol{b}\t$dst",
                 [(set GR8:$dst, (rotl GR8:$src1, (i8 1)))]>;
def ROL16r1  : I<0xD1, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
                 "rol{w}\t$dst",
                 [(set GR16:$dst, (rotl GR16:$src1, (i8 1)))]>, OpSize16;
def ROL32r1  : I<0xD1, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
                 "rol{l}\t$dst",
                 [(set GR32:$dst, (rotl GR32:$src1, (i8 1)))]>, OpSize32;
def ROL64r1  : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                  "rol{q}\t$dst",
                  [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst", SchedRW

let Uses = [CL], SchedRW = [WriteRotateCLLd, WriteRMW] in {
def ROL8mCL  : I<0xD2, MRM0m, (outs), (ins i8mem :$dst),
                 "rol{b}\t{%cl, $dst|$dst, cl}",
                 [(store (rotl (loadi8 addr:$dst), CL), addr:$dst)]>;
def ROL16mCL : I<0xD3, MRM0m, (outs), (ins i16mem:$dst),
                 "rol{w}\t{%cl, $dst|$dst, cl}",
                 [(store (rotl (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize16;
def ROL32mCL : I<0xD3, MRM0m, (outs), (ins i32mem:$dst),
                 "rol{l}\t{%cl, $dst|$dst, cl}",
                 [(store (rotl (loadi32 addr:$dst), CL), addr:$dst)]>, OpSize32;
def ROL64mCL :  RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
                   "rol{q}\t{%cl, $dst|$dst, cl}",
                   [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>,
                   Requires<[In64BitMode]>;
}

let SchedRW = [WriteRotateLd, WriteRMW] in {
def ROL8mi   : Ii8<0xC0, MRM0m, (outs), (ins i8mem :$dst, u8imm:$src1),
                   "rol{b}\t{$src1, $dst|$dst, $src1}",
               [(store (rotl (loadi8 addr:$dst), (i8 imm:$src1)), addr:$dst)]>;
def ROL16mi  : Ii8<0xC1, MRM0m, (outs), (ins i16mem:$dst, u8imm:$src1),
                   "rol{w}\t{$src1, $dst|$dst, $src1}",
              [(store (rotl (loadi16 addr:$dst), (i8 imm:$src1)), addr:$dst)]>,
              OpSize16;
def ROL32mi  : Ii8<0xC1, MRM0m, (outs), (ins i32mem:$dst, u8imm:$src1),
                   "rol{l}\t{$src1, $dst|$dst, $src1}",
              [(store (rotl (loadi32 addr:$dst), (i8 imm:$src1)), addr:$dst)]>,
              OpSize32;
def ROL64mi  : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, u8imm:$src1),
                    "rol{q}\t{$src1, $dst|$dst, $src1}",
                [(store (rotl (loadi64 addr:$dst), (i8 imm:$src1)), addr:$dst)]>,
                Requires<[In64BitMode]>;

// Rotate by 1
def ROL8m1   : I<0xD0, MRM0m, (outs), (ins i8mem :$dst),
                 "rol{b}\t$dst",
                 [(store (rotl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
def ROL16m1  : I<0xD1, MRM0m, (outs), (ins i16mem:$dst),
                 "rol{w}\t$dst",
                 [(store (rotl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
                 OpSize16;
def ROL32m1  : I<0xD1, MRM0m, (outs), (ins i32mem:$dst),
                 "rol{l}\t$dst",
                 [(store (rotl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>,
                 OpSize32;
def ROL64m1  : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
                 "rol{q}\t$dst",
                 [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>,
                 Requires<[In64BitMode]>;
} // SchedRW

let Constraints = "$src1 = $dst", SchedRW = [WriteRotate] in {
let Uses = [CL], SchedRW = [WriteRotateCL] in {
def ROR8rCL  : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "ror{b}\t{%cl, $dst|$dst, cl}",
                 [(set GR8:$dst, (rotr GR8:$src1, CL))]>;
def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
                 "ror{w}\t{%cl, $dst|$dst, cl}",
                 [(set GR16:$dst, (rotr GR16:$src1, CL))]>, OpSize16;
def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
                 "ror{l}\t{%cl, $dst|$dst, cl}",
                 [(set GR32:$dst, (rotr GR32:$src1, CL))]>, OpSize32;
def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                  "ror{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (rotr GR64:$src1, CL))]>;
}

def ROR8ri   : Ii8<0xC0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
                   "ror{b}\t{$src2, $dst|$dst, $src2}",
                   [(set GR8:$dst, (rotr GR8:$src1, (i8 relocImm:$src2)))]>;
def ROR16ri  : Ii8<0xC1, MRM1r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
                   "ror{w}\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (rotr GR16:$src1, (i8 relocImm:$src2)))]>,
                   OpSize16;
def ROR32ri  : Ii8<0xC1, MRM1r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
                   "ror{l}\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (rotr GR32:$src1, (i8 relocImm:$src2)))]>,
                   OpSize32;
def ROR64ri  : RIi8<0xC1, MRM1r, (outs GR64:$dst),
                    (ins GR64:$src1, u8imm:$src2),
                    "ror{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotr GR64:$src1, (i8 relocImm:$src2)))]>;

// Rotate by 1
def ROR8r1   : I<0xD0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "ror{b}\t$dst",
                 [(set GR8:$dst, (rotr GR8:$src1, (i8 1)))]>;
def ROR16r1  : I<0xD1, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
                 "ror{w}\t$dst",
                 [(set GR16:$dst, (rotr GR16:$src1, (i8 1)))]>, OpSize16;
def ROR32r1  : I<0xD1, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
                 "ror{l}\t$dst",
                 [(set GR32:$dst, (rotr GR32:$src1, (i8 1)))]>, OpSize32;
def ROR64r1  : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                  "ror{q}\t$dst",
                  [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst", SchedRW

let Uses = [CL], SchedRW = [WriteRotateCLLd, WriteRMW] in {
def ROR8mCL  : I<0xD2, MRM1m, (outs), (ins i8mem :$dst),
                 "ror{b}\t{%cl, $dst|$dst, cl}",
                 [(store (rotr (loadi8 addr:$dst), CL), addr:$dst)]>;
def ROR16mCL : I<0xD3, MRM1m, (outs), (ins i16mem:$dst),
                 "ror{w}\t{%cl, $dst|$dst, cl}",
                 [(store (rotr (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize16;
def ROR32mCL : I<0xD3, MRM1m, (outs), (ins i32mem:$dst),
                 "ror{l}\t{%cl, $dst|$dst, cl}",
                 [(store (rotr (loadi32 addr:$dst), CL), addr:$dst)]>, OpSize32;
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t{%cl, $dst|$dst, cl}",
                  [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Requires<[In64BitMode]>;
}

let SchedRW = [WriteRotateLd, WriteRMW] in {
def ROR8mi   : Ii8<0xC0, MRM1m, (outs), (ins i8mem :$dst, u8imm:$src),
                   "ror{b}\t{$src, $dst|$dst, $src}",
                   [(store (rotr (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROR16mi  : Ii8<0xC1, MRM1m, (outs), (ins i16mem:$dst, u8imm:$src),
                   "ror{w}\t{$src, $dst|$dst, $src}",
                   [(store (rotr (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                   OpSize16;
def ROR32mi  : Ii8<0xC1, MRM1m, (outs), (ins i32mem:$dst, u8imm:$src),
                   "ror{l}\t{$src, $dst|$dst, $src}",
                   [(store (rotr (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                   OpSize32;
def ROR64mi  : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, u8imm:$src),
                    "ror{q}\t{$src, $dst|$dst, $src}",
                    [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                    Requires<[In64BitMode]>;

// Rotate by 1
def ROR8m1   : I<0xD0, MRM1m, (outs), (ins i8mem :$dst),
                 "ror{b}\t$dst",
                 [(store (rotr (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
def ROR16m1  : I<0xD1, MRM1m, (outs), (ins i16mem:$dst),
                 "ror{w}\t$dst",
                 [(store (rotr (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
                 OpSize16;
def ROR32m1  : I<0xD1, MRM1m, (outs), (ins i32mem:$dst),
                 "ror{l}\t$dst",
                 [(store (rotr (loadi32 addr:$dst), (i8 1)), addr:$dst)]>,
                 OpSize32;
def ROR64m1  : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
                 "ror{q}\t$dst",
                 [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>,
                 Requires<[In64BitMode]>;
} // SchedRW


//===----------------------------------------------------------------------===//
// Double shift instructions (generalizations of rotate)
//===----------------------------------------------------------------------===//

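// SHLD shifts $dst left by the count and fills the vacated low bits from
// the high bits of $src2; SHRD is the mirror image. In DAG terms (32-bit
// shown, counts taken modulo the operand size):
//   X86shld(x, y, c) = (x << c) | (y >> (32 - c))
//   X86shrd(x, y, c) = (x >> c) | (y << (32 - c))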
let Constraints = "$src1 = $dst" in {

let Uses = [CL], SchedRW = [WriteSHDrrcl] in {
def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst),
                   (ins GR16:$src1, GR16:$src2),
                   "shld{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, CL))]>,
                   TB, OpSize16;
def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst),
                   (ins GR16:$src1, GR16:$src2),
                   "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, CL))]>,
                   TB, OpSize16;
def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst),
                   (ins GR32:$src1, GR32:$src2),
                   "shld{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, CL))]>,
                   TB, OpSize32;
def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst),
                   (ins GR32:$src1, GR32:$src2),
                   "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, CL))]>,
                   TB, OpSize32;
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>,
                    TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>,
                    TB;
} // SchedRW

let isCommutable = 1, SchedRW = [WriteSHDrri] in {  // These instructions commute to each other.
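// The same value can be computed with the opposite opcode, swapped sources
// and a complemented count, e.g. for i16:
//   X86shld(x, y, c) == X86shrd(y, x, 16 - c)
// which is what lets the two-address pass pick either operand as the
// destination.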
def SHLD16rri8 : Ii8<0xA4, MRMDestReg,
                     (outs GR16:$dst),
                     (ins GR16:$src1, GR16:$src2, u8imm:$src3),
                     "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2,
                                      (i8 imm:$src3)))]>,
                     TB, OpSize16;
def SHRD16rri8 : Ii8<0xAC, MRMDestReg,
                     (outs GR16:$dst),
                     (ins GR16:$src1, GR16:$src2, u8imm:$src3),
                     "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2,
                                      (i8 imm:$src3)))]>,
                     TB, OpSize16;
def SHLD32rri8 : Ii8<0xA4, MRMDestReg,
                     (outs GR32:$dst),
                     (ins GR32:$src1, GR32:$src2, u8imm:$src3),
                     "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2,
                                      (i8 imm:$src3)))]>,
                 TB, OpSize32;
def SHRD32rri8 : Ii8<0xAC, MRMDestReg,
                     (outs GR32:$dst),
                     (ins GR32:$src1, GR32:$src2, u8imm:$src3),
                     "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2,
                                      (i8 imm:$src3)))]>,
                 TB, OpSize32;
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, u8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
                                       (i8 imm:$src3)))]>,
                 TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, u8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
                                       (i8 imm:$src3)))]>,
                 TB;
} // SchedRW
} // Constraints = "$src1 = $dst"

let Uses = [CL], SchedRW = [WriteSHDmrcl] in {
def SHLD16mrCL : I<0xA5, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                   "shld{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(store (X86shld (loadi16 addr:$dst), GR16:$src2, CL),
                     addr:$dst)]>, TB, OpSize16;
def SHRD16mrCL : I<0xAD, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                  "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                  [(store (X86shrd (loadi16 addr:$dst), GR16:$src2, CL),
                    addr:$dst)]>, TB, OpSize16;

def SHLD32mrCL : I<0xA5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                   "shld{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(store (X86shld (loadi32 addr:$dst), GR32:$src2, CL),
                     addr:$dst)]>, TB, OpSize32;
def SHRD32mrCL : I<0xAD, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                  "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                  [(store (X86shrd (loadi32 addr:$dst), GR32:$src2, CL),
                    addr:$dst)]>, TB, OpSize32;

def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
} // SchedRW

let SchedRW = [WriteSHDmri] in {
def SHLD16mri8 : Ii8<0xA4, MRMDestMem,
                    (outs), (ins i16mem:$dst, GR16:$src2, u8imm:$src3),
                    "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    [(store (X86shld (loadi16 addr:$dst), GR16:$src2,
                                      (i8 imm:$src3)), addr:$dst)]>,
                    TB, OpSize16;
def SHRD16mri8 : Ii8<0xAC, MRMDestMem,
                     (outs), (ins i16mem:$dst, GR16:$src2, u8imm:$src3),
                     "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    [(store (X86shrd (loadi16 addr:$dst), GR16:$src2,
                                      (i8 imm:$src3)), addr:$dst)]>,
                     TB, OpSize16;

def SHLD32mri8 : Ii8<0xA4, MRMDestMem,
                    (outs), (ins i32mem:$dst, GR32:$src2, u8imm:$src3),
                    "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    [(store (X86shld (loadi32 addr:$dst), GR32:$src2,
                                      (i8 imm:$src3)), addr:$dst)]>,
                    TB, OpSize32;
def SHRD32mri8 : Ii8<0xAC, MRMDestMem,
                     (outs), (ins i32mem:$dst, GR32:$src2, u8imm:$src3),
                     "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(store (X86shrd (loadi32 addr:$dst), GR32:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                     TB, OpSize32;

def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, u8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, u8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
} // SchedRW

} // Defs = [EFLAGS]

// Use the opposite rotate if it allows us to use the rotate by 1 instruction.
def : Pat<(rotl GR8:$src1,  (i8 7)),  (ROR8r1  GR8:$src1)>;
def : Pat<(rotl GR16:$src1, (i8 15)), (ROR16r1 GR16:$src1)>;
def : Pat<(rotl GR32:$src1, (i8 31)), (ROR32r1 GR32:$src1)>;
def : Pat<(rotl GR64:$src1, (i8 63)), (ROR64r1 GR64:$src1)>;
def : Pat<(rotr GR8:$src1,  (i8 7)),  (ROL8r1  GR8:$src1)>;
def : Pat<(rotr GR16:$src1, (i8 15)), (ROL16r1 GR16:$src1)>;
def : Pat<(rotr GR32:$src1, (i8 31)), (ROL32r1 GR32:$src1)>;
def : Pat<(rotr GR64:$src1, (i8 63)), (ROL64r1 GR64:$src1)>;

def : Pat<(store (rotl (loadi8 addr:$dst), (i8 7)), addr:$dst),
          (ROR8m1 addr:$dst)>;
def : Pat<(store (rotl (loadi16 addr:$dst), (i8 15)), addr:$dst),
          (ROR16m1 addr:$dst)>;
def : Pat<(store (rotl (loadi32 addr:$dst), (i8 31)), addr:$dst),
          (ROR32m1 addr:$dst)>;
def : Pat<(store (rotl (loadi64 addr:$dst), (i8 63)), addr:$dst),
          (ROR64m1 addr:$dst)>, Requires<[In64BitMode]>;

def : Pat<(store (rotr (loadi8 addr:$dst), (i8 7)), addr:$dst),
          (ROL8m1 addr:$dst)>;
def : Pat<(store (rotr (loadi16 addr:$dst), (i8 15)), addr:$dst),
          (ROL16m1 addr:$dst)>;
def : Pat<(store (rotr (loadi32 addr:$dst), (i8 31)), addr:$dst),
          (ROL32m1 addr:$dst)>;
def : Pat<(store (rotr (loadi64 addr:$dst), (i8 63)), addr:$dst),
          (ROL64m1 addr:$dst)>, Requires<[In64BitMode]>;

// Sandy Bridge and newer Intel processors support faster rotates using
// SHLD to avoid a partial flag update on the normal rotate instructions.
// Use a pseudo so that TwoAddressInstructionPass and register allocation
// will see this as a unary instruction.
let Predicates = [HasFastSHLDRotate], AddedComplexity = 5,
    Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteSHDrri],
    Constraints = "$src1 = $dst" in {
  def SHLDROT32ri  : I<0, Pseudo, (outs GR32:$dst),
                       (ins GR32:$src1, u8imm:$shamt), "",
                     [(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$shamt)))]>;
  def SHLDROT64ri  : I<0, Pseudo, (outs GR64:$dst),
                       (ins GR64:$src1, u8imm:$shamt), "",
                     [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$shamt)))]>;

  def SHRDROT32ri  : I<0, Pseudo, (outs GR32:$dst),
                       (ins GR32:$src1, u8imm:$shamt), "",
                     [(set GR32:$dst, (rotr GR32:$src1, (i8 imm:$shamt)))]>;
  def SHRDROT64ri  : I<0, Pseudo, (outs GR64:$dst),
                       (ins GR64:$src1, u8imm:$shamt), "",
                     [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$shamt)))]>;
}
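// The pseudos are expanded after register allocation into SHLD/SHRD with
// both source operands tied to the same register: "shld $n, %reg, %reg" is
// exactly a rotate left by n, and "shrd $n, %reg, %reg" a rotate right.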

def ROT32L2R_imm8  : SDNodeXForm<imm, [{
  // Convert a ROTL shamt to a ROTR shamt on 32-bit integer.
  return getI8Imm(32 - N->getZExtValue(), SDLoc(N));
}]>;

def ROT64L2R_imm8  : SDNodeXForm<imm, [{
  // Convert a ROTL shamt to a ROTR shamt on 64-bit integer.
  return getI8Imm(64 - N->getZExtValue(), SDLoc(N));
}]>;
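// Example: (rotl GR32:$x, (i8 5)) is selected below as (RORX32ri $x, 27),
// since a left rotate by n is a right rotate by 32 - n (or 64 - n).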

// NOTE: We use WriteShift for these rotates as they avoid the stalls
// of many of the older x86 rotate instructions.
multiclass bmi_rotate<string asm, RegisterClass RC, X86MemOperand x86memop> {
let hasSideEffects = 0 in {
  def ri : Ii8<0xF0, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, u8imm:$src2),
               !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               []>, TAXD, VEX, Sched<[WriteShift]>;
  let mayLoad = 1 in
  def mi : Ii8<0xF0, MRMSrcMem, (outs RC:$dst),
               (ins x86memop:$src1, u8imm:$src2),
               !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               []>, TAXD, VEX, Sched<[WriteShiftLd]>;
}
}

multiclass bmi_shift<string asm, RegisterClass RC, X86MemOperand x86memop> {
let hasSideEffects = 0 in {
  def rr : I<0xF7, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
             !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
             VEX, Sched<[WriteShift]>;
  let mayLoad = 1 in
  def rm : I<0xF7, MRMSrcMem4VOp3,
             (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
             !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
             VEX, Sched<[WriteShift.Folded,
                         // x86memop:$src1
                         ReadDefault, ReadDefault, ReadDefault, ReadDefault,
                         ReadDefault,
                         // RC:$src2
                         WriteShift.ReadAfterFold]>;
}
}

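// Prefix-map legend for the defms below: TAXD is the 0F 3A opcode map with
// an F2 prefix (RORX is VEX.F2.0F3A F0 /r ib); T8XS, T8XD and T8PD are the
// 0F 38 map with F3, F2 and 66 prefixes (SARX/SHRX/SHLX are
// VEX.F3/F2/66.0F38 F7 /r).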
let Predicates = [HasBMI2] in {
  defm RORX32 : bmi_rotate<"rorx{l}", GR32, i32mem>;
  defm RORX64 : bmi_rotate<"rorx{q}", GR64, i64mem>, VEX_W;
  defm SARX32 : bmi_shift<"sarx{l}", GR32, i32mem>, T8XS;
  defm SARX64 : bmi_shift<"sarx{q}", GR64, i64mem>, T8XS, VEX_W;
  defm SHRX32 : bmi_shift<"shrx{l}", GR32, i32mem>, T8XD;
  defm SHRX64 : bmi_shift<"shrx{q}", GR64, i64mem>, T8XD, VEX_W;
  defm SHLX32 : bmi_shift<"shlx{l}", GR32, i32mem>, T8PD;
  defm SHLX64 : bmi_shift<"shlx{q}", GR64, i64mem>, T8PD, VEX_W;

  // Prefer RORX which is non-destructive and doesn't update EFLAGS.
  let AddedComplexity = 10 in {
    def : Pat<(rotr GR32:$src, (i8 imm:$shamt)),
              (RORX32ri GR32:$src, imm:$shamt)>;
    def : Pat<(rotr GR64:$src, (i8 imm:$shamt)),
              (RORX64ri GR64:$src, imm:$shamt)>;

    def : Pat<(rotl GR32:$src, (i8 imm:$shamt)),
              (RORX32ri GR32:$src, (ROT32L2R_imm8 imm:$shamt))>;
    def : Pat<(rotl GR64:$src, (i8 imm:$shamt)),
              (RORX64ri GR64:$src, (ROT64L2R_imm8 imm:$shamt))>;
  }

  def : Pat<(rotr (loadi32 addr:$src), (i8 imm:$shamt)),
            (RORX32mi addr:$src, imm:$shamt)>;
  def : Pat<(rotr (loadi64 addr:$src), (i8 imm:$shamt)),
            (RORX64mi addr:$src, imm:$shamt)>;

  def : Pat<(rotl (loadi32 addr:$src), (i8 imm:$shamt)),
            (RORX32mi addr:$src, (ROT32L2R_imm8 imm:$shamt))>;
  def : Pat<(rotl (loadi64 addr:$src), (i8 imm:$shamt)),
            (RORX64mi addr:$src, (ROT64L2R_imm8 imm:$shamt))>;

  // Prefer SARX/SHRX/SHLX over SAR/SHR/SHL with a variable shift count, but
  // not with an immediate shift; i.e., the following code is considered
  // better:
  //
  //  mov %edi, %esi
  //  shl $imm, %esi
  //  ... %edi, ...
  //
  // than
  //
  //  movb $imm, %sil
  //  shlx %sil, %edi, %esi
  //  ... %edi, ...
  //
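  // The INSERT_SUBREG/IMPLICIT_DEF idiom below widens the i8 shift amount
  // into the 32/64-bit count register these instructions take; the upper
  // bits are left undefined, which is safe because the hardware masks the
  // count to 5 bits (32-bit) or 6 bits (64-bit).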
  let AddedComplexity = 1 in {
    def : Pat<(sra GR32:$src1, GR8:$src2),
              (SARX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(sra GR64:$src1, GR8:$src2),
              (SARX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(srl GR32:$src1, GR8:$src2),
              (SHRX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(srl GR64:$src1, GR8:$src2),
              (SHRX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(shl GR32:$src1, GR8:$src2),
              (SHLX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(shl GR64:$src1, GR8:$src2),
              (SHLX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  }

  // We prefer to use
  //  mov (%ecx), %esi
  //  shl $imm, %esi
  //
  // over
  //
  //  movb $imm, %al
  //  shlx %al, (%ecx), %esi
  //
  // This priority is enforced by IsProfitableToFoldLoad.
  def : Pat<(sra (loadi32 addr:$src1), GR8:$src2),
            (SARX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(sra (loadi64 addr:$src1), GR8:$src2),
            (SARX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(srl (loadi32 addr:$src1), GR8:$src2),
            (SHRX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(srl (loadi64 addr:$src1), GR8:$src2),
            (SHRX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(shl (loadi32 addr:$src1), GR8:$src2),
            (SHLX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(shl (loadi64 addr:$src1), GR8:$src2),
            (SHLX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
}