xref: /freebsd/contrib/llvm-project/llvm/lib/Target/X86/X86InstrShiftRotate.td (revision 1db9f3b21e39176dd5b67cf8ac378633b172463e)
1//===-- X86InstrShiftRotate.td - Shift and Rotate Instrs ---*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file describes the shift and rotate instructions.
10//
11//===----------------------------------------------------------------------===//
12
13// FIXME: Someone needs to smear multipattern goodness all over this file.
14
15let Defs = [EFLAGS], hasSideEffects = 0 in {
16
// SHL (logical shift left), register destination.  Three count forms per
// width: variable count in CL (opcodes 0xD2/0xD3, /4 opcode extension via
// MRM4r), 8-bit immediate count (0xC0/0xC1 /4), and the short shift-by-1
// encoding (0xD0/0xD1 /4).  $dst is tied to $src1; EFLAGS is clobbered by
// the enclosing "Defs = [EFLAGS]" scope.
17let Constraints = "$src1 = $dst" in {
18let Uses = [CL], SchedRW = [WriteShiftCL] in {
19def SHL8rCL  : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1),
20                 "shl{b}\t{%cl, $dst|$dst, cl}",
21                 [(set GR8:$dst, (shl GR8:$src1, CL))]>;
22def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
23                 "shl{w}\t{%cl, $dst|$dst, cl}",
24                 [(set GR16:$dst, (shl GR16:$src1, CL))]>, OpSize16;
25def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
26                 "shl{l}\t{%cl, $dst|$dst, cl}",
27                 [(set GR32:$dst, (shl GR32:$src1, CL))]>, OpSize32;
28def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
29                  "shl{q}\t{%cl, $dst|$dst, cl}",
30                  [(set GR64:$dst, (shl GR64:$src1, CL))]>;
31} // Uses = [CL], SchedRW
32
// Immediate-count forms.  These can be converted to LEA when profitable
// (three-address transform), hence isConvertibleToThreeAddress.
33let SchedRW = [WriteShift] in {
34let isConvertibleToThreeAddress = 1 in {   // Can transform into LEA.
35def SHL8ri   : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
36                   "shl{b}\t{$src2, $dst|$dst, $src2}",
37                   [(set GR8:$dst, (shl GR8:$src1, (i8 imm:$src2)))]>;
38
39def SHL16ri  : Ii8<0xC1, MRM4r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
40                   "shl{w}\t{$src2, $dst|$dst, $src2}",
41                   [(set GR16:$dst, (shl GR16:$src1, (i8 imm:$src2)))]>,
42                   OpSize16;
43def SHL32ri  : Ii8<0xC1, MRM4r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
44                   "shl{l}\t{$src2, $dst|$dst, $src2}",
45                   [(set GR32:$dst, (shl GR32:$src1, (i8 imm:$src2)))]>,
46                   OpSize32;
47def SHL64ri  : RIi8<0xC1, MRM4r, (outs GR64:$dst),
48                    (ins GR64:$src1, u8imm:$src2),
49                    "shl{q}\t{$src2, $dst|$dst, $src2}",
50                    [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
51} // isConvertibleToThreeAddress = 1
52
// Shift by 1 (short 0xD0/0xD1 encodings; no ISel pattern attached).
53def SHL8r1   : I<0xD0, MRM4r, (outs GR8:$dst), (ins GR8:$src1),
54                 "shl{b}\t$dst", []>;
55def SHL16r1  : I<0xD1, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
56                 "shl{w}\t$dst", []>, OpSize16;
57def SHL32r1  : I<0xD1, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
58                 "shl{l}\t$dst", []>, OpSize32;
59def SHL64r1  : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
60                 "shl{q}\t$dst", []>;
61} // SchedRW
62} // Constraints = "$src1 = $dst"
63
64// FIXME: Why do we need an explicit "Uses = [CL]" when the instr has a pattern
65// using CL?
// SHL, memory destination, count in CL: load-shift-store RMW patterns.
// 64-bit memory form requires 64-bit mode (REX.W prefix via the RI class).
66let Uses = [CL], SchedRW = [WriteShiftCLLd, WriteRMW] in {
67def SHL8mCL  : I<0xD2, MRM4m, (outs), (ins i8mem :$dst),
68                 "shl{b}\t{%cl, $dst|$dst, cl}",
69                 [(store (shl (loadi8 addr:$dst), CL), addr:$dst)]>;
70def SHL16mCL : I<0xD3, MRM4m, (outs), (ins i16mem:$dst),
71                 "shl{w}\t{%cl, $dst|$dst, cl}",
72                 [(store (shl (loadi16 addr:$dst), CL), addr:$dst)]>,
73                 OpSize16;
74def SHL32mCL : I<0xD3, MRM4m, (outs), (ins i32mem:$dst),
75                 "shl{l}\t{%cl, $dst|$dst, cl}",
76                 [(store (shl (loadi32 addr:$dst), CL), addr:$dst)]>,
77                 OpSize32;
78def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
79                  "shl{q}\t{%cl, $dst|$dst, cl}",
80                  [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>,
81                  Requires<[In64BitMode]>;
82} // Uses, SchedRW
83
// SHL, memory destination, immediate count (RMW), plus the short
// shift-by-1 memory encodings (no ISel pattern attached to the *m1 forms).
84let SchedRW = [WriteShiftLd, WriteRMW], mayLoad = 1, mayStore = 1 in {
85def SHL8mi   : Ii8<0xC0, MRM4m, (outs), (ins i8mem :$dst, u8imm:$src),
86                   "shl{b}\t{$src, $dst|$dst, $src}",
87                [(store (shl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
88def SHL16mi  : Ii8<0xC1, MRM4m, (outs), (ins i16mem:$dst, u8imm:$src),
89                   "shl{w}\t{$src, $dst|$dst, $src}",
90               [(store (shl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
91               OpSize16;
92def SHL32mi  : Ii8<0xC1, MRM4m, (outs), (ins i32mem:$dst, u8imm:$src),
93                   "shl{l}\t{$src, $dst|$dst, $src}",
94               [(store (shl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
95               OpSize32;
96def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, u8imm:$src),
97                  "shl{q}\t{$src, $dst|$dst, $src}",
98                  [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
99                  Requires<[In64BitMode]>;
100
101// Shift by 1
102def SHL8m1   : I<0xD0, MRM4m, (outs), (ins i8mem :$dst),
103                 "shl{b}\t$dst", []>;
104def SHL16m1  : I<0xD1, MRM4m, (outs), (ins i16mem:$dst),
105                 "shl{w}\t$dst", []>, OpSize16;
106def SHL32m1  : I<0xD1, MRM4m, (outs), (ins i32mem:$dst),
107                 "shl{l}\t$dst", []>, OpSize32;
108def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
109                  "shl{q}\t$dst", []>, Requires<[In64BitMode]>;
110} // SchedRW, mayLoad, mayStore
111
// SHR (logical shift right), register destination.  Same layout as the SHL
// group above but with the /5 opcode extension (MRM5r) and srl patterns.
// Note: no isConvertibleToThreeAddress here — SHR cannot become LEA.
112let Constraints = "$src1 = $dst" in {
113let Uses = [CL], SchedRW = [WriteShiftCL] in {
114def SHR8rCL  : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src1),
115                 "shr{b}\t{%cl, $dst|$dst, cl}",
116                 [(set GR8:$dst, (srl GR8:$src1, CL))]>;
117def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
118                 "shr{w}\t{%cl, $dst|$dst, cl}",
119                 [(set GR16:$dst, (srl GR16:$src1, CL))]>, OpSize16;
120def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
121                 "shr{l}\t{%cl, $dst|$dst, cl}",
122                 [(set GR32:$dst, (srl GR32:$src1, CL))]>, OpSize32;
123def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
124                  "shr{q}\t{%cl, $dst|$dst, cl}",
125                  [(set GR64:$dst, (srl GR64:$src1, CL))]>;
126} // Uses, SchedRW
127
128let SchedRW = [WriteShift] in {
129def SHR8ri   : Ii8<0xC0, MRM5r, (outs GR8:$dst), (ins GR8:$src1, u8imm:$src2),
130                   "shr{b}\t{$src2, $dst|$dst, $src2}",
131                   [(set GR8:$dst, (srl GR8:$src1, (i8 imm:$src2)))]>;
132def SHR16ri  : Ii8<0xC1, MRM5r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
133                   "shr{w}\t{$src2, $dst|$dst, $src2}",
134                   [(set GR16:$dst, (srl GR16:$src1, (i8 imm:$src2)))]>,
135                   OpSize16;
136def SHR32ri  : Ii8<0xC1, MRM5r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
137                   "shr{l}\t{$src2, $dst|$dst, $src2}",
138                   [(set GR32:$dst, (srl GR32:$src1, (i8 imm:$src2)))]>,
139                   OpSize32;
140def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, u8imm:$src2),
141                  "shr{q}\t{$src2, $dst|$dst, $src2}",
142                  [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
143
144// Shift right by 1
145def SHR8r1   : I<0xD0, MRM5r, (outs GR8:$dst), (ins GR8:$src1),
146                 "shr{b}\t$dst", []>;
147def SHR16r1  : I<0xD1, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
148                 "shr{w}\t$dst", []>, OpSize16;
149def SHR32r1  : I<0xD1, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
150                 "shr{l}\t$dst", []>, OpSize32;
151def SHR64r1  : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
152                 "shr{q}\t$dst", []>;
153} // SchedRW
154} // Constraints = "$src1 = $dst"
155
156
// SHR, memory destination, count in CL: load-shift-store RMW patterns.
157let Uses = [CL], SchedRW = [WriteShiftCLLd, WriteRMW] in {
158def SHR8mCL  : I<0xD2, MRM5m, (outs), (ins i8mem :$dst),
159                 "shr{b}\t{%cl, $dst|$dst, cl}",
160                 [(store (srl (loadi8 addr:$dst), CL), addr:$dst)]>;
161def SHR16mCL : I<0xD3, MRM5m, (outs), (ins i16mem:$dst),
162                 "shr{w}\t{%cl, $dst|$dst, cl}",
163                 [(store (srl (loadi16 addr:$dst), CL), addr:$dst)]>,
164                 OpSize16;
165def SHR32mCL : I<0xD3, MRM5m, (outs), (ins i32mem:$dst),
166                 "shr{l}\t{%cl, $dst|$dst, cl}",
167                 [(store (srl (loadi32 addr:$dst), CL), addr:$dst)]>,
168                 OpSize32;
169def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
170                  "shr{q}\t{%cl, $dst|$dst, cl}",
171                  [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>,
172                  Requires<[In64BitMode]>;
173} // Uses, SchedRW
174
// SHR, memory destination, immediate count (RMW), plus short shift-by-1
// memory encodings (no ISel pattern attached to the *m1 forms).
175let SchedRW = [WriteShiftLd, WriteRMW], mayLoad = 1, mayStore = 1 in {
176def SHR8mi   : Ii8<0xC0, MRM5m, (outs), (ins i8mem :$dst, u8imm:$src),
177                   "shr{b}\t{$src, $dst|$dst, $src}",
178                [(store (srl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
179def SHR16mi  : Ii8<0xC1, MRM5m, (outs), (ins i16mem:$dst, u8imm:$src),
180                   "shr{w}\t{$src, $dst|$dst, $src}",
181               [(store (srl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
182               OpSize16;
183def SHR32mi  : Ii8<0xC1, MRM5m, (outs), (ins i32mem:$dst, u8imm:$src),
184                   "shr{l}\t{$src, $dst|$dst, $src}",
185               [(store (srl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
186               OpSize32;
187def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, u8imm:$src),
188                  "shr{q}\t{$src, $dst|$dst, $src}",
189                 [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
190                 Requires<[In64BitMode]>;
191
192// Shift by 1
193def SHR8m1   : I<0xD0, MRM5m, (outs), (ins i8mem :$dst),
194                 "shr{b}\t$dst", []>;
195def SHR16m1  : I<0xD1, MRM5m, (outs), (ins i16mem:$dst),
196                 "shr{w}\t$dst", []>, OpSize16;
197def SHR32m1  : I<0xD1, MRM5m, (outs), (ins i32mem:$dst),
198                 "shr{l}\t$dst", []>, OpSize32;
199def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
200                  "shr{q}\t$dst", []>, Requires<[In64BitMode]>;
201} // SchedRW, mayLoad, mayStore
202
203
// SAR (arithmetic shift right), register destination.  Same layout as
// SHL/SHR above, with the /7 opcode extension (MRM7r) and sra patterns.
204let Constraints = "$src1 = $dst" in {
205let Uses = [CL], SchedRW = [WriteShiftCL] in {
206def SAR8rCL  : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
207                 "sar{b}\t{%cl, $dst|$dst, cl}",
208                 [(set GR8:$dst, (sra GR8:$src1, CL))]>;
209def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
210                 "sar{w}\t{%cl, $dst|$dst, cl}",
211                 [(set GR16:$dst, (sra GR16:$src1, CL))]>,
212                 OpSize16;
213def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
214                 "sar{l}\t{%cl, $dst|$dst, cl}",
215                 [(set GR32:$dst, (sra GR32:$src1, CL))]>,
216                 OpSize32;
217def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
218                 "sar{q}\t{%cl, $dst|$dst, cl}",
219                 [(set GR64:$dst, (sra GR64:$src1, CL))]>;
220} // Uses, SchedRW
221
222let SchedRW = [WriteShift] in {
223def SAR8ri   : Ii8<0xC0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
224                   "sar{b}\t{$src2, $dst|$dst, $src2}",
225                   [(set GR8:$dst, (sra GR8:$src1, (i8 imm:$src2)))]>;
226def SAR16ri  : Ii8<0xC1, MRM7r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
227                   "sar{w}\t{$src2, $dst|$dst, $src2}",
228                   [(set GR16:$dst, (sra GR16:$src1, (i8 imm:$src2)))]>,
229                   OpSize16;
230def SAR32ri  : Ii8<0xC1, MRM7r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
231                   "sar{l}\t{$src2, $dst|$dst, $src2}",
232                   [(set GR32:$dst, (sra GR32:$src1, (i8 imm:$src2)))]>,
233                   OpSize32;
234def SAR64ri  : RIi8<0xC1, MRM7r, (outs GR64:$dst),
235                    (ins GR64:$src1, u8imm:$src2),
236                    "sar{q}\t{$src2, $dst|$dst, $src2}",
237                    [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
238
239// Shift by 1
240def SAR8r1   : I<0xD0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
241                 "sar{b}\t$dst", []>;
242def SAR16r1  : I<0xD1, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
243                 "sar{w}\t$dst", []>, OpSize16;
244def SAR32r1  : I<0xD1, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
245                 "sar{l}\t$dst", []>, OpSize32;
246def SAR64r1  : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
247                  "sar{q}\t$dst", []>;
248} // SchedRW
249} // Constraints = "$src1 = $dst"
250
251
// SAR, memory destination, count in CL: load-shift-store RMW patterns.
252let Uses = [CL], SchedRW = [WriteShiftCLLd, WriteRMW] in {
253def SAR8mCL  : I<0xD2, MRM7m, (outs), (ins i8mem :$dst),
254                 "sar{b}\t{%cl, $dst|$dst, cl}",
255                 [(store (sra (loadi8 addr:$dst), CL), addr:$dst)]>;
256def SAR16mCL : I<0xD3, MRM7m, (outs), (ins i16mem:$dst),
257                 "sar{w}\t{%cl, $dst|$dst, cl}",
258                 [(store (sra (loadi16 addr:$dst), CL), addr:$dst)]>,
259                 OpSize16;
260def SAR32mCL : I<0xD3, MRM7m, (outs), (ins i32mem:$dst),
261                 "sar{l}\t{%cl, $dst|$dst, cl}",
262                 [(store (sra (loadi32 addr:$dst), CL), addr:$dst)]>,
263                 OpSize32;
264def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
265                 "sar{q}\t{%cl, $dst|$dst, cl}",
266                 [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>,
267                 Requires<[In64BitMode]>;
268} // Uses, SchedRW
269
// SAR, memory destination, immediate count (RMW), plus short shift-by-1
// memory encodings (no ISel pattern attached to the *m1 forms).
270let SchedRW = [WriteShiftLd, WriteRMW], mayLoad = 1, mayStore = 1 in {
271def SAR8mi   : Ii8<0xC0, MRM7m, (outs), (ins i8mem :$dst, u8imm:$src),
272                   "sar{b}\t{$src, $dst|$dst, $src}",
273                [(store (sra (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
274def SAR16mi  : Ii8<0xC1, MRM7m, (outs), (ins i16mem:$dst, u8imm:$src),
275                   "sar{w}\t{$src, $dst|$dst, $src}",
276               [(store (sra (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
277               OpSize16;
278def SAR32mi  : Ii8<0xC1, MRM7m, (outs), (ins i32mem:$dst, u8imm:$src),
279                   "sar{l}\t{$src, $dst|$dst, $src}",
280               [(store (sra (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
281               OpSize32;
282def SAR64mi  : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, u8imm:$src),
283                    "sar{q}\t{$src, $dst|$dst, $src}",
284                 [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
285                 Requires<[In64BitMode]>;
286
287// Shift by 1
288def SAR8m1   : I<0xD0, MRM7m, (outs), (ins i8mem :$dst),
289                 "sar{b}\t$dst", []>;
290def SAR16m1  : I<0xD1, MRM7m, (outs), (ins i16mem:$dst),
291                 "sar{w}\t$dst", []>, OpSize16;
292def SAR32m1  : I<0xD1, MRM7m, (outs), (ins i32mem:$dst),
293                 "sar{l}\t$dst", []>, OpSize32;
294def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
295                  "sar{q}\t$dst", []>, Requires<[In64BitMode]>;
296} // SchedRW, mayLoad, mayStore
297
298//===----------------------------------------------------------------------===//
299// Rotate instructions
300//===----------------------------------------------------------------------===//
301
// RCL/RCR (rotate through carry), register destination.  These also read
// EFLAGS (the incoming CF participates in the rotation), hence
// Uses = [EFLAGS] on every form.  No ISel patterns are attached; the defs
// exist for the assembler/disassembler and manual selection.
// RCL uses the /2 extension (MRM2r), RCR the /3 extension (MRM3r).
302let Constraints = "$src1 = $dst" in {
303
304let Uses = [CL, EFLAGS], SchedRW = [WriteRotateCL] in {
305def RCL8rCL : I<0xD2, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
306                "rcl{b}\t{%cl, $dst|$dst, cl}", []>;
307def RCL16rCL : I<0xD3, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
308                 "rcl{w}\t{%cl, $dst|$dst, cl}", []>, OpSize16;
309def RCL32rCL : I<0xD3, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
310                 "rcl{l}\t{%cl, $dst|$dst, cl}", []>, OpSize32;
311def RCL64rCL : RI<0xD3, MRM2r, (outs GR64:$dst), (ins GR64:$src1),
312                  "rcl{q}\t{%cl, $dst|$dst, cl}", []>;
313} // Uses = [CL, EFLAGS], SchedRW
314
// Rotate-by-1 and immediate-count RCL forms.
315let Uses = [EFLAGS], SchedRW = [WriteRotate] in {
316def RCL8r1 : I<0xD0, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
317               "rcl{b}\t$dst", []>;
318def RCL8ri : Ii8<0xC0, MRM2r, (outs GR8:$dst), (ins GR8:$src1, u8imm:$cnt),
319                 "rcl{b}\t{$cnt, $dst|$dst, $cnt}", []>;
320def RCL16r1 : I<0xD1, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
321                "rcl{w}\t$dst", []>, OpSize16;
322def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$cnt),
323                  "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize16;
324def RCL32r1 : I<0xD1, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
325                "rcl{l}\t$dst", []>, OpSize32;
326def RCL32ri : Ii8<0xC1, MRM2r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$cnt),
327                  "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize32;
328def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src1),
329                 "rcl{q}\t$dst", []>;
330def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src1, u8imm:$cnt),
331                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;
332} // Uses = [EFLAGS], SchedRW
333
334let Uses = [CL, EFLAGS], SchedRW = [WriteRotateCL] in {
335def RCR8rCL : I<0xD2, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
336                "rcr{b}\t{%cl, $dst|$dst, cl}", []>;
337def RCR16rCL : I<0xD3, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
338                 "rcr{w}\t{%cl, $dst|$dst, cl}", []>, OpSize16;
339def RCR32rCL : I<0xD3, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
340                 "rcr{l}\t{%cl, $dst|$dst, cl}", []>, OpSize32;
341def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
342                  "rcr{q}\t{%cl, $dst|$dst, cl}", []>;
343} // Uses = [CL, EFLAGS], SchedRW
344
// Rotate-by-1 and immediate-count RCR forms.
345let Uses = [EFLAGS], SchedRW = [WriteRotate] in {
346def RCR8r1 : I<0xD0, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
347               "rcr{b}\t$dst", []>;
348def RCR8ri : Ii8<0xC0, MRM3r, (outs GR8:$dst), (ins GR8:$src1, u8imm:$cnt),
349                 "rcr{b}\t{$cnt, $dst|$dst, $cnt}", []>;
350def RCR16r1 : I<0xD1, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
351                "rcr{w}\t$dst", []>, OpSize16;
352def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$cnt),
353                  "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize16;
354def RCR32r1 : I<0xD1, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
355                "rcr{l}\t$dst", []>, OpSize32;
356def RCR32ri : Ii8<0xC1, MRM3r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$cnt),
357                  "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize32;
358def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
359                 "rcr{q}\t$dst", []>;
360def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src1, u8imm:$cnt),
361                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;
362} // Uses = [EFLAGS], SchedRW
363} // Constraints = "$src1 = $dst"
364
// RCL/RCR, memory destination (RMW).  Like the register forms these read
// EFLAGS (incoming CF) and carry no ISel patterns.  64-bit memory forms
// require 64-bit mode.
365let mayLoad = 1, mayStore = 1 in {
366let Uses = [EFLAGS], SchedRW = [WriteRotateLd, WriteRMW] in {
367def RCL8m1 : I<0xD0, MRM2m, (outs), (ins i8mem:$dst),
368               "rcl{b}\t$dst", []>;
369def RCL8mi : Ii8<0xC0, MRM2m, (outs), (ins i8mem:$dst, u8imm:$cnt),
370                 "rcl{b}\t{$cnt, $dst|$dst, $cnt}", []>;
371def RCL16m1 : I<0xD1, MRM2m, (outs), (ins i16mem:$dst),
372                "rcl{w}\t$dst", []>, OpSize16;
373def RCL16mi : Ii8<0xC1, MRM2m, (outs), (ins i16mem:$dst, u8imm:$cnt),
374                  "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize16;
375def RCL32m1 : I<0xD1, MRM2m, (outs), (ins i32mem:$dst),
376                "rcl{l}\t$dst", []>, OpSize32;
377def RCL32mi : Ii8<0xC1, MRM2m, (outs), (ins i32mem:$dst, u8imm:$cnt),
378                  "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize32;
379def RCL64m1 : RI<0xD1, MRM2m, (outs), (ins i64mem:$dst),
380                 "rcl{q}\t$dst", []>, Requires<[In64BitMode]>;
381def RCL64mi : RIi8<0xC1, MRM2m, (outs), (ins i64mem:$dst, u8imm:$cnt),
382                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>,
383                   Requires<[In64BitMode]>;
384
385def RCR8m1 : I<0xD0, MRM3m, (outs), (ins i8mem:$dst),
386               "rcr{b}\t$dst", []>;
387def RCR8mi : Ii8<0xC0, MRM3m, (outs), (ins i8mem:$dst, u8imm:$cnt),
388                 "rcr{b}\t{$cnt, $dst|$dst, $cnt}", []>;
389def RCR16m1 : I<0xD1, MRM3m, (outs), (ins i16mem:$dst),
390                "rcr{w}\t$dst", []>, OpSize16;
391def RCR16mi : Ii8<0xC1, MRM3m, (outs), (ins i16mem:$dst, u8imm:$cnt),
392                  "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize16;
393def RCR32m1 : I<0xD1, MRM3m, (outs), (ins i32mem:$dst),
394                "rcr{l}\t$dst", []>, OpSize32;
395def RCR32mi : Ii8<0xC1, MRM3m, (outs), (ins i32mem:$dst, u8imm:$cnt),
396                  "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize32;
397def RCR64m1 : RI<0xD1, MRM3m, (outs), (ins i64mem:$dst),
398                 "rcr{q}\t$dst", []>, Requires<[In64BitMode]>;
399def RCR64mi : RIi8<0xC1, MRM3m, (outs), (ins i64mem:$dst, u8imm:$cnt),
400                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>,
401                   Requires<[In64BitMode]>;
402} // Uses = [EFLAGS], SchedRW
403
// Variable-count (CL) memory forms.
404let Uses = [CL, EFLAGS], SchedRW = [WriteRotateCLLd, WriteRMW] in {
405def RCL8mCL : I<0xD2, MRM2m, (outs), (ins i8mem:$dst),
406                "rcl{b}\t{%cl, $dst|$dst, cl}", []>;
407def RCL16mCL : I<0xD3, MRM2m, (outs), (ins i16mem:$dst),
408                 "rcl{w}\t{%cl, $dst|$dst, cl}", []>, OpSize16;
409def RCL32mCL : I<0xD3, MRM2m, (outs), (ins i32mem:$dst),
410                 "rcl{l}\t{%cl, $dst|$dst, cl}", []>, OpSize32;
411def RCL64mCL : RI<0xD3, MRM2m, (outs), (ins i64mem:$dst),
412                  "rcl{q}\t{%cl, $dst|$dst, cl}", []>,
413                  Requires<[In64BitMode]>;
414
415def RCR8mCL : I<0xD2, MRM3m, (outs), (ins i8mem:$dst),
416                "rcr{b}\t{%cl, $dst|$dst, cl}", []>;
417def RCR16mCL : I<0xD3, MRM3m, (outs), (ins i16mem:$dst),
418                 "rcr{w}\t{%cl, $dst|$dst, cl}", []>, OpSize16;
419def RCR32mCL : I<0xD3, MRM3m, (outs), (ins i32mem:$dst),
420                 "rcr{l}\t{%cl, $dst|$dst, cl}", []>, OpSize32;
421def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst),
422                  "rcr{q}\t{%cl, $dst|$dst, cl}", []>,
423                  Requires<[In64BitMode]>;
424} // Uses = [CL, EFLAGS], SchedRW
425} // mayLoad, mayStore
426
// ROL (rotate left), register destination, with rotl ISel patterns.
// /0 opcode extension (MRM0r).  Count forms: CL, immediate-8, and the
// short by-1 encoding (by-1 forms carry no pattern).
427let Constraints = "$src1 = $dst" in {
428// FIXME: provide shorter instructions when imm8 == 1
429let Uses = [CL], SchedRW = [WriteRotateCL] in {
430def ROL8rCL  : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
431                 "rol{b}\t{%cl, $dst|$dst, cl}",
432                 [(set GR8:$dst, (rotl GR8:$src1, CL))]>;
433def ROL16rCL : I<0xD3, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
434                 "rol{w}\t{%cl, $dst|$dst, cl}",
435                 [(set GR16:$dst, (rotl GR16:$src1, CL))]>, OpSize16;
436def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
437                 "rol{l}\t{%cl, $dst|$dst, cl}",
438                 [(set GR32:$dst, (rotl GR32:$src1, CL))]>, OpSize32;
439def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
440                  "rol{q}\t{%cl, $dst|$dst, cl}",
441                  [(set GR64:$dst, (rotl GR64:$src1, CL))]>;
442} // Uses, SchedRW
443
444let SchedRW = [WriteRotate] in {
445def ROL8ri   : Ii8<0xC0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
446                   "rol{b}\t{$src2, $dst|$dst, $src2}",
447                   [(set GR8:$dst, (rotl GR8:$src1, (i8 imm:$src2)))]>;
448def ROL16ri  : Ii8<0xC1, MRM0r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
449                   "rol{w}\t{$src2, $dst|$dst, $src2}",
450                   [(set GR16:$dst, (rotl GR16:$src1, (i8 imm:$src2)))]>,
451                   OpSize16;
452def ROL32ri  : Ii8<0xC1, MRM0r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
453                   "rol{l}\t{$src2, $dst|$dst, $src2}",
454                   [(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$src2)))]>,
455                   OpSize32;
456def ROL64ri  : RIi8<0xC1, MRM0r, (outs GR64:$dst),
457                    (ins GR64:$src1, u8imm:$src2),
458                    "rol{q}\t{$src2, $dst|$dst, $src2}",
459                    [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
460
461// Rotate by 1
462def ROL8r1   : I<0xD0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
463                 "rol{b}\t$dst", []>;
464def ROL16r1  : I<0xD1, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
465                 "rol{w}\t$dst", []>, OpSize16;
466def ROL32r1  : I<0xD1, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
467                 "rol{l}\t$dst", []>, OpSize32;
468def ROL64r1  : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
469                  "rol{q}\t$dst", []>;
470} // SchedRW
471} // Constraints = "$src1 = $dst"
472
// ROL, memory destination, count in CL: load-rotate-store RMW patterns.
473let Uses = [CL], SchedRW = [WriteRotateCLLd, WriteRMW] in {
474def ROL8mCL  : I<0xD2, MRM0m, (outs), (ins i8mem :$dst),
475                 "rol{b}\t{%cl, $dst|$dst, cl}",
476                 [(store (rotl (loadi8 addr:$dst), CL), addr:$dst)]>;
477def ROL16mCL : I<0xD3, MRM0m, (outs), (ins i16mem:$dst),
478                 "rol{w}\t{%cl, $dst|$dst, cl}",
479                 [(store (rotl (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize16;
480def ROL32mCL : I<0xD3, MRM0m, (outs), (ins i32mem:$dst),
481                 "rol{l}\t{%cl, $dst|$dst, cl}",
482                 [(store (rotl (loadi32 addr:$dst), CL), addr:$dst)]>, OpSize32;
483def ROL64mCL :  RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
484                   "rol{q}\t{%cl, $dst|$dst, cl}",
485                   [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>,
486                   Requires<[In64BitMode]>;
487} // Uses, SchedRW
488
// ROL, memory destination, immediate count (RMW), plus short rotate-by-1
// memory encodings.  NOTE(review): the immediate operand is named $src1
// here, whereas the sibling SHL/SHR/SAR/ROR memory groups call it $src —
// naming only, no functional difference.
489let SchedRW = [WriteRotateLd, WriteRMW], mayLoad = 1, mayStore = 1 in {
490def ROL8mi   : Ii8<0xC0, MRM0m, (outs), (ins i8mem :$dst, u8imm:$src1),
491                   "rol{b}\t{$src1, $dst|$dst, $src1}",
492               [(store (rotl (loadi8 addr:$dst), (i8 imm:$src1)), addr:$dst)]>;
493def ROL16mi  : Ii8<0xC1, MRM0m, (outs), (ins i16mem:$dst, u8imm:$src1),
494                   "rol{w}\t{$src1, $dst|$dst, $src1}",
495              [(store (rotl (loadi16 addr:$dst), (i8 imm:$src1)), addr:$dst)]>,
496              OpSize16;
497def ROL32mi  : Ii8<0xC1, MRM0m, (outs), (ins i32mem:$dst, u8imm:$src1),
498                   "rol{l}\t{$src1, $dst|$dst, $src1}",
499              [(store (rotl (loadi32 addr:$dst), (i8 imm:$src1)), addr:$dst)]>,
500              OpSize32;
501def ROL64mi  : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, u8imm:$src1),
502                    "rol{q}\t{$src1, $dst|$dst, $src1}",
503                [(store (rotl (loadi64 addr:$dst), (i8 imm:$src1)), addr:$dst)]>,
504                Requires<[In64BitMode]>;
505
506// Rotate by 1
507def ROL8m1   : I<0xD0, MRM0m, (outs), (ins i8mem :$dst),
508                 "rol{b}\t$dst", []>;
509def ROL16m1  : I<0xD1, MRM0m, (outs), (ins i16mem:$dst),
510                 "rol{w}\t$dst", []>, OpSize16;
511def ROL32m1  : I<0xD1, MRM0m, (outs), (ins i32mem:$dst),
512                 "rol{l}\t$dst", []>, OpSize32;
513def ROL64m1  : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
514                 "rol{q}\t$dst", []>, Requires<[In64BitMode]>;
515} // SchedRW, mayLoad, mayStore
516
// ROR (rotate right), register destination, with rotr ISel patterns.
// /1 opcode extension (MRM1r); otherwise mirrors the ROL group above.
517let Constraints = "$src1 = $dst" in {
518let Uses = [CL], SchedRW = [WriteRotateCL] in {
519def ROR8rCL  : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
520                 "ror{b}\t{%cl, $dst|$dst, cl}",
521                 [(set GR8:$dst, (rotr GR8:$src1, CL))]>;
522def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
523                 "ror{w}\t{%cl, $dst|$dst, cl}",
524                 [(set GR16:$dst, (rotr GR16:$src1, CL))]>, OpSize16;
525def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
526                 "ror{l}\t{%cl, $dst|$dst, cl}",
527                 [(set GR32:$dst, (rotr GR32:$src1, CL))]>, OpSize32;
528def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
529                  "ror{q}\t{%cl, $dst|$dst, cl}",
530                  [(set GR64:$dst, (rotr GR64:$src1, CL))]>;
531} // Uses = [CL], SchedRW
532
533let SchedRW = [WriteRotate] in {
534def ROR8ri   : Ii8<0xC0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
535                   "ror{b}\t{$src2, $dst|$dst, $src2}",
536                   [(set GR8:$dst, (rotr GR8:$src1, (i8 imm:$src2)))]>;
537def ROR16ri  : Ii8<0xC1, MRM1r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
538                   "ror{w}\t{$src2, $dst|$dst, $src2}",
539                   [(set GR16:$dst, (rotr GR16:$src1, (i8 imm:$src2)))]>,
540                   OpSize16;
541def ROR32ri  : Ii8<0xC1, MRM1r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
542                   "ror{l}\t{$src2, $dst|$dst, $src2}",
543                   [(set GR32:$dst, (rotr GR32:$src1, (i8 imm:$src2)))]>,
544                   OpSize32;
545def ROR64ri  : RIi8<0xC1, MRM1r, (outs GR64:$dst),
546                    (ins GR64:$src1, u8imm:$src2),
547                    "ror{q}\t{$src2, $dst|$dst, $src2}",
548                    [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
549
550// Rotate by 1
551def ROR8r1   : I<0xD0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
552                 "ror{b}\t$dst", []>;
553def ROR16r1  : I<0xD1, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
554                 "ror{w}\t$dst", []>, OpSize16;
555def ROR32r1  : I<0xD1, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
556                 "ror{l}\t$dst", []>, OpSize32;
557def ROR64r1  : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
558                  "ror{q}\t$dst", []>;
559} // SchedRW
560} // Constraints = "$src1 = $dst"
561
// ROR, memory destination, count in CL: load-rotate-store RMW patterns.
562let Uses = [CL], SchedRW = [WriteRotateCLLd, WriteRMW] in {
563def ROR8mCL  : I<0xD2, MRM1m, (outs), (ins i8mem :$dst),
564                 "ror{b}\t{%cl, $dst|$dst, cl}",
565                 [(store (rotr (loadi8 addr:$dst), CL), addr:$dst)]>;
566def ROR16mCL : I<0xD3, MRM1m, (outs), (ins i16mem:$dst),
567                 "ror{w}\t{%cl, $dst|$dst, cl}",
568                 [(store (rotr (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize16;
569def ROR32mCL : I<0xD3, MRM1m, (outs), (ins i32mem:$dst),
570                 "ror{l}\t{%cl, $dst|$dst, cl}",
571                 [(store (rotr (loadi32 addr:$dst), CL), addr:$dst)]>, OpSize32;
572def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
573                  "ror{q}\t{%cl, $dst|$dst, cl}",
574                  [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>,
575                  Requires<[In64BitMode]>;
576} // Uses, SchedRW
577
// ROR, memory destination, immediate count (RMW), plus short rotate-by-1
// memory encodings (no ISel pattern attached to the *m1 forms).
578let SchedRW = [WriteRotateLd, WriteRMW], mayLoad = 1, mayStore =1 in {
579def ROR8mi   : Ii8<0xC0, MRM1m, (outs), (ins i8mem :$dst, u8imm:$src),
580                   "ror{b}\t{$src, $dst|$dst, $src}",
581                   [(store (rotr (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
582def ROR16mi  : Ii8<0xC1, MRM1m, (outs), (ins i16mem:$dst, u8imm:$src),
583                   "ror{w}\t{$src, $dst|$dst, $src}",
584                   [(store (rotr (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
585                   OpSize16;
586def ROR32mi  : Ii8<0xC1, MRM1m, (outs), (ins i32mem:$dst, u8imm:$src),
587                   "ror{l}\t{$src, $dst|$dst, $src}",
588                   [(store (rotr (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
589                   OpSize32;
590def ROR64mi  : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, u8imm:$src),
591                    "ror{q}\t{$src, $dst|$dst, $src}",
592                    [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
593                    Requires<[In64BitMode]>;
594
595// Rotate by 1
596def ROR8m1   : I<0xD0, MRM1m, (outs), (ins i8mem :$dst),
597                 "ror{b}\t$dst", []>;
598def ROR16m1  : I<0xD1, MRM1m, (outs), (ins i16mem:$dst),
599                 "ror{w}\t$dst", []>, OpSize16;
600def ROR32m1  : I<0xD1, MRM1m, (outs), (ins i32mem:$dst),
601                 "ror{l}\t$dst", []>,
602                 OpSize32;
603def ROR64m1  : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
604                 "ror{q}\t$dst", []>, Requires<[In64BitMode]>;
605} // SchedRW, mayLoad, mayStore
606
607
608//===----------------------------------------------------------------------===//
609// Double shift instructions (generalizations of rotate)
610//===----------------------------------------------------------------------===//
611
// Register forms of SHLD/SHRD (funnel shifts).  $src1 is tied to $dst.
// Note the operand ordering in the patterns: SHLD maps to fshl($src1,
// $src2, amt) while SHRD maps to fshr($src2, $src1, amt) — i.e. for SHRD
// the tied destination supplies the LOW half of the concatenated value.
// The 16-bit variants use the X86-specific X86fshl/X86fshr nodes rather
// than the generic fshl/fshr used by the 32/64-bit variants.
let Constraints = "$src1 = $dst" in {

// Shift amount in CL.
let Uses = [CL], SchedRW = [WriteSHDrrcl] in {
def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst),
                   (ins GR16:$src1, GR16:$src2),
                   "shld{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(set GR16:$dst, (X86fshl GR16:$src1, GR16:$src2, CL))]>,
                   TB, OpSize16;
def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst),
                   (ins GR16:$src1, GR16:$src2),
                   "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(set GR16:$dst, (X86fshr GR16:$src2, GR16:$src1, CL))]>,
                   TB, OpSize16;
def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst),
                   (ins GR32:$src1, GR32:$src2),
                   "shld{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(set GR32:$dst, (fshl GR32:$src1, GR32:$src2, CL))]>,
                   TB, OpSize32;
def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst),
                   (ins GR32:$src1, GR32:$src2),
                   "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(set GR32:$dst, (fshr GR32:$src2, GR32:$src1, CL))]>,
                   TB, OpSize32;
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(set GR64:$dst, (fshl GR64:$src1, GR64:$src2, CL))]>,
                    TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(set GR64:$dst, (fshr GR64:$src2, GR64:$src1, CL))]>,
                    TB;
} // Uses, SchedRW

// Shift amount as an 8-bit immediate.
let isCommutable = 1, SchedRW = [WriteSHDrri] in {  // These instructions commute to each other.
def SHLD16rri8 : Ii8<0xA4, MRMDestReg,
                     (outs GR16:$dst),
                     (ins GR16:$src1, GR16:$src2, u8imm:$src3),
                     "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(set GR16:$dst, (X86fshl GR16:$src1, GR16:$src2,
                                      (i8 imm:$src3)))]>,
                     TB, OpSize16;
def SHRD16rri8 : Ii8<0xAC, MRMDestReg,
                     (outs GR16:$dst),
                     (ins GR16:$src1, GR16:$src2, u8imm:$src3),
                     "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(set GR16:$dst, (X86fshr GR16:$src2, GR16:$src1,
                                      (i8 imm:$src3)))]>,
                     TB, OpSize16;
def SHLD32rri8 : Ii8<0xA4, MRMDestReg,
                     (outs GR32:$dst),
                     (ins GR32:$src1, GR32:$src2, u8imm:$src3),
                     "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(set GR32:$dst, (fshl GR32:$src1, GR32:$src2,
                                      (i8 imm:$src3)))]>,
                 TB, OpSize32;
def SHRD32rri8 : Ii8<0xAC, MRMDestReg,
                     (outs GR32:$dst),
                     (ins GR32:$src1, GR32:$src2, u8imm:$src3),
                     "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(set GR32:$dst, (fshr GR32:$src2, GR32:$src1,
                                      (i8 imm:$src3)))]>,
                 TB, OpSize32;
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, u8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (fshl GR64:$src1, GR64:$src2,
                                       (i8 imm:$src3)))]>,
                 TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, u8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (fshr GR64:$src2, GR64:$src1,
                                       (i8 imm:$src3)))]>,
                 TB;
} // SchedRW
} // Constraints = "$src1 = $dst"
692
// Memory-destination SHLD/SHRD with the shift amount in CL.  The memory
// operand is read-modify-written; as in the register forms, SHRD places
// the loaded memory value as the second (low) operand of fshr.
let Uses = [CL], SchedRW = [WriteSHDmrcl] in {
def SHLD16mrCL : I<0xA5, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                   "shld{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(store (X86fshl (loadi16 addr:$dst), GR16:$src2, CL),
                                    addr:$dst)]>, TB, OpSize16;
def SHRD16mrCL : I<0xAD, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                  "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                  [(store (X86fshr GR16:$src2, (loadi16 addr:$dst), CL),
                                   addr:$dst)]>, TB, OpSize16;

def SHLD32mrCL : I<0xA5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                   "shld{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(store (fshl (loadi32 addr:$dst), GR32:$src2, CL),
                     addr:$dst)]>, TB, OpSize32;
def SHRD32mrCL : I<0xAD, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                  "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                  [(store (fshr GR32:$src2, (loadi32 addr:$dst), CL),
                                addr:$dst)]>, TB, OpSize32;

def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(store (fshl (loadi64 addr:$dst), GR64:$src2, CL),
                                  addr:$dst)]>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(store (fshr GR64:$src2, (loadi64 addr:$dst), CL),
                                  addr:$dst)]>, TB;
} // Uses, SchedRW
721
// Memory-destination SHLD/SHRD with an 8-bit immediate shift amount.
let SchedRW = [WriteSHDmri] in {
def SHLD16mri8 : Ii8<0xA4, MRMDestMem,
                    (outs), (ins i16mem:$dst, GR16:$src2, u8imm:$src3),
                    "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    [(store (X86fshl (loadi16 addr:$dst), GR16:$src2,
                                     (i8 imm:$src3)), addr:$dst)]>,
                    TB, OpSize16;
def SHRD16mri8 : Ii8<0xAC, MRMDestMem,
                     (outs), (ins i16mem:$dst, GR16:$src2, u8imm:$src3),
                     "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    [(store (X86fshr GR16:$src2, (loadi16 addr:$dst),
                                     (i8 imm:$src3)), addr:$dst)]>,
                     TB, OpSize16;

def SHLD32mri8 : Ii8<0xA4, MRMDestMem,
                    (outs), (ins i32mem:$dst, GR32:$src2, u8imm:$src3),
                    "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    [(store (fshl (loadi32 addr:$dst), GR32:$src2,
                                  (i8 imm:$src3)), addr:$dst)]>,
                    TB, OpSize32;
def SHRD32mri8 : Ii8<0xAC, MRMDestMem,
                     (outs), (ins i32mem:$dst, GR32:$src2, u8imm:$src3),
                     "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(store (fshr GR32:$src2, (loadi32 addr:$dst),
                                   (i8 imm:$src3)), addr:$dst)]>,
                     TB, OpSize32;

def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, u8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (fshl (loadi64 addr:$dst), GR64:$src2,
                                    (i8 imm:$src3)), addr:$dst)]>,
                 TB;
def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, u8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (fshr GR64:$src2, (loadi64 addr:$dst),
                                    (i8 imm:$src3)), addr:$dst)]>,
                 TB;
} // SchedRW
762
763} // Defs = [EFLAGS], hasSideEffects
764
// Use the opposite rotate if allows us to use the rotate by 1 instruction.
// rotl by (width-1) == rotr by 1, and vice versa, so these select the
// shorter one-byte-shorter "rotate by 1" encodings (0xD0/0xD1).
def : Pat<(rotl GR8:$src1,  (i8 7)),  (ROR8r1  GR8:$src1)>;
def : Pat<(rotl GR16:$src1, (i8 15)), (ROR16r1 GR16:$src1)>;
def : Pat<(rotl GR32:$src1, (i8 31)), (ROR32r1 GR32:$src1)>;
def : Pat<(rotl GR64:$src1, (i8 63)), (ROR64r1 GR64:$src1)>;
def : Pat<(rotr GR8:$src1,  (i8 7)),  (ROL8r1  GR8:$src1)>;
def : Pat<(rotr GR16:$src1, (i8 15)), (ROL16r1 GR16:$src1)>;
def : Pat<(rotr GR32:$src1, (i8 31)), (ROL32r1 GR32:$src1)>;
def : Pat<(rotr GR64:$src1, (i8 63)), (ROL64r1 GR64:$src1)>;

// Same trick for the read-modify-write memory forms.
def : Pat<(store (rotl (loadi8 addr:$dst), (i8 7)), addr:$dst),
          (ROR8m1 addr:$dst)>;
def : Pat<(store (rotl (loadi16 addr:$dst), (i8 15)), addr:$dst),
          (ROR16m1 addr:$dst)>;
def : Pat<(store (rotl (loadi32 addr:$dst), (i8 31)), addr:$dst),
          (ROR32m1 addr:$dst)>;
def : Pat<(store (rotl (loadi64 addr:$dst), (i8 63)), addr:$dst),
          (ROR64m1 addr:$dst)>, Requires<[In64BitMode]>;

def : Pat<(store (rotr (loadi8 addr:$dst), (i8 7)), addr:$dst),
          (ROL8m1 addr:$dst)>;
def : Pat<(store (rotr (loadi16 addr:$dst), (i8 15)), addr:$dst),
          (ROL16m1 addr:$dst)>;
def : Pat<(store (rotr (loadi32 addr:$dst), (i8 31)), addr:$dst),
          (ROL32m1 addr:$dst)>;
def : Pat<(store (rotr (loadi64 addr:$dst), (i8 63)), addr:$dst),
          (ROL64m1 addr:$dst)>, Requires<[In64BitMode]>;
792
// Sandy Bridge and newer Intel processors support faster rotates using
// SHLD to avoid a partial flag update on the normal rotate instructions.
// Use a pseudo so that TwoInstructionPass and register allocation will see
// this as unary instruction.
// (Only 32/64-bit forms exist; these pseudos are expanded later into a
// SHLD/SHRD of the register with itself.)
let Predicates = [HasFastSHLDRotate], AddedComplexity = 5,
    Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteSHDrri],
    Constraints = "$src1 = $dst" in {
  def SHLDROT32ri  : I<0, Pseudo, (outs GR32:$dst),
                       (ins GR32:$src1, u8imm:$shamt), "",
                     [(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$shamt)))]>;
  def SHLDROT64ri  : I<0, Pseudo, (outs GR64:$dst),
                       (ins GR64:$src1, u8imm:$shamt), "",
                     [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$shamt)))]>;

  def SHRDROT32ri  : I<0, Pseudo, (outs GR32:$dst),
                       (ins GR32:$src1, u8imm:$shamt), "",
                     [(set GR32:$dst, (rotr GR32:$src1, (i8 imm:$shamt)))]>;
  def SHRDROT64ri  : I<0, Pseudo, (outs GR64:$dst),
                       (ins GR64:$src1, u8imm:$shamt), "",
                     [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$shamt)))]>;
}
814
// Transform a rotate-left amount into the equivalent rotate-right amount
// for a 32-bit value (used below to select RORX for rotl).
def ROT32L2R_imm8  : SDNodeXForm<imm, [{
  // Convert a ROTL shamt to a ROTR shamt on 32-bit integer.
  return getI8Imm(32 - N->getZExtValue(), SDLoc(N));
}]>;
819
// 64-bit counterpart of ROT32L2R_imm8.
def ROT64L2R_imm8  : SDNodeXForm<imm, [{
  // Convert a ROTL shamt to a ROTR shamt on 64-bit integer.
  return getI8Imm(64 - N->getZExtValue(), SDLoc(N));
}]>;
824
// NOTE: We use WriteShift for these rotates as they avoid the stalls
// of many of the older x86 rotate instructions.
// Defines the register (ri) and memory (mi) immediate forms of a BMI2
// rotate (RORX).  Suffix distinguishes the EVEX-encoded EGPR variants.
multiclass bmi_rotate<string asm, RegisterClass RC, X86MemOperand x86memop,
                      string Suffix = ""> {
let hasSideEffects = 0 in {
  def ri#Suffix : Ii8<0xF0, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, u8imm:$src2),
                      !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
                  TA, XD, VEX, Sched<[WriteShift]>;
  let mayLoad = 1 in
  def mi#Suffix : Ii8<0xF0, MRMSrcMem, (outs RC:$dst),
                      (ins x86memop:$src1, u8imm:$src2),
                      !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
                  TA, XD, VEX, Sched<[WriteShiftLd]>;
}
}
840
// Defines the register-register (rr) and register-memory (rm) forms of a
// BMI2 variable shift (SARX/SHRX/SHLX).  The shift count comes from a
// register ($src2); the rm form lists explicit ReadDefault operands for
// the five memory-address operands before the count register.
multiclass bmi_shift<string asm, RegisterClass RC, X86MemOperand x86memop,
                     string Suffix = ""> {
let hasSideEffects = 0 in {
  def rr#Suffix : I<0xF7, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                    !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
                    VEX, Sched<[WriteShift]>;
  let mayLoad = 1 in
  def rm#Suffix : I<0xF7, MRMSrcMem4VOp3,
                    (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
                    !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
                  VEX, Sched<[WriteShift.Folded,
                              // x86memop:$src1
                              ReadDefault, ReadDefault, ReadDefault, ReadDefault,
                              ReadDefault,
                              // RC:$src2
                              WriteShift.ReadAfterFold]>;
}
}
859
// VEX-encoded BMI2 rotate/shift instructions (no extended GPRs).
// Prefix classes select the instruction: XD=RORX/SHRX, XS=SARX, PD=SHLX.
let Predicates = [HasBMI2, NoEGPR] in {
  defm RORX32 : bmi_rotate<"rorx{l}", GR32, i32mem>;
  defm RORX64 : bmi_rotate<"rorx{q}", GR64, i64mem>, REX_W;
  defm SARX32 : bmi_shift<"sarx{l}", GR32, i32mem>, T8, XS;
  defm SARX64 : bmi_shift<"sarx{q}", GR64, i64mem>, T8, XS, REX_W;
  defm SHRX32 : bmi_shift<"shrx{l}", GR32, i32mem>, T8, XD;
  defm SHRX64 : bmi_shift<"shrx{q}", GR64, i64mem>, T8, XD, REX_W;
  defm SHLX32 : bmi_shift<"shlx{l}", GR32, i32mem>, T8, PD;
  defm SHLX64 : bmi_shift<"shlx{q}", GR64, i64mem>, T8, PD, REX_W;
}
870
// EVEX-encoded variants of the same BMI2 instructions, available when
// extended GPRs (APX) are supported; defs get an "_EVEX" name suffix.
let Predicates = [HasBMI2, HasEGPR, In64BitMode] in {
  defm RORX32 : bmi_rotate<"rorx{l}", GR32, i32mem, "_EVEX">, EVEX;
  defm RORX64 : bmi_rotate<"rorx{q}", GR64, i64mem, "_EVEX">, REX_W, EVEX;
  defm SARX32 : bmi_shift<"sarx{l}", GR32, i32mem, "_EVEX">, T8, XS, EVEX;
  defm SARX64 : bmi_shift<"sarx{q}", GR64, i64mem, "_EVEX">, T8, XS, REX_W, EVEX;
  defm SHRX32 : bmi_shift<"shrx{l}", GR32, i32mem, "_EVEX">, T8, XD, EVEX;
  defm SHRX64 : bmi_shift<"shrx{q}", GR64, i64mem, "_EVEX">, T8, XD, REX_W, EVEX;
  defm SHLX32 : bmi_shift<"shlx{l}", GR32, i32mem, "_EVEX">, T8, PD, EVEX;
  defm SHLX64 : bmi_shift<"shlx{q}", GR64, i64mem, "_EVEX">, T8, PD, REX_W, EVEX;
}
881
// Selection patterns that prefer the BMI2 instructions when available.
let Predicates = [HasBMI2] in {
  // Prefer RORX which is non-destructive and doesn't update EFLAGS.
  let AddedComplexity = 10 in {
    def : Pat<(rotr GR32:$src, (i8 imm:$shamt)),
              (RORX32ri GR32:$src, imm:$shamt)>;
    def : Pat<(rotr GR64:$src, (i8 imm:$shamt)),
              (RORX64ri GR64:$src, imm:$shamt)>;

    // rotl is lowered as RORX by the complementary amount (width - shamt).
    def : Pat<(rotl GR32:$src, (i8 imm:$shamt)),
              (RORX32ri GR32:$src, (ROT32L2R_imm8 imm:$shamt))>;
    def : Pat<(rotl GR64:$src, (i8 imm:$shamt)),
              (RORX64ri GR64:$src, (ROT64L2R_imm8 imm:$shamt))>;
  }

  // Load-folded RORX forms.
  def : Pat<(rotr (loadi32 addr:$src), (i8 imm:$shamt)),
            (RORX32mi addr:$src, imm:$shamt)>;
  def : Pat<(rotr (loadi64 addr:$src), (i8 imm:$shamt)),
            (RORX64mi addr:$src, imm:$shamt)>;

  def : Pat<(rotl (loadi32 addr:$src), (i8 imm:$shamt)),
            (RORX32mi addr:$src, (ROT32L2R_imm8 imm:$shamt))>;
  def : Pat<(rotl (loadi64 addr:$src), (i8 imm:$shamt)),
            (RORX64mi addr:$src, (ROT64L2R_imm8 imm:$shamt))>;

  // Prefer SARX/SHRX/SHLX over SAR/SHR/SHL with variable shift BUT not
  // immediate shift, i.e. the following code is considered better
  //
  //  mov %edi, %esi
  //  shl $imm, %esi
  //  ... %edi, ...
  //
  // than
  //
  //  movb $imm, %sil
  //  shlx %sil, %edi, %esi
  //  ... %edi, ...
  //
  // The i8 shift amount is widened to the full register width via
  // INSERT_SUBREG into an IMPLICIT_DEF (upper bits are don't-care).
  let AddedComplexity = 1 in {
    def : Pat<(sra GR32:$src1, GR8:$src2),
              (SARX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(sra GR64:$src1, GR8:$src2),
              (SARX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(srl GR32:$src1, GR8:$src2),
              (SHRX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(srl GR64:$src1, GR8:$src2),
              (SHRX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(shl GR32:$src1, GR8:$src2),
              (SHLX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(shl GR64:$src1, GR8:$src2),
              (SHLX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  }

  // We prefer to use
  //  mov (%ecx), %esi
  //  shl $imm, $esi
  //
  // over
  //
  //  movb $imm, %al
  //  shlx %al, (%ecx), %esi
  //
  // This priority is enforced by IsProfitableToFoldLoad.
  def : Pat<(sra (loadi32 addr:$src1), GR8:$src2),
            (SARX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(sra (loadi64 addr:$src1), GR8:$src2),
            (SARX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(srl (loadi32 addr:$src1), GR8:$src2),
            (SHRX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(srl (loadi64 addr:$src1), GR8:$src2),
            (SHRX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(shl (loadi32 addr:$src1), GR8:$src2),
            (SHLX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(shl (loadi64 addr:$src1), GR8:$src2),
            (SHLX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
}
985
// Select immediate rotates for relocatable immediates (amounts that are
// only known at link time and so cannot be constant-folded earlier).
def : Pat<(rotl GR8:$src1, (i8 relocImm:$src2)),
          (ROL8ri GR8:$src1, relocImm:$src2)>;
def : Pat<(rotl GR16:$src1, (i8 relocImm:$src2)),
          (ROL16ri GR16:$src1, relocImm:$src2)>;
def : Pat<(rotl GR32:$src1, (i8 relocImm:$src2)),
          (ROL32ri GR32:$src1, relocImm:$src2)>;
def : Pat<(rotl GR64:$src1, (i8 relocImm:$src2)),
          (ROL64ri GR64:$src1, relocImm:$src2)>;

def : Pat<(rotr GR8:$src1, (i8 relocImm:$src2)),
          (ROR8ri GR8:$src1, relocImm:$src2)>;
def : Pat<(rotr GR16:$src1, (i8 relocImm:$src2)),
          (ROR16ri GR16:$src1, relocImm:$src2)>;
def : Pat<(rotr GR32:$src1, (i8 relocImm:$src2)),
          (ROR32ri GR32:$src1, relocImm:$src2)>;
def : Pat<(rotr GR64:$src1, (i8 relocImm:$src2)),
          (ROR64ri GR64:$src1, relocImm:$src2)>;
1003