// xref: /freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td (revision aa1a8ff2d6dbc51ef058f46f3db5a8bb77967145)
//===-- RISCVInstrInfoZb.td - RISC-V Bitmanip instructions -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions from the standard Bitmanip
// extensions, versions:
//   Zba - 1.0
//   Zbb - 1.0
//   Zbc - 1.0
//   Zbs - 1.0
//
// This file also describes RISC-V instructions from the Zbk* extensions in
// Cryptography Extensions Volume I: Scalar & Entropy Source Instructions,
// versions:
//   Zbkb - 1.0
//   Zbkc - 1.0
//   Zbkx - 1.0
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//
28
// SelectionDAG nodes for target-specific bitmanip operations (lowered from
// RISCVISD opcodes produced by RISCVISelLowering).
def riscv_clzw   : SDNode<"RISCVISD::CLZW",   SDT_RISCVIntUnaryOpW>;
def riscv_ctzw   : SDNode<"RISCVISD::CTZW",   SDT_RISCVIntUnaryOpW>;
def riscv_rolw   : SDNode<"RISCVISD::ROLW",   SDT_RISCVIntBinOpW>;
def riscv_rorw   : SDNode<"RISCVISD::RORW",   SDT_RISCVIntBinOpW>;
def riscv_brev8  : SDNode<"RISCVISD::BREV8",  SDTIntUnaryOp>;
def riscv_orc_b  : SDNode<"RISCVISD::ORC_B",  SDTIntUnaryOp>;
def riscv_zip    : SDNode<"RISCVISD::ZIP",    SDTIntUnaryOp>;
def riscv_unzip  : SDNode<"RISCVISD::UNZIP",  SDTIntUnaryOp>;
def riscv_absw   : SDNode<"RISCVISD::ABSW",   SDTIntUnaryOp>;
def riscv_clmul  : SDNode<"RISCVISD::CLMUL",  SDTIntBinOp>;
def riscv_clmulh : SDNode<"RISCVISD::CLMULH", SDTIntBinOp>;
def riscv_clmulr : SDNode<"RISCVISD::CLMULR", SDTIntBinOp>;
41
// Asm operand class for an unsigned immediate in [0, XLen/2).
def UImmLog2XLenHalfAsmOperand : AsmOperandClass {
  let Name = "UImmLog2XLenHalf";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidUImmLog2XLenHalf";
}

// Shuffle immediate: uimm5 on RV64, uimm4 on RV32.
def shfl_uimm : RISCVOp, ImmLeaf<XLenVT, [{
  if (Subtarget->is64Bit())
    return isUInt<5>(Imm);
  return isUInt<4>(Imm);
}]> {
  let ParserMatchClass = UImmLog2XLenHalfAsmOperand;
  let DecoderMethod = "decodeUImmOperand<5>";
  let OperandType = "OPERAND_UIMM_SHFL";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (!MCOp.evaluateAsConstantImm(Imm))
      return false;
    if (STI.getTargetTriple().isArch64Bit())
      return isUInt<5>(Imm);
    return isUInt<4>(Imm);
  }];
}
65
// Transform a single-zero-bit mask into the index of that zero bit.
def BCLRXForm : SDNodeXForm<imm, [{
  // Find the lowest 0.
  return CurDAG->getTargetConstant(llvm::countr_one(N->getZExtValue()),
                                   SDLoc(N), N->getValueType(0));
}]>;

// Transform a single-one-bit mask into the index of that one bit.
def SingleBitSetMaskToIndex : SDNodeXForm<imm, [{
  // Find the lowest 1.
  return CurDAG->getTargetConstant(llvm::countr_zero(N->getZExtValue()),
                                   SDLoc(N), N->getValueType(0));
}]>;

// Checks if this mask has a single 0 bit and cannot be used with ANDI.
def BCLRMask : ImmLeaf<XLenVT, [{
  if (Subtarget->is64Bit())
    return !isInt<12>(Imm) && isPowerOf2_64(~Imm);
  return !isInt<12>(Imm) && isPowerOf2_32(~Imm);
}], BCLRXForm>;

// Checks if this mask has a single 1 bit and cannot be used with ORI/XORI.
def SingleBitSetMask : ImmLeaf<XLenVT, [{
  if (Subtarget->is64Bit())
    return !isInt<12>(Imm) && isPowerOf2_64(Imm);
  return !isInt<12>(Imm) && isPowerOf2_32(Imm);
}], SingleBitSetMaskToIndex>;
91
// Check if (or r, i) can be optimized to (BSETI (BSETI r, i0), i1),
// in which i = (1 << i0) | (1 << i1).
def BSETINVTwoBitsMask : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  // The immediate should not be a simm12.
  if (isInt<12>(N->getSExtValue()))
    return false;
  // The immediate must have exactly two bits set.
  return llvm::popcount(N->getZExtValue()) == 2;
}]>;

// Index of the higher of the two set bits (the lower one is extracted with
// TrailingZeros).
def BSETINVTwoBitsMaskHigh : SDNodeXForm<imm, [{
  uint64_t I = N->getZExtValue();
  return CurDAG->getTargetConstant(llvm::Log2_64(I), SDLoc(N),
                                   N->getValueType(0));
}]>;

// Check if (or r, imm) can be optimized to (BSETI (ORI r, i0), i1),
// in which imm = i0 | (1 << i1).
def BSETINVORIMask : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  // The immediate should not be a simm12.
  if (isInt<12>(N->getSExtValue()))
    return false;
  // There should be only one set bit from bit 11 to the top.
  return isPowerOf2_64(N->getZExtValue() & ~0x7ff);
}]>;

// The low 11 bits of the mask, used as the ORI/XORI immediate.
def BSETINVORIMaskLow : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() & 0x7ff,
                                   SDLoc(N), N->getValueType(0));
}]>;
126
// Check if (and r, i) can be optimized to (BCLRI (BCLRI r, i0), i1),
// in which i = ~((1<<i0) | (1<<i1)).
def BCLRITwoBitsMask : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  // The immediate should not be a simm12.
  if (isInt<12>(N->getSExtValue()))
    return false;
  // The immediate must have exactly two bits clear.
  return (unsigned)llvm::popcount(N->getZExtValue()) == Subtarget->getXLen() - 2;
}]>;

// Index of the lower of the two clear bits.
def BCLRITwoBitsMaskLow : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(llvm::countr_zero(~N->getZExtValue()),
                                   SDLoc(N), N->getValueType(0));
}]>;

// Index of the higher of the two clear bits. On RV32 the upper 32 bits are
// forced to ones first so that ~I only contains the in-range clear bits.
def BCLRITwoBitsMaskHigh : SDNodeXForm<imm, [{
  uint64_t I = N->getSExtValue();
  if (!Subtarget->is64Bit())
    I |= 0xffffffffull << 32;
  return CurDAG->getTargetConstant(llvm::Log2_64(~I), SDLoc(N),
                                   N->getValueType(0));
}]>;

// Check if (and r, i) can be optimized to (BCLRI (ANDI r, i0), i1),
// in which i = i0 & ~(1<<i1).
def BCLRIANDIMask : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  // The immediate should not be a simm12.
  if (isInt<12>(N->getSExtValue()))
    return false;
  // There should be only one clear bit from bit 11 to the top.
  uint64_t I = N->getZExtValue() | 0x7ff;
  return Subtarget->is64Bit() ? isPowerOf2_64(~I) : isPowerOf2_32(~I);
}]>;

// The ANDI immediate: the mask's low 11 bits with all upper bits set.
def BCLRIANDIMaskLow : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant((N->getZExtValue() & 0x7ff) | ~0x7ffull,
                                   SDLoc(N), N->getValueType(0));
}]>;
169
// Constant of the form (3 << C) with C > 0, i.e. not a plain 3.
def C3LeftShift : PatLeaf<(imm), [{
  uint64_t C = N->getZExtValue();
  return C > 3 && (C >> llvm::countr_zero(C)) == 3;
}]>;

// Constant of the form (5 << C) with C > 0, i.e. not a plain 5.
def C5LeftShift : PatLeaf<(imm), [{
  uint64_t C = N->getZExtValue();
  return C > 5 && (C >> llvm::countr_zero(C)) == 5;
}]>;

// Constant of the form (9 << C) with C > 0, i.e. not a plain 9.
def C9LeftShift : PatLeaf<(imm), [{
  uint64_t C = N->getZExtValue();
  return C > 9 && (C >> llvm::countr_zero(C)) == 9;
}]>;

// Constant of the form (3 << C) where C is less than 32.
def C3LeftShiftUW : PatLeaf<(imm), [{
  uint64_t C = N->getZExtValue();
  unsigned Shift = llvm::countr_zero(C);
  return 1 <= Shift && Shift < 32 && (C >> Shift) == 3;
}]>;

// Constant of the form (5 << C) where C is less than 32.
def C5LeftShiftUW : PatLeaf<(imm), [{
  uint64_t C = N->getZExtValue();
  unsigned Shift = llvm::countr_zero(C);
  return 1 <= Shift && Shift < 32 && (C >> Shift) == 5;
}]>;

// Constant of the form (9 << C) where C is less than 32.
def C9LeftShiftUW : PatLeaf<(imm), [{
  uint64_t C = N->getZExtValue();
  unsigned Shift = llvm::countr_zero(C);
  return 1 <= Shift && Shift < 32 && (C >> Shift) == 9;
}]>;
205
// A constant that is a multiple of 4 in shifted-simm12 range, worth
// materializing with ADDI + SH2ADD.
def CSImm12MulBy4 : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  int64_t C = N->getSExtValue();
  // Skip if C is simm12, an lui, or can be optimized by the PatLeaf AddiPair.
  return !isInt<13>(C) && !isShiftedInt<20, 12>(C) && isShiftedInt<12, 2>(C);
}]>;

// A constant that is a multiple of 8 in shifted-simm12 range, worth
// materializing with ADDI + SH3ADD.
def CSImm12MulBy8 : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  int64_t C = N->getSExtValue();
  // Skip if C is simm12, an lui or can be optimized by the PatLeaf AddiPair or
  // CSImm12MulBy4.
  return !isInt<14>(C) && !isShiftedInt<20, 12>(C) && isShiftedInt<12, 3>(C);
}]>;

// Arithmetic-shift-right the immediate by 2 (inverse of the *4 scaling).
def SimmShiftRightBy2XForm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue() >> 2, SDLoc(N),
                                   N->getValueType(0));
}]>;

// Arithmetic-shift-right the immediate by 3 (inverse of the *8 scaling).
def SimmShiftRightBy3XForm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue() >> 3, SDLoc(N),
                                   N->getValueType(0));
}]>;
232
// Pattern to exclude simm12 immediates from matching, namely `non_imm12`.
// GISel currently doesn't support PatFrag for leaf nodes, so `non_imm12`
// cannot be implemented in that way. To reuse patterns between the two
// ISels, we instead create PatFrag on operators that use `non_imm12`.
class binop_with_non_imm12<SDPatternOperator binop>
  : PatFrag<(ops node:$x, node:$y), (binop node:$x, node:$y), [{
  auto *C = dyn_cast<ConstantSDNode>(Operands[1]);
  return !C || !isInt<12>(C->getSExtValue());
}]> {
  let PredicateCodeUsesOperands = 1;
  let GISelPredicateCode = [{
    const MachineOperand &ImmOp = *Operands[1];
    const MachineFunction &MF = *MI.getParent()->getParent();
    const MachineRegisterInfo &MRI = MF.getRegInfo();

    if (ImmOp.isReg() && ImmOp.getReg())
      if (auto Val = getIConstantVRegValWithLookThrough(ImmOp.getReg(), MRI)) {
        // We do NOT want immediates that fit in 12 bits.
        return !isInt<12>(Val->Value.getSExtValue());
      }

    return true;
  }];
}
def add_non_imm12       : binop_with_non_imm12<add>;
def or_is_add_non_imm12 : binop_with_non_imm12<or_is_add>;
259
// A mask of 32 contiguous ones shifted left by 1..31 bits.
def Shifted32OnesMask : IntImmLeaf<XLenVT, [{
  if (!Imm.isShiftedMask())
    return false;

  unsigned TrailingZeros = Imm.countr_zero();
  return TrailingZeros > 0 && TrailingZeros < 32 &&
         Imm == UINT64_C(0xFFFFFFFF) << TrailingZeros;
}], TrailingZeros>;

// ComplexPatterns matching the shifted operand of sh[1-3]add[.uw].
def sh1add_op : ComplexPattern<XLenVT, 1, "selectSHXADDOp<1>", [], [], 6>;
def sh2add_op : ComplexPattern<XLenVT, 1, "selectSHXADDOp<2>", [], [], 6>;
def sh3add_op : ComplexPattern<XLenVT, 1, "selectSHXADDOp<3>", [], [], 6>;

def sh1add_uw_op : ComplexPattern<XLenVT, 1, "selectSHXADD_UWOp<1>", [], [], 6>;
def sh2add_uw_op : ComplexPattern<XLenVT, 1, "selectSHXADD_UWOp<2>", [], [], 6>;
def sh3add_uw_op : ComplexPattern<XLenVT, 1, "selectSHXADD_UWOp<3>", [], [], 6>;
276
//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class RVBUnary<bits<12> imm12, bits<3> funct3,
               RISCVOpcode opcode, string opcodestr>
    : RVInstIUnary<imm12, funct3, opcode, (outs GPR:$rd), (ins GPR:$rs1),
                   opcodestr, "$rd, $rs1">;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class RVBShift_ri<bits<5> imm11_7, bits<3> funct3, RISCVOpcode opcode,
                  string opcodestr>
    : RVInstIShift<imm11_7, funct3, opcode, (outs GPR:$rd),
                   (ins GPR:$rs1, uimmlog2xlen:$shamt), opcodestr,
                   "$rd, $rs1, $shamt">;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class RVBShiftW_ri<bits<7> imm11_5, bits<3> funct3, RISCVOpcode opcode,
                   string opcodestr>
    : RVInstIShiftW<imm11_5, funct3, opcode, (outs GPR:$rd),
                    (ins GPR:$rs1, uimm5:$shamt), opcodestr,
                    "$rd, $rs1, $shamt">;
300
//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtZbbOrZbkb] in {
def ANDN  : ALU_rr<0b0100000, 0b111, "andn">,
            Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def ORN   : ALU_rr<0b0100000, 0b110, "orn">,
            Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def XNOR  : ALU_rr<0b0100000, 0b100, "xnor">,
            Sched<[WriteIALU, ReadIALU, ReadIALU]>;
} // Predicates = [HasStdExtZbbOrZbkb]

let Predicates = [HasStdExtZba] in {
def SH1ADD : ALU_rr<0b0010000, 0b010, "sh1add">,
             Sched<[WriteSHXADD, ReadSHXADD, ReadSHXADD]>;
def SH2ADD : ALU_rr<0b0010000, 0b100, "sh2add">,
             Sched<[WriteSHXADD, ReadSHXADD, ReadSHXADD]>;
def SH3ADD : ALU_rr<0b0010000, 0b110, "sh3add">,
             Sched<[WriteSHXADD, ReadSHXADD, ReadSHXADD]>;
} // Predicates = [HasStdExtZba]

let Predicates = [HasStdExtZba, IsRV64] in {
def SLLI_UW : RVBShift_ri<0b00001, 0b001, OPC_OP_IMM_32, "slli.uw">,
              Sched<[WriteShiftImm32, ReadShiftImm32]>;
def ADD_UW : ALUW_rr<0b0000100, 0b000, "add.uw">,
             Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>;
def SH1ADD_UW : ALUW_rr<0b0010000, 0b010, "sh1add.uw">,
                Sched<[WriteSHXADD32, ReadSHXADD32, ReadSHXADD32]>;
def SH2ADD_UW : ALUW_rr<0b0010000, 0b100, "sh2add.uw">,
                Sched<[WriteSHXADD32, ReadSHXADD32, ReadSHXADD32]>;
def SH3ADD_UW : ALUW_rr<0b0010000, 0b110, "sh3add.uw">,
                Sched<[WriteSHXADD32, ReadSHXADD32, ReadSHXADD32]>;
} // Predicates = [HasStdExtZba, IsRV64]
335
let Predicates = [HasStdExtZbbOrZbkb] in {
def ROL   : ALU_rr<0b0110000, 0b001, "rol">,
            Sched<[WriteRotateReg, ReadRotateReg, ReadRotateReg]>;
def ROR   : ALU_rr<0b0110000, 0b101, "ror">,
            Sched<[WriteRotateReg, ReadRotateReg, ReadRotateReg]>;

def RORI  : RVBShift_ri<0b01100, 0b101, OPC_OP_IMM, "rori">,
            Sched<[WriteRotateImm, ReadRotateImm]>;
} // Predicates = [HasStdExtZbbOrZbkb]

let Predicates = [HasStdExtZbbOrZbkb, IsRV64], IsSignExtendingOpW = 1 in {
def ROLW  : ALUW_rr<0b0110000, 0b001, "rolw">,
            Sched<[WriteRotateReg32, ReadRotateReg32, ReadRotateReg32]>;
def RORW  : ALUW_rr<0b0110000, 0b101, "rorw">,
            Sched<[WriteRotateReg32, ReadRotateReg32, ReadRotateReg32]>;

def RORIW : RVBShiftW_ri<0b0110000, 0b101, OPC_OP_IMM_32, "roriw">,
            Sched<[WriteRotateImm32, ReadRotateImm32]>;
} // Predicates = [HasStdExtZbbOrZbkb, IsRV64]

let Predicates = [HasStdExtZbs] in {
def BCLR : ALU_rr<0b0100100, 0b001, "bclr">,
           Sched<[WriteSingleBit, ReadSingleBit, ReadSingleBit]>;
def BSET : ALU_rr<0b0010100, 0b001, "bset">,
           Sched<[WriteSingleBit, ReadSingleBit, ReadSingleBit]>;
def BINV : ALU_rr<0b0110100, 0b001, "binv">,
           Sched<[WriteSingleBit, ReadSingleBit, ReadSingleBit]>;
let IsSignExtendingOpW = 1 in
def BEXT : ALU_rr<0b0100100, 0b101, "bext">,
           Sched<[WriteBEXT, ReadSingleBit, ReadSingleBit]>;

def BCLRI : RVBShift_ri<0b01001, 0b001, OPC_OP_IMM, "bclri">,
            Sched<[WriteSingleBitImm, ReadSingleBitImm]>;
def BSETI : RVBShift_ri<0b00101, 0b001, OPC_OP_IMM, "bseti">,
            Sched<[WriteSingleBitImm, ReadSingleBitImm]>;
def BINVI : RVBShift_ri<0b01101, 0b001, OPC_OP_IMM, "binvi">,
            Sched<[WriteSingleBitImm, ReadSingleBitImm]>;
let IsSignExtendingOpW = 1 in
def BEXTI : RVBShift_ri<0b01001, 0b101, OPC_OP_IMM, "bexti">,
            Sched<[WriteBEXTI, ReadSingleBitImm]>;
} // Predicates = [HasStdExtZbs]
377
// These instructions were named xperm.n and xperm.b in the last version of
// the draft bit manipulation specification they were included in. However, we
// use the mnemonics given to them in the ratified Zbkx extension.
let Predicates = [HasStdExtZbkx] in {
def XPERM4 : ALU_rr<0b0010100, 0b010, "xperm4">,
             Sched<[WriteXPERM, ReadXPERM, ReadXPERM]>;
def XPERM8 : ALU_rr<0b0010100, 0b100, "xperm8">,
             Sched<[WriteXPERM, ReadXPERM, ReadXPERM]>;
} // Predicates = [HasStdExtZbkx]

let Predicates = [HasStdExtZbb], IsSignExtendingOpW = 1 in {
def CLZ  : RVBUnary<0b011000000000, 0b001, OPC_OP_IMM, "clz">,
           Sched<[WriteCLZ, ReadCLZ]>;
def CTZ  : RVBUnary<0b011000000001, 0b001, OPC_OP_IMM, "ctz">,
           Sched<[WriteCTZ, ReadCTZ]>;
def CPOP : RVBUnary<0b011000000010, 0b001, OPC_OP_IMM, "cpop">,
           Sched<[WriteCPOP, ReadCPOP]>;
} // Predicates = [HasStdExtZbb]

let Predicates = [HasStdExtZbb, IsRV64], IsSignExtendingOpW = 1 in {
def CLZW   : RVBUnary<0b011000000000, 0b001, OPC_OP_IMM_32, "clzw">,
             Sched<[WriteCLZ32, ReadCLZ32]>;
def CTZW   : RVBUnary<0b011000000001, 0b001, OPC_OP_IMM_32, "ctzw">,
             Sched<[WriteCTZ32, ReadCTZ32]>;
def CPOPW  : RVBUnary<0b011000000010, 0b001, OPC_OP_IMM_32, "cpopw">,
             Sched<[WriteCPOP32, ReadCPOP32]>;
} // Predicates = [HasStdExtZbb, IsRV64]

let Predicates = [HasStdExtZbb], IsSignExtendingOpW = 1 in {
def SEXT_B : RVBUnary<0b011000000100, 0b001, OPC_OP_IMM, "sext.b">,
             Sched<[WriteIALU, ReadIALU]>;
def SEXT_H : RVBUnary<0b011000000101, 0b001, OPC_OP_IMM, "sext.h">,
             Sched<[WriteIALU, ReadIALU]>;
} // Predicates = [HasStdExtZbb]
412
let Predicates = [HasStdExtZbc] in {
def CLMULR : ALU_rr<0b0000101, 0b010, "clmulr", Commutable=1>,
             Sched<[WriteCLMUL, ReadCLMUL, ReadCLMUL]>;
} // Predicates = [HasStdExtZbc]

let Predicates = [HasStdExtZbcOrZbkc] in {
def CLMUL  : ALU_rr<0b0000101, 0b001, "clmul", Commutable=1>,
             Sched<[WriteCLMUL, ReadCLMUL, ReadCLMUL]>;
def CLMULH : ALU_rr<0b0000101, 0b011, "clmulh", Commutable=1>,
             Sched<[WriteCLMUL, ReadCLMUL, ReadCLMUL]>;
} // Predicates = [HasStdExtZbcOrZbkc]

let Predicates = [HasStdExtZbb] in {
def MIN  : ALU_rr<0b0000101, 0b100, "min", Commutable=1>,
           Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def MINU : ALU_rr<0b0000101, 0b101, "minu", Commutable=1>,
           Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def MAX  : ALU_rr<0b0000101, 0b110, "max", Commutable=1>,
           Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def MAXU : ALU_rr<0b0000101, 0b111, "maxu", Commutable=1>,
           Sched<[WriteIALU, ReadIALU, ReadIALU]>;
} // Predicates = [HasStdExtZbb]

let Predicates = [HasStdExtZbkb] in {
def PACK  : ALU_rr<0b0000100, 0b100, "pack">,
            Sched<[WritePACK, ReadPACK, ReadPACK]>;
let IsSignExtendingOpW = 1 in
def PACKH : ALU_rr<0b0000100, 0b111, "packh">,
            Sched<[WritePACK, ReadPACK, ReadPACK]>;
} // Predicates = [HasStdExtZbkb]

let Predicates = [HasStdExtZbkb, IsRV64], IsSignExtendingOpW = 1 in
def PACKW  : ALUW_rr<0b0000100, 0b100, "packw">,
             Sched<[WritePACK32, ReadPACK32, ReadPACK32]>;
447
let Predicates = [HasStdExtZbb, IsRV32] in {
def ZEXT_H_RV32 : RVBUnary<0b000010000000, 0b100, OPC_OP, "zext.h">,
                  Sched<[WriteIALU, ReadIALU]>;
} // Predicates = [HasStdExtZbb, IsRV32]

let Predicates = [HasStdExtZbb, IsRV64], IsSignExtendingOpW = 1 in {
def ZEXT_H_RV64 : RVBUnary<0b000010000000, 0b100, OPC_OP_32, "zext.h">,
                  Sched<[WriteIALU, ReadIALU]>;
} // Predicates = [HasStdExtZbb, IsRV64]

let Predicates = [HasStdExtZbbOrZbkb, IsRV32] in {
def REV8_RV32 : RVBUnary<0b011010011000, 0b101, OPC_OP_IMM, "rev8">,
                Sched<[WriteREV8, ReadREV8]>;
} // Predicates = [HasStdExtZbbOrZbkb, IsRV32]

let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in {
def REV8_RV64 : RVBUnary<0b011010111000, 0b101, OPC_OP_IMM, "rev8">,
                Sched<[WriteREV8, ReadREV8]>;
} // Predicates = [HasStdExtZbbOrZbkb, IsRV64]

let Predicates = [HasStdExtZbb] in {
def ORC_B : RVBUnary<0b001010000111, 0b101, OPC_OP_IMM, "orc.b">,
            Sched<[WriteORCB, ReadORCB]>;
} // Predicates = [HasStdExtZbb]

let Predicates = [HasStdExtZbkb] in
def BREV8 : RVBUnary<0b011010000111, 0b101, OPC_OP_IMM, "brev8">,
            Sched<[WriteBREV8, ReadBREV8]>;

let Predicates = [HasStdExtZbkb, IsRV32] in {
def ZIP_RV32   : RVBUnary<0b000010001111, 0b001, OPC_OP_IMM, "zip">,
                 Sched<[WriteZIP, ReadZIP]>;
def UNZIP_RV32 : RVBUnary<0b000010001111, 0b101, OPC_OP_IMM, "unzip">,
                 Sched<[WriteZIP, ReadZIP]>;
} // Predicates = [HasStdExtZbkb, IsRV32]
483
484
//===----------------------------------------------------------------------===//
// Pseudo Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtZba, IsRV64] in {
def : InstAlias<"zext.w $rd, $rs", (ADD_UW GPR:$rd, GPR:$rs, X0)>;
} // Predicates = [HasStdExtZba, IsRV64]

let Predicates = [HasStdExtZbb] in {
def : InstAlias<"ror $rd, $rs1, $shamt",
                (RORI  GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
} // Predicates = [HasStdExtZbb]

let Predicates = [HasStdExtZbb, IsRV64] in {
def : InstAlias<"rorw $rd, $rs1, $shamt",
                (RORIW  GPR:$rd, GPR:$rs1, uimm5:$shamt), 0>;
} // Predicates = [HasStdExtZbb, IsRV64]

let Predicates = [HasStdExtZbs] in {
def : InstAlias<"bset $rd, $rs1, $shamt",
                (BSETI  GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
def : InstAlias<"bclr $rd, $rs1, $shamt",
                (BCLRI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
def : InstAlias<"binv $rd, $rs1, $shamt",
                (BINVI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
def : InstAlias<"bext $rd, $rs1, $shamt",
                (BEXTI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
} // Predicates = [HasStdExtZbs]
513
//===----------------------------------------------------------------------===//
// Codegen patterns
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtZbbOrZbkb] in {
def : Pat<(XLenVT (and GPR:$rs1, (not GPR:$rs2))), (ANDN GPR:$rs1, GPR:$rs2)>;
def : Pat<(XLenVT (or  GPR:$rs1, (not GPR:$rs2))), (ORN  GPR:$rs1, GPR:$rs2)>;
def : Pat<(XLenVT (xor GPR:$rs1, (not GPR:$rs2))), (XNOR GPR:$rs1, GPR:$rs2)>;
} // Predicates = [HasStdExtZbbOrZbkb]

let Predicates = [HasStdExtZbbOrZbkb] in {
def : PatGprGpr<shiftop<rotl>, ROL>;
def : PatGprGpr<shiftop<rotr>, ROR>;

def : PatGprImm<rotr, RORI, uimmlog2xlen>;
// There's no encoding for roli in the 'B' extension as it can be
// implemented with rori by negating the immediate.
def : Pat<(XLenVT (rotl GPR:$rs1, uimmlog2xlen:$shamt)),
          (RORI GPR:$rs1, (ImmSubFromXLen uimmlog2xlen:$shamt))>;
} // Predicates = [HasStdExtZbbOrZbkb]

let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in {
def : PatGprGpr<shiftopw<riscv_rolw>, ROLW>;
def : PatGprGpr<shiftopw<riscv_rorw>, RORW>;
def : PatGprImm<riscv_rorw, RORIW, uimm5>;
def : Pat<(riscv_rolw GPR:$rs1, uimm5:$rs2),
          (RORIW GPR:$rs1, (ImmSubFrom32 uimm5:$rs2))>;
} // Predicates = [HasStdExtZbbOrZbkb, IsRV64]
542
let Predicates = [HasStdExtZbs] in {
def : Pat<(XLenVT (and (not (shiftop<shl> 1, (XLenVT GPR:$rs2))), GPR:$rs1)),
          (BCLR GPR:$rs1, GPR:$rs2)>;
def : Pat<(XLenVT (and (rotl -2, (XLenVT GPR:$rs2)), GPR:$rs1)),
          (BCLR GPR:$rs1, GPR:$rs2)>;
def : Pat<(XLenVT (or (shiftop<shl> 1, (XLenVT GPR:$rs2)), GPR:$rs1)),
          (BSET GPR:$rs1, GPR:$rs2)>;
def : Pat<(XLenVT (xor (shiftop<shl> 1, (XLenVT GPR:$rs2)), GPR:$rs1)),
          (BINV GPR:$rs1, GPR:$rs2)>;
def : Pat<(XLenVT (and (shiftop<srl> GPR:$rs1, (XLenVT GPR:$rs2)), 1)),
          (BEXT GPR:$rs1, GPR:$rs2)>;

def : Pat<(XLenVT (shiftop<shl> 1, (XLenVT GPR:$rs2))),
          (BSET (XLenVT X0), GPR:$rs2)>;
def : Pat<(XLenVT (not (shiftop<shl> -1, (XLenVT GPR:$rs2)))),
          (ADDI (BSET (XLenVT X0), GPR:$rs2), -1)>;

def : Pat<(XLenVT (and GPR:$rs1, BCLRMask:$mask)),
          (BCLRI GPR:$rs1, BCLRMask:$mask)>;
def : Pat<(XLenVT (or GPR:$rs1, SingleBitSetMask:$mask)),
          (BSETI GPR:$rs1, SingleBitSetMask:$mask)>;
def : Pat<(XLenVT (xor GPR:$rs1, SingleBitSetMask:$mask)),
          (BINVI GPR:$rs1, SingleBitSetMask:$mask)>;

def : Pat<(XLenVT (and (srl GPR:$rs1, uimmlog2xlen:$shamt), (XLenVT 1))),
          (BEXTI GPR:$rs1, uimmlog2xlen:$shamt)>;

def : Pat<(XLenVT (seteq (XLenVT (and GPR:$rs1, SingleBitSetMask:$mask)), 0)),
          (BEXTI (XORI GPR:$rs1, -1), SingleBitSetMask:$mask)>;

def : Pat<(XLenVT (or GPR:$r, BSETINVTwoBitsMask:$i)),
          (BSETI (BSETI GPR:$r, (TrailingZeros BSETINVTwoBitsMask:$i)),
                 (BSETINVTwoBitsMaskHigh BSETINVTwoBitsMask:$i))>;
def : Pat<(XLenVT (xor GPR:$r, BSETINVTwoBitsMask:$i)),
          (BINVI (BINVI GPR:$r, (TrailingZeros BSETINVTwoBitsMask:$i)),
                 (BSETINVTwoBitsMaskHigh BSETINVTwoBitsMask:$i))>;
def : Pat<(XLenVT (or GPR:$r, BSETINVORIMask:$i)),
          (BSETI (ORI GPR:$r, (BSETINVORIMaskLow BSETINVORIMask:$i)),
                 (BSETINVTwoBitsMaskHigh BSETINVORIMask:$i))>;
def : Pat<(XLenVT (xor GPR:$r, BSETINVORIMask:$i)),
          (BINVI (XORI GPR:$r, (BSETINVORIMaskLow BSETINVORIMask:$i)),
                 (BSETINVTwoBitsMaskHigh BSETINVORIMask:$i))>;
def : Pat<(XLenVT (and GPR:$r, BCLRITwoBitsMask:$i)),
          (BCLRI (BCLRI GPR:$r, (BCLRITwoBitsMaskLow BCLRITwoBitsMask:$i)),
                 (BCLRITwoBitsMaskHigh BCLRITwoBitsMask:$i))>;
def : Pat<(XLenVT (and GPR:$r, BCLRIANDIMask:$i)),
          (BCLRI (ANDI GPR:$r, (BCLRIANDIMaskLow BCLRIANDIMask:$i)),
                 (BCLRITwoBitsMaskHigh BCLRIANDIMask:$i))>;
} // Predicates = [HasStdExtZbs]
592
let Predicates = [HasStdExtZbb] in
def : PatGpr<riscv_orc_b, ORC_B>;

let Predicates = [HasStdExtZbkb] in
def : PatGpr<riscv_brev8, BREV8>;

let Predicates = [HasStdExtZbkb, IsRV32] in {
// We treat zip and unzip as separate instructions, so match it directly.
def : PatGpr<riscv_zip, ZIP_RV32, i32>;
def : PatGpr<riscv_unzip, UNZIP_RV32, i32>;
} // Predicates = [HasStdExtZbkb, IsRV32]

let Predicates = [HasStdExtZbb] in {
def : PatGpr<ctlz, CLZ>;
def : PatGpr<cttz, CTZ>;
def : PatGpr<ctpop, CPOP>;
} // Predicates = [HasStdExtZbb]

let Predicates = [HasStdExtZbb, IsRV64] in {
def : PatGpr<riscv_clzw, CLZW>;
def : PatGpr<riscv_ctzw, CTZW>;
def : Pat<(i64 (ctpop (i64 (zexti32 (i64 GPR:$rs1))))), (CPOPW GPR:$rs1)>;

def : Pat<(i64 (riscv_absw GPR:$rs1)),
          (MAX GPR:$rs1, (SUBW (XLenVT X0), GPR:$rs1))>;
} // Predicates = [HasStdExtZbb, IsRV64]

let Predicates = [HasStdExtZbb] in {
def : Pat<(XLenVT (sext_inreg GPR:$rs1, i8)), (SEXT_B GPR:$rs1)>;
def : Pat<(XLenVT (sext_inreg GPR:$rs1, i16)), (SEXT_H GPR:$rs1)>;
} // Predicates = [HasStdExtZbb]

let Predicates = [HasStdExtZbb] in {
def : PatGprGpr<smin, MIN>;
def : PatGprGpr<smax, MAX>;
def : PatGprGpr<umin, MINU>;
def : PatGprGpr<umax, MAXU>;
} // Predicates = [HasStdExtZbb]
631
let Predicates = [HasStdExtZbbOrZbkb, IsRV32] in
def : PatGpr<bswap, REV8_RV32, i32>;

let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in
def : PatGpr<bswap, REV8_RV64, i64>;

let Predicates = [HasStdExtZbkb] in {
def : Pat<(or (and (shl GPR:$rs2, (XLenVT 8)), 0xFFFF),
              (zexti8 (XLenVT GPR:$rs1))),
          (PACKH GPR:$rs1, GPR:$rs2)>;
def : Pat<(or (shl (zexti8 (XLenVT GPR:$rs2)), (XLenVT 8)),
              (zexti8 (XLenVT GPR:$rs1))),
          (PACKH GPR:$rs1, GPR:$rs2)>;
def : Pat<(and (or (shl GPR:$rs2, (XLenVT 8)),
                   (zexti8 (XLenVT GPR:$rs1))), 0xFFFF),
          (PACKH GPR:$rs1, GPR:$rs2)>;

def : Pat<(binop_allhusers<or> (shl GPR:$rs2, (XLenVT 8)),
                               (zexti8 (XLenVT GPR:$rs1))),
          (PACKH GPR:$rs1, GPR:$rs2)>;
} // Predicates = [HasStdExtZbkb]

let Predicates = [HasStdExtZbkb, IsRV32] in
def : Pat<(i32 (or (zexti16 (i32 GPR:$rs1)), (shl GPR:$rs2, (i32 16)))),
          (PACK GPR:$rs1, GPR:$rs2)>;

let Predicates = [HasStdExtZbkb, IsRV64] in {
def : Pat<(i64 (or (zexti32 (i64 GPR:$rs1)), (shl GPR:$rs2, (i64 32)))),
          (PACK GPR:$rs1, GPR:$rs2)>;

def : Pat<(binop_allwusers<or> (shl GPR:$rs2, (i64 16)),
                               (zexti16 (i64 GPR:$rs1))),
          (PACKW GPR:$rs1, GPR:$rs2)>;
def : Pat<(i64 (or (sext_inreg (shl GPR:$rs2, (i64 16)), i32),
                   (zexti16 (i64 GPR:$rs1)))),
          (PACKW GPR:$rs1, GPR:$rs2)>;
} // Predicates = [HasStdExtZbkb, IsRV64]

let Predicates = [HasStdExtZbb, IsRV32] in
def : Pat<(i32 (and GPR:$rs, 0xFFFF)), (ZEXT_H_RV32 GPR:$rs)>;
let Predicates = [HasStdExtZbb, IsRV64] in
def : Pat<(i64 (and GPR:$rs, 0xFFFF)), (ZEXT_H_RV64 GPR:$rs)>;
674
let Predicates = [HasStdExtZba] in {

foreach i = {1,2,3} in {
  defvar shxadd = !cast<Instruction>("SH"#i#"ADD");
  def : Pat<(XLenVT (add_non_imm12 (shl GPR:$rs1, (XLenVT i)), GPR:$rs2)),
            (shxadd GPR:$rs1, GPR:$rs2)>;

  defvar pat = !cast<ComplexPattern>("sh"#i#"add_op");
  // More complex cases use a ComplexPattern.
  def : Pat<(XLenVT (add_non_imm12 pat:$rs1, GPR:$rs2)),
            (shxadd pat:$rs1, GPR:$rs2)>;
}

// (rs1 * (2^a + 1) * 2^b) + rs2 selected as two shXadds.
def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 6)), GPR:$rs2),
          (SH1ADD (SH1ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 10)), GPR:$rs2),
          (SH1ADD (SH2ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 18)), GPR:$rs2),
          (SH1ADD (SH3ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 12)), GPR:$rs2),
          (SH2ADD (SH1ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 20)), GPR:$rs2),
          (SH2ADD (SH2ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 36)), GPR:$rs2),
          (SH2ADD (SH3ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 24)), GPR:$rs2),
          (SH3ADD (SH1ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 40)), GPR:$rs2),
          (SH3ADD (SH2ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 72)), GPR:$rs2),
          (SH3ADD (SH3ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;

// Add of a large multiple-of-4/8 constant via ADDI + shXadd.
def : Pat<(add (XLenVT GPR:$r), CSImm12MulBy4:$i),
          (SH2ADD (ADDI (XLenVT X0), (SimmShiftRightBy2XForm CSImm12MulBy4:$i)),
                  GPR:$r)>;
def : Pat<(add (XLenVT GPR:$r), CSImm12MulBy8:$i),
          (SH3ADD (ADDI (XLenVT X0), (SimmShiftRightBy3XForm CSImm12MulBy8:$i)),
                  GPR:$r)>;

// Multiply by (3/5/9) << C via shXadd + SLLI.
def : Pat<(mul (XLenVT GPR:$r), C3LeftShift:$i),
          (SLLI (SH1ADD GPR:$r, GPR:$r),
                (TrailingZeros C3LeftShift:$i))>;
def : Pat<(mul (XLenVT GPR:$r), C5LeftShift:$i),
          (SLLI (SH2ADD GPR:$r, GPR:$r),
                (TrailingZeros C5LeftShift:$i))>;
def : Pat<(mul (XLenVT GPR:$r), C9LeftShift:$i),
          (SLLI (SH3ADD GPR:$r, GPR:$r),
                (TrailingZeros C9LeftShift:$i))>;

// Multiply by small odd constants expressible with two shXadds.
def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 11)),
          (SH1ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r)>;
def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 19)),
          (SH1ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r)>;
def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 13)),
          (SH2ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r)>;
def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 21)),
          (SH2ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r)>;
def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 37)),
          (SH2ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r)>;
def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 25)),
          (SH3ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r)>;
def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 41)),
          (SH3ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r)>;
def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 73)),
          (SH3ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r)>;
def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 27)),
          (SH1ADD (SH3ADD GPR:$r, GPR:$r), (SH3ADD GPR:$r, GPR:$r))>;
def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 45)),
          (SH2ADD (SH3ADD GPR:$r, GPR:$r), (SH3ADD GPR:$r, GPR:$r))>;
def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 81)),
          (SH3ADD (SH3ADD GPR:$r, GPR:$r), (SH3ADD GPR:$r, GPR:$r))>;
} // Predicates = [HasStdExtZba]
747
let Predicates = [HasStdExtZba, IsRV64] in {
// Shift of a zero-extended 32-bit value: SLLI_UW performs both the mask
// and the shift in one instruction.
def : Pat<(i64 (shl (and GPR:$rs1, 0xFFFFFFFF), uimm5:$shamt)),
          (SLLI_UW GPR:$rs1, uimm5:$shamt)>;
// Match a shifted 0xffffffff mask. Use SRLI to clear the LSBs and SLLI_UW to
// mask and shift.
def : Pat<(i64 (and GPR:$rs1, Shifted32OnesMask:$mask)),
          (SLLI_UW (SRLI GPR:$rs1, Shifted32OnesMask:$mask),
                   Shifted32OnesMask:$mask)>;
// (zext32 rs1) + rs2 --> ADD_UW. add_non_imm12 excludes adds that a plain
// ADDI could already handle.
def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0xFFFFFFFF), GPR:$rs2)),
          (ADD_UW GPR:$rs1, GPR:$rs2)>;
// Plain zero extension of the low 32 bits: ADD_UW with x0 as the addend.
def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (ADD_UW GPR:$rs, (XLenVT X0))>;

// An OR that is known to behave as an add (no carries) selects the same way.
def : Pat<(i64 (or_is_add_non_imm12 (and GPR:$rs1, 0xFFFFFFFF), GPR:$rs2)),
          (ADD_UW GPR:$rs1, GPR:$rs2)>;

// ((zext32 rs1) << i) + rs2 --> SHiADD_UW for i in 1..3.
foreach i = {1,2,3} in {
  defvar shxadd_uw = !cast<Instruction>("SH"#i#"ADD_UW");
  def : Pat<(i64 (add_non_imm12 (shl (and GPR:$rs1, 0xFFFFFFFF), (i64 i)), (XLenVT GPR:$rs2))),
            (shxadd_uw GPR:$rs1, GPR:$rs2)>;
}

// Same combine with shift-before-mask: the (32+i)-bit mask keeps all
// significant bits, and the bottom i bits are already zero from the shift,
// so this still equals ((zext32 rs1) << i) + rs2.
def : Pat<(i64 (add_non_imm12 (and (shl GPR:$rs1, (i64 1)), 0x1FFFFFFFF), (XLenVT GPR:$rs2))),
          (SH1ADD_UW GPR:$rs1, GPR:$rs2)>;
def : Pat<(i64 (add_non_imm12 (and (shl GPR:$rs1, (i64 2)), 0x3FFFFFFFF), (XLenVT GPR:$rs2))),
          (SH2ADD_UW GPR:$rs1, GPR:$rs2)>;
def : Pat<(i64 (add_non_imm12 (and (shl GPR:$rs1, (i64 3)), 0x7FFFFFFFF), (XLenVT GPR:$rs2))),
          (SH3ADD_UW GPR:$rs1, GPR:$rs2)>;

// More complex cases use a ComplexPattern.
foreach i = {1,2,3} in {
  defvar pat = !cast<ComplexPattern>("sh"#i#"add_uw_op");
  def : Pat<(i64 (add_non_imm12 pat:$rs1, (XLenVT GPR:$rs2))),
            (!cast<Instruction>("SH"#i#"ADD_UW") pat:$rs1, GPR:$rs2)>;
}

// (rs1 & (0xFFFFFFFF << i)) + rs2: SRLIW drops the low i bits and yields a
// zero upper word (the shifted 32-bit result has bit 31 clear, so the sign
// extension is zeros); SHiADD restores the scaling and adds rs2.
def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0xFFFFFFFE), (XLenVT GPR:$rs2))),
          (SH1ADD (SRLIW GPR:$rs1, 1), GPR:$rs2)>;
def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0xFFFFFFFC), (XLenVT GPR:$rs2))),
          (SH2ADD (SRLIW GPR:$rs1, 2), GPR:$rs2)>;
def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0xFFFFFFF8), (XLenVT GPR:$rs2))),
          (SH3ADD (SRLIW GPR:$rs1, 3), GPR:$rs2)>;

// Use SRLI to clear the LSBs and SHXADD_UW to mask and shift.
def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0x1FFFFFFFE), (XLenVT GPR:$rs2))),
          (SH1ADD_UW (SRLI GPR:$rs1, 1), GPR:$rs2)>;
def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0x3FFFFFFFC), (XLenVT GPR:$rs2))),
          (SH2ADD_UW (SRLI GPR:$rs1, 2), GPR:$rs2)>;
def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0x7FFFFFFF8), (XLenVT GPR:$rs2))),
          (SH3ADD_UW (SRLI GPR:$rs1, 3), GPR:$rs2)>;

// (zext32 r) * ((3,5,9) * 2^k): SLLI_UW forms (zext32 r) << k, and SHnADD
// of that value with itself multiplies it by 3/5/9.
def : Pat<(i64 (mul (and_oneuse GPR:$r, 0xFFFFFFFF), C3LeftShiftUW:$i)),
          (SH1ADD (SLLI_UW GPR:$r, (TrailingZeros C3LeftShiftUW:$i)),
                  (SLLI_UW GPR:$r, (TrailingZeros C3LeftShiftUW:$i)))>;
def : Pat<(i64 (mul (and_oneuse GPR:$r, 0xFFFFFFFF), C5LeftShiftUW:$i)),
          (SH2ADD (SLLI_UW GPR:$r, (TrailingZeros C5LeftShiftUW:$i)),
                  (SLLI_UW GPR:$r, (TrailingZeros C5LeftShiftUW:$i)))>;
def : Pat<(i64 (mul (and_oneuse GPR:$r, 0xFFFFFFFF), C9LeftShiftUW:$i)),
          (SH3ADD (SLLI_UW GPR:$r, (TrailingZeros C9LeftShiftUW:$i)),
                  (SLLI_UW GPR:$r, (TrailingZeros C9LeftShiftUW:$i)))>;
} // Predicates = [HasStdExtZba, IsRV64]
808
// Carry-less multiply (low and high halves) is available in both Zbc and
// the scalar-crypto Zbkc subset.
let Predicates = [HasStdExtZbcOrZbkc] in {
def : PatGprGpr<riscv_clmul, CLMUL>;
def : PatGprGpr<riscv_clmulh, CLMULH>;
} // Predicates = [HasStdExtZbcOrZbkc]
813
// CLMULR (bit-reversed carry-less multiply) exists only in Zbc, not Zbkc.
let Predicates = [HasStdExtZbc] in
def : PatGprGpr<riscv_clmulr, CLMULR>;
816
// Crossbar permutation intrinsics map directly onto the Zbkx instructions.
let Predicates = [HasStdExtZbkx] in {
def : PatGprGpr<int_riscv_xperm4, XPERM4>;
def : PatGprGpr<int_riscv_xperm8, XPERM8>;
} // Predicates = [HasStdExtZbkx]
821
822//===----------------------------------------------------------------------===//
823// Experimental RV64 i32 legalization patterns.
824//===----------------------------------------------------------------------===//
825
// i32 immediate with exactly one clear bit (~Imm is a power of two) that is
// out of simm12 range, so ANDI cannot be used and BCLRI is profitable.
def BCLRMaski32 : ImmLeaf<i32, [{
  return !isInt<12>(Imm) && isPowerOf2_32(~Imm);
}]>;
// i32 immediate with exactly one set bit that is out of simm12 range, so
// ORI/XORI cannot be used and BSETI/BINVI are profitable.
def SingleBitSetMaski32 : ImmLeaf<i32, [{
  return !isInt<12>(Imm) && isPowerOf2_32(Imm);
}]>;
832
let Predicates = [HasStdExtZbb, IsRV64] in {
// i32 bit-count operations select the 32-bit W-form instructions directly.
def : PatGpr<ctlz, CLZW, i32>;
def : PatGpr<cttz, CTZW, i32>;
def : PatGpr<ctpop, CPOPW, i32>;

// In-register sign extension from i8/i16 uses the dedicated Zbb extends.
def : Pat<(i32 (sext_inreg GPR:$rs1, i8)), (SEXT_B GPR:$rs1)>;
def : Pat<(i32 (sext_inreg GPR:$rs1, i16)), (SEXT_H GPR:$rs1)>;
} // Predicates = [HasStdExtZbb, IsRV64]
841
let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in {
// i32 logic with one inverted operand folds the NOT into ANDN/ORN/XNOR.
def : Pat<(i32 (and GPR:$rs1, (not GPR:$rs2))), (ANDN GPR:$rs1, GPR:$rs2)>;
def : Pat<(i32 (or  GPR:$rs1, (not GPR:$rs2))), (ORN  GPR:$rs1, GPR:$rs2)>;
def : Pat<(i32 (xor GPR:$rs1, (not GPR:$rs2))), (XNOR GPR:$rs1, GPR:$rs2)>;

// i32 rotates by a register amount use the 32-bit W-form rotates.
def : PatGprGpr<shiftopw<rotl>, ROLW, i32, i64>;
def : PatGprGpr<shiftopw<rotr>, RORW, i32, i64>;
def : PatGprImm<rotr, RORIW, uimm5, i32>;

// rotl by an immediate n is rotr by (32 - n); there is no ROLIW.
def : Pat<(i32 (rotl GPR:$rs1, uimm5:$rs2)),
          (RORIW GPR:$rs1, (ImmSubFrom32 uimm5:$rs2))>;
} // Predicates = [HasStdExtZbbOrZbkb, IsRV64]
854
let Predicates = [HasStdExtZbkb, IsRV64] in {
// PACKH: combine the low byte of rs1 with the low byte of rs2 shifted into
// bits [15:8]. Several equivalent DAG shapes are matched.
def : Pat<(or (and (shl GPR:$rs2, (i64 8)), 0xFFFF),
              (zexti8i32 (i32 GPR:$rs1))),
          (PACKH GPR:$rs1, GPR:$rs2)>;
def : Pat<(or (shl (zexti8i32 (i32 GPR:$rs2)), (i64 8)),
              (zexti8i32 (i32 GPR:$rs1))),
          (PACKH GPR:$rs1, GPR:$rs2)>;
// Same combine with the 0xFFFF mask applied after an any-extend of the OR.
def : Pat<(and (anyext (or (shl GPR:$rs2, (XLenVT 8)),
                           (zexti8i32 (i32 GPR:$rs1)))), 0xFFFF),
          (PACKH GPR:$rs1, GPR:$rs2)>;

// PACKW: low halfword of rs1 in bits [15:0], rs2 shifted into bits [31:16].
def : Pat<(i32 (or (shl GPR:$rs2, (i64 16)), (zexti16i32 (i32 GPR:$rs1)))),
          (PACKW GPR:$rs1, GPR:$rs2)>;
} // Predicates = [HasStdExtZbkb, IsRV64]
869
let Predicates = [HasStdExtZba, IsRV64] in {
// Shift of a value zero-extended from i32: SLLI_UW masks and shifts at once.
def : Pat<(shl (i64 (zext i32:$rs1)), uimm5:$shamt),
          (SLLI_UW GPR:$rs1, uimm5:$shamt)>;

// (zext rs1) + rs2 --> ADD_UW; add_non_imm12 leaves simm12 adds to ADDI.
def : Pat<(i64 (add_non_imm12 (zext GPR:$rs1), GPR:$rs2)),
          (ADD_UW GPR:$rs1, GPR:$rs2)>;
// Plain zero extension: ADD_UW with x0.
def : Pat<(zext GPR:$src), (ADD_UW GPR:$src, (XLenVT X0))>;

// An OR known to behave as an add selects the same way.
def : Pat<(i64 (or_is_add_non_imm12 (zext GPR:$rs1), GPR:$rs2)),
          (ADD_UW GPR:$rs1, GPR:$rs2)>;

// i32 (rs1 << i) + rs2 --> SHiADD for i in 1..3 (shift amount is XLenVT).
foreach i = {1,2,3} in {
  defvar shxadd = !cast<Instruction>("SH"#i#"ADD");
  def : Pat<(i32 (add_non_imm12 (shl GPR:$rs1, (i64 i)), GPR:$rs2)),
            (shxadd GPR:$rs1, GPR:$rs2)>;
}
} // Predicates = [HasStdExtZba, IsRV64]
887
let Predicates = [HasStdExtZbs, IsRV64] in {
// Single-bit clear: rs1 & ~(1 << rs2).
def : Pat<(i32 (and (not (shiftop<shl> 1, (XLenVT GPR:$rs2))), GPR:$rs1)),
          (BCLR GPR:$rs1, GPR:$rs2)>;
// rotl(-2, n) is 0xFFFFFFFE rotated left by n, i.e. a mask with only bit n
// clear, so this is also a single-bit clear.
def : Pat<(i32 (and (rotl -2, (XLenVT GPR:$rs2)), GPR:$rs1)),
          (BCLR GPR:$rs1, GPR:$rs2)>;
// Single-bit set: rs1 | (1 << rs2).
def : Pat<(i32 (or (shiftop<shl> 1, (XLenVT GPR:$rs2)), GPR:$rs1)),
          (BSET GPR:$rs1, GPR:$rs2)>;
// Single-bit invert: rs1 ^ (1 << rs2).
def : Pat<(i32 (xor (shiftop<shl> 1, (XLenVT GPR:$rs2)), GPR:$rs1)),
          (BINV GPR:$rs1, GPR:$rs2)>;
// Single-bit extract: (rs1 >> rs2) & 1, also when the AND happens on the
// any-extended i64 value.
def : Pat<(i32 (and (shiftop<srl> GPR:$rs1, (XLenVT GPR:$rs2)), 1)),
          (BEXT GPR:$rs1, GPR:$rs2)>;
def : Pat<(i64 (and (anyext (i32 (shiftop<srl> GPR:$rs1, (XLenVT GPR:$rs2)))), 1)),
          (BEXT GPR:$rs1, GPR:$rs2)>;

// (1 << rs2) on its own: set a single bit in zero.
def : Pat<(i32 (shiftop<shl> 1, (XLenVT GPR:$rs2))),
          (BSET (XLenVT X0), GPR:$rs2)>;
// ~(-1 << rs2) == (1 << rs2) - 1: low-bit mask of length rs2.
def : Pat<(i32 (not (shiftop<shl> -1, (XLenVT GPR:$rs2)))),
          (ADDI (BSET (XLenVT X0), GPR:$rs2), -1)>;

// Immediate-shamt bit extract.
def : Pat<(i32 (and (srl GPR:$rs1, uimm5:$shamt), (i32 1))),
          (BEXTI GPR:$rs1, uimm5:$shamt)>;

// Immediate single-bit clear/set/invert for masks out of simm12 range; the
// XForms convert the mask to its bit index.
def : Pat<(i32 (and GPR:$rs1, BCLRMaski32:$mask)),
          (BCLRI GPR:$rs1, (i64 (BCLRXForm $mask)))>;
def : Pat<(i32 (or GPR:$rs1, SingleBitSetMaski32:$mask)),
          (BSETI GPR:$rs1, (i64 (SingleBitSetMaskToIndex $mask)))>;
def : Pat<(i32 (xor GPR:$rs1, SingleBitSetMaski32:$mask)),
          (BINVI GPR:$rs1, (i64 (SingleBitSetMaskToIndex $mask)))>;
} // Predicates = [HasStdExtZbs, IsRV64]
917