//===-- RISCVInstrInfoZb.td - RISC-V Bitmanip instructions -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions from the standard Bitmanip
// extensions, versions:
//   Zba - 1.0
//   Zbb - 1.0
//   Zbc - 1.0
//   Zbs - 1.0
//
// This file also describes RISC-V instructions from the Zbk* extensions in
// Cryptography Extensions Volume I: Scalar & Entropy Source Instructions,
// versions:
//   Zbkb - 1.0
//   Zbkc - 1.0
//   Zbkx - 1.0
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//

def SDTIntShiftAddOp : SDTypeProfile<1, 3, [   // shl_add
  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 3>, SDTCisInt<0>, SDTCisInt<2>,
  SDTCisInt<3>
]>;

def riscv_shl_add : SDNode<"RISCVISD::SHL_ADD", SDTIntShiftAddOp>;
def riscv_clzw    : SDNode<"RISCVISD::CLZW",    SDT_RISCVIntUnaryOpW>;
def riscv_ctzw    : SDNode<"RISCVISD::CTZW",    SDT_RISCVIntUnaryOpW>;
def riscv_rolw    : SDNode<"RISCVISD::ROLW",    SDT_RISCVIntBinOpW>;
def riscv_rorw    : SDNode<"RISCVISD::RORW",    SDT_RISCVIntBinOpW>;
def riscv_brev8   : SDNode<"RISCVISD::BREV8",   SDTIntUnaryOp>;
def riscv_orc_b   : SDNode<"RISCVISD::ORC_B",   SDTIntUnaryOp>;
def riscv_zip     : SDNode<"RISCVISD::ZIP",     SDTIntUnaryOp>;
def riscv_unzip   : SDNode<"RISCVISD::UNZIP",   SDTIntUnaryOp>;
def riscv_absw    : SDNode<"RISCVISD::ABSW",    SDTIntUnaryOp>;
def riscv_clmul   : SDNode<"RISCVISD::CLMUL",   SDTIntBinOp>;
def riscv_clmulh  : SDNode<"RISCVISD::CLMULH",  SDTIntBinOp>;
def riscv_clmulr  : SDNode<"RISCVISD::CLMULR",  SDTIntBinOp>;

def UImmLog2XLenHalfAsmOperand : AsmOperandClass {
  let Name = "UImmLog2XLenHalf";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidUImmLog2XLenHalf";
}

def shfl_uimm : RISCVOp, ImmLeaf<XLenVT, [{
  if (Subtarget->is64Bit())
    return isUInt<5>(Imm);
  return isUInt<4>(Imm);
}]> {
  let ParserMatchClass = UImmLog2XLenHalfAsmOperand;
  let DecoderMethod = "decodeUImmOperand<5>";
  let OperandType = "OPERAND_UIMM_SHFL";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (!MCOp.evaluateAsConstantImm(Imm))
      return false;
    if (STI.getTargetTriple().isArch64Bit())
      return isUInt<5>(Imm);
    return isUInt<4>(Imm);
  }];
}

def BCLRXForm : SDNodeXForm<imm, [{
  // Find the lowest 0.
  return CurDAG->getTargetConstant(llvm::countr_one(N->getZExtValue()),
                                   SDLoc(N), N->getValueType(0));
}]>;

def SingleBitSetMaskToIndex : SDNodeXForm<imm, [{
  // Find the lowest 1.
  return CurDAG->getTargetConstant(llvm::countr_zero(N->getZExtValue()),
                                   SDLoc(N), N->getValueType(0));
}]>;

// Checks if this mask has a single 0 bit and cannot be used with ANDI.
def BCLRMask : ImmLeaf<XLenVT, [{
  if (Subtarget->is64Bit())
    return !isInt<12>(Imm) && isPowerOf2_64(~Imm);
  return !isInt<12>(Imm) && isPowerOf2_32(~Imm);
}], BCLRMask>;
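// Illustrative examples (not from the spec): on RV64 the mask
// 0xFFFFFFFFFFFEFFFF has only bit 16 clear and is too wide for ANDI, so
// BCLRXForm turns it into the bit index 16 for BCLRI. Likewise 0x10000 has
// only bit 16 set and is too wide for ORI/XORI, so SingleBitSetMaskToIndex
// yields 16 for BSETI/BINVI.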

// Check if (or r, i) can be optimized to (BSETI (BSETI r, i0), i1),
// in which i = (1 << i0) | (1 << i1).
def BSETINVTwoBitsMask : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  // The immediate should not be a simm12.
  if (isInt<12>(N->getSExtValue()))
    return false;
  // The immediate must have exactly two bits set.
  return llvm::popcount(N->getZExtValue()) == 2;
}]>;

def BSETINVTwoBitsMaskHigh : SDNodeXForm<imm, [{
  uint64_t I = N->getZExtValue();
  return CurDAG->getTargetConstant(llvm::Log2_64(I), SDLoc(N),
                                   N->getValueType(0));
}]>;
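// Illustrative example: i = 0x90000 = (1 << 16) | (1 << 19) is not a simm12
// and has exactly two bits set, so (or r, 0x90000) can be selected as
// (BSETI (BSETI r, 16), 19); TrailingZeros supplies the 16 and
// BSETINVTwoBitsMaskHigh supplies the 19.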

// Check if (or r, imm) can be optimized to (BSETI (ORI r, i0), i1),
// in which imm = i0 | (1 << i1).
def BSETINVORIMask : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  // The immediate should not be a simm12.
  if (isInt<12>(N->getSExtValue()))
    return false;
  // There should be only one set bit from bit 11 to the top.
  return isPowerOf2_64(N->getZExtValue() & ~0x7ff);
}]>;

def BSETINVORIMaskLow : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() & 0x7ff,
                                   SDLoc(N), N->getValueType(0));
}]>;
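// Illustrative example: imm = 0x10234 = 0x234 | (1 << 16), so (or r, 0x10234)
// can be selected as (BSETI (ORI r, 0x234), 16).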

// Check if (and r, i) can be optimized to (BCLRI (BCLRI r, i0), i1),
// in which i = ~((1<<i0) | (1<<i1)).
def BCLRITwoBitsMask : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  // The immediate should not be a simm12.
  if (isInt<12>(N->getSExtValue()))
    return false;
  // The immediate must have exactly two bits clear.
  return (unsigned)llvm::popcount(N->getZExtValue()) == Subtarget->getXLen() - 2;
}]>;

def BCLRITwoBitsMaskLow : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(llvm::countr_zero(~N->getZExtValue()),
                                   SDLoc(N), N->getValueType(0));
}]>;

def BCLRITwoBitsMaskHigh : SDNodeXForm<imm, [{
  uint64_t I = N->getSExtValue();
  if (!Subtarget->is64Bit())
    I |= 0xffffffffull << 32;
  return CurDAG->getTargetConstant(llvm::Log2_64(~I), SDLoc(N),
                                   N->getValueType(0));
}]>;
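// Illustrative example (RV64): i = ~((1 << 14) | (1 << 20)) has exactly two
// bits clear and is not a simm12, so (and r, i) can be selected as
// (BCLRI (BCLRI r, 14), 20).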

// Check if (and r, i) can be optimized to (BCLRI (ANDI r, i0), i1),
// in which i = i0 & ~(1<<i1).
def BCLRIANDIMask : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  // The immediate should not be a simm12.
  if (isInt<12>(N->getSExtValue()))
    return false;
  // There should be only one clear bit from bit 11 to the top.
  uint64_t I = N->getZExtValue() | 0x7ff;
  return Subtarget->is64Bit() ? isPowerOf2_64(~I) : isPowerOf2_32(~I);
}]>;

def BCLRIANDIMaskLow : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant((N->getZExtValue() & 0x7ff) | ~0x7ffull,
                                   SDLoc(N), N->getValueType(0));
}]>;
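// Illustrative example (RV64): i = 0xFFFFFFFFFFFFBFFE clears only bit 0 below
// bit 12 and only bit 14 above it, so (and r, i) can be selected as
// (BCLRI (ANDI r, -2), 14); BCLRIANDIMaskLow fills the upper bits of the ANDI
// immediate to produce the -2.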

def CSImm12MulBy4 : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  int64_t C = N->getSExtValue();
  // Skip if C is simm12, an lui, or can be optimized by the PatLeaf AddiPair.
  return !isInt<13>(C) && !isShiftedInt<20, 12>(C) && isShiftedInt<12, 2>(C);
}]>;

def CSImm12MulBy8 : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  int64_t C = N->getSExtValue();
  // Skip if C is simm12, an lui or can be optimized by the PatLeaf AddiPair or
  // CSImm12MulBy4.
  return !isInt<14>(C) && !isShiftedInt<20, 12>(C) && isShiftedInt<12, 3>(C);
}]>;

def SimmShiftRightBy2XForm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue() >> 2, SDLoc(N),
                                   N->getValueType(0));
}]>;

def SimmShiftRightBy3XForm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue() >> 3, SDLoc(N),
                                   N->getValueType(0));
}]>;
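// Illustrative example: C = 5000 = 4 * 1250 is neither a simm12 nor an lui
// immediate, so (add r, 5000) can be selected by the Zba patterns below as
// (SH2ADD (ADDI X0, 1250), r); C = 10000 = 8 * 1250 is handled the same way
// with SH3ADD.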

// Pattern to exclude simm12 immediates from matching, namely `non_imm12`.
// GISel currently doesn't support PatFrag for leaf nodes, so `non_imm12`
// cannot be implemented in that way. To reuse patterns between the two
// ISels, we instead create PatFrag on operators that use `non_imm12`.
class binop_with_non_imm12<SDPatternOperator binop>
  : PatFrag<(ops node:$x, node:$y), (binop node:$x, node:$y), [{
  auto *C = dyn_cast<ConstantSDNode>(Operands[1]);
  return !C || !isInt<12>(C->getSExtValue());
}]> {
  let PredicateCodeUsesOperands = 1;
  let GISelPredicateCode = [{
    const MachineOperand &ImmOp = *Operands[1];
    const MachineFunction &MF = *MI.getParent()->getParent();
    const MachineRegisterInfo &MRI = MF.getRegInfo();

    if (ImmOp.isReg() && ImmOp.getReg())
      if (auto Val = getIConstantVRegValWithLookThrough(ImmOp.getReg(), MRI)) {
        // We do NOT want immediates that fit in 12 bits.
        return !isInt<12>(Val->Value.getSExtValue());
      }

    return true;
  }];
}
def add_non_imm12       : binop_with_non_imm12<add>;
def add_like_non_imm12  : binop_with_non_imm12<add_like>;
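// Note: when the addend does fit in a simm12 it can presumably be folded into
// a plain ADDI, so these fragments deliberately leave such constants to the
// existing immediate-add patterns.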

def Shifted32OnesMask : IntImmLeaf<XLenVT, [{
  if (!Imm.isShiftedMask())
    return false;

  unsigned TrailingZeros = Imm.countr_zero();
  return TrailingZeros > 0 && TrailingZeros < 32 &&
         Imm == UINT64_C(0xFFFFFFFF) << TrailingZeros;
}], TrailingZeros>;

def sh1add_op : ComplexPattern<XLenVT, 1, "selectSHXADDOp<1>", [], [], 6>;
def sh2add_op : ComplexPattern<XLenVT, 1, "selectSHXADDOp<2>", [], [], 6>;
def sh3add_op : ComplexPattern<XLenVT, 1, "selectSHXADDOp<3>", [], [], 6>;

def sh1add_uw_op : ComplexPattern<XLenVT, 1, "selectSHXADD_UWOp<1>", [], [], 6>;
def sh2add_uw_op : ComplexPattern<XLenVT, 1, "selectSHXADD_UWOp<2>", [], [], 6>;
def sh3add_uw_op : ComplexPattern<XLenVT, 1, "selectSHXADD_UWOp<3>", [], [], 6>;

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class RVBUnary<bits<12> imm12, bits<3> funct3,
               RISCVOpcode opcode, string opcodestr>
    : RVInstIUnary<imm12, funct3, opcode, (outs GPR:$rd), (ins GPR:$rs1),
                   opcodestr, "$rd, $rs1">;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class RVBShift_ri<bits<5> imm11_7, bits<3> funct3, RISCVOpcode opcode,
                  string opcodestr>
    : RVInstIShift<imm11_7, funct3, opcode, (outs GPR:$rd),
                   (ins GPR:$rs1, uimmlog2xlen:$shamt), opcodestr,
                   "$rd, $rs1, $shamt">;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class RVBShiftW_ri<bits<7> imm11_5, bits<3> funct3, RISCVOpcode opcode,
                   string opcodestr>
    : RVInstIShiftW<imm11_5, funct3, opcode, (outs GPR:$rd),
                    (ins GPR:$rs1, uimm5:$shamt), opcodestr,
                    "$rd, $rs1, $shamt">;

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtZbbOrZbkb] in {
def ANDN  : ALU_rr<0b0100000, 0b111, "andn">,
            Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def ORN   : ALU_rr<0b0100000, 0b110, "orn">,
            Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def XNOR  : ALU_rr<0b0100000, 0b100, "xnor">,
            Sched<[WriteIALU, ReadIALU, ReadIALU]>;
} // Predicates = [HasStdExtZbbOrZbkb]

let Predicates = [HasStdExtZba] in {
def SH1ADD : ALU_rr<0b0010000, 0b010, "sh1add">,
             Sched<[WriteSHXADD, ReadSHXADD, ReadSHXADD]>;
def SH2ADD : ALU_rr<0b0010000, 0b100, "sh2add">,
             Sched<[WriteSHXADD, ReadSHXADD, ReadSHXADD]>;
def SH3ADD : ALU_rr<0b0010000, 0b110, "sh3add">,
             Sched<[WriteSHXADD, ReadSHXADD, ReadSHXADD]>;
} // Predicates = [HasStdExtZba]

let Predicates = [HasStdExtZba, IsRV64] in {
def SLLI_UW : RVBShift_ri<0b00001, 0b001, OPC_OP_IMM_32, "slli.uw">,
              Sched<[WriteShiftImm32, ReadShiftImm32]>;
def ADD_UW : ALUW_rr<0b0000100, 0b000, "add.uw">,
             Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>;
def SH1ADD_UW : ALUW_rr<0b0010000, 0b010, "sh1add.uw">,
                Sched<[WriteSHXADD32, ReadSHXADD32, ReadSHXADD32]>;
def SH2ADD_UW : ALUW_rr<0b0010000, 0b100, "sh2add.uw">,
                Sched<[WriteSHXADD32, ReadSHXADD32, ReadSHXADD32]>;
def SH3ADD_UW : ALUW_rr<0b0010000, 0b110, "sh3add.uw">,
                Sched<[WriteSHXADD32, ReadSHXADD32, ReadSHXADD32]>;
} // Predicates = [HasStdExtZba, IsRV64]

let Predicates = [HasStdExtZbbOrZbkb] in {
def ROL   : ALU_rr<0b0110000, 0b001, "rol">,
            Sched<[WriteRotateReg, ReadRotateReg, ReadRotateReg]>;
def ROR   : ALU_rr<0b0110000, 0b101, "ror">,
            Sched<[WriteRotateReg, ReadRotateReg, ReadRotateReg]>;

def RORI  : RVBShift_ri<0b01100, 0b101, OPC_OP_IMM, "rori">,
            Sched<[WriteRotateImm, ReadRotateImm]>;
} // Predicates = [HasStdExtZbbOrZbkb]

let Predicates = [HasStdExtZbbOrZbkb, IsRV64], IsSignExtendingOpW = 1 in {
def ROLW  : ALUW_rr<0b0110000, 0b001, "rolw">,
            Sched<[WriteRotateReg32, ReadRotateReg32, ReadRotateReg32]>;
def RORW  : ALUW_rr<0b0110000, 0b101, "rorw">,
            Sched<[WriteRotateReg32, ReadRotateReg32, ReadRotateReg32]>;

def RORIW : RVBShiftW_ri<0b0110000, 0b101, OPC_OP_IMM_32, "roriw">,
            Sched<[WriteRotateImm32, ReadRotateImm32]>;
} // Predicates = [HasStdExtZbbOrZbkb, IsRV64]

let Predicates = [HasStdExtZbs] in {
def BCLR : ALU_rr<0b0100100, 0b001, "bclr">,
           Sched<[WriteSingleBit, ReadSingleBit, ReadSingleBit]>;
def BSET : ALU_rr<0b0010100, 0b001, "bset">,
           Sched<[WriteSingleBit, ReadSingleBit, ReadSingleBit]>;
def BINV : ALU_rr<0b0110100, 0b001, "binv">,
           Sched<[WriteSingleBit, ReadSingleBit, ReadSingleBit]>;
let IsSignExtendingOpW = 1 in
def BEXT : ALU_rr<0b0100100, 0b101, "bext">,
           Sched<[WriteBEXT, ReadSingleBit, ReadSingleBit]>;

def BCLRI : RVBShift_ri<0b01001, 0b001, OPC_OP_IMM, "bclri">,
            Sched<[WriteSingleBitImm, ReadSingleBitImm]>;
def BSETI : RVBShift_ri<0b00101, 0b001, OPC_OP_IMM, "bseti">,
            Sched<[WriteSingleBitImm, ReadSingleBitImm]>;
def BINVI : RVBShift_ri<0b01101, 0b001, OPC_OP_IMM, "binvi">,
            Sched<[WriteSingleBitImm, ReadSingleBitImm]>;
let IsSignExtendingOpW = 1 in
def BEXTI : RVBShift_ri<0b01001, 0b101, OPC_OP_IMM, "bexti">,
            Sched<[WriteBEXTI, ReadSingleBitImm]>;
} // Predicates = [HasStdExtZbs]

// These instructions were named xperm.n and xperm.b in the last version of
// the draft bit manipulation specification they were included in. However, we
// use the mnemonics given to them in the ratified Zbkx extension.
let Predicates = [HasStdExtZbkx] in {
def XPERM4 : ALU_rr<0b0010100, 0b010, "xperm4">,
             Sched<[WriteXPERM, ReadXPERM, ReadXPERM]>;
def XPERM8 : ALU_rr<0b0010100, 0b100, "xperm8">,
             Sched<[WriteXPERM, ReadXPERM, ReadXPERM]>;
} // Predicates = [HasStdExtZbkx]

let Predicates = [HasStdExtZbb], IsSignExtendingOpW = 1 in {
def CLZ  : RVBUnary<0b011000000000, 0b001, OPC_OP_IMM, "clz">,
           Sched<[WriteCLZ, ReadCLZ]>;
def CTZ  : RVBUnary<0b011000000001, 0b001, OPC_OP_IMM, "ctz">,
           Sched<[WriteCTZ, ReadCTZ]>;
def CPOP : RVBUnary<0b011000000010, 0b001, OPC_OP_IMM, "cpop">,
           Sched<[WriteCPOP, ReadCPOP]>;
} // Predicates = [HasStdExtZbb]

let Predicates = [HasStdExtZbb, IsRV64], IsSignExtendingOpW = 1 in {
def CLZW   : RVBUnary<0b011000000000, 0b001, OPC_OP_IMM_32, "clzw">,
             Sched<[WriteCLZ32, ReadCLZ32]>;
def CTZW   : RVBUnary<0b011000000001, 0b001, OPC_OP_IMM_32, "ctzw">,
             Sched<[WriteCTZ32, ReadCTZ32]>;
def CPOPW  : RVBUnary<0b011000000010, 0b001, OPC_OP_IMM_32, "cpopw">,
             Sched<[WriteCPOP32, ReadCPOP32]>;
} // Predicates = [HasStdExtZbb, IsRV64]

let Predicates = [HasStdExtZbb], IsSignExtendingOpW = 1 in {
def SEXT_B : RVBUnary<0b011000000100, 0b001, OPC_OP_IMM, "sext.b">,
             Sched<[WriteIALU, ReadIALU]>;
def SEXT_H : RVBUnary<0b011000000101, 0b001, OPC_OP_IMM, "sext.h">,
             Sched<[WriteIALU, ReadIALU]>;
} // Predicates = [HasStdExtZbb]

let Predicates = [HasStdExtZbc] in {
def CLMULR : ALU_rr<0b0000101, 0b010, "clmulr", Commutable=1>,
             Sched<[WriteCLMUL, ReadCLMUL, ReadCLMUL]>;
} // Predicates = [HasStdExtZbc]

let Predicates = [HasStdExtZbcOrZbkc] in {
def CLMUL  : ALU_rr<0b0000101, 0b001, "clmul", Commutable=1>,
             Sched<[WriteCLMUL, ReadCLMUL, ReadCLMUL]>;
def CLMULH : ALU_rr<0b0000101, 0b011, "clmulh", Commutable=1>,
             Sched<[WriteCLMUL, ReadCLMUL, ReadCLMUL]>;
} // Predicates = [HasStdExtZbcOrZbkc]

let Predicates = [HasStdExtZbb] in {
def MIN  : ALU_rr<0b0000101, 0b100, "min", Commutable=1>,
           Sched<[WriteIMinMax, ReadIMinMax, ReadIMinMax]>;
def MINU : ALU_rr<0b0000101, 0b101, "minu", Commutable=1>,
           Sched<[WriteIMinMax, ReadIMinMax, ReadIMinMax]>;
def MAX  : ALU_rr<0b0000101, 0b110, "max", Commutable=1>,
           Sched<[WriteIMinMax, ReadIMinMax, ReadIMinMax]>;
def MAXU : ALU_rr<0b0000101, 0b111, "maxu", Commutable=1>,
           Sched<[WriteIMinMax, ReadIMinMax, ReadIMinMax]>;
} // Predicates = [HasStdExtZbb]

let Predicates = [HasStdExtZbkb] in {
def PACK  : ALU_rr<0b0000100, 0b100, "pack">,
            Sched<[WritePACK, ReadPACK, ReadPACK]>;
let IsSignExtendingOpW = 1 in
def PACKH : ALU_rr<0b0000100, 0b111, "packh">,
            Sched<[WritePACK, ReadPACK, ReadPACK]>;
} // Predicates = [HasStdExtZbkb]

let Predicates = [HasStdExtZbkb, IsRV64], IsSignExtendingOpW = 1 in
def PACKW  : ALUW_rr<0b0000100, 0b100, "packw">,
             Sched<[WritePACK32, ReadPACK32, ReadPACK32]>;

let Predicates = [HasStdExtZbb, IsRV32] in {
def ZEXT_H_RV32 : RVBUnary<0b000010000000, 0b100, OPC_OP, "zext.h">,
                  Sched<[WriteIALU, ReadIALU]>;
} // Predicates = [HasStdExtZbb, IsRV32]

let Predicates = [HasStdExtZbb, IsRV64], IsSignExtendingOpW = 1 in {
def ZEXT_H_RV64 : RVBUnary<0b000010000000, 0b100, OPC_OP_32, "zext.h">,
                  Sched<[WriteIALU, ReadIALU]>;
} // Predicates = [HasStdExtZbb, IsRV64]

let Predicates = [HasStdExtZbbOrZbkb, IsRV32] in {
def REV8_RV32 : RVBUnary<0b011010011000, 0b101, OPC_OP_IMM, "rev8">,
                Sched<[WriteREV8, ReadREV8]>;
} // Predicates = [HasStdExtZbbOrZbkb, IsRV32]

let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in {
def REV8_RV64 : RVBUnary<0b011010111000, 0b101, OPC_OP_IMM, "rev8">,
                Sched<[WriteREV8, ReadREV8]>;
} // Predicates = [HasStdExtZbbOrZbkb, IsRV64]

let Predicates = [HasStdExtZbb] in {
def ORC_B : RVBUnary<0b001010000111, 0b101, OPC_OP_IMM, "orc.b">,
            Sched<[WriteORCB, ReadORCB]>;
} // Predicates = [HasStdExtZbb]

let Predicates = [HasStdExtZbkb] in
def BREV8 : RVBUnary<0b011010000111, 0b101, OPC_OP_IMM, "brev8">,
            Sched<[WriteBREV8, ReadBREV8]>;

let Predicates = [HasStdExtZbkb, IsRV32] in {
def ZIP_RV32   : RVBUnary<0b000010001111, 0b001, OPC_OP_IMM, "zip">,
                 Sched<[WriteZIP, ReadZIP]>;
def UNZIP_RV32 : RVBUnary<0b000010001111, 0b101, OPC_OP_IMM, "unzip">,
                 Sched<[WriteZIP, ReadZIP]>;
} // Predicates = [HasStdExtZbkb, IsRV32]


//===----------------------------------------------------------------------===//
// Pseudo Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtZba, IsRV64] in {
def : InstAlias<"zext.w $rd, $rs", (ADD_UW GPR:$rd, GPR:$rs, X0)>;
} // Predicates = [HasStdExtZba, IsRV64]

let Predicates = [HasStdExtZbb] in {
def : InstAlias<"ror $rd, $rs1, $shamt",
                (RORI  GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
} // Predicates = [HasStdExtZbb]

let Predicates = [HasStdExtZbb, IsRV64] in {
def : InstAlias<"rorw $rd, $rs1, $shamt",
                (RORIW  GPR:$rd, GPR:$rs1, uimm5:$shamt), 0>;
} // Predicates = [HasStdExtZbb, IsRV64]

let Predicates = [HasStdExtZbs] in {
def : InstAlias<"bset $rd, $rs1, $shamt",
                (BSETI  GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
def : InstAlias<"bclr $rd, $rs1, $shamt",
                (BCLRI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
def : InstAlias<"binv $rd, $rs1, $shamt",
                (BINVI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
def : InstAlias<"bext $rd, $rs1, $shamt",
                (BEXTI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
} // Predicates = [HasStdExtZbs]

let Predicates = [HasStdExtZbkb, NoStdExtZbb, IsRV32] in {
def : InstAlias<"zext.h $rd, $rs", (PACK GPR:$rd, GPR:$rs, X0)>;
} // Predicates = [HasStdExtZbkb, NoStdExtZbb, IsRV32]

let Predicates = [HasStdExtZbkb, NoStdExtZbb, IsRV64] in {
def : InstAlias<"zext.h $rd, $rs", (PACKW GPR:$rd, GPR:$rs, X0)>;
} // Predicates = [HasStdExtZbkb, NoStdExtZbb, IsRV64]

//===----------------------------------------------------------------------===//
// Codegen patterns
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtZbbOrZbkb] in {
def : Pat<(XLenVT (and GPR:$rs1, (not GPR:$rs2))), (ANDN GPR:$rs1, GPR:$rs2)>;
def : Pat<(XLenVT (or  GPR:$rs1, (not GPR:$rs2))), (ORN  GPR:$rs1, GPR:$rs2)>;
def : Pat<(XLenVT (xor GPR:$rs1, (not GPR:$rs2))), (XNOR GPR:$rs1, GPR:$rs2)>;
} // Predicates = [HasStdExtZbbOrZbkb]

let Predicates = [HasStdExtZbbOrZbkb] in {
def : PatGprGpr<shiftop<rotl>, ROL>;
def : PatGprGpr<shiftop<rotr>, ROR>;

def : PatGprImm<rotr, RORI, uimmlog2xlen>;
// There's no encoding for roli in the 'B' extension as it can be
// implemented with rori by negating the immediate.
def : Pat<(XLenVT (rotl GPR:$rs1, uimmlog2xlen:$shamt)),
          (RORI GPR:$rs1, (ImmSubFromXLen uimmlog2xlen:$shamt))>;
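// For instance, on RV64 a rotate left by 3 becomes rori with shamt 64 - 3 =
// 61; ImmSubFromXLen performs that subtraction at selection time.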
} // Predicates = [HasStdExtZbbOrZbkb]

let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in {
def : PatGprGpr<shiftopw<riscv_rolw>, ROLW>;
def : PatGprGpr<shiftopw<riscv_rorw>, RORW>;
def : PatGprImm<riscv_rorw, RORIW, uimm5>;
def : Pat<(riscv_rolw GPR:$rs1, uimm5:$rs2),
          (RORIW GPR:$rs1, (ImmSubFrom32 uimm5:$rs2))>;
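// Similarly, a 32-bit rotate left by 5 becomes roriw with shamt 32 - 5 = 27.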
} // Predicates = [HasStdExtZbbOrZbkb, IsRV64]

let Predicates = [HasStdExtZbs] in {
def : Pat<(XLenVT (and (not (shiftop<shl> 1, (XLenVT GPR:$rs2))), GPR:$rs1)),
          (BCLR GPR:$rs1, GPR:$rs2)>;
def : Pat<(XLenVT (and (rotl -2, (XLenVT GPR:$rs2)), GPR:$rs1)),
          (BCLR GPR:$rs1, GPR:$rs2)>;
def : Pat<(XLenVT (or (shiftop<shl> 1, (XLenVT GPR:$rs2)), GPR:$rs1)),
          (BSET GPR:$rs1, GPR:$rs2)>;
def : Pat<(XLenVT (xor (shiftop<shl> 1, (XLenVT GPR:$rs2)), GPR:$rs1)),
          (BINV GPR:$rs1, GPR:$rs2)>;
def : Pat<(XLenVT (and (shiftop<srl> GPR:$rs1, (XLenVT GPR:$rs2)), 1)),
          (BEXT GPR:$rs1, GPR:$rs2)>;

def : Pat<(XLenVT (shiftop<shl> 1, (XLenVT GPR:$rs2))),
          (BSET (XLenVT X0), GPR:$rs2)>;
def : Pat<(XLenVT (not (shiftop<shl> -1, (XLenVT GPR:$rs2)))),
          (ADDI (XLenVT (BSET (XLenVT X0), GPR:$rs2)), -1)>;
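// The last pattern builds the low-bit mask (1 << rs2) - 1: bset against x0
// materializes 1 << rs2 and the addi of -1 then clears that bit and sets all
// bits below it, which is equivalent to ~(-1 << rs2).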

def : Pat<(XLenVT (and GPR:$rs1, BCLRMask:$mask)),
          (BCLRI GPR:$rs1, BCLRMask:$mask)>;
def : Pat<(XLenVT (or GPR:$rs1, SingleBitSetMask:$mask)),
          (BSETI GPR:$rs1, SingleBitSetMask:$mask)>;
def : Pat<(XLenVT (xor GPR:$rs1, SingleBitSetMask:$mask)),
          (BINVI GPR:$rs1, SingleBitSetMask:$mask)>;

def : Pat<(XLenVT (and (srl GPR:$rs1, uimmlog2xlen:$shamt), (XLenVT 1))),
          (BEXTI GPR:$rs1, uimmlog2xlen:$shamt)>;

def : Pat<(XLenVT (seteq (XLenVT (and GPR:$rs1, SingleBitSetMask:$mask)), 0)),
          (BEXTI (XLenVT (XORI GPR:$rs1, -1)), SingleBitSetMask:$mask)>;
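// The seteq pattern exploits that (x & (1 << k)) == 0 is simply bit k of ~x,
// so a single bexti of the inverted source yields the 0/1 result directly.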

def : Pat<(XLenVT (or GPR:$r, BSETINVTwoBitsMask:$i)),
          (BSETI (XLenVT (BSETI GPR:$r, (TrailingZeros BSETINVTwoBitsMask:$i))),
                 (BSETINVTwoBitsMaskHigh BSETINVTwoBitsMask:$i))>;
def : Pat<(XLenVT (xor GPR:$r, BSETINVTwoBitsMask:$i)),
          (BINVI (XLenVT (BINVI GPR:$r, (TrailingZeros BSETINVTwoBitsMask:$i))),
                 (BSETINVTwoBitsMaskHigh BSETINVTwoBitsMask:$i))>;
def : Pat<(XLenVT (or GPR:$r, BSETINVORIMask:$i)),
          (BSETI (XLenVT (ORI GPR:$r, (BSETINVORIMaskLow BSETINVORIMask:$i))),
                 (BSETINVTwoBitsMaskHigh BSETINVORIMask:$i))>;
def : Pat<(XLenVT (xor GPR:$r, BSETINVORIMask:$i)),
          (BINVI (XLenVT (XORI GPR:$r, (BSETINVORIMaskLow BSETINVORIMask:$i))),
                 (BSETINVTwoBitsMaskHigh BSETINVORIMask:$i))>;
def : Pat<(XLenVT (and GPR:$r, BCLRITwoBitsMask:$i)),
          (BCLRI (XLenVT (BCLRI GPR:$r, (BCLRITwoBitsMaskLow BCLRITwoBitsMask:$i))),
                 (BCLRITwoBitsMaskHigh BCLRITwoBitsMask:$i))>;
def : Pat<(XLenVT (and GPR:$r, BCLRIANDIMask:$i)),
          (BCLRI (XLenVT (ANDI GPR:$r, (BCLRIANDIMaskLow BCLRIANDIMask:$i))),
                 (BCLRITwoBitsMaskHigh BCLRIANDIMask:$i))>;
} // Predicates = [HasStdExtZbs]

let Predicates = [HasStdExtZbb] in
def : PatGpr<riscv_orc_b, ORC_B>;

let Predicates = [HasStdExtZbkb] in
def : PatGpr<riscv_brev8, BREV8>;

let Predicates = [HasStdExtZbkb, IsRV32] in {
// We treat zip and unzip as separate instructions, so match them directly.
def : PatGpr<riscv_zip, ZIP_RV32, i32>;
def : PatGpr<riscv_unzip, UNZIP_RV32, i32>;
} // Predicates = [HasStdExtZbkb, IsRV32]

let Predicates = [HasStdExtZbb] in {
def : PatGpr<ctlz, CLZ>;
def : PatGpr<cttz, CTZ>;
def : PatGpr<ctpop, CPOP>;
} // Predicates = [HasStdExtZbb]

let Predicates = [HasStdExtZbb, IsRV64] in {
def : PatGpr<riscv_clzw, CLZW>;
def : PatGpr<riscv_ctzw, CTZW>;
def : Pat<(i64 (ctpop (i64 (zexti32 (i64 GPR:$rs1))))), (CPOPW GPR:$rs1)>;

def : Pat<(i64 (riscv_absw GPR:$rs1)),
          (MAX GPR:$rs1, (XLenVT (SUBW (XLenVT X0), GPR:$rs1)))>;
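// The absw pattern computes max(rs1, -rs1); the negation uses subw so the
// negated value stays sign extended from bit 31 before the max.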
} // Predicates = [HasStdExtZbb, IsRV64]

let Predicates = [HasStdExtZbb] in {
def : Pat<(XLenVT (sext_inreg GPR:$rs1, i8)), (SEXT_B GPR:$rs1)>;
def : Pat<(XLenVT (sext_inreg GPR:$rs1, i16)), (SEXT_H GPR:$rs1)>;
} // Predicates = [HasStdExtZbb]

let Predicates = [HasStdExtZbb] in {
def : PatGprGpr<smin, MIN>;
def : PatGprGpr<smax, MAX>;
def : PatGprGpr<umin, MINU>;
def : PatGprGpr<umax, MAXU>;
} // Predicates = [HasStdExtZbb]

let Predicates = [HasStdExtZbbOrZbkb, IsRV32] in
def : PatGpr<bswap, REV8_RV32, i32>;

let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in
def : PatGpr<bswap, REV8_RV64, i64>;

let Predicates = [HasStdExtZbkb] in {
def : Pat<(or (and (shl GPR:$rs2, (XLenVT 8)), 0xFFFF),
              (zexti8 (XLenVT GPR:$rs1))),
          (PACKH GPR:$rs1, GPR:$rs2)>;
def : Pat<(or (shl (zexti8 (XLenVT GPR:$rs2)), (XLenVT 8)),
              (zexti8 (XLenVT GPR:$rs1))),
          (PACKH GPR:$rs1, GPR:$rs2)>;
def : Pat<(and (or (shl GPR:$rs2, (XLenVT 8)),
                   (zexti8 (XLenVT GPR:$rs1))), 0xFFFF),
          (PACKH GPR:$rs1, GPR:$rs2)>;

def : Pat<(binop_allhusers<or> (shl GPR:$rs2, (XLenVT 8)),
                               (zexti8 (XLenVT GPR:$rs1))),
          (PACKH GPR:$rs1, GPR:$rs2)>;
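// All of these forms compute packh's result, zext8(rs1) | (zext8(rs2) << 8),
// with anything above bit 15 either zeroed by the pattern itself or ignored
// by all users.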
} // Predicates = [HasStdExtZbkb]

let Predicates = [HasStdExtZbkb, IsRV32] in
def : Pat<(i32 (or (zexti16 (i32 GPR:$rs1)), (shl GPR:$rs2, (i32 16)))),
          (PACK GPR:$rs1, GPR:$rs2)>;

let Predicates = [HasStdExtZbkb, IsRV64] in {
def : Pat<(i64 (or (zexti32 (i64 GPR:$rs1)), (shl GPR:$rs2, (i64 32)))),
          (PACK GPR:$rs1, GPR:$rs2)>;

def : Pat<(binop_allwusers<or> (shl GPR:$rs2, (i64 16)),
                               (zexti16 (i64 GPR:$rs1))),
          (PACKW GPR:$rs1, GPR:$rs2)>;
def : Pat<(i64 (or (sext_inreg (shl GPR:$rs2, (i64 16)), i32),
                   (zexti16 (i64 GPR:$rs1)))),
          (PACKW GPR:$rs1, GPR:$rs2)>;
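// pack concatenates the low halves of rs1 and rs2, with rs1 in the lower
// XLEN/2 bits; packw does the same on 16-bit halves and sign extends the
// 32-bit result, which is why the sext_inreg form is also accepted.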
} // Predicates = [HasStdExtZbkb, IsRV64]

let Predicates = [HasStdExtZbb, IsRV32] in
def : Pat<(i32 (and GPR:$rs, 0xFFFF)), (ZEXT_H_RV32 GPR:$rs)>;
let Predicates = [HasStdExtZbb, IsRV64] in
def : Pat<(i64 (and GPR:$rs, 0xFFFF)), (ZEXT_H_RV64 GPR:$rs)>;

let Predicates = [HasStdExtZbkb, NoStdExtZbb, IsRV32] in
def : Pat<(i32 (and GPR:$rs, 0xFFFF)), (PACK GPR:$rs, (XLenVT X0))>;
let Predicates = [HasStdExtZbkb, NoStdExtZbb, IsRV64] in
def : Pat<(i64 (and GPR:$rs, 0xFFFF)), (PACKW GPR:$rs, (XLenVT X0))>;

let Predicates = [HasStdExtZba] in {

foreach i = {1,2,3} in {
  defvar shxadd = !cast<Instruction>("SH"#i#"ADD");
  def : Pat<(XLenVT (add_like_non_imm12 (shl GPR:$rs1, (XLenVT i)), GPR:$rs2)),
            (shxadd GPR:$rs1, GPR:$rs2)>;
  def : Pat<(XLenVT (riscv_shl_add GPR:$rs1, (XLenVT i), GPR:$rs2)),
            (shxadd GPR:$rs1, GPR:$rs2)>;

  defvar pat = !cast<ComplexPattern>("sh"#i#"add_op");
  // More complex cases use a ComplexPattern.
  def : Pat<(XLenVT (add_like_non_imm12 pat:$rs1, GPR:$rs2)),
            (shxadd pat:$rs1, GPR:$rs2)>;
}

def : Pat<(add_like (XLenVT GPR:$r), CSImm12MulBy4:$i),
          (SH2ADD (XLenVT (ADDI (XLenVT X0), (SimmShiftRightBy2XForm CSImm12MulBy4:$i))),
                  GPR:$r)>;
def : Pat<(add_like (XLenVT GPR:$r), CSImm12MulBy8:$i),
          (SH3ADD (XLenVT (ADDI (XLenVT X0), (SimmShiftRightBy3XForm CSImm12MulBy8:$i))),
                  GPR:$r)>;

} // Predicates = [HasStdExtZba]

let Predicates = [HasStdExtZba, IsRV64] in {
def : Pat<(i64 (shl (and GPR:$rs1, 0xFFFFFFFF), uimm5:$shamt)),
          (SLLI_UW GPR:$rs1, uimm5:$shamt)>;
// Match a shifted 0xffffffff mask. Use SRLI to clear the LSBs and SLLI_UW to
// mask and shift.
def : Pat<(i64 (and GPR:$rs1, Shifted32OnesMask:$mask)),
          (SLLI_UW (XLenVT (SRLI GPR:$rs1, Shifted32OnesMask:$mask)),
                   Shifted32OnesMask:$mask)>;
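// Illustrative example for the preceding pattern: a mask of 0xFFFFFFFF00
// (0xFFFFFFFF << 8) selects srli rs1, 8 followed by slli.uw rd, rs1, 8; the
// srli drops the low 8 bits and the slli.uw keeps only 32 bits before
// shifting them back into place.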
def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFF), GPR:$rs2)),
          (ADD_UW GPR:$rs1, GPR:$rs2)>;
def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (ADD_UW GPR:$rs, (XLenVT X0))>;

foreach i = {1,2,3} in {
  defvar shxadd_uw = !cast<Instruction>("SH"#i#"ADD_UW");
  def : Pat<(i64 (add_like_non_imm12 (shl (and GPR:$rs1, 0xFFFFFFFF), (i64 i)), (XLenVT GPR:$rs2))),
            (shxadd_uw GPR:$rs1, GPR:$rs2)>;
  def : Pat<(i64 (riscv_shl_add (and GPR:$rs1, 0xFFFFFFFF), (i64 i), GPR:$rs2)),
            (shxadd_uw GPR:$rs1, GPR:$rs2)>;
}

def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 1)), 0x1FFFFFFFF), (XLenVT GPR:$rs2))),
          (SH1ADD_UW GPR:$rs1, GPR:$rs2)>;
def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 2)), 0x3FFFFFFFF), (XLenVT GPR:$rs2))),
          (SH2ADD_UW GPR:$rs1, GPR:$rs2)>;
def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 3)), 0x7FFFFFFFF), (XLenVT GPR:$rs2))),
          (SH3ADD_UW GPR:$rs1, GPR:$rs2)>;

// More complex cases use a ComplexPattern.
foreach i = {1,2,3} in {
  defvar pat = !cast<ComplexPattern>("sh"#i#"add_uw_op");
  def : Pat<(i64 (add_like_non_imm12 pat:$rs1, (XLenVT GPR:$rs2))),
            (!cast<Instruction>("SH"#i#"ADD_UW") pat:$rs1, GPR:$rs2)>;
}

def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFE), (XLenVT GPR:$rs2))),
          (SH1ADD (XLenVT (SRLIW GPR:$rs1, 1)), GPR:$rs2)>;
def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFC), (XLenVT GPR:$rs2))),
          (SH2ADD (XLenVT (SRLIW GPR:$rs1, 2)), GPR:$rs2)>;
def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFF8), (XLenVT GPR:$rs2))),
          (SH3ADD (XLenVT (SRLIW GPR:$rs1, 3)), GPR:$rs2)>;

// Use SRLI to clear the LSBs and SHXADD_UW to mask and shift.
def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x1FFFFFFFE), (XLenVT GPR:$rs2))),
          (SH1ADD_UW (XLenVT (SRLI GPR:$rs1, 1)), GPR:$rs2)>;
def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x3FFFFFFFC), (XLenVT GPR:$rs2))),
          (SH2ADD_UW (XLenVT (SRLI GPR:$rs1, 2)), GPR:$rs2)>;
def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x7FFFFFFF8), (XLenVT GPR:$rs2))),
          (SH3ADD_UW (XLenVT (SRLI GPR:$rs1, 3)), GPR:$rs2)>;
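// Illustrative example for the last group: (rs1 & 0x1FFFFFFFE) + rs2 is
// rewritten as sh1add.uw (srli rs1, 1), rs2, i.e. ((rs1 >> 1) & 0xFFFFFFFF)
// shifted back left by one and added to rs2.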

} // Predicates = [HasStdExtZba, IsRV64]

let Predicates = [HasStdExtZbcOrZbkc] in {
def : PatGprGpr<riscv_clmul, CLMUL>;
def : PatGprGpr<riscv_clmulh, CLMULH>;
} // Predicates = [HasStdExtZbcOrZbkc]

let Predicates = [HasStdExtZbc] in
def : PatGprGpr<riscv_clmulr, CLMULR>;

let Predicates = [HasStdExtZbkx] in {
def : PatGprGpr<int_riscv_xperm4, XPERM4>;
def : PatGprGpr<int_riscv_xperm8, XPERM8>;
} // Predicates = [HasStdExtZbkx]

//===----------------------------------------------------------------------===//
// Experimental RV64 i32 legalization patterns.
//===----------------------------------------------------------------------===//

def BCLRMaski32 : ImmLeaf<i32, [{
  return !isInt<12>(Imm) && isPowerOf2_32(~Imm);
}]>;
def SingleBitSetMaski32 : ImmLeaf<i32, [{
  return !isInt<12>(Imm) && isPowerOf2_32(Imm);
}]>;

let Predicates = [HasStdExtZbb, IsRV64] in {
def : PatGpr<ctlz, CLZW, i32>;
def : PatGpr<cttz, CTZW, i32>;
def : PatGpr<ctpop, CPOPW, i32>;

def : Pat<(i32 (sext_inreg GPR:$rs1, i8)), (SEXT_B GPR:$rs1)>;
def : Pat<(i32 (sext_inreg GPR:$rs1, i16)), (SEXT_H GPR:$rs1)>;

def : Pat<(i32 (and GPR:$rs, 0xFFFF)), (ZEXT_H_RV64 GPR:$rs)>;
} // Predicates = [HasStdExtZbb, IsRV64]

let Predicates = [HasStdExtZbkb, NoStdExtZbb, IsRV64] in {
def : Pat<(i32 (and GPR:$rs, 0xFFFF)), (PACKW GPR:$rs, (XLenVT X0))>;
}

let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in {
def : Pat<(i32 (and GPR:$rs1, (not GPR:$rs2))), (ANDN GPR:$rs1, GPR:$rs2)>;
def : Pat<(i32 (or  GPR:$rs1, (not GPR:$rs2))), (ORN  GPR:$rs1, GPR:$rs2)>;
def : Pat<(i32 (xor GPR:$rs1, (not GPR:$rs2))), (XNOR GPR:$rs1, GPR:$rs2)>;

def : PatGprGpr<shiftopw<rotl>, ROLW, i32, i64>;
def : PatGprGpr<shiftopw<rotr>, RORW, i32, i64>;
def : PatGprImm<rotr, RORIW, uimm5, i32>;

def : Pat<(i32 (rotl GPR:$rs1, uimm5:$rs2)),
          (RORIW GPR:$rs1, (ImmSubFrom32 uimm5:$rs2))>;
} // Predicates = [HasStdExtZbbOrZbkb, IsRV64]

let Predicates = [HasStdExtZbkb, IsRV64] in {
def : Pat<(or (and (shl GPR:$rs2, (i64 8)), 0xFFFF),
              (zexti8i32 (i32 GPR:$rs1))),
          (PACKH GPR:$rs1, GPR:$rs2)>;
def : Pat<(or (shl (zexti8i32 (i32 GPR:$rs2)), (i64 8)),
              (zexti8i32 (i32 GPR:$rs1))),
          (PACKH GPR:$rs1, GPR:$rs2)>;
def : Pat<(and (anyext (or (shl GPR:$rs2, (XLenVT 8)),
                           (zexti8i32 (i32 GPR:$rs1)))), 0xFFFF),
          (PACKH GPR:$rs1, GPR:$rs2)>;

def : Pat<(i32 (or (shl GPR:$rs2, (i64 16)), (zexti16i32 (i32 GPR:$rs1)))),
          (PACKW GPR:$rs1, GPR:$rs2)>;
} // Predicates = [HasStdExtZbkb, IsRV64]

let Predicates = [HasStdExtZba, IsRV64] in {
def : Pat<(shl (i64 (zext i32:$rs1)), uimm5:$shamt),
          (SLLI_UW GPR:$rs1, uimm5:$shamt)>;

def : Pat<(i64 (add_like_non_imm12 (zext GPR:$rs1), GPR:$rs2)),
          (ADD_UW GPR:$rs1, GPR:$rs2)>;
def : Pat<(zext GPR:$src), (ADD_UW GPR:$src, (XLenVT X0))>;

foreach i = {1,2,3} in {
  defvar shxadd = !cast<Instruction>("SH"#i#"ADD");
  def : Pat<(i32 (add_like_non_imm12 (shl GPR:$rs1, (i64 i)), GPR:$rs2)),
            (shxadd GPR:$rs1, GPR:$rs2)>;
  def : Pat<(i32 (riscv_shl_add GPR:$rs1, (i32 i), GPR:$rs2)),
            (shxadd GPR:$rs1, GPR:$rs2)>;
}
}

let Predicates = [HasStdExtZbs, IsRV64] in {
def : Pat<(i32 (and (not (shiftop<shl> 1, (i64 GPR:$rs2))), GPR:$rs1)),
          (BCLR GPR:$rs1, GPR:$rs2)>;
def : Pat<(i32 (and (rotl -2, (i64 GPR:$rs2)), GPR:$rs1)),
          (BCLR GPR:$rs1, GPR:$rs2)>;
def : Pat<(i32 (or (shiftop<shl> 1, (i64 GPR:$rs2)), GPR:$rs1)),
          (BSET GPR:$rs1, GPR:$rs2)>;
def : Pat<(i32 (xor (shiftop<shl> 1, (i64 GPR:$rs2)), GPR:$rs1)),
          (BINV GPR:$rs1, GPR:$rs2)>;
def : Pat<(i32 (and (shiftop<srl> GPR:$rs1, (i64 GPR:$rs2)), 1)),
          (BEXT GPR:$rs1, GPR:$rs2)>;
def : Pat<(i64 (and (anyext (i32 (shiftop<srl> GPR:$rs1, (i64 GPR:$rs2)))), 1)),
          (BEXT GPR:$rs1, GPR:$rs2)>;

def : Pat<(i32 (shiftop<shl> 1, (i64 GPR:$rs2))),
          (BSET (XLenVT X0), GPR:$rs2)>;
def : Pat<(i32 (not (shiftop<shl> -1, (i64 GPR:$rs2)))),
          (ADDI (i32 (BSET (XLenVT X0), GPR:$rs2)), -1)>;

def : Pat<(i32 (and (srl GPR:$rs1, uimm5:$shamt), (i32 1))),
          (BEXTI GPR:$rs1, uimm5:$shamt)>;

def : Pat<(i32 (and GPR:$rs1, BCLRMaski32:$mask)),
          (BCLRI GPR:$rs1, (i64 (BCLRXForm $mask)))>;
def : Pat<(i32 (or GPR:$rs1, SingleBitSetMaski32:$mask)),
          (BSETI GPR:$rs1, (i64 (SingleBitSetMaskToIndex $mask)))>;
def : Pat<(i32 (xor GPR:$rs1, SingleBitSetMaski32:$mask)),
          (BINVI GPR:$rs1, (i64 (SingleBitSetMaskToIndex $mask)))>;
} // Predicates = [HasStdExtZbs, IsRV64]
846