//===-- RISCVRegisterInfo.td - RISC-V Register defs --------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Declarations that describe the RISC-V register files
//===----------------------------------------------------------------------===//

let Namespace = "RISCV" in {
class RISCVReg<bits<5> Enc, string n, list<string> alt = []> : Register<n> {
  let HWEncoding{4-0} = Enc;
  let AltNames = alt;
}

class RISCVReg16<bits<5> Enc, string n, list<string> alt = []> : Register<n> {
  let HWEncoding{4-0} = Enc;
  let AltNames = alt;
}

def sub_16 : SubRegIndex<16>;
class RISCVReg32<RISCVReg16 subreg> : Register<""> {
  let HWEncoding{4-0} = subreg.HWEncoding{4-0};
  let SubRegs = [subreg];
  let SubRegIndices = [sub_16];
  let AsmName = subreg.AsmName;
  let AltNames = subreg.AltNames;
}

// Because RISCVReg64 registers have an AsmName and AltNames that alias with
// their 16/32-bit sub-registers, RISCVAsmParser will need to coerce a register
// number from a RISCVReg16/RISCVReg32 to the equivalent RISCVReg64 when
// appropriate.
def sub_32 : SubRegIndex<32>;
class RISCVReg64<RISCVReg32 subreg> : Register<""> {
  let HWEncoding{4-0} = subreg.HWEncoding{4-0};
  let SubRegs = [subreg];
  let SubRegIndices = [sub_32];
  let AsmName = subreg.AsmName;
  let AltNames = subreg.AltNames;
}

class RISCVRegWithSubRegs<bits<5> Enc, string n, list<Register> subregs,
                          list<string> alt = []>
    : RegisterWithSubRegs<n, subregs> {
  let HWEncoding{4-0} = Enc;
  let AltNames = alt;
}

def ABIRegAltName : RegAltNameIndex;

def sub_vrm4_0 : SubRegIndex<256>;
def sub_vrm4_1 : SubRegIndex<256, 256>;
def sub_vrm2_0 : SubRegIndex<128>;
def sub_vrm2_1 : SubRegIndex<128, 128>;
def sub_vrm2_2 : ComposedSubRegIndex<sub_vrm4_1, sub_vrm2_0>;
def sub_vrm2_3 : ComposedSubRegIndex<sub_vrm4_1, sub_vrm2_1>;
def sub_vrm1_0 : SubRegIndex<64>;
def sub_vrm1_1 : SubRegIndex<64, 64>;
def sub_vrm1_2 : ComposedSubRegIndex<sub_vrm2_1, sub_vrm1_0>;
def sub_vrm1_3 : ComposedSubRegIndex<sub_vrm2_1, sub_vrm1_1>;
def sub_vrm1_4 : ComposedSubRegIndex<sub_vrm2_2, sub_vrm1_0>;
def sub_vrm1_5 : ComposedSubRegIndex<sub_vrm2_2, sub_vrm1_1>;
def sub_vrm1_6 : ComposedSubRegIndex<sub_vrm2_3, sub_vrm1_0>;
def sub_vrm1_7 : ComposedSubRegIndex<sub_vrm2_3, sub_vrm1_1>;

} // Namespace = "RISCV"
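
// For illustration (a sketch, assuming the usual SubRegIndex<size, offset>
// semantics where composition adds bit offsets): sub_vrm2_3 composes
// sub_vrm4_1 (offset 256) with sub_vrm2_1 (offset 128) and so selects
// nominal bits [384, 512) of an LMUL=8 register group, while sub_vrm1_5
// composes sub_vrm2_2 (offset 256) with sub_vrm1_1 (offset 64) to select
// bits [320, 384). The sizes are nominal since vector registers are scalable.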

// Integer registers
// CostPerUse is set higher for registers that may not be compressible as they
// are not part of GPRC, the most restrictive register class used by the
// compressed instruction set. This will influence the greedy register
// allocator to reduce the use of registers that can't be encoded in 16 bit
// instructions. This affects register allocation even when compressed
// instructions aren't targeted; we see no major negative codegen impact.

let RegAltNameIndices = [ABIRegAltName] in {
  def X0  : RISCVReg<0, "x0", ["zero"]>, DwarfRegNum<[0]>;
  let CostPerUse = [1] in {
  def X1  : RISCVReg<1, "x1", ["ra"]>, DwarfRegNum<[1]>;
  def X2  : RISCVReg<2, "x2", ["sp"]>, DwarfRegNum<[2]>;
  def X3  : RISCVReg<3, "x3", ["gp"]>, DwarfRegNum<[3]>;
  def X4  : RISCVReg<4, "x4", ["tp"]>, DwarfRegNum<[4]>;
  def X5  : RISCVReg<5, "x5", ["t0"]>, DwarfRegNum<[5]>;
  def X6  : RISCVReg<6, "x6", ["t1"]>, DwarfRegNum<[6]>;
  def X7  : RISCVReg<7, "x7", ["t2"]>, DwarfRegNum<[7]>;
  }
  def X8  : RISCVReg<8, "x8", ["s0", "fp"]>, DwarfRegNum<[8]>;
  def X9  : RISCVReg<9, "x9", ["s1"]>, DwarfRegNum<[9]>;
  def X10 : RISCVReg<10,"x10", ["a0"]>, DwarfRegNum<[10]>;
  def X11 : RISCVReg<11,"x11", ["a1"]>, DwarfRegNum<[11]>;
  def X12 : RISCVReg<12,"x12", ["a2"]>, DwarfRegNum<[12]>;
  def X13 : RISCVReg<13,"x13", ["a3"]>, DwarfRegNum<[13]>;
  def X14 : RISCVReg<14,"x14", ["a4"]>, DwarfRegNum<[14]>;
  def X15 : RISCVReg<15,"x15", ["a5"]>, DwarfRegNum<[15]>;
  let CostPerUse = [1] in {
  def X16 : RISCVReg<16,"x16", ["a6"]>, DwarfRegNum<[16]>;
  def X17 : RISCVReg<17,"x17", ["a7"]>, DwarfRegNum<[17]>;
  def X18 : RISCVReg<18,"x18", ["s2"]>, DwarfRegNum<[18]>;
  def X19 : RISCVReg<19,"x19", ["s3"]>, DwarfRegNum<[19]>;
  def X20 : RISCVReg<20,"x20", ["s4"]>, DwarfRegNum<[20]>;
  def X21 : RISCVReg<21,"x21", ["s5"]>, DwarfRegNum<[21]>;
  def X22 : RISCVReg<22,"x22", ["s6"]>, DwarfRegNum<[22]>;
  def X23 : RISCVReg<23,"x23", ["s7"]>, DwarfRegNum<[23]>;
  def X24 : RISCVReg<24,"x24", ["s8"]>, DwarfRegNum<[24]>;
  def X25 : RISCVReg<25,"x25", ["s9"]>, DwarfRegNum<[25]>;
  def X26 : RISCVReg<26,"x26", ["s10"]>, DwarfRegNum<[26]>;
  def X27 : RISCVReg<27,"x27", ["s11"]>, DwarfRegNum<[27]>;
  def X28 : RISCVReg<28,"x28", ["t3"]>, DwarfRegNum<[28]>;
  def X29 : RISCVReg<29,"x29", ["t4"]>, DwarfRegNum<[29]>;
  def X30 : RISCVReg<30,"x30", ["t5"]>, DwarfRegNum<[30]>;
  def X31 : RISCVReg<31,"x31", ["t6"]>, DwarfRegNum<[31]>;
  }
}

def XLenVT : ValueTypeByHwMode<[RV32, RV64],
                               [i32, i64]>;
def XLenRI : RegInfoByHwMode<
      [RV32, RV64],
      [RegInfo<32,32,32>, RegInfo<64,64,64>]>;

// The order of registers represents the preferred allocation sequence.
// Registers are listed in the order caller-save, callee-save, specials.
def GPR : RegisterClass<"RISCV", [XLenVT], 32, (add
    (sequence "X%u", 10, 17),
    (sequence "X%u", 5, 7),
    (sequence "X%u", 28, 31),
    (sequence "X%u", 8, 9),
    (sequence "X%u", 18, 27),
    (sequence "X%u", 0, 4)
  )> {
  let RegInfos = XLenRI;
}
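
// For illustration: each (sequence "X%u", A, B) expands to the registers
// XA, XA+1, ..., XB, so the allocation order above is a0-a7, t0-t2, t3-t6,
// s0-s1, s2-s11, and finally the specials zero, ra, sp, gp, tp.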

def GPRX0 : RegisterClass<"RISCV", [XLenVT], 32, (add X0)> {
  let RegInfos = XLenRI;
}

// The order of registers represents the preferred allocation sequence.
// Registers are listed in the order caller-save, callee-save, specials.
def GPRNoX0 : RegisterClass<"RISCV", [XLenVT], 32, (add
    (sequence "X%u", 10, 17),
    (sequence "X%u", 5, 7),
    (sequence "X%u", 28, 31),
    (sequence "X%u", 8, 9),
    (sequence "X%u", 18, 27),
    (sequence "X%u", 1, 4)
  )> {
  let RegInfos = XLenRI;
}

def GPRNoX0X2 : RegisterClass<"RISCV", [XLenVT], 32, (add
    (sequence "X%u", 10, 17),
    (sequence "X%u", 5, 7),
    (sequence "X%u", 28, 31),
    (sequence "X%u", 8, 9),
    (sequence "X%u", 18, 27),
    X1, X3, X4
  )> {
  let RegInfos = XLenRI;
}

// Don't use X1 or X5 for JALR since that is a hint to pop the return address
// stack on some microarchitectures. Also remove the reserved registers X0,
// X2, X3, and X4, as doing so reduces the number of register classes that get
// synthesized by tablegen.
def GPRJALR : RegisterClass<"RISCV", [XLenVT], 32, (add
    (sequence "X%u", 10, 17),
    (sequence "X%u", 6, 7),
    (sequence "X%u", 28, 31),
    (sequence "X%u", 8, 9),
    (sequence "X%u", 18, 27)
  )> {
  let RegInfos = XLenRI;
}

def GPRC : RegisterClass<"RISCV", [XLenVT], 32, (add
    (sequence "X%u", 10, 15),
    (sequence "X%u", 8, 9)
  )> {
  let RegInfos = XLenRI;
}

// For indirect tail calls, we can't use callee-saved registers, as they are
// restored to the saved value before the tail call, which would clobber the
// call address. We shouldn't use x5 since that is a hint to pop the return
// address stack on some microarchitectures.
def GPRTC : RegisterClass<"RISCV", [XLenVT], 32, (add
    (sequence "X%u", 6, 7),
    (sequence "X%u", 10, 17),
    (sequence "X%u", 28, 31)
  )> {
  let RegInfos = XLenRI;
}

def SP : RegisterClass<"RISCV", [XLenVT], 32, (add X2)> {
  let RegInfos = XLenRI;
}

// Floating point registers
let RegAltNameIndices = [ABIRegAltName] in {
  def F0_H  : RISCVReg16<0, "f0", ["ft0"]>, DwarfRegNum<[32]>;
  def F1_H  : RISCVReg16<1, "f1", ["ft1"]>, DwarfRegNum<[33]>;
  def F2_H  : RISCVReg16<2, "f2", ["ft2"]>, DwarfRegNum<[34]>;
  def F3_H  : RISCVReg16<3, "f3", ["ft3"]>, DwarfRegNum<[35]>;
  def F4_H  : RISCVReg16<4, "f4", ["ft4"]>, DwarfRegNum<[36]>;
  def F5_H  : RISCVReg16<5, "f5", ["ft5"]>, DwarfRegNum<[37]>;
  def F6_H  : RISCVReg16<6, "f6", ["ft6"]>, DwarfRegNum<[38]>;
  def F7_H  : RISCVReg16<7, "f7", ["ft7"]>, DwarfRegNum<[39]>;
  def F8_H  : RISCVReg16<8, "f8", ["fs0"]>, DwarfRegNum<[40]>;
  def F9_H  : RISCVReg16<9, "f9", ["fs1"]>, DwarfRegNum<[41]>;
  def F10_H : RISCVReg16<10,"f10", ["fa0"]>, DwarfRegNum<[42]>;
  def F11_H : RISCVReg16<11,"f11", ["fa1"]>, DwarfRegNum<[43]>;
  def F12_H : RISCVReg16<12,"f12", ["fa2"]>, DwarfRegNum<[44]>;
  def F13_H : RISCVReg16<13,"f13", ["fa3"]>, DwarfRegNum<[45]>;
  def F14_H : RISCVReg16<14,"f14", ["fa4"]>, DwarfRegNum<[46]>;
  def F15_H : RISCVReg16<15,"f15", ["fa5"]>, DwarfRegNum<[47]>;
  def F16_H : RISCVReg16<16,"f16", ["fa6"]>, DwarfRegNum<[48]>;
  def F17_H : RISCVReg16<17,"f17", ["fa7"]>, DwarfRegNum<[49]>;
  def F18_H : RISCVReg16<18,"f18", ["fs2"]>, DwarfRegNum<[50]>;
  def F19_H : RISCVReg16<19,"f19", ["fs3"]>, DwarfRegNum<[51]>;
  def F20_H : RISCVReg16<20,"f20", ["fs4"]>, DwarfRegNum<[52]>;
  def F21_H : RISCVReg16<21,"f21", ["fs5"]>, DwarfRegNum<[53]>;
  def F22_H : RISCVReg16<22,"f22", ["fs6"]>, DwarfRegNum<[54]>;
  def F23_H : RISCVReg16<23,"f23", ["fs7"]>, DwarfRegNum<[55]>;
  def F24_H : RISCVReg16<24,"f24", ["fs8"]>, DwarfRegNum<[56]>;
  def F25_H : RISCVReg16<25,"f25", ["fs9"]>, DwarfRegNum<[57]>;
  def F26_H : RISCVReg16<26,"f26", ["fs10"]>, DwarfRegNum<[58]>;
  def F27_H : RISCVReg16<27,"f27", ["fs11"]>, DwarfRegNum<[59]>;
  def F28_H : RISCVReg16<28,"f28", ["ft8"]>, DwarfRegNum<[60]>;
  def F29_H : RISCVReg16<29,"f29", ["ft9"]>, DwarfRegNum<[61]>;
  def F30_H : RISCVReg16<30,"f30", ["ft10"]>, DwarfRegNum<[62]>;
  def F31_H : RISCVReg16<31,"f31", ["ft11"]>, DwarfRegNum<[63]>;

  foreach Index = 0-31 in {
    def F#Index#_F : RISCVReg32<!cast<RISCVReg16>("F"#Index#"_H")>,
                     DwarfRegNum<[!add(Index, 32)]>;
  }

  foreach Index = 0-31 in {
    def F#Index#_D : RISCVReg64<!cast<RISCVReg32>("F"#Index#"_F")>,
                     DwarfRegNum<[!add(Index, 32)]>;
  }
}
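
// For illustration: the two loops above expand to defs such as
//   def F0_F : RISCVReg32<F0_H>, DwarfRegNum<[32]>;
//   def F0_D : RISCVReg64<F0_F>, DwarfRegNum<[32]>;
// so all three widths of a given FPR share the asm name ("f0"), the ABI alt
// name ("ft0"), and the DWARF number; only the sub-register nesting differs.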

// The order of registers represents the preferred allocation sequence,
// meaning caller-save regs are listed before callee-save.
def FPR16 : RegisterClass<"RISCV", [f16], 16, (add
    (sequence "F%u_H", 0, 7),
    (sequence "F%u_H", 10, 17),
    (sequence "F%u_H", 28, 31),
    (sequence "F%u_H", 8, 9),
    (sequence "F%u_H", 18, 27)
)>;

def FPR32 : RegisterClass<"RISCV", [f32], 32, (add
    (sequence "F%u_F", 0, 7),
    (sequence "F%u_F", 10, 17),
    (sequence "F%u_F", 28, 31),
    (sequence "F%u_F", 8, 9),
    (sequence "F%u_F", 18, 27)
)>;

def FPR32C : RegisterClass<"RISCV", [f32], 32, (add
    (sequence "F%u_F", 10, 15),
    (sequence "F%u_F", 8, 9)
)>;

// The order of registers represents the preferred allocation sequence,
// meaning caller-save regs are listed before callee-save.
def FPR64 : RegisterClass<"RISCV", [f64], 64, (add
    (sequence "F%u_D", 0, 7),
    (sequence "F%u_D", 10, 17),
    (sequence "F%u_D", 28, 31),
    (sequence "F%u_D", 8, 9),
    (sequence "F%u_D", 18, 27)
)>;

def FPR64C : RegisterClass<"RISCV", [f64], 64, (add
    (sequence "F%u_D", 10, 15),
    (sequence "F%u_D", 8, 9)
)>;

// Vector type mapping to LLVM types.
//
// The V vector extension requires that VLEN >= 128 and VLEN <= 65536.
// Additionally, the only supported ELEN values are 32 and 64,
// thus `vscale` can be defined as VLEN/64,
// allowing the same types with either ELEN value.
//
//         MF8    MF4     MF2     M1      M2      M4       M8
// i64*    N/A    N/A     N/A     nxv1i64 nxv2i64 nxv4i64  nxv8i64
// i32     N/A    N/A     nxv1i32 nxv2i32 nxv4i32 nxv8i32  nxv16i32
// i16     N/A    nxv1i16 nxv2i16 nxv4i16 nxv8i16 nxv16i16 nxv32i16
// i8      nxv1i8 nxv2i8  nxv4i8  nxv8i8  nxv16i8 nxv32i8  nxv64i8
// double* N/A    N/A     N/A     nxv1f64 nxv2f64 nxv4f64  nxv8f64
// float   N/A    N/A     nxv1f32 nxv2f32 nxv4f32 nxv8f32  nxv16f32
// half    N/A    nxv1f16 nxv2f16 nxv4f16 nxv8f16 nxv16f16 nxv32f16
// * ELEN=64

defvar vint8mf8_t = nxv1i8;
defvar vint8mf4_t = nxv2i8;
defvar vint8mf2_t = nxv4i8;
defvar vint8m1_t = nxv8i8;
defvar vint8m2_t = nxv16i8;
defvar vint8m4_t = nxv32i8;
defvar vint8m8_t = nxv64i8;

defvar vint16mf4_t = nxv1i16;
defvar vint16mf2_t = nxv2i16;
defvar vint16m1_t = nxv4i16;
defvar vint16m2_t = nxv8i16;
defvar vint16m4_t = nxv16i16;
defvar vint16m8_t = nxv32i16;

defvar vint32mf2_t = nxv1i32;
defvar vint32m1_t = nxv2i32;
defvar vint32m2_t = nxv4i32;
defvar vint32m4_t = nxv8i32;
defvar vint32m8_t = nxv16i32;

defvar vint64m1_t = nxv1i64;
defvar vint64m2_t = nxv2i64;
defvar vint64m4_t = nxv4i64;
defvar vint64m8_t = nxv8i64;

defvar vfloat16mf4_t = nxv1f16;
defvar vfloat16mf2_t = nxv2f16;
defvar vfloat16m1_t = nxv4f16;
defvar vfloat16m2_t = nxv8f16;
defvar vfloat16m4_t = nxv16f16;
defvar vfloat16m8_t = nxv32f16;

defvar vfloat32mf2_t = nxv1f32;
defvar vfloat32m1_t = nxv2f32;
defvar vfloat32m2_t = nxv4f32;
defvar vfloat32m4_t = nxv8f32;
defvar vfloat32m8_t = nxv16f32;

defvar vfloat64m1_t = nxv1f64;
defvar vfloat64m2_t = nxv2f64;
defvar vfloat64m4_t = nxv4f64;
defvar vfloat64m8_t = nxv8f64;

defvar vbool1_t = nxv64i1;
defvar vbool2_t = nxv32i1;
defvar vbool4_t = nxv16i1;
defvar vbool8_t = nxv8i1;
defvar vbool16_t = nxv4i1;
defvar vbool32_t = nxv2i1;
defvar vbool64_t = nxv1i1;
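
// For illustration (a sketch with VLEN = 128, so vscale = VLEN/64 = 2):
// vint32m1_t = nxv2i32 holds 2 x vscale = 4 i32 elements, i.e. 128 bits,
// exactly one vector register, while vbool1_t = nxv64i1 holds 64 x vscale =
// 128 mask bits, one per element at SEW=8, LMUL=8.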

// There is no need to define register classes for fractional LMUL.
def LMULList {
  list<int> m = [1, 2, 4, 8];
}

//===----------------------------------------------------------------------===//
// Utility classes for segment load/store.
//===----------------------------------------------------------------------===//
// The set of legal NF for LMUL = lmul.
// LMUL == 1, NF = 2, 3, 4, 5, 6, 7, 8
// LMUL == 2, NF = 2, 3, 4
// LMUL == 4, NF = 2
class NFList<int lmul> {
  list<int> L = !cond(!eq(lmul, 1): [2, 3, 4, 5, 6, 7, 8],
                      !eq(lmul, 2): [2, 3, 4],
                      !eq(lmul, 4): [2],
                      !eq(lmul, 8): []);
}

// Generate the SubRegIndex list sub_vrm<lmul>_0 .. sub_vrm<lmul>_(nf-1).
class SubRegSet<int nf, int lmul> {
  list<SubRegIndex> L = !foldl([]<SubRegIndex>,
                               [0, 1, 2, 3, 4, 5, 6, 7],
                               AccList, i,
                               !listconcat(AccList,
                                 !if(!lt(i, nf),
                                   [!cast<SubRegIndex>("sub_vrm" # lmul # "_" # i)],
                                   [])));
}

// Collect the valid indexes into 'R' under NF and LMUL values from TUPLE_INDEX.
// When NF = 2, the valid TUPLE_INDEXes are 0 and 1.
// For example, when LMUL = 4, the potentially valid indexes are
// [8, 12, 16, 20, 24, 28, 4]. However, not all of these are valid for every
// NF and TUPLE_INDEX: 28 is not valid under LMUL = 4, NF = 2, TUPLE_INDEX = 0.
// The filter is
//   (tuple_index + i) x lmul <= (tuple_index x lmul) + 32 - (nf x lmul)
//
// Using TUPLE_INDEX = 0, LMUL = 4 and NF = 2 as the example,
//   i x 4 <= 24
// the class will return [8, 12, 16, 20, 24, 4].
// Using TUPLE_INDEX = 1, LMUL = 4 and NF = 2 as the example,
//   (1 + i) x 4 <= 28
// the class will return [12, 16, 20, 24, 28, 8].
class IndexSet<int tuple_index, int nf, int lmul, bit isV0 = false> {
  list<int> R =
    !foldl([]<int>,
           !if(isV0, [0],
             !cond(
               !eq(lmul, 1):
                 [8, 9, 10, 11, 12, 13, 14, 15,
                  16, 17, 18, 19, 20, 21, 22, 23,
                  24, 25, 26, 27, 28, 29, 30, 31,
                  1, 2, 3, 4, 5, 6, 7],
               !eq(lmul, 2):
                 [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3],
               !eq(lmul, 4):
                 [2, 3, 4, 5, 6, 7, 1])),
           L, i,
           !listconcat(L,
             !if(!le(!mul(!add(i, tuple_index), lmul),
                     !sub(!add(32, !mul(tuple_index, lmul)), !mul(nf, lmul))),
                 [!mul(!add(i, tuple_index), lmul)], [])));
}
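
// For illustration: NFList<2>.L = [2, 3, 4] and SubRegSet<2, 4>.L =
// [sub_vrm4_0, sub_vrm4_1], i.e. the two LMUL=4 halves of an NF=2 tuple.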

// This class returns a list of vector register collections.
// For example, for NF = 2 and LMUL = 4,
// it will return
//   ([ V8M4, V12M4, V16M4, V20M4, V24M4, V4M4],
//    [V12M4, V16M4, V20M4, V24M4, V28M4, V8M4])
class VRegList<list<dag> LIn, int start, int nf, int lmul, bit isV0> {
  list<dag> L =
    !if(!ge(start, nf),
        LIn,
        !listconcat(
          [!dag(add,
                !foreach(i, IndexSet<start, nf, lmul, isV0>.R,
                         !cast<Register>("V" # i # !cond(!eq(lmul, 2): "M2",
                                                         !eq(lmul, 4): "M4",
                                                         true: ""))),
                !listsplat("",
                           !size(IndexSet<start, nf, lmul, isV0>.R)))],
          VRegList<LIn, !add(start, 1), nf, lmul, isV0>.L));
}

// Vector registers
let RegAltNameIndices = [ABIRegAltName] in {
  foreach Index = 0-31 in {
    def V#Index : RISCVReg<Index, "v"#Index, ["v"#Index]>,
                  DwarfRegNum<[!add(Index, 96)]>;
  }

  foreach Index = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22,
                   24, 26, 28, 30] in {
    def V#Index#M2 : RISCVRegWithSubRegs<Index, "v"#Index,
                       [!cast<Register>("V"#Index),
                        !cast<Register>("V"#!add(Index, 1))],
                       ["v"#Index]>,
                     DwarfRegAlias<!cast<Register>("V"#Index)> {
      let SubRegIndices = [sub_vrm1_0, sub_vrm1_1];
    }
  }

  foreach Index = [0, 4, 8, 12, 16, 20, 24, 28] in {
    def V#Index#M4 : RISCVRegWithSubRegs<Index, "v"#Index,
                       [!cast<Register>("V"#Index#"M2"),
                        !cast<Register>("V"#!add(Index, 2)#"M2")],
                       ["v"#Index]>,
                     DwarfRegAlias<!cast<Register>("V"#Index)> {
      let SubRegIndices = [sub_vrm2_0, sub_vrm2_1];
    }
  }

  foreach Index = [0, 8, 16, 24] in {
    def V#Index#M8 : RISCVRegWithSubRegs<Index, "v"#Index,
                       [!cast<Register>("V"#Index#"M4"),
                        !cast<Register>("V"#!add(Index, 4)#"M4")],
                       ["v"#Index]>,
                     DwarfRegAlias<!cast<Register>("V"#Index)> {
      let SubRegIndices = [sub_vrm4_0, sub_vrm4_1];
    }
  }

  def VTYPE : RISCVReg<0, "vtype", ["vtype"]>;
  def VL    : RISCVReg<0, "vl", ["vl"]>;
  def VXSAT : RISCVReg<0, "vxsat", ["vxsat"]>;
  def VXRM  : RISCVReg<0, "vxrm", ["vxrm"]>;
  def VLENB : RISCVReg<0, "vlenb", ["vlenb"]>,
              DwarfRegNum<[!add(4096, SysRegVLENB.Encoding)]>;
}

foreach m = [1, 2, 4] in {
  foreach n = NFList<m>.L in {
    def "VN" # n # "M" # m # "NoV0": RegisterTuples<
                                       SubRegSet<n, m>.L,
                                       VRegList<[], 0, n, m, false>.L>;
    def "VN" # n # "M" # m # "V0" : RegisterTuples<
                                      SubRegSet<n, m>.L,
                                      VRegList<[], 0, n, m, true>.L>;
  }
}
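
// For illustration (a sketch of the expansion): with m = 2 and n = 2 the
// loop above defines VN2M2NoV0 and VN2M2V0. VN2M2NoV0 is the set of tuples
// (V8M2, V10M2), (V10M2, V12M2), ..., (V28M2, V30M2), (V2M2, V4M2), ...,
// built from SubRegSet<2, 2>.L = [sub_vrm2_0, sub_vrm2_1] and the register
// lists produced by VRegList<[], 0, 2, 2, false>, while VN2M2V0 contains the
// single tuple (V0M2, V2M2).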

class VReg<list<ValueType> regTypes, dag regList, int Vlmul>
  : RegisterClass<"RISCV",
                  regTypes,
                  64, // The maximum supported ELEN is 64.
                  regList> {
  int VLMul = Vlmul;
  int Size = !mul(Vlmul, 64);
}

def VR : VReg<[vint8m1_t, vint16m1_t, vint32m1_t, vint64m1_t,
               vfloat16m1_t, vfloat32m1_t, vfloat64m1_t,
               vint8mf2_t, vint8mf4_t, vint8mf8_t,
               vint16mf2_t, vint16mf4_t, vint32mf2_t,
               vfloat16mf4_t, vfloat16mf2_t, vfloat32mf2_t,
               vbool64_t, vbool32_t, vbool16_t, vbool8_t, vbool4_t,
               vbool2_t, vbool1_t],
              (add (sequence "V%u", 8, 31),
                   (sequence "V%u", 0, 7)), 1>;

def VRNoV0 : VReg<[vint8m1_t, vint16m1_t, vint32m1_t, vint64m1_t,
                   vfloat16m1_t, vfloat32m1_t, vfloat64m1_t,
                   vint8mf2_t, vint8mf4_t, vint8mf8_t,
                   vint16mf2_t, vint16mf4_t, vint32mf2_t,
                   vfloat16mf4_t, vfloat16mf2_t, vfloat32mf2_t,
                   vbool64_t, vbool32_t, vbool16_t, vbool8_t, vbool4_t,
                   vbool2_t, vbool1_t],
                  (add (sequence "V%u", 8, 31),
                       (sequence "V%u", 1, 7)), 1>;

def VRM2 : VReg<[vint8m2_t, vint16m2_t, vint32m2_t, vint64m2_t,
                 vfloat16m2_t, vfloat32m2_t, vfloat64m2_t],
                (add (sequence "V%uM2", 8, 31, 2),
                     (sequence "V%uM2", 0, 7, 2)), 2>;

def VRM2NoV0 : VReg<[vint8m2_t, vint16m2_t, vint32m2_t, vint64m2_t,
                     vfloat16m2_t, vfloat32m2_t, vfloat64m2_t],
                    (add (sequence "V%uM2", 8, 31, 2),
                         (sequence "V%uM2", 2, 7, 2)), 2>;

def VRM4 : VReg<[vint8m4_t, vint16m4_t, vint32m4_t, vint64m4_t,
                 vfloat16m4_t, vfloat32m4_t, vfloat64m4_t],
                (add V8M4, V12M4, V16M4, V20M4, V24M4, V28M4, V0M4, V4M4), 4>;

def VRM4NoV0 : VReg<[vint8m4_t, vint16m4_t, vint32m4_t, vint64m4_t,
                     vfloat16m4_t, vfloat32m4_t, vfloat64m4_t],
                    (add V8M4, V12M4, V16M4, V20M4, V24M4, V28M4, V4M4), 4>;

def VRM8 : VReg<[vint8m8_t, vint16m8_t, vint32m8_t, vint64m8_t,
                 vfloat16m8_t, vfloat32m8_t, vfloat64m8_t],
                (add V8M8, V16M8, V24M8, V0M8), 8>;

def VRM8NoV0 : VReg<[vint8m8_t, vint16m8_t, vint32m8_t, vint64m8_t,
                     vfloat16m8_t, vfloat32m8_t, vfloat64m8_t],
                    (add V8M8, V16M8, V24M8), 8>;

defvar VMaskVTs = [vbool1_t, vbool2_t, vbool4_t, vbool8_t, vbool16_t,
                   vbool32_t, vbool64_t];

def VMV0 : RegisterClass<"RISCV", VMaskVTs, 64, (add V0)> {
  let Size = 64;
}

// This register class is used by inline assembly for vector mask types.
def VM : VReg<VMaskVTs,
              (add (sequence "V%u", 8, 31),
                   (sequence "V%u", 0, 7)), 1>;

foreach m = LMULList.m in {
  foreach nf = NFList<m>.L in {
    def "VRN" # nf # "M" # m # "NoV0" : VReg<[untyped],
          (add !cast<RegisterTuples>("VN" # nf # "M" # m # "NoV0")),
          !mul(nf, m)>;
    def "VRN" # nf # "M" # m : VReg<[untyped],
          (add !cast<RegisterTuples>("VN" # nf # "M" # m # "NoV0"),
               !cast<RegisterTuples>("VN" # nf # "M" # m # "V0")),
          !mul(nf, m)>;
  }
}

// Special registers
def FFLAGS : RISCVReg<0, "fflags">;
def FRM    : RISCVReg<0, "frm">;

// Any type register. Used for .insn directives when we don't know what the
// register types could be.
// NOTE: The alignment and size are bogus values. The Size needs to be
// non-zero or tablegen will use "untyped" to determine the size, which will
// assert.
let isAllocatable = 0 in
def AnyReg : RegisterClass<"RISCV", [untyped], 32,
                           (add (sequence "X%u", 0, 31),
                                (sequence "F%u_D", 0, 31),
                                (sequence "V%u", 0, 31))> {
  let Size = 32;
}
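
// For illustration: AnyReg lets the assembler accept any register name as a
// .insn operand, e.g. (assuming the standard .insn r opcode/funct3/funct7
// operand order, this encodes add a0, a1, a2):
//   .insn r 0x33, 0, 0, a0, a1, a2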