//===- RISCVMatInt.cpp - Immediate materialisation -------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "RISCVMatInt.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

static int getInstSeqCost(RISCVMatInt::InstSeq &Res, bool HasRVC) {
  if (!HasRVC)
    return Res.size();

  int Cost = 0;
  for (auto Instr : Res) {
    // Assume instructions that aren't listed aren't compressible.
    bool Compressed = false;
    switch (Instr.getOpcode()) {
    case RISCV::SLLI:
    case RISCV::SRLI:
      Compressed = true;
      break;
    case RISCV::ADDI:
    case RISCV::ADDIW:
    case RISCV::LUI:
      Compressed = isInt<6>(Instr.getImm());
      break;
    }
    // Two RVC instructions take the same space as one RVI instruction, but
    // can take longer to execute than the single RVI instruction. Thus, we
    // consider that two RVC instructions are slightly more costly than one
    // RVI instruction. For longer sequences of RVC instructions the space
    // savings can be worth it, though. The costs below try to model that.
    if (!Compressed)
      Cost += 100; // Baseline cost of one RVI instruction: 100%.
    else
      Cost += 70; // 70% cost of baseline.
  }
  return Cost;
}

// Recursively generate a sequence for materializing an integer.
static void generateInstSeqImpl(int64_t Val,
                                const FeatureBitset &ActiveFeatures,
                                RISCVMatInt::InstSeq &Res) {
  bool IsRV64 = ActiveFeatures[RISCV::Feature64Bit];

  // Use BSETI for a single bit that can't be expressed by a single LUI or ADDI.
  if (ActiveFeatures[RISCV::FeatureStdExtZbs] && isPowerOf2_64(Val) &&
      (!isInt<32>(Val) || Val == 0x800)) {
    Res.emplace_back(RISCV::BSETI, Log2_64(Val));
    return;
  }

  if (isInt<32>(Val)) {
    // Depending on the active bits in the immediate value v, the following
    // instruction sequences are emitted:
    //
    // v == 0                        : ADDI
    // v[0,12) != 0 && v[12,32) == 0 : ADDI
    // v[0,12) == 0 && v[12,32) != 0 : LUI
    // v[0,32) != 0                  : LUI+ADDI(W)
    int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
    int64_t Lo12 = SignExtend64<12>(Val);

    if (Hi20)
      Res.emplace_back(RISCV::LUI, Hi20);

    if (Lo12 || Hi20 == 0) {
      unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
      Res.emplace_back(AddiOpc, Lo12);
    }
    return;
  }

  assert(IsRV64 && "Can't emit >32-bit imm for non-RV64 target");

  // In the worst case, for a full 64-bit constant, a sequence of 8 instructions
  // (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be emitted. Note
  // that the first two instructions (LUI+ADDIW) can contribute up to 32 bits
  // while the following ADDI instructions contribute up to 12 bits each.
  //
  // At first glance, implementing this seems to be possible by simply emitting
  // the most significant 32 bits (LUI+ADDIW) followed by as many left shifts
  // (SLLI) and immediate additions (ADDI) as needed. However, because ADDI
  // performs a sign-extended addition, doing it that way would only work when
  // at most 11 bits of each ADDI are used.
  // Using all 12 bits of the ADDI instructions, as done by GAS, requires that
  // the constant is processed starting with the least significant bit.
  //
  // In the following, constants are processed from LSB to MSB but instruction
  // emission is performed from MSB to LSB by recursively calling
  // generateInstSeqImpl. In each recursion, first the lowest 12 bits are
  // removed from the constant and the optimal shift amount, which can be
  // greater than 12 bits if the constant is sparse, is determined. Then, the
  // shifted remaining constant is processed recursively and gets emitted as
  // soon as it fits into 32 bits. The emission of the shifts and additions is
  // performed when the recursion returns.
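  //
  // For example, Val = 0x300001234 is handled as follows: the low 12 bits
  // (0x234) are peeled off first, the remaining 0x300001000 is shifted right
  // by its 12 trailing zeros, and the 32-bit remainder 0x300001 is emitted as
  // LUI 0x300 + ADDIW 1. Unwinding the recursion then appends the shift and
  // the low bits, giving:
  //   LUI 0x300; ADDIW 1; SLLI 12; ADDI 0x234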

  int64_t Lo12 = SignExtend64<12>(Val);
  Val = (uint64_t)Val - (uint64_t)Lo12;

  int ShiftAmount = 0;
  bool Unsigned = false;

  // Val might now be valid for LUI without needing a shift.
  if (!isInt<32>(Val)) {
    ShiftAmount = llvm::countr_zero((uint64_t)Val);
    Val >>= ShiftAmount;

    // If the remaining bits don't fit in 12 bits, we might be able to reduce
    // the shift amount in order to use LUI which will zero the lower 12 bits.
    if (ShiftAmount > 12 && !isInt<12>(Val)) {
      if (isInt<32>((uint64_t)Val << 12)) {
        // Reduce the shift amount and add zeros to the LSBs so it will match
        // LUI.
        ShiftAmount -= 12;
        Val = (uint64_t)Val << 12;
      } else if (isUInt<32>((uint64_t)Val << 12) &&
                 ActiveFeatures[RISCV::FeatureStdExtZba]) {
        // Reduce the shift amount and add zeros to the LSBs so it will match
        // LUI, then shift left with SLLI.UW to clear the upper 32 set bits.
        ShiftAmount -= 12;
        Val = ((uint64_t)Val << 12) | (0xffffffffull << 32);
        Unsigned = true;
      }
    }

    // Try to use SLLI_UW for Val when it is uint32 but not int32.
    if (isUInt<32>((uint64_t)Val) && !isInt<32>((uint64_t)Val) &&
        ActiveFeatures[RISCV::FeatureStdExtZba]) {
      // Use LUI+ADDI or LUI to compose, then clear the upper 32 bits with
      // SLLI_UW.
      Val = ((uint64_t)Val) | (0xffffffffull << 32);
      Unsigned = true;
    }
  }

  generateInstSeqImpl(Val, ActiveFeatures, Res);

  // Skip the shift if we were able to use LUI directly.
  if (ShiftAmount) {
    unsigned Opc = Unsigned ? RISCV::SLLI_UW : RISCV::SLLI;
    Res.emplace_back(Opc, ShiftAmount);
  }

  if (Lo12)
    Res.emplace_back(RISCV::ADDI, Lo12);
}

static unsigned extractRotateInfo(int64_t Val) {
  // For the case 0b111..1..xxxxxx1..1..
  unsigned LeadingOnes = countLeadingOnes((uint64_t)Val);
  unsigned TrailingOnes = countTrailingOnes((uint64_t)Val);
  if (TrailingOnes > 0 && TrailingOnes < 64 &&
      (LeadingOnes + TrailingOnes) > (64 - 12))
    return 64 - TrailingOnes;

  // For the case 0bxxx1..1..1...xxx
  unsigned UpperTrailingOnes = countTrailingOnes(Hi_32(Val));
  unsigned LowerLeadingOnes = countLeadingOnes(Lo_32(Val));
  if (UpperTrailingOnes < 32 &&
      (UpperTrailingOnes + LowerLeadingOnes) > (64 - 12))
    return 32 - UpperTrailingOnes;

  return 0;
}

namespace llvm::RISCVMatInt {
InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
  RISCVMatInt::InstSeq Res;
  generateInstSeqImpl(Val, ActiveFeatures, Res);

  // If the low 12 bits are non-zero, the first expansion may end with an ADDI
  // or ADDIW. If there are trailing zeros, try generating a sign-extended
  // constant with no trailing zeros and use a final SLLI to restore them.
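  // For example, on RV64 Val = 0x880 would expand to LUI 1 + ADDIW -1920, but
  // (assuming LUI+ADDI fusion is not a tuning target) ADDI 17 + SLLI 7 is
  // preferred below because both instructions have compressed forms.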
  if ((Val & 0xfff) != 0 && (Val & 1) == 0 && Res.size() >= 2) {
    unsigned TrailingZeros = countTrailingZeros((uint64_t)Val);
    int64_t ShiftedVal = Val >> TrailingZeros;
    // If we can use C.LI+C.SLLI instead of LUI+ADDI(W), prefer that since it's
    // more compressible. But only if LUI+ADDI(W) isn't fusable.
    // NOTE: We don't check for the C extension to minimize differences in
    // generated code.
    bool IsShiftedCompressible =
        isInt<6>(ShiftedVal) && !ActiveFeatures[RISCV::TuneLUIADDIFusion];
    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(ShiftedVal, ActiveFeatures, TmpSeq);
    TmpSeq.emplace_back(RISCV::SLLI, TrailingZeros);

    // Keep the new sequence if it is an improvement.
    if (TmpSeq.size() < Res.size() || IsShiftedCompressible)
      Res = TmpSeq;
  }

  // If the constant is positive, we might be able to generate a shifted
  // constant with no leading zeros and use a final SRLI to restore them.
  if (Val > 0 && Res.size() > 2) {
    assert(ActiveFeatures[RISCV::Feature64Bit] &&
           "Expected RV32 to only need 2 instructions");
    unsigned LeadingZeros = countLeadingZeros((uint64_t)Val);
    uint64_t ShiftedVal = (uint64_t)Val << LeadingZeros;
    // Fill in the bits that will be shifted out with 1s. An example where this
    // helps is trailing-one masks with 32 or more ones. This will generate an
    // ADDI -1 and an SRLI.
    ShiftedVal |= maskTrailingOnes<uint64_t>(LeadingZeros);

    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(ShiftedVal, ActiveFeatures, TmpSeq);
    TmpSeq.emplace_back(RISCV::SRLI, LeadingZeros);

    // Keep the new sequence if it is an improvement.
    if (TmpSeq.size() < Res.size())
      Res = TmpSeq;

    // Some cases can benefit from filling the lower bits with zeros instead.
    ShiftedVal &= maskTrailingZeros<uint64_t>(LeadingZeros);
    TmpSeq.clear();
    generateInstSeqImpl(ShiftedVal, ActiveFeatures, TmpSeq);
    TmpSeq.emplace_back(RISCV::SRLI, LeadingZeros);

    // Keep the new sequence if it is an improvement.
    if (TmpSeq.size() < Res.size())
      Res = TmpSeq;

    // If we have exactly 32 leading zeros and Zba, we can try using zext.w at
    // the end of the sequence.
    if (LeadingZeros == 32 && ActiveFeatures[RISCV::FeatureStdExtZba]) {
      // Try replacing the upper bits with 1s.
      uint64_t LeadingOnesVal = Val | maskLeadingOnes<uint64_t>(LeadingZeros);
      TmpSeq.clear();
      generateInstSeqImpl(LeadingOnesVal, ActiveFeatures, TmpSeq);
      TmpSeq.emplace_back(RISCV::ADD_UW, 0);

      // Keep the new sequence if it is an improvement.
      if (TmpSeq.size() < Res.size())
        Res = TmpSeq;
    }
  }

  // Perform optimization with BCLRI/BSETI in the Zbs extension.
  if (Res.size() > 2 && ActiveFeatures[RISCV::FeatureStdExtZbs]) {
    assert(ActiveFeatures[RISCV::Feature64Bit] &&
           "Expected RV32 to only need 2 instructions");

    // 1. For values in the range 0xffffffff 7fffffff ~ 0xffffffff 00000000,
    //    call generateInstSeqImpl with Val|0x80000000 (which is expected to be
    //    an int32), then emit (BCLRI r, 31).
    // 2. For values in the range 0x80000000 ~ 0xffffffff, call
    //    generateInstSeqImpl with Val&~0x80000000 (which is expected to be an
    //    int32), then emit (BSETI r, 31).
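    //
    // For example, with Zbs on RV64, Val = 0xffffffff12345678 matches case 1:
    // Val|0x80000000 = 0xffffffff92345678 is a valid int32 and expands to
    // LUI 0x92345 + ADDIW 0x678, so appending BCLRI 31 materializes the
    // constant in three instructions instead of four.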
    int64_t NewVal;
    unsigned Opc;
    if (Val < 0) {
      Opc = RISCV::BCLRI;
      NewVal = Val | 0x80000000ll;
    } else {
      Opc = RISCV::BSETI;
      NewVal = Val & ~0x80000000ll;
    }
    if (isInt<32>(NewVal)) {
      RISCVMatInt::InstSeq TmpSeq;
      generateInstSeqImpl(NewVal, ActiveFeatures, TmpSeq);
      TmpSeq.emplace_back(Opc, 31);
      if (TmpSeq.size() < Res.size())
        Res = TmpSeq;
    }

    // Try to use BCLRI for the upper 32 bits if the original lower 32 bits are
    // a negative int32, or use BSETI for the upper 32 bits if the original
    // lower 32 bits are a positive int32.
    int32_t Lo = Lo_32(Val);
    uint32_t Hi = Hi_32(Val);
    Opc = 0;
    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(Lo, ActiveFeatures, TmpSeq);
    // Check if it is profitable to use BCLRI/BSETI.
    if (Lo > 0 && TmpSeq.size() + llvm::popcount(Hi) < Res.size()) {
      Opc = RISCV::BSETI;
    } else if (Lo < 0 && TmpSeq.size() + llvm::popcount(~Hi) < Res.size()) {
      Opc = RISCV::BCLRI;
      Hi = ~Hi;
    }
    // Search for each bit and build the corresponding BCLRI/BSETI.
    if (Opc > 0) {
      while (Hi != 0) {
        unsigned Bit = llvm::countr_zero(Hi);
        TmpSeq.emplace_back(Opc, Bit + 32);
        Hi &= (Hi - 1); // Clear the lowest set bit.
      }
      if (TmpSeq.size() < Res.size())
        Res = TmpSeq;
    }
  }

  // Perform optimization with SH*ADD in the Zba extension.
  if (Res.size() > 2 && ActiveFeatures[RISCV::FeatureStdExtZba]) {
    assert(ActiveFeatures[RISCV::Feature64Bit] &&
           "Expected RV32 to only need 2 instructions");
    int64_t Div = 0;
    unsigned Opc = 0;
    RISCVMatInt::InstSeq TmpSeq;
    // Select the opcode and divisor.
    if ((Val % 3) == 0 && isInt<32>(Val / 3)) {
      Div = 3;
      Opc = RISCV::SH1ADD;
    } else if ((Val % 5) == 0 && isInt<32>(Val / 5)) {
      Div = 5;
      Opc = RISCV::SH2ADD;
    } else if ((Val % 9) == 0 && isInt<32>(Val / 9)) {
      Div = 9;
      Opc = RISCV::SH3ADD;
    }
    // Build the new instruction sequence.
    if (Div > 0) {
      generateInstSeqImpl(Val / Div, ActiveFeatures, TmpSeq);
      TmpSeq.emplace_back(Opc, 0);
      if (TmpSeq.size() < Res.size())
        Res = TmpSeq;
    } else {
      // Try to use LUI+SH*ADD+ADDI.
      int64_t Hi52 = ((uint64_t)Val + 0x800ull) & ~0xfffull;
      int64_t Lo12 = SignExtend64<12>(Val);
      Div = 0;
      if (isInt<32>(Hi52 / 3) && (Hi52 % 3) == 0) {
        Div = 3;
        Opc = RISCV::SH1ADD;
      } else if (isInt<32>(Hi52 / 5) && (Hi52 % 5) == 0) {
        Div = 5;
        Opc = RISCV::SH2ADD;
      } else if (isInt<32>(Hi52 / 9) && (Hi52 % 9) == 0) {
        Div = 9;
        Opc = RISCV::SH3ADD;
      }
      // Build the new instruction sequence.
      if (Div > 0) {
        // A Val with a zero Lo12 (which implies Val equals Hi52) should
        // already have been lowered to LUI+SH*ADD by the previous
        // optimization.
        assert(Lo12 != 0 &&
               "unexpected instruction sequence for immediate materialisation");
        assert(TmpSeq.empty() && "Expected empty TmpSeq");
        generateInstSeqImpl(Hi52 / Div, ActiveFeatures, TmpSeq);
        TmpSeq.emplace_back(Opc, 0);
        TmpSeq.emplace_back(RISCV::ADDI, Lo12);
        if (TmpSeq.size() < Res.size())
          Res = TmpSeq;
      }
    }
  }

  // Perform optimization with rori in the Zbb extension.
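  // For example, with Zbb, Val = 0xF0FFFFFFFFFFFFFF rotated left by 8 is -16,
  // so ADDI -16 + RORI 8 replaces the three-instruction expansion
  // ADDI -15 + SLLI 56 + ADDI -1.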
  if (Res.size() > 2 && ActiveFeatures[RISCV::FeatureStdExtZbb]) {
    if (unsigned Rotate = extractRotateInfo(Val)) {
      RISCVMatInt::InstSeq TmpSeq;
      uint64_t NegImm12 =
          ((uint64_t)Val >> (64 - Rotate)) | ((uint64_t)Val << Rotate);
      assert(isInt<12>(NegImm12));
      TmpSeq.emplace_back(RISCV::ADDI, NegImm12);
      TmpSeq.emplace_back(RISCV::RORI, Rotate);
      Res = TmpSeq;
    }
  }
  return Res;
}

int getIntMatCost(const APInt &Val, unsigned Size,
                  const FeatureBitset &ActiveFeatures, bool CompressionCost) {
  bool IsRV64 = ActiveFeatures[RISCV::Feature64Bit];
  bool HasRVC = CompressionCost && (ActiveFeatures[RISCV::FeatureStdExtC] ||
                                    ActiveFeatures[RISCV::FeatureExtZca]);
  int PlatRegSize = IsRV64 ? 64 : 32;

  // Split the constant into platform register sized chunks, and calculate cost
  // of each chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < Size; ShiftVal += PlatRegSize) {
    APInt Chunk = Val.ashr(ShiftVal).sextOrTrunc(PlatRegSize);
    InstSeq MatSeq = generateInstSeq(Chunk.getSExtValue(), ActiveFeatures);
    Cost += getInstSeqCost(MatSeq, HasRVC);
  }
  return std::max(1, Cost);
}

OpndKind Inst::getOpndKind() const {
  switch (Opc) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case RISCV::LUI:
    return RISCVMatInt::Imm;
  case RISCV::ADD_UW:
    return RISCVMatInt::RegX0;
  case RISCV::SH1ADD:
  case RISCV::SH2ADD:
  case RISCV::SH3ADD:
    return RISCVMatInt::RegReg;
  case RISCV::ADDI:
  case RISCV::ADDIW:
  case RISCV::SLLI:
  case RISCV::SRLI:
  case RISCV::SLLI_UW:
  case RISCV::RORI:
  case RISCV::BSETI:
  case RISCV::BCLRI:
    return RISCVMatInt::RegImm;
  }
}

} // namespace llvm::RISCVMatInt