//===- llvm/CodeGen/DwarfExpression.cpp - Dwarf Debug Framework -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing dwarf debug info into asm files.
//
//===----------------------------------------------------------------------===//

#include "DwarfExpression.h"
#include "DwarfCompileUnit.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "dwarfdebug"

void DwarfExpression::emitConstu(uint64_t Value) {
  if (Value < 32)
    emitOp(dwarf::DW_OP_lit0 + Value);
  else if (Value == std::numeric_limits<uint64_t>::max()) {
    // Only do this for 64-bit values as the DWARF expression stack uses
    // target-address-size values.
    emitOp(dwarf::DW_OP_lit0);
    emitOp(dwarf::DW_OP_not);
  } else {
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned(Value);
  }
}

void DwarfExpression::addReg(int DwarfReg, const char *Comment) {
  assert(DwarfReg >= 0 && "invalid negative dwarf register number");
  assert((isUnknownLocation() || isRegisterLocation()) &&
         "location description already locked down");
  LocationKind = Register;
  if (DwarfReg < 32) {
    emitOp(dwarf::DW_OP_reg0 + DwarfReg, Comment);
  } else {
    emitOp(dwarf::DW_OP_regx, Comment);
    emitUnsigned(DwarfReg);
  }
}

void DwarfExpression::addBReg(int DwarfReg, int Offset) {
  assert(DwarfReg >= 0 && "invalid negative dwarf register number");
  assert(!isRegisterLocation() && "location description already locked down");
  if (DwarfReg < 32) {
    emitOp(dwarf::DW_OP_breg0 + DwarfReg);
  } else {
    emitOp(dwarf::DW_OP_bregx);
    emitUnsigned(DwarfReg);
  }
  emitSigned(Offset);
}

void DwarfExpression::addFBReg(int Offset) {
  emitOp(dwarf::DW_OP_fbreg);
  emitSigned(Offset);
}

void DwarfExpression::addOpPiece(unsigned SizeInBits, unsigned OffsetInBits) {
  if (!SizeInBits)
    return;

  const unsigned SizeOfByte = 8;
  if (OffsetInBits > 0 || SizeInBits % SizeOfByte) {
    emitOp(dwarf::DW_OP_bit_piece);
    emitUnsigned(SizeInBits);
    emitUnsigned(OffsetInBits);
  } else {
    emitOp(dwarf::DW_OP_piece);
    unsigned ByteSize = SizeInBits / SizeOfByte;
    emitUnsigned(ByteSize);
  }
  this->OffsetInBits += SizeInBits;
}

void DwarfExpression::addShr(unsigned ShiftBy) {
  emitConstu(ShiftBy);
  emitOp(dwarf::DW_OP_shr);
}

void DwarfExpression::addAnd(unsigned Mask) {
  emitConstu(Mask);
  emitOp(dwarf::DW_OP_and);
}

bool DwarfExpression::addMachineReg(const TargetRegisterInfo &TRI,
                                    llvm::Register MachineReg,
                                    unsigned MaxSize) {
  if (!MachineReg.isPhysical()) {
    if (isFrameRegister(TRI, MachineReg)) {
      DwarfRegs.push_back(Register::createRegister(-1, nullptr));
      return true;
    }
    return false;
  }

  int Reg = TRI.getDwarfRegNum(MachineReg, false);

  // If this is a valid register number, emit it.
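  // (getDwarfRegNum returns a negative value when the register has no direct
  // DWARF encoding; those cases fall through to the super-/sub-register
  // handling below.)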
  if (Reg >= 0) {
    DwarfRegs.push_back(Register::createRegister(Reg, nullptr));
    return true;
  }

  // Walk up the super-register chain until we find a valid number.
  // For example, EAX on x86_64 is a 32-bit fragment of RAX with offset 0.
  for (MCPhysReg SR : TRI.superregs(MachineReg)) {
    Reg = TRI.getDwarfRegNum(SR, false);
    if (Reg >= 0) {
      unsigned Idx = TRI.getSubRegIndex(SR, MachineReg);
      unsigned Size = TRI.getSubRegIdxSize(Idx);
      unsigned RegOffset = TRI.getSubRegIdxOffset(Idx);
      DwarfRegs.push_back(Register::createRegister(Reg, "super-register"));
      // Use a DW_OP_bit_piece to describe the sub-register.
      setSubRegisterPiece(Size, RegOffset);
      return true;
    }
  }

  // Otherwise, attempt to find a covering set of sub-register numbers.
  // For example, Q0 on ARM is a composition of D0+D1.
  unsigned CurPos = 0;
  // The size of the register in bits.
  const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(MachineReg);
  unsigned RegSize = TRI.getRegSizeInBits(*RC);
  // Keep track of the bits in the register we already emitted, so we
  // can avoid emitting redundant aliasing subregs. Because this is
  // just doing a greedy scan of all subregisters, it is possible that
  // this doesn't find a combination of subregisters that fully cover
  // the register (even though one may exist).
  SmallBitVector Coverage(RegSize, false);
  for (MCPhysReg SR : TRI.subregs(MachineReg)) {
    unsigned Idx = TRI.getSubRegIndex(MachineReg, SR);
    unsigned Size = TRI.getSubRegIdxSize(Idx);
    unsigned Offset = TRI.getSubRegIdxOffset(Idx);
    Reg = TRI.getDwarfRegNum(SR, false);
    if (Reg < 0)
      continue;

    // Used to build the intersection between the bits we already
    // emitted and the bits covered by this subregister.
    SmallBitVector CurSubReg(RegSize, false);
    CurSubReg.set(Offset, Offset + Size);

    // If this sub-register has a DWARF number and we haven't covered
    // its range, and its range covers the value, emit a DWARF piece for it.
    if (Offset < MaxSize && CurSubReg.test(Coverage)) {
      // Emit a piece for any gap in the coverage.
      if (Offset > CurPos)
        DwarfRegs.push_back(Register::createSubRegister(
            -1, Offset - CurPos, "no DWARF register encoding"));
      if (Offset == 0 && Size >= MaxSize)
        DwarfRegs.push_back(Register::createRegister(Reg, "sub-register"));
      else
        DwarfRegs.push_back(Register::createSubRegister(
            Reg, std::min<unsigned>(Size, MaxSize - Offset), "sub-register"));
    }
    // Mark it as emitted.
    Coverage.set(Offset, Offset + Size);
    CurPos = Offset + Size;
  }
  // Failed to find any DWARF encoding.
  if (CurPos == 0)
    return false;
  // Found a partial or complete DWARF encoding.
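  // If the sub-registers do not cover the high bits of the register, pad the
  // composite with a final piece that has no DWARF register encoding
  // (DwarfRegNo of -1), so the pieces still add up to the full register size.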
  if (CurPos < RegSize)
    DwarfRegs.push_back(Register::createSubRegister(
        -1, RegSize - CurPos, "no DWARF register encoding"));
  return true;
}

void DwarfExpression::addStackValue() {
  if (DwarfVersion >= 4)
    emitOp(dwarf::DW_OP_stack_value);
}

void DwarfExpression::addSignedConstant(int64_t Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;
  emitOp(dwarf::DW_OP_consts);
  emitSigned(Value);
}

void DwarfExpression::addUnsignedConstant(uint64_t Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;
  emitConstu(Value);
}

void DwarfExpression::addUnsignedConstant(const APInt &Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;

  unsigned Size = Value.getBitWidth();
  const uint64_t *Data = Value.getRawData();

  // Chop it up into 64-bit pieces, because that's the maximum that
  // addUnsignedConstant takes.
  unsigned Offset = 0;
  while (Offset < Size) {
    addUnsignedConstant(*Data++);
    if (Offset == 0 && Size <= 64)
      break;
    addStackValue();
    addOpPiece(std::min(Size - Offset, 64u), Offset);
    Offset += 64;
  }
}

void DwarfExpression::addConstantFP(const APFloat &APF, const AsmPrinter &AP) {
  assert(isImplicitLocation() || isUnknownLocation());
  APInt API = APF.bitcastToAPInt();
  int NumBytes = API.getBitWidth() / 8;
  if (NumBytes == 4 /*float*/ || NumBytes == 8 /*double*/) {
    // FIXME: Add support for `long double`.
    emitOp(dwarf::DW_OP_implicit_value);
    emitUnsigned(NumBytes /*Size of the block in bytes*/);

    // The loop below emits the value starting at the least significant byte,
    // so we need to perform a byte-swap to get the byte order correct in case
    // of a big-endian target.
    if (AP.getDataLayout().isBigEndian())
      API = API.byteSwap();

    for (int i = 0; i < NumBytes; ++i) {
      emitData1(API.getZExtValue() & 0xFF);
      API = API.lshr(8);
    }

    return;
  }
  LLVM_DEBUG(
      dbgs() << "Skipped DW_OP_implicit_value creation for ConstantFP of size: "
             << API.getBitWidth() << " bits\n");
}

bool DwarfExpression::addMachineRegExpression(const TargetRegisterInfo &TRI,
                                              DIExpressionCursor &ExprCursor,
                                              llvm::Register MachineReg,
                                              unsigned FragmentOffsetInBits) {
  auto Fragment = ExprCursor.getFragmentInfo();
  if (!addMachineReg(TRI, MachineReg, Fragment ? Fragment->SizeInBits : ~1U)) {
    LocationKind = Unknown;
    return false;
  }

  bool HasComplexExpression = false;
  auto Op = ExprCursor.peek();
  if (Op && Op->getOp() != dwarf::DW_OP_LLVM_fragment)
    HasComplexExpression = true;

  // If the register can only be described by a complex expression (i.e.,
  // multiple subregisters) it doesn't safely compose with another complex
  // expression. For example, it is not possible to apply a DW_OP_deref
  // operation to multiple DW_OP_pieces, since composite location descriptions
  // do not push anything on the DWARF stack.
  //
  // DW_OP_entry_value operations can only hold a DWARF expression or a
  // register location description, so we can't emit a single entry value
  // covering a composite location description. In the future we may want to
  // emit entry value operations for each register location in the composite
  // location, but until that is supported do not emit anything.
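  // For example, a value split across two registers is a composite like
  // "DW_OP_reg0 DW_OP_piece 8, DW_OP_reg1 DW_OP_piece 8": a following
  // DW_OP_deref has no stack entry to act on, and DW_OP_entry_value cannot
  // wrap more than a single register location.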
  if ((HasComplexExpression || IsEmittingEntryValue) && DwarfRegs.size() > 1) {
    if (IsEmittingEntryValue)
      cancelEntryValue();
    DwarfRegs.clear();
    LocationKind = Unknown;
    return false;
  }

  // Handle simple register locations. If we are supposed to emit
  // a call site parameter expression and if that expression is just a register
  // location, emit it with addBReg and offset 0, because we should emit a DWARF
  // expression representing a value, rather than a location.
  if ((!isParameterValue() && !isMemoryLocation() && !HasComplexExpression) ||
      isEntryValue()) {
    auto FragmentInfo = ExprCursor.getFragmentInfo();
    unsigned RegSize = 0;
    for (auto &Reg : DwarfRegs) {
      RegSize += Reg.SubRegSize;
      if (Reg.DwarfRegNo >= 0)
        addReg(Reg.DwarfRegNo, Reg.Comment);
      if (FragmentInfo)
        if (RegSize > FragmentInfo->SizeInBits)
          // If the register is larger than the current fragment stop
          // once the fragment is covered.
          break;
      addOpPiece(Reg.SubRegSize);
    }

    if (isEntryValue()) {
      finalizeEntryValue();

      if (!isIndirect() && !isParameterValue() && !HasComplexExpression &&
          DwarfVersion >= 4)
        emitOp(dwarf::DW_OP_stack_value);
    }

    DwarfRegs.clear();
    // If we need to mask out a subregister, do it now, unless the next
    // operation would emit an OpPiece anyway.
    auto NextOp = ExprCursor.peek();
    if (SubRegisterSizeInBits && NextOp &&
        (NextOp->getOp() != dwarf::DW_OP_LLVM_fragment))
      maskSubRegister();
    return true;
  }

  // Don't emit locations that cannot be expressed without DW_OP_stack_value.
  if (DwarfVersion < 4)
    if (any_of(ExprCursor, [](DIExpression::ExprOperand Op) -> bool {
          return Op.getOp() == dwarf::DW_OP_stack_value;
        })) {
      DwarfRegs.clear();
      LocationKind = Unknown;
      return false;
    }

  // TODO: We should not give up here but the following code needs to be changed
  // to deal with multiple (sub)registers first.
  if (DwarfRegs.size() > 1) {
    LLVM_DEBUG(dbgs() << "TODO: giving up on debug information due to "
                         "multi-register usage.\n");
    DwarfRegs.clear();
    LocationKind = Unknown;
    return false;
  }

  auto Reg = DwarfRegs[0];
  bool FBReg = isFrameRegister(TRI, MachineReg);
  int SignedOffset = 0;
  assert(!Reg.isSubRegister() && "full register expected");

  // Pattern-match combinations for which more efficient representations exist.
  // [Reg, DW_OP_plus_uconst, Offset] --> [DW_OP_breg, Offset].
  if (Op && (Op->getOp() == dwarf::DW_OP_plus_uconst)) {
    uint64_t Offset = Op->getArg(0);
    uint64_t IntMax = static_cast<uint64_t>(std::numeric_limits<int>::max());
    if (Offset <= IntMax) {
      SignedOffset = Offset;
      ExprCursor.take();
    }
  }

  // [Reg, DW_OP_constu, Offset, DW_OP_plus] --> [DW_OP_breg, Offset]
  // [Reg, DW_OP_constu, Offset, DW_OP_minus] --> [DW_OP_breg,-Offset]
  // If Reg is a subregister we need to mask it out before subtracting.
  if (Op && Op->getOp() == dwarf::DW_OP_constu) {
    uint64_t Offset = Op->getArg(0);
    uint64_t IntMax = static_cast<uint64_t>(std::numeric_limits<int>::max());
    auto N = ExprCursor.peekNext();
    if (N && N->getOp() == dwarf::DW_OP_plus && Offset <= IntMax) {
      SignedOffset = Offset;
      ExprCursor.consume(2);
    } else if (N && N->getOp() == dwarf::DW_OP_minus &&
               !SubRegisterSizeInBits && Offset <= IntMax + 1) {
      SignedOffset = -static_cast<int64_t>(Offset);
      ExprCursor.consume(2);
    }
  }

  if (FBReg)
    addFBReg(SignedOffset);
  else
    addBReg(Reg.DwarfRegNo, SignedOffset);
  DwarfRegs.clear();

  // If we need to mask out a subregister, do it now, unless the next
  // operation would emit an OpPiece anyway.
  auto NextOp = ExprCursor.peek();
  if (SubRegisterSizeInBits && NextOp &&
      (NextOp->getOp() != dwarf::DW_OP_LLVM_fragment))
    maskSubRegister();

  return true;
}

void DwarfExpression::setEntryValueFlags(const MachineLocation &Loc) {
  LocationFlags |= EntryValue;
  if (Loc.isIndirect())
    LocationFlags |= Indirect;
}

void DwarfExpression::setLocation(const MachineLocation &Loc,
                                  const DIExpression *DIExpr) {
  if (Loc.isIndirect())
    setMemoryLocationKind();

  if (DIExpr->isEntryValue())
    setEntryValueFlags(Loc);
}

void DwarfExpression::beginEntryValueExpression(
    DIExpressionCursor &ExprCursor) {
  auto Op = ExprCursor.take();
  (void)Op;
  assert(Op && Op->getOp() == dwarf::DW_OP_LLVM_entry_value);
  assert(!IsEmittingEntryValue && "Already emitting entry value?");
  assert(Op->getArg(0) == 1 &&
         "Can currently only emit entry values covering a single operation");

  SavedLocationKind = LocationKind;
  LocationKind = Register;
  LocationFlags |= EntryValue;
  IsEmittingEntryValue = true;
  enableTemporaryBuffer();
}

void DwarfExpression::finalizeEntryValue() {
  assert(IsEmittingEntryValue && "Entry value not open?");
  disableTemporaryBuffer();

  emitOp(CU.getDwarf5OrGNULocationAtom(dwarf::DW_OP_entry_value));

  // Emit the entry value's size operand.
  unsigned Size = getTemporaryBufferSize();
  emitUnsigned(Size);

  // Emit the entry value's DWARF block operand.
  commitTemporaryBuffer();

  LocationFlags &= ~EntryValue;
  LocationKind = SavedLocationKind;
  IsEmittingEntryValue = false;
}

void DwarfExpression::cancelEntryValue() {
  assert(IsEmittingEntryValue && "Entry value not open?");
  disableTemporaryBuffer();

  // The temporary buffer can't be emptied, so for now just assert that nothing
  // has been emitted to it.
  assert(getTemporaryBufferSize() == 0 &&
         "Began emitting entry value block before cancelling entry value");

  LocationKind = SavedLocationKind;
  IsEmittingEntryValue = false;
}

unsigned DwarfExpression::getOrCreateBaseType(unsigned BitSize,
                                              dwarf::TypeKind Encoding) {
  // Reuse the base_type if we already have one in this CU; otherwise
  // create a new one.
  unsigned I = 0, E = CU.ExprRefedBaseTypes.size();
  for (; I != E; ++I)
    if (CU.ExprRefedBaseTypes[I].BitSize == BitSize &&
        CU.ExprRefedBaseTypes[I].Encoding == Encoding)
      break;

  if (I == E)
    CU.ExprRefedBaseTypes.emplace_back(BitSize, Encoding);
  return I;
}

/// Assuming a well-formed expression, match "DW_OP_deref*
/// DW_OP_LLVM_fragment?".
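/// That is, return true if the remaining operations only dereference the
/// location (optionally followed by a fragment marker), so the expression as
/// a whole still describes a memory location.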
static bool isMemoryLocation(DIExpressionCursor ExprCursor) {
  while (ExprCursor) {
    auto Op = ExprCursor.take();
    switch (Op->getOp()) {
    case dwarf::DW_OP_deref:
    case dwarf::DW_OP_LLVM_fragment:
      break;
    default:
      return false;
    }
  }
  return true;
}

void DwarfExpression::addExpression(DIExpressionCursor &&ExprCursor) {
  addExpression(std::move(ExprCursor),
                [](unsigned Idx, DIExpressionCursor &Cursor) -> bool {
                  llvm_unreachable("unhandled opcode found in expression");
                });
}

bool DwarfExpression::addExpression(
    DIExpressionCursor &&ExprCursor,
    llvm::function_ref<bool(unsigned, DIExpressionCursor &)> InsertArg) {
  // Entry values can currently only cover the initial register location,
  // and not any other parts of the following DWARF expression.
  assert(!IsEmittingEntryValue && "Can't emit entry value around expression");

  std::optional<DIExpression::ExprOperand> PrevConvertOp;

  while (ExprCursor) {
    auto Op = ExprCursor.take();
    uint64_t OpNum = Op->getOp();

    if (OpNum >= dwarf::DW_OP_reg0 && OpNum <= dwarf::DW_OP_reg31) {
      emitOp(OpNum);
      continue;
    } else if (OpNum >= dwarf::DW_OP_breg0 && OpNum <= dwarf::DW_OP_breg31) {
      addBReg(OpNum - dwarf::DW_OP_breg0, Op->getArg(0));
      continue;
    }

    switch (OpNum) {
    case dwarf::DW_OP_LLVM_arg:
      if (!InsertArg(Op->getArg(0), ExprCursor)) {
        LocationKind = Unknown;
        return false;
      }
      break;
    case dwarf::DW_OP_LLVM_fragment: {
      unsigned SizeInBits = Op->getArg(1);
      unsigned FragmentOffset = Op->getArg(0);
      // The fragment offset must have already been adjusted by emitting an
      // empty DW_OP_piece / DW_OP_bit_piece before we emitted the base
      // location.
      assert(OffsetInBits >= FragmentOffset && "fragment offset not added?");
      assert(SizeInBits >= OffsetInBits - FragmentOffset && "size underflow");

      // If addMachineReg already emitted DW_OP_piece operations to represent
      // a super-register by splicing together sub-registers, subtract the size
      // of the pieces that was already emitted.
      SizeInBits -= OffsetInBits - FragmentOffset;

      // If addMachineReg requested a DW_OP_bit_piece to stencil out a
      // sub-register that is smaller than the current fragment's size, use it.
      if (SubRegisterSizeInBits)
        SizeInBits = std::min<unsigned>(SizeInBits, SubRegisterSizeInBits);

      // Emit a DW_OP_stack_value for implicit location descriptions.
      if (isImplicitLocation())
        addStackValue();

      // Emit the DW_OP_piece.
      addOpPiece(SizeInBits, SubRegisterOffsetInBits);
      setSubRegisterPiece(0, 0);
      // Reset the location description kind.
      LocationKind = Unknown;
      return true;
    }
    case dwarf::DW_OP_LLVM_extract_bits_sext:
    case dwarf::DW_OP_LLVM_extract_bits_zext: {
      unsigned SizeInBits = Op->getArg(1);
      unsigned BitOffset = Op->getArg(0);

      // If we have a memory location then dereference to get the value, though
      // we have to make sure we don't dereference any bytes past the end of the
      // object.
      if (isMemoryLocation()) {
        emitOp(dwarf::DW_OP_deref_size);
        emitUnsigned(alignTo(BitOffset + SizeInBits, 8) / 8);
      }

      // Extract the bits by a shift left (to shift out the bits after what we
      // want to extract) followed by shift right (to shift the bits to position
      // 0 and also sign/zero extend). These operations are done in the DWARF
      // "generic type" whose size is the size of a pointer.
      unsigned PtrSizeInBytes = CU.getAsmPrinter()->MAI->getCodePointerSize();
      unsigned LeftShift = PtrSizeInBytes * 8 - (SizeInBits + BitOffset);
      unsigned RightShift = LeftShift + BitOffset;
      if (LeftShift) {
        emitOp(dwarf::DW_OP_constu);
        emitUnsigned(LeftShift);
        emitOp(dwarf::DW_OP_shl);
      }
      emitOp(dwarf::DW_OP_constu);
      emitUnsigned(RightShift);
      emitOp(OpNum == dwarf::DW_OP_LLVM_extract_bits_sext ? dwarf::DW_OP_shra
                                                          : dwarf::DW_OP_shr);

      // The value is now at the top of the stack, so set the location to
      // implicit so that we get a stack_value at the end.
      LocationKind = Implicit;
      break;
    }
    case dwarf::DW_OP_plus_uconst:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_plus_uconst);
      emitUnsigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_plus:
    case dwarf::DW_OP_minus:
    case dwarf::DW_OP_mul:
    case dwarf::DW_OP_div:
    case dwarf::DW_OP_mod:
    case dwarf::DW_OP_or:
    case dwarf::DW_OP_and:
    case dwarf::DW_OP_xor:
    case dwarf::DW_OP_shl:
    case dwarf::DW_OP_shr:
    case dwarf::DW_OP_shra:
    case dwarf::DW_OP_lit0:
    case dwarf::DW_OP_not:
    case dwarf::DW_OP_dup:
    case dwarf::DW_OP_push_object_address:
    case dwarf::DW_OP_over:
    case dwarf::DW_OP_eq:
    case dwarf::DW_OP_ne:
    case dwarf::DW_OP_gt:
    case dwarf::DW_OP_ge:
    case dwarf::DW_OP_lt:
    case dwarf::DW_OP_le:
      emitOp(OpNum);
      break;
    case dwarf::DW_OP_deref:
      assert(!isRegisterLocation());
      if (!isMemoryLocation() && ::isMemoryLocation(ExprCursor))
        // Turning this into a memory location description makes the deref
        // implicit.
        LocationKind = Memory;
      else
        emitOp(dwarf::DW_OP_deref);
      break;
    case dwarf::DW_OP_constu:
      assert(!isRegisterLocation());
      emitConstu(Op->getArg(0));
      break;
    case dwarf::DW_OP_consts:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_consts);
      emitSigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_LLVM_convert: {
      unsigned BitSize = Op->getArg(0);
      dwarf::TypeKind Encoding = static_cast<dwarf::TypeKind>(Op->getArg(1));
      if (DwarfVersion >= 5 && CU.getDwarfDebug().useOpConvert()) {
        emitOp(dwarf::DW_OP_convert);
        // If targeting a location list, simply emit the index into the raw
        // byte stream as ULEB128; DwarfDebug::emitDebugLocEntry has been
        // fitted with means to extract it later.
        // If targeting an inlined DW_AT_location, insert a DIEBaseTypeRef
        // (containing the index and a resolve mechanism during emit) into the
        // DIE value list.
        emitBaseTypeRef(getOrCreateBaseType(BitSize, Encoding));
      } else {
        if (PrevConvertOp && PrevConvertOp->getArg(0) < BitSize) {
          if (Encoding == dwarf::DW_ATE_signed)
            emitLegacySExt(PrevConvertOp->getArg(0));
          else if (Encoding == dwarf::DW_ATE_unsigned)
            emitLegacyZExt(PrevConvertOp->getArg(0));
          PrevConvertOp = std::nullopt;
        } else {
          PrevConvertOp = Op;
        }
      }
      break;
    }
    case dwarf::DW_OP_stack_value:
      LocationKind = Implicit;
      break;
    case dwarf::DW_OP_swap:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_swap);
      break;
    case dwarf::DW_OP_xderef:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_xderef);
      break;
    case dwarf::DW_OP_deref_size:
      emitOp(dwarf::DW_OP_deref_size);
      emitData1(Op->getArg(0));
      break;
    case dwarf::DW_OP_LLVM_tag_offset:
      TagOffset = Op->getArg(0);
      break;
    case dwarf::DW_OP_regx:
      emitOp(dwarf::DW_OP_regx);
      emitUnsigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_bregx:
      emitOp(dwarf::DW_OP_bregx);
      emitUnsigned(Op->getArg(0));
      emitSigned(Op->getArg(1));
      break;
    default:
      llvm_unreachable("unhandled opcode found in expression");
    }
  }

  if (isImplicitLocation() && !isParameterValue())
    // Turn this into an implicit location description.
    addStackValue();

  return true;
}

/// Add masking operations to stencil out a subregister.
void DwarfExpression::maskSubRegister() {
  assert(SubRegisterSizeInBits && "no subregister was registered");
  if (SubRegisterOffsetInBits > 0)
    addShr(SubRegisterOffsetInBits);
  uint64_t Mask = (1ULL << (uint64_t)SubRegisterSizeInBits) - 1ULL;
  addAnd(Mask);
}

void DwarfExpression::finalize() {
  assert(DwarfRegs.size() == 0 && "dwarf registers not emitted");
  // Emit any outstanding DW_OP_piece operations to mask out subregisters.
  if (SubRegisterSizeInBits == 0)
    return;
  // Don't emit a DW_OP_piece for a subregister at offset 0.
  if (SubRegisterOffsetInBits == 0)
    return;
  addOpPiece(SubRegisterSizeInBits, SubRegisterOffsetInBits);
}

void DwarfExpression::addFragmentOffset(const DIExpression *Expr) {
  if (!Expr || !Expr->isFragment())
    return;

  uint64_t FragmentOffset = Expr->getFragmentInfo()->OffsetInBits;
  assert(FragmentOffset >= OffsetInBits &&
         "overlapping or duplicate fragments");
  if (FragmentOffset > OffsetInBits)
    addOpPiece(FragmentOffset - OffsetInBits);
  OffsetInBits = FragmentOffset;
}

void DwarfExpression::emitLegacySExt(unsigned FromBits) {
  // (((X >> (FromBits - 1)) * (~0)) << FromBits) | X
  emitOp(dwarf::DW_OP_dup);
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned(FromBits - 1);
  emitOp(dwarf::DW_OP_shr);
  emitOp(dwarf::DW_OP_lit0);
  emitOp(dwarf::DW_OP_not);
  emitOp(dwarf::DW_OP_mul);
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned(FromBits);
  emitOp(dwarf::DW_OP_shl);
  emitOp(dwarf::DW_OP_or);
}

void DwarfExpression::emitLegacyZExt(unsigned FromBits) {
  // Heuristic to decide the most efficient encoding.
  // A ULEB can encode 7 1-bits per byte.
  if (FromBits / 7 < 1+1+1+1+1) {
    // (X & ((1 << FromBits) - 1))
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned((1ULL << FromBits) - 1);
  } else {
    // Note that the DWARF 4 stack consists of pointer-sized elements,
    // so technically it doesn't make sense to shift left more than 64
    // bits. We leave that for the consumer to decide though. LLDB for
    // example uses APInt for the stack elements and can still deal
    // with this.
    emitOp(dwarf::DW_OP_lit1);
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned(FromBits);
    emitOp(dwarf::DW_OP_shl);
    emitOp(dwarf::DW_OP_lit1);
    emitOp(dwarf::DW_OP_minus);
  }
  emitOp(dwarf::DW_OP_and);
}

void DwarfExpression::addWasmLocation(unsigned Index, uint64_t Offset) {
  emitOp(dwarf::DW_OP_WASM_location);
  emitUnsigned(Index == 4/*TI_LOCAL_INDIRECT*/ ? 0/*TI_LOCAL*/ : Index);
  emitUnsigned(Offset);
  if (Index == 4 /*TI_LOCAL_INDIRECT*/) {
    assert(LocationKind == Unknown);
    LocationKind = Memory;
  } else {
    assert(LocationKind == Implicit || LocationKind == Unknown);
    LocationKind = Implicit;
  }
}