//===- llvm/CodeGen/DwarfExpression.cpp - Dwarf Debug Framework -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing dwarf debug info into asm files.
//
//===----------------------------------------------------------------------===//

#include "DwarfExpression.h"
#include "DwarfCompileUnit.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "dwarfdebug"

void DwarfExpression::emitConstu(uint64_t Value) {
  if (Value < 32)
    emitOp(dwarf::DW_OP_lit0 + Value);
  else if (Value == std::numeric_limits<uint64_t>::max()) {
    // Only do this for 64-bit values as the DWARF expression stack uses
    // target-address-size values.
    emitOp(dwarf::DW_OP_lit0);
    emitOp(dwarf::DW_OP_not);
  } else {
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned(Value);
  }
}

void DwarfExpression::addReg(int DwarfReg, const char *Comment) {
  assert(DwarfReg >= 0 && "invalid negative dwarf register number");
  assert((isUnknownLocation() || isRegisterLocation()) &&
         "location description already locked down");
  LocationKind = Register;
  if (DwarfReg < 32) {
    emitOp(dwarf::DW_OP_reg0 + DwarfReg, Comment);
  } else {
    emitOp(dwarf::DW_OP_regx, Comment);
    emitUnsigned(DwarfReg);
  }
}

void DwarfExpression::addBReg(int DwarfReg, int Offset) {
  assert(DwarfReg >= 0 && "invalid negative dwarf register number");
  assert(!isRegisterLocation() && "location description already locked down");
  if (DwarfReg < 32) {
    emitOp(dwarf::DW_OP_breg0 + DwarfReg);
  } else {
    emitOp(dwarf::DW_OP_bregx);
    emitUnsigned(DwarfReg);
  }
  emitSigned(Offset);
}

void DwarfExpression::addFBReg(int Offset) {
  emitOp(dwarf::DW_OP_fbreg);
  emitSigned(Offset);
}

void DwarfExpression::addOpPiece(unsigned SizeInBits, unsigned OffsetInBits) {
  if (!SizeInBits)
    return;

  const unsigned SizeOfByte = 8;
  if (OffsetInBits > 0 || SizeInBits % SizeOfByte) {
    emitOp(dwarf::DW_OP_bit_piece);
    emitUnsigned(SizeInBits);
    emitUnsigned(OffsetInBits);
  } else {
    emitOp(dwarf::DW_OP_piece);
    unsigned ByteSize = SizeInBits / SizeOfByte;
    emitUnsigned(ByteSize);
  }
  this->OffsetInBits += SizeInBits;
}

void DwarfExpression::addShr(unsigned ShiftBy) {
  emitConstu(ShiftBy);
  emitOp(dwarf::DW_OP_shr);
}

void DwarfExpression::addAnd(unsigned Mask) {
  emitConstu(Mask);
  emitOp(dwarf::DW_OP_and);
}

bool DwarfExpression::addMachineReg(const TargetRegisterInfo &TRI,
                                    llvm::Register MachineReg,
                                    unsigned MaxSize) {
  if (!llvm::Register::isPhysicalRegister(MachineReg)) {
    if (isFrameRegister(TRI, MachineReg)) {
      DwarfRegs.push_back(Register::createRegister(-1, nullptr));
      return true;
    }
    return false;
  }

  int Reg = TRI.getDwarfRegNum(MachineReg, false);

  // If this is a valid register number, emit it.
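  // For example, the System V x86-64 numbering assigns RAX the DWARF register
  // number 0, so a value living in RAX is described directly; registers
  // without their own DWARF number fall through to the super-/sub-register
  // searches below. (Illustrative only; the actual numbers come from the
  // target's register descriptions.)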
  if (Reg >= 0) {
    DwarfRegs.push_back(Register::createRegister(Reg, nullptr));
    return true;
  }

  // Walk up the super-register chain until we find a valid number.
  // For example, EAX on x86_64 is a 32-bit fragment of RAX with offset 0.
  for (MCSuperRegIterator SR(MachineReg, &TRI); SR.isValid(); ++SR) {
    Reg = TRI.getDwarfRegNum(*SR, false);
    if (Reg >= 0) {
      unsigned Idx = TRI.getSubRegIndex(*SR, MachineReg);
      unsigned Size = TRI.getSubRegIdxSize(Idx);
      unsigned RegOffset = TRI.getSubRegIdxOffset(Idx);
      DwarfRegs.push_back(Register::createRegister(Reg, "super-register"));
      // Use a DW_OP_bit_piece to describe the sub-register.
      setSubRegisterPiece(Size, RegOffset);
      return true;
    }
  }

  // Otherwise, attempt to find a covering set of sub-register numbers.
  // For example, Q0 on ARM is a composition of D0+D1.
  unsigned CurPos = 0;
  // The size of the register in bits.
  const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(MachineReg);
  unsigned RegSize = TRI.getRegSizeInBits(*RC);
  // Keep track of the bits in the register we already emitted, so we
  // can avoid emitting redundant aliasing subregs. Because this is
  // just doing a greedy scan of all subregisters, it is possible that
  // this doesn't find a combination of subregisters that fully cover
  // the register (even though one may exist).
  SmallBitVector Coverage(RegSize, false);
  for (MCSubRegIterator SR(MachineReg, &TRI); SR.isValid(); ++SR) {
    unsigned Idx = TRI.getSubRegIndex(MachineReg, *SR);
    unsigned Size = TRI.getSubRegIdxSize(Idx);
    unsigned Offset = TRI.getSubRegIdxOffset(Idx);
    Reg = TRI.getDwarfRegNum(*SR, false);
    if (Reg < 0)
      continue;

    // Used to build the intersection between the bits we already
    // emitted and the bits covered by this subregister.
    SmallBitVector CurSubReg(RegSize, false);
    CurSubReg.set(Offset, Offset + Size);

    // If this sub-register has a DWARF number and we haven't covered
    // its range, and its range covers the value, emit a DWARF piece for it.
    if (Offset < MaxSize && CurSubReg.test(Coverage)) {
      // Emit a piece for any gap in the coverage.
      if (Offset > CurPos)
        DwarfRegs.push_back(Register::createSubRegister(
            -1, Offset - CurPos, "no DWARF register encoding"));
      if (Offset == 0 && Size >= MaxSize)
        DwarfRegs.push_back(Register::createRegister(Reg, "sub-register"));
      else
        DwarfRegs.push_back(Register::createSubRegister(
            Reg, std::min<unsigned>(Size, MaxSize - Offset), "sub-register"));
    }
    // Mark it as emitted.
    Coverage.set(Offset, Offset + Size);
    CurPos = Offset + Size;
  }
  // Failed to find any DWARF encoding.
  if (CurPos == 0)
    return false;
  // Found a partial or complete DWARF encoding.
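  // For example (illustrative), ARM's Q0 ends up recorded as the two
  // D-register pieces D0 and D1; if the greedy scan above left trailing bits
  // uncovered, pad them with an undescribed piece below.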
  if (CurPos < RegSize)
    DwarfRegs.push_back(Register::createSubRegister(
        -1, RegSize - CurPos, "no DWARF register encoding"));
  return true;
}

void DwarfExpression::addStackValue() {
  if (DwarfVersion >= 4)
    emitOp(dwarf::DW_OP_stack_value);
}

void DwarfExpression::addSignedConstant(int64_t Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;
  emitOp(dwarf::DW_OP_consts);
  emitSigned(Value);
}

void DwarfExpression::addUnsignedConstant(uint64_t Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;
  emitConstu(Value);
}

void DwarfExpression::addUnsignedConstant(const APInt &Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;

  unsigned Size = Value.getBitWidth();
  const uint64_t *Data = Value.getRawData();

  // Chop it up into 64-bit pieces, because that's the maximum that
  // addUnsignedConstant takes.
  unsigned Offset = 0;
  while (Offset < Size) {
    addUnsignedConstant(*Data++);
    if (Offset == 0 && Size <= 64)
      break;
    addStackValue();
    addOpPiece(std::min(Size - Offset, 64u), Offset);
    Offset += 64;
  }
}

void DwarfExpression::addConstantFP(const APFloat &APF, const AsmPrinter &AP) {
  assert(isImplicitLocation() || isUnknownLocation());
  APInt API = APF.bitcastToAPInt();
  int NumBytes = API.getBitWidth() / 8;
  if (NumBytes == 4 /*float*/ || NumBytes == 8 /*double*/) {
    // FIXME: Add support for `long double`.
    emitOp(dwarf::DW_OP_implicit_value);
    emitUnsigned(NumBytes /*Size of the block in bytes*/);

    // The loop below emits the value starting at the least significant byte,
    // so we need to perform a byte-swap to get the byte order correct in case
    // of a big-endian target.
    if (AP.getDataLayout().isBigEndian())
      API = API.byteSwap();

    for (int i = 0; i < NumBytes; ++i) {
      emitData1(API.getZExtValue() & 0xFF);
      API = API.lshr(8);
    }

    return;
  }
  LLVM_DEBUG(
      dbgs() << "Skipped DW_OP_implicit_value creation for ConstantFP of size: "
             << API.getBitWidth() << " bits\n");
}

bool DwarfExpression::addMachineRegExpression(const TargetRegisterInfo &TRI,
                                              DIExpressionCursor &ExprCursor,
                                              llvm::Register MachineReg,
                                              unsigned FragmentOffsetInBits) {
  auto Fragment = ExprCursor.getFragmentInfo();
  if (!addMachineReg(TRI, MachineReg, Fragment ? Fragment->SizeInBits : ~1U)) {
    LocationKind = Unknown;
    return false;
  }

  bool HasComplexExpression = false;
  auto Op = ExprCursor.peek();
  if (Op && Op->getOp() != dwarf::DW_OP_LLVM_fragment)
    HasComplexExpression = true;

  // If the register can only be described by a complex expression (i.e.,
  // multiple subregisters) it doesn't safely compose with another complex
  // expression. For example, it is not possible to apply a DW_OP_deref
  // operation to multiple DW_OP_pieces, since composite location descriptions
  // do not push anything on the DWARF stack.
  //
  // DW_OP_entry_value operations can only hold a DWARF expression or a
  // register location description, so we can't emit a single entry value
  // covering a composite location description. In the future we may want to
  // emit entry value operations for each register location in the composite
  // location, but until that is supported do not emit anything.
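  // For example (illustrative), a 128-bit value that addMachineReg split into
  // two 64-bit sub-register pieces can neither be wrapped in a single
  // DW_OP_entry_value nor composed with a trailing DW_OP_deref, so give up on
  // the location rather than emit an invalid expression.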
  if ((HasComplexExpression || IsEmittingEntryValue) && DwarfRegs.size() > 1) {
    if (IsEmittingEntryValue)
      cancelEntryValue();
    DwarfRegs.clear();
    LocationKind = Unknown;
    return false;
  }

  // Handle simple register locations. If we are supposed to emit
  // a call site parameter expression and if that expression is just a register
  // location, emit it with addBReg and offset 0, because we should emit a DWARF
  // expression representing a value, rather than a location.
  if ((!isParameterValue() && !isMemoryLocation() && !HasComplexExpression) ||
      isEntryValue()) {
    auto FragmentInfo = ExprCursor.getFragmentInfo();
    unsigned RegSize = 0;
    for (auto &Reg : DwarfRegs) {
      RegSize += Reg.SubRegSize;
      if (Reg.DwarfRegNo >= 0)
        addReg(Reg.DwarfRegNo, Reg.Comment);
      if (FragmentInfo)
        if (RegSize > FragmentInfo->SizeInBits)
          // If the register is larger than the current fragment, stop
          // once the fragment is covered.
          break;
      addOpPiece(Reg.SubRegSize);
    }

    if (isEntryValue()) {
      finalizeEntryValue();

      if (!isIndirect() && !isParameterValue() && !HasComplexExpression &&
          DwarfVersion >= 4)
        emitOp(dwarf::DW_OP_stack_value);
    }

    DwarfRegs.clear();
    // If we need to mask out a subregister, do it now, unless the next
    // operation would emit an OpPiece anyway.
    auto NextOp = ExprCursor.peek();
    if (SubRegisterSizeInBits && NextOp &&
        (NextOp->getOp() != dwarf::DW_OP_LLVM_fragment))
      maskSubRegister();
    return true;
  }

  // Don't emit locations that cannot be expressed without DW_OP_stack_value.
  if (DwarfVersion < 4)
    if (any_of(ExprCursor, [](DIExpression::ExprOperand Op) -> bool {
          return Op.getOp() == dwarf::DW_OP_stack_value;
        })) {
      DwarfRegs.clear();
      LocationKind = Unknown;
      return false;
    }

  assert(DwarfRegs.size() == 1);
  auto Reg = DwarfRegs[0];
  bool FBReg = isFrameRegister(TRI, MachineReg);
  int SignedOffset = 0;
  assert(!Reg.isSubRegister() && "full register expected");

  // Pattern-match combinations for which more efficient representations exist.
  // [Reg, DW_OP_plus_uconst, Offset] --> [DW_OP_breg, Offset].
  if (Op && (Op->getOp() == dwarf::DW_OP_plus_uconst)) {
    uint64_t Offset = Op->getArg(0);
    uint64_t IntMax = static_cast<uint64_t>(std::numeric_limits<int>::max());
    if (Offset <= IntMax) {
      SignedOffset = Offset;
      ExprCursor.take();
    }
  }

  // [Reg, DW_OP_constu, Offset, DW_OP_plus]  --> [DW_OP_breg, Offset]
  // [Reg, DW_OP_constu, Offset, DW_OP_minus] --> [DW_OP_breg,-Offset]
  // If Reg is a subregister we need to mask it out before subtracting.
  if (Op && Op->getOp() == dwarf::DW_OP_constu) {
    uint64_t Offset = Op->getArg(0);
    uint64_t IntMax = static_cast<uint64_t>(std::numeric_limits<int>::max());
    auto N = ExprCursor.peekNext();
    if (N && N->getOp() == dwarf::DW_OP_plus && Offset <= IntMax) {
      SignedOffset = Offset;
      ExprCursor.consume(2);
    } else if (N && N->getOp() == dwarf::DW_OP_minus &&
               !SubRegisterSizeInBits && Offset <= IntMax + 1) {
      SignedOffset = -static_cast<int64_t>(Offset);
      ExprCursor.consume(2);
    }
  }

  if (FBReg)
    addFBReg(SignedOffset);
  else
    addBReg(Reg.DwarfRegNo, SignedOffset);
  DwarfRegs.clear();

  // If we need to mask out a subregister, do it now, unless the next
  // operation would emit an OpPiece anyway.
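  // For instance (illustrative), masking a 16-bit sub-register at offset 0
  // appends [DW_OP_constu 0xffff, DW_OP_and] after the DW_OP_breg/DW_OP_fbreg
  // emitted above; see maskSubRegister() further down.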
  auto NextOp = ExprCursor.peek();
  if (SubRegisterSizeInBits && NextOp &&
      (NextOp->getOp() != dwarf::DW_OP_LLVM_fragment))
    maskSubRegister();

  return true;
}

void DwarfExpression::setEntryValueFlags(const MachineLocation &Loc) {
  LocationFlags |= EntryValue;
  if (Loc.isIndirect())
    LocationFlags |= Indirect;
}

void DwarfExpression::setLocation(const MachineLocation &Loc,
                                  const DIExpression *DIExpr) {
  if (Loc.isIndirect())
    setMemoryLocationKind();

  if (DIExpr->isEntryValue())
    setEntryValueFlags(Loc);
}

void DwarfExpression::beginEntryValueExpression(
    DIExpressionCursor &ExprCursor) {
  auto Op = ExprCursor.take();
  (void)Op;
  assert(Op && Op->getOp() == dwarf::DW_OP_LLVM_entry_value);
  assert(!IsEmittingEntryValue && "Already emitting entry value?");
  assert(Op->getArg(0) == 1 &&
         "Can currently only emit entry values covering a single operation");

  SavedLocationKind = LocationKind;
  LocationKind = Register;
  IsEmittingEntryValue = true;
  enableTemporaryBuffer();
}

void DwarfExpression::finalizeEntryValue() {
  assert(IsEmittingEntryValue && "Entry value not open?");
  disableTemporaryBuffer();

  emitOp(CU.getDwarf5OrGNULocationAtom(dwarf::DW_OP_entry_value));

  // Emit the entry value's size operand.
  unsigned Size = getTemporaryBufferSize();
  emitUnsigned(Size);

  // Emit the entry value's DWARF block operand.
  commitTemporaryBuffer();

  LocationFlags &= ~EntryValue;
  LocationKind = SavedLocationKind;
  IsEmittingEntryValue = false;
}

void DwarfExpression::cancelEntryValue() {
  assert(IsEmittingEntryValue && "Entry value not open?");
  disableTemporaryBuffer();

  // The temporary buffer can't be emptied, so for now just assert that nothing
  // has been emitted to it.
  assert(getTemporaryBufferSize() == 0 &&
         "Began emitting entry value block before cancelling entry value");

  LocationKind = SavedLocationKind;
  IsEmittingEntryValue = false;
}

unsigned DwarfExpression::getOrCreateBaseType(unsigned BitSize,
                                              dwarf::TypeKind Encoding) {
  // Reuse the base_type if we already have one in this CU; otherwise,
  // create a new one.
  unsigned I = 0, E = CU.ExprRefedBaseTypes.size();
  for (; I != E; ++I)
    if (CU.ExprRefedBaseTypes[I].BitSize == BitSize &&
        CU.ExprRefedBaseTypes[I].Encoding == Encoding)
      break;

  if (I == E)
    CU.ExprRefedBaseTypes.emplace_back(BitSize, Encoding);
  return I;
}

/// Assuming a well-formed expression, match "DW_OP_deref*
/// DW_OP_LLVM_fragment?".
static bool isMemoryLocation(DIExpressionCursor ExprCursor) {
  while (ExprCursor) {
    auto Op = ExprCursor.take();
    switch (Op->getOp()) {
    case dwarf::DW_OP_deref:
    case dwarf::DW_OP_LLVM_fragment:
      break;
    default:
      return false;
    }
  }
  return true;
}

void DwarfExpression::addExpression(DIExpressionCursor &&ExprCursor) {
  addExpression(std::move(ExprCursor),
                [](unsigned Idx, DIExpressionCursor &Cursor) -> bool {
                  llvm_unreachable("unhandled opcode found in expression");
                });
}

bool DwarfExpression::addExpression(
    DIExpressionCursor &&ExprCursor,
    llvm::function_ref<bool(unsigned, DIExpressionCursor &)> InsertArg) {
  // Entry values can currently only cover the initial register location,
  // and not any other parts of the following DWARF expression.
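  // For example (illustrative), an expression of the form
  // DW_OP_LLVM_entry_value(register) followed by arithmetic is emitted as a
  // DW_OP_entry_value block wrapping just the register location, with the
  // arithmetic following the block rather than being nested inside it.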
  assert(!IsEmittingEntryValue && "Can't emit entry value around expression");

  Optional<DIExpression::ExprOperand> PrevConvertOp = None;

  while (ExprCursor) {
    auto Op = ExprCursor.take();
    uint64_t OpNum = Op->getOp();

    if (OpNum >= dwarf::DW_OP_reg0 && OpNum <= dwarf::DW_OP_reg31) {
      emitOp(OpNum);
      continue;
    } else if (OpNum >= dwarf::DW_OP_breg0 && OpNum <= dwarf::DW_OP_breg31) {
      addBReg(OpNum - dwarf::DW_OP_breg0, Op->getArg(0));
      continue;
    }

    switch (OpNum) {
    case dwarf::DW_OP_LLVM_arg:
      if (!InsertArg(Op->getArg(0), ExprCursor)) {
        LocationKind = Unknown;
        return false;
      }
      break;
    case dwarf::DW_OP_LLVM_fragment: {
      unsigned SizeInBits = Op->getArg(1);
      unsigned FragmentOffset = Op->getArg(0);
      // The fragment offset must have already been adjusted by emitting an
      // empty DW_OP_piece / DW_OP_bit_piece before we emitted the base
      // location.
      assert(OffsetInBits >= FragmentOffset && "fragment offset not added?");
      assert(SizeInBits >= OffsetInBits - FragmentOffset && "size underflow");

      // If addMachineReg already emitted DW_OP_piece operations to represent
      // a super-register by splicing together sub-registers, subtract the size
      // of the pieces that were already emitted.
      SizeInBits -= OffsetInBits - FragmentOffset;

      // If addMachineReg requested a DW_OP_bit_piece to stencil out a
      // sub-register that is smaller than the current fragment's size, use it.
      if (SubRegisterSizeInBits)
        SizeInBits = std::min<unsigned>(SizeInBits, SubRegisterSizeInBits);

      // Emit a DW_OP_stack_value for implicit location descriptions.
      if (isImplicitLocation())
        addStackValue();

      // Emit the DW_OP_piece.
      addOpPiece(SizeInBits, SubRegisterOffsetInBits);
      setSubRegisterPiece(0, 0);
      // Reset the location description kind.
      LocationKind = Unknown;
      return true;
    }
    case dwarf::DW_OP_plus_uconst:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_plus_uconst);
      emitUnsigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_plus:
    case dwarf::DW_OP_minus:
    case dwarf::DW_OP_mul:
    case dwarf::DW_OP_div:
    case dwarf::DW_OP_mod:
    case dwarf::DW_OP_or:
    case dwarf::DW_OP_and:
    case dwarf::DW_OP_xor:
    case dwarf::DW_OP_shl:
    case dwarf::DW_OP_shr:
    case dwarf::DW_OP_shra:
    case dwarf::DW_OP_lit0:
    case dwarf::DW_OP_not:
    case dwarf::DW_OP_dup:
    case dwarf::DW_OP_push_object_address:
    case dwarf::DW_OP_over:
      emitOp(OpNum);
      break;
    case dwarf::DW_OP_deref:
      assert(!isRegisterLocation());
      if (!isMemoryLocation() && ::isMemoryLocation(ExprCursor))
        // Turning this into a memory location description makes the deref
        // implicit.
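        // (Illustratively: for a trailing run of plain derefs, only this
        // first one is absorbed by switching to a memory location
        // description; any remaining derefs are still emitted explicitly
        // below.)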
        LocationKind = Memory;
      else
        emitOp(dwarf::DW_OP_deref);
      break;
    case dwarf::DW_OP_constu:
      assert(!isRegisterLocation());
      emitConstu(Op->getArg(0));
      break;
    case dwarf::DW_OP_consts:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_consts);
      emitSigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_LLVM_convert: {
      unsigned BitSize = Op->getArg(0);
      dwarf::TypeKind Encoding = static_cast<dwarf::TypeKind>(Op->getArg(1));
      if (DwarfVersion >= 5 && CU.getDwarfDebug().useOpConvert()) {
        emitOp(dwarf::DW_OP_convert);
        // If targeting a location list, simply emit the index into the raw
        // byte stream as a ULEB128; DwarfDebug::emitDebugLocEntry has been
        // fitted with means to extract it later.
        // If targeting an inlined DW_AT_location, insert a DIEBaseTypeRef
        // (containing the index and a resolve mechanism during emit) into the
        // DIE value list.
        emitBaseTypeRef(getOrCreateBaseType(BitSize, Encoding));
      } else {
        if (PrevConvertOp && PrevConvertOp->getArg(0) < BitSize) {
          if (Encoding == dwarf::DW_ATE_signed)
            emitLegacySExt(PrevConvertOp->getArg(0));
          else if (Encoding == dwarf::DW_ATE_unsigned)
            emitLegacyZExt(PrevConvertOp->getArg(0));
          PrevConvertOp = None;
        } else {
          PrevConvertOp = Op;
        }
      }
      break;
    }
    case dwarf::DW_OP_stack_value:
      LocationKind = Implicit;
      break;
    case dwarf::DW_OP_swap:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_swap);
      break;
    case dwarf::DW_OP_xderef:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_xderef);
      break;
    case dwarf::DW_OP_deref_size:
      emitOp(dwarf::DW_OP_deref_size);
      emitData1(Op->getArg(0));
      break;
    case dwarf::DW_OP_LLVM_tag_offset:
      TagOffset = Op->getArg(0);
      break;
    case dwarf::DW_OP_regx:
      emitOp(dwarf::DW_OP_regx);
      emitUnsigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_bregx:
      emitOp(dwarf::DW_OP_bregx);
      emitUnsigned(Op->getArg(0));
      emitSigned(Op->getArg(1));
      break;
    default:
      llvm_unreachable("unhandled opcode found in expression");
    }
  }

  if (isImplicitLocation() && !isParameterValue())
    // Turn this into an implicit location description.
    addStackValue();

  return true;
}

/// Add masking operations to stencil out a subregister.
void DwarfExpression::maskSubRegister() {
  assert(SubRegisterSizeInBits && "no subregister was registered");
  if (SubRegisterOffsetInBits > 0)
    addShr(SubRegisterOffsetInBits);
  uint64_t Mask = (1ULL << (uint64_t)SubRegisterSizeInBits) - 1ULL;
  addAnd(Mask);
}

void DwarfExpression::finalize() {
  assert(DwarfRegs.size() == 0 && "dwarf registers not emitted");
  // Emit any outstanding DW_OP_piece operations to mask out subregisters.
  if (SubRegisterSizeInBits == 0)
    return;
  // Don't emit a DW_OP_piece for a subregister at offset 0.
  if (SubRegisterOffsetInBits == 0)
    return;
  addOpPiece(SubRegisterSizeInBits, SubRegisterOffsetInBits);
}

void DwarfExpression::addFragmentOffset(const DIExpression *Expr) {
  if (!Expr || !Expr->isFragment())
    return;

  uint64_t FragmentOffset = Expr->getFragmentInfo()->OffsetInBits;
  assert(FragmentOffset >= OffsetInBits &&
         "overlapping or duplicate fragments");
  if (FragmentOffset > OffsetInBits)
    addOpPiece(FragmentOffset - OffsetInBits);
  OffsetInBits = FragmentOffset;
}

void DwarfExpression::emitLegacySExt(unsigned FromBits) {
  // (((X >> (FromBits - 1)) * (~0)) << FromBits) | X
  emitOp(dwarf::DW_OP_dup);
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned(FromBits - 1);
  emitOp(dwarf::DW_OP_shr);
  emitOp(dwarf::DW_OP_lit0);
  emitOp(dwarf::DW_OP_not);
  emitOp(dwarf::DW_OP_mul);
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned(FromBits);
  emitOp(dwarf::DW_OP_shl);
  emitOp(dwarf::DW_OP_or);
}

void DwarfExpression::emitLegacyZExt(unsigned FromBits) {
  // Heuristic to decide the most efficient encoding.
  // A ULEB can encode 7 1-bits per byte.
  if (FromBits / 7 < 1+1+1+1+1) {
    // (X & ((1 << FromBits) - 1))
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned((1ULL << FromBits) - 1);
  } else {
    // Note that the DWARF 4 stack consists of pointer-sized elements,
    // so technically it doesn't make sense to shift left more than 64
    // bits. We leave that for the consumer to decide though. LLDB for
    // example uses APInt for the stack elements and can still deal
    // with this.
    emitOp(dwarf::DW_OP_lit1);
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned(FromBits);
    emitOp(dwarf::DW_OP_shl);
    emitOp(dwarf::DW_OP_lit1);
    emitOp(dwarf::DW_OP_minus);
  }
  emitOp(dwarf::DW_OP_and);
}

void DwarfExpression::addWasmLocation(unsigned Index, uint64_t Offset) {
  emitOp(dwarf::DW_OP_WASM_location);
  emitUnsigned(Index == 4/*TI_LOCAL_INDIRECT*/ ? 0/*TI_LOCAL*/ : Index);
  emitUnsigned(Offset);
  if (Index == 4 /*TI_LOCAL_INDIRECT*/) {
    assert(LocationKind == Unknown);
    LocationKind = Memory;
  } else {
    assert(LocationKind == Implicit || LocationKind == Unknown);
    LocationKind = Implicit;
  }
}