//===- llvm/CodeGen/DwarfExpression.cpp - Dwarf Debug Framework -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing dwarf debug info into asm files.
//
//===----------------------------------------------------------------------===//

#include "DwarfExpression.h"
#include "DwarfCompileUnit.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "dwarfdebug"

void DwarfExpression::emitConstu(uint64_t Value) {
  if (Value < 32)
    emitOp(dwarf::DW_OP_lit0 + Value);
  else if (Value == std::numeric_limits<uint64_t>::max()) {
    // Only do this for 64-bit values as the DWARF expression stack uses
    // target-address-size values.
    emitOp(dwarf::DW_OP_lit0);
    emitOp(dwarf::DW_OP_not);
  } else {
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned(Value);
  }
}

void DwarfExpression::addReg(int DwarfReg, const char *Comment) {
  assert(DwarfReg >= 0 && "invalid negative dwarf register number");
  assert((isUnknownLocation() || isRegisterLocation()) &&
         "location description already locked down");
  LocationKind = Register;
  if (DwarfReg < 32) {
    emitOp(dwarf::DW_OP_reg0 + DwarfReg, Comment);
  } else {
    emitOp(dwarf::DW_OP_regx, Comment);
    emitUnsigned(DwarfReg);
  }
}

void DwarfExpression::addBReg(int DwarfReg, int Offset) {
  assert(DwarfReg >= 0 && "invalid negative dwarf register number");
  assert(!isRegisterLocation() && "location description already locked down");
  if (DwarfReg < 32) {
    emitOp(dwarf::DW_OP_breg0 + DwarfReg);
  } else {
    emitOp(dwarf::DW_OP_bregx);
    emitUnsigned(DwarfReg);
  }
  emitSigned(Offset);
}

void DwarfExpression::addFBReg(int Offset) {
  emitOp(dwarf::DW_OP_fbreg);
  emitSigned(Offset);
}

void DwarfExpression::addOpPiece(unsigned SizeInBits, unsigned OffsetInBits) {
  if (!SizeInBits)
    return;

  const unsigned SizeOfByte = 8;
  if (OffsetInBits > 0 || SizeInBits % SizeOfByte) {
    emitOp(dwarf::DW_OP_bit_piece);
    emitUnsigned(SizeInBits);
    emitUnsigned(OffsetInBits);
  } else {
    emitOp(dwarf::DW_OP_piece);
    unsigned ByteSize = SizeInBits / SizeOfByte;
    emitUnsigned(ByteSize);
  }
  this->OffsetInBits += SizeInBits;
}

void DwarfExpression::addShr(unsigned ShiftBy) {
  emitConstu(ShiftBy);
  emitOp(dwarf::DW_OP_shr);
}

void DwarfExpression::addAnd(unsigned Mask) {
  emitConstu(Mask);
  emitOp(dwarf::DW_OP_and);
}

bool DwarfExpression::addMachineReg(const TargetRegisterInfo &TRI,
                                    llvm::Register MachineReg,
                                    unsigned MaxSize) {
  if (!llvm::Register::isPhysicalRegister(MachineReg)) {
    if (isFrameRegister(TRI, MachineReg)) {
      DwarfRegs.push_back(Register::createRegister(-1, nullptr));
      return true;
    }
    return false;
  }

  int Reg = TRI.getDwarfRegNum(MachineReg, false);

  // If this is a valid register number, emit it.
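  // (Illustrative note, not part of any target description: on x86-64, for
  //  instance, RAX maps directly to DWARF register 0, so this fast path
  //  applies and no super-/sub-register walk below is needed.)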
  if (Reg >= 0) {
    DwarfRegs.push_back(Register::createRegister(Reg, nullptr));
    return true;
  }

  // Walk up the super-register chain until we find a valid number.
  // For example, EAX on x86_64 is a 32-bit fragment of RAX with offset 0.
  for (MCSuperRegIterator SR(MachineReg, &TRI); SR.isValid(); ++SR) {
    Reg = TRI.getDwarfRegNum(*SR, false);
    if (Reg >= 0) {
      unsigned Idx = TRI.getSubRegIndex(*SR, MachineReg);
      unsigned Size = TRI.getSubRegIdxSize(Idx);
      unsigned RegOffset = TRI.getSubRegIdxOffset(Idx);
      DwarfRegs.push_back(Register::createRegister(Reg, "super-register"));
      // Use a DW_OP_bit_piece to describe the sub-register.
      setSubRegisterPiece(Size, RegOffset);
      return true;
    }
  }

  // Otherwise, attempt to find a covering set of sub-register numbers.
  // For example, Q0 on ARM is a composition of D0+D1.
  unsigned CurPos = 0;
  // The size of the register in bits.
  const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(MachineReg);
  unsigned RegSize = TRI.getRegSizeInBits(*RC);
  // Keep track of the bits in the register we already emitted, so we
  // can avoid emitting redundant aliasing subregs. Because this is
  // just doing a greedy scan of all subregisters, it is possible that
  // this doesn't find a combination of subregisters that fully cover
  // the register (even though one may exist).
  SmallBitVector Coverage(RegSize, false);
  for (MCSubRegIterator SR(MachineReg, &TRI); SR.isValid(); ++SR) {
    unsigned Idx = TRI.getSubRegIndex(MachineReg, *SR);
    unsigned Size = TRI.getSubRegIdxSize(Idx);
    unsigned Offset = TRI.getSubRegIdxOffset(Idx);
    Reg = TRI.getDwarfRegNum(*SR, false);
    if (Reg < 0)
      continue;

    // Used to build the intersection between the bits we already
    // emitted and the bits covered by this subregister.
    SmallBitVector CurSubReg(RegSize, false);
    CurSubReg.set(Offset, Offset + Size);

    // If this sub-register has a DWARF number and we haven't covered
    // its range, and its range covers the value, emit a DWARF piece for it.
    if (Offset < MaxSize && CurSubReg.test(Coverage)) {
      // Emit a piece for any gap in the coverage.
      if (Offset > CurPos)
        DwarfRegs.push_back(Register::createSubRegister(
            -1, Offset - CurPos, "no DWARF register encoding"));
      if (Offset == 0 && Size >= MaxSize)
        DwarfRegs.push_back(Register::createRegister(Reg, "sub-register"));
      else
        DwarfRegs.push_back(Register::createSubRegister(
            Reg, std::min<unsigned>(Size, MaxSize - Offset), "sub-register"));
    }
    // Mark it as emitted.
    Coverage.set(Offset, Offset + Size);
    CurPos = Offset + Size;
  }
  // Failed to find any DWARF encoding.
  if (CurPos == 0)
    return false;
  // Found a partial or complete DWARF encoding.
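  // (For the ARM Q0 example above this would yield two 64-bit "sub-register"
  //  entries, D0 and D1, which fully cover the 128-bit register; a register
  //  that is only partially covered gets a trailing padding piece below.
  //  Illustrative only; the exact decomposition is target-dependent.)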
  if (CurPos < RegSize)
    DwarfRegs.push_back(Register::createSubRegister(
        -1, RegSize - CurPos, "no DWARF register encoding"));
  return true;
}

void DwarfExpression::addStackValue() {
  if (DwarfVersion >= 4)
    emitOp(dwarf::DW_OP_stack_value);
}

void DwarfExpression::addSignedConstant(int64_t Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;
  emitOp(dwarf::DW_OP_consts);
  emitSigned(Value);
}

void DwarfExpression::addUnsignedConstant(uint64_t Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;
  emitConstu(Value);
}

void DwarfExpression::addUnsignedConstant(const APInt &Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;

  unsigned Size = Value.getBitWidth();
  const uint64_t *Data = Value.getRawData();

  // Chop it up into 64-bit pieces, because that's the maximum that
  // addUnsignedConstant takes.
  unsigned Offset = 0;
  while (Offset < Size) {
    addUnsignedConstant(*Data++);
    if (Offset == 0 && Size <= 64)
      break;
    addStackValue();
    addOpPiece(std::min(Size - Offset, 64u), Offset);
    Offset += 64;
  }
}

void DwarfExpression::addConstantFP(const APFloat &APF, const AsmPrinter &AP) {
  assert(isImplicitLocation() || isUnknownLocation());
  APInt API = APF.bitcastToAPInt();
  int NumBytes = API.getBitWidth() / 8;
  if (NumBytes == 4 /*float*/ || NumBytes == 8 /*double*/) {
    // FIXME: Add support for `long double`.
    emitOp(dwarf::DW_OP_implicit_value);
    emitUnsigned(NumBytes /*Size of the block in bytes*/);

    // The loop below is emitting the value starting at least significant byte,
    // so we need to perform a byte-swap to get the byte order correct in case
    // of a big-endian target.
    if (AP.getDataLayout().isBigEndian())
      API = API.byteSwap();

    for (int i = 0; i < NumBytes; ++i) {
      emitData1(API.getZExtValue() & 0xFF);
      API = API.lshr(8);
    }

    return;
  }
  LLVM_DEBUG(
      dbgs() << "Skipped DW_OP_implicit_value creation for ConstantFP of size: "
             << API.getBitWidth() << " bits\n");
}

bool DwarfExpression::addMachineRegExpression(const TargetRegisterInfo &TRI,
                                              DIExpressionCursor &ExprCursor,
                                              llvm::Register MachineReg,
                                              unsigned FragmentOffsetInBits) {
  auto Fragment = ExprCursor.getFragmentInfo();
  if (!addMachineReg(TRI, MachineReg, Fragment ? Fragment->SizeInBits : ~1U)) {
    LocationKind = Unknown;
    return false;
  }

  bool HasComplexExpression = false;
  auto Op = ExprCursor.peek();
  if (Op && Op->getOp() != dwarf::DW_OP_LLVM_fragment)
    HasComplexExpression = true;

  // If the register can only be described by a complex expression (i.e.,
  // multiple subregisters) it doesn't safely compose with another complex
  // expression. For example, it is not possible to apply a DW_OP_deref
  // operation to multiple DW_OP_pieces, since composite location descriptions
  // do not push anything on the DWARF stack.
  //
  // DW_OP_entry_value operations can only hold a DWARF expression or a
  // register location description, so we can't emit a single entry value
  // covering a composite location description. In the future we may want to
  // emit entry value operations for each register location in the composite
  // location, but until that is supported do not emit anything.
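  // (Illustrative example: a 128-bit value split across two 64-bit registers
  //  would need one entry value per register piece; since that is not
  //  supported yet, we bail out here instead.)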
  if ((HasComplexExpression || IsEmittingEntryValue) && DwarfRegs.size() > 1) {
    if (IsEmittingEntryValue)
      cancelEntryValue();
    DwarfRegs.clear();
    LocationKind = Unknown;
    return false;
  }

  // Handle simple register locations. If we are supposed to emit
  // a call site parameter expression and if that expression is just a register
  // location, emit it with addBReg and offset 0, because we should emit a
  // DWARF expression representing a value, rather than a location.
  if (!isMemoryLocation() && !HasComplexExpression &&
      (!isParameterValue() || isEntryValue())) {
    for (auto &Reg : DwarfRegs) {
      if (Reg.DwarfRegNo >= 0)
        addReg(Reg.DwarfRegNo, Reg.Comment);
      addOpPiece(Reg.SubRegSize);
    }

    if (isEntryValue())
      finalizeEntryValue();

    if (isEntryValue() && !isIndirect() && !isParameterValue() &&
        DwarfVersion >= 4)
      emitOp(dwarf::DW_OP_stack_value);

    DwarfRegs.clear();
    return true;
  }

  // Don't emit locations that cannot be expressed without DW_OP_stack_value.
  if (DwarfVersion < 4)
    if (any_of(ExprCursor, [](DIExpression::ExprOperand Op) -> bool {
          return Op.getOp() == dwarf::DW_OP_stack_value;
        })) {
      DwarfRegs.clear();
      LocationKind = Unknown;
      return false;
    }

  assert(DwarfRegs.size() == 1);
  auto Reg = DwarfRegs[0];
  bool FBReg = isFrameRegister(TRI, MachineReg);
  int SignedOffset = 0;
  assert(!Reg.isSubRegister() && "full register expected");

  // Pattern-match combinations for which more efficient representations exist.
  // [Reg, DW_OP_plus_uconst, Offset] --> [DW_OP_breg, Offset].
  if (Op && (Op->getOp() == dwarf::DW_OP_plus_uconst)) {
    uint64_t Offset = Op->getArg(0);
    uint64_t IntMax = static_cast<uint64_t>(std::numeric_limits<int>::max());
    if (Offset <= IntMax) {
      SignedOffset = Offset;
      ExprCursor.take();
    }
  }

  // [Reg, DW_OP_constu, Offset, DW_OP_plus] --> [DW_OP_breg, Offset]
  // [Reg, DW_OP_constu, Offset, DW_OP_minus] --> [DW_OP_breg,-Offset]
  // If Reg is a subregister we need to mask it out before subtracting.
  if (Op && Op->getOp() == dwarf::DW_OP_constu) {
    uint64_t Offset = Op->getArg(0);
    uint64_t IntMax = static_cast<uint64_t>(std::numeric_limits<int>::max());
    auto N = ExprCursor.peekNext();
    if (N && N->getOp() == dwarf::DW_OP_plus && Offset <= IntMax) {
      SignedOffset = Offset;
      ExprCursor.consume(2);
    } else if (N && N->getOp() == dwarf::DW_OP_minus &&
               !SubRegisterSizeInBits && Offset <= IntMax + 1) {
      SignedOffset = -static_cast<int64_t>(Offset);
      ExprCursor.consume(2);
    }
  }

  if (FBReg)
    addFBReg(SignedOffset);
  else
    addBReg(Reg.DwarfRegNo, SignedOffset);
  DwarfRegs.clear();
  return true;
}

void DwarfExpression::setEntryValueFlags(const MachineLocation &Loc) {
  LocationFlags |= EntryValue;
  if (Loc.isIndirect())
    LocationFlags |= Indirect;
}

void DwarfExpression::setLocation(const MachineLocation &Loc,
                                  const DIExpression *DIExpr) {
  if (Loc.isIndirect())
    // Do not treat entry value descriptions of indirect parameters as memory
    // locations. This allows DwarfExpression::addReg() to add DW_OP_regN to an
    // entry value description.
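    // (So an entry value of an indirect parameter can be emitted as, e.g.,
    //  DW_OP_entry_value [DW_OP_regN] rather than being forced into a
    //  DW_OP_bregN memory form. Illustrative example.)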
    if (!DIExpr->isEntryValue())
      setMemoryLocationKind();

  if (DIExpr->isEntryValue())
    setEntryValueFlags(Loc);
}

void DwarfExpression::beginEntryValueExpression(
    DIExpressionCursor &ExprCursor) {
  auto Op = ExprCursor.take();
  (void)Op;
  assert(Op && Op->getOp() == dwarf::DW_OP_LLVM_entry_value);
  assert(!isMemoryLocation() &&
         "We don't support entry values of memory locations yet");
  assert(!IsEmittingEntryValue && "Already emitting entry value?");
  assert(Op->getArg(0) == 1 &&
         "Can currently only emit entry values covering a single operation");

  IsEmittingEntryValue = true;
  enableTemporaryBuffer();
}

void DwarfExpression::finalizeEntryValue() {
  assert(IsEmittingEntryValue && "Entry value not open?");
  disableTemporaryBuffer();

  emitOp(CU.getDwarf5OrGNULocationAtom(dwarf::DW_OP_entry_value));

  // Emit the entry value's size operand.
  unsigned Size = getTemporaryBufferSize();
  emitUnsigned(Size);

  // Emit the entry value's DWARF block operand.
  commitTemporaryBuffer();

  IsEmittingEntryValue = false;
}

void DwarfExpression::cancelEntryValue() {
  assert(IsEmittingEntryValue && "Entry value not open?");
  disableTemporaryBuffer();

  // The temporary buffer can't be emptied, so for now just assert that nothing
  // has been emitted to it.
  assert(getTemporaryBufferSize() == 0 &&
         "Began emitting entry value block before cancelling entry value");

  IsEmittingEntryValue = false;
}

unsigned DwarfExpression::getOrCreateBaseType(unsigned BitSize,
                                              dwarf::TypeKind Encoding) {
  // Reuse the base_type if we already have one in this CU otherwise we
  // create a new one.
  unsigned I = 0, E = CU.ExprRefedBaseTypes.size();
  for (; I != E; ++I)
    if (CU.ExprRefedBaseTypes[I].BitSize == BitSize &&
        CU.ExprRefedBaseTypes[I].Encoding == Encoding)
      break;

  if (I == E)
    CU.ExprRefedBaseTypes.emplace_back(BitSize, Encoding);
  return I;
}

/// Assuming a well-formed expression, match "DW_OP_deref*
/// DW_OP_LLVM_fragment?".
static bool isMemoryLocation(DIExpressionCursor ExprCursor) {
  while (ExprCursor) {
    auto Op = ExprCursor.take();
    switch (Op->getOp()) {
    case dwarf::DW_OP_deref:
    case dwarf::DW_OP_LLVM_fragment:
      break;
    default:
      return false;
    }
  }
  return true;
}

void DwarfExpression::addExpression(DIExpressionCursor &&ExprCursor,
                                    unsigned FragmentOffsetInBits) {
  // Entry values can currently only cover the initial register location,
  // and not any other parts of the following DWARF expression.
  assert(!IsEmittingEntryValue && "Can't emit entry value around expression");

  // If we need to mask out a subregister, do it now, unless the next
  // operation would emit an OpPiece anyway.
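  // (maskSubRegister() below emits an optional shift-right plus an AND mask;
  //  if the next operation is a DW_OP_LLVM_fragment, the sub-register is
  //  instead described with a DW_OP_bit_piece in the fragment handling below,
  //  so the masking would be redundant.)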
  auto N = ExprCursor.peek();
  if (SubRegisterSizeInBits && N && (N->getOp() != dwarf::DW_OP_LLVM_fragment))
    maskSubRegister();

  Optional<DIExpression::ExprOperand> PrevConvertOp = None;

  while (ExprCursor) {
    auto Op = ExprCursor.take();
    uint64_t OpNum = Op->getOp();

    if (OpNum >= dwarf::DW_OP_reg0 && OpNum <= dwarf::DW_OP_reg31) {
      emitOp(OpNum);
      continue;
    } else if (OpNum >= dwarf::DW_OP_breg0 && OpNum <= dwarf::DW_OP_breg31) {
      addBReg(OpNum - dwarf::DW_OP_breg0, Op->getArg(0));
      continue;
    }

    switch (OpNum) {
    case dwarf::DW_OP_LLVM_fragment: {
      unsigned SizeInBits = Op->getArg(1);
      unsigned FragmentOffset = Op->getArg(0);
      // The fragment offset must have already been adjusted by emitting an
      // empty DW_OP_piece / DW_OP_bit_piece before we emitted the base
      // location.
      assert(OffsetInBits >= FragmentOffset && "fragment offset not added?");
      assert(SizeInBits >= OffsetInBits - FragmentOffset && "size underflow");

      // If addMachineReg already emitted DW_OP_piece operations to represent
      // a super-register by splicing together sub-registers, subtract the size
      // of the pieces that was already emitted.
      SizeInBits -= OffsetInBits - FragmentOffset;

      // If addMachineReg requested a DW_OP_bit_piece to stencil out a
      // sub-register that is smaller than the current fragment's size, use it.
      if (SubRegisterSizeInBits)
        SizeInBits = std::min<unsigned>(SizeInBits, SubRegisterSizeInBits);

      // Emit a DW_OP_stack_value for implicit location descriptions.
      if (isImplicitLocation())
        addStackValue();

      // Emit the DW_OP_piece.
      addOpPiece(SizeInBits, SubRegisterOffsetInBits);
      setSubRegisterPiece(0, 0);
      // Reset the location description kind.
      LocationKind = Unknown;
      return;
    }
    case dwarf::DW_OP_plus_uconst:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_plus_uconst);
      emitUnsigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_plus:
    case dwarf::DW_OP_minus:
    case dwarf::DW_OP_mul:
    case dwarf::DW_OP_div:
    case dwarf::DW_OP_mod:
    case dwarf::DW_OP_or:
    case dwarf::DW_OP_and:
    case dwarf::DW_OP_xor:
    case dwarf::DW_OP_shl:
    case dwarf::DW_OP_shr:
    case dwarf::DW_OP_shra:
    case dwarf::DW_OP_lit0:
    case dwarf::DW_OP_not:
    case dwarf::DW_OP_dup:
    case dwarf::DW_OP_push_object_address:
    case dwarf::DW_OP_over:
      emitOp(OpNum);
      break;
    case dwarf::DW_OP_deref:
      assert(!isRegisterLocation());
      if (!isMemoryLocation() && ::isMemoryLocation(ExprCursor))
        // Turning this into a memory location description makes the deref
        // implicit.
        LocationKind = Memory;
      else
        emitOp(dwarf::DW_OP_deref);
      break;
    case dwarf::DW_OP_constu:
      assert(!isRegisterLocation());
      emitConstu(Op->getArg(0));
      break;
    case dwarf::DW_OP_consts:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_consts);
      emitSigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_LLVM_convert: {
      unsigned BitSize = Op->getArg(0);
      dwarf::TypeKind Encoding = static_cast<dwarf::TypeKind>(Op->getArg(1));
      if (DwarfVersion >= 5 && CU.getDwarfDebug().useOpConvert()) {
        emitOp(dwarf::DW_OP_convert);
        // If targeting a location-list; simply emit the index into the raw
        // byte stream as ULEB128, DwarfDebug::emitDebugLocEntry has been
        // fitted with means to extract it later.
        // If targeting an inlined DW_AT_location; insert a DIEBaseTypeRef
        // (containing the index and a resolve mechanism during emit) into the
        // DIE value list.
        emitBaseTypeRef(getOrCreateBaseType(BitSize, Encoding));
      } else {
        if (PrevConvertOp && PrevConvertOp->getArg(0) < BitSize) {
          if (Encoding == dwarf::DW_ATE_signed)
            emitLegacySExt(PrevConvertOp->getArg(0));
          else if (Encoding == dwarf::DW_ATE_unsigned)
            emitLegacyZExt(PrevConvertOp->getArg(0));
          PrevConvertOp = None;
        } else {
          PrevConvertOp = Op;
        }
      }
      break;
    }
    case dwarf::DW_OP_stack_value:
      LocationKind = Implicit;
      break;
    case dwarf::DW_OP_swap:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_swap);
      break;
    case dwarf::DW_OP_xderef:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_xderef);
      break;
    case dwarf::DW_OP_deref_size:
      emitOp(dwarf::DW_OP_deref_size);
      emitData1(Op->getArg(0));
      break;
    case dwarf::DW_OP_LLVM_tag_offset:
      TagOffset = Op->getArg(0);
      break;
    case dwarf::DW_OP_regx:
      emitOp(dwarf::DW_OP_regx);
      emitUnsigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_bregx:
      emitOp(dwarf::DW_OP_bregx);
      emitUnsigned(Op->getArg(0));
      emitSigned(Op->getArg(1));
      break;
    default:
      llvm_unreachable("unhandled opcode found in expression");
    }
  }

  if (isImplicitLocation() && !isParameterValue())
    // Turn this into an implicit location description.
    addStackValue();
}

/// Add masking operations to stencil out a subregister.
void DwarfExpression::maskSubRegister() {
  assert(SubRegisterSizeInBits && "no subregister was registered");
  if (SubRegisterOffsetInBits > 0)
    addShr(SubRegisterOffsetInBits);
  uint64_t Mask = (1ULL << (uint64_t)SubRegisterSizeInBits) - 1ULL;
  addAnd(Mask);
}

void DwarfExpression::finalize() {
  assert(DwarfRegs.size() == 0 && "dwarf registers not emitted");
  // Emit any outstanding DW_OP_piece operations to mask out subregisters.
  if (SubRegisterSizeInBits == 0)
    return;
  // Don't emit a DW_OP_piece for a subregister at offset 0.
  if (SubRegisterOffsetInBits == 0)
    return;
  addOpPiece(SubRegisterSizeInBits, SubRegisterOffsetInBits);
}

void DwarfExpression::addFragmentOffset(const DIExpression *Expr) {
  if (!Expr || !Expr->isFragment())
    return;

  uint64_t FragmentOffset = Expr->getFragmentInfo()->OffsetInBits;
  assert(FragmentOffset >= OffsetInBits &&
         "overlapping or duplicate fragments");
  if (FragmentOffset > OffsetInBits)
    addOpPiece(FragmentOffset - OffsetInBits);
  OffsetInBits = FragmentOffset;
}

void DwarfExpression::emitLegacySExt(unsigned FromBits) {
  // (((X >> (FromBits - 1)) * (~0)) << FromBits) | X
  emitOp(dwarf::DW_OP_dup);
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned(FromBits - 1);
  emitOp(dwarf::DW_OP_shr);
  emitOp(dwarf::DW_OP_lit0);
  emitOp(dwarf::DW_OP_not);
  emitOp(dwarf::DW_OP_mul);
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned(FromBits);
  emitOp(dwarf::DW_OP_shl);
  emitOp(dwarf::DW_OP_or);
}

void DwarfExpression::emitLegacyZExt(unsigned FromBits) {
  // X & ((1 << FromBits) - 1)
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned((1ULL << FromBits) - 1);
  emitOp(dwarf::DW_OP_and);
}

void DwarfExpression::addWasmLocation(unsigned Index, uint64_t Offset) {
  assert(LocationKind == Implicit || LocationKind == Unknown);
  LocationKind = Implicit;
  emitOp(dwarf::DW_OP_WASM_location);
  emitUnsigned(Index);
  emitUnsigned(Offset);
}