//===- X86Operand.h - Parsed X86 machine instruction ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H
#define LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H

#include "MCTargetDesc/X86IntelInstPrinter.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "X86AsmParserCommon.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <memory>

namespace llvm {

/// X86Operand - Instances of this class represent a parsed X86 machine
/// instruction operand.
struct X86Operand final : public MCParsedAsmOperand {
  enum KindTy { Token, Register, Immediate, Memory, Prefix, DXRegister } Kind;

  SMLoc StartLoc, EndLoc;
  SMLoc OffsetOfLoc;
  StringRef SymName;
  void *OpDecl;
  bool AddressOf;
  bool CallOperand;

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct RegOp {
    unsigned RegNo;
  };

  struct PrefOp {
    unsigned Prefixes;
  };

  struct ImmOp {
    const MCExpr *Val;
    bool LocalRef;
  };

  struct MemOp {
    unsigned SegReg;
    const MCExpr *Disp;
    unsigned BaseReg;
    unsigned DefaultBaseReg;
    unsigned IndexReg;
    unsigned Scale;
    unsigned Size;
    unsigned ModeSize;

    /// If the memory operand is unsized and there are multiple instruction
    /// matches, prefer the one with this size.
    unsigned FrontendSize;
  };

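  // Kind-specific payload: only the union member selected by Kind is valid at
  // any given time (the accessors below assert this).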
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct ImmOp Imm;
    struct MemOp Mem;
    struct PrefOp Pref;
  };

  X86Operand(KindTy K, SMLoc Start, SMLoc End)
      : Kind(K), StartLoc(Start), EndLoc(End), OpDecl(nullptr),
        AddressOf(false), CallOperand(false) {}

  StringRef getSymName() override { return SymName; }
  void *getOpDecl() override { return OpDecl; }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getOffsetOfLoc - Get the location of the offset operator.
  SMLoc getOffsetOfLoc() const override { return OffsetOfLoc; }

  void print(raw_ostream &OS) const override {

    auto PrintImmValue = [&](const MCExpr *Val, const char *VName) {
      if (Val->getKind() == MCExpr::Constant) {
        if (auto Imm = cast<MCConstantExpr>(Val)->getValue())
          OS << VName << Imm;
      } else if (Val->getKind() == MCExpr::SymbolRef) {
        if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Val)) {
          const MCSymbol &Sym = SRE->getSymbol();
          if (const char *SymNameStr = Sym.getName().data())
            OS << VName << SymNameStr;
        }
      }
    };

    switch (Kind) {
    case Token:
      OS << Tok.Data;
      break;
    case Register:
      OS << "Reg:" << X86IntelInstPrinter::getRegisterName(Reg.RegNo);
      break;
    case DXRegister:
      OS << "DXReg";
      break;
    case Immediate:
      PrintImmValue(Imm.Val, "Imm:");
      break;
    case Prefix:
      OS << "Prefix:" << Pref.Prefixes;
      break;
    case Memory:
      OS << "Memory: ModeSize=" << Mem.ModeSize;
      if (Mem.Size)
        OS << ",Size=" << Mem.Size;
      if (Mem.BaseReg)
        OS << ",BaseReg=" << X86IntelInstPrinter::getRegisterName(Mem.BaseReg);
      if (Mem.IndexReg)
        OS << ",IndexReg="
           << X86IntelInstPrinter::getRegisterName(Mem.IndexReg);
      if (Mem.Scale)
        OS << ",Scale=" << Mem.Scale;
      if (Mem.Disp)
        PrintImmValue(Mem.Disp, ",Disp=");
      if (Mem.SegReg)
        OS << ",SegReg=" << X86IntelInstPrinter::getRegisterName(Mem.SegReg);
      break;
    }
  }

  StringRef getToken() const {
    assert(Kind == Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }
  void setTokenValue(StringRef Value) {
    assert(Kind == Token && "Invalid access!");
    Tok.Data = Value.data();
    Tok.Length = Value.size();
  }

  unsigned getReg() const override {
    assert(Kind == Register && "Invalid access!");
    return Reg.RegNo;
  }

  unsigned getPrefix() const {
    assert(Kind == Prefix && "Invalid access!");
    return Pref.Prefixes;
  }

  const MCExpr *getImm() const {
    assert(Kind == Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getMemDisp() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Disp;
  }
  unsigned getMemSegReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.SegReg;
  }
  unsigned getMemBaseReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.BaseReg;
  }
  unsigned getMemDefaultBaseReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.DefaultBaseReg;
  }
  unsigned getMemIndexReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg;
  }
  unsigned getMemScale() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Scale;
  }
  unsigned getMemModeSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.ModeSize;
  }
  unsigned getMemFrontendSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.FrontendSize;
  }

  bool isToken() const override { return Kind == Token; }

  bool isImm() const override { return Kind == Immediate; }

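  // The isImmSExti<W>i<N> predicates check whether an immediate is usable in a
  // W-bit context when encoded as an N-bit field that the CPU sign-extends.
  // Non-constant expressions are accepted optimistically and left to
  // relaxation/fixups, as the comments inside each predicate note.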
  bool isImmSExti16i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti16i8Value(CE->getValue());
  }
  bool isImmSExti32i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti32i8Value(CE->getValue());
  }
  bool isImmSExti64i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i8Value(CE->getValue());
  }
  bool isImmSExti64i32() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i32Value(CE->getValue());
  }

  bool isImmUnsignedi4() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, reject it. The immediate byte is shared
    // with a register encoding. We can't have it affected by a relocation.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    return isImmUnsignedi4Value(CE->getValue());
  }

  bool isImmUnsignedi8() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return true;
    return isImmUnsignedi8Value(CE->getValue());
  }

  bool isOffsetOfLocal() const override { return isImm() && Imm.LocalRef; }

  bool needAddressOf() const override { return AddressOf; }

  bool isMem() const override { return Kind == Memory; }
  bool isMemUnsized() const {
    return Kind == Memory && Mem.Size == 0;
  }
  bool isMem8() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 8);
  }
  bool isMem16() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 16);
  }
  bool isMem32() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 32);
  }
  bool isMem64() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 64);
  }
  bool isMem80() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 80);
  }
  bool isMem128() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 128);
  }
  bool isMem256() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 256);
  }
  bool isMem512() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 512);
  }

  bool isSibMem() const {
    return isMem() && Mem.BaseReg != X86::RIP && Mem.BaseReg != X86::EIP;
  }

  bool isMemIndexReg(unsigned LowR, unsigned HighR) const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg >= LowR && Mem.IndexReg <= HighR;
  }

  bool isMem64_RC128() const {
    return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem128_RC128() const {
    return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem128_RC256() const {
    return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }
  bool isMem256_RC128() const {
    return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem256_RC256() const {
    return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }

  bool isMem64_RC128X() const {
    return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM31);
  }
  bool isMem128_RC128X() const {
    return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM31);
  }
  bool isMem128_RC256X() const {
    return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM31);
  }
  bool isMem256_RC128X() const {
    return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM31);
  }
  bool isMem256_RC256X() const {
    return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM31);
  }
  bool isMem256_RC512() const {
    return isMem256() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
  }
  bool isMem512_RC256X() const {
    return isMem512() && isMemIndexReg(X86::YMM0, X86::YMM31);
  }
  bool isMem512_RC512() const {
    return isMem512() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
  }

  bool isAbsMem() const {
    return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
           !getMemIndexReg() && getMemScale() == 1;
  }
  bool isAVX512RC() const {
    return isImm();
  }

  bool isAbsMem16() const {
    return isAbsMem() && Mem.ModeSize == 16;
  }

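  // String-instruction operands: a "source index" operand is a bare
  // [SI/ESI/RSI] reference and a "destination index" operand is a bare
  // [DI/EDI/RDI] reference (optionally through ES), in both cases with no
  // index register and a zero displacement.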
  bool isSrcIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
           (getMemBaseReg() == X86::RSI || getMemBaseReg() == X86::ESI ||
            getMemBaseReg() == X86::SI) && isa<MCConstantExpr>(getMemDisp()) &&
           cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isSrcIdx8() const {
    return isMem8() && isSrcIdx();
  }
  bool isSrcIdx16() const {
    return isMem16() && isSrcIdx();
  }
  bool isSrcIdx32() const {
    return isMem32() && isSrcIdx();
  }
  bool isSrcIdx64() const {
    return isMem64() && isSrcIdx();
  }

  bool isDstIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
           (getMemSegReg() == 0 || getMemSegReg() == X86::ES) &&
           (getMemBaseReg() == X86::RDI || getMemBaseReg() == X86::EDI ||
            getMemBaseReg() == X86::DI) && isa<MCConstantExpr>(getMemDisp()) &&
           cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isDstIdx8() const {
    return isMem8() && isDstIdx();
  }
  bool isDstIdx16() const {
    return isMem16() && isDstIdx();
  }
  bool isDstIdx32() const {
    return isMem32() && isDstIdx();
  }
  bool isDstIdx64() const {
    return isMem64() && isDstIdx();
  }

  bool isMemOffs() const {
    return Kind == Memory && !getMemBaseReg() && !getMemIndexReg() &&
           getMemScale() == 1;
  }

  bool isMemOffs16_8() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs16_16() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs16_32() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_8() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs32_16() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs32_32() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_64() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 64);
  }
  bool isMemOffs64_8() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs64_16() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs64_32() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs64_64() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 64);
  }

  bool isPrefix() const { return Kind == Prefix; }
  bool isReg() const override { return Kind == Register; }
  bool isDXReg() const { return Kind == DXRegister; }

  bool isGR32orGR64() const {
    return Kind == Register &&
           (X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
  }

  bool isGR16orGR32orGR64() const {
    return Kind == Register &&
           (X86MCRegisterClasses[X86::GR16RegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
  }

  bool isVectorReg() const {
    return Kind == Register &&
           (X86MCRegisterClasses[X86::VR64RegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR128XRegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR256XRegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR512RegClassID].contains(getReg()));
  }

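  // The isVK<N>Pair predicates accept a single mask register; when the operand
  // is added to an MCInst, addMaskPairOperands (below) rewrites it into the
  // corresponding K0_K1..K6_K7 pair register.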
  bool isVK1Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK1RegClassID].contains(getReg());
  }

  bool isVK2Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK2RegClassID].contains(getReg());
  }

  bool isVK4Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK4RegClassID].contains(getReg());
  }

  bool isVK8Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK8RegClassID].contains(getReg());
  }

  bool isVK16Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK16RegClassID].contains(getReg());
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addGR32orGR64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister RegNo = getReg();
    if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
      RegNo = getX86SubSuperRegister(RegNo, 32);
    Inst.addOperand(MCOperand::createReg(RegNo));
  }

  void addGR16orGR32orGR64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister RegNo = getReg();
    if (X86MCRegisterClasses[X86::GR32RegClassID].contains(RegNo) ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
      RegNo = getX86SubSuperRegister(RegNo, 16);
    Inst.addOperand(MCOperand::createReg(RegNo));
  }

  void addAVX512RCOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addMaskPairOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Reg = getReg();
    switch (Reg) {
    case X86::K0:
    case X86::K1:
      Reg = X86::K0_K1;
      break;
    case X86::K2:
    case X86::K3:
      Reg = X86::K2_K3;
      break;
    case X86::K4:
    case X86::K5:
      Reg = X86::K4_K5;
      break;
    case X86::K6:
    case X86::K7:
      Reg = X86::K6_K7;
      break;
    }
    Inst.addOperand(MCOperand::createReg(Reg));
  }

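  // A full memory reference expands to five MCOperands, in the order the X86
  // MC layer expects: base register, scale, index register, displacement,
  // segment register.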
  void addMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 5) && "Invalid number of operands!");
    if (getMemBaseReg())
      Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    else
      Inst.addOperand(MCOperand::createReg(getMemDefaultBaseReg()));
    Inst.addOperand(MCOperand::createImm(getMemScale()));
    Inst.addOperand(MCOperand::createReg(getMemIndexReg()));
    addExpr(Inst, getMemDisp());
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  void addAbsMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
  }

  void addSrcIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  void addDstIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
  }

  void addMemOffsOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  static std::unique_ptr<X86Operand> CreateToken(StringRef Str, SMLoc Loc) {
    SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size());
    auto Res = std::make_unique<X86Operand>(Token, Loc, EndLoc);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    return Res;
  }

  static std::unique_ptr<X86Operand>
  CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc,
            bool AddressOf = false, SMLoc OffsetOfLoc = SMLoc(),
            StringRef SymName = StringRef(), void *OpDecl = nullptr) {
    auto Res = std::make_unique<X86Operand>(Register, StartLoc, EndLoc);
    Res->Reg.RegNo = RegNo;
    Res->AddressOf = AddressOf;
    Res->OffsetOfLoc = OffsetOfLoc;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    return Res;
  }

  static std::unique_ptr<X86Operand>
  CreateDXReg(SMLoc StartLoc, SMLoc EndLoc) {
    return std::make_unique<X86Operand>(DXRegister, StartLoc, EndLoc);
  }

  static std::unique_ptr<X86Operand>
  CreatePrefix(unsigned Prefixes, SMLoc StartLoc, SMLoc EndLoc) {
    auto Res = std::make_unique<X86Operand>(Prefix, StartLoc, EndLoc);
    Res->Pref.Prefixes = Prefixes;
    return Res;
  }

  static std::unique_ptr<X86Operand> CreateImm(const MCExpr *Val,
                                               SMLoc StartLoc, SMLoc EndLoc,
                                               StringRef SymName = StringRef(),
                                               void *OpDecl = nullptr,
                                               bool GlobalRef = true) {
    auto Res = std::make_unique<X86Operand>(Immediate, StartLoc, EndLoc);
    Res->Imm.Val = Val;
    Res->Imm.LocalRef = !GlobalRef;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    Res->AddressOf = true;
    return Res;
  }

  /// Create an absolute memory operand.
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
            unsigned Size = 0, StringRef SymName = StringRef(),
            void *OpDecl = nullptr, unsigned FrontendSize = 0) {
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg = 0;
    Res->Mem.Disp = Disp;
    Res->Mem.BaseReg = 0;
    Res->Mem.DefaultBaseReg = 0;
    Res->Mem.IndexReg = 0;
    Res->Mem.Scale = 1;
    Res->Mem.Size = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    Res->AddressOf = false;
    return Res;
  }

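  // Illustrative sketch (not part of this header): a parser handling the
  // 64-bit Intel-syntax operand "qword ptr [rax + rcx*4 + 16]" could build it
  // with the generalized factory below, e.g.
  //   X86Operand::CreateMem(/*ModeSize=*/64, /*SegReg=*/0,
  //                         MCConstantExpr::create(16, Ctx), X86::RAX,
  //                         X86::RCX, /*Scale=*/4, Start, End, /*Size=*/64);
  // where Ctx, Start, and End are the caller's MCContext and source locations.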
  /// Create a generalized memory operand.
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, unsigned SegReg, const MCExpr *Disp,
            unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc,
            SMLoc EndLoc, unsigned Size = 0,
            unsigned DefaultBaseReg = X86::NoRegister,
            StringRef SymName = StringRef(), void *OpDecl = nullptr,
            unsigned FrontendSize = 0) {
    // We should never just have a displacement, that should be parsed as an
    // absolute memory operand.
    assert((SegReg || BaseReg || IndexReg || DefaultBaseReg) &&
           "Invalid memory operand!");

    // The scale should always be one of {1,2,4,8}.
    assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) &&
           "Invalid scale!");
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg = SegReg;
    Res->Mem.Disp = Disp;
    Res->Mem.BaseReg = BaseReg;
    Res->Mem.DefaultBaseReg = DefaultBaseReg;
    Res->Mem.IndexReg = IndexReg;
    Res->Mem.Scale = Scale;
    Res->Mem.Size = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    Res->AddressOf = false;
    return Res;
  }
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H