//===- X86Operand.h - Parsed X86 machine instruction ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H
#define LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H

#include "MCTargetDesc/X86IntelInstPrinter.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "X86AsmParserCommon.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <memory>

namespace llvm {

/// X86Operand - Instances of this class represent a parsed operand of an X86
/// machine instruction.
struct X86Operand final : public MCParsedAsmOperand {
  enum KindTy { Token, Register, Immediate, Memory, Prefix, DXRegister } Kind;

  SMLoc StartLoc, EndLoc;
  SMLoc OffsetOfLoc;
  StringRef SymName;
  void *OpDecl;
  bool AddressOf;
  bool CallOperand;

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct RegOp {
    unsigned RegNo;
  };

  struct PrefOp {
    unsigned Prefixes;
  };

  struct ImmOp {
    const MCExpr *Val;
    bool LocalRef;
  };

  struct MemOp {
    unsigned SegReg;
    const MCExpr *Disp;
    unsigned BaseReg;
    unsigned IndexReg;
    unsigned Scale;
    unsigned Size;
    unsigned ModeSize;

    /// If the memory operand is unsized and there are multiple instruction
    /// matches, prefer the one with this size.
    unsigned FrontendSize;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct ImmOp Imm;
    struct MemOp Mem;
    struct PrefOp Pref;
  };

  X86Operand(KindTy K, SMLoc Start, SMLoc End)
      : Kind(K), StartLoc(Start), EndLoc(End), CallOperand(false) {}

  StringRef getSymName() override { return SymName; }
  void *getOpDecl() override { return OpDecl; }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getOffsetOfLoc - Get the location of the offset operator.
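  /// This is only meaningful when an Intel-syntax "offset" operator was
  /// parsed for this operand; otherwise the location is the default
  /// (invalid) SMLoc.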
  SMLoc getOffsetOfLoc() const override { return OffsetOfLoc; }

  void print(raw_ostream &OS) const override {

    auto PrintImmValue = [&](const MCExpr *Val, const char *VName) {
      if (Val->getKind() == MCExpr::Constant) {
        if (auto Imm = cast<MCConstantExpr>(Val)->getValue())
          OS << VName << Imm;
      } else if (Val->getKind() == MCExpr::SymbolRef) {
        if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Val)) {
          const MCSymbol &Sym = SRE->getSymbol();
          if (const char *SymNameStr = Sym.getName().data())
            OS << VName << SymNameStr;
        }
      }
    };

    switch (Kind) {
    case Token:
      OS << Tok.Data;
      break;
    case Register:
      OS << "Reg:" << X86IntelInstPrinter::getRegisterName(Reg.RegNo);
      break;
    case DXRegister:
      OS << "DXReg";
      break;
    case Immediate:
      PrintImmValue(Imm.Val, "Imm:");
      break;
    case Prefix:
      OS << "Prefix:" << Pref.Prefixes;
      break;
    case Memory:
      OS << "Memory: ModeSize=" << Mem.ModeSize;
      if (Mem.Size)
        OS << ",Size=" << Mem.Size;
      if (Mem.BaseReg)
        OS << ",BaseReg=" << X86IntelInstPrinter::getRegisterName(Mem.BaseReg);
      if (Mem.IndexReg)
        OS << ",IndexReg="
           << X86IntelInstPrinter::getRegisterName(Mem.IndexReg);
      if (Mem.Scale)
        OS << ",Scale=" << Mem.Scale;
      if (Mem.Disp)
        PrintImmValue(Mem.Disp, ",Disp=");
      if (Mem.SegReg)
        OS << ",SegReg=" << X86IntelInstPrinter::getRegisterName(Mem.SegReg);
      break;
    }
  }

  StringRef getToken() const {
    assert(Kind == Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }
  void setTokenValue(StringRef Value) {
    assert(Kind == Token && "Invalid access!");
    Tok.Data = Value.data();
    Tok.Length = Value.size();
  }

  unsigned getReg() const override {
    assert(Kind == Register && "Invalid access!");
    return Reg.RegNo;
  }

  unsigned getPrefix() const {
    assert(Kind == Prefix && "Invalid access!");
    return Pref.Prefixes;
  }

  const MCExpr *getImm() const {
    assert(Kind == Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getMemDisp() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Disp;
  }
  unsigned getMemSegReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.SegReg;
  }
  unsigned getMemBaseReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.BaseReg;
  }
  unsigned getMemIndexReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg;
  }
  unsigned getMemScale() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Scale;
  }
  unsigned getMemModeSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.ModeSize;
  }
  unsigned getMemFrontendSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.FrontendSize;
  }

  bool isToken() const override { return Kind == Token; }

  bool isImm() const override { return Kind == Immediate; }

  bool isImmSExti16i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
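    // ("SExti16i8" means a 16-bit operand whose immediate is encoded in 8
    // bits and sign-extended by the processor; the predicates below follow
    // the same naming scheme.)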
    return isImmSExti16i8Value(CE->getValue());
  }
  bool isImmSExti32i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti32i8Value(CE->getValue());
  }
  bool isImmSExti64i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i8Value(CE->getValue());
  }
  bool isImmSExti64i32() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i32Value(CE->getValue());
  }

  bool isImmUnsignedi4() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, reject it. The immediate byte is shared
    // with a register encoding. We can't have it affected by a relocation.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    return isImmUnsignedi4Value(CE->getValue());
  }

  bool isImmUnsignedi8() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
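    // (Unlike isImmUnsignedi4 above, the full immediate byte is available
    // here, so a fixup can still be applied to a symbolic value.)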
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return true;
    return isImmUnsignedi8Value(CE->getValue());
  }

  bool isOffsetOfLocal() const override { return isImm() && Imm.LocalRef; }

  bool needAddressOf() const override { return AddressOf; }

  bool isMem() const override { return Kind == Memory; }
  bool isMemUnsized() const {
    return Kind == Memory && Mem.Size == 0;
  }
  bool isMem8() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 8);
  }
  bool isMem16() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 16);
  }
  bool isMem32() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 32);
  }
  bool isMem64() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 64);
  }
  bool isMem80() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 80);
  }
  bool isMem128() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 128);
  }
  bool isMem256() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 256);
  }
  bool isMem512() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 512);
  }
  bool isMemIndexReg(unsigned LowR, unsigned HighR) const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg >= LowR && Mem.IndexReg <= HighR;
  }

  bool isMem64_RC128() const {
    return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem128_RC128() const {
    return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem128_RC256() const {
    return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }
  bool isMem256_RC128() const {
    return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem256_RC256() const {
    return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }

  bool isMem64_RC128X() const {
    return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM31);
  }
  bool isMem128_RC128X() const {
    return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM31);
  }
  bool isMem128_RC256X() const {
    return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM31);
  }
  bool isMem256_RC128X() const {
    return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM31);
  }
  bool isMem256_RC256X() const {
    return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM31);
  }
  bool isMem256_RC512() const {
    return isMem256() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
  }
  bool isMem512_RC256X() const {
    return isMem512() && isMemIndexReg(X86::YMM0, X86::YMM31);
  }
  bool isMem512_RC512() const {
    return isMem512() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
  }

  bool isAbsMem() const {
    return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
           !getMemIndexReg() && getMemScale() == 1;
  }
  bool isAVX512RC() const {
    return isImm();
  }

  bool isAbsMem16() const {
    return isAbsMem() && Mem.ModeSize == 16;
  }

  bool isSrcIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
           (getMemBaseReg() == X86::RSI || getMemBaseReg() == X86::ESI ||
            getMemBaseReg() == X86::SI) && isa<MCConstantExpr>(getMemDisp()) &&
           cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isSrcIdx8() const {
    return isMem8() && isSrcIdx();
  }
  bool isSrcIdx16() const {
    return isMem16() && isSrcIdx();
  }
  bool isSrcIdx32() const {
    return isMem32() && isSrcIdx();
  }
  bool isSrcIdx64() const {
    return isMem64() && isSrcIdx();
  }

  bool isDstIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
           (getMemSegReg() == 0 || getMemSegReg() == X86::ES) &&
           (getMemBaseReg() == X86::RDI || getMemBaseReg() == X86::EDI ||
            getMemBaseReg() == X86::DI) && isa<MCConstantExpr>(getMemDisp()) &&
           cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isDstIdx8() const {
    return isMem8() && isDstIdx();
  }
  bool isDstIdx16() const {
    return isMem16() && isDstIdx();
  }
  bool isDstIdx32() const {
    return isMem32() && isDstIdx();
  }
  bool isDstIdx64() const {
    return isMem64() && isDstIdx();
  }

  bool isMemOffs() const {
    return Kind == Memory && !getMemBaseReg() && !getMemIndexReg() &&
           getMemScale() == 1;
  }

  bool isMemOffs16_8() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs16_16() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs16_32() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_8() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs32_16() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs32_32() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_64() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 64);
  }
  bool isMemOffs64_8() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs64_16() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs64_32() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs64_64() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 64);
  }

  bool isPrefix() const { return Kind == Prefix; }
  bool isReg() const override { return Kind == Register; }
  bool isDXReg() const { return Kind == DXRegister; }

  bool isGR32orGR64() const {
    return Kind == Register &&
           (X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
  }

  bool isVK1Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK1RegClassID].contains(getReg());
  }

  bool isVK2Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK2RegClassID].contains(getReg());
  }

  bool isVK4Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK4RegClassID].contains(getReg());
  }

  bool isVK8Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK8RegClassID].contains(getReg());
  }

  bool isVK16Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK16RegClassID].contains(getReg());
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.
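    // Constant expressions are folded into plain immediate operands; anything
    // symbolic is kept as an MCExpr for later fixup resolution.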
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addGR32orGR64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister RegNo = getReg();
    if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
      RegNo = getX86SubSuperRegister(RegNo, 32);
    Inst.addOperand(MCOperand::createReg(RegNo));
  }

  void addAVX512RCOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addMaskPairOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Reg = getReg();
    switch (Reg) {
    case X86::K0:
    case X86::K1:
      Reg = X86::K0_K1;
      break;
    case X86::K2:
    case X86::K3:
      Reg = X86::K2_K3;
      break;
    case X86::K4:
    case X86::K5:
      Reg = X86::K4_K5;
      break;
    case X86::K6:
    case X86::K7:
      Reg = X86::K6_K7;
      break;
    }
    Inst.addOperand(MCOperand::createReg(Reg));
  }

  void addMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 5) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    Inst.addOperand(MCOperand::createImm(getMemScale()));
    Inst.addOperand(MCOperand::createReg(getMemIndexReg()));
    addExpr(Inst, getMemDisp());
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  void addAbsMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
  }

  void addSrcIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  void addDstIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
  }

  void addMemOffsOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    // Add as immediates when possible.
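    // A memory-offset operand consists only of an absolute displacement plus
    // a segment register, which is why just those two are emitted here.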
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  static std::unique_ptr<X86Operand> CreateToken(StringRef Str, SMLoc Loc) {
    SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size());
    auto Res = std::make_unique<X86Operand>(Token, Loc, EndLoc);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    return Res;
  }

  static std::unique_ptr<X86Operand>
  CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc,
            bool AddressOf = false, SMLoc OffsetOfLoc = SMLoc(),
            StringRef SymName = StringRef(), void *OpDecl = nullptr) {
    auto Res = std::make_unique<X86Operand>(Register, StartLoc, EndLoc);
    Res->Reg.RegNo = RegNo;
    Res->AddressOf = AddressOf;
    Res->OffsetOfLoc = OffsetOfLoc;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    return Res;
  }

  static std::unique_ptr<X86Operand>
  CreateDXReg(SMLoc StartLoc, SMLoc EndLoc) {
    return std::make_unique<X86Operand>(DXRegister, StartLoc, EndLoc);
  }

  static std::unique_ptr<X86Operand>
  CreatePrefix(unsigned Prefixes, SMLoc StartLoc, SMLoc EndLoc) {
    auto Res = std::make_unique<X86Operand>(Prefix, StartLoc, EndLoc);
    Res->Pref.Prefixes = Prefixes;
    return Res;
  }

  static std::unique_ptr<X86Operand> CreateImm(const MCExpr *Val,
                                               SMLoc StartLoc, SMLoc EndLoc,
                                               StringRef SymName = StringRef(),
                                               void *OpDecl = nullptr,
                                               bool GlobalRef = true) {
    auto Res = std::make_unique<X86Operand>(Immediate, StartLoc, EndLoc);
    Res->Imm.Val = Val;
    Res->Imm.LocalRef = !GlobalRef;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    Res->AddressOf = true;
    return Res;
  }

  /// Create an absolute memory operand.
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
            unsigned Size = 0, StringRef SymName = StringRef(),
            void *OpDecl = nullptr, unsigned FrontendSize = 0) {
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg = 0;
    Res->Mem.Disp = Disp;
    Res->Mem.BaseReg = 0;
    Res->Mem.IndexReg = 0;
    Res->Mem.Scale = 1;
    Res->Mem.Size = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    Res->AddressOf = false;
    return Res;
  }

  /// Create a generalized memory operand.
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, unsigned SegReg, const MCExpr *Disp,
            unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc,
            SMLoc EndLoc, unsigned Size = 0, StringRef SymName = StringRef(),
            void *OpDecl = nullptr, unsigned FrontendSize = 0) {
    // We should never just have a displacement; that should be parsed as an
    // absolute memory operand.
    assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!");

    // The scale should always be one of {1,2,4,8}.
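    // (These are the only factors the 2-bit scale field of an SIB byte can
    // encode.)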
    assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) &&
           "Invalid scale!");
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg = SegReg;
    Res->Mem.Disp = Disp;
    Res->Mem.BaseReg = BaseReg;
    Res->Mem.IndexReg = IndexReg;
    Res->Mem.Scale = Scale;
    Res->Mem.Size = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    Res->AddressOf = false;
    return Res;
  }
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H