//===- TGLexer.cpp - Lexer for TableGen ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implement the Lexer for TableGen.
//
//===----------------------------------------------------------------------===//

#include "TGLexer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/config.h" // for strtoull()/strtoll() define
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/TableGen/Error.h"
#include <algorithm>
#include <cctype>
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

using namespace llvm;

namespace {
// A list of supported preprocessing directives with their
// internal token kinds and names.
struct {
  tgtok::TokKind Kind;
  const char *Word;
} PreprocessorDirs[] = {
  { tgtok::Ifdef, "ifdef" },
  { tgtok::Ifndef, "ifndef" },
  { tgtok::Else, "else" },
  { tgtok::Endif, "endif" },
  { tgtok::Define, "define" }
};
} // end anonymous namespace

TGLexer::TGLexer(SourceMgr &SM, ArrayRef<std::string> Macros) : SrcMgr(SM) {
  CurBuffer = SrcMgr.getMainFileID();
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();
  TokStart = nullptr;

  // Pretend that we enter the "top-level" include file.
  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());

  // Put all macros defined in the command line into the DefinedMacros set.
  for (const std::string &MacroName : Macros)
    DefinedMacros.insert(MacroName);
}

SMLoc TGLexer::getLoc() const {
  return SMLoc::getFromPointer(TokStart);
}

SMRange TGLexer::getLocRange() const {
  return {getLoc(), SMLoc::getFromPointer(CurPtr)};
}

/// ReturnError - Set the error to the specified string at the specified
/// location. This is defined to always return tgtok::Error.
tgtok::TokKind TGLexer::ReturnError(SMLoc Loc, const Twine &Msg) {
  PrintError(Loc, Msg);
  return tgtok::Error;
}

tgtok::TokKind TGLexer::ReturnError(const char *Loc, const Twine &Msg) {
  return ReturnError(SMLoc::getFromPointer(Loc), Msg);
}

bool TGLexer::processEOF() {
  SMLoc ParentIncludeLoc = SrcMgr.getParentIncludeLoc(CurBuffer);
  if (ParentIncludeLoc != SMLoc()) {
    // If prepExitInclude() detects a problem with the preprocessing
    // control stack, it will return false. Pretend that we reached
    // the final EOF and stop lexing more tokens by returning false
    // to LexToken().
    if (!prepExitInclude(false))
      return false;

    CurBuffer = SrcMgr.FindBufferContainingLoc(ParentIncludeLoc);
    CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
    CurPtr = ParentIncludeLoc.getPointer();
    // Make sure TokStart points into the parent file's buffer.
    // LexToken() assigns to it before calling getNextChar(),
    // so it is pointing into the included file now.
    TokStart = CurPtr;
    return true;
  }

  // Pretend that we exit the "top-level" include file.
  // Note that in case of an error (e.g. control stack imbalance)
  // the routine will issue a fatal error.
  prepExitInclude(true);
  return false;
}

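/// getNextChar - Read and return the next character from the current buffer,
/// advancing CurPtr past it. Two-character DOS-style newline sequences are
/// folded into a single '\n', a stray NUL inside the buffer is diagnosed and
/// returned as a space, and EOF is returned at the end of the buffer.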
int TGLexer::getNextChar() {
  char CurChar = *CurPtr++;
  switch (CurChar) {
  default:
    return (unsigned char)CurChar;

  case 0: {
    // A NUL character in the stream is either the end of the current buffer or
    // a spurious NUL in the file. Disambiguate that here.
    if (CurPtr - 1 == CurBuf.end()) {
      --CurPtr; // Arrange for another call to return EOF again.
      return EOF;
    }
    PrintError(getLoc(),
               "NUL character is invalid in source; treated as space");
    return ' ';
  }

  case '\n':
  case '\r':
    // Handle the newline character by ignoring it and incrementing the line
    // count. However, be careful about 'dos style' files with \n\r in them.
    // Only treat a \n\r or \r\n as a single newline.
    if ((*CurPtr == '\n' || (*CurPtr == '\r')) &&
        *CurPtr != CurChar)
      ++CurPtr; // Eat the two char newline sequence.
    return '\n';
  }
}

int TGLexer::peekNextChar(int Index) const {
  return *(CurPtr + Index);
}

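/// LexToken - Lex and return the next token, recursing to skip over
/// whitespace and comments. FileOrLineStart indicates that the lexer is at
/// the start of a file or line, which is the only position where a '#' may
/// introduce a preprocessing directive rather than the paste operator.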
tgtok::TokKind TGLexer::LexToken(bool FileOrLineStart) {
  TokStart = CurPtr;
  // This always consumes at least one character.
  int CurChar = getNextChar();

  switch (CurChar) {
  default:
    // Handle letters: [a-zA-Z_]
    if (isalpha(CurChar) || CurChar == '_')
      return LexIdentifier();

    // Unknown character, emit an error.
    return ReturnError(TokStart, "Unexpected character");
  case EOF:
    // Lex next token, if we just left an include file.
    // Note that leaving an include file means that the next
    // symbol is located at the end of the 'include "..."'
    // construct, so LexToken() is called with default
    // false parameter.
    if (processEOF())
      return LexToken();

    // Return EOF denoting the end of lexing.
    return tgtok::Eof;

  case ':': return tgtok::colon;
  case ';': return tgtok::semi;
  case ',': return tgtok::comma;
  case '<': return tgtok::less;
  case '>': return tgtok::greater;
  case ']': return tgtok::r_square;
  case '{': return tgtok::l_brace;
  case '}': return tgtok::r_brace;
  case '(': return tgtok::l_paren;
  case ')': return tgtok::r_paren;
  case '=': return tgtok::equal;
  case '?': return tgtok::question;
  case '#':
    if (FileOrLineStart) {
      tgtok::TokKind Kind = prepIsDirective();
      if (Kind != tgtok::Error)
        return lexPreprocessor(Kind);
    }

    return tgtok::paste;

  // The period is a separate case so we can recognize the "..."
  // range punctuator.
  case '.':
    if (peekNextChar(0) == '.') {
      ++CurPtr; // Eat second dot.
      if (peekNextChar(0) == '.') {
        ++CurPtr; // Eat third dot.
        return tgtok::dotdotdot;
      }
      return ReturnError(TokStart, "Invalid '..' punctuation");
    }
    return tgtok::dot;

  case '\r':
    PrintFatalError("getNextChar() must never return '\r'");
    return tgtok::Error;

  case ' ':
  case '\t':
    // Ignore whitespace.
    return LexToken(FileOrLineStart);
  case '\n':
    // Ignore whitespace, and identify the new line.
    return LexToken(true);
  case '/':
    // If this is the start of a // comment, skip until the end of the line or
    // the end of the buffer.
    if (*CurPtr == '/')
      SkipBCPLComment();
    else if (*CurPtr == '*') {
      if (SkipCComment())
        return tgtok::Error;
    } else // Otherwise, this is an error.
      return ReturnError(TokStart, "Unexpected character");
    return LexToken(FileOrLineStart);
  case '-': case '+':
  case '0': case '1': case '2': case '3': case '4': case '5': case '6':
  case '7': case '8': case '9': {
    int NextChar = 0;
    if (isdigit(CurChar)) {
      // Allow identifiers to start with a number if it is followed by
      // an identifier. This can happen with paste operations like
      // foo#8i.
      int i = 0;
      do {
        NextChar = peekNextChar(i++);
      } while (isdigit(NextChar));

      if (NextChar == 'x' || NextChar == 'b') {
        // If this is [0-9]b[01] or [0-9]x[0-9a-fA-F] this is most
        // likely a number.
        int NextNextChar = peekNextChar(i);
        switch (NextNextChar) {
        default:
          break;
        case '0': case '1':
          if (NextChar == 'b')
            return LexNumber();
          [[fallthrough]];
        case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
          if (NextChar == 'x')
            return LexNumber();
          break;
        }
      }
    }

    if (isalpha(NextChar) || NextChar == '_')
      return LexIdentifier();

    return LexNumber();
  }
  case '"': return LexString();
  case '$': return LexVarName();
  case '[': return LexBracket();
  case '!': return LexExclaim();
  }
}

/// LexString - Lex "[^"]*"
tgtok::TokKind TGLexer::LexString() {
  const char *StrStart = CurPtr;

  CurStrVal = "";

  while (*CurPtr != '"') {
    // If we hit the end of the buffer, report an error.
    if (*CurPtr == 0 && CurPtr == CurBuf.end())
      return ReturnError(StrStart, "End of file in string literal");

    if (*CurPtr == '\n' || *CurPtr == '\r')
      return ReturnError(StrStart, "End of line in string literal");

    if (*CurPtr != '\\') {
      CurStrVal += *CurPtr++;
      continue;
    }

    ++CurPtr;

    switch (*CurPtr) {
    case '\\': case '\'': case '"':
      // These turn into their literal character.
      CurStrVal += *CurPtr++;
      break;
    case 't':
      CurStrVal += '\t';
      ++CurPtr;
      break;
    case 'n':
      CurStrVal += '\n';
      ++CurPtr;
      break;

    case '\n':
    case '\r':
      return ReturnError(CurPtr, "escaped newlines not supported in tblgen");

    // If we hit the end of the buffer, report an error.
    case '\0':
      if (CurPtr == CurBuf.end())
        return ReturnError(StrStart, "End of file in string literal");
      [[fallthrough]];
    default:
      return ReturnError(CurPtr, "invalid escape in string literal");
    }
  }

  ++CurPtr;
  return tgtok::StrVal;
}

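/// LexVarName - Lex the variable name that follows a '$', i.e.
/// $[a-zA-Z_][0-9a-zA-Z_]*. The name (without the '$') is stored in CurStrVal.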
tgtok::TokKind TGLexer::LexVarName() {
  if (!isalpha(CurPtr[0]) && CurPtr[0] != '_')
    return ReturnError(TokStart, "Invalid variable name");

  // Otherwise, we're ok, consume the rest of the characters.
  const char *VarNameStart = CurPtr++;

  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  CurStrVal.assign(VarNameStart, CurPtr);
  return tgtok::VarName;
}

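/// LexIdentifier - Lex [a-zA-Z_][0-9a-zA-Z_]*. Keywords are mapped to their
/// dedicated token kinds, "include" triggers lexing of the included file, and
/// anything else is returned as tgtok::Id with the text in CurStrVal.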
tgtok::TokKind TGLexer::LexIdentifier() {
  // The first letter is [a-zA-Z_].
  const char *IdentStart = TokStart;

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  // Check to see if this identifier is a reserved keyword.
  StringRef Str(IdentStart, CurPtr - IdentStart);

  tgtok::TokKind Kind = StringSwitch<tgtok::TokKind>(Str)
    .Case("int", tgtok::Int)
    .Case("bit", tgtok::Bit)
    .Case("bits", tgtok::Bits)
    .Case("string", tgtok::String)
    .Case("list", tgtok::List)
    .Case("code", tgtok::Code)
    .Case("dag", tgtok::Dag)
    .Case("class", tgtok::Class)
    .Case("def", tgtok::Def)
    .Case("true", tgtok::TrueVal)
    .Case("false", tgtok::FalseVal)
    .Case("foreach", tgtok::Foreach)
    .Case("defm", tgtok::Defm)
    .Case("defset", tgtok::Defset)
    .Case("multiclass", tgtok::MultiClass)
    .Case("field", tgtok::Field)
    .Case("let", tgtok::Let)
    .Case("in", tgtok::In)
    .Case("defvar", tgtok::Defvar)
    .Case("include", tgtok::Include)
    .Case("if", tgtok::If)
    .Case("then", tgtok::Then)
    .Case("else", tgtok::ElseKW)
    .Case("assert", tgtok::Assert)
    .Default(tgtok::Id);

  // A couple of tokens require special processing.
  switch (Kind) {
  case tgtok::Include:
    if (LexInclude()) return tgtok::Error;
    return Lex();
  case tgtok::Id:
    CurStrVal.assign(Str.begin(), Str.end());
    break;
  default:
    break;
  }

  return Kind;
}

/// LexInclude - We just read the "include" token. Get the string token that
/// comes next and enter the include.
bool TGLexer::LexInclude() {
  // The token after the include must be a string.
  tgtok::TokKind Tok = LexToken();
  if (Tok == tgtok::Error) return true;
  if (Tok != tgtok::StrVal) {
    PrintError(getLoc(), "Expected filename after include");
    return true;
  }

  // Get the string.
  std::string Filename = CurStrVal;
  std::string IncludedFile;

  CurBuffer = SrcMgr.AddIncludeFile(Filename, SMLoc::getFromPointer(CurPtr),
                                    IncludedFile);
  if (!CurBuffer) {
    PrintError(getLoc(), "Could not find include file '" + Filename + "'");
    return true;
  }

  Dependencies.insert(IncludedFile);
  // Save the line number and lex buffer of the includer.
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();

  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());
  return false;
}

/// SkipBCPLComment - Skip over the comment by finding the next CR or LF.
/// Or we may end up at the end of the buffer.
void TGLexer::SkipBCPLComment() {
  ++CurPtr; // skip the second slash.
  auto EOLPos = CurBuf.find_first_of("\r\n", CurPtr - CurBuf.data());
  CurPtr = (EOLPos == StringRef::npos) ? CurBuf.end() : CurBuf.data() + EOLPos;
}

/// SkipCComment - This skips C-style /**/ comments. The only difference from C
/// is that we allow nesting.
bool TGLexer::SkipCComment() {
  ++CurPtr; // skip the star.
  unsigned CommentDepth = 1;

  while (true) {
    int CurChar = getNextChar();
    switch (CurChar) {
    case EOF:
      PrintError(TokStart, "Unterminated comment!");
      return true;
    case '*':
      // End of the comment?
      if (CurPtr[0] != '/') break;

      ++CurPtr; // End the */.
      if (--CommentDepth == 0)
        return false;
      break;
    case '/':
      // Start of a nested comment?
      if (CurPtr[0] != '*') break;
      ++CurPtr;
      ++CommentDepth;
      break;
    }
  }
}

/// LexNumber - Lex:
///    [-+]?[0-9]+
///    0x[0-9a-fA-F]+
///    0b[01]+
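/// A hexadecimal value that overflows a signed 64-bit parse is re-parsed as
/// unsigned; binary literals are returned as tgtok::BinaryIntVal rather than
/// tgtok::IntVal, and a bare '+' or '-' lexes as plus/minus punctuation.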
tgtok::TokKind TGLexer::LexNumber() {
  if (CurPtr[-1] == '0') {
    if (CurPtr[0] == 'x') {
      ++CurPtr;
      const char *NumStart = CurPtr;
      while (isxdigit(CurPtr[0]))
        ++CurPtr;

      // Requires at least one hex digit.
      if (CurPtr == NumStart)
        return ReturnError(TokStart, "Invalid hexadecimal number");

      errno = 0;
      CurIntVal = strtoll(NumStart, nullptr, 16);
      if (errno == EINVAL)
        return ReturnError(TokStart, "Invalid hexadecimal number");
      if (errno == ERANGE) {
        errno = 0;
        CurIntVal = (int64_t)strtoull(NumStart, nullptr, 16);
        if (errno == EINVAL)
          return ReturnError(TokStart, "Invalid hexadecimal number");
        if (errno == ERANGE)
          return ReturnError(TokStart, "Hexadecimal number out of range");
      }
      return tgtok::IntVal;
    } else if (CurPtr[0] == 'b') {
      ++CurPtr;
      const char *NumStart = CurPtr;
      while (CurPtr[0] == '0' || CurPtr[0] == '1')
        ++CurPtr;

      // Requires at least one binary digit.
      if (CurPtr == NumStart)
        return ReturnError(CurPtr - 2, "Invalid binary number");
      CurIntVal = strtoll(NumStart, nullptr, 2);
      return tgtok::BinaryIntVal;
    }
  }

  // Check for a sign without a digit.
  if (!isdigit(CurPtr[0])) {
    if (CurPtr[-1] == '-')
      return tgtok::minus;
    else if (CurPtr[-1] == '+')
      return tgtok::plus;
  }

  while (isdigit(CurPtr[0]))
    ++CurPtr;
  CurIntVal = strtoll(TokStart, nullptr, 10);
  return tgtok::IntVal;
}

/// LexBracket - We just read '['. If this is a code block, return it,
/// otherwise return the bracket. Match: '[' and '[{ ( [^}]+ | }[^]] )* }]'
tgtok::TokKind TGLexer::LexBracket() {
  if (CurPtr[0] != '{')
    return tgtok::l_square;
  ++CurPtr;
  const char *CodeStart = CurPtr;
  while (true) {
    int Char = getNextChar();
    if (Char == EOF) break;

    if (Char != '}') continue;

    Char = getNextChar();
    if (Char == EOF) break;
    if (Char == ']') {
      CurStrVal.assign(CodeStart, CurPtr - 2);
      return tgtok::CodeFragment;
    }
  }

  return ReturnError(CodeStart - 2, "Unterminated code block");
}

/// LexExclaim - Lex '!' and '![a-zA-Z]+'.
tgtok::TokKind TGLexer::LexExclaim() {
  if (!isalpha(*CurPtr))
    return ReturnError(CurPtr - 1, "Invalid \"!operator\"");

  const char *Start = CurPtr++;
  while (isalpha(*CurPtr))
    ++CurPtr;

  // Check to see which operator this is.
  tgtok::TokKind Kind =
      StringSwitch<tgtok::TokKind>(StringRef(Start, CurPtr - Start))
          .Case("eq", tgtok::XEq)
          .Case("ne", tgtok::XNe)
          .Case("le", tgtok::XLe)
          .Case("lt", tgtok::XLt)
          .Case("ge", tgtok::XGe)
          .Case("gt", tgtok::XGt)
          .Case("if", tgtok::XIf)
          .Case("cond", tgtok::XCond)
          .Case("isa", tgtok::XIsA)
          .Case("head", tgtok::XHead)
          .Case("tail", tgtok::XTail)
          .Case("size", tgtok::XSize)
          .Case("con", tgtok::XConcat)
          .Case("dag", tgtok::XDag)
          .Case("add", tgtok::XADD)
          .Case("sub", tgtok::XSUB)
          .Case("mul", tgtok::XMUL)
          .Case("div", tgtok::XDIV)
          .Case("not", tgtok::XNOT)
          .Case("logtwo", tgtok::XLOG2)
          .Case("and", tgtok::XAND)
          .Case("or", tgtok::XOR)
          .Case("xor", tgtok::XXOR)
          .Case("shl", tgtok::XSHL)
          .Case("sra", tgtok::XSRA)
          .Case("srl", tgtok::XSRL)
          .Case("cast", tgtok::XCast)
          .Case("empty", tgtok::XEmpty)
          .Case("subst", tgtok::XSubst)
          .Case("foldl", tgtok::XFoldl)
          .Case("foreach", tgtok::XForEach)
          .Case("filter", tgtok::XFilter)
          .Case("listconcat", tgtok::XListConcat)
          .Case("listsplat", tgtok::XListSplat)
          .Case("listremove", tgtok::XListRemove)
          .Case("strconcat", tgtok::XStrConcat)
          .Case("interleave", tgtok::XInterleave)
          .Case("substr", tgtok::XSubstr)
          .Case("find", tgtok::XFind)
          .Cases("setdagop", "setop", tgtok::XSetDagOp) // !setop is deprecated.
          .Cases("getdagop", "getop", tgtok::XGetDagOp) // !getop is deprecated.
          .Case("exists", tgtok::XExists)
          .Default(tgtok::Error);

  return Kind != tgtok::Error ? Kind : ReturnError(Start-1, "Unknown operator");
}

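/// prepExitInclude - Leave the current file from the preprocessor's point of
/// view. Returns false after reporting an error if the file still has open
/// #ifdef regions; otherwise pops the file's control stack and checks the
/// include-stack invariant selected by IncludeStackMustBeEmpty.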
bool TGLexer::prepExitInclude(bool IncludeStackMustBeEmpty) {
  // Report an error if the preprocessor control stack for the current
  // file is not empty.
  if (!PrepIncludeStack.back()->empty()) {
    prepReportPreprocessorStackError();

    return false;
  }

  // Pop the preprocessing controls from the include stack.
  if (PrepIncludeStack.empty()) {
    PrintFatalError("Preprocessor include stack is empty");
  }

  PrepIncludeStack.pop_back();

  if (IncludeStackMustBeEmpty) {
    if (!PrepIncludeStack.empty())
      PrintFatalError("Preprocessor include stack is not empty");
  } else {
    if (PrepIncludeStack.empty())
      PrintFatalError("Preprocessor include stack is empty");
  }

  return true;
}

tgtok::TokKind TGLexer::prepIsDirective() const {
  for (const auto &PD : PreprocessorDirs) {
    int NextChar = *CurPtr;
    bool Match = true;
    unsigned I = 0;
    for (; I < strlen(PD.Word); ++I) {
      if (NextChar != PD.Word[I]) {
        Match = false;
        break;
      }

      NextChar = peekNextChar(I + 1);
    }

    // Check for whitespace after the directive. If there is no whitespace,
    // then we do not recognize it as a preprocessing directive.
    if (Match) {
      tgtok::TokKind Kind = PD.Kind;

      // New line and EOF may follow only #else/#endif. It will be reported
      // as an error for #ifdef/#define after the call to prepLexMacroName().
      if (NextChar == ' ' || NextChar == '\t' || NextChar == EOF ||
          NextChar == '\n' ||
          // It looks like TableGen does not support '\r' as the actual
          // carriage return, e.g. getNextChar() treats a single '\r'
          // as '\n'. So we do the same here.
          NextChar == '\r')
        return Kind;

      // Allow comments after some directives, e.g.:
      //     #else// OR #else/**/
      //     #endif// OR #endif/**/
      //
      // Note that we do allow comments after #ifdef/#define here, e.g.
      //     #ifdef/**/ AND #ifdef//
      //     #define/**/ AND #define//
      //
      // These cases will be reported as incorrect after calling
      // prepLexMacroName(). We could have supported C-style comments
      // after #ifdef/#define, but this would complicate the code
      // for little benefit.
      if (NextChar == '/') {
        NextChar = peekNextChar(I + 1);

        if (NextChar == '*' || NextChar == '/')
          return Kind;

        // Pretend that we do not recognize the directive.
      }
    }
  }

  return tgtok::Error;
}

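/// prepEatPreprocessorDirective - Advance CurPtr past the directive word
/// corresponding to Kind, which must be one of the preprocessing directives
/// recognized by prepIsDirective().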
bool TGLexer::prepEatPreprocessorDirective(tgtok::TokKind Kind) {
  TokStart = CurPtr;

  for (const auto &PD : PreprocessorDirs)
    if (PD.Kind == Kind) {
      // Advance CurPtr to the end of the preprocessing word.
      CurPtr += strlen(PD.Word);
      return true;
    }

  PrintFatalError("Unsupported preprocessing token in "
                  "prepEatPreprocessorDirective()");
  return false;
}

tgtok::TokKind TGLexer::lexPreprocessor(
    tgtok::TokKind Kind, bool ReturnNextLiveToken) {

  // We must be looking at a preprocessing directive. Eat it!
  if (!prepEatPreprocessorDirective(Kind))
    PrintFatalError("lexPreprocessor() called for unknown "
                    "preprocessor directive");

  if (Kind == tgtok::Ifdef || Kind == tgtok::Ifndef) {
    StringRef MacroName = prepLexMacroName();
    StringRef IfTokName = Kind == tgtok::Ifdef ? "#ifdef" : "#ifndef";
    if (MacroName.empty())
      return ReturnError(TokStart, "Expected macro name after " + IfTokName);

    bool MacroIsDefined = DefinedMacros.count(MacroName) != 0;

    // Canonicalize ifndef to ifdef equivalent
    if (Kind == tgtok::Ifndef) {
      MacroIsDefined = !MacroIsDefined;
      Kind = tgtok::Ifdef;
    }

    // Regardless of whether we are processing tokens or not,
    // we put the #ifdef control on stack.
    PrepIncludeStack.back()->push_back(
        {Kind, MacroIsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after " +
                                     IfTokName + " NAME");

    // If we were not processing tokens before this #ifdef,
    // then just return back to the lines skipping code.
    if (!ReturnNextLiveToken)
      return Kind;

    // If we were processing tokens before this #ifdef,
    // and the macro is defined, then just return the next token.
    if (MacroIsDefined)
      return LexToken();

    // We were processing tokens before this #ifdef, and the macro
    // is not defined, so we have to start skipping the lines.
    // If the skipping is successful, it will return the token following
    // either #else or #endif corresponding to this #ifdef.
    if (prepSkipRegion(ReturnNextLiveToken))
      return LexToken();

    return tgtok::Error;
  } else if (Kind == tgtok::Else) {
    // Check if this #else is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #else.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#else without #ifdef or #ifndef");

    PreprocessorControlDesc IfdefEntry = PrepIncludeStack.back()->back();

    if (IfdefEntry.Kind != tgtok::Ifdef) {
      PrintError(TokStart, "double #else");
      return ReturnError(IfdefEntry.SrcPos, "Previous #else is here");
    }

    // Replace the corresponding #ifdef's control with its negation
    // on the control stack.
    PrepIncludeStack.back()->pop_back();
    PrepIncludeStack.back()->push_back(
        {Kind, !IfdefEntry.IsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after #else");

    // If we were processing tokens before this #else,
    // we have to start skipping lines until the matching #endif.
    if (ReturnNextLiveToken) {
      if (prepSkipRegion(ReturnNextLiveToken))
        return LexToken();

      return tgtok::Error;
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Endif) {
    // Check if this #endif is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #endif.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#endif without #ifdef");

    auto &IfdefOrElseEntry = PrepIncludeStack.back()->back();

    if (IfdefOrElseEntry.Kind != tgtok::Ifdef &&
        IfdefOrElseEntry.Kind != tgtok::Else) {
      PrintFatalError("Invalid preprocessor control on the stack");
      return tgtok::Error;
    }

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after #endif");

    PrepIncludeStack.back()->pop_back();

    // If we were processing tokens before this #endif, then
    // we should continue it.
    if (ReturnNextLiveToken) {
      return LexToken();
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Define) {
    StringRef MacroName = prepLexMacroName();
    if (MacroName.empty())
      return ReturnError(TokStart, "Expected macro name after #define");

    if (!DefinedMacros.insert(MacroName).second)
      PrintWarning(getLoc(),
                   "Duplicate definition of macro: " + Twine(MacroName));

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr,
                         "Only comments are supported after #define NAME");

    if (!ReturnNextLiveToken) {
      PrintFatalError("#define must be ignored during the lines skipping");
      return tgtok::Error;
    }

    return LexToken();
  }

  PrintFatalError("Preprocessing directive is not supported");
  return tgtok::Error;
}

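/// prepSkipRegion - Skip lines while token processing is disabled by an
/// #ifdef/#ifndef whose condition is false, until a matching #else or #endif
/// re-enables processing. Returns false if EOF is reached without one or if a
/// nested directive fails to lex. MustNeverBeFalse must always be true.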
bool TGLexer::prepSkipRegion(bool MustNeverBeFalse) {
  if (!MustNeverBeFalse)
    PrintFatalError("Invalid recursion.");

  do {
    // Skip all symbols to the line end.
    prepSkipToLineEnd();

    // Find the first non-whitespace symbol in the next line(s).
    if (!prepSkipLineBegin())
      return false;

    // If the first non-blank/comment symbol on the line is '#',
    // it may be a start of preprocessing directive.
    //
    // If it is not '#' just go to the next line.
    if (*CurPtr == '#')
      ++CurPtr;
    else
      continue;

    tgtok::TokKind Kind = prepIsDirective();

    // If we did not find a preprocessing directive or it is #define,
    // then just skip to the next line. We do not have to do anything
    // for #define in the line-skipping mode.
    if (Kind == tgtok::Error || Kind == tgtok::Define)
      continue;

    tgtok::TokKind ProcessedKind = lexPreprocessor(Kind, false);

    // If lexPreprocessor() encountered an error during lexing this
    // preprocessor idiom, then return false to the calling lexPreprocessor().
    // This will force tgtok::Error to be returned to the tokens processing.
    if (ProcessedKind == tgtok::Error)
      return false;

    if (Kind != ProcessedKind)
      PrintFatalError("prepIsDirective() and lexPreprocessor() "
                      "returned different token kinds");

    // If this preprocessing directive enables tokens processing,
    // then return to the lexPreprocessor() and get to the next token.
    // We can move from line-skipping mode to processing tokens only
    // due to #else or #endif.
    if (prepIsProcessingEnabled()) {
      if (Kind != tgtok::Else && Kind != tgtok::Endif) {
        PrintFatalError("Tokens processing was enabled by an unexpected "
                        "preprocessing directive");
        return false;
      }

      return true;
    }
  } while (CurPtr != CurBuf.end());

  // We have reached the end of the file, but never left the lines-skipping
  // mode. This means there is no matching #endif.
  prepReportPreprocessorStackError();
  return false;
}

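/// prepLexMacroName - Lex the macro name that follows #ifdef, #ifndef or
/// #define. Returns an empty StringRef if the next non-blank characters do
/// not form a valid macro name.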
StringRef TGLexer::prepLexMacroName() {
  // Skip whitespaces between the preprocessing directive and the macro name.
  while (*CurPtr == ' ' || *CurPtr == '\t')
    ++CurPtr;

  TokStart = CurPtr;
  // Macro names start with [a-zA-Z_].
  if (*CurPtr != '_' && !isalpha(*CurPtr))
    return "";

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  return StringRef(TokStart, CurPtr - TokStart);
}

bool TGLexer::prepSkipLineBegin() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
    case '\n':
    case '\r':
      break;

    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '*') {
        // Skip C-style comment.
        // Note that we do not care about skipping the C++-style comments.
        // If the line contains "//", it may not contain any processable
        // preprocessing directive. Just return CurPtr pointing to
        // the first '/' in this case. We also do not care about
        // incorrect symbols after the first '/' - we are in lines-skipping
        // mode, so incorrect code is allowed to some extent.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printing in case of error in SkipCComment().
        TokStart = CurPtr;

        // CurPtr must point to '*' before call to SkipCComment().
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        // CurPtr points to the non-whitespace '/'.
        return true;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }

    default:
      return true;
    }

    ++CurPtr;
  }

  // We have reached the end of the file. Return to the lines skipping
  // code, and allow it to handle the EOF as needed.
  return true;
}

bool TGLexer::prepSkipDirectiveEnd() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
      break;

    case '\n':
    case '\r':
      return true;

    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '/') {
        // Skip C++-style comment.
        // We may just return true now, but let's skip to the line/buffer end
        // to simplify the method specification.
        ++CurPtr;
        SkipBCPLComment();
      } else if (NextChar == '*') {
        // When we are skipping C-style comment at the end of a preprocessing
        // directive, we can skip several lines. If any meaningful TD token
        // follows the end of the C-style comment on the same line, it will
        // be considered as an invalid usage of TD token.
        // For example, we want to forbid usages like this one:
        //     #define MACRO class Class {}
        // But with C-style comments we also disallow the following:
        //     #define MACRO /* This macro is used
        //                      to ... */ class Class {}
        // One can argue that this should be allowed, but it does not seem
        // to be worth the complication. Moreover, this matches
        // the C preprocessor behavior.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printing in case of error in SkipCComment().
        TokStart = CurPtr;
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        TokStart = CurPtr;
        PrintError(CurPtr, "Unexpected character");
        return false;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }

    default:
      // Do not allow any non-whitespaces after the directive.
      TokStart = CurPtr;
      return false;
    }

    ++CurPtr;
  }

  return true;
}

void TGLexer::prepSkipToLineEnd() {
  while (*CurPtr != '\n' && *CurPtr != '\r' && CurPtr != CurBuf.end())
    ++CurPtr;
}

bool TGLexer::prepIsProcessingEnabled() {
  for (const PreprocessorControlDesc &I :
       llvm::reverse(*PrepIncludeStack.back()))
    if (!I.IsDefined)
      return false;

  return true;
}

void TGLexer::prepReportPreprocessorStackError() {
  if (PrepIncludeStack.back()->empty())
    PrintFatalError("prepReportPreprocessorStackError() called with "
                    "empty control stack");

  auto &PrepControl = PrepIncludeStack.back()->back();
  PrintError(CurBuf.end(), "Reached EOF without matching #endif");
  PrintError(PrepControl.SrcPos, "The latest preprocessor control is here");

  TokStart = CurPtr;
}