//===- TGLexer.cpp - Lexer for TableGen -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implement the Lexer for TableGen.
//
//===----------------------------------------------------------------------===//

#include "TGLexer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/config.h" // for strtoull()/strtoll() define
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/TableGen/Error.h"
#include <algorithm>
#include <cctype>
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

using namespace llvm;

namespace {
// A list of supported preprocessing directives with their
// internal token kinds and names.
struct {
  tgtok::TokKind Kind;
  const char *Word;
} PreprocessorDirs[] = {
  { tgtok::Ifdef, "ifdef" },
  { tgtok::Ifndef, "ifndef" },
  { tgtok::Else, "else" },
  { tgtok::Endif, "endif" },
  { tgtok::Define, "define" }
};
} // end anonymous namespace

TGLexer::TGLexer(SourceMgr &SM, ArrayRef<std::string> Macros) : SrcMgr(SM) {
  CurBuffer = SrcMgr.getMainFileID();
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();
  TokStart = nullptr;

  // Pretend that we enter the "top-level" include file.
  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());

  // Put all macros defined in the command line into the DefinedMacros set.
  for (const std::string &MacroName : Macros)
    DefinedMacros.insert(MacroName);
}

SMLoc TGLexer::getLoc() const {
  return SMLoc::getFromPointer(TokStart);
}

SMRange TGLexer::getLocRange() const {
  return {getLoc(), SMLoc::getFromPointer(CurPtr)};
}

/// ReturnError - Set the error to the specified string at the specified
/// location. This is defined to always return tgtok::Error.
tgtok::TokKind TGLexer::ReturnError(SMLoc Loc, const Twine &Msg) {
  PrintError(Loc, Msg);
  return tgtok::Error;
}

tgtok::TokKind TGLexer::ReturnError(const char *Loc, const Twine &Msg) {
  return ReturnError(SMLoc::getFromPointer(Loc), Msg);
}

bool TGLexer::processEOF() {
  SMLoc ParentIncludeLoc = SrcMgr.getParentIncludeLoc(CurBuffer);
  if (ParentIncludeLoc != SMLoc()) {
    // If prepExitInclude() detects a problem with the preprocessing
    // control stack, it will return false. Pretend that we reached
    // the final EOF and stop lexing more tokens by returning false
    // to LexToken().
    if (!prepExitInclude(false))
      return false;

    CurBuffer = SrcMgr.FindBufferContainingLoc(ParentIncludeLoc);
    CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
    CurPtr = ParentIncludeLoc.getPointer();
    // Make sure TokStart points into the parent file's buffer.
    // LexToken() assigns to it before calling getNextChar(),
    // so it is pointing into the included file now.
    TokStart = CurPtr;
    return true;
  }

  // Pretend that we exit the "top-level" include file.
  // Note that in case of an error (e.g. control stack imbalance)
  // the routine will issue a fatal error.
  prepExitInclude(true);
  return false;
}

int TGLexer::getNextChar() {
  char CurChar = *CurPtr++;
  switch (CurChar) {
  default:
    return (unsigned char)CurChar;

  case 0: {
    // A NUL character in the stream is either the end of the current buffer or
    // a spurious NUL in the file. Disambiguate that here.
    if (CurPtr - 1 == CurBuf.end()) {
      --CurPtr; // Arrange for another call to return EOF again.
      return EOF;
    }
    PrintError(getLoc(),
               "NUL character is invalid in source; treated as space");
    return ' ';
  }

  case '\n':
  case '\r':
    // Handle the newline character by ignoring it and incrementing the line
    // count. However, be careful about 'dos style' files with \n\r in them.
    // Only treat a \n\r or \r\n as a single line.
    if ((*CurPtr == '\n' || (*CurPtr == '\r')) &&
        *CurPtr != CurChar)
      ++CurPtr; // Eat the two char newline sequence.
    return '\n';
  }
}

int TGLexer::peekNextChar(int Index) const {
  return *(CurPtr + Index);
}

tgtok::TokKind TGLexer::LexToken(bool FileOrLineStart) {
  TokStart = CurPtr;
  // This always consumes at least one character.
  int CurChar = getNextChar();

  switch (CurChar) {
  default:
    // Handle letters: [a-zA-Z_]
    if (isalpha(CurChar) || CurChar == '_')
      return LexIdentifier();

    // Unknown character, emit an error.
    return ReturnError(TokStart, "Unexpected character");
  case EOF:
    // Lex next token, if we just left an include file.
    // Note that leaving an include file means that the next
    // symbol is located at the end of the 'include "..."'
    // construct, so LexToken() is called with default
    // false parameter.
    if (processEOF())
      return LexToken();

    // Return EOF denoting the end of lexing.
    return tgtok::Eof;

  case ':': return tgtok::colon;
  case ';': return tgtok::semi;
  case ',': return tgtok::comma;
  case '<': return tgtok::less;
  case '>': return tgtok::greater;
  case ']': return tgtok::r_square;
  case '{': return tgtok::l_brace;
  case '}': return tgtok::r_brace;
  case '(': return tgtok::l_paren;
  case ')': return tgtok::r_paren;
  case '=': return tgtok::equal;
  case '?': return tgtok::question;
  case '#':
    if (FileOrLineStart) {
      tgtok::TokKind Kind = prepIsDirective();
      if (Kind != tgtok::Error)
        return lexPreprocessor(Kind);
    }

    return tgtok::paste;

  // The period is a separate case so we can recognize the "..."
  // range punctuator.
  case '.':
    if (peekNextChar(0) == '.') {
      ++CurPtr; // Eat second dot.
      if (peekNextChar(0) == '.') {
        ++CurPtr; // Eat third dot.
        return tgtok::dotdotdot;
      }
      return ReturnError(TokStart, "Invalid '..' punctuation");
    }
    return tgtok::dot;

  case '\r':
    PrintFatalError("getNextChar() must never return '\r'");
    return tgtok::Error;

  case ' ':
  case '\t':
    // Ignore whitespace.
    return LexToken(FileOrLineStart);
  case '\n':
    // Ignore whitespace, and identify the new line.
    return LexToken(true);
  case '/':
    // If this is the start of a // comment, skip until the end of the line or
    // the end of the buffer.
    if (*CurPtr == '/')
      SkipBCPLComment();
    else if (*CurPtr == '*') {
      if (SkipCComment())
        return tgtok::Error;
    } else // Otherwise, this is an error.
      return ReturnError(TokStart, "Unexpected character");
    return LexToken(FileOrLineStart);
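  // The numeric-looking cases below are disambiguated further: decimal,
  // hexadecimal, and binary literals (e.g. 42, 0x1F, 0b101) become integer
  // tokens, a bare '-' or '+' with no digit following becomes a punctuation
  // token, and digit-led identifiers such as the "8i" produced by a paste
  // like foo#8i are lexed as identifiers.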
  case '-': case '+':
  case '0': case '1': case '2': case '3': case '4': case '5': case '6':
  case '7': case '8': case '9': {
    int NextChar = 0;
    if (isdigit(CurChar)) {
      // Allow identifiers to start with a number if it is followed by
      // an identifier. This can happen with paste operations like
      // foo#8i.
      int i = 0;
      do {
        NextChar = peekNextChar(i++);
      } while (isdigit(NextChar));

      if (NextChar == 'x' || NextChar == 'b') {
        // If this is [0-9]b[01] or [0-9]x[0-9A-Fa-f] this is most
        // likely a number.
        int NextNextChar = peekNextChar(i);
        switch (NextNextChar) {
        default:
          break;
        case '0': case '1':
          if (NextChar == 'b')
            return LexNumber();
          [[fallthrough]];
        case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
          if (NextChar == 'x')
            return LexNumber();
          break;
        }
      }
    }

    if (isalpha(NextChar) || NextChar == '_')
      return LexIdentifier();

    return LexNumber();
  }
  case '"': return LexString();
  case '$': return LexVarName();
  case '[': return LexBracket();
  case '!': return LexExclaim();
  }
}

/// LexString - Lex "[^"]*"
tgtok::TokKind TGLexer::LexString() {
  const char *StrStart = CurPtr;

  CurStrVal = "";

  while (*CurPtr != '"') {
    // If we hit the end of the buffer, report an error.
    if (*CurPtr == 0 && CurPtr == CurBuf.end())
      return ReturnError(StrStart, "End of file in string literal");

    if (*CurPtr == '\n' || *CurPtr == '\r')
      return ReturnError(StrStart, "End of line in string literal");

    if (*CurPtr != '\\') {
      CurStrVal += *CurPtr++;
      continue;
    }

    ++CurPtr;

    switch (*CurPtr) {
    case '\\': case '\'': case '"':
      // These turn into their literal character.
      CurStrVal += *CurPtr++;
      break;
    case 't':
      CurStrVal += '\t';
      ++CurPtr;
      break;
    case 'n':
      CurStrVal += '\n';
      ++CurPtr;
      break;

    case '\n':
    case '\r':
      return ReturnError(CurPtr, "escaped newlines not supported in tblgen");

    // If we hit the end of the buffer, report an error.
    case '\0':
      if (CurPtr == CurBuf.end())
        return ReturnError(StrStart, "End of file in string literal");
      [[fallthrough]];
    default:
      return ReturnError(CurPtr, "invalid escape in string literal");
    }
  }

  ++CurPtr;
  return tgtok::StrVal;
}

tgtok::TokKind TGLexer::LexVarName() {
  if (!isalpha(CurPtr[0]) && CurPtr[0] != '_')
    return ReturnError(TokStart, "Invalid variable name");

  // Otherwise, we're ok, consume the rest of the characters.
  const char *VarNameStart = CurPtr++;

  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  CurStrVal.assign(VarNameStart, CurPtr);
  return tgtok::VarName;
}

tgtok::TokKind TGLexer::LexIdentifier() {
  // The first letter is [a-zA-Z_].
  const char *IdentStart = TokStart;

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  // Check to see if this identifier is a reserved keyword.
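  // For example, "def" and "class" below become keyword tokens, while a name
  // such as "MyTarget" falls through to tgtok::Id and keeps its spelling in
  // CurStrVal.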
  StringRef Str(IdentStart, CurPtr-IdentStart);

  tgtok::TokKind Kind = StringSwitch<tgtok::TokKind>(Str)
    .Case("int", tgtok::Int)
    .Case("bit", tgtok::Bit)
    .Case("bits", tgtok::Bits)
    .Case("string", tgtok::String)
    .Case("list", tgtok::List)
    .Case("code", tgtok::Code)
    .Case("dag", tgtok::Dag)
    .Case("class", tgtok::Class)
    .Case("def", tgtok::Def)
    .Case("true", tgtok::TrueVal)
    .Case("false", tgtok::FalseVal)
    .Case("foreach", tgtok::Foreach)
    .Case("defm", tgtok::Defm)
    .Case("defset", tgtok::Defset)
    .Case("multiclass", tgtok::MultiClass)
    .Case("field", tgtok::Field)
    .Case("let", tgtok::Let)
    .Case("in", tgtok::In)
    .Case("defvar", tgtok::Defvar)
    .Case("include", tgtok::Include)
    .Case("if", tgtok::If)
    .Case("then", tgtok::Then)
    .Case("else", tgtok::ElseKW)
    .Case("assert", tgtok::Assert)
    .Default(tgtok::Id);

  // A couple of tokens require special processing.
  switch (Kind) {
  case tgtok::Include:
    if (LexInclude()) return tgtok::Error;
    return Lex();
  case tgtok::Id:
    CurStrVal.assign(Str.begin(), Str.end());
    break;
  default:
    break;
  }

  return Kind;
}

/// LexInclude - We just read the "include" token. Get the string token that
/// comes next and enter the include.
bool TGLexer::LexInclude() {
  // The token after the include must be a string.
  tgtok::TokKind Tok = LexToken();
  if (Tok == tgtok::Error) return true;
  if (Tok != tgtok::StrVal) {
    PrintError(getLoc(), "Expected filename after include");
    return true;
  }

  // Get the string.
  std::string Filename = CurStrVal;
  std::string IncludedFile;

  CurBuffer = SrcMgr.AddIncludeFile(Filename, SMLoc::getFromPointer(CurPtr),
                                    IncludedFile);
  if (!CurBuffer) {
    PrintError(getLoc(), "Could not find include file '" + Filename + "'");
    return true;
  }

  Dependencies.insert(IncludedFile);
  // Save the line number and lex buffer of the includer.
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();

  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());
  return false;
}

/// SkipBCPLComment - Skip over the comment by finding the next CR or LF.
/// Or we may end up at the end of the buffer.
void TGLexer::SkipBCPLComment() {
  ++CurPtr; // skip the second slash.
  auto EOLPos = CurBuf.find_first_of("\r\n", CurPtr - CurBuf.data());
  CurPtr = (EOLPos == StringRef::npos) ? CurBuf.end() : CurBuf.data() + EOLPos;
}

/// SkipCComment - This skips C-style /**/ comments. The only difference from C
/// is that we allow nesting.
bool TGLexer::SkipCComment() {
  ++CurPtr; // skip the star.
  unsigned CommentDepth = 1;

  while (true) {
    int CurChar = getNextChar();
    switch (CurChar) {
    case EOF:
      PrintError(TokStart, "Unterminated comment!");
      return true;
    case '*':
      // End of the comment?
      if (CurPtr[0] != '/') break;

      ++CurPtr; // End the */.
      if (--CommentDepth == 0)
        return false;
      break;
    case '/':
      // Start of a nested comment?
      if (CurPtr[0] != '*') break;
      ++CurPtr;
      ++CommentDepth;
      break;
    }
  }
}

/// LexNumber - Lex:
///    [-+]?[0-9]+
///    0x[0-9a-fA-F]+
///    0b[01]+
tgtok::TokKind TGLexer::LexNumber() {
  if (CurPtr[-1] == '0') {
    if (CurPtr[0] == 'x') {
      ++CurPtr;
      const char *NumStart = CurPtr;
      while (isxdigit(CurPtr[0]))
        ++CurPtr;

      // Requires at least one hex digit.
      if (CurPtr == NumStart)
        return ReturnError(TokStart, "Invalid hexadecimal number");

      errno = 0;
      CurIntVal = strtoll(NumStart, nullptr, 16);
      if (errno == EINVAL)
        return ReturnError(TokStart, "Invalid hexadecimal number");
      if (errno == ERANGE) {
        errno = 0;
        CurIntVal = (int64_t)strtoull(NumStart, nullptr, 16);
        if (errno == EINVAL)
          return ReturnError(TokStart, "Invalid hexadecimal number");
        if (errno == ERANGE)
          return ReturnError(TokStart, "Hexadecimal number out of range");
      }
      return tgtok::IntVal;
    } else if (CurPtr[0] == 'b') {
      ++CurPtr;
      const char *NumStart = CurPtr;
      while (CurPtr[0] == '0' || CurPtr[0] == '1')
        ++CurPtr;

      // Requires at least one binary digit.
      if (CurPtr == NumStart)
        return ReturnError(CurPtr-2, "Invalid binary number");
      CurIntVal = strtoll(NumStart, nullptr, 2);
      return tgtok::BinaryIntVal;
    }
  }

  // Check for a sign without a digit.
  if (!isdigit(CurPtr[0])) {
    if (CurPtr[-1] == '-')
      return tgtok::minus;
    else if (CurPtr[-1] == '+')
      return tgtok::plus;
  }

  while (isdigit(CurPtr[0]))
    ++CurPtr;
  CurIntVal = strtoll(TokStart, nullptr, 10);
  return tgtok::IntVal;
}

/// LexBracket - We just read '['. If this is a code block, return it,
/// otherwise return the bracket. Match: '[' and '[{ ( [^}]+ | }[^]] )* }]'
tgtok::TokKind TGLexer::LexBracket() {
  if (CurPtr[0] != '{')
    return tgtok::l_square;
  ++CurPtr;
  const char *CodeStart = CurPtr;
  while (true) {
    int Char = getNextChar();
    if (Char == EOF) break;

    if (Char != '}') continue;

    Char = getNextChar();
    if (Char == EOF) break;
    if (Char == ']') {
      CurStrVal.assign(CodeStart, CurPtr-2);
      return tgtok::CodeFragment;
    }
  }

  return ReturnError(CodeStart - 2, "Unterminated code block");
}

/// LexExclaim - Lex '!' and '![a-zA-Z]+'.
tgtok::TokKind TGLexer::LexExclaim() {
  if (!isalpha(*CurPtr))
    return ReturnError(CurPtr - 1, "Invalid \"!operator\"");

  const char *Start = CurPtr++;
  while (isalpha(*CurPtr))
    ++CurPtr;

  // Check to see which operator this is.
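  // The operator name is everything after the '!', e.g. "!strconcat" yields
  // "strconcat" here; a name that matches none of the cases below is
  // reported as an unknown operator.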
  tgtok::TokKind Kind =
      StringSwitch<tgtok::TokKind>(StringRef(Start, CurPtr - Start))
          .Case("eq", tgtok::XEq)
          .Case("ne", tgtok::XNe)
          .Case("le", tgtok::XLe)
          .Case("lt", tgtok::XLt)
          .Case("ge", tgtok::XGe)
          .Case("gt", tgtok::XGt)
          .Case("if", tgtok::XIf)
          .Case("cond", tgtok::XCond)
          .Case("isa", tgtok::XIsA)
          .Case("head", tgtok::XHead)
          .Case("tail", tgtok::XTail)
          .Case("size", tgtok::XSize)
          .Case("con", tgtok::XConcat)
          .Case("dag", tgtok::XDag)
          .Case("add", tgtok::XADD)
          .Case("sub", tgtok::XSUB)
          .Case("mul", tgtok::XMUL)
          .Case("div", tgtok::XDIV)
          .Case("not", tgtok::XNOT)
          .Case("logtwo", tgtok::XLOG2)
          .Case("and", tgtok::XAND)
          .Case("or", tgtok::XOR)
          .Case("xor", tgtok::XXOR)
          .Case("shl", tgtok::XSHL)
          .Case("sra", tgtok::XSRA)
          .Case("srl", tgtok::XSRL)
          .Case("cast", tgtok::XCast)
          .Case("empty", tgtok::XEmpty)
          .Case("subst", tgtok::XSubst)
          .Case("foldl", tgtok::XFoldl)
          .Case("foreach", tgtok::XForEach)
          .Case("filter", tgtok::XFilter)
          .Case("listconcat", tgtok::XListConcat)
          .Case("listsplat", tgtok::XListSplat)
          .Case("listremove", tgtok::XListRemove)
          .Case("range", tgtok::XRange)
          .Case("strconcat", tgtok::XStrConcat)
          .Case("interleave", tgtok::XInterleave)
          .Case("substr", tgtok::XSubstr)
          .Case("find", tgtok::XFind)
          .Cases("setdagop", "setop", tgtok::XSetDagOp) // !setop is deprecated.
          .Cases("getdagop", "getop", tgtok::XGetDagOp) // !getop is deprecated.
          .Case("getdagarg", tgtok::XGetDagArg)
          .Case("getdagname", tgtok::XGetDagName)
          .Case("setdagarg", tgtok::XSetDagArg)
          .Case("setdagname", tgtok::XSetDagName)
          .Case("exists", tgtok::XExists)
          .Case("tolower", tgtok::XToLower)
          .Case("toupper", tgtok::XToUpper)
          .Default(tgtok::Error);

  return Kind != tgtok::Error ? Kind : ReturnError(Start-1, "Unknown operator");
}

bool TGLexer::prepExitInclude(bool IncludeStackMustBeEmpty) {
  // Report an error if the preprocessor control stack for the current
  // file is not empty.
  if (!PrepIncludeStack.back()->empty()) {
    prepReportPreprocessorStackError();

    return false;
  }

  // Pop the preprocessing controls from the include stack.
  if (PrepIncludeStack.empty()) {
    PrintFatalError("Preprocessor include stack is empty");
  }

  PrepIncludeStack.pop_back();

  if (IncludeStackMustBeEmpty) {
    if (!PrepIncludeStack.empty())
      PrintFatalError("Preprocessor include stack is not empty");
  } else {
    if (PrepIncludeStack.empty())
      PrintFatalError("Preprocessor include stack is empty");
  }

  return true;
}

tgtok::TokKind TGLexer::prepIsDirective() const {
  for (const auto &PD : PreprocessorDirs) {
    int NextChar = *CurPtr;
    bool Match = true;
    unsigned I = 0;
    for (; I < strlen(PD.Word); ++I) {
      if (NextChar != PD.Word[I]) {
        Match = false;
        break;
      }

      NextChar = peekNextChar(I + 1);
    }

    // Check for whitespace after the directive. If there is no whitespace,
    // then we do not recognize it as a preprocessing directive.
    if (Match) {
      tgtok::TokKind Kind = PD.Kind;

      // New line and EOF may follow only #else/#endif. It will be reported
      // as an error for #ifdef/#define after the call to prepLexMacroName().
      if (NextChar == ' ' || NextChar == '\t' || NextChar == EOF ||
          NextChar == '\n' ||
          // It looks like TableGen does not support '\r' as the actual
          // carriage return, e.g. getNextChar() treats a single '\r'
          // as '\n'. So we do the same here.
          NextChar == '\r')
        return Kind;

      // Allow comments after some directives, e.g.:
      //     #else// OR #else/**/
      //     #endif// OR #endif/**/
      //
      // Note that we do allow comments after #ifdef/#define here, e.g.
      //     #ifdef/**/ AND #ifdef//
      //     #define/**/ AND #define//
      //
      // These cases will be reported as incorrect after calling
      // prepLexMacroName(). We could have supported C-style comments
      // after #ifdef/#define, but this would complicate the code
      // for little benefit.
      if (NextChar == '/') {
        NextChar = peekNextChar(I + 1);

        if (NextChar == '*' || NextChar == '/')
          return Kind;

        // Pretend that we do not recognize the directive.
      }
    }
  }

  return tgtok::Error;
}

bool TGLexer::prepEatPreprocessorDirective(tgtok::TokKind Kind) {
  TokStart = CurPtr;

  for (const auto &PD : PreprocessorDirs)
    if (PD.Kind == Kind) {
      // Advance CurPtr to the end of the preprocessing word.
      CurPtr += strlen(PD.Word);
      return true;
    }

  PrintFatalError("Unsupported preprocessing token in "
                  "prepEatPreprocessorDirective()");
  return false;
}

tgtok::TokKind TGLexer::lexPreprocessor(
    tgtok::TokKind Kind, bool ReturnNextLiveToken) {

  // We must be looking at a preprocessing directive. Eat it!
  if (!prepEatPreprocessorDirective(Kind))
    PrintFatalError("lexPreprocessor() called for unknown "
                    "preprocessor directive");

  if (Kind == tgtok::Ifdef || Kind == tgtok::Ifndef) {
    StringRef MacroName = prepLexMacroName();
    StringRef IfTokName = Kind == tgtok::Ifdef ? "#ifdef" : "#ifndef";
    if (MacroName.empty())
      return ReturnError(TokStart, "Expected macro name after " + IfTokName);

    bool MacroIsDefined = DefinedMacros.count(MacroName) != 0;

    // Canonicalize ifndef to its ifdef equivalent.
    if (Kind == tgtok::Ifndef) {
      MacroIsDefined = !MacroIsDefined;
      Kind = tgtok::Ifdef;
    }

    // Regardless of whether we are processing tokens or not,
    // we put the #ifdef control on the stack.
    PrepIncludeStack.back()->push_back(
        {Kind, MacroIsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after " +
                                     IfTokName + " NAME");

    // If we were not processing tokens before this #ifdef,
    // then just return back to the lines skipping code.
    if (!ReturnNextLiveToken)
      return Kind;

    // If we were processing tokens before this #ifdef,
    // and the macro is defined, then just return the next token.
    if (MacroIsDefined)
      return LexToken();

    // We were processing tokens before this #ifdef, and the macro
    // is not defined, so we have to start skipping the lines.
    // If the skipping is successful, it will return the token following
    // either #else or #endif corresponding to this #ifdef.
    if (prepSkipRegion(ReturnNextLiveToken))
      return LexToken();

    return tgtok::Error;
  } else if (Kind == tgtok::Else) {
    // Check if this #else is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #else.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#else without #ifdef or #ifndef");

    PreprocessorControlDesc IfdefEntry = PrepIncludeStack.back()->back();

    if (IfdefEntry.Kind != tgtok::Ifdef) {
      PrintError(TokStart, "double #else");
      return ReturnError(IfdefEntry.SrcPos, "Previous #else is here");
    }

    // Replace the corresponding #ifdef's control with its negation
    // on the control stack.
    PrepIncludeStack.back()->pop_back();
    PrepIncludeStack.back()->push_back(
        {Kind, !IfdefEntry.IsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after #else");

    // If we were processing tokens before this #else,
    // we have to start skipping lines until the matching #endif.
    if (ReturnNextLiveToken) {
      if (prepSkipRegion(ReturnNextLiveToken))
        return LexToken();

      return tgtok::Error;
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Endif) {
    // Check if this #endif is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #endif.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#endif without #ifdef");

    auto &IfdefOrElseEntry = PrepIncludeStack.back()->back();

    if (IfdefOrElseEntry.Kind != tgtok::Ifdef &&
        IfdefOrElseEntry.Kind != tgtok::Else) {
      PrintFatalError("Invalid preprocessor control on the stack");
      return tgtok::Error;
    }

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after #endif");

    PrepIncludeStack.back()->pop_back();

    // If we were processing tokens before this #endif, then
    // we should continue doing so.
    if (ReturnNextLiveToken) {
      return LexToken();
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Define) {
    StringRef MacroName = prepLexMacroName();
    if (MacroName.empty())
      return ReturnError(TokStart, "Expected macro name after #define");

    if (!DefinedMacros.insert(MacroName).second)
      PrintWarning(getLoc(),
                   "Duplicate definition of macro: " + Twine(MacroName));

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr,
                         "Only comments are supported after #define NAME");

    if (!ReturnNextLiveToken) {
      PrintFatalError("#define must be ignored during the lines skipping");
      return tgtok::Error;
    }

    return LexToken();
  }

  PrintFatalError("Preprocessing directive is not supported");
  return tgtok::Error;
}

bool TGLexer::prepSkipRegion(bool MustNeverBeFalse) {
  if (!MustNeverBeFalse)
    PrintFatalError("Invalid recursion.");

  do {
    // Skip all symbols to the line end.
    prepSkipToLineEnd();

    // Find the first non-whitespace symbol in the next line(s).
    if (!prepSkipLineBegin())
      return false;

    // If the first non-blank/comment symbol on the line is '#',
    // it may be the start of a preprocessing directive.
    //
    // If it is not '#' just go to the next line.
    if (*CurPtr == '#')
      ++CurPtr;
    else
      continue;

    tgtok::TokKind Kind = prepIsDirective();

    // If we did not find a preprocessing directive or it is #define,
    // then just skip to the next line. We do not have to do anything
    // for #define in the line-skipping mode.
    if (Kind == tgtok::Error || Kind == tgtok::Define)
      continue;

    tgtok::TokKind ProcessedKind = lexPreprocessor(Kind, false);

    // If lexPreprocessor() encountered an error during lexing this
    // preprocessor idiom, then return false to the calling lexPreprocessor().
    // This will force tgtok::Error to be returned to the tokens processing.
    if (ProcessedKind == tgtok::Error)
      return false;

    if (Kind != ProcessedKind)
      PrintFatalError("prepIsDirective() and lexPreprocessor() "
                      "returned different token kinds");

    // If this preprocessing directive enables tokens processing,
    // then return to the lexPreprocessor() and get to the next token.
    // We can move from line-skipping mode to processing tokens only
    // due to #else or #endif.
    if (prepIsProcessingEnabled()) {
      if (Kind != tgtok::Else && Kind != tgtok::Endif) {
        PrintFatalError("Tokens processing was enabled by an unexpected "
                        "preprocessing directive");
        return false;
      }

      return true;
    }
  } while (CurPtr != CurBuf.end());

  // We have reached the end of the file, but never left the lines-skipping
  // mode. This means there is no matching #endif.
  prepReportPreprocessorStackError();
  return false;
}

StringRef TGLexer::prepLexMacroName() {
  // Skip whitespaces between the preprocessing directive and the macro name.
  while (*CurPtr == ' ' || *CurPtr == '\t')
    ++CurPtr;

  TokStart = CurPtr;
  // Macro names start with [a-zA-Z_].
  if (*CurPtr != '_' && !isalpha(*CurPtr))
    return "";

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  return StringRef(TokStart, CurPtr - TokStart);
}

bool TGLexer::prepSkipLineBegin() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
    case '\n':
    case '\r':
      break;

    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '*') {
        // Skip C-style comment.
        // Note that we do not care about skipping the C++-style comments.
        // If the line contains "//", it may not contain any processable
        // preprocessing directive. Just return CurPtr pointing to
        // the first '/' in this case. We also do not care about
        // incorrect symbols after the first '/' - we are in lines-skipping
        // mode, so incorrect code is allowed to some extent.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printing in case of error in SkipCComment().
        TokStart = CurPtr;

        // CurPtr must point to '*' before call to SkipCComment().
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        // CurPtr points to the non-whitespace '/'.
        return true;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }

    default:
      return true;
    }

    ++CurPtr;
  }

  // We have reached the end of the file. Return to the lines skipping
  // code, and allow it to handle the EOF as needed.
  return true;
}

bool TGLexer::prepSkipDirectiveEnd() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
      break;

    case '\n':
    case '\r':
      return true;

    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '/') {
        // Skip C++-style comment.
        // We may just return true now, but let's skip to the line/buffer end
        // to simplify the method specification.
        ++CurPtr;
        SkipBCPLComment();
      } else if (NextChar == '*') {
        // When we are skipping a C-style comment at the end of a preprocessing
        // directive, we can skip several lines. If any meaningful TD token
        // follows the end of the C-style comment on the same line, it will
        // be considered an invalid usage of a TD token.
        // For example, we want to forbid usages like this one:
        //     #define MACRO class Class {}
        // But with C-style comments we also disallow the following:
        //     #define MACRO /* This macro is used
        //                      to ... */ class Class {}
        // One can argue that this should be allowed, but it does not seem
        // to be worth the complication. Moreover, this matches
        // the C preprocessor behavior.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printing in case of error in SkipCComment().
        TokStart = CurPtr;
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        TokStart = CurPtr;
        PrintError(CurPtr, "Unexpected character");
        return false;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }

    default:
      // Do not allow any non-whitespaces after the directive.
      TokStart = CurPtr;
      return false;
    }

    ++CurPtr;
  }

  return true;
}

void TGLexer::prepSkipToLineEnd() {
  while (*CurPtr != '\n' && *CurPtr != '\r' && CurPtr != CurBuf.end())
    ++CurPtr;
}

bool TGLexer::prepIsProcessingEnabled() {
  for (const PreprocessorControlDesc &I :
       llvm::reverse(*PrepIncludeStack.back()))
    if (!I.IsDefined)
      return false;

  return true;
}

void TGLexer::prepReportPreprocessorStackError() {
  if (PrepIncludeStack.back()->empty())
    PrintFatalError("prepReportPreprocessorStackError() called with "
                    "empty control stack");

  auto &PrepControl = PrepIncludeStack.back()->back();
  PrintError(CurBuf.end(), "Reached EOF without matching #endif");
  PrintError(PrepControl.SrcPos, "The latest preprocessor control is here");

  TokStart = CurPtr;
}
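
// A minimal usage sketch (illustrative only; it assumes the TGLexer and
// tgtok declarations from TGLexer.h and a MemoryBuffer already loaded by
// the caller): a driver owns a SourceMgr, constructs a TGLexer over the
// main buffer, and pulls tokens until tgtok::Eof or tgtok::Error:
//
//   llvm::SourceMgr SrcMgr;
//   SrcMgr.AddNewSourceBuffer(std::move(Buffer), llvm::SMLoc());
//   TGLexer Lexer(SrcMgr, /*Macros=*/{});
//   for (tgtok::TokKind K = Lexer.Lex();
//        K != tgtok::Eof && K != tgtok::Error; K = Lexer.Lex()) {
//     // Consume the token K.
//   }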