//===- Lexer.cpp - C Language Family Lexer --------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Lexer and Token interfaces.
//
//===----------------------------------------------------------------------===//

#include "clang/Lex/Lexer.h"
#include "UnicodeCharSets.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/MultipleIncludeOpt.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/Support/NativeFormatting.h"
#include "llvm/Support/Unicode.h"
#include "llvm/Support/UnicodeCharRanges.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;

//===----------------------------------------------------------------------===//
// Token Class Implementation
//===----------------------------------------------------------------------===//

/// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
  if (isAnnotation())
    return false;
  if (IdentifierInfo *II = getIdentifierInfo())
    return II->getObjCKeywordID() == objcKey;
  return false;
}

/// getObjCKeywordID - Return the ObjC keyword kind.
tok::ObjCKeywordKind Token::getObjCKeywordID() const {
  if (isAnnotation())
    return tok::objc_not_keyword;
  IdentifierInfo *specId = getIdentifierInfo();
  return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
}

//===----------------------------------------------------------------------===//
// Lexer Class Implementation
//===----------------------------------------------------------------------===//

void Lexer::anchor() {}

void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
                      const char *BufEnd) {
  BufferStart = BufStart;
  BufferPtr = BufPtr;
  BufferEnd = BufEnd;

  assert(BufEnd[0] == 0 &&
         "We assume that the input buffer has a null character at the end"
         " to simplify lexing!");

  // Check whether we have a BOM in the beginning of the buffer. If yes - act
  // accordingly. Right now we support only UTF-8 with and without BOM, so, just
  // skip the UTF-8 BOM if it's present.
  if (BufferStart == BufferPtr) {
    // Determine the size of the BOM.
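    // Only the UTF-8 BOM (the byte sequence EF BB BF) is recognized here: for
    // example, a buffer beginning with those three bytes lexes identically to
    // one without them.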
    StringRef Buf(BufferStart, BufferEnd - BufferStart);
    size_t BOMLength = llvm::StringSwitch<size_t>(Buf)
                           .StartsWith("\xEF\xBB\xBF", 3) // UTF-8 BOM
                           .Default(0);

    // Skip the BOM.
    BufferPtr += BOMLength;
  }

  Is_PragmaLexer = false;
  CurrentConflictMarkerState = CMK_None;

  // Start of the file is a start of line.
  IsAtStartOfLine = true;
  IsAtPhysicalStartOfLine = true;

  HasLeadingSpace = false;
  HasLeadingEmptyMacro = false;

  // We are not after parsing a #.
  ParsingPreprocessorDirective = false;

  // We are not after parsing #include.
  ParsingFilename = false;

  // We are not in raw mode. Raw mode disables diagnostics and interpretation
  // of tokens (e.g. identifiers, thus disabling macro expansion). It is used
  // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block
  // or otherwise skipping over tokens.
  LexingRawMode = false;

  // Default to not keeping comments.
  ExtendedTokenMode = 0;

  NewLinePtr = nullptr;
}

/// Lexer constructor - Create a new lexer object for the specified buffer
/// with the specified preprocessor managing the lexing process. This lexer
/// assumes that the associated file buffer and Preprocessor objects will
/// outlive it, so it doesn't take ownership of either of them.
Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &InputFile,
             Preprocessor &PP, bool IsFirstIncludeOfFile)
    : PreprocessorLexer(&PP, FID),
      FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
      LangOpts(PP.getLangOpts()), LineComment(LangOpts.LineComment),
      IsFirstTimeLexingFile(IsFirstIncludeOfFile) {
  InitLexer(InputFile.getBufferStart(), InputFile.getBufferStart(),
            InputFile.getBufferEnd());

  resetExtendedTokenMode();
}

/// Lexer constructor - Create a new raw lexer object. This object is only
/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts,
             const char *BufStart, const char *BufPtr, const char *BufEnd,
             bool IsFirstIncludeOfFile)
    : FileLoc(fileloc), LangOpts(langOpts), LineComment(LangOpts.LineComment),
      IsFirstTimeLexingFile(IsFirstIncludeOfFile) {
  InitLexer(BufStart, BufPtr, BufEnd);

  // We *are* in raw mode.
  LexingRawMode = true;
}

/// Lexer constructor - Create a new raw lexer object. This object is only
/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &FromFile,
             const SourceManager &SM, const LangOptions &langOpts,
             bool IsFirstIncludeOfFile)
    : Lexer(SM.getLocForStartOfFile(FID), langOpts, FromFile.getBufferStart(),
            FromFile.getBufferStart(), FromFile.getBufferEnd(),
            IsFirstIncludeOfFile) {}

void Lexer::resetExtendedTokenMode() {
  assert(PP && "Cannot reset token mode without a preprocessor");
  if (LangOpts.TraditionalCPP)
    SetKeepWhitespaceMode(true);
  else
    SetCommentRetentionState(PP->getCommentRetentionState());
}
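// A minimal sketch of driving a raw lexer over a buffer (illustrative only;
// `SM`, `FID`, `Opts`, and `Buf` are assumed to be in scope):
//
//   Lexer RawLex(SM.getLocForStartOfFile(FID), Opts,
//                Buf.begin(), Buf.begin(), Buf.end());
//   Token Tok;
//   do {
//     RawLex.LexFromRawLexer(Tok);
//     // ... inspect Tok ...
//   } while (Tok.isNot(tok::eof));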
/// Create_PragmaLexer: Lexer constructor - Create a new lexer object for
/// _Pragma expansion. This has a variety of magic semantics that this method
/// sets up. It returns a new'd Lexer that must be delete'd when done.
///
/// On entrance to this routine, TokStartLoc is a macro location which has a
/// spelling loc that indicates the bytes to be lexed for the token and an
/// expansion location that indicates where all lexed tokens should be
/// "expanded from".
///
/// TODO: It would really be nice to make _Pragma just be a wrapper around a
/// normal lexer that remaps tokens as they fly by. This would require making
/// Preprocessor::Lex virtual. Given that, we could just dump in a magic lexer
/// interface that could handle this stuff. This would pull GetMappedTokenLoc
/// out of the critical path of the lexer!
///
Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
                                 SourceLocation ExpansionLocStart,
                                 SourceLocation ExpansionLocEnd,
                                 unsigned TokLen, Preprocessor &PP) {
  SourceManager &SM = PP.getSourceManager();

  // Create the lexer as if we were going to lex the file normally.
  FileID SpellingFID = SM.getFileID(SpellingLoc);
  llvm::MemoryBufferRef InputFile = SM.getBufferOrFake(SpellingFID);
  Lexer *L = new Lexer(SpellingFID, InputFile, PP);

  // Now that the lexer is created, change the start/end locations so that we
  // just lex the subsection of the file that we want. This is lexing from a
  // scratch buffer.
  const char *StrData = SM.getCharacterData(SpellingLoc);

  L->BufferPtr = StrData;
  L->BufferEnd = StrData+TokLen;
  assert(L->BufferEnd[0] == 0 && "Buffer is not nul terminated!");

  // Set the SourceLocation with the remapping information. This ensures that
  // GetMappedTokenLoc will remap the tokens as they are lexed.
  L->FileLoc = SM.createExpansionLoc(SM.getLocForStartOfFile(SpellingFID),
                                     ExpansionLocStart,
                                     ExpansionLocEnd, TokLen);

  // Ensure that the lexer thinks it is inside a directive, so that end \n will
  // return an EOD token.
  L->ParsingPreprocessorDirective = true;

  // This lexer really is for _Pragma.
  L->Is_PragmaLexer = true;
  return L;
}

void Lexer::seek(unsigned Offset, bool IsAtStartOfLine) {
  this->IsAtPhysicalStartOfLine = IsAtStartOfLine;
  this->IsAtStartOfLine = IsAtStartOfLine;
  assert((BufferStart + Offset) <= BufferEnd);
  BufferPtr = BufferStart + Offset;
}

template <typename T> static void StringifyImpl(T &Str, char Quote) {
  typename T::size_type i = 0, e = Str.size();
  while (i < e) {
    if (Str[i] == '\\' || Str[i] == Quote) {
      Str.insert(Str.begin() + i, '\\');
      i += 2;
      ++e;
    } else if (Str[i] == '\n' || Str[i] == '\r') {
      // Replace '\r\n' and '\n\r' with '\\' followed by 'n'.
      if ((i < e - 1) && (Str[i + 1] == '\n' || Str[i + 1] == '\r') &&
          Str[i] != Str[i + 1]) {
        Str[i] = '\\';
        Str[i + 1] = 'n';
      } else {
        // Replace '\n' and '\r' with '\\' followed by 'n'.
        Str[i] = '\\';
        Str.insert(Str.begin() + i + 1, 'n');
        ++e;
      }
      i += 2;
    } else
      ++i;
  }
}

std::string Lexer::Stringify(StringRef Str, bool Charify) {
  std::string Result = std::string(Str);
  char Quote = Charify ? '\'' : '"';
  StringifyImpl(Result, Quote);
  return Result;
}

void Lexer::Stringify(SmallVectorImpl<char> &Str) { StringifyImpl(Str, '"'); }
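// For example, the three-character input a"b becomes the four characters
// a\"b, and an embedded newline or carriage return is rewritten to the two
// characters \n, so the result can be spliced into a string (or, with
// Charify, a character) literal.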
//===----------------------------------------------------------------------===//
// Token Spelling
//===----------------------------------------------------------------------===//

/// Slow case of getSpelling. Extract the characters comprising the
/// spelling of this token from the provided input buffer.
static size_t getSpellingSlow(const Token &Tok, const char *BufPtr,
                              const LangOptions &LangOpts, char *Spelling) {
  assert(Tok.needsCleaning() && "getSpellingSlow called on simple token");

  size_t Length = 0;
  const char *BufEnd = BufPtr + Tok.getLength();

  if (tok::isStringLiteral(Tok.getKind())) {
    // Munch the encoding-prefix and opening double-quote.
    while (BufPtr < BufEnd) {
      unsigned Size;
      Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
      BufPtr += Size;

      if (Spelling[Length - 1] == '"')
        break;
    }

    // Raw string literals need special handling; trigraph expansion and line
    // splicing do not occur within their d-char-sequence nor within their
    // r-char-sequence.
    if (Length >= 2 &&
        Spelling[Length - 2] == 'R' && Spelling[Length - 1] == '"') {
      // Search backwards from the end of the token to find the matching closing
      // quote.
      const char *RawEnd = BufEnd;
      do --RawEnd; while (*RawEnd != '"');
      size_t RawLength = RawEnd - BufPtr + 1;

      // Everything between the quotes is included verbatim in the spelling.
      memcpy(Spelling + Length, BufPtr, RawLength);
      Length += RawLength;
      BufPtr += RawLength;

      // The rest of the token is lexed normally.
    }
  }

  while (BufPtr < BufEnd) {
    unsigned Size;
    Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
    BufPtr += Size;
  }

  assert(Length < Tok.getLength() &&
         "NeedsCleaning flag set on token that didn't need cleaning!");
  return Length;
}

/// getSpelling() - Return the 'spelling' of this token. The spelling of a
/// token is the characters used to represent the token in the source file
/// after trigraph expansion and escaped-newline folding. In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs,
/// UCNs, etc.
StringRef Lexer::getSpelling(SourceLocation loc,
                             SmallVectorImpl<char> &buffer,
                             const SourceManager &SM,
                             const LangOptions &options,
                             bool *invalid) {
  // Break down the source location.
  std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);

  // Try to load the file buffer.
  bool invalidTemp = false;
  StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
  if (invalidTemp) {
    if (invalid) *invalid = true;
    return {};
  }

  const char *tokenBegin = file.data() + locInfo.second;

  // Lex from the start of the given location.
  Lexer lexer(SM.getLocForStartOfFile(locInfo.first), options,
              file.begin(), tokenBegin, file.end());
  Token token;
  lexer.LexFromRawLexer(token);

  unsigned length = token.getLength();

  // Common case: no need for cleaning.
  if (!token.needsCleaning())
    return StringRef(tokenBegin, length);

  // Hard case, we need to relex the characters into the string.
  buffer.resize(length);
  buffer.resize(getSpellingSlow(token, tokenBegin, options, buffer.data()));
  return StringRef(buffer.data(), buffer.size());
}
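// A typical call pattern for the overload above (sketch; `Loc`, `SM`, and
// `Opts` are assumed to be in scope):
//
//   SmallString<32> Buffer;
//   bool Invalid = false;
//   StringRef Spelling = Lexer::getSpelling(Loc, Buffer, SM, Opts, &Invalid);
//
// The result points into the source buffer in the common case, and into
// `Buffer` only when the token needed cleaning.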
/// getSpelling() - Return the 'spelling' of this token. The spelling of a
/// token is the characters used to represent the token in the source file
/// after trigraph expansion and escaped-newline folding. In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs,
/// UCNs, etc.
std::string Lexer::getSpelling(const Token &Tok, const SourceManager &SourceMgr,
                               const LangOptions &LangOpts, bool *Invalid) {
  assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");

  bool CharDataInvalid = false;
  const char *TokStart = SourceMgr.getCharacterData(Tok.getLocation(),
                                                    &CharDataInvalid);
  if (Invalid)
    *Invalid = CharDataInvalid;
  if (CharDataInvalid)
    return {};

  // If this token contains nothing interesting, return it directly.
  if (!Tok.needsCleaning())
    return std::string(TokStart, TokStart + Tok.getLength());

  std::string Result;
  Result.resize(Tok.getLength());
  Result.resize(getSpellingSlow(Tok, TokStart, LangOpts, &*Result.begin()));
  return Result;
}

/// getSpelling - This method is used to get the spelling of a token into a
/// preallocated buffer, instead of as an std::string. The caller is required
/// to allocate enough space for the token; Tok.getLength() bytes is always
/// sufficient. The actual length of the token is returned.
///
/// Note that this method may do two possible things: it may either fill in
/// the buffer specified with characters, or it may *change the input pointer*
/// to point to a constant buffer with the data already in it (avoiding a
/// copy). The caller is not allowed to modify the returned buffer pointer
/// if an internal buffer is returned.
unsigned Lexer::getSpelling(const Token &Tok, const char *&Buffer,
                            const SourceManager &SourceMgr,
                            const LangOptions &LangOpts, bool *Invalid) {
  assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");

  const char *TokStart = nullptr;
  // NOTE: this has to be checked *before* testing for an IdentifierInfo.
  if (Tok.is(tok::raw_identifier))
    TokStart = Tok.getRawIdentifier().data();
  else if (!Tok.hasUCN()) {
    if (const IdentifierInfo *II = Tok.getIdentifierInfo()) {
      // Just return the string from the identifier table, which is very quick.
      Buffer = II->getNameStart();
      return II->getLength();
    }
  }

  // NOTE: this can be checked even after testing for an IdentifierInfo.
  if (Tok.isLiteral())
    TokStart = Tok.getLiteralData();

  if (!TokStart) {
    // Compute the start of the token in the input lexer buffer.
    bool CharDataInvalid = false;
    TokStart = SourceMgr.getCharacterData(Tok.getLocation(), &CharDataInvalid);
    if (Invalid)
      *Invalid = CharDataInvalid;
    if (CharDataInvalid) {
      Buffer = "";
      return 0;
    }
  }

  // If this token contains nothing interesting, return it directly.
  if (!Tok.needsCleaning()) {
    Buffer = TokStart;
    return Tok.getLength();
  }

  // Otherwise, hard case, relex the characters into the string.
  return getSpellingSlow(Tok, TokStart, LangOpts, const_cast<char*>(Buffer));
}
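// Sketch of the caller-allocated-buffer contract described above (names are
// illustrative):
//
//   SmallString<64> Storage;
//   Storage.resize(Tok.getLength());
//   const char *Ptr = Storage.data();
//   unsigned Len = Lexer::getSpelling(Tok, Ptr, SM, LangOpts);
//   StringRef Spelling(Ptr, Len); // Ptr may now point at an internal buffer.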
/// MeasureTokenLength - Relex the token at the specified location and return
/// its length in bytes in the input file. If the token needs cleaning (e.g.
/// includes a trigraph or an escaped newline) then this count includes bytes
/// that are part of that.
unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
                                   const SourceManager &SM,
                                   const LangOptions &LangOpts) {
  Token TheTok;
  if (getRawToken(Loc, TheTok, SM, LangOpts))
    return 0;
  return TheTok.getLength();
}

/// Relex the token at the specified location.
/// \returns true if there was a failure, false on success.
bool Lexer::getRawToken(SourceLocation Loc, Token &Result,
                        const SourceManager &SM,
                        const LangOptions &LangOpts,
                        bool IgnoreWhiteSpace) {
  // TODO: this could be special cased for common tokens like identifiers, ')',
  // etc to make this faster, if it mattered. Just look at StrData[0] to handle
  // all obviously single-char tokens. This could use
  // Lexer::isObviouslySimpleCharacter for example to handle identifiers or
  // something.

  // If this comes from a macro expansion, we really do want the macro name, not
  // the token this macro expanded to.
  Loc = SM.getExpansionLoc(Loc);
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  bool Invalid = false;
  StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid)
    return true;

  const char *StrData = Buffer.data()+LocInfo.second;

  if (!IgnoreWhiteSpace && isWhitespace(StrData[0]))
    return true;

  // Create a lexer starting at the beginning of this token.
  Lexer TheLexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts,
                 Buffer.begin(), StrData, Buffer.end());
  TheLexer.SetCommentRetentionState(true);
  TheLexer.LexFromRawLexer(Result);
  return false;
}

/// Returns the pointer that points to the beginning of the line that contains
/// the given offset, or null if the offset is invalid.
static const char *findBeginningOfLine(StringRef Buffer, unsigned Offset) {
  const char *BufStart = Buffer.data();
  if (Offset >= Buffer.size())
    return nullptr;

  const char *LexStart = BufStart + Offset;
  for (; LexStart != BufStart; --LexStart) {
    if (isVerticalWhitespace(LexStart[0]) &&
        !Lexer::isNewLineEscaped(BufStart, LexStart)) {
      // LexStart should point at first character of logical line.
      ++LexStart;
      break;
    }
  }
  return LexStart;
}

static SourceLocation getBeginningOfFileToken(SourceLocation Loc,
                                              const SourceManager &SM,
                                              const LangOptions &LangOpts) {
  assert(Loc.isFileID());
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  if (LocInfo.first.isInvalid())
    return Loc;

  bool Invalid = false;
  StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid)
    return Loc;

  // Back up from the current location until we hit the beginning of a line
  // (or the buffer). We'll relex from that point.
  const char *StrData = Buffer.data() + LocInfo.second;
  const char *LexStart = findBeginningOfLine(Buffer, LocInfo.second);
  if (!LexStart || LexStart == StrData)
    return Loc;

  // Create a lexer starting at the beginning of this token.
  SourceLocation LexerStartLoc = Loc.getLocWithOffset(-LocInfo.second);
  Lexer TheLexer(LexerStartLoc, LangOpts, Buffer.data(), LexStart,
                 Buffer.end());
  TheLexer.SetCommentRetentionState(true);

  // Lex tokens until we find the token that contains the source location.
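  // (For example, if Loc points at the 'z' of 'barbaz' in "foo barbaz", the
  // relex below produces 'foo' and then 'barbaz', and we return the location
  // of the 'b' where 'barbaz' begins.)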
  Token TheTok;
  do {
    TheLexer.LexFromRawLexer(TheTok);

    if (TheLexer.getBufferLocation() > StrData) {
      // Lexing this token has taken the lexer past the source location we're
      // looking for. If the current token encompasses our source location,
      // return the beginning of that token.
      if (TheLexer.getBufferLocation() - TheTok.getLength() <= StrData)
        return TheTok.getLocation();

      // We ended up skipping over the source location entirely, which means
      // that it points into whitespace. We're done here.
      break;
    }
  } while (TheTok.getKind() != tok::eof);

  // We've passed our source location; just return the original source location.
  return Loc;
}

SourceLocation Lexer::GetBeginningOfToken(SourceLocation Loc,
                                          const SourceManager &SM,
                                          const LangOptions &LangOpts) {
  if (Loc.isFileID())
    return getBeginningOfFileToken(Loc, SM, LangOpts);

  if (!SM.isMacroArgExpansion(Loc))
    return Loc;

  SourceLocation FileLoc = SM.getSpellingLoc(Loc);
  SourceLocation BeginFileLoc = getBeginningOfFileToken(FileLoc, SM, LangOpts);
  std::pair<FileID, unsigned> FileLocInfo = SM.getDecomposedLoc(FileLoc);
  std::pair<FileID, unsigned> BeginFileLocInfo =
      SM.getDecomposedLoc(BeginFileLoc);
  assert(FileLocInfo.first == BeginFileLocInfo.first &&
         FileLocInfo.second >= BeginFileLocInfo.second);
  return Loc.getLocWithOffset(BeginFileLocInfo.second - FileLocInfo.second);
}

namespace {

enum PreambleDirectiveKind {
  PDK_Skipped,
  PDK_Unknown
};

} // namespace

PreambleBounds Lexer::ComputePreamble(StringRef Buffer,
                                      const LangOptions &LangOpts,
                                      unsigned MaxLines) {
  // Create a lexer starting at the beginning of the file. Note that we use a
  // "fake" file source location at offset 1 so that the lexer will track our
  // position within the file.
  const SourceLocation::UIntTy StartOffset = 1;
  SourceLocation FileLoc = SourceLocation::getFromRawEncoding(StartOffset);
  Lexer TheLexer(FileLoc, LangOpts, Buffer.begin(), Buffer.begin(),
                 Buffer.end());
  TheLexer.SetCommentRetentionState(true);

  bool InPreprocessorDirective = false;
  Token TheTok;
  SourceLocation ActiveCommentLoc;

  unsigned MaxLineOffset = 0;
  if (MaxLines) {
    const char *CurPtr = Buffer.begin();
    unsigned CurLine = 0;
    while (CurPtr != Buffer.end()) {
      char ch = *CurPtr++;
      if (ch == '\n') {
        ++CurLine;
        if (CurLine == MaxLines)
          break;
      }
    }
    if (CurPtr != Buffer.end())
      MaxLineOffset = CurPtr - Buffer.begin();
  }

  do {
    TheLexer.LexFromRawLexer(TheTok);

    if (InPreprocessorDirective) {
      // If we've hit the end of the file, we're done.
      if (TheTok.getKind() == tok::eof) {
        break;
      }

      // If we haven't hit the end of the preprocessor directive, skip this
      // token.
      if (!TheTok.isAtStartOfLine())
        continue;

      // We've passed the end of the preprocessor directive, and will look
      // at this token again below.
      InPreprocessorDirective = false;
    }

    // Keep track of the # of lines in the preamble.
    if (TheTok.isAtStartOfLine()) {
      unsigned TokOffset = TheTok.getLocation().getRawEncoding() - StartOffset;

      // If we were asked to limit the number of lines in the preamble,
      // and we're about to exceed that limit, we're done.
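      // (TokOffset and MaxLineOffset are byte offsets; MaxLineOffset was
      // computed above as the offset just past the MaxLines'th newline.)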
      if (MaxLineOffset && TokOffset >= MaxLineOffset)
        break;
    }

    // Comments are okay; skip over them.
    if (TheTok.getKind() == tok::comment) {
      if (ActiveCommentLoc.isInvalid())
        ActiveCommentLoc = TheTok.getLocation();
      continue;
    }

    if (TheTok.isAtStartOfLine() && TheTok.getKind() == tok::hash) {
      // This is the start of a preprocessor directive.
      Token HashTok = TheTok;
      InPreprocessorDirective = true;
      ActiveCommentLoc = SourceLocation();

      // Figure out which directive this is. Since we're lexing raw tokens,
      // we don't have an identifier table available. Instead, just look at
      // the raw identifier to recognize and categorize preprocessor directives.
      TheLexer.LexFromRawLexer(TheTok);
      if (TheTok.getKind() == tok::raw_identifier && !TheTok.needsCleaning()) {
        StringRef Keyword = TheTok.getRawIdentifier();
        PreambleDirectiveKind PDK
          = llvm::StringSwitch<PreambleDirectiveKind>(Keyword)
              .Case("include", PDK_Skipped)
              .Case("__include_macros", PDK_Skipped)
              .Case("define", PDK_Skipped)
              .Case("undef", PDK_Skipped)
              .Case("line", PDK_Skipped)
              .Case("error", PDK_Skipped)
              .Case("pragma", PDK_Skipped)
              .Case("import", PDK_Skipped)
              .Case("include_next", PDK_Skipped)
              .Case("warning", PDK_Skipped)
              .Case("ident", PDK_Skipped)
              .Case("sccs", PDK_Skipped)
              .Case("assert", PDK_Skipped)
              .Case("unassert", PDK_Skipped)
              .Case("if", PDK_Skipped)
              .Case("ifdef", PDK_Skipped)
              .Case("ifndef", PDK_Skipped)
              .Case("elif", PDK_Skipped)
              .Case("elifdef", PDK_Skipped)
              .Case("elifndef", PDK_Skipped)
              .Case("else", PDK_Skipped)
              .Case("endif", PDK_Skipped)
              .Default(PDK_Unknown);

        switch (PDK) {
        case PDK_Skipped:
          continue;

        case PDK_Unknown:
          // We don't know what this directive is; stop at the '#'.
          break;
        }
      }

      // We only end up here if we didn't recognize the preprocessor
      // directive or it was one that can't occur in the preamble at this
      // point. Roll back the current token to the location of the '#'.
      TheTok = HashTok;
    }

    // We hit a token that we don't recognize as being in the
    // "preprocessing only" part of the file, so we're no longer in
    // the preamble.
    break;
  } while (true);

  SourceLocation End;
  if (ActiveCommentLoc.isValid())
    End = ActiveCommentLoc; // don't truncate a decl comment.
  else
    End = TheTok.getLocation();

  return PreambleBounds(End.getRawEncoding() - FileLoc.getRawEncoding(),
                        TheTok.isAtStartOfLine());
}

unsigned Lexer::getTokenPrefixLength(SourceLocation TokStart, unsigned CharNo,
                                     const SourceManager &SM,
                                     const LangOptions &LangOpts) {
  // Figure out how many physical characters away the specified expansion
  // character is. This needs to take into consideration newlines and
  // trigraphs.
  bool Invalid = false;
  const char *TokPtr = SM.getCharacterData(TokStart, &Invalid);

  // If they request the first char of the token, we're trivially done.
  if (Invalid || (CharNo == 0 && Lexer::isObviouslySimpleCharacter(*TokPtr)))
    return 0;

  unsigned PhysOffset = 0;

  // The usual case is that tokens don't contain anything interesting. Skip
  // over the uninteresting characters. If a token only consists of simple
  // chars, this method is extremely fast.
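  // (For instance, an identifier such as 'simple_name' consists only of
  // "obviously simple" characters, so each iteration below just advances one
  // byte.)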
  while (Lexer::isObviouslySimpleCharacter(*TokPtr)) {
    if (CharNo == 0)
      return PhysOffset;
    ++TokPtr;
    --CharNo;
    ++PhysOffset;
  }

  // If we have a character that may be a trigraph or escaped newline, use a
  // lexer to parse it correctly.
  for (; CharNo; --CharNo) {
    unsigned Size;
    Lexer::getCharAndSizeNoWarn(TokPtr, Size, LangOpts);
    TokPtr += Size;
    PhysOffset += Size;
  }

  // Final detail: if we end up on an escaped newline, we want to return the
  // location of the actual byte of the token. For example foo\<newline>bar
  // advanced by 3 should return the location of b, not of \\. One compounding
  // detail of this is that the escape may be made by a trigraph.
  if (!Lexer::isObviouslySimpleCharacter(*TokPtr))
    PhysOffset += Lexer::SkipEscapedNewLines(TokPtr)-TokPtr;

  return PhysOffset;
}

/// Computes the source location just past the end of the
/// token at this source location.
///
/// This routine can be used to produce a source location that
/// points just past the end of the token referenced by \p Loc, and
/// is generally used when a diagnostic needs to point just after a
/// token where it expected something different from what it received. If
/// the returned source location would not be meaningful (e.g., if
/// it points into a macro), this routine returns an invalid
/// source location.
///
/// \param Offset an offset from the end of the token, where the source
/// location should refer to. The default offset (0) produces a source
/// location pointing just past the end of the token; an offset of 1 produces
/// a source location pointing to the last character in the token, etc.
SourceLocation Lexer::getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
                                          const SourceManager &SM,
                                          const LangOptions &LangOpts) {
  if (Loc.isInvalid())
    return {};

  if (Loc.isMacroID()) {
    if (Offset > 0 || !isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
      return {}; // Points inside the macro expansion.
  }

  unsigned Len = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
  if (Len > Offset)
    Len = Len - Offset;
  else
    return Loc;

  return Loc.getLocWithOffset(Len);
}

/// Returns true if the given MacroID location points at the first
/// token of the macro expansion.
bool Lexer::isAtStartOfMacroExpansion(SourceLocation loc,
                                      const SourceManager &SM,
                                      const LangOptions &LangOpts,
                                      SourceLocation *MacroBegin) {
  assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");

  SourceLocation expansionLoc;
  if (!SM.isAtStartOfImmediateMacroExpansion(loc, &expansionLoc))
    return false;

  if (expansionLoc.isFileID()) {
    // No other macro expansions, this is the first.
    if (MacroBegin)
      *MacroBegin = expansionLoc;
    return true;
  }

  return isAtStartOfMacroExpansion(expansionLoc, SM, LangOpts, MacroBegin);
}
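// For example, given
//   #define FOO 1 + 2
//   int x = FOO;
// the three expanded tokens share one expansion range; of their locations,
// only that of '1' satisfies isAtStartOfMacroExpansion, and only that of '2'
// satisfies isAtEndOfMacroExpansion below.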
/// Returns true if the given MacroID location points at the last
/// token of the macro expansion.
bool Lexer::isAtEndOfMacroExpansion(SourceLocation loc,
                                    const SourceManager &SM,
                                    const LangOptions &LangOpts,
                                    SourceLocation *MacroEnd) {
  assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");

  SourceLocation spellLoc = SM.getSpellingLoc(loc);
  unsigned tokLen = MeasureTokenLength(spellLoc, SM, LangOpts);
  if (tokLen == 0)
    return false;

  SourceLocation afterLoc = loc.getLocWithOffset(tokLen);
  SourceLocation expansionLoc;
  if (!SM.isAtEndOfImmediateMacroExpansion(afterLoc, &expansionLoc))
    return false;

  if (expansionLoc.isFileID()) {
    // No other macro expansions.
    if (MacroEnd)
      *MacroEnd = expansionLoc;
    return true;
  }

  return isAtEndOfMacroExpansion(expansionLoc, SM, LangOpts, MacroEnd);
}

static CharSourceRange makeRangeFromFileLocs(CharSourceRange Range,
                                             const SourceManager &SM,
                                             const LangOptions &LangOpts) {
  SourceLocation Begin = Range.getBegin();
  SourceLocation End = Range.getEnd();
  assert(Begin.isFileID() && End.isFileID());
  if (Range.isTokenRange()) {
    End = Lexer::getLocForEndOfToken(End, 0, SM, LangOpts);
    if (End.isInvalid())
      return {};
  }

  // Break down the source locations.
  FileID FID;
  unsigned BeginOffs;
  std::tie(FID, BeginOffs) = SM.getDecomposedLoc(Begin);
  if (FID.isInvalid())
    return {};

  unsigned EndOffs;
  if (!SM.isInFileID(End, FID, &EndOffs) ||
      BeginOffs > EndOffs)
    return {};

  return CharSourceRange::getCharRange(Begin, End);
}

// Assumes that `Loc` is in an expansion.
static bool isInExpansionTokenRange(const SourceLocation Loc,
                                    const SourceManager &SM) {
  return SM.getSLocEntry(SM.getFileID(Loc))
      .getExpansion()
      .isExpansionTokenRange();
}

CharSourceRange Lexer::makeFileCharRange(CharSourceRange Range,
                                         const SourceManager &SM,
                                         const LangOptions &LangOpts) {
  SourceLocation Begin = Range.getBegin();
  SourceLocation End = Range.getEnd();
  if (Begin.isInvalid() || End.isInvalid())
    return {};

  if (Begin.isFileID() && End.isFileID())
    return makeRangeFromFileLocs(Range, SM, LangOpts);

  if (Begin.isMacroID() && End.isFileID()) {
    if (!isAtStartOfMacroExpansion(Begin, SM, LangOpts, &Begin))
      return {};
    Range.setBegin(Begin);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  if (Begin.isFileID() && End.isMacroID()) {
    if (Range.isTokenRange()) {
      if (!isAtEndOfMacroExpansion(End, SM, LangOpts, &End))
        return {};
      // Use the *original* end, not the expanded one in `End`.
      Range.setTokenRange(isInExpansionTokenRange(Range.getEnd(), SM));
    } else if (!isAtStartOfMacroExpansion(End, SM, LangOpts, &End))
      return {};
    Range.setEnd(End);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  assert(Begin.isMacroID() && End.isMacroID());
  SourceLocation MacroBegin, MacroEnd;
  if (isAtStartOfMacroExpansion(Begin, SM, LangOpts, &MacroBegin) &&
      ((Range.isTokenRange() && isAtEndOfMacroExpansion(End, SM, LangOpts,
                                                        &MacroEnd)) ||
       (Range.isCharRange() && isAtStartOfMacroExpansion(End, SM, LangOpts,
                                                         &MacroEnd)))) {
    Range.setBegin(MacroBegin);
    Range.setEnd(MacroEnd);
    // Use the *original* `End`, not the expanded one in `MacroEnd`.
    if (Range.isTokenRange())
      Range.setTokenRange(isInExpansionTokenRange(End, SM));
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  bool Invalid = false;
  const SrcMgr::SLocEntry &BeginEntry = SM.getSLocEntry(SM.getFileID(Begin),
                                                        &Invalid);
  if (Invalid)
    return {};

  if (BeginEntry.getExpansion().isMacroArgExpansion()) {
    const SrcMgr::SLocEntry &EndEntry = SM.getSLocEntry(SM.getFileID(End),
                                                        &Invalid);
    if (Invalid)
      return {};

    if (EndEntry.getExpansion().isMacroArgExpansion() &&
        BeginEntry.getExpansion().getExpansionLocStart() ==
            EndEntry.getExpansion().getExpansionLocStart()) {
      Range.setBegin(SM.getImmediateSpellingLoc(Begin));
      Range.setEnd(SM.getImmediateSpellingLoc(End));
      return makeFileCharRange(Range, SM, LangOpts);
    }
  }

  return {};
}

StringRef Lexer::getSourceText(CharSourceRange Range,
                               const SourceManager &SM,
                               const LangOptions &LangOpts,
                               bool *Invalid) {
  Range = makeFileCharRange(Range, SM, LangOpts);
  if (Range.isInvalid()) {
    if (Invalid) *Invalid = true;
    return {};
  }

  // Break down the source location.
  std::pair<FileID, unsigned> beginInfo = SM.getDecomposedLoc(Range.getBegin());
  if (beginInfo.first.isInvalid()) {
    if (Invalid) *Invalid = true;
    return {};
  }

  unsigned EndOffs;
  if (!SM.isInFileID(Range.getEnd(), beginInfo.first, &EndOffs) ||
      beginInfo.second > EndOffs) {
    if (Invalid) *Invalid = true;
    return {};
  }

  // Try to load the file buffer.
  bool invalidTemp = false;
  StringRef file = SM.getBufferData(beginInfo.first, &invalidTemp);
  if (invalidTemp) {
    if (Invalid) *Invalid = true;
    return {};
  }

  if (Invalid) *Invalid = false;
  return file.substr(beginInfo.second, EndOffs - beginInfo.second);
}

StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
                                       const SourceManager &SM,
                                       const LangOptions &LangOpts) {
  assert(Loc.isMacroID() && "Only reasonable to call this on macros");

  // Find the location of the immediate macro expansion.
  while (true) {
    FileID FID = SM.getFileID(Loc);
    const SrcMgr::SLocEntry *E = &SM.getSLocEntry(FID);
    const SrcMgr::ExpansionInfo &Expansion = E->getExpansion();
    Loc = Expansion.getExpansionLocStart();
    if (!Expansion.isMacroArgExpansion())
      break;

    // For macro arguments we need to check that the argument did not come
    // from an inner macro, e.g: "MAC1( MAC2(foo) )"

    // Loc points to the argument id of the macro definition, move to the
    // macro expansion.
    Loc = SM.getImmediateExpansionRange(Loc).getBegin();
    SourceLocation SpellLoc = Expansion.getSpellingLoc();
    if (SpellLoc.isFileID())
      break; // No inner macro.

    // If spelling location resides in the same FileID as macro expansion
    // location, it means there is no inner macro.
    FileID MacroFID = SM.getFileID(Loc);
    if (SM.isInFileID(SpellLoc, MacroFID))
      break;

    // Argument came from inner macro.
    Loc = SpellLoc;
  }

  // Find the spelling location of the start of the non-argument expansion
  // range. This is where the macro name was spelled in order to begin
  // expanding this macro.
  Loc = SM.getSpellingLoc(Loc);

  // Dig out the buffer where the macro name was spelled and the extents of the
  // name so that we can render it into the expansion note.
  std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
  unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
  StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
  return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
}

StringRef Lexer::getImmediateMacroNameForDiagnostics(
    SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts) {
  assert(Loc.isMacroID() && "Only reasonable to call this on macros");
  // Walk past macro argument expansions.
  while (SM.isMacroArgExpansion(Loc))
    Loc = SM.getImmediateExpansionRange(Loc).getBegin();

  // If the macro's spelling isn't FileID or from scratch space, then it's
  // actually a token paste or stringization (or similar) and not a macro at
  // all.
  SourceLocation SpellLoc = SM.getSpellingLoc(Loc);
  if (!SpellLoc.isFileID() || SM.isWrittenInScratchSpace(SpellLoc))
    return {};

  // Find the spelling location of the start of the non-argument expansion
  // range. This is where the macro name was spelled in order to begin
  // expanding this macro.
  Loc = SM.getSpellingLoc(SM.getImmediateExpansionRange(Loc).getBegin());

  // Dig out the buffer where the macro name was spelled and the extents of the
  // name so that we can render it into the expansion note.
  std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
  unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
  StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
  return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
}

bool Lexer::isAsciiIdentifierContinueChar(char c, const LangOptions &LangOpts) {
  return isAsciiIdentifierContinue(c, LangOpts.DollarIdents);
}

bool Lexer::isNewLineEscaped(const char *BufferStart, const char *Str) {
  assert(isVerticalWhitespace(Str[0]));
  if (Str - 1 < BufferStart)
    return false;

  if ((Str[0] == '\n' && Str[-1] == '\r') ||
      (Str[0] == '\r' && Str[-1] == '\n')) {
    if (Str - 2 < BufferStart)
      return false;
    --Str;
  }
  --Str;

  // Rewind to first non-space character:
  while (Str > BufferStart && isHorizontalWhitespace(*Str))
    --Str;

  return *Str == '\\';
}

StringRef Lexer::getIndentationForLine(SourceLocation Loc,
                                       const SourceManager &SM) {
  if (Loc.isInvalid() || Loc.isMacroID())
    return {};
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  if (LocInfo.first.isInvalid())
    return {};
  bool Invalid = false;
  StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid)
    return {};
  const char *Line = findBeginningOfLine(Buffer, LocInfo.second);
  if (!Line)
    return {};
  StringRef Rest = Buffer.substr(Line - Buffer.data());
  size_t NumWhitespaceChars = Rest.find_first_not_of(" \t");
  return NumWhitespaceChars == StringRef::npos
             ? ""
             : Rest.take_front(NumWhitespaceChars);
}
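// For example, for a line reading "  \t  int x;", getIndentationForLine
// returns the leading "  \t  " exactly as written in the buffer.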
//===----------------------------------------------------------------------===//
// Diagnostics forwarding code.
//===----------------------------------------------------------------------===//

/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the
/// lexer buffer was all expanded at a single point, perform the mapping.
/// This is currently only used for _Pragma implementation, so it is the slow
/// path of the hot getSourceLocation method. Do not allow it to be inlined.
static LLVM_ATTRIBUTE_NOINLINE SourceLocation GetMappedTokenLoc(
    Preprocessor &PP, SourceLocation FileLoc, unsigned CharNo, unsigned TokLen);
static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
                                        SourceLocation FileLoc,
                                        unsigned CharNo, unsigned TokLen) {
  assert(FileLoc.isMacroID() && "Must be a macro expansion");

  // Otherwise, we're lexing "mapped tokens". This is used for things like
  // _Pragma handling. Combine the expansion location of FileLoc with the
  // spelling location.
  SourceManager &SM = PP.getSourceManager();

  // Create a new SLoc which is expanded from Expansion(FileLoc) but whose
  // characters come from spelling(FileLoc)+Offset.
  SourceLocation SpellingLoc = SM.getSpellingLoc(FileLoc);
  SpellingLoc = SpellingLoc.getLocWithOffset(CharNo);

  // Figure out the expansion loc range, which is the range covered by the
  // original _Pragma(...) sequence.
  CharSourceRange II = SM.getImmediateExpansionRange(FileLoc);

  return SM.createExpansionLoc(SpellingLoc, II.getBegin(), II.getEnd(), TokLen);
}

/// getSourceLocation - Return a source location identifier for the specified
/// offset in the current file.
SourceLocation Lexer::getSourceLocation(const char *Loc,
                                        unsigned TokLen) const {
  assert(Loc >= BufferStart && Loc <= BufferEnd &&
         "Location out of range for this buffer!");

  // In the normal case, we're just lexing from a simple file buffer, return
  // the file id from FileLoc with the offset specified.
  unsigned CharNo = Loc-BufferStart;
  if (FileLoc.isFileID())
    return FileLoc.getLocWithOffset(CharNo);

  // Otherwise, this is the _Pragma lexer case, which pretends that all of the
  // tokens are lexed from where the _Pragma was defined.
  assert(PP && "This doesn't work on raw lexers");
  return GetMappedTokenLoc(*PP, FileLoc, CharNo, TokLen);
}

/// Diag - Forwarding function for diagnostics. This translates a source
/// position in the current buffer into a SourceLocation object for rendering.
DiagnosticBuilder Lexer::Diag(const char *Loc, unsigned DiagID) const {
  return PP->Diag(getSourceLocation(Loc), DiagID);
}

//===----------------------------------------------------------------------===//
// Trigraph and Escaped Newline Handling Code.
//===----------------------------------------------------------------------===//

/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair,
/// return the decoded trigraph letter it corresponds to, or '\0' if nothing.
static char GetTrigraphCharForLetter(char Letter) {
  switch (Letter) {
  default:   return 0;
  case '=':  return '#';
  case ')':  return ']';
  case '(':  return '[';
  case '!':  return '|';
  case '\'': return '^';
  case '>':  return '}';
  case '/':  return '\\';
  case '<':  return '{';
  case '-':  return '~';
  }
}
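// For example, the trigraph "??=" decodes to '#', and "??/" decodes to '\',
// which may in turn introduce an escaped newline.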
/// DecodeTrigraphChar - If the specified character is a legal trigraph when
/// prefixed with ??, warn about the trigraph use (whether trigraphs are
/// enabled or not) and, if trigraphs are enabled, return the decoded
/// character; otherwise return 0.
static char DecodeTrigraphChar(const char *CP, Lexer *L, bool Trigraphs) {
  char Res = GetTrigraphCharForLetter(*CP);
  if (!Res)
    return Res;

  if (!Trigraphs) {
    if (L && !L->isLexingRawMode())
      L->Diag(CP-2, diag::trigraph_ignored);
    return 0;
  }

  if (L && !L->isLexingRawMode())
    L->Diag(CP-2, diag::trigraph_converted) << StringRef(&Res, 1);
  return Res;
}

/// getEscapedNewLineSize - Return the size of the specified escaped newline,
/// or 0 if it is not an escaped newline. P[-1] is known to be a "\" or a
/// trigraph equivalent on entry to this function.
unsigned Lexer::getEscapedNewLineSize(const char *Ptr) {
  unsigned Size = 0;
  while (isWhitespace(Ptr[Size])) {
    ++Size;

    if (Ptr[Size-1] != '\n' && Ptr[Size-1] != '\r')
      continue;

    // If this is a \r\n or \n\r, skip the other half.
    if ((Ptr[Size] == '\r' || Ptr[Size] == '\n') &&
        Ptr[Size-1] != Ptr[Size])
      ++Size;

    return Size;
  }

  // Not an escaped newline, must be a \t or something else.
  return 0;
}

/// SkipEscapedNewLines - If P points to an escaped newline (or a series of
/// them), skip over them and return the first non-escaped-newline found,
/// otherwise return P.
const char *Lexer::SkipEscapedNewLines(const char *P) {
  while (true) {
    const char *AfterEscape;
    if (*P == '\\') {
      AfterEscape = P+1;
    } else if (*P == '?') {
      // If not a trigraph for escape, bail out.
      if (P[1] != '?' || P[2] != '/')
        return P;
      // FIXME: Take LangOpts into account; the language might not
      // support trigraphs.
      AfterEscape = P+3;
    } else {
      return P;
    }

    unsigned NewLineSize = Lexer::getEscapedNewLineSize(AfterEscape);
    if (NewLineSize == 0) return P;
    P = AfterEscape+NewLineSize;
  }
}

std::optional<Token> Lexer::findNextToken(SourceLocation Loc,
                                          const SourceManager &SM,
                                          const LangOptions &LangOpts) {
  if (Loc.isMacroID()) {
    if (!Lexer::isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
      return std::nullopt;
  }
  Loc = Lexer::getLocForEndOfToken(Loc, 0, SM, LangOpts);

  // Break down the source location.
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);

  // Try to load the file buffer.
  bool InvalidTemp = false;
  StringRef File = SM.getBufferData(LocInfo.first, &InvalidTemp);
  if (InvalidTemp)
    return std::nullopt;

  const char *TokenBegin = File.data() + LocInfo.second;

  // Lex from the start of the given location.
  Lexer lexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts, File.begin(),
              TokenBegin, File.end());
  // Find the token.
  Token Tok;
  lexer.LexFromRawLexer(Tok);
  return Tok;
}
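// Sketch of a typical findNextToken call (illustrative; `Loc`, `SM`, and
// `LangOpts` are assumed to be in scope, and handleSemi is a hypothetical
// helper):
//
//   if (std::optional<Token> Next = Lexer::findNextToken(Loc, SM, LangOpts))
//     if (Next->is(tok::semi))
//       handleSemi(Next->getLocation());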
/// Checks that the given token is the first token that occurs after the
/// given location (this excludes comments and whitespace). Returns the location
/// immediately after the specified token. If the token is not found or the
/// location is inside a macro, the returned source location will be invalid.
SourceLocation Lexer::findLocationAfterToken(
    SourceLocation Loc, tok::TokenKind TKind, const SourceManager &SM,
    const LangOptions &LangOpts, bool SkipTrailingWhitespaceAndNewLine) {
  std::optional<Token> Tok = findNextToken(Loc, SM, LangOpts);
  if (!Tok || Tok->isNot(TKind))
    return {};
  SourceLocation TokenLoc = Tok->getLocation();

  // Calculate how much whitespace needs to be skipped if any.
  unsigned NumWhitespaceChars = 0;
  if (SkipTrailingWhitespaceAndNewLine) {
    const char *TokenEnd = SM.getCharacterData(TokenLoc) + Tok->getLength();
    unsigned char C = *TokenEnd;
    while (isHorizontalWhitespace(C)) {
      C = *(++TokenEnd);
      NumWhitespaceChars++;
    }

    // Skip \r, \n, \r\n, or \n\r
    if (C == '\n' || C == '\r') {
      char PrevC = C;
      C = *(++TokenEnd);
      NumWhitespaceChars++;
      if ((C == '\n' || C == '\r') && C != PrevC)
        NumWhitespaceChars++;
    }
  }

  return TokenLoc.getLocWithOffset(Tok->getLength() + NumWhitespaceChars);
}

/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
/// get its size, and return it. This is tricky in several cases:
///   1. If currently at the start of a trigraph, we warn about the trigraph,
///      then either return the trigraph (skipping 3 chars) or the '?',
///      depending on whether trigraphs are enabled or not.
///   2. If this is an escaped newline (potentially with whitespace between
///      the backslash and newline), implicitly skip the newline and return
///      the char after it.
///
/// This handles the slow/uncommon case of the getCharAndSize method. Here we
/// know that we can accumulate into Size, and that we have already incremented
/// Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
/// be updated to match.
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
                               Token *Tok) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters between the slash and
    // newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      // Warn if there was whitespace between the backslash and newline.
      if (Ptr[0] != '\n' && Ptr[0] != '\r' && Tok && !isLexingRawMode())
        Diag(Ptr, diag::backslash_newline_space);

      // Found backslash<whitespace><newline>. Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr += EscapedNewLineSize;

      // Use slow version to accumulate a correct size field.
      return getCharAndSizeSlow(Ptr, Size, Tok);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), emit
    // a trigraph warning. If so, and if trigraphs are enabled, return it.
    if (char C = DecodeTrigraphChar(Ptr + 2, Tok ? this : nullptr,
                                    LangOpts.Trigraphs)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      Ptr += 3;
      Size += 3;
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}

/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
/// getCharAndSizeNoWarn method. Here we know that we can accumulate into Size,
/// and that we have already incremented Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
/// be updated to match.
char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                     const LangOptions &LangOpts) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Found backslash<whitespace><newline>. Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr += EscapedNewLineSize;

      // Use slow version to accumulate a correct size field.
      return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (LangOpts.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), return
    // it.
    if (char C = GetTrigraphCharForLetter(Ptr[2])) {
      Ptr += 3;
      Size += 3;
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}

//===----------------------------------------------------------------------===//
// Helper methods for lexing.
//===----------------------------------------------------------------------===//

/// Routine that indiscriminately sets the offset into the source file.
void Lexer::SetByteOffset(unsigned Offset, bool StartOfLine) {
  BufferPtr = BufferStart + Offset;
  if (BufferPtr > BufferEnd)
    BufferPtr = BufferEnd;
  // FIXME: What exactly does the StartOfLine bit mean? There are two
  // possible meanings for the "start" of the line: the first token on the
  // unexpanded line, or the first token on the expanded line.
  IsAtStartOfLine = StartOfLine;
  IsAtPhysicalStartOfLine = StartOfLine;
}

static bool isUnicodeWhitespace(uint32_t Codepoint) {
  static const llvm::sys::UnicodeCharSet UnicodeWhitespaceChars(
      UnicodeWhitespaceCharRanges);
  return UnicodeWhitespaceChars.contains(Codepoint);
}

static llvm::SmallString<5> codepointAsHexString(uint32_t C) {
  llvm::SmallString<5> CharBuf;
  llvm::raw_svector_ostream CharOS(CharBuf);
  llvm::write_hex(CharOS, C, llvm::HexPrintStyle::Upper, 4);
  return CharBuf;
}
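// For example, codepointAsHexString(0x00AD) produces "00AD"; codepoints above
// U+FFFF print with more than four hex digits.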
// To mitigate https://github.com/llvm/llvm-project/issues/54732,
// we allow "Mathematical Notation Characters" in identifiers.
// This is a proposed profile that extends XID_Start/XID_Continue
// with mathematical symbols and superscript and subscript digits
// found in some production software.
// https://www.unicode.org/L2/L2022/22230-math-profile.pdf
static bool isMathematicalExtensionID(uint32_t C, const LangOptions &LangOpts,
                                      bool IsStart, bool &IsExtension) {
  static const llvm::sys::UnicodeCharSet MathStartChars(
      MathematicalNotationProfileIDStartRanges);
  static const llvm::sys::UnicodeCharSet MathContinueChars(
      MathematicalNotationProfileIDContinueRanges);
  if (MathStartChars.contains(C) ||
      (!IsStart && MathContinueChars.contains(C))) {
    IsExtension = true;
    return true;
  }
  return false;
}

static bool isAllowedIDChar(uint32_t C, const LangOptions &LangOpts,
                            bool &IsExtension) {
  if (LangOpts.AsmPreprocessor) {
    return false;
  } else if (LangOpts.DollarIdents && '$' == C) {
    return true;
  } else if (LangOpts.CPlusPlus || LangOpts.C2x) {
    // A non-leading codepoint must have the XID_Continue property.
    // XIDContinueRanges doesn't contain characters also in XIDStartRanges,
    // so we need to check both tables.
    // '_' doesn't have the XID_Continue property but is allowed in C and C++.
    static const llvm::sys::UnicodeCharSet XIDStartChars(XIDStartRanges);
    static const llvm::sys::UnicodeCharSet XIDContinueChars(XIDContinueRanges);
    if (C == '_' || XIDStartChars.contains(C) || XIDContinueChars.contains(C))
      return true;
    return isMathematicalExtensionID(C, LangOpts, /*IsStart=*/false,
                                     IsExtension);
  } else if (LangOpts.C11) {
    static const llvm::sys::UnicodeCharSet C11AllowedIDChars(
        C11AllowedIDCharRanges);
    return C11AllowedIDChars.contains(C);
  } else {
    static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
        C99AllowedIDCharRanges);
    return C99AllowedIDChars.contains(C);
  }
}

static bool isAllowedInitiallyIDChar(uint32_t C, const LangOptions &LangOpts,
                                     bool &IsExtension) {
  assert(C > 0x7F && "isAllowedInitiallyIDChar called with an ASCII codepoint");
  IsExtension = false;
  if (LangOpts.AsmPreprocessor) {
    return false;
  }
  if (LangOpts.CPlusPlus || LangOpts.C2x) {
    static const llvm::sys::UnicodeCharSet XIDStartChars(XIDStartRanges);
    if (XIDStartChars.contains(C))
      return true;
    return isMathematicalExtensionID(C, LangOpts, /*IsStart=*/true,
                                     IsExtension);
  }
  if (!isAllowedIDChar(C, LangOpts, IsExtension))
    return false;
  if (LangOpts.C11) {
    static const llvm::sys::UnicodeCharSet C11DisallowedInitialIDChars(
        C11DisallowedInitialIDCharRanges);
    return !C11DisallowedInitialIDChars.contains(C);
  }
  static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
      C99DisallowedInitialIDCharRanges);
  return !C99DisallowedInitialIDChars.contains(C);
}

static void diagnoseExtensionInIdentifier(DiagnosticsEngine &Diags, uint32_t C,
                                          CharSourceRange Range) {

  static const llvm::sys::UnicodeCharSet MathStartChars(
      MathematicalNotationProfileIDStartRanges);
  static const llvm::sys::UnicodeCharSet MathContinueChars(
      MathematicalNotationProfileIDContinueRanges);

  (void)MathStartChars;
  (void)MathContinueChars;
  assert((MathStartChars.contains(C) || MathContinueChars.contains(C)) &&
         "Unexpected mathematical notation codepoint");
  Diags.Report(Range.getBegin(), diag::ext_mathematical_notation)
      << codepointAsHexString(C) << Range;
}
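// For example, under the C++/C23 rules above, U+00E9 (LATIN SMALL LETTER E
// WITH ACUTE, an XID_Start character) may begin an identifier, while U+0300
// (COMBINING GRAVE ACCENT, XID_Continue only) may appear only after the first
// character.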
1561   return CharSourceRange::getCharRange(L.getSourceLocation(Begin),
1562                                        L.getSourceLocation(End));
1563 }
1564
1565 static void maybeDiagnoseIDCharCompat(DiagnosticsEngine &Diags, uint32_t C,
1566                                       CharSourceRange Range, bool IsFirst) {
1567   // Check C99 compatibility.
1568   if (!Diags.isIgnored(diag::warn_c99_compat_unicode_id, Range.getBegin())) {
1569     enum {
1570       CannotAppearInIdentifier = 0,
1571       CannotStartIdentifier
1572     };
1573
1574     static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
1575         C99AllowedIDCharRanges);
1576     static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
1577         C99DisallowedInitialIDCharRanges);
1578     if (!C99AllowedIDChars.contains(C)) {
1579       Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
1580           << Range
1581           << CannotAppearInIdentifier;
1582     } else if (IsFirst && C99DisallowedInitialIDChars.contains(C)) {
1583       Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
1584           << Range
1585           << CannotStartIdentifier;
1586     }
1587   }
1588 }
1589
1590 /// After encountering UTF-8 character C and interpreting it as an identifier
1591 /// character, check whether it's a homoglyph for a common non-identifier
1592 /// source character that is unlikely to be an intentional identifier
1593 /// character and warn if so.
1594 static void maybeDiagnoseUTF8Homoglyph(DiagnosticsEngine &Diags, uint32_t C,
1595                                        CharSourceRange Range) {
1596   // FIXME: Handle Unicode quotation marks (smart quotes, fullwidth quotes).
1597   struct HomoglyphPair {
1598     uint32_t Character;
1599     char LooksLike;
1600     bool operator<(HomoglyphPair R) const { return Character < R.Character; }
1601   };
1602   static constexpr HomoglyphPair SortedHomoglyphs[] = {
1603     {U'\u00ad', 0},   // SOFT HYPHEN
1604     {U'\u01c3', '!'}, // LATIN LETTER RETROFLEX CLICK
1605     {U'\u037e', ';'}, // GREEK QUESTION MARK
1606     {U'\u200b', 0},   // ZERO WIDTH SPACE
1607     {U'\u200c', 0},   // ZERO WIDTH NON-JOINER
1608     {U'\u200d', 0},   // ZERO WIDTH JOINER
1609     {U'\u2060', 0},   // WORD JOINER
1610     {U'\u2061', 0},   // FUNCTION APPLICATION
1611     {U'\u2062', 0},   // INVISIBLE TIMES
1612     {U'\u2063', 0},   // INVISIBLE SEPARATOR
1613     {U'\u2064', 0},   // INVISIBLE PLUS
1614     {U'\u2212', '-'}, // MINUS SIGN
1615     {U'\u2215', '/'}, // DIVISION SLASH
1616     {U'\u2216', '\\'}, // SET MINUS
1617     {U'\u2217', '*'}, // ASTERISK OPERATOR
1618     {U'\u2223', '|'}, // DIVIDES
1619     {U'\u2227', '^'}, // LOGICAL AND
1620     {U'\u2236', ':'}, // RATIO
1621     {U'\u223c', '~'}, // TILDE OPERATOR
1622     {U'\ua789', ':'}, // MODIFIER LETTER COLON
1623     {U'\ufeff', 0},   // ZERO WIDTH NO-BREAK SPACE
1624     {U'\uff01', '!'}, // FULLWIDTH EXCLAMATION MARK
1625     {U'\uff03', '#'}, // FULLWIDTH NUMBER SIGN
1626     {U'\uff04', '$'}, // FULLWIDTH DOLLAR SIGN
1627     {U'\uff05', '%'}, // FULLWIDTH PERCENT SIGN
1628     {U'\uff06', '&'}, // FULLWIDTH AMPERSAND
1629     {U'\uff08', '('}, // FULLWIDTH LEFT PARENTHESIS
1630     {U'\uff09', ')'}, // FULLWIDTH RIGHT PARENTHESIS
1631     {U'\uff0a', '*'}, // FULLWIDTH ASTERISK
1632     {U'\uff0b', '+'}, // FULLWIDTH PLUS SIGN
1633     {U'\uff0c', ','}, // FULLWIDTH COMMA
1634     {U'\uff0d', '-'}, // FULLWIDTH HYPHEN-MINUS
1635     {U'\uff0e', '.'}, // FULLWIDTH FULL STOP
1636     {U'\uff0f', '/'}, // FULLWIDTH SOLIDUS
1637     {U'\uff1a', ':'}, // FULLWIDTH COLON
1638     {U'\uff1b', ';'}, // FULLWIDTH SEMICOLON
1639     {U'\uff1c', '<'}, // FULLWIDTH LESS-THAN SIGN
1640     {U'\uff1d', '='}, // FULLWIDTH EQUALS SIGN
1641     {U'\uff1e', '>'}, // FULLWIDTH GREATER-THAN SIGN
1642     {U'\uff1f', '?'}, // FULLWIDTH QUESTION MARK
1643     {U'\uff20', '@'}, // FULLWIDTH COMMERCIAL AT
1644     {U'\uff3b', '['}, // FULLWIDTH LEFT SQUARE BRACKET
1645     {U'\uff3c', '\\'}, // FULLWIDTH REVERSE SOLIDUS
1646     {U'\uff3d', ']'}, // FULLWIDTH RIGHT SQUARE BRACKET
1647     {U'\uff3e', '^'}, // FULLWIDTH CIRCUMFLEX ACCENT
1648     {U'\uff5b', '{'}, // FULLWIDTH LEFT CURLY BRACKET
1649     {U'\uff5c', '|'}, // FULLWIDTH VERTICAL LINE
1650     {U'\uff5d', '}'}, // FULLWIDTH RIGHT CURLY BRACKET
1651     {U'\uff5e', '~'}, // FULLWIDTH TILDE
1652     {0, 0}
1653   };
1654   auto Homoglyph =
1655       std::lower_bound(std::begin(SortedHomoglyphs),
1656                        std::end(SortedHomoglyphs) - 1, HomoglyphPair{C, '\0'});
1657   if (Homoglyph->Character == C) {
1658     if (Homoglyph->LooksLike) {
1659       const char LooksLikeStr[] = {Homoglyph->LooksLike, 0};
1660       Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_homoglyph)
1661           << Range << codepointAsHexString(C) << LooksLikeStr;
1662     } else {
1663       Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_zero_width)
1664           << Range << codepointAsHexString(C);
1665     }
1666   }
1667 }
1668
1669 static void diagnoseInvalidUnicodeCodepointInIdentifier(
1670     DiagnosticsEngine &Diags, const LangOptions &LangOpts, uint32_t CodePoint,
1671     CharSourceRange Range, bool IsFirst) {
1672   if (isASCII(CodePoint))
1673     return;
1674
1675   bool IsExtension;
1676   bool IsIDStart = isAllowedInitiallyIDChar(CodePoint, LangOpts, IsExtension);
1677   bool IsIDContinue =
1678       IsIDStart || isAllowedIDChar(CodePoint, LangOpts, IsExtension);
1679
1680   if ((IsFirst && IsIDStart) || (!IsFirst && IsIDContinue))
1681     return;
1682
1683   bool InvalidOnlyAtStart = IsFirst && !IsIDStart && IsIDContinue;
1684
1685   if (!IsFirst || InvalidOnlyAtStart) {
1686     Diags.Report(Range.getBegin(), diag::err_character_not_allowed_identifier)
1687         << Range << codepointAsHexString(CodePoint) << int(InvalidOnlyAtStart)
1688         << FixItHint::CreateRemoval(Range);
1689   } else {
1690     Diags.Report(Range.getBegin(), diag::err_character_not_allowed)
1691         << Range << codepointAsHexString(CodePoint)
1692         << FixItHint::CreateRemoval(Range);
1693   }
1694 }
1695
1696 bool Lexer::tryConsumeIdentifierUCN(const char *&CurPtr, unsigned Size,
1697                                     Token &Result) {
1698   const char *UCNPtr = CurPtr + Size;
1699   uint32_t CodePoint = tryReadUCN(UCNPtr, CurPtr, /*Token=*/nullptr);
1700   if (CodePoint == 0) {
1701     return false;
1702   }
1703   bool IsExtension = false;
1704   if (!isAllowedIDChar(CodePoint, LangOpts, IsExtension)) {
1705     if (isASCII(CodePoint) || isUnicodeWhitespace(CodePoint))
1706       return false;
1707     if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
1708         !PP->isPreprocessedOutput())
1709       diagnoseInvalidUnicodeCodepointInIdentifier(
1710           PP->getDiagnostics(), LangOpts, CodePoint,
1711           makeCharRange(*this, CurPtr, UCNPtr),
1712           /*IsFirst=*/false);
1713
1714     // We got a unicode codepoint that is neither whitespace nor a valid
1715     // identifier part.
1716     // Carry on as if the codepoint was valid for recovery purposes.
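    // (Illustrative sketch, not normative: lexing `int x\u00B6;` reaches this
    // point because U+00B6, PILCROW SIGN, has neither the XID_Start nor the
    // XID_Continue property; the diagnostic above fires once, yet the
    // codepoint is still consumed below so the parser sees a single
    // identifier token to recover on.)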
1717   } else if (!isLexingRawMode()) {
1718     if (IsExtension)
1719       diagnoseExtensionInIdentifier(PP->getDiagnostics(), CodePoint,
1720                                     makeCharRange(*this, CurPtr, UCNPtr));
1721
1722     maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
1723                               makeCharRange(*this, CurPtr, UCNPtr),
1724                               /*IsFirst=*/false);
1725   }
1726
1727   Result.setFlag(Token::HasUCN);
1728   if ((UCNPtr - CurPtr == 6 && CurPtr[1] == 'u') ||
1729       (UCNPtr - CurPtr == 10 && CurPtr[1] == 'U'))
1730     CurPtr = UCNPtr;
1731   else
1732     while (CurPtr != UCNPtr)
1733       (void)getAndAdvanceChar(CurPtr, Result);
1734   return true;
1735 }
1736
1737 bool Lexer::tryConsumeIdentifierUTF8Char(const char *&CurPtr) {
1738   const char *UnicodePtr = CurPtr;
1739   llvm::UTF32 CodePoint;
1740   llvm::ConversionResult Result =
1741       llvm::convertUTF8Sequence((const llvm::UTF8 **)&UnicodePtr,
1742                                 (const llvm::UTF8 *)BufferEnd,
1743                                 &CodePoint,
1744                                 llvm::strictConversion);
1745   if (Result != llvm::conversionOK)
1746     return false;
1747
1748   bool IsExtension = false;
1749   if (!isAllowedIDChar(static_cast<uint32_t>(CodePoint), LangOpts,
1750                        IsExtension)) {
1751     if (isASCII(CodePoint) || isUnicodeWhitespace(CodePoint))
1752       return false;
1753
1754     if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
1755         !PP->isPreprocessedOutput())
1756       diagnoseInvalidUnicodeCodepointInIdentifier(
1757           PP->getDiagnostics(), LangOpts, CodePoint,
1758           makeCharRange(*this, CurPtr, UnicodePtr), /*IsFirst=*/false);
1759     // We got a unicode codepoint that is neither whitespace nor a valid
1760     // identifier part. Carry on as if the codepoint was
1761     // valid for recovery purposes.
1762   } else if (!isLexingRawMode()) {
1763     if (IsExtension)
1764       diagnoseExtensionInIdentifier(PP->getDiagnostics(), CodePoint,
1765                                     makeCharRange(*this, CurPtr, UnicodePtr));
1766     maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
1767                               makeCharRange(*this, CurPtr, UnicodePtr),
1768                               /*IsFirst=*/false);
1769     maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), CodePoint,
1770                                makeCharRange(*this, CurPtr, UnicodePtr));
1771   }
1772
1773   CurPtr = UnicodePtr;
1774   return true;
1775 }
1776
1777 bool Lexer::LexUnicodeIdentifierStart(Token &Result, uint32_t C,
1778                                       const char *CurPtr) {
1779   bool IsExtension = false;
1780   if (isAllowedInitiallyIDChar(C, LangOpts, IsExtension)) {
1781     if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
1782         !PP->isPreprocessedOutput()) {
1783       if (IsExtension)
1784         diagnoseExtensionInIdentifier(PP->getDiagnostics(), C,
1785                                       makeCharRange(*this, BufferPtr, CurPtr));
1786       maybeDiagnoseIDCharCompat(PP->getDiagnostics(), C,
1787                                 makeCharRange(*this, BufferPtr, CurPtr),
1788                                 /*IsFirst=*/true);
1789       maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), C,
1790                                  makeCharRange(*this, BufferPtr, CurPtr));
1791     }
1792
1793     MIOpt.ReadToken();
1794     return LexIdentifierContinue(Result, CurPtr);
1795   }
1796
1797   if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
1798       !PP->isPreprocessedOutput() && !isASCII(*BufferPtr) &&
1799       !isUnicodeWhitespace(C)) {
1800     // Non-ASCII characters tend to creep into source code unintentionally.
1801     // Instead of letting the parser complain about the unknown token,
1802     // just drop the character.
1803     // Note that we can /only/ do this when the non-ASCII character is actually
1804     // spelled as Unicode, not written as a UCN.
The standard requires that 1805 // we not throw away any possible preprocessor tokens, but there's a 1806 // loophole in the mapping of Unicode characters to basic character set 1807 // characters that allows us to map these particular characters to, say, 1808 // whitespace. 1809 diagnoseInvalidUnicodeCodepointInIdentifier( 1810 PP->getDiagnostics(), LangOpts, C, 1811 makeCharRange(*this, BufferPtr, CurPtr), /*IsStart*/ true); 1812 BufferPtr = CurPtr; 1813 return false; 1814 } 1815 1816 // Otherwise, we have an explicit UCN or a character that's unlikely to show 1817 // up by accident. 1818 MIOpt.ReadToken(); 1819 FormTokenWithChars(Result, CurPtr, tok::unknown); 1820 return true; 1821 } 1822 1823 bool Lexer::LexIdentifierContinue(Token &Result, const char *CurPtr) { 1824 // Match [_A-Za-z0-9]*, we have already matched an identifier start. 1825 while (true) { 1826 unsigned char C = *CurPtr; 1827 // Fast path. 1828 if (isAsciiIdentifierContinue(C)) { 1829 ++CurPtr; 1830 continue; 1831 } 1832 1833 unsigned Size; 1834 // Slow path: handle trigraph, unicode codepoints, UCNs. 1835 C = getCharAndSize(CurPtr, Size); 1836 if (isAsciiIdentifierContinue(C)) { 1837 CurPtr = ConsumeChar(CurPtr, Size, Result); 1838 continue; 1839 } 1840 if (C == '$') { 1841 // If we hit a $ and they are not supported in identifiers, we are done. 1842 if (!LangOpts.DollarIdents) 1843 break; 1844 // Otherwise, emit a diagnostic and continue. 1845 if (!isLexingRawMode()) 1846 Diag(CurPtr, diag::ext_dollar_in_identifier); 1847 CurPtr = ConsumeChar(CurPtr, Size, Result); 1848 continue; 1849 } 1850 if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result)) 1851 continue; 1852 if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr)) 1853 continue; 1854 // Neither an expected Unicode codepoint nor a UCN. 1855 break; 1856 } 1857 1858 const char *IdStart = BufferPtr; 1859 FormTokenWithChars(Result, CurPtr, tok::raw_identifier); 1860 Result.setRawIdentifierData(IdStart); 1861 1862 // If we are in raw mode, return this identifier raw. There is no need to 1863 // look up identifier information or attempt to macro expand it. 1864 if (LexingRawMode) 1865 return true; 1866 1867 // Fill in Result.IdentifierInfo and update the token kind, 1868 // looking up the identifier in the identifier table. 1869 IdentifierInfo *II = PP->LookUpIdentifierInfo(Result); 1870 // Note that we have to call PP->LookUpIdentifierInfo() even for code 1871 // completion, it writes IdentifierInfo into Result, and callers rely on it. 1872 1873 // If the completion point is at the end of an identifier, we want to treat 1874 // the identifier as incomplete even if it resolves to a macro or a keyword. 1875 // This allows e.g. 'class^' to complete to 'classifier'. 1876 if (isCodeCompletionPoint(CurPtr)) { 1877 // Return the code-completion token. 1878 Result.setKind(tok::code_completion); 1879 // Skip the code-completion char and all immediate identifier characters. 1880 // This ensures we get consistent behavior when completing at any point in 1881 // an identifier (i.e. at the start, in the middle, at the end). Note that 1882 // only simple cases (i.e. [a-zA-Z0-9_]) are supported to keep the code 1883 // simpler. 1884 assert(*CurPtr == 0 && "Completion character must be 0"); 1885 ++CurPtr; 1886 // Note that code completion token is not added as a separate character 1887 // when the completion point is at the end of the buffer. Therefore, we need 1888 // to check if the buffer has ended. 
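  // (Hypothetical illustration: completing at "classi^fier", with '^' marking
  // the completion point, also consumes the trailing "fier", so the resulting
  // token covers the whole identifier no matter where the cursor sits.)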
1889     if (CurPtr < BufferEnd) {
1890       while (isAsciiIdentifierContinue(*CurPtr))
1891         ++CurPtr;
1892     }
1893     BufferPtr = CurPtr;
1894     return true;
1895   }
1896
1897   // Finally, now that we know we have an identifier, pass this off to the
1898   // preprocessor, which may macro expand it or something.
1899   if (II->isHandleIdentifierCase())
1900     return PP->HandleIdentifier(Result);
1901
1902   return true;
1903 }
1904
1905 /// isHexaLiteral - Return true if Start points to a hex constant, in
1906 /// Microsoft mode (where this is supposed to be several different tokens).
1907 bool Lexer::isHexaLiteral(const char *Start, const LangOptions &LangOpts) {
1908   unsigned Size;
1909   char C1 = Lexer::getCharAndSizeNoWarn(Start, Size, LangOpts);
1910   if (C1 != '0')
1911     return false;
1912   char C2 = Lexer::getCharAndSizeNoWarn(Start + Size, Size, LangOpts);
1913   return (C2 == 'x' || C2 == 'X');
1914 }
1915
1916 /// LexNumericConstant - Lex the remainder of an integer or floating point
1917 /// constant. From[-1] is the first character lexed. Return the end of the
1918 /// constant.
1919 bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
1920   unsigned Size;
1921   char C = getCharAndSize(CurPtr, Size);
1922   char PrevCh = 0;
1923   while (isPreprocessingNumberBody(C)) {
1924     CurPtr = ConsumeChar(CurPtr, Size, Result);
1925     PrevCh = C;
1926     C = getCharAndSize(CurPtr, Size);
1927   }
1928
1929   // If we fell out, check for a sign, due to 1e+12. If we have one, continue.
1930   if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e')) {
1931     // If we are in Microsoft mode, don't continue if the constant is hex.
1932     // For example, MSVC will accept the following as 3 tokens: 0x1234567e+1
1933     if (!LangOpts.MicrosoftExt || !isHexaLiteral(BufferPtr, LangOpts))
1934       return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
1935   }
1936
1937   // If we have a hex FP constant, continue.
1938   if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p')) {
1939     // Outside C99 and C++17, we accept hexadecimal floating point numbers as a
1940     // not-quite-conforming extension. Only do so if this looks like it's
1941     // actually meant to be a hexfloat, and not if it has a ud-suffix.
1942     bool IsHexFloat = true;
1943     if (!LangOpts.C99) {
1944       if (!isHexaLiteral(BufferPtr, LangOpts))
1945         IsHexFloat = false;
1946       else if (!LangOpts.CPlusPlus17 &&
1947                std::find(BufferPtr, CurPtr, '_') != CurPtr)
1948         IsHexFloat = false;
1949     }
1950     if (IsHexFloat)
1951       return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
1952   }
1953
1954   // If we have a digit separator, continue.
1955   if (C == '\'' && (LangOpts.CPlusPlus14 || LangOpts.C2x)) {
1956     unsigned NextSize;
1957     char Next = getCharAndSizeNoWarn(CurPtr + Size, NextSize, LangOpts);
1958     if (isAsciiIdentifierContinue(Next)) {
1959       if (!isLexingRawMode())
1960         Diag(CurPtr, LangOpts.CPlusPlus
1961                          ? diag::warn_cxx11_compat_digit_separator
1962                          : diag::warn_c2x_compat_digit_separator);
1963       CurPtr = ConsumeChar(CurPtr, Size, Result);
1964       CurPtr = ConsumeChar(CurPtr, NextSize, Result);
1965       return LexNumericConstant(Result, CurPtr);
1966     }
1967   }
1968
1969   // If we have a UCN or UTF-8 character (perhaps in a ud-suffix), continue.
1970   if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
1971     return LexNumericConstant(Result, CurPtr);
1972   if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
1973     return LexNumericConstant(Result, CurPtr);
1974
1975   // Update the location of token as well as BufferPtr.
1976 const char *TokStart = BufferPtr; 1977 FormTokenWithChars(Result, CurPtr, tok::numeric_constant); 1978 Result.setLiteralData(TokStart); 1979 return true; 1980 } 1981 1982 /// LexUDSuffix - Lex the ud-suffix production for user-defined literal suffixes 1983 /// in C++11, or warn on a ud-suffix in C++98. 1984 const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr, 1985 bool IsStringLiteral) { 1986 assert(LangOpts.CPlusPlus); 1987 1988 // Maximally munch an identifier. 1989 unsigned Size; 1990 char C = getCharAndSize(CurPtr, Size); 1991 bool Consumed = false; 1992 1993 if (!isAsciiIdentifierStart(C)) { 1994 if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result)) 1995 Consumed = true; 1996 else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr)) 1997 Consumed = true; 1998 else 1999 return CurPtr; 2000 } 2001 2002 if (!LangOpts.CPlusPlus11) { 2003 if (!isLexingRawMode()) 2004 Diag(CurPtr, 2005 C == '_' ? diag::warn_cxx11_compat_user_defined_literal 2006 : diag::warn_cxx11_compat_reserved_user_defined_literal) 2007 << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " "); 2008 return CurPtr; 2009 } 2010 2011 // C++11 [lex.ext]p10, [usrlit.suffix]p1: A program containing a ud-suffix 2012 // that does not start with an underscore is ill-formed. As a conforming 2013 // extension, we treat all such suffixes as if they had whitespace before 2014 // them. We assume a suffix beginning with a UCN or UTF-8 character is more 2015 // likely to be a ud-suffix than a macro, however, and accept that. 2016 if (!Consumed) { 2017 bool IsUDSuffix = false; 2018 if (C == '_') 2019 IsUDSuffix = true; 2020 else if (IsStringLiteral && LangOpts.CPlusPlus14) { 2021 // In C++1y, we need to look ahead a few characters to see if this is a 2022 // valid suffix for a string literal or a numeric literal (this could be 2023 // the 'operator""if' defining a numeric literal operator). 2024 const unsigned MaxStandardSuffixLength = 3; 2025 char Buffer[MaxStandardSuffixLength] = { C }; 2026 unsigned Consumed = Size; 2027 unsigned Chars = 1; 2028 while (true) { 2029 unsigned NextSize; 2030 char Next = getCharAndSizeNoWarn(CurPtr + Consumed, NextSize, LangOpts); 2031 if (!isAsciiIdentifierContinue(Next)) { 2032 // End of suffix. Check whether this is on the allowed list. 2033 const StringRef CompleteSuffix(Buffer, Chars); 2034 IsUDSuffix = 2035 StringLiteralParser::isValidUDSuffix(LangOpts, CompleteSuffix); 2036 break; 2037 } 2038 2039 if (Chars == MaxStandardSuffixLength) 2040 // Too long: can't be a standard suffix. 2041 break; 2042 2043 Buffer[Chars++] = Next; 2044 Consumed += NextSize; 2045 } 2046 } 2047 2048 if (!IsUDSuffix) { 2049 if (!isLexingRawMode()) 2050 Diag(CurPtr, LangOpts.MSVCCompat 2051 ? diag::ext_ms_reserved_user_defined_literal 2052 : diag::ext_reserved_user_defined_literal) 2053 << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " "); 2054 return CurPtr; 2055 } 2056 2057 CurPtr = ConsumeChar(CurPtr, Size, Result); 2058 } 2059 2060 Result.setFlag(Token::HasUDSuffix); 2061 while (true) { 2062 C = getCharAndSize(CurPtr, Size); 2063 if (isAsciiIdentifierContinue(C)) { 2064 CurPtr = ConsumeChar(CurPtr, Size, Result); 2065 } else if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result)) { 2066 } else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr)) { 2067 } else 2068 break; 2069 } 2070 2071 return CurPtr; 2072 } 2073 2074 /// LexStringLiteral - Lex the remainder of a string literal, after having lexed 2075 /// either " or L" or u8" or u" or U". 
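/// For illustration only: given u8"text", the prefix and opening quote have
/// already been consumed, so CurPtr points at 't' and this routine scans to
/// the closing quote, handling escapes and unterminated-literal recovery.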
2076 bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr, 2077 tok::TokenKind Kind) { 2078 const char *AfterQuote = CurPtr; 2079 // Does this string contain the \0 character? 2080 const char *NulCharacter = nullptr; 2081 2082 if (!isLexingRawMode() && 2083 (Kind == tok::utf8_string_literal || 2084 Kind == tok::utf16_string_literal || 2085 Kind == tok::utf32_string_literal)) 2086 Diag(BufferPtr, LangOpts.CPlusPlus ? diag::warn_cxx98_compat_unicode_literal 2087 : diag::warn_c99_compat_unicode_literal); 2088 2089 char C = getAndAdvanceChar(CurPtr, Result); 2090 while (C != '"') { 2091 // Skip escaped characters. Escaped newlines will already be processed by 2092 // getAndAdvanceChar. 2093 if (C == '\\') 2094 C = getAndAdvanceChar(CurPtr, Result); 2095 2096 if (C == '\n' || C == '\r' || // Newline. 2097 (C == 0 && CurPtr-1 == BufferEnd)) { // End of file. 2098 if (!isLexingRawMode() && !LangOpts.AsmPreprocessor) 2099 Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 1; 2100 FormTokenWithChars(Result, CurPtr-1, tok::unknown); 2101 return true; 2102 } 2103 2104 if (C == 0) { 2105 if (isCodeCompletionPoint(CurPtr-1)) { 2106 if (ParsingFilename) 2107 codeCompleteIncludedFile(AfterQuote, CurPtr - 1, /*IsAngled=*/false); 2108 else 2109 PP->CodeCompleteNaturalLanguage(); 2110 FormTokenWithChars(Result, CurPtr - 1, tok::unknown); 2111 cutOffLexing(); 2112 return true; 2113 } 2114 2115 NulCharacter = CurPtr-1; 2116 } 2117 C = getAndAdvanceChar(CurPtr, Result); 2118 } 2119 2120 // If we are in C++11, lex the optional ud-suffix. 2121 if (LangOpts.CPlusPlus) 2122 CurPtr = LexUDSuffix(Result, CurPtr, true); 2123 2124 // If a nul character existed in the string, warn about it. 2125 if (NulCharacter && !isLexingRawMode()) 2126 Diag(NulCharacter, diag::null_in_char_or_string) << 1; 2127 2128 // Update the location of the token as well as the BufferPtr instance var. 2129 const char *TokStart = BufferPtr; 2130 FormTokenWithChars(Result, CurPtr, Kind); 2131 Result.setLiteralData(TokStart); 2132 return true; 2133 } 2134 2135 /// LexRawStringLiteral - Lex the remainder of a raw string literal, after 2136 /// having lexed R", LR", u8R", uR", or UR". 2137 bool Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr, 2138 tok::TokenKind Kind) { 2139 // This function doesn't use getAndAdvanceChar because C++0x [lex.pptoken]p3: 2140 // Between the initial and final double quote characters of the raw string, 2141 // any transformations performed in phases 1 and 2 (trigraphs, 2142 // universal-character-names, and line splicing) are reverted. 2143 2144 if (!isLexingRawMode()) 2145 Diag(BufferPtr, diag::warn_cxx98_compat_raw_string_literal); 2146 2147 unsigned PrefixLen = 0; 2148 2149 while (PrefixLen != 16 && isRawStringDelimBody(CurPtr[PrefixLen])) 2150 ++PrefixLen; 2151 2152 // If the last character was not a '(', then we didn't lex a valid delimiter. 2153 if (CurPtr[PrefixLen] != '(') { 2154 if (!isLexingRawMode()) { 2155 const char *PrefixEnd = &CurPtr[PrefixLen]; 2156 if (PrefixLen == 16) { 2157 Diag(PrefixEnd, diag::err_raw_delim_too_long); 2158 } else { 2159 Diag(PrefixEnd, diag::err_invalid_char_raw_delim) 2160 << StringRef(PrefixEnd, 1); 2161 } 2162 } 2163 2164 // Search for the next '"' in hopes of salvaging the lexer. Unfortunately, 2165 // it's possible the '"' was intended to be part of the raw string, but 2166 // there's not much we can do about that. 
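    // (Informal example: in R" (x) " the leading space cannot appear in a
    // raw-string delimiter, so we abandon the delimiter and resynchronize at
    // the next '"' below.)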
2167 while (true) { 2168 char C = *CurPtr++; 2169 2170 if (C == '"') 2171 break; 2172 if (C == 0 && CurPtr-1 == BufferEnd) { 2173 --CurPtr; 2174 break; 2175 } 2176 } 2177 2178 FormTokenWithChars(Result, CurPtr, tok::unknown); 2179 return true; 2180 } 2181 2182 // Save prefix and move CurPtr past it 2183 const char *Prefix = CurPtr; 2184 CurPtr += PrefixLen + 1; // skip over prefix and '(' 2185 2186 while (true) { 2187 char C = *CurPtr++; 2188 2189 if (C == ')') { 2190 // Check for prefix match and closing quote. 2191 if (strncmp(CurPtr, Prefix, PrefixLen) == 0 && CurPtr[PrefixLen] == '"') { 2192 CurPtr += PrefixLen + 1; // skip over prefix and '"' 2193 break; 2194 } 2195 } else if (C == 0 && CurPtr-1 == BufferEnd) { // End of file. 2196 if (!isLexingRawMode()) 2197 Diag(BufferPtr, diag::err_unterminated_raw_string) 2198 << StringRef(Prefix, PrefixLen); 2199 FormTokenWithChars(Result, CurPtr-1, tok::unknown); 2200 return true; 2201 } 2202 } 2203 2204 // If we are in C++11, lex the optional ud-suffix. 2205 if (LangOpts.CPlusPlus) 2206 CurPtr = LexUDSuffix(Result, CurPtr, true); 2207 2208 // Update the location of token as well as BufferPtr. 2209 const char *TokStart = BufferPtr; 2210 FormTokenWithChars(Result, CurPtr, Kind); 2211 Result.setLiteralData(TokStart); 2212 return true; 2213 } 2214 2215 /// LexAngledStringLiteral - Lex the remainder of an angled string literal, 2216 /// after having lexed the '<' character. This is used for #include filenames. 2217 bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) { 2218 // Does this string contain the \0 character? 2219 const char *NulCharacter = nullptr; 2220 const char *AfterLessPos = CurPtr; 2221 char C = getAndAdvanceChar(CurPtr, Result); 2222 while (C != '>') { 2223 // Skip escaped characters. Escaped newlines will already be processed by 2224 // getAndAdvanceChar. 2225 if (C == '\\') 2226 C = getAndAdvanceChar(CurPtr, Result); 2227 2228 if (isVerticalWhitespace(C) || // Newline. 2229 (C == 0 && (CurPtr - 1 == BufferEnd))) { // End of file. 2230 // If the filename is unterminated, then it must just be a lone < 2231 // character. Return this as such. 2232 FormTokenWithChars(Result, AfterLessPos, tok::less); 2233 return true; 2234 } 2235 2236 if (C == 0) { 2237 if (isCodeCompletionPoint(CurPtr - 1)) { 2238 codeCompleteIncludedFile(AfterLessPos, CurPtr - 1, /*IsAngled=*/true); 2239 cutOffLexing(); 2240 FormTokenWithChars(Result, CurPtr - 1, tok::unknown); 2241 return true; 2242 } 2243 NulCharacter = CurPtr-1; 2244 } 2245 C = getAndAdvanceChar(CurPtr, Result); 2246 } 2247 2248 // If a nul character existed in the string, warn about it. 2249 if (NulCharacter && !isLexingRawMode()) 2250 Diag(NulCharacter, diag::null_in_char_or_string) << 1; 2251 2252 // Update the location of token as well as BufferPtr. 2253 const char *TokStart = BufferPtr; 2254 FormTokenWithChars(Result, CurPtr, tok::header_name); 2255 Result.setLiteralData(TokStart); 2256 return true; 2257 } 2258 2259 void Lexer::codeCompleteIncludedFile(const char *PathStart, 2260 const char *CompletionPoint, 2261 bool IsAngled) { 2262 // Completion only applies to the filename, after the last slash. 2263 StringRef PartialPath(PathStart, CompletionPoint - PathStart); 2264 llvm::StringRef SlashChars = LangOpts.MSVCCompat ? "/\\" : "/"; 2265 auto Slash = PartialPath.find_last_of(SlashChars); 2266 StringRef Dir = 2267 (Slash == StringRef::npos) ? "" : PartialPath.take_front(Slash); 2268 const char *StartOfFilename = 2269 (Slash == StringRef::npos) ? 
PathStart : PathStart + Slash + 1; 2270 // Code completion filter range is the filename only, up to completion point. 2271 PP->setCodeCompletionIdentifierInfo(&PP->getIdentifierTable().get( 2272 StringRef(StartOfFilename, CompletionPoint - StartOfFilename))); 2273 // We should replace the characters up to the closing quote or closest slash, 2274 // if any. 2275 while (CompletionPoint < BufferEnd) { 2276 char Next = *(CompletionPoint + 1); 2277 if (Next == 0 || Next == '\r' || Next == '\n') 2278 break; 2279 ++CompletionPoint; 2280 if (Next == (IsAngled ? '>' : '"')) 2281 break; 2282 if (SlashChars.contains(Next)) 2283 break; 2284 } 2285 2286 PP->setCodeCompletionTokenRange( 2287 FileLoc.getLocWithOffset(StartOfFilename - BufferStart), 2288 FileLoc.getLocWithOffset(CompletionPoint - BufferStart)); 2289 PP->CodeCompleteIncludedFile(Dir, IsAngled); 2290 } 2291 2292 /// LexCharConstant - Lex the remainder of a character constant, after having 2293 /// lexed either ' or L' or u8' or u' or U'. 2294 bool Lexer::LexCharConstant(Token &Result, const char *CurPtr, 2295 tok::TokenKind Kind) { 2296 // Does this character contain the \0 character? 2297 const char *NulCharacter = nullptr; 2298 2299 if (!isLexingRawMode()) { 2300 if (Kind == tok::utf16_char_constant || Kind == tok::utf32_char_constant) 2301 Diag(BufferPtr, LangOpts.CPlusPlus 2302 ? diag::warn_cxx98_compat_unicode_literal 2303 : diag::warn_c99_compat_unicode_literal); 2304 else if (Kind == tok::utf8_char_constant) 2305 Diag(BufferPtr, diag::warn_cxx14_compat_u8_character_literal); 2306 } 2307 2308 char C = getAndAdvanceChar(CurPtr, Result); 2309 if (C == '\'') { 2310 if (!isLexingRawMode() && !LangOpts.AsmPreprocessor) 2311 Diag(BufferPtr, diag::ext_empty_character); 2312 FormTokenWithChars(Result, CurPtr, tok::unknown); 2313 return true; 2314 } 2315 2316 while (C != '\'') { 2317 // Skip escaped characters. 2318 if (C == '\\') 2319 C = getAndAdvanceChar(CurPtr, Result); 2320 2321 if (C == '\n' || C == '\r' || // Newline. 2322 (C == 0 && CurPtr-1 == BufferEnd)) { // End of file. 2323 if (!isLexingRawMode() && !LangOpts.AsmPreprocessor) 2324 Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 0; 2325 FormTokenWithChars(Result, CurPtr-1, tok::unknown); 2326 return true; 2327 } 2328 2329 if (C == 0) { 2330 if (isCodeCompletionPoint(CurPtr-1)) { 2331 PP->CodeCompleteNaturalLanguage(); 2332 FormTokenWithChars(Result, CurPtr-1, tok::unknown); 2333 cutOffLexing(); 2334 return true; 2335 } 2336 2337 NulCharacter = CurPtr-1; 2338 } 2339 C = getAndAdvanceChar(CurPtr, Result); 2340 } 2341 2342 // If we are in C++11, lex the optional ud-suffix. 2343 if (LangOpts.CPlusPlus) 2344 CurPtr = LexUDSuffix(Result, CurPtr, false); 2345 2346 // If a nul character existed in the character, warn about it. 2347 if (NulCharacter && !isLexingRawMode()) 2348 Diag(NulCharacter, diag::null_in_char_or_string) << 0; 2349 2350 // Update the location of token as well as BufferPtr. 2351 const char *TokStart = BufferPtr; 2352 FormTokenWithChars(Result, CurPtr, Kind); 2353 Result.setLiteralData(TokStart); 2354 return true; 2355 } 2356 2357 /// SkipWhitespace - Efficiently skip over a series of whitespace characters. 2358 /// Update BufferPtr to point to the next non-whitespace character and return. 2359 /// 2360 /// This method forms a token and returns true if KeepWhitespaceMode is enabled. 2361 bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr, 2362 bool &TokAtPhysicalStartOfLine) { 2363 // Whitespace - Skip it, then return the token after the whitespace. 
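  // (Note, informally: CurPtr[-1] is the whitespace character that brought us
  // here, so after consuming the '\n' in "a\n  b" we record that newline and
  // then eat the run of spaces before 'b'.)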
2364   bool SawNewline = isVerticalWhitespace(CurPtr[-1]);
2365
2366   unsigned char Char = *CurPtr;
2367
2368   const char *lastNewLine = nullptr;
2369   auto setLastNewLine = [&](const char *Ptr) {
2370     lastNewLine = Ptr;
2371     if (!NewLinePtr)
2372       NewLinePtr = Ptr;
2373   };
2374   if (SawNewline)
2375     setLastNewLine(CurPtr - 1);
2376
2377   // Skip consecutive spaces efficiently.
2378   while (true) {
2379     // Skip horizontal whitespace very aggressively.
2380     while (isHorizontalWhitespace(Char))
2381       Char = *++CurPtr;
2382
2383     // Otherwise if we have something other than whitespace, we're done.
2384     if (!isVerticalWhitespace(Char))
2385       break;
2386
2387     if (ParsingPreprocessorDirective) {
2388       // End of preprocessor directive line, let LexTokenInternal handle this.
2389       BufferPtr = CurPtr;
2390       return false;
2391     }
2392
2393     // OK, but handle newline.
2394     if (*CurPtr == '\n')
2395       setLastNewLine(CurPtr);
2396     SawNewline = true;
2397     Char = *++CurPtr;
2398   }
2399
2400   // If the client wants us to return whitespace, return it now.
2401   if (isKeepWhitespaceMode()) {
2402     FormTokenWithChars(Result, CurPtr, tok::unknown);
2403     if (SawNewline) {
2404       IsAtStartOfLine = true;
2405       IsAtPhysicalStartOfLine = true;
2406     }
2407     // FIXME: The next token will not have LeadingSpace set.
2408     return true;
2409   }
2410
2411   // If this isn't immediately after a newline, there is leading space.
2412   char PrevChar = CurPtr[-1];
2413   bool HasLeadingSpace = !isVerticalWhitespace(PrevChar);
2414
2415   Result.setFlagValue(Token::LeadingSpace, HasLeadingSpace);
2416   if (SawNewline) {
2417     Result.setFlag(Token::StartOfLine);
2418     TokAtPhysicalStartOfLine = true;
2419
2420     if (NewLinePtr && lastNewLine && NewLinePtr != lastNewLine && PP) {
2421       if (auto *Handler = PP->getEmptylineHandler())
2422         Handler->HandleEmptyline(SourceRange(getSourceLocation(NewLinePtr + 1),
2423                                              getSourceLocation(lastNewLine)));
2424     }
2425   }
2426
2427   BufferPtr = CurPtr;
2428   return false;
2429 }
2430
2431 /// We have just read the // characters from input. Skip until we find the
2432 /// newline character that terminates the comment. Then update BufferPtr and
2433 /// return.
2434 ///
2435 /// If we're in KeepCommentMode or any CommentHandler has inserted
2436 /// some tokens, this will store the first token and return true.
2437 bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
2438                             bool &TokAtPhysicalStartOfLine) {
2439   // If Line comments aren't explicitly enabled for this language, emit an
2440   // extension warning.
2441   if (!LineComment) {
2442     if (!isLexingRawMode()) // There's no PP in raw mode, so can't emit diags.
2443       Diag(BufferPtr, diag::ext_line_comment);
2444
2445     // Mark them enabled so we only emit one warning for this translation
2446     // unit.
2447     LineComment = true;
2448   }
2449
2450   // Scan over the body of the comment. The common case, when scanning, is that
2451   // the comment contains normal ascii characters with nothing interesting in
2452   // them. As such, optimize for this case with the inner loop.
2453   //
2454   // This loop terminates with CurPtr pointing at the newline (or end of buffer)
2455   // character that ends the line comment.
2456
2457   // C++23 [lex.phases] p1
2458   // Diagnose invalid UTF-8 if the corresponding warning is enabled, emitting a
2459   // diagnostic only once per entire ill-formed subsequence to avoid
2460   // emitting too many diagnostics (see http://unicode.org/review/pr-121.html).
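  // (Informal example: a line comment containing the two ill-formed bytes
  // "\xC0\x80" gets a single warn_invalid_utf8_in_comment for the whole
  // subsequence rather than one per byte; the flag below tracks that state.)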
2461   bool UnicodeDecodingAlreadyDiagnosed = false;
2462
2463   char C;
2464   while (true) {
2465     C = *CurPtr;
2466     // Skip over characters in the fast loop.
2467     while (isASCII(C) && C != 0 &&    // Potentially EOF.
2468            C != '\n' && C != '\r') { // Newline or DOS-style newline.
2469       C = *++CurPtr;
2470       UnicodeDecodingAlreadyDiagnosed = false;
2471     }
2472
2473     if (!isASCII(C)) {
2474       unsigned Length = llvm::getUTF8SequenceSize(
2475           (const llvm::UTF8 *)CurPtr, (const llvm::UTF8 *)BufferEnd);
2476       if (Length == 0) {
2477         if (!UnicodeDecodingAlreadyDiagnosed && !isLexingRawMode())
2478           Diag(CurPtr, diag::warn_invalid_utf8_in_comment);
2479         UnicodeDecodingAlreadyDiagnosed = true;
2480         ++CurPtr;
2481       } else {
2482         UnicodeDecodingAlreadyDiagnosed = false;
2483         CurPtr += Length;
2484       }
2485       continue;
2486     }
2487
2488     const char *NextLine = CurPtr;
2489     if (C != 0) {
2490       // We found a newline, see if it's escaped.
2491       const char *EscapePtr = CurPtr-1;
2492       bool HasSpace = false;
2493       while (isHorizontalWhitespace(*EscapePtr)) { // Skip whitespace.
2494         --EscapePtr;
2495         HasSpace = true;
2496       }
2497
2498       if (*EscapePtr == '\\')
2499         // Escaped newline.
2500         CurPtr = EscapePtr;
2501       else if (EscapePtr[0] == '/' && EscapePtr[-1] == '?' &&
2502                EscapePtr[-2] == '?' && LangOpts.Trigraphs)
2503         // Trigraph-escaped newline.
2504         CurPtr = EscapePtr-2;
2505       else
2506         break; // This is a newline, we're done.
2507
2508       // If there was space between the backslash and newline, warn about it.
2509       if (HasSpace && !isLexingRawMode())
2510         Diag(EscapePtr, diag::backslash_newline_space);
2511     }
2512
2513     // Otherwise, this is a hard case. Fall back on getAndAdvanceChar to
2514     // properly decode the character. Read it in raw mode to avoid emitting
2515     // diagnostics about things like trigraphs. If we see an escaped newline,
2516     // we'll handle it below.
2517     const char *OldPtr = CurPtr;
2518     bool OldRawMode = isLexingRawMode();
2519     LexingRawMode = true;
2520     C = getAndAdvanceChar(CurPtr, Result);
2521     LexingRawMode = OldRawMode;
2522
2523     // If we read only one character, then no special handling is needed.
2524     // We're done and can skip forward to the newline.
2525     if (C != 0 && CurPtr == OldPtr+1) {
2526       CurPtr = NextLine;
2527       break;
2528     }
2529
2530     // If we read multiple characters, and one of those characters was a \r or
2531     // \n, then we had an escaped newline within the comment. Emit diagnostic
2532     // unless the next line is also a // comment.
2533     if (CurPtr != OldPtr + 1 && C != '/' &&
2534         (CurPtr == BufferEnd + 1 || CurPtr[0] != '/')) {
2535       for (; OldPtr != CurPtr; ++OldPtr)
2536         if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
2537           // Okay, we found a // comment that ends in a newline, if the next
2538           // line is also a // comment, but has spaces, don't emit a diagnostic.
2539           if (isWhitespace(C)) {
2540             const char *ForwardPtr = CurPtr;
2541             while (isWhitespace(*ForwardPtr)) // Skip whitespace.
2542               ++ForwardPtr;
2543             if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
2544               break;
2545           }
2546
2547           if (!isLexingRawMode())
2548             Diag(OldPtr-1, diag::ext_multi_line_line_comment);
2549           break;
2550         }
2551     }
2552
2553     if (C == '\r' || C == '\n' || CurPtr == BufferEnd + 1) {
2554       --CurPtr;
2555       break;
2556     }
2557
2558     if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
2559       PP->CodeCompleteNaturalLanguage();
2560       cutOffLexing();
2561       return false;
2562     }
2563   }
2564
2565   // Found but did not consume the newline. Notify comment handlers about the
2566   // comment unless we're in a #if 0 block.
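  // (Informal note: HandleComment returns true when a registered
  // CommentHandler inserted tokens that must be surfaced to the caller; in
  // that case the comment cannot simply be skipped here.)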
2567   if (PP && !isLexingRawMode() &&
2568       PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
2569                                             getSourceLocation(CurPtr)))) {
2570     BufferPtr = CurPtr;
2571     return true; // A token has to be returned.
2572   }
2573
2574   // If we are returning comments as tokens, return this comment as a token.
2575   if (inKeepCommentMode())
2576     return SaveLineComment(Result, CurPtr);
2577
2578   // If we are inside a preprocessor directive and we see the end of line,
2579   // return immediately, so that the lexer can return this as an EOD token.
2580   if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
2581     BufferPtr = CurPtr;
2582     return false;
2583   }
2584
2585   // Otherwise, eat the \n character. We don't care if this is a \n\r or
2586   // \r\n sequence. This is an efficiency hack (because we know the \n can't
2587   // contribute to another token), it isn't needed for correctness. Note that
2588   // this is ok even in KeepWhitespaceMode, because we would have returned the
2589   // comment above in that mode.
2590   NewLinePtr = CurPtr++;
2591
2592   // The next returned token is at the start of the line.
2593   Result.setFlag(Token::StartOfLine);
2594   TokAtPhysicalStartOfLine = true;
2595   // No leading whitespace seen so far.
2596   Result.clearFlag(Token::LeadingSpace);
2597   BufferPtr = CurPtr;
2598   return false;
2599 }
2600
2601 /// If in save-comment mode, package up this Line comment in an appropriate
2602 /// way and return it.
2603 bool Lexer::SaveLineComment(Token &Result, const char *CurPtr) {
2604   // If we're not in a preprocessor directive, just return the // comment
2605   // directly.
2606   FormTokenWithChars(Result, CurPtr, tok::comment);
2607
2608   if (!ParsingPreprocessorDirective || LexingRawMode)
2609     return true;
2610
2611   // If this Line-style comment is in a macro definition, transmogrify it into
2612   // a C-style block comment.
2613   bool Invalid = false;
2614   std::string Spelling = PP->getSpelling(Result, &Invalid);
2615   if (Invalid)
2616     return true;
2617
2618   assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not line comment?");
2619   Spelling[1] = '*'; // Change prefix to "/*".
2620   Spelling += "*/";  // add suffix.
2621
2622   Result.setKind(tok::comment);
2623   PP->CreateString(Spelling, Result,
2624                    Result.getLocation(), Result.getLocation());
2625   return true;
2626 }
2627
2628 /// isEndOfBlockCommentWithEscapedNewLine - Return true if the specified
2629 /// newline character (either \\n or \\r) is part of an escaped newline
2630 /// sequence that terminates the enclosing block comment. Issue a diagnostic
2631 /// if so. We know that the newline is inside of a block comment.
2632 static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr, Lexer *L,
2633                                                   bool Trigraphs) {
2633   assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');
2634
2635   // Position of the first trigraph in the ending sequence.
2636   const char *TrigraphPos = nullptr;
2637   // Position of the first whitespace after a '\' in the ending sequence.
2638   const char *SpacePos = nullptr;
2639
2640   while (true) {
2641     // Back up off the newline.
2642     --CurPtr;
2643
2644     // If this is a two-character newline sequence, skip the other character.
2645     if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
2646       // \n\n or \r\r -> not escaped newline.
2647       if (CurPtr[0] == CurPtr[1])
2648         return false;
2649       // \n\r or \r\n -> skip the newline.
2650       --CurPtr;
2651     }
2652
2653     // If we have horizontal whitespace, skip over it. We allow whitespace
2654     // between the slash and newline.
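    // (Illustrative case: a comment ending in '*', '\', newline, '/' splices
    // to "*/" after line continuation, so the comment really does end on the
    // next line; spaces between the backslash and the newline are tolerated
    // here and warned about further below.)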
2655 while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) { 2656 SpacePos = CurPtr; 2657 --CurPtr; 2658 } 2659 2660 // If we have a slash, this is an escaped newline. 2661 if (*CurPtr == '\\') { 2662 --CurPtr; 2663 } else if (CurPtr[0] == '/' && CurPtr[-1] == '?' && CurPtr[-2] == '?') { 2664 // This is a trigraph encoding of a slash. 2665 TrigraphPos = CurPtr - 2; 2666 CurPtr -= 3; 2667 } else { 2668 return false; 2669 } 2670 2671 // If the character preceding the escaped newline is a '*', then after line 2672 // splicing we have a '*/' ending the comment. 2673 if (*CurPtr == '*') 2674 break; 2675 2676 if (*CurPtr != '\n' && *CurPtr != '\r') 2677 return false; 2678 } 2679 2680 if (TrigraphPos) { 2681 // If no trigraphs are enabled, warn that we ignored this trigraph and 2682 // ignore this * character. 2683 if (!Trigraphs) { 2684 if (!L->isLexingRawMode()) 2685 L->Diag(TrigraphPos, diag::trigraph_ignored_block_comment); 2686 return false; 2687 } 2688 if (!L->isLexingRawMode()) 2689 L->Diag(TrigraphPos, diag::trigraph_ends_block_comment); 2690 } 2691 2692 // Warn about having an escaped newline between the */ characters. 2693 if (!L->isLexingRawMode()) 2694 L->Diag(CurPtr + 1, diag::escaped_newline_block_comment_end); 2695 2696 // If there was space between the backslash and newline, warn about it. 2697 if (SpacePos && !L->isLexingRawMode()) 2698 L->Diag(SpacePos, diag::backslash_newline_space); 2699 2700 return true; 2701 } 2702 2703 #ifdef __SSE2__ 2704 #include <emmintrin.h> 2705 #elif __ALTIVEC__ 2706 #include <altivec.h> 2707 #undef bool 2708 #endif 2709 2710 /// We have just read from input the / and * characters that started a comment. 2711 /// Read until we find the * and / characters that terminate the comment. 2712 /// Note that we don't bother decoding trigraphs or escaped newlines in block 2713 /// comments, because they cannot cause the comment to end. The only thing 2714 /// that can happen is the comment could end with an escaped newline between 2715 /// the terminating * and /. 2716 /// 2717 /// If we're in KeepCommentMode or any CommentHandler has inserted 2718 /// some tokens, this will store the first token and return true. 2719 bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr, 2720 bool &TokAtPhysicalStartOfLine) { 2721 // Scan one character past where we should, looking for a '/' character. Once 2722 // we find it, check to see if it was preceded by a *. This common 2723 // optimization helps people who like to put a lot of * characters in their 2724 // comments. 2725 2726 // The first character we get with newlines and trigraphs skipped to handle 2727 // the degenerate /*/ case below correctly if the * has an escaped newline 2728 // after it. 2729 unsigned CharSize; 2730 unsigned char C = getCharAndSize(CurPtr, CharSize); 2731 CurPtr += CharSize; 2732 if (C == 0 && CurPtr == BufferEnd+1) { 2733 if (!isLexingRawMode()) 2734 Diag(BufferPtr, diag::err_unterminated_block_comment); 2735 --CurPtr; 2736 2737 // KeepWhitespaceMode should return this broken comment as a token. Since 2738 // it isn't a well formed comment, just return it as an 'unknown' token. 2739 if (isKeepWhitespaceMode()) { 2740 FormTokenWithChars(Result, CurPtr, tok::unknown); 2741 return true; 2742 } 2743 2744 BufferPtr = CurPtr; 2745 return false; 2746 } 2747 2748 // Check to see if the first character after the '/*' is another /. If so, 2749 // then this slash does not end the block comment, it is part of it. 
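  // (For example, informally: in "/*/ text */" the '/' right after the opener
  // is comment text, not a terminator, so it is consumed here and the comment
  // ends only at the later "*/".)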
2750   if (C == '/')
2751     C = *CurPtr++;
2752
2753   // C++23 [lex.phases] p1
2754   // Diagnose invalid UTF-8 if the corresponding warning is enabled, emitting a
2755   // diagnostic only once per entire ill-formed subsequence to avoid
2756   // emitting too many diagnostics (see http://unicode.org/review/pr-121.html).
2757   bool UnicodeDecodingAlreadyDiagnosed = false;
2758
2759   while (true) {
2760     // Skip over all non-interesting characters until we find end of buffer or a
2761     // (probably ending) '/' character.
2762     if (CurPtr + 24 < BufferEnd &&
2763         // If there is a code-completion point avoid the fast scan because it
2764         // doesn't check for '\0'.
2765         !(PP && PP->getCodeCompletionFileLoc() == FileLoc)) {
2766       // While not aligned to a 16-byte boundary.
2767       while (C != '/' && (intptr_t)CurPtr % 16 != 0) {
2768         if (!isASCII(C))
2769           goto MultiByteUTF8;
2770         C = *CurPtr++;
2771       }
2772       if (C == '/') goto FoundSlash;
2773
2774 #ifdef __SSE2__
2775       __m128i Slashes = _mm_set1_epi8('/');
2776       while (CurPtr + 16 < BufferEnd) {
2777         int Mask = _mm_movemask_epi8(*(const __m128i *)CurPtr);
2778         if (LLVM_UNLIKELY(Mask != 0)) {
2779           goto MultiByteUTF8;
2780         }
2781         // look for slashes
2782         int cmp = _mm_movemask_epi8(_mm_cmpeq_epi8(*(const __m128i*)CurPtr,
2783                                                    Slashes));
2784         if (cmp != 0) {
2785           // Adjust the pointer to point directly after the first slash. It's
2786           // not necessary to set C here, it will be overwritten at the end of
2787           // the outer loop.
2788           CurPtr += llvm::countr_zero<unsigned>(cmp) + 1;
2789           goto FoundSlash;
2790         }
2791         CurPtr += 16;
2792       }
2793 #elif __ALTIVEC__
2794       __vector unsigned char LongUTF = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
2795                                         0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
2796                                         0x80, 0x80, 0x80, 0x80};
2797       __vector unsigned char Slashes = {
2798         '/', '/', '/', '/', '/', '/', '/', '/',
2799         '/', '/', '/', '/', '/', '/', '/', '/'
2800       };
2801       while (CurPtr + 16 < BufferEnd) {
2802         if (LLVM_UNLIKELY(
2803                 vec_any_ge(*(const __vector unsigned char *)CurPtr, LongUTF)))
2804           goto MultiByteUTF8;
2805         if (vec_any_eq(*(const __vector unsigned char *)CurPtr, Slashes)) {
2806           break;
2807         }
2808         CurPtr += 16;
2809       }
2810
2811 #else
2812       while (CurPtr + 16 < BufferEnd) {
2813         bool HasNonASCII = false;
2814         for (unsigned I = 0; I < 16; ++I)
2815           HasNonASCII |= !isASCII(CurPtr[I]);
2816
2817         if (LLVM_UNLIKELY(HasNonASCII))
2818           goto MultiByteUTF8;
2819
2820         bool HasSlash = false;
2821         for (unsigned I = 0; I < 16; ++I)
2822           HasSlash |= CurPtr[I] == '/';
2823         if (HasSlash)
2824           break;
2825         CurPtr += 16;
2826       }
2827 #endif
2828
2829       // It has to be one of the bytes scanned, increment to it and read one.
2830       C = *CurPtr++;
2831     }
2832
2833     // Loop to scan the remainder, warning on invalid UTF-8
2834     // if the corresponding warning is enabled, emitting a diagnostic only once
2835     // per sequence that cannot be decoded.
2836     while (C != '/' && C != '\0') {
2837       if (isASCII(C)) {
2838         UnicodeDecodingAlreadyDiagnosed = false;
2839         C = *CurPtr++;
2840         continue;
2841       }
2842 MultiByteUTF8:
2843       // CurPtr is 1 code unit past C, so to decode
2844       // the codepoint, we need to read from the previous position.
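      // (getUTF8SequenceSize returns 0 when the bytes starting here do not
      // form a valid UTF-8 sequence; that is what drives the
      // once-per-subsequence warning below.)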
2845 unsigned Length = llvm::getUTF8SequenceSize( 2846 (const llvm::UTF8 *)CurPtr - 1, (const llvm::UTF8 *)BufferEnd); 2847 if (Length == 0) { 2848 if (!UnicodeDecodingAlreadyDiagnosed && !isLexingRawMode()) 2849 Diag(CurPtr - 1, diag::warn_invalid_utf8_in_comment); 2850 UnicodeDecodingAlreadyDiagnosed = true; 2851 } else { 2852 UnicodeDecodingAlreadyDiagnosed = false; 2853 CurPtr += Length - 1; 2854 } 2855 C = *CurPtr++; 2856 } 2857 2858 if (C == '/') { 2859 FoundSlash: 2860 if (CurPtr[-2] == '*') // We found the final */. We're done! 2861 break; 2862 2863 if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) { 2864 if (isEndOfBlockCommentWithEscapedNewLine(CurPtr - 2, this, 2865 LangOpts.Trigraphs)) { 2866 // We found the final */, though it had an escaped newline between the 2867 // * and /. We're done! 2868 break; 2869 } 2870 } 2871 if (CurPtr[0] == '*' && CurPtr[1] != '/') { 2872 // If this is a /* inside of the comment, emit a warning. Don't do this 2873 // if this is a /*/, which will end the comment. This misses cases with 2874 // embedded escaped newlines, but oh well. 2875 if (!isLexingRawMode()) 2876 Diag(CurPtr-1, diag::warn_nested_block_comment); 2877 } 2878 } else if (C == 0 && CurPtr == BufferEnd+1) { 2879 if (!isLexingRawMode()) 2880 Diag(BufferPtr, diag::err_unterminated_block_comment); 2881 // Note: the user probably forgot a */. We could continue immediately 2882 // after the /*, but this would involve lexing a lot of what really is the 2883 // comment, which surely would confuse the parser. 2884 --CurPtr; 2885 2886 // KeepWhitespaceMode should return this broken comment as a token. Since 2887 // it isn't a well formed comment, just return it as an 'unknown' token. 2888 if (isKeepWhitespaceMode()) { 2889 FormTokenWithChars(Result, CurPtr, tok::unknown); 2890 return true; 2891 } 2892 2893 BufferPtr = CurPtr; 2894 return false; 2895 } else if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) { 2896 PP->CodeCompleteNaturalLanguage(); 2897 cutOffLexing(); 2898 return false; 2899 } 2900 2901 C = *CurPtr++; 2902 } 2903 2904 // Notify comment handlers about the comment unless we're in a #if 0 block. 2905 if (PP && !isLexingRawMode() && 2906 PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr), 2907 getSourceLocation(CurPtr)))) { 2908 BufferPtr = CurPtr; 2909 return true; // A token has to be returned. 2910 } 2911 2912 // If we are returning comments as tokens, return this comment as a token. 2913 if (inKeepCommentMode()) { 2914 FormTokenWithChars(Result, CurPtr, tok::comment); 2915 return true; 2916 } 2917 2918 // It is common for the tokens immediately after a /**/ comment to be 2919 // whitespace. Instead of going through the big switch, handle it 2920 // efficiently now. This is safe even in KeepWhitespaceMode because we would 2921 // have already returned above with the comment as a token. 2922 if (isHorizontalWhitespace(*CurPtr)) { 2923 SkipWhitespace(Result, CurPtr+1, TokAtPhysicalStartOfLine); 2924 return false; 2925 } 2926 2927 // Otherwise, just return so that the next character will be lexed as a token. 2928 BufferPtr = CurPtr; 2929 Result.setFlag(Token::LeadingSpace); 2930 return false; 2931 } 2932 2933 //===----------------------------------------------------------------------===// 2934 // Primary Lexing Entry Points 2935 //===----------------------------------------------------------------------===// 2936 2937 /// ReadToEndOfLine - Read the rest of the current preprocessor line as an 2938 /// uninterpreted string. This switches the lexer out of directive mode. 
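/// A typical use, as an informal example: collecting the message text of a
/// #error directive verbatim, with no macro expansion applied.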
2939 void Lexer::ReadToEndOfLine(SmallVectorImpl<char> *Result) { 2940 assert(ParsingPreprocessorDirective && ParsingFilename == false && 2941 "Must be in a preprocessing directive!"); 2942 Token Tmp; 2943 Tmp.startToken(); 2944 2945 // CurPtr - Cache BufferPtr in an automatic variable. 2946 const char *CurPtr = BufferPtr; 2947 while (true) { 2948 char Char = getAndAdvanceChar(CurPtr, Tmp); 2949 switch (Char) { 2950 default: 2951 if (Result) 2952 Result->push_back(Char); 2953 break; 2954 case 0: // Null. 2955 // Found end of file? 2956 if (CurPtr-1 != BufferEnd) { 2957 if (isCodeCompletionPoint(CurPtr-1)) { 2958 PP->CodeCompleteNaturalLanguage(); 2959 cutOffLexing(); 2960 return; 2961 } 2962 2963 // Nope, normal character, continue. 2964 if (Result) 2965 Result->push_back(Char); 2966 break; 2967 } 2968 // FALL THROUGH. 2969 [[fallthrough]]; 2970 case '\r': 2971 case '\n': 2972 // Okay, we found the end of the line. First, back up past the \0, \r, \n. 2973 assert(CurPtr[-1] == Char && "Trigraphs for newline?"); 2974 BufferPtr = CurPtr-1; 2975 2976 // Next, lex the character, which should handle the EOD transition. 2977 Lex(Tmp); 2978 if (Tmp.is(tok::code_completion)) { 2979 if (PP) 2980 PP->CodeCompleteNaturalLanguage(); 2981 Lex(Tmp); 2982 } 2983 assert(Tmp.is(tok::eod) && "Unexpected token!"); 2984 2985 // Finally, we're done; 2986 return; 2987 } 2988 } 2989 } 2990 2991 /// LexEndOfFile - CurPtr points to the end of this file. Handle this 2992 /// condition, reporting diagnostics and handling other edge cases as required. 2993 /// This returns true if Result contains a token, false if PP.Lex should be 2994 /// called again. 2995 bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) { 2996 // If we hit the end of the file while parsing a preprocessor directive, 2997 // end the preprocessor directive first. The next token returned will 2998 // then be the end of file. 2999 if (ParsingPreprocessorDirective) { 3000 // Done parsing the "line". 3001 ParsingPreprocessorDirective = false; 3002 // Update the location of token as well as BufferPtr. 3003 FormTokenWithChars(Result, CurPtr, tok::eod); 3004 3005 // Restore comment saving mode, in case it was disabled for directive. 3006 if (PP) 3007 resetExtendedTokenMode(); 3008 return true; // Have a token. 3009 } 3010 3011 // If we are in raw mode, return this event as an EOF token. Let the caller 3012 // that put us in raw mode handle the event. 3013 if (isLexingRawMode()) { 3014 Result.startToken(); 3015 BufferPtr = BufferEnd; 3016 FormTokenWithChars(Result, BufferEnd, tok::eof); 3017 return true; 3018 } 3019 3020 if (PP->isRecordingPreamble() && PP->isInPrimaryFile()) { 3021 PP->setRecordedPreambleConditionalStack(ConditionalStack); 3022 // If the preamble cuts off the end of a header guard, consider it guarded. 3023 // The guard is valid for the preamble content itself, and for tools the 3024 // most useful answer is "yes, this file has a header guard". 3025 if (!ConditionalStack.empty()) 3026 MIOpt.ExitTopLevelConditional(); 3027 ConditionalStack.clear(); 3028 } 3029 3030 // Issue diagnostics for unterminated #if and missing newline. 3031 3032 // If we are in a #if directive, emit an error. 3033 while (!ConditionalStack.empty()) { 3034 if (PP->getCodeCompletionFileLoc() != FileLoc) 3035 PP->Diag(ConditionalStack.back().IfLoc, 3036 diag::err_pp_unterminated_conditional); 3037 ConditionalStack.pop_back(); 3038 } 3039 3040 // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue 3041 // a pedwarn. 
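  // (Informal illustration: for a file whose last characters are "int x;"
  // with no trailing '\n', the FixIt below suggests appending a newline;
  // under C++11 and later this is merely a compatibility or user-requested
  // warning rather than an extension pedwarn.)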
3042 if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r')) { 3043 DiagnosticsEngine &Diags = PP->getDiagnostics(); 3044 SourceLocation EndLoc = getSourceLocation(BufferEnd); 3045 unsigned DiagID; 3046 3047 if (LangOpts.CPlusPlus11) { 3048 // C++11 [lex.phases] 2.2 p2 3049 // Prefer the C++98 pedantic compatibility warning over the generic, 3050 // non-extension, user-requested "missing newline at EOF" warning. 3051 if (!Diags.isIgnored(diag::warn_cxx98_compat_no_newline_eof, EndLoc)) { 3052 DiagID = diag::warn_cxx98_compat_no_newline_eof; 3053 } else { 3054 DiagID = diag::warn_no_newline_eof; 3055 } 3056 } else { 3057 DiagID = diag::ext_no_newline_eof; 3058 } 3059 3060 Diag(BufferEnd, DiagID) 3061 << FixItHint::CreateInsertion(EndLoc, "\n"); 3062 } 3063 3064 BufferPtr = CurPtr; 3065 3066 // Finally, let the preprocessor handle this. 3067 return PP->HandleEndOfFile(Result, isPragmaLexer()); 3068 } 3069 3070 /// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from 3071 /// the specified lexer will return a tok::l_paren token, 0 if it is something 3072 /// else and 2 if there are no more tokens in the buffer controlled by the 3073 /// lexer. 3074 unsigned Lexer::isNextPPTokenLParen() { 3075 assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?"); 3076 3077 if (isDependencyDirectivesLexer()) { 3078 if (NextDepDirectiveTokenIndex == DepDirectives.front().Tokens.size()) 3079 return 2; 3080 return DepDirectives.front().Tokens[NextDepDirectiveTokenIndex].is( 3081 tok::l_paren); 3082 } 3083 3084 // Switch to 'skipping' mode. This will ensure that we can lex a token 3085 // without emitting diagnostics, disables macro expansion, and will cause EOF 3086 // to return an EOF token instead of popping the include stack. 3087 LexingRawMode = true; 3088 3089 // Save state that can be changed while lexing so that we can restore it. 3090 const char *TmpBufferPtr = BufferPtr; 3091 bool inPPDirectiveMode = ParsingPreprocessorDirective; 3092 bool atStartOfLine = IsAtStartOfLine; 3093 bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine; 3094 bool leadingSpace = HasLeadingSpace; 3095 3096 Token Tok; 3097 Lex(Tok); 3098 3099 // Restore state that may have changed. 3100 BufferPtr = TmpBufferPtr; 3101 ParsingPreprocessorDirective = inPPDirectiveMode; 3102 HasLeadingSpace = leadingSpace; 3103 IsAtStartOfLine = atStartOfLine; 3104 IsAtPhysicalStartOfLine = atPhysicalStartOfLine; 3105 3106 // Restore the lexer back to non-skipping mode. 3107 LexingRawMode = false; 3108 3109 if (Tok.is(tok::eof)) 3110 return 2; 3111 return Tok.is(tok::l_paren); 3112 } 3113 3114 /// Find the end of a version control conflict marker. 3115 static const char *FindConflictEnd(const char *CurPtr, const char *BufferEnd, 3116 ConflictMarkerKind CMK) { 3117 const char *Terminator = CMK == CMK_Perforce ? "<<<<\n" : ">>>>>>>"; 3118 size_t TermLen = CMK == CMK_Perforce ? 5 : 7; 3119 auto RestOfBuffer = StringRef(CurPtr, BufferEnd - CurPtr).substr(TermLen); 3120 size_t Pos = RestOfBuffer.find(Terminator); 3121 while (Pos != StringRef::npos) { 3122 // Must occur at start of line. 
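    // (That is, a terminator found mid-line, e.g. "x >>>>>>> y", is skipped
    // and the search continues from just past it.)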
    if (Pos == 0 ||
        (RestOfBuffer[Pos - 1] != '\r' && RestOfBuffer[Pos - 1] != '\n')) {
      RestOfBuffer = RestOfBuffer.substr(Pos+TermLen);
      Pos = RestOfBuffer.find(Terminator);
      continue;
    }
    return RestOfBuffer.data()+Pos;
  }
  return nullptr;
}

/// IsStartOfConflictMarker - If the specified pointer is the start of a version
/// control conflict marker like '<<<<<<<', recognize it as such, emit an error
/// and recover nicely. This returns true if it is a conflict marker and false
/// if not.
bool Lexer::IsStartOfConflictMarker(const char *CurPtr) {
  // Only a conflict marker if it starts at the beginning of a line.
  if (CurPtr != BufferStart &&
      CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
    return false;

  // Check to see if we have <<<<<<< or >>>>.
  if (!StringRef(CurPtr, BufferEnd - CurPtr).startswith("<<<<<<<") &&
      !StringRef(CurPtr, BufferEnd - CurPtr).startswith(">>>> "))
    return false;

  // If we have a situation where we don't care about conflict markers, ignore
  // it.
  if (CurrentConflictMarkerState || isLexingRawMode())
    return false;

  ConflictMarkerKind Kind = *CurPtr == '<' ? CMK_Normal : CMK_Perforce;

  // Check to see if there is an ending marker somewhere in the buffer at the
  // start of a line to terminate this conflict marker.
  if (FindConflictEnd(CurPtr, BufferEnd, Kind)) {
    // We found a match. We are really in a conflict marker.
    // Diagnose this, and ignore to the end of line.
    Diag(CurPtr, diag::err_conflict_marker);
    CurrentConflictMarkerState = Kind;

    // Skip ahead to the end of line. We know this exists because the
    // end-of-conflict marker starts with \r or \n.
    while (*CurPtr != '\r' && *CurPtr != '\n') {
      assert(CurPtr != BufferEnd && "Didn't find end of line");
      ++CurPtr;
    }
    BufferPtr = CurPtr;
    return true;
  }

  // No end of conflict marker found.
  return false;
}

/// HandleEndOfConflictMarker - If this is a '====' or '||||' or '>>>>', or if
/// it is '<<<<' and the conflict marker started with a '>>>>' marker, then it
/// is the end of a conflict marker. Handle it by ignoring up until the end of
/// the line. This returns true if it is a conflict marker and false if not.
bool Lexer::HandleEndOfConflictMarker(const char *CurPtr) {
  // Only a conflict marker if it starts at the beginning of a line.
  if (CurPtr != BufferStart &&
      CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
    return false;

  // If we have a situation where we don't care about conflict markers, ignore
  // it.
  if (!CurrentConflictMarkerState || isLexingRawMode())
    return false;

  // Check to see if we have the marker (4 characters in a row).
  for (unsigned i = 1; i != 4; ++i)
    if (CurPtr[i] != CurPtr[0])
      return false;

  // If we do have it, search for the end of the conflict marker. This could
  // fail if it got skipped with a '#if 0' or something. Note that CurPtr might
  // be the end of the conflict marker.
  if (const char *End = FindConflictEnd(CurPtr, BufferEnd,
                                        CurrentConflictMarkerState)) {
    CurPtr = End;

    // Skip ahead to the end of line.
    while (CurPtr != BufferEnd && *CurPtr != '\r' && *CurPtr != '\n')
      ++CurPtr;

    BufferPtr = CurPtr;

    // No longer in the conflict marker.
    CurrentConflictMarkerState = CMK_None;
    return true;
  }

  return false;
}

static const char *findPlaceholderEnd(const char *CurPtr,
                                      const char *BufferEnd) {
  if (CurPtr == BufferEnd)
    return nullptr;
  BufferEnd -= 1; // Scan until the second-to-last character.
  for (; CurPtr != BufferEnd; ++CurPtr) {
    if (CurPtr[0] == '#' && CurPtr[1] == '>')
      return CurPtr + 2;
  }
  return nullptr;
}

bool Lexer::lexEditorPlaceholder(Token &Result, const char *CurPtr) {
  assert(CurPtr[-1] == '<' && CurPtr[0] == '#' && "Not a placeholder!");
  if (!PP || !PP->getPreprocessorOpts().LexEditorPlaceholders || LexingRawMode)
    return false;
  const char *End = findPlaceholderEnd(CurPtr + 1, BufferEnd);
  if (!End)
    return false;
  const char *Start = CurPtr - 1;
  if (!LangOpts.AllowEditorPlaceholders)
    Diag(Start, diag::err_placeholder_in_source);
  Result.startToken();
  FormTokenWithChars(Result, End, tok::raw_identifier);
  Result.setRawIdentifierData(Start);
  PP->LookUpIdentifierInfo(Result);
  Result.setFlag(Token::IsEditorPlaceholder);
  BufferPtr = End;
  return true;
}

bool Lexer::isCodeCompletionPoint(const char *CurPtr) const {
  if (PP && PP->isCodeCompletionEnabled()) {
    SourceLocation Loc = FileLoc.getLocWithOffset(CurPtr-BufferStart);
    return Loc == PP->getCodeCompletionLoc();
  }

  return false;
}

std::optional<uint32_t> Lexer::tryReadNumericUCN(const char *&StartPtr,
                                                 const char *SlashLoc,
                                                 Token *Result) {
  unsigned CharSize;
  char Kind = getCharAndSize(StartPtr, CharSize);
  assert((Kind == 'u' || Kind == 'U') && "expected a UCN");

  // \uXXXX takes four hex digits; \UXXXXXXXX takes eight.
  unsigned NumHexDigits = Kind == 'u' ? 4 : 8;

  bool Delimited = false;
  bool FoundEndDelimiter = false;
  unsigned Count = 0;
  bool Diagnose = Result && !isLexingRawMode();

  if (!LangOpts.CPlusPlus && !LangOpts.C99) {
    if (Diagnose)
      Diag(SlashLoc, diag::warn_ucn_not_valid_in_c89);
    return std::nullopt;
  }

  const char *CurPtr = StartPtr + CharSize;
  const char *KindLoc = &CurPtr[-1];

  uint32_t CodePoint = 0;
  while (Count != NumHexDigits || Delimited) {
    char C = getCharAndSize(CurPtr, CharSize);
    if (!Delimited && Count == 0 && C == '{') {
      Delimited = true;
      CurPtr += CharSize;
      continue;
    }

    if (Delimited && C == '}') {
      CurPtr += CharSize;
      FoundEndDelimiter = true;
      break;
    }

    unsigned Value = llvm::hexDigitValue(C);
    if (Value == -1U) {
      if (!Delimited)
        break;
      if (Diagnose)
        Diag(SlashLoc, diag::warn_delimited_ucn_incomplete)
            << StringRef(KindLoc, 1);
      return std::nullopt;
    }

    // About to shift out the top nibble; the code point would overflow.
    if (CodePoint & 0xF000'0000) {
      if (Diagnose)
        Diag(KindLoc, diag::err_escape_too_large) << 0;
      return std::nullopt;
    }

    CodePoint <<= 4;
    CodePoint |= Value;
    CurPtr += CharSize;
    Count++;
  }

  if (Count == 0) {
    if (Diagnose)
      Diag(SlashLoc, FoundEndDelimiter ? diag::warn_delimited_ucn_empty
                                       : diag::warn_ucn_escape_no_digits)
          << StringRef(KindLoc, 1);
    return std::nullopt;
  }

  if (Delimited && Kind == 'U') {
    if (Diagnose)
      Diag(SlashLoc, diag::err_hex_escape_no_digits) << StringRef(KindLoc, 1);
    return std::nullopt;
  }

  if (!Delimited && Count != NumHexDigits) {
    if (Diagnose) {
      Diag(SlashLoc, diag::warn_ucn_escape_incomplete);
      // If the user wrote \U1234, suggest a fixit to \u.
      if (Count == 4 && NumHexDigits == 8) {
        CharSourceRange URange = makeCharRange(*this, KindLoc, KindLoc + 1);
        Diag(KindLoc, diag::note_ucn_four_not_eight)
            << FixItHint::CreateReplacement(URange, "u");
      }
    }
    return std::nullopt;
  }

  if (Delimited && PP) {
    Diag(SlashLoc, PP->getLangOpts().CPlusPlus23
                       ? diag::warn_cxx23_delimited_escape_sequence
                       : diag::ext_delimited_escape_sequence)
        << /*delimited*/ 0 << (PP->getLangOpts().CPlusPlus ? 1 : 0);
  }

  if (Result) {
    Result->setFlag(Token::HasUCN);
    // If the UCN contains either a trigraph or a line splice, we need to
    // call getAndAdvanceChar again to set the appropriate flags on Result.
    if (CurPtr - StartPtr == (ptrdiff_t)(Count + 1 + (Delimited ? 2 : 0)))
      StartPtr = CurPtr;
    else
      while (StartPtr != CurPtr)
        (void)getAndAdvanceChar(StartPtr, *Result);
  } else {
    StartPtr = CurPtr;
  }
  return CodePoint;
}

std::optional<uint32_t> Lexer::tryReadNamedUCN(const char *&StartPtr,
                                               const char *SlashLoc,
                                               Token *Result) {
  unsigned CharSize;
  bool Diagnose = Result && !isLexingRawMode();

  char C = getCharAndSize(StartPtr, CharSize);
  assert(C == 'N' && "expected \\N{...}");

  const char *CurPtr = StartPtr + CharSize;
  const char *KindLoc = &CurPtr[-1];

  C = getCharAndSize(CurPtr, CharSize);
  if (C != '{') {
    if (Diagnose)
      Diag(SlashLoc, diag::warn_ucn_escape_incomplete);
    return std::nullopt;
  }
  CurPtr += CharSize;
  const char *StartName = CurPtr;
  bool FoundEndDelimiter = false;
  llvm::SmallVector<char, 30> Buffer;
  while (C) {
    C = getCharAndSize(CurPtr, CharSize);
    CurPtr += CharSize;
    if (C == '}') {
      FoundEndDelimiter = true;
      break;
    }

    if (isVerticalWhitespace(C))
      break;
    Buffer.push_back(C);
  }

  if (!FoundEndDelimiter || Buffer.empty()) {
    if (Diagnose)
      Diag(SlashLoc, FoundEndDelimiter ? diag::warn_delimited_ucn_empty
                                       : diag::warn_delimited_ucn_incomplete)
          << StringRef(KindLoc, 1);
    return std::nullopt;
  }

  StringRef Name(Buffer.data(), Buffer.size());
  std::optional<char32_t> Match =
      llvm::sys::unicode::nameToCodepointStrict(Name);
  std::optional<llvm::sys::unicode::LooseMatchingResult> LooseMatch;
  if (!Match) {
    LooseMatch = llvm::sys::unicode::nameToCodepointLooseMatching(Name);
    if (Diagnose) {
      Diag(StartName, diag::err_invalid_ucn_name)
          << StringRef(Buffer.data(), Buffer.size())
          << makeCharRange(*this, StartName, CurPtr - CharSize);
      if (LooseMatch) {
        Diag(StartName, diag::note_invalid_ucn_name_loose_matching)
            << FixItHint::CreateReplacement(
                   makeCharRange(*this, StartName, CurPtr - CharSize),
                   LooseMatch->Name);
      }
    }
    // We do not offer suggestions for misspelled character names here, as
    // the set of valid suggestions depends on context, and we should not
    // make invalid suggestions.
  }

  if (Diagnose && Match)
    Diag(SlashLoc, PP->getLangOpts().CPlusPlus23
                       ? diag::warn_cxx23_delimited_escape_sequence
                       : diag::ext_delimited_escape_sequence)
        << /*named*/ 1 << (PP->getLangOpts().CPlusPlus ? 1 : 0);

  // If we are not diagnosing (likely because we are lexing tentatively), do
  // not recover to the loose match; otherwise the token could be incorrectly
  // considered valid. This function will be called again and a diagnostic
  // emitted then.
  if (LooseMatch && Diagnose)
    Match = LooseMatch->CodePoint;

  if (Result) {
    Result->setFlag(Token::HasUCN);
    // If the UCN contains either a trigraph or a line splice, we need to
    // call getAndAdvanceChar again to set the appropriate flags on Result.
    if (CurPtr - StartPtr == (ptrdiff_t)(Buffer.size() + 3))
      StartPtr = CurPtr;
    else
      while (StartPtr != CurPtr)
        (void)getAndAdvanceChar(StartPtr, *Result);
  } else {
    StartPtr = CurPtr;
  }
  return Match ? std::optional<uint32_t>(*Match) : std::nullopt;
}

uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
                           Token *Result) {

  unsigned CharSize;
  std::optional<uint32_t> CodePointOpt;
  char Kind = getCharAndSize(StartPtr, CharSize);
  if (Kind == 'u' || Kind == 'U')
    CodePointOpt = tryReadNumericUCN(StartPtr, SlashLoc, Result);
  else if (Kind == 'N')
    CodePointOpt = tryReadNamedUCN(StartPtr, SlashLoc, Result);

  if (!CodePointOpt)
    return 0;

  uint32_t CodePoint = *CodePointOpt;

  // Don't apply C family restrictions to UCNs in assembly mode.
  if (LangOpts.AsmPreprocessor)
    return CodePoint;

  // C2x 6.4.3p2: A universal character name shall not designate a code point
  // where the hexadecimal value is:
  // - in the range D800 through DFFF inclusive; or
  // - greater than 10FFFF.
  // A universal-character-name outside the c-char-sequence of a character
  // constant, or the s-char-sequence of a string-literal shall not designate
  // a control character or a character in the basic character set.

  // C++11 [lex.charset]p2: If the hexadecimal value for a
  // universal-character-name corresponds to a surrogate code point (in the
  // range 0xD800-0xDFFF, inclusive), the program is ill-formed. Additionally,
  // if the hexadecimal value for a universal-character-name outside the
  // c-char-sequence, s-char-sequence, or r-char-sequence of a character or
  // string literal corresponds to a control character (in either of the
  // ranges 0x00-0x1F or 0x7F-0x9F, both inclusive) or to a character in the
  // basic source character set, the program is ill-formed.
  if (CodePoint < 0xA0) {
    // We don't use isLexingRawMode() here because we need to warn about bad
    // UCNs even when skipping preprocessing tokens in a #if block.
    if (Result && PP) {
      if (CodePoint < 0x20 || CodePoint >= 0x7F)
        Diag(BufferPtr, diag::err_ucn_control_character);
      else {
        char C = static_cast<char>(CodePoint);
        Diag(BufferPtr, diag::err_ucn_escape_basic_scs) << StringRef(&C, 1);
      }
    }

    return 0;
  } else if (CodePoint >= 0xD800 && CodePoint <= 0xDFFF) {
    // C++03 allows UCNs representing surrogate characters. C99 and C++11 don't.
    // We don't use isLexingRawMode() here because we need to diagnose bad
    // UCNs even when skipping preprocessing tokens in a #if block.
    if (Result && PP) {
      if (LangOpts.CPlusPlus && !LangOpts.CPlusPlus11)
        Diag(BufferPtr, diag::warn_ucn_escape_surrogate);
      else
        Diag(BufferPtr, diag::err_ucn_escape_invalid);
    }
    return 0;
  }

  return CodePoint;
}

bool Lexer::CheckUnicodeWhitespace(Token &Result, uint32_t C,
                                   const char *CurPtr) {
  if (!isLexingRawMode() && !PP->isPreprocessedOutput() &&
      isUnicodeWhitespace(C)) {
    Diag(BufferPtr, diag::ext_unicode_whitespace)
        << makeCharRange(*this, BufferPtr, CurPtr);

    Result.setFlag(Token::LeadingSpace);
    return true;
  }
  return false;
}

void Lexer::PropagateLineStartLeadingSpaceInfo(Token &Result) {
  IsAtStartOfLine = Result.isAtStartOfLine();
  HasLeadingSpace = Result.hasLeadingSpace();
  HasLeadingEmptyMacro = Result.hasLeadingEmptyMacro();
  // Note that this doesn't affect IsAtPhysicalStartOfLine.
}

bool Lexer::Lex(Token &Result) {
  assert(!isDependencyDirectivesLexer());

  // Start a new token.
  Result.startToken();

  // Set up misc whitespace flags for LexTokenInternal.
  if (IsAtStartOfLine) {
    Result.setFlag(Token::StartOfLine);
    IsAtStartOfLine = false;
  }

  if (HasLeadingSpace) {
    Result.setFlag(Token::LeadingSpace);
    HasLeadingSpace = false;
  }

  if (HasLeadingEmptyMacro) {
    Result.setFlag(Token::LeadingEmptyMacro);
    HasLeadingEmptyMacro = false;
  }

  bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
  IsAtPhysicalStartOfLine = false;
  bool isRawLex = isLexingRawMode();
  (void)isRawLex;
  bool returnedToken = LexTokenInternal(Result, atPhysicalStartOfLine);
  // (After the LexTokenInternal call, the lexer might be destroyed.)
  assert((returnedToken || !isRawLex) && "Raw lex must succeed");
  return returnedToken;
}

/// LexTokenInternal - This implements a simple C family lexer. It is an
/// extremely performance critical piece of code. This assumes that the buffer
/// has a null character at the end of the file. This returns a preprocessing
/// token, not a normal token; as such, it is an internal interface. It assumes
/// that the flags of Result have been cleared before calling this.
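///
/// A typical external driver of the lexer is a raw-lexing loop. A minimal
/// sketch of such client code (not part of this file), assuming a
/// SourceManager SM, a FileID FID, and LangOptions LangOpts are in scope:
/// \code
///   Lexer RawLex(FID, SM.getBufferOrFake(FID), SM, LangOpts);
///   Token Tok;
///   do {
///     RawLex.LexFromRawLexer(Tok);
///   } while (Tok.isNot(tok::eof));
/// \endcode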
bool Lexer::LexTokenInternal(Token &Result, bool TokAtPhysicalStartOfLine) {
LexStart:
  assert(!Result.needsCleaning() && "Result needs cleaning");
  assert(!Result.hasPtrData() && "Result has not been reset");

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;

  // Small amounts of horizontal whitespace are very common between tokens.
  if (isHorizontalWhitespace(*CurPtr)) {
    do {
      ++CurPtr;
    } while (isHorizontalWhitespace(*CurPtr));

    // If we are keeping whitespace and other tokens, just return what we just
    // skipped. The next lexer invocation will return the token after the
    // whitespace.
    if (isKeepWhitespaceMode()) {
      FormTokenWithChars(Result, CurPtr, tok::unknown);
      // FIXME: The next token will not have LeadingSpace set.
      return true;
    }

    BufferPtr = CurPtr;
    Result.setFlag(Token::LeadingSpace);
  }

  unsigned SizeTmp, SizeTmp2; // Temporaries for use in cases below.

  // Read a character, advancing over it.
  char Char = getAndAdvanceChar(CurPtr, Result);
  tok::TokenKind Kind;

  if (!isVerticalWhitespace(Char))
    NewLinePtr = nullptr;

  switch (Char) {
  case 0: // Null.
    // Found end of file?
    if (CurPtr-1 == BufferEnd)
      return LexEndOfFile(Result, CurPtr-1);

    // Check if we are performing code completion.
    if (isCodeCompletionPoint(CurPtr-1)) {
      // Return the code-completion token.
      Result.startToken();
      FormTokenWithChars(Result, CurPtr, tok::code_completion);
      return true;
    }

    if (!isLexingRawMode())
      Diag(CurPtr-1, diag::null_in_file);
    Result.setFlag(Token::LeadingSpace);
    if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
      return true; // KeepWhitespaceMode

    // We know the lexer hasn't changed, so just try again with this lexer.
    // (We manually eliminate the tail call to avoid recursion.)
    goto LexNextToken;

  case 26: // DOS & CP/M EOF: "^Z".
    // If we're in Microsoft extensions mode, treat this as end of file.
    if (LangOpts.MicrosoftExt) {
      if (!isLexingRawMode())
        Diag(CurPtr-1, diag::ext_ctrl_z_eof_microsoft);
      return LexEndOfFile(Result, CurPtr-1);
    }

    // If Microsoft extensions are disabled, this is just random garbage.
    Kind = tok::unknown;
    break;

  case '\r':
    if (CurPtr[0] == '\n')
      (void)getAndAdvanceChar(CurPtr, Result);
    [[fallthrough]];
  case '\n':
    // If we are inside a preprocessor directive and we see the end of line,
    // we know we are done with the directive, so return an EOD token.
    if (ParsingPreprocessorDirective) {
      // Done parsing the "line".
      ParsingPreprocessorDirective = false;

      // Restore comment saving mode, in case it was disabled for the
      // directive.
      if (PP)
        resetExtendedTokenMode();

      // Since we consumed a newline, we are back at the start of a line.
      IsAtStartOfLine = true;
      IsAtPhysicalStartOfLine = true;
      NewLinePtr = CurPtr - 1;

      Kind = tok::eod;
      break;
    }

    // No leading whitespace seen so far.
    Result.clearFlag(Token::LeadingSpace);

    if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
      return true; // KeepWhitespaceMode

    // We only saw whitespace, so just try again with this lexer.
    // (We manually eliminate the tail call to avoid recursion.)
    goto LexNextToken;
  case ' ':
  case '\t':
  case '\f':
  case '\v':
  SkipHorizontalWhitespace:
    Result.setFlag(Token::LeadingSpace);
    if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
      return true; // KeepWhitespaceMode

  SkipIgnoredUnits:
    CurPtr = BufferPtr;

    // If the next token is obviously a // or /* */ comment, skip it efficiently
    // too (without going through the big switch stmt).
    if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() &&
        LineComment && (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP)) {
      if (SkipLineComment(Result, CurPtr+2, TokAtPhysicalStartOfLine))
        return true; // There is a token to return.
      goto SkipIgnoredUnits;
    } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) {
      if (SkipBlockComment(Result, CurPtr+2, TokAtPhysicalStartOfLine))
        return true; // There is a token to return.
      goto SkipIgnoredUnits;
    } else if (isHorizontalWhitespace(*CurPtr)) {
      goto SkipHorizontalWhitespace;
    }
    // We only saw whitespace, so just try again with this lexer.
    // (We manually eliminate the tail call to avoid recursion.)
    goto LexNextToken;

  // C99 6.4.4.1: Integer Constants.
  // C99 6.4.4.2: Floating Constants.
  case '0': case '1': case '2': case '3': case '4':
  case '5': case '6': case '7': case '8': case '9':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexNumericConstant(Result, CurPtr);

  // Identifier (e.g., uber), or
  // UTF-8 (C2x/C++17) or UTF-16 (C11/C++11) character literal, or
  // UTF-8 or UTF-16 string literal (C11/C++11).
  case 'u':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();

    if (LangOpts.CPlusPlus11 || LangOpts.C11) {
      Char = getCharAndSize(CurPtr, SizeTmp);

      // UTF-16 string literal
      if (Char == '"')
        return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                                tok::utf16_string_literal);

      // UTF-16 character constant
      if (Char == '\'')
        return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                               tok::utf16_char_constant);

      // UTF-16 raw string literal
      if (Char == 'R' && LangOpts.CPlusPlus11 &&
          getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
        return LexRawStringLiteral(
            Result, ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                                SizeTmp2, Result),
            tok::utf16_string_literal);

      if (Char == '8') {
        char Char2 = getCharAndSize(CurPtr + SizeTmp, SizeTmp2);

        // UTF-8 string literal
        if (Char2 == '"')
          return LexStringLiteral(
              Result, ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                                  SizeTmp2, Result),
              tok::utf8_string_literal);
        if (Char2 == '\'' && (LangOpts.CPlusPlus17 || LangOpts.C2x))
          return LexCharConstant(
              Result, ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                                  SizeTmp2, Result),
              tok::utf8_char_constant);

        if (Char2 == 'R' && LangOpts.CPlusPlus11) {
          unsigned SizeTmp3;
          char Char3 = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3);
          // UTF-8 raw string literal
          if (Char3 == '"') {
            return LexRawStringLiteral(
                Result,
                ConsumeChar(ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                                        SizeTmp2, Result),
                            SizeTmp3, Result),
                tok::utf8_string_literal);
          }
        }
      }
    }

    // Treat u like the start of an identifier.
    return LexIdentifierContinue(Result, CurPtr);

  case 'U': // Identifier (e.g. Uber) or C11/C++11 UTF-32 string literal.
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();

    if (LangOpts.CPlusPlus11 || LangOpts.C11) {
      Char = getCharAndSize(CurPtr, SizeTmp);

      // UTF-32 string literal
      if (Char == '"')
        return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                                tok::utf32_string_literal);

      // UTF-32 character constant
      if (Char == '\'')
        return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                               tok::utf32_char_constant);

      // UTF-32 raw string literal
      if (Char == 'R' && LangOpts.CPlusPlus11 &&
          getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
        return LexRawStringLiteral(
            Result, ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                                SizeTmp2, Result),
            tok::utf32_string_literal);
    }

    // Treat U like the start of an identifier.
    return LexIdentifierContinue(Result, CurPtr);

  case 'R': // Identifier or C++11 raw string literal.
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();

    if (LangOpts.CPlusPlus11) {
      Char = getCharAndSize(CurPtr, SizeTmp);

      if (Char == '"')
        return LexRawStringLiteral(Result,
                                   ConsumeChar(CurPtr, SizeTmp, Result),
                                   tok::string_literal);
    }

    // Treat R like the start of an identifier.
    return LexIdentifierContinue(Result, CurPtr);

  case 'L': // Identifier (Loony) or wide literal (L'x' or L"xyz").
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    Char = getCharAndSize(CurPtr, SizeTmp);

    // Wide string literal.
    if (Char == '"')
      return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                              tok::wide_string_literal);

    // Wide raw string literal.
    if (LangOpts.CPlusPlus11 && Char == 'R' &&
        getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
      return LexRawStringLiteral(
          Result, ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                              SizeTmp2, Result),
          tok::wide_string_literal);

    // Wide character constant.
    if (Char == '\'')
      return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                             tok::wide_char_constant);
    // FALL THROUGH, treating L like the start of an identifier.
    [[fallthrough]];

  // C99 6.4.2: Identifiers.
  case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
  case 'H': case 'I': case 'J': case 'K': /*'L'*/case 'M': case 'N':
  case 'O': case 'P': case 'Q': /*'R'*/case 'S': case 'T': /*'U'*/
  case 'V': case 'W': case 'X': case 'Y': case 'Z':
  case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
  case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
  case 'o': case 'p': case 'q': case 'r': case 's': case 't': /*'u'*/
  case 'v': case 'w': case 'x': case 'y': case 'z':
  case '_':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexIdentifierContinue(Result, CurPtr);

  case '$': // $ in identifiers.
    if (LangOpts.DollarIdents) {
      if (!isLexingRawMode())
        Diag(CurPtr-1, diag::ext_dollar_in_identifier);
      // Notify MIOpt that we read a non-whitespace/non-comment token.
      MIOpt.ReadToken();
      return LexIdentifierContinue(Result, CurPtr);
    }

    Kind = tok::unknown;
    break;

  // C99 6.4.4: Character Constants.
  case '\'':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexCharConstant(Result, CurPtr, tok::char_constant);

  // C99 6.4.5: String Literals.
  case '"':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexStringLiteral(Result, CurPtr,
                            ParsingFilename ? tok::header_name
                                            : tok::string_literal);

  // C99 6.4.6: Punctuators.
  case '?':
    Kind = tok::question;
    break;
  case '[':
    Kind = tok::l_square;
    break;
  case ']':
    Kind = tok::r_square;
    break;
  case '(':
    Kind = tok::l_paren;
    break;
  case ')':
    Kind = tok::r_paren;
    break;
  case '{':
    Kind = tok::l_brace;
    break;
  case '}':
    Kind = tok::r_brace;
    break;
  case '.':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char >= '0' && Char <= '9') {
      // Notify MIOpt that we read a non-whitespace/non-comment token.
      MIOpt.ReadToken();

      return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
    } else if (LangOpts.CPlusPlus && Char == '*') {
      Kind = tok::periodstar;
      CurPtr += SizeTmp;
    } else if (Char == '.' &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
      Kind = tok::ellipsis;
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
    } else {
      Kind = tok::period;
    }
    break;
  case '&':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '&') {
      Kind = tok::ampamp;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '=') {
      Kind = tok::ampequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::amp;
    }
    break;
  case '*':
    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
      Kind = tok::starequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::star;
    }
    break;
  case '+':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '+') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::plusplus;
    } else if (Char == '=') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::plusequal;
    } else {
      Kind = tok::plus;
    }
    break;
  case '-':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '-') { // --
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::minusminus;
    } else if (Char == '>' && LangOpts.CPlusPlus &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') { // C++ ->*
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
      Kind = tok::arrowstar;
    } else if (Char == '>') { // ->
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::arrow;
    } else if (Char == '=') { // -=
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::minusequal;
    } else {
      Kind = tok::minus;
    }
    break;
  case '~':
    Kind = tok::tilde;
    break;
  case '!':
    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
      Kind = tok::exclaimequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::exclaim;
    }
    break;
  case '/':
    // 6.4.9: Comments
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '/') { // Line comment.
      // Even if line comments are disabled (e.g. in C89 mode), we generally
      // want to lex this as a comment. There is one problem with this, though:
      // in one particular corner case it can change the behavior of the
      // resulting program. For example, in "foo //**/ bar", C89 lexes this as
      // "foo / bar" while languages with line comments lex it as "foo". Check
      // to see if the character after the second slash is a '*'. If so, we
      // will lex that as a "/" instead of the start of a comment. However, we
      // never do this if we are just preprocessing.
      bool TreatAsComment =
          LineComment && (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP);
      if (!TreatAsComment)
        if (!(PP && PP->isPreprocessedOutput()))
          TreatAsComment = getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*';

      if (TreatAsComment) {
        if (SkipLineComment(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                            TokAtPhysicalStartOfLine))
          return true; // There is a token to return.

        // It is common for the tokens immediately after a // comment to be
        // whitespace (indentation for the next line). Instead of going through
        // the big switch, handle it efficiently now.
        goto SkipIgnoredUnits;
      }
    }

    if (Char == '*') { // /**/ comment.
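      // (Unlike '//' comments, '/* */' comments are valid in every supported
      // language mode, so no gating on LineComment is needed here.)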
      if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                           TokAtPhysicalStartOfLine))
        return true; // There is a token to return.

      // We only saw whitespace, so just try again with this lexer.
      // (We manually eliminate the tail call to avoid recursion.)
      goto LexNextToken;
    }

    if (Char == '=') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::slashequal;
    } else {
      Kind = tok::slash;
    }
    break;
  case '%':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      Kind = tok::percentequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (LangOpts.Digraphs && Char == '>') {
      Kind = tok::r_brace; // '%>' -> '}'
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (LangOpts.Digraphs && Char == ':') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Char = getCharAndSize(CurPtr, SizeTmp);
      if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
        Kind = tok::hashhash; // '%:%:' -> '##'
        CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                             SizeTmp2, Result);
      } else if (Char == '@' && LangOpts.MicrosoftExt) { // %:@ -> #@ -> Charize
        CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
        if (!isLexingRawMode())
          Diag(BufferPtr, diag::ext_charize_microsoft);
        Kind = tok::hashat;
      } else { // '%:' -> '#'
        // We parsed a # character. If this occurs at the start of the line,
        // it's actually the start of a preprocessing directive. Callback to
        // the preprocessor to handle it.
        // TODO: -fpreprocessed mode??
        if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer)
          goto HandleDirective;

        Kind = tok::hash;
      }
    } else {
      Kind = tok::percent;
    }
    break;
  case '<':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (ParsingFilename) {
      return LexAngledStringLiteral(Result, CurPtr);
    } else if (Char == '<') {
      char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
      if (After == '=') {
        Kind = tok::lesslessequal;
        CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                             SizeTmp2, Result);
      } else if (After == '<' && IsStartOfConflictMarker(CurPtr-1)) {
        // If this is actually a '<<<<<<<' version control conflict marker,
        // recognize it as such and recover nicely.
        goto LexNextToken;
      } else if (After == '<' && HandleEndOfConflictMarker(CurPtr-1)) {
        // If this is '<<<<' and we're in a Perforce-style conflict marker,
        // ignore it.
        goto LexNextToken;
      } else if (LangOpts.CUDA && After == '<') {
        Kind = tok::lesslessless;
        CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                             SizeTmp2, Result);
      } else {
        CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
        Kind = tok::lessless;
      }
    } else if (Char == '=') {
      char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
      if (After == '>') {
        if (LangOpts.CPlusPlus20) {
          if (!isLexingRawMode())
            Diag(BufferPtr, diag::warn_cxx17_compat_spaceship);
          CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                               SizeTmp2, Result);
          Kind = tok::spaceship;
          break;
        }
        // Suggest adding a space between the '<=' and the '>' to avoid a
        // change in semantics if this turns up in C++ <=17 mode.
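        // For example, "x<=>y" lexes as a single spaceship token in C++20
        // but as "<=" followed by ">" in earlier language modes.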
        if (LangOpts.CPlusPlus && !isLexingRawMode()) {
          Diag(BufferPtr, diag::warn_cxx20_compat_spaceship)
              << FixItHint::CreateInsertion(
                     getSourceLocation(CurPtr + SizeTmp, SizeTmp2), " ");
        }
      }
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::lessequal;
    } else if (LangOpts.Digraphs && Char == ':') { // '<:' -> '['
      if (LangOpts.CPlusPlus11 &&
          getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == ':') {
        // C++11 [lex.pptoken]p3:
        //   Otherwise, if the next three characters are <:: and the subsequent
        //   character is neither : nor >, the < is treated as a preprocessor
        //   token by itself and not as the first character of the alternative
        //   token <:.
        unsigned SizeTmp3;
        char After = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3);
        if (After != ':' && After != '>') {
          Kind = tok::less;
          if (!isLexingRawMode())
            Diag(BufferPtr, diag::warn_cxx98_compat_less_colon_colon);
          break;
        }
      }

      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::l_square;
    } else if (LangOpts.Digraphs && Char == '%') { // '<%' -> '{'
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::l_brace;
    } else if (Char == '#' && /*Not a trigraph*/ SizeTmp == 1 &&
               lexEditorPlaceholder(Result, CurPtr)) {
      return true;
    } else {
      Kind = tok::less;
    }
    break;
  case '>':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::greaterequal;
    } else if (Char == '>') {
      char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
      if (After == '=') {
        CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                             SizeTmp2, Result);
        Kind = tok::greatergreaterequal;
      } else if (After == '>' && IsStartOfConflictMarker(CurPtr-1)) {
        // If this is actually a '>>>>' conflict marker, recognize it as such
        // and recover nicely.
        goto LexNextToken;
      } else if (After == '>' && HandleEndOfConflictMarker(CurPtr-1)) {
        // If this is '>>>>>>>' and we're in a conflict marker, ignore it.
        goto LexNextToken;
      } else if (LangOpts.CUDA && After == '>') {
        Kind = tok::greatergreatergreater;
        CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                             SizeTmp2, Result);
      } else {
        CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
        Kind = tok::greatergreater;
      }
    } else {
      Kind = tok::greater;
    }
    break;
  case '^':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::caretequal;
    } else if (LangOpts.OpenCL && Char == '^') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::caretcaret;
    } else {
      Kind = tok::caret;
    }
    break;
  case '|':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      Kind = tok::pipeequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '|') {
      // If this is '|||||||' and we're in a conflict marker, ignore it.
      if (CurPtr[1] == '|' && HandleEndOfConflictMarker(CurPtr-1))
        goto LexNextToken;
      Kind = tok::pipepipe;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::pipe;
    }
    break;
  case ':':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (LangOpts.Digraphs && Char == '>') {
      Kind = tok::r_square; // ':>' -> ']'
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == ':') {
      Kind = tok::coloncolon;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::colon;
    }
    break;
  case ';':
    Kind = tok::semi;
    break;
  case '=':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      // If this is '====' and we're in a conflict marker, ignore it.
      if (CurPtr[1] == '=' && HandleEndOfConflictMarker(CurPtr-1))
        goto LexNextToken;

      Kind = tok::equalequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::equal;
    }
    break;
  case ',':
    Kind = tok::comma;
    break;
  case '#':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '#') {
      Kind = tok::hashhash;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '@' && LangOpts.MicrosoftExt) { // #@ -> Charize
      Kind = tok::hashat;
      if (!isLexingRawMode())
        Diag(BufferPtr, diag::ext_charize_microsoft);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      // We parsed a # character. If this occurs at the start of the line,
      // it's actually the start of a preprocessing directive. Callback to
      // the preprocessor to handle it.
      // TODO: -fpreprocessed mode??
      if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer)
        goto HandleDirective;

      Kind = tok::hash;
    }
    break;

  case '@':
    // Objective-C support.
    if (CurPtr[-1] == '@' && LangOpts.ObjC)
      Kind = tok::at;
    else
      Kind = tok::unknown;
    break;

  // UCNs (C99 6.4.3, C++11 [lex.charset]p2)
  case '\\':
    if (!LangOpts.AsmPreprocessor) {
      if (uint32_t CodePoint = tryReadUCN(CurPtr, BufferPtr, &Result)) {
        if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) {
          if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
            return true; // KeepWhitespaceMode

          // We only saw whitespace, so just try again with this lexer.
          // (We manually eliminate the tail call to avoid recursion.)
          goto LexNextToken;
        }

        return LexUnicodeIdentifierStart(Result, CodePoint, CurPtr);
      }
    }

    Kind = tok::unknown;
    break;

  default: {
    if (isASCII(Char)) {
      Kind = tok::unknown;
      break;
    }

    llvm::UTF32 CodePoint;

    // We can't just reset CurPtr to BufferPtr because BufferPtr may point to
    // an escaped newline.
    --CurPtr;
    llvm::ConversionResult Status =
        llvm::convertUTF8Sequence((const llvm::UTF8 **)&CurPtr,
                                  (const llvm::UTF8 *)BufferEnd,
                                  &CodePoint,
                                  llvm::strictConversion);
    if (Status == llvm::conversionOK) {
      if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) {
        if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
          return true; // KeepWhitespaceMode

        // We only saw whitespace, so just try again with this lexer.
        // (We manually eliminate the tail call to avoid recursion.)
        goto LexNextToken;
      }
      return LexUnicodeIdentifierStart(Result, CodePoint, CurPtr);
    }

    if (isLexingRawMode() || ParsingPreprocessorDirective ||
        PP->isPreprocessedOutput()) {
      ++CurPtr;
      Kind = tok::unknown;
      break;
    }

    // Non-ASCII characters tend to creep into source code unintentionally.
    // Instead of letting the parser complain about the unknown token,
    // just diagnose the invalid UTF-8, then drop the character.
    Diag(CurPtr, diag::err_invalid_utf8);

    BufferPtr = CurPtr+1;
    // We're pretending the character didn't exist, so just try again with
    // this lexer.
    // (We manually eliminate the tail call to avoid recursion.)
    goto LexNextToken;
  }
  }

  // Notify MIOpt that we read a non-whitespace/non-comment token.
  MIOpt.ReadToken();

  // Update the location of token as well as BufferPtr.
  FormTokenWithChars(Result, CurPtr, Kind);
  return true;

HandleDirective:
  // We parsed a # character and it's the start of a preprocessing directive.

  FormTokenWithChars(Result, CurPtr, tok::hash);
  PP->HandleDirective(Result);

  if (PP->hadModuleLoaderFatalFailure())
    // With a fatal failure in the module loader, we abort parsing.
    return true;

  // We parsed the directive; lex a token with the new state.
  return false;

LexNextToken:
  Result.clearFlag(Token::NeedsCleaning);
  goto LexStart;
}

const char *Lexer::convertDependencyDirectiveToken(
    const dependency_directives_scan::Token &DDTok, Token &Result) {
  const char *TokPtr = BufferStart + DDTok.Offset;
  Result.startToken();
  Result.setLocation(getSourceLocation(TokPtr));
  Result.setKind(DDTok.Kind);
  Result.setFlag((Token::TokenFlags)DDTok.Flags);
  Result.setLength(DDTok.Length);
  BufferPtr = TokPtr + DDTok.Length;
  return TokPtr;
}

bool Lexer::LexDependencyDirectiveToken(Token &Result) {
  assert(isDependencyDirectivesLexer());

  using namespace dependency_directives_scan;

  while (NextDepDirectiveTokenIndex == DepDirectives.front().Tokens.size()) {
    if (DepDirectives.front().Kind == pp_eof)
      return LexEndOfFile(Result, BufferEnd);
    if (DepDirectives.front().Kind == tokens_present_before_eof)
      MIOpt.ReadToken();
    NextDepDirectiveTokenIndex = 0;
    DepDirectives = DepDirectives.drop_front();
  }

  const dependency_directives_scan::Token &DDTok =
      DepDirectives.front().Tokens[NextDepDirectiveTokenIndex++];
  if (NextDepDirectiveTokenIndex > 1 || DDTok.Kind != tok::hash) {
    // Read something other than a preprocessor directive hash.
    MIOpt.ReadToken();
  }

  if (ParsingFilename && DDTok.is(tok::less)) {
    BufferPtr = BufferStart + DDTok.Offset;
    LexAngledStringLiteral(Result, BufferPtr + 1);
    if (Result.isNot(tok::header_name))
      return true;
    // Advance the index of lexed tokens.
    while (true) {
      const dependency_directives_scan::Token &NextTok =
          DepDirectives.front().Tokens[NextDepDirectiveTokenIndex];
      if (BufferStart + NextTok.Offset >= BufferPtr)
        break;
      ++NextDepDirectiveTokenIndex;
    }
    return true;
  }

  const char *TokPtr = convertDependencyDirectiveToken(DDTok, Result);

  if (Result.is(tok::hash) && Result.isAtStartOfLine()) {
    PP->HandleDirective(Result);
    return false;
  }
  if (Result.is(tok::raw_identifier)) {
    Result.setRawIdentifierData(TokPtr);
    if (!isLexingRawMode()) {
      IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);
      if (II->isHandleIdentifierCase())
        return PP->HandleIdentifier(Result);
    }
    return true;
  }
  if (Result.isLiteral()) {
    Result.setLiteralData(TokPtr);
    return true;
  }
  if (Result.is(tok::colon)) {
    // Convert consecutive colons to 'tok::coloncolon'.
    if (*BufferPtr == ':') {
      assert(DepDirectives.front().Tokens[NextDepDirectiveTokenIndex].is(
          tok::colon));
      ++NextDepDirectiveTokenIndex;
      Result.setKind(tok::coloncolon);
    }
    return true;
  }
  if (Result.is(tok::eod))
    ParsingPreprocessorDirective = false;

  return true;
}

bool Lexer::LexDependencyDirectiveTokenWhileSkipping(Token &Result) {
  assert(isDependencyDirectivesLexer());

  using namespace dependency_directives_scan;

  bool Stop = false;
  unsigned NestedIfs = 0;
  do {
    DepDirectives = DepDirectives.drop_front();
    switch (DepDirectives.front().Kind) {
    case pp_none:
      llvm_unreachable("unexpected 'pp_none'");
    case pp_include:
    case pp___include_macros:
    case pp_define:
    case pp_undef:
    case pp_import:
    case pp_pragma_import:
    case pp_pragma_once:
    case pp_pragma_push_macro:
    case pp_pragma_pop_macro:
    case pp_pragma_include_alias:
    case pp_pragma_system_header:
    case pp_include_next:
    case decl_at_import:
    case cxx_module_decl:
    case cxx_import_decl:
    case cxx_export_module_decl:
    case cxx_export_import_decl:
    case tokens_present_before_eof:
      break;
    case pp_if:
    case pp_ifdef:
    case pp_ifndef:
      ++NestedIfs;
      break;
    case pp_elif:
    case pp_elifdef:
    case pp_elifndef:
    case pp_else:
      if (!NestedIfs) {
        Stop = true;
      }
      break;
    case pp_endif:
      if (!NestedIfs) {
        Stop = true;
      } else {
        --NestedIfs;
      }
      break;
    case pp_eof:
      NextDepDirectiveTokenIndex = 0;
      return LexEndOfFile(Result, BufferEnd);
    }
  } while (!Stop);

  const dependency_directives_scan::Token &DDTok =
      DepDirectives.front().Tokens.front();
  assert(DDTok.is(tok::hash));
  NextDepDirectiveTokenIndex = 1;

  convertDependencyDirectiveToken(DDTok, Result);
  return false;
}