xref: /freebsd/contrib/llvm-project/clang/lib/Lex/Lexer.cpp (revision 8ddb146abcdf061be9f2c0db7e391697dafad85c)
1 //===- Lexer.cpp - C Language Family Lexer --------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 //  This file implements the Lexer and Token interfaces.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "clang/Lex/Lexer.h"
14 #include "UnicodeCharSets.h"
15 #include "clang/Basic/CharInfo.h"
16 #include "clang/Basic/Diagnostic.h"
17 #include "clang/Basic/IdentifierTable.h"
18 #include "clang/Basic/LLVM.h"
19 #include "clang/Basic/LangOptions.h"
20 #include "clang/Basic/SourceLocation.h"
21 #include "clang/Basic/SourceManager.h"
22 #include "clang/Basic/TokenKinds.h"
23 #include "clang/Lex/LexDiagnostic.h"
24 #include "clang/Lex/LiteralSupport.h"
25 #include "clang/Lex/MultipleIncludeOpt.h"
26 #include "clang/Lex/Preprocessor.h"
27 #include "clang/Lex/PreprocessorOptions.h"
28 #include "clang/Lex/Token.h"
29 #include "llvm/ADT/None.h"
30 #include "llvm/ADT/Optional.h"
31 #include "llvm/ADT/STLExtras.h"
32 #include "llvm/ADT/StringExtras.h"
33 #include "llvm/ADT/StringRef.h"
34 #include "llvm/ADT/StringSwitch.h"
35 #include "llvm/Support/Compiler.h"
36 #include "llvm/Support/ConvertUTF.h"
37 #include "llvm/Support/MathExtras.h"
38 #include "llvm/Support/MemoryBufferRef.h"
39 #include "llvm/Support/NativeFormatting.h"
40 #include "llvm/Support/UnicodeCharRanges.h"
41 #include <algorithm>
42 #include <cassert>
43 #include <cstddef>
44 #include <cstdint>
45 #include <cstring>
46 #include <string>
47 #include <tuple>
48 #include <utility>
49 
50 using namespace clang;
51 
52 //===----------------------------------------------------------------------===//
53 // Token Class Implementation
54 //===----------------------------------------------------------------------===//
55 
56 /// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
57 bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
58   if (isAnnotation())
59     return false;
60   if (IdentifierInfo *II = getIdentifierInfo())
61     return II->getObjCKeywordID() == objcKey;
62   return false;
63 }
64 
65 /// getObjCKeywordID - Return the ObjC keyword kind.
66 tok::ObjCKeywordKind Token::getObjCKeywordID() const {
67   if (isAnnotation())
68     return tok::objc_not_keyword;
69   IdentifierInfo *specId = getIdentifierInfo();
70   return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
71 }
72 
73 //===----------------------------------------------------------------------===//
74 // Lexer Class Implementation
75 //===----------------------------------------------------------------------===//
76 
77 void Lexer::anchor() {}
78 
79 void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
80                       const char *BufEnd) {
81   BufferStart = BufStart;
82   BufferPtr = BufPtr;
83   BufferEnd = BufEnd;
84 
85   assert(BufEnd[0] == 0 &&
86          "We assume that the input buffer has a null character at the end"
87          " to simplify lexing!");
88 
89   // Check whether we have a BOM in the beginning of the buffer. If yes - act
90   // accordingly. Right now we support only UTF-8 with and without BOM, so, just
91   // skip the UTF-8 BOM if it's present.
92   if (BufferStart == BufferPtr) {
93     // Determine the size of the BOM.
94     StringRef Buf(BufferStart, BufferEnd - BufferStart);
95     size_t BOMLength = llvm::StringSwitch<size_t>(Buf)
96       .StartsWith("\xEF\xBB\xBF", 3) // UTF-8 BOM
97       .Default(0);
98 
99     // Skip the BOM.
100     BufferPtr += BOMLength;
101   }
102 
103   Is_PragmaLexer = false;
104   CurrentConflictMarkerState = CMK_None;
105 
106   // Start of the file is a start of line.
107   IsAtStartOfLine = true;
108   IsAtPhysicalStartOfLine = true;
109 
110   HasLeadingSpace = false;
111   HasLeadingEmptyMacro = false;
112 
113   // We are not after parsing a #.
114   ParsingPreprocessorDirective = false;
115 
116   // We are not after parsing #include.
117   ParsingFilename = false;
118 
119   // We are not in raw mode.  Raw mode disables diagnostics and interpretation
120   // of tokens (e.g. identifiers, thus disabling macro expansion).  It is used
121   // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block
122   // or otherwise skipping over tokens.
123   LexingRawMode = false;
124 
125   // Default to not keeping comments.
126   ExtendedTokenMode = 0;
127 
128   NewLinePtr = nullptr;
129 }
130 
/// Lexer constructor - Create a new lexer object for the specified buffer
/// with the specified preprocessor managing the lexing process.  This lexer
/// assumes that the associated file buffer and Preprocessor objects will
/// outlive it, so it doesn't take ownership of either of them.
///
/// \param FID the file being lexed; its start location becomes FileLoc.
/// \param InputFile the (borrowed) memory buffer holding the file contents.
/// \param PP the preprocessor that owns and drives this lexer.
/// \param IsFirstIncludeOfFile true if this is the first time the file is
///        entered (recorded in IsFirstTimeLexingFile).
Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &InputFile,
             Preprocessor &PP, bool IsFirstIncludeOfFile)
    : PreprocessorLexer(&PP, FID),
      FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
      LangOpts(PP.getLangOpts()), IsFirstTimeLexingFile(IsFirstIncludeOfFile) {
  // Lex from the very start of the buffer.
  InitLexer(InputFile.getBufferStart(), InputFile.getBufferStart(),
            InputFile.getBufferEnd());

  // Pick up comment/whitespace retention from the preprocessor's settings.
  resetExtendedTokenMode();
}
145 
/// Lexer constructor - Create a new raw lexer object.  This object is only
/// suitable for calls to 'LexFromRawLexer'.  This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
///
/// \param fileloc the location to report for the start of the buffer.
/// \param BufStart/BufPtr/BufEnd the buffer bounds and initial lex position.
Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts,
             const char *BufStart, const char *BufPtr, const char *BufEnd,
             bool IsFirstIncludeOfFile)
    : FileLoc(fileloc), LangOpts(langOpts),
      IsFirstTimeLexingFile(IsFirstIncludeOfFile) {
  InitLexer(BufStart, BufPtr, BufEnd);

  // We *are* in raw mode.
  LexingRawMode = true;
}
159 
/// Lexer constructor - Create a new raw lexer object.  This object is only
/// suitable for calls to 'LexFromRawLexer'.  This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
///
/// Delegates to the raw-text constructor above, lexing the whole of
/// \p FromFile starting at the beginning of \p FID.
Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &FromFile,
             const SourceManager &SM, const LangOptions &langOpts,
             bool IsFirstIncludeOfFile)
    : Lexer(SM.getLocForStartOfFile(FID), langOpts, FromFile.getBufferStart(),
            FromFile.getBufferStart(), FromFile.getBufferEnd(),
            IsFirstIncludeOfFile) {}
169 
170 void Lexer::resetExtendedTokenMode() {
171   assert(PP && "Cannot reset token mode without a preprocessor");
172   if (LangOpts.TraditionalCPP)
173     SetKeepWhitespaceMode(true);
174   else
175     SetCommentRetentionState(PP->getCommentRetentionState());
176 }
177 
/// Create_PragmaLexer: Lexer constructor - Create a new lexer object for
/// _Pragma expansion.  This has a variety of magic semantics that this method
/// sets up.  It returns a new'd Lexer that must be delete'd when done.
///
/// On entrance to this routine, TokStartLoc is a macro location which has a
/// spelling loc that indicates the bytes to be lexed for the token and an
/// expansion location that indicates where all lexed tokens should be
/// "expanded from".
///
/// \param SpellingLoc the location whose spelling bytes form the pragma text.
/// \param ExpansionLocStart/ExpansionLocEnd the range the lexed tokens should
///        appear to be expanded from.
/// \param TokLen the number of spelling bytes to lex.
///
/// TODO: It would really be nice to make _Pragma just be a wrapper around a
/// normal lexer that remaps tokens as they fly by.  This would require making
/// Preprocessor::Lex virtual.  Given that, we could just dump in a magic lexer
/// interface that could handle this stuff.  This would pull GetMappedTokenLoc
/// out of the critical path of the lexer!
///
Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
                                 SourceLocation ExpansionLocStart,
                                 SourceLocation ExpansionLocEnd,
                                 unsigned TokLen, Preprocessor &PP) {
  SourceManager &SM = PP.getSourceManager();

  // Create the lexer as if we were going to lex the file normally.
  FileID SpellingFID = SM.getFileID(SpellingLoc);
  llvm::MemoryBufferRef InputFile = SM.getBufferOrFake(SpellingFID);
  Lexer *L = new Lexer(SpellingFID, InputFile, PP);

  // Now that the lexer is created, change the start/end locations so that we
  // just lex the subsection of the file that we want.  This is lexing from a
  // scratch buffer.
  const char *StrData = SM.getCharacterData(SpellingLoc);

  L->BufferPtr = StrData;
  L->BufferEnd = StrData+TokLen;
  assert(L->BufferEnd[0] == 0 && "Buffer is not nul terminated!");

  // Set the SourceLocation with the remapping information.  This ensures that
  // GetMappedTokenLoc will remap the tokens as they are lexed.
  L->FileLoc = SM.createExpansionLoc(SM.getLocForStartOfFile(SpellingFID),
                                     ExpansionLocStart,
                                     ExpansionLocEnd, TokLen);

  // Ensure that the lexer thinks it is inside a directive, so that end \n will
  // return an EOD token.
  L->ParsingPreprocessorDirective = true;

  // This lexer really is for _Pragma.
  L->Is_PragmaLexer = true;
  return L;
}
227 
228 bool Lexer::skipOver(unsigned NumBytes) {
229   IsAtPhysicalStartOfLine = true;
230   IsAtStartOfLine = true;
231   if ((BufferPtr + NumBytes) > BufferEnd)
232     return true;
233   BufferPtr += NumBytes;
234   return false;
235 }
236 
/// Escape \p Str in place so it can be embedded inside a literal delimited by
/// \p Quote: backslashes and the quote character are backslash-escaped, and
/// every newline/carriage-return (including two-character "\r\n"/"\n\r"
/// pairs, which collapse to one escape) becomes the two characters '\\' 'n'.
template <typename T> static void StringifyImpl(T &Str, char Quote) {
  typename T::size_type Idx = 0, Len = Str.size();
  while (Idx < Len) {
    const char C = Str[Idx];
    if (C == '\\' || C == Quote) {
      // Insert a backslash before the character that needs escaping.
      Str.insert(Str.begin() + Idx, '\\');
      ++Len;
      Idx += 2;
      continue;
    }
    if (C != '\n' && C != '\r') {
      ++Idx;
      continue;
    }
    if (Idx + 1 < Len && (Str[Idx + 1] == '\n' || Str[Idx + 1] == '\r') &&
        Str[Idx + 1] != C) {
      // A mixed "\r\n" or "\n\r" pair becomes a single "\n" escape, reusing
      // both existing slots.
      Str[Idx] = '\\';
      Str[Idx + 1] = 'n';
    } else {
      // A lone '\n' or '\r' becomes '\\' followed by an inserted 'n'.
      Str[Idx] = '\\';
      Str.insert(Str.begin() + Idx + 1, 'n');
      ++Len;
    }
    Idx += 2;
  }
}
261 
262 std::string Lexer::Stringify(StringRef Str, bool Charify) {
263   std::string Result = std::string(Str);
264   char Quote = Charify ? '\'' : '"';
265   StringifyImpl(Result, Quote);
266   return Result;
267 }
268 
269 void Lexer::Stringify(SmallVectorImpl<char> &Str) { StringifyImpl(Str, '"'); }
270 
271 //===----------------------------------------------------------------------===//
272 // Token Spelling
273 //===----------------------------------------------------------------------===//
274 
/// Slow case of getSpelling. Extract the characters comprising the
/// spelling of this token from the provided input buffer, undoing trigraph
/// expansion and escaped-newline splices one character at a time.
///
/// \param Tok the token being spelled; must have the needs-cleaning flag set.
/// \param BufPtr start of the token's bytes in the source buffer.
/// \param Spelling output buffer; the trailing assert shows the cleaned
///        spelling is always strictly shorter than Tok.getLength(), so the
///        caller must provide at least that many bytes.
/// \returns the number of bytes written to \p Spelling.
static size_t getSpellingSlow(const Token &Tok, const char *BufPtr,
                              const LangOptions &LangOpts, char *Spelling) {
  assert(Tok.needsCleaning() && "getSpellingSlow called on simple token");

  size_t Length = 0;
  const char *BufEnd = BufPtr + Tok.getLength();

  if (tok::isStringLiteral(Tok.getKind())) {
    // Munch the encoding-prefix and opening double-quote.
    while (BufPtr < BufEnd) {
      unsigned Size;
      Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
      BufPtr += Size;

      if (Spelling[Length - 1] == '"')
        break;
    }

    // Raw string literals need special handling; trigraph expansion and line
    // splicing do not occur within their d-char-sequence nor within their
    // r-char-sequence.
    if (Length >= 2 &&
        Spelling[Length - 2] == 'R' && Spelling[Length - 1] == '"') {
      // Search backwards from the end of the token to find the matching closing
      // quote.
      const char *RawEnd = BufEnd;
      do --RawEnd; while (*RawEnd != '"');
      size_t RawLength = RawEnd - BufPtr + 1;

      // Everything between the quotes is included verbatim in the spelling.
      memcpy(Spelling + Length, BufPtr, RawLength);
      Length += RawLength;
      BufPtr += RawLength;

      // The rest of the token is lexed normally.
    }
  }

  // Clean the remaining (or, for non-string tokens, all) characters one at a
  // time, collapsing trigraphs and escaped newlines as we go.
  while (BufPtr < BufEnd) {
    unsigned Size;
    Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
    BufPtr += Size;
  }

  assert(Length < Tok.getLength() &&
         "NeedsCleaning flag set on token that didn't need cleaning!");
  return Length;
}
325 
/// getSpelling() - Return the 'spelling' of this token.  The spelling of a
/// token are the characters used to represent the token in the source file
/// after trigraph expansion and escaped-newline folding.  In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs
/// UCNs, etc.
///
/// Relexes one raw token starting at \p loc.  The returned StringRef points
/// either directly into the source buffer (clean token) or into \p buffer
/// (token needing cleaning), so it is only valid as long as both live.
/// \param invalid if non-null, set to true when the file buffer can't be
///        loaded (an empty StringRef is returned in that case).
StringRef Lexer::getSpelling(SourceLocation loc,
                             SmallVectorImpl<char> &buffer,
                             const SourceManager &SM,
                             const LangOptions &options,
                             bool *invalid) {
  // Break down the source location.
  std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);

  // Try to the load the file buffer.
  bool invalidTemp = false;
  StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
  if (invalidTemp) {
    if (invalid) *invalid = true;
    return {};
  }

  const char *tokenBegin = file.data() + locInfo.second;

  // Lex from the start of the given location.
  Lexer lexer(SM.getLocForStartOfFile(locInfo.first), options,
              file.begin(), tokenBegin, file.end());
  Token token;
  lexer.LexFromRawLexer(token);

  unsigned length = token.getLength();

  // Common case:  no need for cleaning.
  if (!token.needsCleaning())
    return StringRef(tokenBegin, length);

  // Hard case, we need to relex the characters into the string.
  // First size the buffer for the worst case, then shrink it to the number of
  // bytes the cleaning pass actually produced.
  buffer.resize(length);
  buffer.resize(getSpellingSlow(token, tokenBegin, options, buffer.data()));
  return StringRef(buffer.data(), buffer.size());
}
366 
367 /// getSpelling() - Return the 'spelling' of this token.  The spelling of a
368 /// token are the characters used to represent the token in the source file
369 /// after trigraph expansion and escaped-newline folding.  In particular, this
370 /// wants to get the true, uncanonicalized, spelling of things like digraphs
371 /// UCNs, etc.
372 std::string Lexer::getSpelling(const Token &Tok, const SourceManager &SourceMgr,
373                                const LangOptions &LangOpts, bool *Invalid) {
374   assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
375 
376   bool CharDataInvalid = false;
377   const char *TokStart = SourceMgr.getCharacterData(Tok.getLocation(),
378                                                     &CharDataInvalid);
379   if (Invalid)
380     *Invalid = CharDataInvalid;
381   if (CharDataInvalid)
382     return {};
383 
384   // If this token contains nothing interesting, return it directly.
385   if (!Tok.needsCleaning())
386     return std::string(TokStart, TokStart + Tok.getLength());
387 
388   std::string Result;
389   Result.resize(Tok.getLength());
390   Result.resize(getSpellingSlow(Tok, TokStart, LangOpts, &*Result.begin()));
391   return Result;
392 }
393 
/// getSpelling - This method is used to get the spelling of a token into a
/// preallocated buffer, instead of as an std::string.  The caller is required
/// to allocate enough space for the token, which is guaranteed to be at least
/// Tok.getLength() bytes long.  The actual length of the token is returned.
///
/// Note that this method may do two possible things: it may either fill in
/// the buffer specified with characters, or it may *change the input pointer*
/// to point to a constant buffer with the data already in it (avoiding a
/// copy).  The caller is not allowed to modify the returned buffer pointer
/// if an internal buffer is returned.
unsigned Lexer::getSpelling(const Token &Tok, const char *&Buffer,
                            const SourceManager &SourceMgr,
                            const LangOptions &LangOpts, bool *Invalid) {
  assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");

  const char *TokStart = nullptr;
  // NOTE: this has to be checked *before* testing for an IdentifierInfo.
  if (Tok.is(tok::raw_identifier))
    TokStart = Tok.getRawIdentifier().data();
  else if (!Tok.hasUCN()) {
    if (const IdentifierInfo *II = Tok.getIdentifierInfo()) {
      // Just return the string from the identifier table, which is very quick.
      Buffer = II->getNameStart();
      return II->getLength();
    }
  }

  // NOTE: this can be checked even after testing for an IdentifierInfo.
  if (Tok.isLiteral())
    TokStart = Tok.getLiteralData();

  if (!TokStart) {
    // Compute the start of the token in the input lexer buffer.
    bool CharDataInvalid = false;
    TokStart = SourceMgr.getCharacterData(Tok.getLocation(), &CharDataInvalid);
    if (Invalid)
      *Invalid = CharDataInvalid;
    if (CharDataInvalid) {
      Buffer = "";
      return 0;
    }
  }

  // If this token contains nothing interesting, return it directly.
  if (!Tok.needsCleaning()) {
    Buffer = TokStart;
    return Tok.getLength();
  }

  // Otherwise, hard case, relex the characters into the string.
  // NOTE(review): this writes the cleaned spelling *through* Buffer, so on
  // this path the caller must have pointed Buffer at writable storage of at
  // least Tok.getLength() bytes (the const_cast drops the constness the
  // fast paths above rely on).
  return getSpellingSlow(Tok, TokStart, LangOpts, const_cast<char*>(Buffer));
}
446 
447 /// MeasureTokenLength - Relex the token at the specified location and return
448 /// its length in bytes in the input file.  If the token needs cleaning (e.g.
449 /// includes a trigraph or an escaped newline) then this count includes bytes
450 /// that are part of that.
451 unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
452                                    const SourceManager &SM,
453                                    const LangOptions &LangOpts) {
454   Token TheTok;
455   if (getRawToken(Loc, TheTok, SM, LangOpts))
456     return 0;
457   return TheTok.getLength();
458 }
459 
460 /// Relex the token at the specified location.
461 /// \returns true if there was a failure, false on success.
462 bool Lexer::getRawToken(SourceLocation Loc, Token &Result,
463                         const SourceManager &SM,
464                         const LangOptions &LangOpts,
465                         bool IgnoreWhiteSpace) {
466   // TODO: this could be special cased for common tokens like identifiers, ')',
467   // etc to make this faster, if it mattered.  Just look at StrData[0] to handle
468   // all obviously single-char tokens.  This could use
469   // Lexer::isObviouslySimpleCharacter for example to handle identifiers or
470   // something.
471 
472   // If this comes from a macro expansion, we really do want the macro name, not
473   // the token this macro expanded to.
474   Loc = SM.getExpansionLoc(Loc);
475   std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
476   bool Invalid = false;
477   StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
478   if (Invalid)
479     return true;
480 
481   const char *StrData = Buffer.data()+LocInfo.second;
482 
483   if (!IgnoreWhiteSpace && isWhitespace(StrData[0]))
484     return true;
485 
486   // Create a lexer starting at the beginning of this token.
487   Lexer TheLexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts,
488                  Buffer.begin(), StrData, Buffer.end());
489   TheLexer.SetCommentRetentionState(true);
490   TheLexer.LexFromRawLexer(Result);
491   return false;
492 }
493 
494 /// Returns the pointer that points to the beginning of line that contains
495 /// the given offset, or null if the offset if invalid.
496 static const char *findBeginningOfLine(StringRef Buffer, unsigned Offset) {
497   const char *BufStart = Buffer.data();
498   if (Offset >= Buffer.size())
499     return nullptr;
500 
501   const char *LexStart = BufStart + Offset;
502   for (; LexStart != BufStart; --LexStart) {
503     if (isVerticalWhitespace(LexStart[0]) &&
504         !Lexer::isNewLineEscaped(BufStart, LexStart)) {
505       // LexStart should point at first character of logical line.
506       ++LexStart;
507       break;
508     }
509   }
510   return LexStart;
511 }
512 
/// Find the start of the token that contains the file location \p Loc by
/// backing up to the beginning of its logical line and relexing forward.
/// Returns \p Loc itself on any failure, when \p Loc is already at a line
/// start, or when \p Loc points into whitespace between tokens.
static SourceLocation getBeginningOfFileToken(SourceLocation Loc,
                                              const SourceManager &SM,
                                              const LangOptions &LangOpts) {
  assert(Loc.isFileID());
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  if (LocInfo.first.isInvalid())
    return Loc;

  bool Invalid = false;
  StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid)
    return Loc;

  // Back up from the current location until we hit the beginning of a line
  // (or the buffer). We'll relex from that point.
  const char *StrData = Buffer.data() + LocInfo.second;
  const char *LexStart = findBeginningOfLine(Buffer, LocInfo.second);
  if (!LexStart || LexStart == StrData)
    return Loc;

  // Create a lexer starting at the beginning of this token.
  SourceLocation LexerStartLoc = Loc.getLocWithOffset(-LocInfo.second);
  Lexer TheLexer(LexerStartLoc, LangOpts, Buffer.data(), LexStart,
                 Buffer.end());
  TheLexer.SetCommentRetentionState(true);

  // Lex tokens until we find the token that contains the source location.
  Token TheTok;
  do {
    TheLexer.LexFromRawLexer(TheTok);

    if (TheLexer.getBufferLocation() > StrData) {
      // Lexing this token has taken the lexer past the source location we're
      // looking for. If the current token encompasses our source location,
      // return the beginning of that token.
      if (TheLexer.getBufferLocation() - TheTok.getLength() <= StrData)
        return TheTok.getLocation();

      // We ended up skipping over the source location entirely, which means
      // that it points into whitespace. We're done here.
      break;
    }
  } while (TheTok.getKind() != tok::eof);

  // We've passed our source location; just return the original source location.
  return Loc;
}
560 
561 SourceLocation Lexer::GetBeginningOfToken(SourceLocation Loc,
562                                           const SourceManager &SM,
563                                           const LangOptions &LangOpts) {
564   if (Loc.isFileID())
565     return getBeginningOfFileToken(Loc, SM, LangOpts);
566 
567   if (!SM.isMacroArgExpansion(Loc))
568     return Loc;
569 
570   SourceLocation FileLoc = SM.getSpellingLoc(Loc);
571   SourceLocation BeginFileLoc = getBeginningOfFileToken(FileLoc, SM, LangOpts);
572   std::pair<FileID, unsigned> FileLocInfo = SM.getDecomposedLoc(FileLoc);
573   std::pair<FileID, unsigned> BeginFileLocInfo =
574       SM.getDecomposedLoc(BeginFileLoc);
575   assert(FileLocInfo.first == BeginFileLocInfo.first &&
576          FileLocInfo.second >= BeginFileLocInfo.second);
577   return Loc.getLocWithOffset(BeginFileLocInfo.second - FileLocInfo.second);
578 }
579 
namespace {

/// Classification of a preprocessor directive keyword encountered while
/// scanning for a preamble (see Lexer::ComputePreamble): either a directive
/// that may appear in a preamble and is skipped over, or an unrecognized one
/// that ends the scan.
enum PreambleDirectiveKind {
  PDK_Skipped,
  PDK_Unknown
};

} // namespace
588 
/// Raw-lex \p Buffer from its start and compute how far its leading
/// "preprocessing only" region — comments and recognized preprocessor
/// directives — extends.  Scanning stops at the first ordinary token, at an
/// unrecognized directive, or once \p MaxLines (if nonzero) lines have been
/// consumed.  Returns the size in bytes of that region and whether it ends at
/// the start of a line.
PreambleBounds Lexer::ComputePreamble(StringRef Buffer,
                                      const LangOptions &LangOpts,
                                      unsigned MaxLines) {
  // Create a lexer starting at the beginning of the file. Note that we use a
  // "fake" file source location at offset 1 so that the lexer will track our
  // position within the file.
  const SourceLocation::UIntTy StartOffset = 1;
  SourceLocation FileLoc = SourceLocation::getFromRawEncoding(StartOffset);
  Lexer TheLexer(FileLoc, LangOpts, Buffer.begin(), Buffer.begin(),
                 Buffer.end());
  TheLexer.SetCommentRetentionState(true);

  bool InPreprocessorDirective = false;
  Token TheTok;
  // Location of the first comment in the current run of comments, so a
  // trailing declaration comment isn't truncated off the preamble.
  SourceLocation ActiveCommentLoc;

  // Translate the MaxLines line limit into a byte offset by scanning for
  // newlines; 0 means "no limit".
  unsigned MaxLineOffset = 0;
  if (MaxLines) {
    const char *CurPtr = Buffer.begin();
    unsigned CurLine = 0;
    while (CurPtr != Buffer.end()) {
      char ch = *CurPtr++;
      if (ch == '\n') {
        ++CurLine;
        if (CurLine == MaxLines)
          break;
      }
    }
    if (CurPtr != Buffer.end())
      MaxLineOffset = CurPtr - Buffer.begin();
  }

  do {
    TheLexer.LexFromRawLexer(TheTok);

    if (InPreprocessorDirective) {
      // If we've hit the end of the file, we're done.
      if (TheTok.getKind() == tok::eof) {
        break;
      }

      // If we haven't hit the end of the preprocessor directive, skip this
      // token.
      if (!TheTok.isAtStartOfLine())
        continue;

      // We've passed the end of the preprocessor directive, and will look
      // at this token again below.
      InPreprocessorDirective = false;
    }

    // Keep track of the # of lines in the preamble.
    if (TheTok.isAtStartOfLine()) {
      unsigned TokOffset = TheTok.getLocation().getRawEncoding() - StartOffset;

      // If we were asked to limit the number of lines in the preamble,
      // and we're about to exceed that limit, we're done.
      if (MaxLineOffset && TokOffset >= MaxLineOffset)
        break;
    }

    // Comments are okay; skip over them.
    if (TheTok.getKind() == tok::comment) {
      if (ActiveCommentLoc.isInvalid())
        ActiveCommentLoc = TheTok.getLocation();
      continue;
    }

    if (TheTok.isAtStartOfLine() && TheTok.getKind() == tok::hash) {
      // This is the start of a preprocessor directive.
      Token HashTok = TheTok;
      InPreprocessorDirective = true;
      ActiveCommentLoc = SourceLocation();

      // Figure out which directive this is. Since we're lexing raw tokens,
      // we don't have an identifier table available. Instead, just look at
      // the raw identifier to recognize and categorize preprocessor directives.
      TheLexer.LexFromRawLexer(TheTok);
      if (TheTok.getKind() == tok::raw_identifier && !TheTok.needsCleaning()) {
        StringRef Keyword = TheTok.getRawIdentifier();
        PreambleDirectiveKind PDK
          = llvm::StringSwitch<PreambleDirectiveKind>(Keyword)
              .Case("include", PDK_Skipped)
              .Case("__include_macros", PDK_Skipped)
              .Case("define", PDK_Skipped)
              .Case("undef", PDK_Skipped)
              .Case("line", PDK_Skipped)
              .Case("error", PDK_Skipped)
              .Case("pragma", PDK_Skipped)
              .Case("import", PDK_Skipped)
              .Case("include_next", PDK_Skipped)
              .Case("warning", PDK_Skipped)
              .Case("ident", PDK_Skipped)
              .Case("sccs", PDK_Skipped)
              .Case("assert", PDK_Skipped)
              .Case("unassert", PDK_Skipped)
              .Case("if", PDK_Skipped)
              .Case("ifdef", PDK_Skipped)
              .Case("ifndef", PDK_Skipped)
              .Case("elif", PDK_Skipped)
              .Case("elifdef", PDK_Skipped)
              .Case("elifndef", PDK_Skipped)
              .Case("else", PDK_Skipped)
              .Case("endif", PDK_Skipped)
              .Default(PDK_Unknown);

        switch (PDK) {
        case PDK_Skipped:
          continue;

        case PDK_Unknown:
          // We don't know what this directive is; stop at the '#'.
          break;
        }
      }

      // We only end up here if we didn't recognize the preprocessor
      // directive or it was one that can't occur in the preamble at this
      // point. Roll back the current token to the location of the '#'.
      TheTok = HashTok;
    }

    // We hit a token that we don't recognize as being in the
    // "preprocessing only" part of the file, so we're no longer in
    // the preamble.
    break;
  } while (true);

  SourceLocation End;
  if (ActiveCommentLoc.isValid())
    End = ActiveCommentLoc; // don't truncate a decl comment.
  else
    End = TheTok.getLocation();

  return PreambleBounds(End.getRawEncoding() - FileLoc.getRawEncoding(),
                        TheTok.isAtStartOfLine());
}
726 
/// Return the physical byte offset, relative to the start of the token at
/// \p TokStart, of the \p CharNo'th *cleaned* character of that token —
/// i.e. account for trigraphs and escaped newlines that occupy extra bytes.
/// Returns 0 if the token's bytes can't be loaded.
unsigned Lexer::getTokenPrefixLength(SourceLocation TokStart, unsigned CharNo,
                                     const SourceManager &SM,
                                     const LangOptions &LangOpts) {
  // Figure out how many physical characters away the specified expansion
  // character is.  This needs to take into consideration newlines and
  // trigraphs.
  bool Invalid = false;
  const char *TokPtr = SM.getCharacterData(TokStart, &Invalid);

  // If they request the first char of the token, we're trivially done.
  if (Invalid || (CharNo == 0 && Lexer::isObviouslySimpleCharacter(*TokPtr)))
    return 0;

  unsigned PhysOffset = 0;

  // The usual case is that tokens don't contain anything interesting.  Skip
  // over the uninteresting characters.  If a token only consists of simple
  // chars, this method is extremely fast.
  while (Lexer::isObviouslySimpleCharacter(*TokPtr)) {
    if (CharNo == 0)
      return PhysOffset;
    ++TokPtr;
    --CharNo;
    ++PhysOffset;
  }

  // If we have a character that may be a trigraph or escaped newline, use a
  // lexer to parse it correctly.
  for (; CharNo; --CharNo) {
    unsigned Size;
    Lexer::getCharAndSizeNoWarn(TokPtr, Size, LangOpts);
    TokPtr += Size;
    PhysOffset += Size;
  }

  // Final detail: if we end up on an escaped newline, we want to return the
  // location of the actual byte of the token.  For example foo\<newline>bar
  // advanced by 3 should return the location of b, not of \\.  One compounding
  // detail of this is that the escape may be made by a trigraph.
  if (!Lexer::isObviouslySimpleCharacter(*TokPtr))
    PhysOffset += Lexer::SkipEscapedNewLines(TokPtr)-TokPtr;

  return PhysOffset;
}
771 
772 /// Computes the source location just past the end of the
773 /// token at this source location.
774 ///
775 /// This routine can be used to produce a source location that
776 /// points just past the end of the token referenced by \p Loc, and
777 /// is generally used when a diagnostic needs to point just after a
778 /// token where it expected something different that it received. If
779 /// the returned source location would not be meaningful (e.g., if
780 /// it points into a macro), this routine returns an invalid
781 /// source location.
782 ///
783 /// \param Offset an offset from the end of the token, where the source
784 /// location should refer to. The default offset (0) produces a source
785 /// location pointing just past the end of the token; an offset of 1 produces
786 /// a source location pointing to the last character in the token, etc.
787 SourceLocation Lexer::getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
788                                           const SourceManager &SM,
789                                           const LangOptions &LangOpts) {
790   if (Loc.isInvalid())
791     return {};
792 
793   if (Loc.isMacroID()) {
794     if (Offset > 0 || !isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
795       return {}; // Points inside the macro expansion.
796   }
797 
798   unsigned Len = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
799   if (Len > Offset)
800     Len = Len - Offset;
801   else
802     return Loc;
803 
804   return Loc.getLocWithOffset(Len);
805 }
806 
807 /// Returns true if the given MacroID location points at the first
808 /// token of the macro expansion.
809 bool Lexer::isAtStartOfMacroExpansion(SourceLocation loc,
810                                       const SourceManager &SM,
811                                       const LangOptions &LangOpts,
812                                       SourceLocation *MacroBegin) {
813   assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");
814 
815   SourceLocation expansionLoc;
816   if (!SM.isAtStartOfImmediateMacroExpansion(loc, &expansionLoc))
817     return false;
818 
819   if (expansionLoc.isFileID()) {
820     // No other macro expansions, this is the first.
821     if (MacroBegin)
822       *MacroBegin = expansionLoc;
823     return true;
824   }
825 
826   return isAtStartOfMacroExpansion(expansionLoc, SM, LangOpts, MacroBegin);
827 }
828 
829 /// Returns true if the given MacroID location points at the last
830 /// token of the macro expansion.
831 bool Lexer::isAtEndOfMacroExpansion(SourceLocation loc,
832                                     const SourceManager &SM,
833                                     const LangOptions &LangOpts,
834                                     SourceLocation *MacroEnd) {
835   assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");
836 
837   SourceLocation spellLoc = SM.getSpellingLoc(loc);
838   unsigned tokLen = MeasureTokenLength(spellLoc, SM, LangOpts);
839   if (tokLen == 0)
840     return false;
841 
842   SourceLocation afterLoc = loc.getLocWithOffset(tokLen);
843   SourceLocation expansionLoc;
844   if (!SM.isAtEndOfImmediateMacroExpansion(afterLoc, &expansionLoc))
845     return false;
846 
847   if (expansionLoc.isFileID()) {
848     // No other macro expansions.
849     if (MacroEnd)
850       *MacroEnd = expansionLoc;
851     return true;
852   }
853 
854   return isAtEndOfMacroExpansion(expansionLoc, SM, LangOpts, MacroEnd);
855 }
856 
857 static CharSourceRange makeRangeFromFileLocs(CharSourceRange Range,
858                                              const SourceManager &SM,
859                                              const LangOptions &LangOpts) {
860   SourceLocation Begin = Range.getBegin();
861   SourceLocation End = Range.getEnd();
862   assert(Begin.isFileID() && End.isFileID());
863   if (Range.isTokenRange()) {
864     End = Lexer::getLocForEndOfToken(End, 0, SM,LangOpts);
865     if (End.isInvalid())
866       return {};
867   }
868 
869   // Break down the source locations.
870   FileID FID;
871   unsigned BeginOffs;
872   std::tie(FID, BeginOffs) = SM.getDecomposedLoc(Begin);
873   if (FID.isInvalid())
874     return {};
875 
876   unsigned EndOffs;
877   if (!SM.isInFileID(End, FID, &EndOffs) ||
878       BeginOffs > EndOffs)
879     return {};
880 
881   return CharSourceRange::getCharRange(Begin, End);
882 }
883 
884 // Assumes that `Loc` is in an expansion.
885 static bool isInExpansionTokenRange(const SourceLocation Loc,
886                                     const SourceManager &SM) {
887   return SM.getSLocEntry(SM.getFileID(Loc))
888       .getExpansion()
889       .isExpansionTokenRange();
890 }
891 
/// Map \p Range, whose endpoints may be macro locations, onto an equivalent
/// character range whose endpoints are file locations, or return an invalid
/// range when no such mapping exists.  Handles each combination of
/// file/macro endpoints separately, and finally the case where both
/// endpoints come from the same macro-argument expansion.
CharSourceRange Lexer::makeFileCharRange(CharSourceRange Range,
                                         const SourceManager &SM,
                                         const LangOptions &LangOpts) {
  SourceLocation Begin = Range.getBegin();
  SourceLocation End = Range.getEnd();
  if (Begin.isInvalid() || End.isInvalid())
    return {};

  // Case 1: both endpoints already in a file — delegate directly.
  if (Begin.isFileID() && End.isFileID())
    return makeRangeFromFileLocs(Range, SM, LangOpts);

  // Case 2: macro begin, file end — usable only if Begin is the first token
  // of its expansion, in which case it can be replaced by the expansion loc.
  if (Begin.isMacroID() && End.isFileID()) {
    if (!isAtStartOfMacroExpansion(Begin, SM, LangOpts, &Begin))
      return {};
    Range.setBegin(Begin);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  // Case 3: file begin, macro end — for a token range End must be the last
  // token of its expansion; for a char range it must be the first.
  if (Begin.isFileID() && End.isMacroID()) {
    if (Range.isTokenRange()) {
      if (!isAtEndOfMacroExpansion(End, SM, LangOpts, &End))
        return {};
      // Use the *original* end, not the expanded one in `End`.
      Range.setTokenRange(isInExpansionTokenRange(Range.getEnd(), SM));
    } else if (!isAtStartOfMacroExpansion(End, SM, LangOpts, &End))
      return {};
    Range.setEnd(End);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  // Case 4: both endpoints are macro locations.  They are usable if Begin
  // starts an expansion and End ends one (token range) or starts one (char
  // range); both are then rewritten to the expansion locations.
  assert(Begin.isMacroID() && End.isMacroID());
  SourceLocation MacroBegin, MacroEnd;
  if (isAtStartOfMacroExpansion(Begin, SM, LangOpts, &MacroBegin) &&
      ((Range.isTokenRange() && isAtEndOfMacroExpansion(End, SM, LangOpts,
                                                        &MacroEnd)) ||
       (Range.isCharRange() && isAtStartOfMacroExpansion(End, SM, LangOpts,
                                                         &MacroEnd)))) {
    Range.setBegin(MacroBegin);
    Range.setEnd(MacroEnd);
    // Use the *original* `End`, not the expanded one in `MacroEnd`.
    if (Range.isTokenRange())
      Range.setTokenRange(isInExpansionTokenRange(End, SM));
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  // Last resort: if both endpoints are arguments of the *same* macro
  // invocation, retry with their immediate spelling locations (recursing
  // until a representable range is found or we run out of levels).
  bool Invalid = false;
  const SrcMgr::SLocEntry &BeginEntry = SM.getSLocEntry(SM.getFileID(Begin),
                                                        &Invalid);
  if (Invalid)
    return {};

  if (BeginEntry.getExpansion().isMacroArgExpansion()) {
    const SrcMgr::SLocEntry &EndEntry = SM.getSLocEntry(SM.getFileID(End),
                                                        &Invalid);
    if (Invalid)
      return {};

    if (EndEntry.getExpansion().isMacroArgExpansion() &&
        BeginEntry.getExpansion().getExpansionLocStart() ==
            EndEntry.getExpansion().getExpansionLocStart()) {
      Range.setBegin(SM.getImmediateSpellingLoc(Begin));
      Range.setEnd(SM.getImmediateSpellingLoc(End));
      return makeFileCharRange(Range, SM, LangOpts);
    }
  }

  return {};
}
960 
961 StringRef Lexer::getSourceText(CharSourceRange Range,
962                                const SourceManager &SM,
963                                const LangOptions &LangOpts,
964                                bool *Invalid) {
965   Range = makeFileCharRange(Range, SM, LangOpts);
966   if (Range.isInvalid()) {
967     if (Invalid) *Invalid = true;
968     return {};
969   }
970 
971   // Break down the source location.
972   std::pair<FileID, unsigned> beginInfo = SM.getDecomposedLoc(Range.getBegin());
973   if (beginInfo.first.isInvalid()) {
974     if (Invalid) *Invalid = true;
975     return {};
976   }
977 
978   unsigned EndOffs;
979   if (!SM.isInFileID(Range.getEnd(), beginInfo.first, &EndOffs) ||
980       beginInfo.second > EndOffs) {
981     if (Invalid) *Invalid = true;
982     return {};
983   }
984 
985   // Try to the load the file buffer.
986   bool invalidTemp = false;
987   StringRef file = SM.getBufferData(beginInfo.first, &invalidTemp);
988   if (invalidTemp) {
989     if (Invalid) *Invalid = true;
990     return {};
991   }
992 
993   if (Invalid) *Invalid = false;
994   return file.substr(beginInfo.second, EndOffs - beginInfo.second);
995 }
996 
/// Retrieve the spelled name of the macro whose expansion immediately
/// contains \p Loc.  For macro-argument locations this walks through nested
/// argument expansions to find the macro the argument token actually came
/// from.
StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
                                       const SourceManager &SM,
                                       const LangOptions &LangOpts) {
  assert(Loc.isMacroID() && "Only reasonable to call this on macros");

  // Find the location of the immediate macro expansion.
  while (true) {
    FileID FID = SM.getFileID(Loc);
    const SrcMgr::SLocEntry *E = &SM.getSLocEntry(FID);
    const SrcMgr::ExpansionInfo &Expansion = E->getExpansion();
    Loc = Expansion.getExpansionLocStart();
    // A non-argument expansion: Loc now points at the macro invocation.
    if (!Expansion.isMacroArgExpansion())
      break;

    // For macro arguments we need to check that the argument did not come
    // from an inner macro, e.g: "MAC1( MAC2(foo) )"

    // Loc points to the argument id of the macro definition, move to the
    // macro expansion.
    Loc = SM.getImmediateExpansionRange(Loc).getBegin();
    SourceLocation SpellLoc = Expansion.getSpellingLoc();
    if (SpellLoc.isFileID())
      break; // No inner macro.

    // If spelling location resides in the same FileID as macro expansion
    // location, it means there is no inner macro.
    FileID MacroFID = SM.getFileID(Loc);
    if (SM.isInFileID(SpellLoc, MacroFID))
      break;

    // Argument came from inner macro.
    Loc = SpellLoc;
  }

  // Find the spelling location of the start of the non-argument expansion
  // range. This is where the macro name was spelled in order to begin
  // expanding this macro.
  Loc = SM.getSpellingLoc(Loc);

  // Dig out the buffer where the macro name was spelled and the extents of the
  // name so that we can render it into the expansion note.
  std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
  unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
  StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
  return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
}
1043 
1044 StringRef Lexer::getImmediateMacroNameForDiagnostics(
1045     SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts) {
1046   assert(Loc.isMacroID() && "Only reasonable to call this on macros");
1047   // Walk past macro argument expansions.
1048   while (SM.isMacroArgExpansion(Loc))
1049     Loc = SM.getImmediateExpansionRange(Loc).getBegin();
1050 
1051   // If the macro's spelling has no FileID, then it's actually a token paste
1052   // or stringization (or similar) and not a macro at all.
1053   if (!SM.getFileEntryForID(SM.getFileID(SM.getSpellingLoc(Loc))))
1054     return {};
1055 
1056   // Find the spelling location of the start of the non-argument expansion
1057   // range. This is where the macro name was spelled in order to begin
1058   // expanding this macro.
1059   Loc = SM.getSpellingLoc(SM.getImmediateExpansionRange(Loc).getBegin());
1060 
1061   // Dig out the buffer where the macro name was spelled and the extents of the
1062   // name so that we can render it into the expansion note.
1063   std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
1064   unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
1065   StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
1066   return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
1067 }
1068 
1069 bool Lexer::isAsciiIdentifierContinueChar(char c, const LangOptions &LangOpts) {
1070   return isAsciiIdentifierContinue(c, LangOpts.DollarIdents);
1071 }
1072 
1073 bool Lexer::isNewLineEscaped(const char *BufferStart, const char *Str) {
1074   assert(isVerticalWhitespace(Str[0]));
1075   if (Str - 1 < BufferStart)
1076     return false;
1077 
1078   if ((Str[0] == '\n' && Str[-1] == '\r') ||
1079       (Str[0] == '\r' && Str[-1] == '\n')) {
1080     if (Str - 2 < BufferStart)
1081       return false;
1082     --Str;
1083   }
1084   --Str;
1085 
1086   // Rewind to first non-space character:
1087   while (Str > BufferStart && isHorizontalWhitespace(*Str))
1088     --Str;
1089 
1090   return *Str == '\\';
1091 }
1092 
1093 StringRef Lexer::getIndentationForLine(SourceLocation Loc,
1094                                        const SourceManager &SM) {
1095   if (Loc.isInvalid() || Loc.isMacroID())
1096     return {};
1097   std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
1098   if (LocInfo.first.isInvalid())
1099     return {};
1100   bool Invalid = false;
1101   StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
1102   if (Invalid)
1103     return {};
1104   const char *Line = findBeginningOfLine(Buffer, LocInfo.second);
1105   if (!Line)
1106     return {};
1107   StringRef Rest = Buffer.substr(Line - Buffer.data());
1108   size_t NumWhitespaceChars = Rest.find_first_not_of(" \t");
1109   return NumWhitespaceChars == StringRef::npos
1110              ? ""
1111              : Rest.take_front(NumWhitespaceChars);
1112 }
1113 
1114 //===----------------------------------------------------------------------===//
1115 // Diagnostics forwarding code.
1116 //===----------------------------------------------------------------------===//
1117 
1118 /// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the
1119 /// lexer buffer was all expanded at a single point, perform the mapping.
1120 /// This is currently only used for _Pragma implementation, so it is the slow
1121 /// path of the hot getSourceLocation method.  Do not allow it to be inlined.
1122 static LLVM_ATTRIBUTE_NOINLINE SourceLocation GetMappedTokenLoc(
1123     Preprocessor &PP, SourceLocation FileLoc, unsigned CharNo, unsigned TokLen);
1124 static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
1125                                         SourceLocation FileLoc,
1126                                         unsigned CharNo, unsigned TokLen) {
1127   assert(FileLoc.isMacroID() && "Must be a macro expansion");
1128 
1129   // Otherwise, we're lexing "mapped tokens".  This is used for things like
1130   // _Pragma handling.  Combine the expansion location of FileLoc with the
1131   // spelling location.
1132   SourceManager &SM = PP.getSourceManager();
1133 
1134   // Create a new SLoc which is expanded from Expansion(FileLoc) but whose
1135   // characters come from spelling(FileLoc)+Offset.
1136   SourceLocation SpellingLoc = SM.getSpellingLoc(FileLoc);
1137   SpellingLoc = SpellingLoc.getLocWithOffset(CharNo);
1138 
1139   // Figure out the expansion loc range, which is the range covered by the
1140   // original _Pragma(...) sequence.
1141   CharSourceRange II = SM.getImmediateExpansionRange(FileLoc);
1142 
1143   return SM.createExpansionLoc(SpellingLoc, II.getBegin(), II.getEnd(), TokLen);
1144 }
1145 
1146 /// getSourceLocation - Return a source location identifier for the specified
1147 /// offset in the current file.
1148 SourceLocation Lexer::getSourceLocation(const char *Loc,
1149                                         unsigned TokLen) const {
1150   assert(Loc >= BufferStart && Loc <= BufferEnd &&
1151          "Location out of range for this buffer!");
1152 
1153   // In the normal case, we're just lexing from a simple file buffer, return
1154   // the file id from FileLoc with the offset specified.
1155   unsigned CharNo = Loc-BufferStart;
1156   if (FileLoc.isFileID())
1157     return FileLoc.getLocWithOffset(CharNo);
1158 
1159   // Otherwise, this is the _Pragma lexer case, which pretends that all of the
1160   // tokens are lexed from where the _Pragma was defined.
1161   assert(PP && "This doesn't work on raw lexers");
1162   return GetMappedTokenLoc(*PP, FileLoc, CharNo, TokLen);
1163 }
1164 
1165 /// Diag - Forwarding function for diagnostics.  This translate a source
1166 /// position in the current buffer into a SourceLocation object for rendering.
1167 DiagnosticBuilder Lexer::Diag(const char *Loc, unsigned DiagID) const {
1168   return PP->Diag(getSourceLocation(Loc), DiagID);
1169 }
1170 
1171 //===----------------------------------------------------------------------===//
1172 // Trigraph and Escaped Newline Handling Code.
1173 //===----------------------------------------------------------------------===//
1174 
/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair,
/// return the decoded trigraph letter it corresponds to, or '\0' if nothing.
static char GetTrigraphCharForLetter(char Letter) {
  // Adjacent pairs: the third character of a "??x" trigraph followed by the
  // character it denotes.
  static const char TrigraphPairs[] = "=#)]([!|'^>}/\\<{-~";
  for (unsigned I = 0; I != sizeof(TrigraphPairs) - 1; I += 2)
    if (TrigraphPairs[I] == Letter)
      return TrigraphPairs[I + 1];
  return 0;
}
1191 
1192 /// DecodeTrigraphChar - If the specified character is a legal trigraph when
1193 /// prefixed with ??, emit a trigraph warning.  If trigraphs are enabled,
1194 /// return the result character.  Finally, emit a warning about trigraph use
1195 /// whether trigraphs are enabled or not.
1196 static char DecodeTrigraphChar(const char *CP, Lexer *L) {
1197   char Res = GetTrigraphCharForLetter(*CP);
1198   if (!Res || !L) return Res;
1199 
1200   if (!L->getLangOpts().Trigraphs) {
1201     if (!L->isLexingRawMode())
1202       L->Diag(CP-2, diag::trigraph_ignored);
1203     return 0;
1204   }
1205 
1206   if (!L->isLexingRawMode())
1207     L->Diag(CP-2, diag::trigraph_converted) << StringRef(&Res, 1);
1208   return Res;
1209 }
1210 
1211 /// getEscapedNewLineSize - Return the size of the specified escaped newline,
1212 /// or 0 if it is not an escaped newline. P[-1] is known to be a "\" or a
1213 /// trigraph equivalent on entry to this function.
1214 unsigned Lexer::getEscapedNewLineSize(const char *Ptr) {
1215   unsigned Size = 0;
1216   while (isWhitespace(Ptr[Size])) {
1217     ++Size;
1218 
1219     if (Ptr[Size-1] != '\n' && Ptr[Size-1] != '\r')
1220       continue;
1221 
1222     // If this is a \r\n or \n\r, skip the other half.
1223     if ((Ptr[Size] == '\r' || Ptr[Size] == '\n') &&
1224         Ptr[Size-1] != Ptr[Size])
1225       ++Size;
1226 
1227     return Size;
1228   }
1229 
1230   // Not an escaped newline, must be a \t or something else.
1231   return 0;
1232 }
1233 
1234 /// SkipEscapedNewLines - If P points to an escaped newline (or a series of
1235 /// them), skip over them and return the first non-escaped-newline found,
1236 /// otherwise return P.
1237 const char *Lexer::SkipEscapedNewLines(const char *P) {
1238   while (true) {
1239     const char *AfterEscape;
1240     if (*P == '\\') {
1241       AfterEscape = P+1;
1242     } else if (*P == '?') {
1243       // If not a trigraph for escape, bail out.
1244       if (P[1] != '?' || P[2] != '/')
1245         return P;
1246       // FIXME: Take LangOpts into account; the language might not
1247       // support trigraphs.
1248       AfterEscape = P+3;
1249     } else {
1250       return P;
1251     }
1252 
1253     unsigned NewLineSize = Lexer::getEscapedNewLineSize(AfterEscape);
1254     if (NewLineSize == 0) return P;
1255     P = AfterEscape+NewLineSize;
1256   }
1257 }
1258 
1259 Optional<Token> Lexer::findNextToken(SourceLocation Loc,
1260                                      const SourceManager &SM,
1261                                      const LangOptions &LangOpts) {
1262   if (Loc.isMacroID()) {
1263     if (!Lexer::isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
1264       return None;
1265   }
1266   Loc = Lexer::getLocForEndOfToken(Loc, 0, SM, LangOpts);
1267 
1268   // Break down the source location.
1269   std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
1270 
1271   // Try to load the file buffer.
1272   bool InvalidTemp = false;
1273   StringRef File = SM.getBufferData(LocInfo.first, &InvalidTemp);
1274   if (InvalidTemp)
1275     return None;
1276 
1277   const char *TokenBegin = File.data() + LocInfo.second;
1278 
1279   // Lex from the start of the given location.
1280   Lexer lexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts, File.begin(),
1281                                       TokenBegin, File.end());
1282   // Find the token.
1283   Token Tok;
1284   lexer.LexFromRawLexer(Tok);
1285   return Tok;
1286 }
1287 
/// Checks that the given token is the first token that occurs after the
/// given location (this excludes comments and whitespace). Returns the location
/// immediately after the specified token. If the token is not found or the
/// location is inside a macro, the returned source location will be invalid.
SourceLocation Lexer::findLocationAfterToken(
    SourceLocation Loc, tok::TokenKind TKind, const SourceManager &SM,
    const LangOptions &LangOpts, bool SkipTrailingWhitespaceAndNewLine) {
  // Raw-lex the next token and make sure it is of the expected kind.
  Optional<Token> Tok = findNextToken(Loc, SM, LangOpts);
  if (!Tok || Tok->isNot(TKind))
    return {};
  SourceLocation TokenLoc = Tok->getLocation();

  // Calculate how much whitespace needs to be skipped if any.
  unsigned NumWhitespaceChars = 0;
  if (SkipTrailingWhitespaceAndNewLine) {
    // Walk the raw buffer characters following the token.
    // NOTE(review): this walks past the token with no explicit bounds check;
    // presumably it relies on the buffer's terminator ending the whitespace
    // run — confirm against SourceManager's buffer guarantees.
    const char *TokenEnd = SM.getCharacterData(TokenLoc) + Tok->getLength();
    unsigned char C = *TokenEnd;
    while (isHorizontalWhitespace(C)) {
      C = *(++TokenEnd);
      NumWhitespaceChars++;
    }

    // Skip \r, \n, \r\n, or \n\r
    if (C == '\n' || C == '\r') {
      char PrevC = C;
      C = *(++TokenEnd);
      NumWhitespaceChars++;
      // Consume the second half of a two-character newline pair.
      if ((C == '\n' || C == '\r') && C != PrevC)
        NumWhitespaceChars++;
    }
  }

  return TokenLoc.getLocWithOffset(Tok->getLength() + NumWhitespaceChars);
}
1322 
/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
/// get its size, and return it.  This is tricky in several cases:
///   1. If currently at the start of a trigraph, we warn about the trigraph,
///      then either return the trigraph (skipping 3 chars) or the '?',
///      depending on whether trigraphs are enabled or not.
///   2. If this is an escaped newline (potentially with whitespace between
///      the backslash and newline), implicitly skip the newline and return
///      the char after it.
///
/// This handles the slow/uncommon case of the getCharAndSize method.  Here we
/// know that we can accumulate into Size, and that we have already incremented
/// Ptr by Size bytes.
///
/// \param Ptr buffer position to read from.
/// \param Size accumulator for the number of physical bytes consumed.
/// \param Tok if non-null, receives the NeedsCleaning flag and enables
///        diagnostics (trigraph/backslash-whitespace warnings).
///
/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
/// be updated to match.
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
                               Token *Tok) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters between the slash and
    // newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      // Warn if there was whitespace between the backslash and newline.
      if (Ptr[0] != '\n' && Ptr[0] != '\r' && Tok && !isLexingRawMode())
        Diag(Ptr, diag::backslash_newline_space);

      // Found backslash<whitespace><newline>.  Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr  += EscapedNewLineSize;

      // Use slow version to accumulate a correct size field.
      // (Recurses because the next char may itself be an escape/trigraph.)
      return getCharAndSizeSlow(Ptr, Size, Tok);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), emit
    // a trigraph warning.  If so, and if trigraphs are enabled, return it.
    if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : nullptr)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      Ptr += 3;
      Size += 3;
      // "??/" decodes to '\\', which may in turn start an escaped newline.
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}
1389 
/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
/// getCharAndSizeNoWarn method.  Here we know that we can accumulate into Size,
/// and that we have already incremented Ptr by Size bytes.
///
/// Unlike getCharAndSizeSlow, this emits no diagnostics and consults
/// LangOpts.Trigraphs directly (there is no Lexer instance here).
///
/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
/// be updated to match.
char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                     const LangOptions &LangOpts) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Found backslash<whitespace><newline>.  Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr  += EscapedNewLineSize;

      // Use slow version to accumulate a correct size field.
      // (Recurses because the next char may itself be an escape/trigraph.)
      return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (LangOpts.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), return
    // it.
    if (char C = GetTrigraphCharForLetter(Ptr[2])) {
      Ptr += 3;
      Size += 3;
      // "??/" decodes to '\\', which may in turn start an escaped newline.
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}
1436 
1437 //===----------------------------------------------------------------------===//
1438 // Helper methods for lexing.
1439 //===----------------------------------------------------------------------===//
1440 
1441 /// Routine that indiscriminately sets the offset into the source file.
1442 void Lexer::SetByteOffset(unsigned Offset, bool StartOfLine) {
1443   BufferPtr = BufferStart + Offset;
1444   if (BufferPtr > BufferEnd)
1445     BufferPtr = BufferEnd;
1446   // FIXME: What exactly does the StartOfLine bit mean?  There are two
1447   // possible meanings for the "start" of the line: the first token on the
1448   // unexpanded line, or the first token on the expanded line.
1449   IsAtStartOfLine = StartOfLine;
1450   IsAtPhysicalStartOfLine = StartOfLine;
1451 }
1452 
1453 static bool isUnicodeWhitespace(uint32_t Codepoint) {
1454   static const llvm::sys::UnicodeCharSet UnicodeWhitespaceChars(
1455       UnicodeWhitespaceCharRanges);
1456   return UnicodeWhitespaceChars.contains(Codepoint);
1457 }
1458 
1459 static bool isAllowedIDChar(uint32_t C, const LangOptions &LangOpts) {
1460   if (LangOpts.AsmPreprocessor) {
1461     return false;
1462   } else if (LangOpts.DollarIdents && '$' == C) {
1463     return true;
1464   } else if (LangOpts.CPlusPlus) {
1465     // A non-leading codepoint must have the XID_Continue property.
1466     // XIDContinueRanges doesn't contains characters also in XIDStartRanges,
1467     // so we need to check both tables.
1468     // '_' doesn't have the XID_Continue property but is allowed in C++.
1469     static const llvm::sys::UnicodeCharSet XIDStartChars(XIDStartRanges);
1470     static const llvm::sys::UnicodeCharSet XIDContinueChars(XIDContinueRanges);
1471     return C == '_' || XIDStartChars.contains(C) ||
1472            XIDContinueChars.contains(C);
1473   } else if (LangOpts.C11) {
1474     static const llvm::sys::UnicodeCharSet C11AllowedIDChars(
1475         C11AllowedIDCharRanges);
1476     return C11AllowedIDChars.contains(C);
1477   } else {
1478     static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
1479         C99AllowedIDCharRanges);
1480     return C99AllowedIDChars.contains(C);
1481   }
1482 }
1483 
1484 static bool isAllowedInitiallyIDChar(uint32_t C, const LangOptions &LangOpts) {
1485   if (LangOpts.AsmPreprocessor) {
1486     return false;
1487   }
1488   if (LangOpts.CPlusPlus) {
1489     static const llvm::sys::UnicodeCharSet XIDStartChars(XIDStartRanges);
1490     // '_' doesn't have the XID_Start property but is allowed in C++.
1491     return C == '_' || XIDStartChars.contains(C);
1492   }
1493   if (!isAllowedIDChar(C, LangOpts))
1494     return false;
1495   if (LangOpts.C11) {
1496     static const llvm::sys::UnicodeCharSet C11DisallowedInitialIDChars(
1497         C11DisallowedInitialIDCharRanges);
1498     return !C11DisallowedInitialIDChars.contains(C);
1499   }
1500   static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
1501       C99DisallowedInitialIDCharRanges);
1502   return !C99DisallowedInitialIDChars.contains(C);
1503 }
1504 
1505 static inline CharSourceRange makeCharRange(Lexer &L, const char *Begin,
1506                                             const char *End) {
1507   return CharSourceRange::getCharRange(L.getSourceLocation(Begin),
1508                                        L.getSourceLocation(End));
1509 }
1510 
1511 static void maybeDiagnoseIDCharCompat(DiagnosticsEngine &Diags, uint32_t C,
1512                                       CharSourceRange Range, bool IsFirst) {
1513   // Check C99 compatibility.
1514   if (!Diags.isIgnored(diag::warn_c99_compat_unicode_id, Range.getBegin())) {
1515     enum {
1516       CannotAppearInIdentifier = 0,
1517       CannotStartIdentifier
1518     };
1519 
1520     static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
1521         C99AllowedIDCharRanges);
1522     static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
1523         C99DisallowedInitialIDCharRanges);
1524     if (!C99AllowedIDChars.contains(C)) {
1525       Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
1526         << Range
1527         << CannotAppearInIdentifier;
1528     } else if (IsFirst && C99DisallowedInitialIDChars.contains(C)) {
1529       Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
1530         << Range
1531         << CannotStartIdentifier;
1532     }
1533   }
1534 }
1535 
/// After encountering UTF-8 character C and interpreting it as an identifier
/// character, check whether it's a homoglyph for a common non-identifier
/// source character that is unlikely to be an intentional identifier
/// character and warn if so.
static void maybeDiagnoseUTF8Homoglyph(DiagnosticsEngine &Diags, uint32_t C,
                                       CharSourceRange Range) {
  // FIXME: Handle Unicode quotation marks (smart quotes, fullwidth quotes).
  // Maps a codepoint to the ASCII character it resembles; LooksLike == 0
  // marks an invisible (zero-width) character with no visible counterpart.
  struct HomoglyphPair {
    uint32_t Character;
    char LooksLike;
    bool operator<(HomoglyphPair R) const { return Character < R.Character; }
  };
  // NOTE: must stay sorted by codepoint -- it is binary-searched below.
  static constexpr HomoglyphPair SortedHomoglyphs[] = {
    {U'\u00ad', 0},   // SOFT HYPHEN
    {U'\u01c3', '!'}, // LATIN LETTER RETROFLEX CLICK
    {U'\u037e', ';'}, // GREEK QUESTION MARK
    {U'\u200b', 0},   // ZERO WIDTH SPACE
    {U'\u200c', 0},   // ZERO WIDTH NON-JOINER
    {U'\u200d', 0},   // ZERO WIDTH JOINER
    {U'\u2060', 0},   // WORD JOINER
    {U'\u2061', 0},   // FUNCTION APPLICATION
    {U'\u2062', 0},   // INVISIBLE TIMES
    {U'\u2063', 0},   // INVISIBLE SEPARATOR
    {U'\u2064', 0},   // INVISIBLE PLUS
    {U'\u2212', '-'}, // MINUS SIGN
    {U'\u2215', '/'}, // DIVISION SLASH
    {U'\u2216', '\\'}, // SET MINUS
    {U'\u2217', '*'}, // ASTERISK OPERATOR
    {U'\u2223', '|'}, // DIVIDES
    {U'\u2227', '^'}, // LOGICAL AND
    {U'\u2236', ':'}, // RATIO
    {U'\u223c', '~'}, // TILDE OPERATOR
    {U'\ua789', ':'}, // MODIFIER LETTER COLON
    {U'\ufeff', 0},   // ZERO WIDTH NO-BREAK SPACE
    {U'\uff01', '!'}, // FULLWIDTH EXCLAMATION MARK
    {U'\uff03', '#'}, // FULLWIDTH NUMBER SIGN
    {U'\uff04', '$'}, // FULLWIDTH DOLLAR SIGN
    {U'\uff05', '%'}, // FULLWIDTH PERCENT SIGN
    {U'\uff06', '&'}, // FULLWIDTH AMPERSAND
    {U'\uff08', '('}, // FULLWIDTH LEFT PARENTHESIS
    {U'\uff09', ')'}, // FULLWIDTH RIGHT PARENTHESIS
    {U'\uff0a', '*'}, // FULLWIDTH ASTERISK
    {U'\uff0b', '+'}, // FULLWIDTH PLUS SIGN
    {U'\uff0c', ','}, // FULLWIDTH COMMA
    {U'\uff0d', '-'}, // FULLWIDTH HYPHEN-MINUS
    {U'\uff0e', '.'}, // FULLWIDTH FULL STOP
    {U'\uff0f', '/'}, // FULLWIDTH SOLIDUS
    {U'\uff1a', ':'}, // FULLWIDTH COLON
    {U'\uff1b', ';'}, // FULLWIDTH SEMICOLON
    {U'\uff1c', '<'}, // FULLWIDTH LESS-THAN SIGN
    {U'\uff1d', '='}, // FULLWIDTH EQUALS SIGN
    {U'\uff1e', '>'}, // FULLWIDTH GREATER-THAN SIGN
    {U'\uff1f', '?'}, // FULLWIDTH QUESTION MARK
    {U'\uff20', '@'}, // FULLWIDTH COMMERCIAL AT
    {U'\uff3b', '['}, // FULLWIDTH LEFT SQUARE BRACKET
    {U'\uff3c', '\\'}, // FULLWIDTH REVERSE SOLIDUS
    {U'\uff3d', ']'}, // FULLWIDTH RIGHT SQUARE BRACKET
    {U'\uff3e', '^'}, // FULLWIDTH CIRCUMFLEX ACCENT
    {U'\uff5b', '{'}, // FULLWIDTH LEFT CURLY BRACKET
    {U'\uff5c', '|'}, // FULLWIDTH VERTICAL LINE
    {U'\uff5d', '}'}, // FULLWIDTH RIGHT CURLY BRACKET
    {U'\uff5e', '~'}, // FULLWIDTH TILDE
    {0, 0}            // sentinel; excluded from the search range below
  };
  // Binary-search the table (minus the sentinel). If C is larger than every
  // entry, lower_bound yields the sentinel, whose Character (0) never
  // matches, so the dereference below is always safe.
  auto Homoglyph =
      std::lower_bound(std::begin(SortedHomoglyphs),
                       std::end(SortedHomoglyphs) - 1, HomoglyphPair{C, '\0'});
  if (Homoglyph->Character == C) {
    llvm::SmallString<5> CharBuf;
    {
      // Render the codepoint as at least 4 uppercase hex digits for the
      // diagnostic text.
      llvm::raw_svector_ostream CharOS(CharBuf);
      llvm::write_hex(CharOS, C, llvm::HexPrintStyle::Upper, 4);
    }
    if (Homoglyph->LooksLike) {
      const char LooksLikeStr[] = {Homoglyph->LooksLike, 0};
      Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_homoglyph)
          << Range << CharBuf << LooksLikeStr;
    } else {
      Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_zero_width)
          << Range << CharBuf;
    }
  }
}
1619 
/// Emit an error for a Unicode codepoint that cannot appear in an identifier
/// (at all, or specifically in this position), with a fix-it removing it.
/// ASCII codepoints are never diagnosed here; they are handled by the normal
/// lexing paths.
static void diagnoseInvalidUnicodeCodepointInIdentifier(
    DiagnosticsEngine &Diags, const LangOptions &LangOpts, uint32_t CodePoint,
    CharSourceRange Range, bool IsFirst) {
  if (isASCII(CodePoint))
    return;

  // A valid identifier-start character is implicitly also a valid
  // continuation character.
  bool IsIDStart = isAllowedInitiallyIDChar(CodePoint, LangOpts);
  bool IsIDContinue = IsIDStart || isAllowedIDChar(CodePoint, LangOpts);

  // Nothing to diagnose: the codepoint is valid in this position.
  if ((IsFirst && IsIDStart) || (!IsFirst && IsIDContinue))
    return;

  // True when the codepoint would be fine later in the identifier but cannot
  // be its first character.
  bool InvalidOnlyAtStart = IsFirst && !IsIDStart && IsIDContinue;

  // Format the codepoint as uppercase hex for the diagnostic message.
  llvm::SmallString<5> CharBuf;
  llvm::raw_svector_ostream CharOS(CharBuf);
  llvm::write_hex(CharOS, CodePoint, llvm::HexPrintStyle::Upper, 4);

  if (!IsFirst || InvalidOnlyAtStart) {
    // "not allowed in an identifier" / "not allowed at the start" variant,
    // selected by InvalidOnlyAtStart.
    Diags.Report(Range.getBegin(), diag::err_character_not_allowed_identifier)
        << Range << CharBuf << int(InvalidOnlyAtStart)
        << FixItHint::CreateRemoval(Range);
  } else {
    Diags.Report(Range.getBegin(), diag::err_character_not_allowed)
        << Range << CharBuf << FixItHint::CreateRemoval(Range);
  }
}
1647 
/// Try to consume a universal-character-name (\uXXXX or \UXXXXXXXX) as the
/// next identifier character. \p CurPtr points at the backslash and \p Size
/// is the (cleaned) size of that character. On success, advances CurPtr past
/// the UCN, sets Token::HasUCN on \p Result, and returns true. On failure,
/// CurPtr is left unchanged and false is returned.
bool Lexer::tryConsumeIdentifierUCN(const char *&CurPtr, unsigned Size,
                                    Token &Result) {
  const char *UCNPtr = CurPtr + Size;
  // tryReadUCN returns 0 when no valid UCN is present.
  uint32_t CodePoint = tryReadUCN(UCNPtr, CurPtr, /*Token=*/nullptr);
  if (CodePoint == 0) {
    return false;
  }

  if (!isAllowedIDChar(CodePoint, LangOpts)) {
    // ASCII and whitespace codepoints spelled as UCNs are never treated as
    // identifier parts; let the caller handle them.
    if (isASCII(CodePoint) || isUnicodeWhitespace(CodePoint))
      return false;
    // Only diagnose when producing real tokens (not raw-mode lexing, not
    // inside a preprocessor directive, not re-lexing preprocessed output).
    if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
        !PP->isPreprocessedOutput())
      diagnoseInvalidUnicodeCodepointInIdentifier(
          PP->getDiagnostics(), LangOpts, CodePoint,
          makeCharRange(*this, CurPtr, UCNPtr),
          /*IsFirst=*/false);

    // We got a unicode codepoint that is neither a space nor a
    // a valid identifier part.
    // Carry on as if the codepoint was valid for recovery purposes.
  } else if (!isLexingRawMode())
    maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
                              makeCharRange(*this, CurPtr, UCNPtr),
                              /*IsFirst=*/false);

  Result.setFlag(Token::HasUCN);
  // If the UCN is spelled trivially (exactly "\uXXXX" or "\UXXXXXXXX" with no
  // trigraphs/escaped newlines inside), skip it in one step; otherwise
  // re-consume it character by character so cleaning is applied.
  if ((UCNPtr - CurPtr ==  6 && CurPtr[1] == 'u') ||
      (UCNPtr - CurPtr == 10 && CurPtr[1] == 'U'))
    CurPtr = UCNPtr;
  else
    while (CurPtr != UCNPtr)
      (void)getAndAdvanceChar(CurPtr, Result);
  return true;
}
1683 
/// Try to consume a raw UTF-8-encoded codepoint as the next identifier
/// character. On success, advances \p CurPtr past the multi-byte sequence and
/// returns true; on malformed UTF-8 or a non-identifier codepoint that the
/// caller should handle (ASCII or Unicode whitespace), returns false with
/// CurPtr unchanged.
bool Lexer::tryConsumeIdentifierUTF8Char(const char *&CurPtr) {
  const char *UnicodePtr = CurPtr;
  llvm::UTF32 CodePoint;
  // Strict conversion: reject overlong/invalid sequences and truncation at
  // the end of the buffer.
  llvm::ConversionResult Result =
      llvm::convertUTF8Sequence((const llvm::UTF8 **)&UnicodePtr,
                                (const llvm::UTF8 *)BufferEnd,
                                &CodePoint,
                                llvm::strictConversion);
  if (Result != llvm::conversionOK)
    return false;

  if (!isAllowedIDChar(static_cast<uint32_t>(CodePoint), LangOpts)) {
    if (isASCII(CodePoint) || isUnicodeWhitespace(CodePoint))
      return false;

    // Only diagnose when producing real tokens (not raw mode, not inside a
    // preprocessor directive, not re-lexing preprocessed output).
    if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
        !PP->isPreprocessedOutput())
      diagnoseInvalidUnicodeCodepointInIdentifier(
          PP->getDiagnostics(), LangOpts, CodePoint,
          makeCharRange(*this, CurPtr, UnicodePtr), /*IsFirst=*/false);
    // We got a unicode codepoint that is neither a space nor a
    // a valid identifier part. Carry on as if the codepoint was
    // valid for recovery purposes.
  } else if (!isLexingRawMode()) {
    // Valid extended identifier character: warn about C99 incompatibility
    // and suspicious look-alike characters.
    maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
                              makeCharRange(*this, CurPtr, UnicodePtr),
                              /*IsFirst=*/false);
    maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), CodePoint,
                               makeCharRange(*this, CurPtr, UnicodePtr));
  }

  CurPtr = UnicodePtr;
  return true;
}
1718 
/// Lex a token that begins with the non-ASCII codepoint \p C (already read;
/// \p CurPtr points past it). If C can start an identifier, continue lexing
/// an identifier; otherwise either silently drop an accidental stray
/// character or form a tok::unknown token.
bool Lexer::LexUnicodeIdentifierStart(Token &Result, uint32_t C,
                                      const char *CurPtr) {
  if (isAllowedInitiallyIDChar(C, LangOpts)) {
    if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
        !PP->isPreprocessedOutput()) {
      maybeDiagnoseIDCharCompat(PP->getDiagnostics(), C,
                                makeCharRange(*this, BufferPtr, CurPtr),
                                /*IsFirst=*/true);
      maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), C,
                                 makeCharRange(*this, BufferPtr, CurPtr));
    }

    MIOpt.ReadToken();
    return LexIdentifierContinue(Result, CurPtr);
  }

  // NOTE: the isAllowedInitiallyIDChar() check below is already known to be
  // false at this point; !isASCII(*BufferPtr) distinguishes a character
  // spelled as raw Unicode from one spelled as a UCN.
  if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
      !PP->isPreprocessedOutput() && !isASCII(*BufferPtr) &&
      !isAllowedInitiallyIDChar(C, LangOpts) && !isUnicodeWhitespace(C)) {
    // Non-ASCII characters tend to creep into source code unintentionally.
    // Instead of letting the parser complain about the unknown token,
    // just drop the character.
    // Note that we can /only/ do this when the non-ASCII character is actually
    // spelled as Unicode, not written as a UCN. The standard requires that
    // we not throw away any possible preprocessor tokens, but there's a
    // loophole in the mapping of Unicode characters to basic character set
    // characters that allows us to map these particular characters to, say,
    // whitespace.
    diagnoseInvalidUnicodeCodepointInIdentifier(
        PP->getDiagnostics(), LangOpts, C,
        makeCharRange(*this, BufferPtr, CurPtr), /*IsFirst=*/true);
    // Skip the bad character and return false so the caller lexes the next
    // token in its place.
    BufferPtr = CurPtr;
    return false;
  }

  // Otherwise, we have an explicit UCN or a character that's unlikely to show
  // up by accident.
  MIOpt.ReadToken();
  FormTokenWithChars(Result, CurPtr, tok::unknown);
  return true;
}
1760 
/// Lex the remainder of an identifier whose first character has already been
/// consumed. Forms a raw_identifier token (or code_completion token at a
/// completion point) and, outside raw mode, resolves it through the
/// preprocessor's identifier table.
bool Lexer::LexIdentifierContinue(Token &Result, const char *CurPtr) {
  // Match [_A-Za-z0-9]*, we have already matched an identifier start.
  while (true) {
    unsigned char C = *CurPtr;
    // Fast path.
    if (isAsciiIdentifierContinue(C)) {
      ++CurPtr;
      continue;
    }

    unsigned Size;
    // Slow path: handle trigraph, unicode codepoints, UCNs.
    C = getCharAndSize(CurPtr, Size);
    if (isAsciiIdentifierContinue(C)) {
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      continue;
    }
    if (C == '$') {
      // If we hit a $ and they are not supported in identifiers, we are done.
      if (!LangOpts.DollarIdents)
        break;
      // Otherwise, emit a diagnostic and continue.
      if (!isLexingRawMode())
        Diag(CurPtr, diag::ext_dollar_in_identifier);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      continue;
    }
    if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
      continue;
    if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
      continue;
    // Neither an expected Unicode codepoint nor a UCN.
    break;
  }

  const char *IdStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::raw_identifier);
  Result.setRawIdentifierData(IdStart);

  // If we are in raw mode, return this identifier raw.  There is no need to
  // look up identifier information or attempt to macro expand it.
  if (LexingRawMode)
    return true;

  // Fill in Result.IdentifierInfo and update the token kind,
  // looking up the identifier in the identifier table.
  IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);
  // Note that we have to call PP->LookUpIdentifierInfo() even for code
  // completion, it writes IdentifierInfo into Result, and callers rely on it.

  // If the completion point is at the end of an identifier, we want to treat
  // the identifier as incomplete even if it resolves to a macro or a keyword.
  // This allows e.g. 'class^' to complete to 'classifier'.
  if (isCodeCompletionPoint(CurPtr)) {
    // Return the code-completion token.
    Result.setKind(tok::code_completion);
    // Skip the code-completion char and all immediate identifier characters.
    // This ensures we get consistent behavior when completing at any point in
    // an identifier (i.e. at the start, in the middle, at the end). Note that
    // only simple cases (i.e. [a-zA-Z0-9_]) are supported to keep the code
    // simpler.
    assert(*CurPtr == 0 && "Completion character must be 0");
    ++CurPtr;
    // Note that code completion token is not added as a separate character
    // when the completion point is at the end of the buffer. Therefore, we need
    // to check if the buffer has ended.
    if (CurPtr < BufferEnd) {
      while (isAsciiIdentifierContinue(*CurPtr))
        ++CurPtr;
    }
    BufferPtr = CurPtr;
    return true;
  }

  // Finally, now that we know we have an identifier, pass this off to the
  // preprocessor, which may macro expand it or something.
  if (II->isHandleIdentifierCase())
    return PP->HandleIdentifier(Result);

  return true;
}
1842 
/// isHexaLiteral - Return true if Start points to a hex constant.
/// in microsoft mode (where this is supposed to be several different tokens).
bool Lexer::isHexaLiteral(const char *Start, const LangOptions &LangOpts) {
  unsigned Size;
  // Look at the first two (cleaned) characters: a hex literal is "0x"/"0X".
  char C1 = Lexer::getCharAndSizeNoWarn(Start, Size, LangOpts);
  if (C1 != '0')
    return false;
  char C2 = Lexer::getCharAndSizeNoWarn(Start + Size, Size, LangOpts);
  return (C2 == 'x' || C2 == 'X');
}
1853 
/// LexNumericConstant - Lex the remainder of a integer or floating point
/// constant. From[-1] is the first character lexed.  Return the end of the
/// constant.
///
/// Recurses to continue the pp-number after exponent signs (1e+12, 0x1p-3),
/// digit separators (1'000), and UCN/UTF-8 ud-suffix characters.
bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  char PrevCh = 0;
  // Consume the maximal run of pp-number body characters.
  while (isPreprocessingNumberBody(C)) {
    CurPtr = ConsumeChar(CurPtr, Size, Result);
    PrevCh = C;
    C = getCharAndSize(CurPtr, Size);
  }

  // If we fell out, check for a sign, due to 1e+12.  If we have one, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e')) {
    // If we are in Microsoft mode, don't continue if the constant is hex.
    // For example, MSVC will accept the following as 3 tokens: 0x1234567e+1
    if (!LangOpts.MicrosoftExt || !isHexaLiteral(BufferPtr, LangOpts))
      return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
  }

  // If we have a hex FP constant, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p')) {
    // Outside C99 and C++17, we accept hexadecimal floating point numbers as a
    // not-quite-conforming extension. Only do so if this looks like it's
    // actually meant to be a hexfloat, and not if it has a ud-suffix.
    bool IsHexFloat = true;
    if (!LangOpts.C99) {
      if (!isHexaLiteral(BufferPtr, LangOpts))
        IsHexFloat = false;
      else if (!getLangOpts().CPlusPlus17 &&
               std::find(BufferPtr, CurPtr, '_') != CurPtr)
        IsHexFloat = false;
    }
    if (IsHexFloat)
      return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
  }

  // If we have a digit separator, continue.
  if (C == '\'' && (getLangOpts().CPlusPlus14 || getLangOpts().C2x)) {
    unsigned NextSize;
    // The ' only acts as a separator when followed by an identifier-continue
    // character; otherwise it starts a character literal.
    char Next = getCharAndSizeNoWarn(CurPtr + Size, NextSize, getLangOpts());
    if (isAsciiIdentifierContinue(Next)) {
      if (!isLexingRawMode())
        Diag(CurPtr, getLangOpts().CPlusPlus
                         ? diag::warn_cxx11_compat_digit_separator
                         : diag::warn_c2x_compat_digit_separator);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      CurPtr = ConsumeChar(CurPtr, NextSize, Result);
      return LexNumericConstant(Result, CurPtr);
    }
  }

  // If we have a UCN or UTF-8 character (perhaps in a ud-suffix), continue.
  if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
    return LexNumericConstant(Result, CurPtr);
  if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
    return LexNumericConstant(Result, CurPtr);

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::numeric_constant);
  Result.setLiteralData(TokStart);
  return true;
}
1919 
/// LexUDSuffix - Lex the ud-suffix production for user-defined literal suffixes
/// in C++11, or warn on a ud-suffix in C++98.
///
/// \param CurPtr points just past the closing quote of the literal.
/// \param IsStringLiteral true for string literals, false for character
///        literals (affects which suffixes are plausibly standard).
/// \return the pointer past the consumed suffix (== CurPtr if none).
const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
                               bool IsStringLiteral) {
  assert(getLangOpts().CPlusPlus);

  // Maximally munch an identifier.
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  bool Consumed = false;

  if (!isAsciiIdentifierStart(C)) {
    // The suffix may begin with a UCN or a raw UTF-8 codepoint.
    if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
      Consumed = true;
    else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
      Consumed = true;
    else
      return CurPtr;
  }

  // Before C++11 there are no ud-suffixes; just warn about the future
  // incompatibility and leave the identifier for the next token.
  if (!getLangOpts().CPlusPlus11) {
    if (!isLexingRawMode())
      Diag(CurPtr,
           C == '_' ? diag::warn_cxx11_compat_user_defined_literal
                    : diag::warn_cxx11_compat_reserved_user_defined_literal)
        << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
    return CurPtr;
  }

  // C++11 [lex.ext]p10, [usrlit.suffix]p1: A program containing a ud-suffix
  // that does not start with an underscore is ill-formed. As a conforming
  // extension, we treat all such suffixes as if they had whitespace before
  // them. We assume a suffix beginning with a UCN or UTF-8 character is more
  // likely to be a ud-suffix than a macro, however, and accept that.
  if (!Consumed) {
    bool IsUDSuffix = false;
    if (C == '_')
      IsUDSuffix = true;
    else if (IsStringLiteral && getLangOpts().CPlusPlus14) {
      // In C++1y, we need to look ahead a few characters to see if this is a
      // valid suffix for a string literal or a numeric literal (this could be
      // the 'operator""if' defining a numeric literal operator).
      const unsigned MaxStandardSuffixLength = 3;
      char Buffer[MaxStandardSuffixLength] = { C };
      // NOTE: this inner 'Consumed' (byte count of lookahead) intentionally
      // shadows the outer flag.
      unsigned Consumed = Size;
      unsigned Chars = 1;
      while (true) {
        unsigned NextSize;
        char Next = getCharAndSizeNoWarn(CurPtr + Consumed, NextSize,
                                         getLangOpts());
        if (!isAsciiIdentifierContinue(Next)) {
          // End of suffix. Check whether this is on the allowed list.
          const StringRef CompleteSuffix(Buffer, Chars);
          IsUDSuffix = StringLiteralParser::isValidUDSuffix(getLangOpts(),
                                                            CompleteSuffix);
          break;
        }

        if (Chars == MaxStandardSuffixLength)
          // Too long: can't be a standard suffix.
          break;

        Buffer[Chars++] = Next;
        Consumed += NextSize;
      }
    }

    if (!IsUDSuffix) {
      // Reserved suffix: warn and treat it as a separate token by not
      // consuming it.
      if (!isLexingRawMode())
        Diag(CurPtr, getLangOpts().MSVCCompat
                         ? diag::ext_ms_reserved_user_defined_literal
                         : diag::ext_reserved_user_defined_literal)
          << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
      return CurPtr;
    }

    CurPtr = ConsumeChar(CurPtr, Size, Result);
  }

  Result.setFlag(Token::HasUDSuffix);
  // Consume the rest of the suffix identifier.
  while (true) {
    C = getCharAndSize(CurPtr, Size);
    if (isAsciiIdentifierContinue(C)) {
      CurPtr = ConsumeChar(CurPtr, Size, Result);
    } else if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result)) {
    } else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr)) {
    } else
      break;
  }

  return CurPtr;
}
2012 
/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
/// either " or L" or u8" or u" or U".
///
/// Handles unterminated literals (forming tok::unknown), embedded nul
/// characters, code-completion points, and the optional C++11 ud-suffix.
bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
                             tok::TokenKind Kind) {
  const char *AfterQuote = CurPtr;
  // Does this string contain the \0 character?
  const char *NulCharacter = nullptr;

  // u8/u/U literals were introduced by C++11/C11; warn in compat modes.
  if (!isLexingRawMode() &&
      (Kind == tok::utf8_string_literal ||
       Kind == tok::utf16_string_literal ||
       Kind == tok::utf32_string_literal))
    Diag(BufferPtr, getLangOpts().CPlusPlus
           ? diag::warn_cxx98_compat_unicode_literal
           : diag::warn_c99_compat_unicode_literal);

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '"') {
    // Skip escaped characters.  Escaped newlines will already be processed by
    // getAndAdvanceChar.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (C == '\n' || C == '\r' ||             // Newline.
        (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
        Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 1;
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return true;
    }

    if (C == 0) {
      // A nul may mark the code-completion point; otherwise remember it so we
      // can warn after the literal is fully lexed.
      if (isCodeCompletionPoint(CurPtr-1)) {
        if (ParsingFilename)
          codeCompleteIncludedFile(AfterQuote, CurPtr - 1, /*IsAngled=*/false);
        else
          PP->CodeCompleteNaturalLanguage();
        FormTokenWithChars(Result, CurPtr - 1, tok::unknown);
        cutOffLexing();
        return true;
      }

      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (getLangOpts().CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr, true);

  // If a nul character existed in the string, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_char_or_string) << 1;

  // Update the location of the token as well as the BufferPtr instance var.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
  return true;
}
2074 
/// LexRawStringLiteral - Lex the remainder of a raw string literal, after
/// having lexed R", LR", u8R", uR", or UR".
///
/// On a malformed delimiter, attempts recovery by scanning to the next '"'
/// and forms tok::unknown; likewise for an unterminated raw string.
bool Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr,
                                tok::TokenKind Kind) {
  // This function doesn't use getAndAdvanceChar because C++0x [lex.pptoken]p3:
  //  Between the initial and final double quote characters of the raw string,
  //  any transformations performed in phases 1 and 2 (trigraphs,
  //  universal-character-names, and line splicing) are reverted.

  if (!isLexingRawMode())
    Diag(BufferPtr, diag::warn_cxx98_compat_raw_string_literal);

  // Measure the d-char-sequence; the standard caps it at 16 characters.
  unsigned PrefixLen = 0;

  while (PrefixLen != 16 && isRawStringDelimBody(CurPtr[PrefixLen]))
    ++PrefixLen;

  // If the last character was not a '(', then we didn't lex a valid delimiter.
  if (CurPtr[PrefixLen] != '(') {
    if (!isLexingRawMode()) {
      const char *PrefixEnd = &CurPtr[PrefixLen];
      if (PrefixLen == 16) {
        Diag(PrefixEnd, diag::err_raw_delim_too_long);
      } else {
        Diag(PrefixEnd, diag::err_invalid_char_raw_delim)
          << StringRef(PrefixEnd, 1);
      }
    }

    // Search for the next '"' in hopes of salvaging the lexer. Unfortunately,
    // it's possible the '"' was intended to be part of the raw string, but
    // there's not much we can do about that.
    while (true) {
      char C = *CurPtr++;

      if (C == '"')
        break;
      if (C == 0 && CurPtr-1 == BufferEnd) {
        --CurPtr;
        break;
      }
    }

    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return true;
  }

  // Save prefix and move CurPtr past it
  const char *Prefix = CurPtr;
  CurPtr += PrefixLen + 1; // skip over prefix and '('

  // Scan for the closing ")delimiter"" sequence.
  while (true) {
    char C = *CurPtr++;

    if (C == ')') {
      // Check for prefix match and closing quote.
      if (strncmp(CurPtr, Prefix, PrefixLen) == 0 && CurPtr[PrefixLen] == '"') {
        CurPtr += PrefixLen + 1; // skip over prefix and '"'
        break;
      }
    } else if (C == 0 && CurPtr-1 == BufferEnd) { // End of file.
      if (!isLexingRawMode())
        Diag(BufferPtr, diag::err_unterminated_raw_string)
          << StringRef(Prefix, PrefixLen);
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return true;
    }
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (getLangOpts().CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr, true);

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
  return true;
}
2154 
/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
/// after having lexed the '<' character.  This is used for #include filenames.
///
/// If the literal is unterminated on this line, backs off and returns a
/// plain tok::less instead.
bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
  // Does this string contain the \0 character?
  const char *NulCharacter = nullptr;
  const char *AfterLessPos = CurPtr;
  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '>') {
    // Skip escaped characters.  Escaped newlines will already be processed by
    // getAndAdvanceChar.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (isVerticalWhitespace(C) ||               // Newline.
        (C == 0 && (CurPtr - 1 == BufferEnd))) { // End of file.
      // If the filename is unterminated, then it must just be a lone <
      // character.  Return this as such.
      FormTokenWithChars(Result, AfterLessPos, tok::less);
      return true;
    }

    if (C == 0) {
      // A nul may mark the code-completion point; otherwise remember it so we
      // can warn after the literal is fully lexed.
      if (isCodeCompletionPoint(CurPtr - 1)) {
        codeCompleteIncludedFile(AfterLessPos, CurPtr - 1, /*IsAngled=*/true);
        cutOffLexing();
        FormTokenWithChars(Result, CurPtr - 1, tok::unknown);
        return true;
      }
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If a nul character existed in the string, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_char_or_string) << 1;

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::header_name);
  Result.setLiteralData(TokStart);
  return true;
}
2198 
/// Set up code completion for a partially-typed #include filename.
/// [PathStart, CompletionPoint) is the filename text lexed so far; IsAngled
/// distinguishes <...> from "..." includes. Configures the preprocessor's
/// completion identifier/range and fires the completion callback.
void Lexer::codeCompleteIncludedFile(const char *PathStart,
                                     const char *CompletionPoint,
                                     bool IsAngled) {
  // Completion only applies to the filename, after the last slash.
  StringRef PartialPath(PathStart, CompletionPoint - PathStart);
  // MSVC compatibility also treats backslash as a path separator.
  llvm::StringRef SlashChars = LangOpts.MSVCCompat ? "/\\" : "/";
  auto Slash = PartialPath.find_last_of(SlashChars);
  StringRef Dir =
      (Slash == StringRef::npos) ? "" : PartialPath.take_front(Slash);
  const char *StartOfFilename =
      (Slash == StringRef::npos) ? PathStart : PathStart + Slash + 1;
  // Code completion filter range is the filename only, up to completion point.
  PP->setCodeCompletionIdentifierInfo(&PP->getIdentifierTable().get(
      StringRef(StartOfFilename, CompletionPoint - StartOfFilename)));
  // We should replace the characters up to the closing quote or closest slash,
  // if any.
  while (CompletionPoint < BufferEnd) {
    char Next = *(CompletionPoint + 1);
    if (Next == 0 || Next == '\r' || Next == '\n')
      break;
    ++CompletionPoint;
    if (Next == (IsAngled ? '>' : '"'))
      break;
    if (llvm::is_contained(SlashChars, Next))
      break;
  }

  PP->setCodeCompletionTokenRange(
      FileLoc.getLocWithOffset(StartOfFilename - BufferStart),
      FileLoc.getLocWithOffset(CompletionPoint - BufferStart));
  PP->CodeCompleteIncludedFile(Dir, IsAngled);
}
2231 
/// LexCharConstant - Lex the remainder of a character constant, after having
/// lexed either ' or L' or u8' or u' or U'.
///
/// Handles empty ('') and unterminated constants (forming tok::unknown),
/// embedded nul characters, code-completion points, and the optional C++11
/// ud-suffix.
bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
                            tok::TokenKind Kind) {
  // Does this character contain the \0 character?
  const char *NulCharacter = nullptr;

  // u/U constants are C++11/C11 features; u8 constants are C++17/C2x.
  if (!isLexingRawMode()) {
    if (Kind == tok::utf16_char_constant || Kind == tok::utf32_char_constant)
      Diag(BufferPtr, getLangOpts().CPlusPlus
                          ? diag::warn_cxx98_compat_unicode_literal
                          : diag::warn_c99_compat_unicode_literal);
    else if (Kind == tok::utf8_char_constant)
      Diag(BufferPtr, diag::warn_cxx14_compat_u8_character_literal);
  }

  char C = getAndAdvanceChar(CurPtr, Result);
  if (C == '\'') {
    // Empty character constant: diagnose and produce tok::unknown.
    if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
      Diag(BufferPtr, diag::ext_empty_character);
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return true;
  }

  while (C != '\'') {
    // Skip escaped characters.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (C == '\n' || C == '\r' ||             // Newline.
        (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
        Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 0;
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return true;
    }

    if (C == 0) {
      // A nul may mark the code-completion point; otherwise remember it so we
      // can warn after the constant is fully lexed.
      if (isCodeCompletionPoint(CurPtr-1)) {
        PP->CodeCompleteNaturalLanguage();
        FormTokenWithChars(Result, CurPtr-1, tok::unknown);
        cutOffLexing();
        return true;
      }

      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (getLangOpts().CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr, false);

  // If a nul character existed in the character, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_char_or_string) << 0;

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
  return true;
}
2296 
2297 /// SkipWhitespace - Efficiently skip over a series of whitespace characters.
2298 /// Update BufferPtr to point to the next non-whitespace character and return.
2299 ///
2300 /// This method forms a token and returns true if KeepWhitespaceMode is enabled.
bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr,
                           bool &TokAtPhysicalStartOfLine) {
  // Whitespace - Skip it, then return the token after the whitespace.
  // CurPtr[-1] is the whitespace character that brought us here; if it was
  // vertical whitespace, the next token starts a new line.
  bool SawNewline = isVerticalWhitespace(CurPtr[-1]);

  unsigned char Char = *CurPtr;

  // Track the last newline seen in this run, and record the first one in the
  // NewLinePtr member; comparing the two below detects empty lines.
  const char *lastNewLine = nullptr;
  auto setLastNewLine = [&](const char *Ptr) {
    lastNewLine = Ptr;
    if (!NewLinePtr)
      NewLinePtr = Ptr;
  };
  if (SawNewline)
    setLastNewLine(CurPtr - 1);

  // Skip consecutive spaces efficiently.
  while (true) {
    // Skip horizontal whitespace very aggressively.
    while (isHorizontalWhitespace(Char))
      Char = *++CurPtr;

    // Otherwise if we have something other than whitespace, we're done.
    if (!isVerticalWhitespace(Char))
      break;

    if (ParsingPreprocessorDirective) {
      // End of preprocessor directive line, let LexTokenInternal handle this.
      BufferPtr = CurPtr;
      return false;
    }

    // OK, but handle newline.
    if (*CurPtr == '\n')
      setLastNewLine(CurPtr);
    SawNewline = true;
    Char = *++CurPtr;
  }

  // If the client wants us to return whitespace, return it now.
  if (isKeepWhitespaceMode()) {
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    if (SawNewline) {
      IsAtStartOfLine = true;
      IsAtPhysicalStartOfLine = true;
    }
    // FIXME: The next token will not have LeadingSpace set.
    return true;
  }

  // If this isn't immediately after a newline, there is leading space.
  char PrevChar = CurPtr[-1];
  bool HasLeadingSpace = !isVerticalWhitespace(PrevChar);

  Result.setFlagValue(Token::LeadingSpace, HasLeadingSpace);
  if (SawNewline) {
    Result.setFlag(Token::StartOfLine);
    TokAtPhysicalStartOfLine = true;

    // If we skipped more than one newline, the span between the first and the
    // last one is at least one blank line; notify the Emptyline handler.
    if (NewLinePtr && lastNewLine && NewLinePtr != lastNewLine && PP) {
      if (auto *Handler = PP->getEmptylineHandler())
        Handler->HandleEmptyline(SourceRange(getSourceLocation(NewLinePtr + 1),
                                             getSourceLocation(lastNewLine)));
    }
  }

  BufferPtr = CurPtr;
  return false;
}
2370 
2371 /// We have just read the // characters from input.  Skip until we find the
2372 /// newline character that terminates the comment.  Then update BufferPtr and
2373 /// return.
2374 ///
2375 /// If we're in KeepCommentMode or any CommentHandler has inserted
2376 /// some tokens, this will store the first token and return true.
bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
                            bool &TokAtPhysicalStartOfLine) {
  // If Line comments aren't explicitly enabled for this language, emit an
  // extension warning.
  if (!LangOpts.LineComment) {
    if (!isLexingRawMode()) // There's no PP in raw mode, so can't emit diags.
      Diag(BufferPtr, diag::ext_line_comment);

    // Mark them enabled so we only emit one warning for this translation
    // unit.
    LangOpts.LineComment = true;
  }

  // Scan over the body of the comment.  The common case, when scanning, is that
  // the comment contains normal ascii characters with nothing interesting in
  // them.  As such, optimize for this case with the inner loop.
  //
  // This loop terminates with CurPtr pointing at the newline (or end of buffer)
  // character that ends the line comment.
  char C;
  while (true) {
    C = *CurPtr;
    // Skip over characters in the fast loop.
    while (C != 0 &&                // Potentially EOF.
           C != '\n' && C != '\r')  // Newline or DOS-style newline.
      C = *++CurPtr;

    const char *NextLine = CurPtr;
    if (C != 0) {
      // We found a newline, see if it's escaped.
      const char *EscapePtr = CurPtr-1;
      bool HasSpace = false;
      while (isHorizontalWhitespace(*EscapePtr)) { // Skip whitespace.
        --EscapePtr;
        HasSpace = true;
      }

      if (*EscapePtr == '\\')
        // Escaped newline.
        CurPtr = EscapePtr;
      else if (EscapePtr[0] == '/' && EscapePtr[-1] == '?' &&
               EscapePtr[-2] == '?' && LangOpts.Trigraphs)
        // Trigraph-escaped newline: "??/" is the trigraph for '\\'.
        CurPtr = EscapePtr-2;
      else
        break; // This is a newline, we're done.

      // If there was space between the backslash and newline, warn about it.
      if (HasSpace && !isLexingRawMode())
        Diag(EscapePtr, diag::backslash_newline_space);
    }

    // Otherwise, this is a hard case.  Fall back on getAndAdvanceChar to
    // properly decode the character.  Read it in raw mode to avoid emitting
    // diagnostics about things like trigraphs.  If we see an escaped newline,
    // we'll handle it below.
    const char *OldPtr = CurPtr;
    bool OldRawMode = isLexingRawMode();
    LexingRawMode = true;
    C = getAndAdvanceChar(CurPtr, Result);
    LexingRawMode = OldRawMode;

    // If we only read one character, then no special handling is needed.
    // We're done and can skip forward to the newline.
    if (C != 0 && CurPtr == OldPtr+1) {
      CurPtr = NextLine;
      break;
    }

    // If we read multiple characters, and one of those characters was a \r or
    // \n, then we had an escaped newline within the comment.  Emit diagnostic
    // unless the next line is also a // comment.
    if (CurPtr != OldPtr + 1 && C != '/' &&
        (CurPtr == BufferEnd + 1 || CurPtr[0] != '/')) {
      for (; OldPtr != CurPtr; ++OldPtr)
        if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
          // Okay, we found a // comment that ends in a newline, if the next
          // line is also a // comment, but has spaces, don't emit a diagnostic.
          if (isWhitespace(C)) {
            const char *ForwardPtr = CurPtr;
            while (isWhitespace(*ForwardPtr))  // Skip whitespace.
              ++ForwardPtr;
            if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
              break;
          }

          if (!isLexingRawMode())
            Diag(OldPtr-1, diag::ext_multi_line_line_comment);
          break;
        }
    }

    // Stop before consuming a real (unescaped) newline or the end of buffer.
    if (C == '\r' || C == '\n' || CurPtr == BufferEnd + 1) {
      --CurPtr;
      break;
    }

    if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
      PP->CodeCompleteNaturalLanguage();
      cutOffLexing();
      return false;
    }
  }

  // Found but did not consume the newline.  Notify comment handlers about the
  // comment unless we're in a #if 0 block.
  if (PP && !isLexingRawMode() &&
      PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
                                            getSourceLocation(CurPtr)))) {
    BufferPtr = CurPtr;
    return true; // A token has to be returned.
  }

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode())
    return SaveLineComment(Result, CurPtr);

  // If we are inside a preprocessor directive and we see the end of line,
  // return immediately, so that the lexer can return this as an EOD token.
  if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
    BufferPtr = CurPtr;
    return false;
  }

  // Otherwise, eat the \n character.  We don't care if this is a \n\r or
  // \r\n sequence.  This is an efficiency hack (because we know the \n can't
  // contribute to another token), it isn't needed for correctness.  Note that
  // this is ok even in KeepWhitespaceMode, because we would have returned the
  // comment above in that mode.
  NewLinePtr = CurPtr++;

  // The next returned token is at the start of the line.
  Result.setFlag(Token::StartOfLine);
  TokAtPhysicalStartOfLine = true;
  // No leading whitespace seen so far.
  Result.clearFlag(Token::LeadingSpace);
  BufferPtr = CurPtr;
  return false;
}
2516 
2517 /// If in save-comment mode, package up this Line comment in an appropriate
2518 /// way and return it.
bool Lexer::SaveLineComment(Token &Result, const char *CurPtr) {
  // If we're not in a preprocessor directive, just return the // comment
  // directly.
  FormTokenWithChars(Result, CurPtr, tok::comment);

  if (!ParsingPreprocessorDirective || LexingRawMode)
    return true;

  // If this Line-style comment is in a macro definition, transmogrify it into
  // a C-style block comment so it survives token pasting into the macro body.
  bool Invalid = false;
  std::string Spelling = PP->getSpelling(Result, &Invalid);
  if (Invalid)
    return true;

  assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not line comment?");
  Spelling[1] = '*';   // Change prefix to "/*".
  Spelling += "*/";    // add suffix.

  // Re-set the kind and create a scratch-buffer token carrying the rewritten
  // block-comment spelling at the original comment's location.
  Result.setKind(tok::comment);
  PP->CreateString(Spelling, Result,
                   Result.getLocation(), Result.getLocation());
  return true;
}
2543 
2544 /// isBlockCommentEndOfEscapedNewLine - Return true if the specified newline
2545 /// character (either \\n or \\r) is part of an escaped newline sequence.  Issue
2546 /// a diagnostic if so.  We know that the newline is inside of a block comment.
static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
                                                  Lexer *L) {
  assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');

  // Position of the first trigraph in the ending sequence.
  const char *TrigraphPos = nullptr;
  // Position of the first whitespace after a '\' in the ending sequence.
  const char *SpacePos = nullptr;

  // Walk backwards from the newline, consuming one escaped-newline sequence
  // per iteration, until we either reach a '*' (so line splicing forms "*/")
  // or find something that is not part of an escaped newline.
  while (true) {
    // Back up off the newline.
    --CurPtr;

    // If this is a two-character newline sequence, skip the other character.
    if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
      // \n\n or \r\r -> not escaped newline.
      if (CurPtr[0] == CurPtr[1])
        return false;
      // \n\r or \r\n -> skip the newline.
      --CurPtr;
    }

    // If we have horizontal whitespace, skip over it.  We allow whitespace
    // between the slash and newline.
    while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
      SpacePos = CurPtr;
      --CurPtr;
    }

    // If we have a slash, this is an escaped newline.
    if (*CurPtr == '\\') {
      --CurPtr;
    } else if (CurPtr[0] == '/' && CurPtr[-1] == '?' && CurPtr[-2] == '?') {
      // This is a trigraph encoding of a slash.
      TrigraphPos = CurPtr - 2;
      CurPtr -= 3;
    } else {
      return false;
    }

    // If the character preceding the escaped newline is a '*', then after line
    // splicing we have a '*/' ending the comment.
    if (*CurPtr == '*')
      break;

    // Anything other than another newline (i.e. yet another escaped-newline
    // sequence) means the '*' cannot be spliced up to the '/'.
    if (*CurPtr != '\n' && *CurPtr != '\r')
      return false;
  }

  if (TrigraphPos) {
    // If no trigraphs are enabled, warn that we ignored this trigraph and
    // ignore this * character.
    if (!L->getLangOpts().Trigraphs) {
      if (!L->isLexingRawMode())
        L->Diag(TrigraphPos, diag::trigraph_ignored_block_comment);
      return false;
    }
    if (!L->isLexingRawMode())
      L->Diag(TrigraphPos, diag::trigraph_ends_block_comment);
  }

  // Warn about having an escaped newline between the */ characters.
  if (!L->isLexingRawMode())
    L->Diag(CurPtr + 1, diag::escaped_newline_block_comment_end);

  // If there was space between the backslash and newline, warn about it.
  if (SpacePos && !L->isLexingRawMode())
    L->Diag(SpacePos, diag::backslash_newline_space);

  return true;
}
2618 
2619 #ifdef __SSE2__
2620 #include <emmintrin.h>
2621 #elif __ALTIVEC__
2622 #include <altivec.h>
2623 #undef bool
2624 #endif
2625 
2626 /// We have just read from input the / and * characters that started a comment.
2627 /// Read until we find the * and / characters that terminate the comment.
2628 /// Note that we don't bother decoding trigraphs or escaped newlines in block
2629 /// comments, because they cannot cause the comment to end.  The only thing
2630 /// that can happen is the comment could end with an escaped newline between
2631 /// the terminating * and /.
2632 ///
2633 /// If we're in KeepCommentMode or any CommentHandler has inserted
2634 /// some tokens, this will store the first token and return true.
bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr,
                             bool &TokAtPhysicalStartOfLine) {
  // Scan one character past where we should, looking for a '/' character.  Once
  // we find it, check to see if it was preceded by a *.  This common
  // optimization helps people who like to put a lot of * characters in their
  // comments.

  // The first character we get with newlines and trigraphs skipped to handle
  // the degenerate /*/ case below correctly if the * has an escaped newline
  // after it.
  unsigned CharSize;
  unsigned char C = getCharAndSize(CurPtr, CharSize);
  CurPtr += CharSize;
  if (C == 0 && CurPtr == BufferEnd+1) {
    // "/*" immediately followed by end of file: unterminated comment.
    if (!isLexingRawMode())
      Diag(BufferPtr, diag::err_unterminated_block_comment);
    --CurPtr;

    // KeepWhitespaceMode should return this broken comment as a token.  Since
    // it isn't a well formed comment, just return it as an 'unknown' token.
    if (isKeepWhitespaceMode()) {
      FormTokenWithChars(Result, CurPtr, tok::unknown);
      return true;
    }

    BufferPtr = CurPtr;
    return false;
  }

  // Check to see if the first character after the '/*' is another /.  If so,
  // then this slash does not end the block comment, it is part of it.
  if (C == '/')
    C = *CurPtr++;

  while (true) {
    // Skip over all non-interesting characters until we find end of buffer or a
    // (probably ending) '/' character.
    if (CurPtr + 24 < BufferEnd &&
        // If there is a code-completion point avoid the fast scan because it
        // doesn't check for '\0'.
        !(PP && PP->getCodeCompletionFileLoc() == FileLoc)) {
      // While not aligned to a 16-byte boundary.
      while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0)
        C = *CurPtr++;

      if (C == '/') goto FoundSlash;

#ifdef __SSE2__
      // Compare 16 bytes at a time against '/'; cmp's set bits mark matches.
      __m128i Slashes = _mm_set1_epi8('/');
      while (CurPtr+16 <= BufferEnd) {
        int cmp = _mm_movemask_epi8(_mm_cmpeq_epi8(*(const __m128i*)CurPtr,
                                    Slashes));
        if (cmp != 0) {
          // Adjust the pointer to point directly after the first slash. It's
          // not necessary to set C here, it will be overwritten at the end of
          // the outer loop.
          CurPtr += llvm::countTrailingZeros<unsigned>(cmp) + 1;
          goto FoundSlash;
        }
        CurPtr += 16;
      }
#elif __ALTIVEC__
      __vector unsigned char Slashes = {
        '/', '/', '/', '/',  '/', '/', '/', '/',
        '/', '/', '/', '/',  '/', '/', '/', '/'
      };
      while (CurPtr + 16 <= BufferEnd &&
             !vec_any_eq(*(const __vector unsigned char *)CurPtr, Slashes))
        CurPtr += 16;
#else
      // Scan for '/' quickly.  Many block comments are very large.
      while (CurPtr[0] != '/' &&
             CurPtr[1] != '/' &&
             CurPtr[2] != '/' &&
             CurPtr[3] != '/' &&
             CurPtr+4 < BufferEnd) {
        CurPtr += 4;
      }
#endif

      // It has to be one of the bytes scanned, increment to it and read one.
      C = *CurPtr++;
    }

    // Loop to scan the remainder.
    while (C != '/' && C != '\0')
      C = *CurPtr++;

    if (C == '/') {
  FoundSlash:
      if (CurPtr[-2] == '*')  // We found the final */.  We're done!
        break;

      if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
        if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) {
          // We found the final */, though it had an escaped newline between the
          // * and /.  We're done!
          break;
        }
      }
      if (CurPtr[0] == '*' && CurPtr[1] != '/') {
        // If this is a /* inside of the comment, emit a warning.  Don't do this
        // if this is a /*/, which will end the comment.  This misses cases with
        // embedded escaped newlines, but oh well.
        if (!isLexingRawMode())
          Diag(CurPtr-1, diag::warn_nested_block_comment);
      }
    } else if (C == 0 && CurPtr == BufferEnd+1) {
      if (!isLexingRawMode())
        Diag(BufferPtr, diag::err_unterminated_block_comment);
      // Note: the user probably forgot a */.  We could continue immediately
      // after the /*, but this would involve lexing a lot of what really is the
      // comment, which surely would confuse the parser.
      --CurPtr;

      // KeepWhitespaceMode should return this broken comment as a token.  Since
      // it isn't a well formed comment, just return it as an 'unknown' token.
      if (isKeepWhitespaceMode()) {
        FormTokenWithChars(Result, CurPtr, tok::unknown);
        return true;
      }

      BufferPtr = CurPtr;
      return false;
    } else if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
      PP->CodeCompleteNaturalLanguage();
      cutOffLexing();
      return false;
    }

    C = *CurPtr++;
  }

  // Notify comment handlers about the comment unless we're in a #if 0 block.
  if (PP && !isLexingRawMode() &&
      PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
                                            getSourceLocation(CurPtr)))) {
    BufferPtr = CurPtr;
    return true; // A token has to be returned.
  }

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode()) {
    FormTokenWithChars(Result, CurPtr, tok::comment);
    return true;
  }

  // It is common for the tokens immediately after a /**/ comment to be
  // whitespace.  Instead of going through the big switch, handle it
  // efficiently now.  This is safe even in KeepWhitespaceMode because we would
  // have already returned above with the comment as a token.
  if (isHorizontalWhitespace(*CurPtr)) {
    SkipWhitespace(Result, CurPtr+1, TokAtPhysicalStartOfLine);
    return false;
  }

  // Otherwise, just return so that the next character will be lexed as a token.
  BufferPtr = CurPtr;
  Result.setFlag(Token::LeadingSpace);
  return false;
}
2796 
2797 //===----------------------------------------------------------------------===//
2798 // Primary Lexing Entry Points
2799 //===----------------------------------------------------------------------===//
2800 
2801 /// ReadToEndOfLine - Read the rest of the current preprocessor line as an
2802 /// uninterpreted string.  This switches the lexer out of directive mode.
void Lexer::ReadToEndOfLine(SmallVectorImpl<char> *Result) {
  assert(ParsingPreprocessorDirective && ParsingFilename == false &&
         "Must be in a preprocessing directive!");
  // Scratch token used by getAndAdvanceChar for escaped-newline accounting.
  Token Tmp;
  Tmp.startToken();

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;
  while (true) {
    char Char = getAndAdvanceChar(CurPtr, Tmp);
    switch (Char) {
    default:
      // Accumulate ordinary characters (Result may be null if the caller only
      // wants the line skipped).
      if (Result)
        Result->push_back(Char);
      break;
    case 0:  // Null.
      // Found end of file?
      if (CurPtr-1 != BufferEnd) {
        if (isCodeCompletionPoint(CurPtr-1)) {
          PP->CodeCompleteNaturalLanguage();
          cutOffLexing();
          return;
        }

        // Nope, normal character, continue.
        if (Result)
          Result->push_back(Char);
        break;
      }
      // FALL THROUGH.
      LLVM_FALLTHROUGH;
    case '\r':
    case '\n':
      // Okay, we found the end of the line. First, back up past the \0, \r, \n.
      assert(CurPtr[-1] == Char && "Trigraphs for newline?");
      BufferPtr = CurPtr-1;

      // Next, lex the character, which should handle the EOD transition.
      Lex(Tmp);
      if (Tmp.is(tok::code_completion)) {
        if (PP)
          PP->CodeCompleteNaturalLanguage();
        Lex(Tmp);
      }
      assert(Tmp.is(tok::eod) && "Unexpected token!");

      // Finally, we're done;
      return;
    }
  }
}
2854 
2855 /// LexEndOfFile - CurPtr points to the end of this file.  Handle this
2856 /// condition, reporting diagnostics and handling other edge cases as required.
2857 /// This returns true if Result contains a token, false if PP.Lex should be
2858 /// called again.
bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
  // If we hit the end of the file while parsing a preprocessor directive,
  // end the preprocessor directive first.  The next token returned will
  // then be the end of file.
  if (ParsingPreprocessorDirective) {
    // Done parsing the "line".
    ParsingPreprocessorDirective = false;
    // Update the location of token as well as BufferPtr.
    FormTokenWithChars(Result, CurPtr, tok::eod);

    // Restore comment saving mode, in case it was disabled for directive.
    if (PP)
      resetExtendedTokenMode();
    return true;  // Have a token.
  }

  // If we are in raw mode, return this event as an EOF token.  Let the caller
  // that put us in raw mode handle the event.
  if (isLexingRawMode()) {
    Result.startToken();
    BufferPtr = BufferEnd;
    FormTokenWithChars(Result, BufferEnd, tok::eof);
    return true;
  }

  if (PP->isRecordingPreamble() && PP->isInPrimaryFile()) {
    PP->setRecordedPreambleConditionalStack(ConditionalStack);
    // If the preamble cuts off the end of a header guard, consider it guarded.
    // The guard is valid for the preamble content itself, and for tools the
    // most useful answer is "yes, this file has a header guard".
    if (!ConditionalStack.empty())
      MIOpt.ExitTopLevelConditional();
    ConditionalStack.clear();
  }

  // Issue diagnostics for unterminated #if and missing newline.

  // If we are in a #if directive, emit an error.  Suppress the diagnostic if
  // this file contains the code-completion point (completion may legitimately
  // occur inside an unfinished conditional).
  while (!ConditionalStack.empty()) {
    if (PP->getCodeCompletionFileLoc() != FileLoc)
      PP->Diag(ConditionalStack.back().IfLoc,
               diag::err_pp_unterminated_conditional);
    ConditionalStack.pop_back();
  }

  SourceLocation EndLoc = getSourceLocation(BufferEnd);
  // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
  // a pedwarn.
  if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r')) {
    DiagnosticsEngine &Diags = PP->getDiagnostics();
    unsigned DiagID;

    if (LangOpts.CPlusPlus11) {
      // C++11 [lex.phases] 2.2 p2
      // Prefer the C++98 pedantic compatibility warning over the generic,
      // non-extension, user-requested "missing newline at EOF" warning.
      if (!Diags.isIgnored(diag::warn_cxx98_compat_no_newline_eof, EndLoc)) {
        DiagID = diag::warn_cxx98_compat_no_newline_eof;
      } else {
        DiagID = diag::warn_no_newline_eof;
      }
    } else {
      DiagID = diag::ext_no_newline_eof;
    }

    Diag(BufferEnd, DiagID)
      << FixItHint::CreateInsertion(EndLoc, "\n");
  }

  BufferPtr = CurPtr;

  // Finally, let the preprocessor handle this.
  return PP->HandleEndOfFile(Result, EndLoc, isPragmaLexer());
}
2933 
2934 /// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
2935 /// the specified lexer will return a tok::l_paren token, 0 if it is something
2936 /// else and 2 if there are no more tokens in the buffer controlled by the
2937 /// lexer.
unsigned Lexer::isNextPPTokenLParen() {
  assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");

  // Switch to 'skipping' mode.  This will ensure that we can lex a token
  // without emitting diagnostics, disables macro expansion, and will cause EOF
  // to return an EOF token instead of popping the include stack.
  LexingRawMode = true;

  // Save state that can be changed while lexing so that we can restore it.
  const char *TmpBufferPtr = BufferPtr;
  bool inPPDirectiveMode = ParsingPreprocessorDirective;
  bool atStartOfLine = IsAtStartOfLine;
  bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
  bool leadingSpace = HasLeadingSpace;

  // Lex one token ahead to peek at it.
  Token Tok;
  Lex(Tok);

  // Restore state that may have changed, so the peeked token will be re-lexed
  // normally by the next real Lex call.
  BufferPtr = TmpBufferPtr;
  ParsingPreprocessorDirective = inPPDirectiveMode;
  HasLeadingSpace = leadingSpace;
  IsAtStartOfLine = atStartOfLine;
  IsAtPhysicalStartOfLine = atPhysicalStartOfLine;

  // Restore the lexer back to non-skipping mode.
  LexingRawMode = false;

  // 2 = no more tokens, 1 = next token is '(', 0 = anything else.
  if (Tok.is(tok::eof))
    return 2;
  return Tok.is(tok::l_paren);
}
2970 
2971 /// Find the end of a version control conflict marker.
2972 static const char *FindConflictEnd(const char *CurPtr, const char *BufferEnd,
2973                                    ConflictMarkerKind CMK) {
2974   const char *Terminator = CMK == CMK_Perforce ? "<<<<\n" : ">>>>>>>";
2975   size_t TermLen = CMK == CMK_Perforce ? 5 : 7;
2976   auto RestOfBuffer = StringRef(CurPtr, BufferEnd - CurPtr).substr(TermLen);
2977   size_t Pos = RestOfBuffer.find(Terminator);
2978   while (Pos != StringRef::npos) {
2979     // Must occur at start of line.
2980     if (Pos == 0 ||
2981         (RestOfBuffer[Pos - 1] != '\r' && RestOfBuffer[Pos - 1] != '\n')) {
2982       RestOfBuffer = RestOfBuffer.substr(Pos+TermLen);
2983       Pos = RestOfBuffer.find(Terminator);
2984       continue;
2985     }
2986     return RestOfBuffer.data()+Pos;
2987   }
2988   return nullptr;
2989 }
2990 
2991 /// IsStartOfConflictMarker - If the specified pointer is the start of a version
2992 /// control conflict marker like '<<<<<<<', recognize it as such, emit an error
2993 /// and recover nicely.  This returns true if it is a conflict marker and false
2994 /// if not.
bool Lexer::IsStartOfConflictMarker(const char *CurPtr) {
  // Only a conflict marker if it starts at the beginning of a line.
  if (CurPtr != BufferStart &&
      CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
    return false;

  // Check to see if we have <<<<<<< or >>>>.
  if (!StringRef(CurPtr, BufferEnd - CurPtr).startswith("<<<<<<<") &&
      !StringRef(CurPtr, BufferEnd - CurPtr).startswith(">>>> "))
    return false;

  // If we have a situation where we don't care about conflict markers, ignore
  // it.
  if (CurrentConflictMarkerState || isLexingRawMode())
    return false;

  // '<' begins a normal (git/svn style) conflict; ">>>> " begins Perforce's.
  ConflictMarkerKind Kind = *CurPtr == '<' ? CMK_Normal : CMK_Perforce;

  // Check to see if there is an ending marker somewhere in the buffer at the
  // start of a line to terminate this conflict marker.
  if (FindConflictEnd(CurPtr, BufferEnd, Kind)) {
    // We found a match.  We are really in a conflict marker.
    // Diagnose this, and ignore to the end of line.
    Diag(CurPtr, diag::err_conflict_marker);
    CurrentConflictMarkerState = Kind;

    // Skip ahead to the end of line.  We know this exists because the
    // end-of-conflict marker starts with \r or \n.
    while (*CurPtr != '\r' && *CurPtr != '\n') {
      assert(CurPtr != BufferEnd && "Didn't find end of line");
      ++CurPtr;
    }
    BufferPtr = CurPtr;
    return true;
  }

  // No end of conflict marker found.
  return false;
}
3034 
3035 /// HandleEndOfConflictMarker - If this is a '====' or '||||' or '>>>>', or if
3036 /// it is '<<<<' and the conflict marker started with a '>>>>' marker, then it
3037 /// is the end of a conflict marker.  Handle it by ignoring up until the end of
3038 /// the line.  This returns true if it is a conflict marker and false if not.
bool Lexer::HandleEndOfConflictMarker(const char *CurPtr) {
  // Only a conflict marker if it starts at the beginning of a line.
  if (CurPtr != BufferStart &&
      CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
    return false;

  // If we have a situation where we don't care about conflict markers, ignore
  // it.
  if (!CurrentConflictMarkerState || isLexingRawMode())
    return false;

  // Check to see if we have the marker (4 characters in a row).
  for (unsigned i = 1; i != 4; ++i)
    if (CurPtr[i] != CurPtr[0])
      return false;

  // If we do have it, search for the end of the conflict marker.  This could
  // fail if it got skipped with a '#if 0' or something.  Note that CurPtr might
  // be the end of conflict marker.
  if (const char *End = FindConflictEnd(CurPtr, BufferEnd,
                                        CurrentConflictMarkerState)) {
    CurPtr = End;

    // Skip ahead to the end of line.
    while (CurPtr != BufferEnd && *CurPtr != '\r' && *CurPtr != '\n')
      ++CurPtr;

    BufferPtr = CurPtr;

    // No longer in the conflict marker.
    CurrentConflictMarkerState = CMK_None;
    return true;
  }

  return false;
}
3075 
/// Scan forward for the "#>" sequence that closes an editor placeholder.
/// Returns a pointer just past the '>', or null if the buffer ends before a
/// complete terminator is found.
static const char *findPlaceholderEnd(const char *CurPtr,
                                      const char *BufferEnd) {
  if (CurPtr == BufferEnd)
    return nullptr;
  const char Terminator[] = {'#', '>'};
  const char *Found = std::search(CurPtr, BufferEnd, Terminator, Terminator + 2);
  return Found == BufferEnd ? nullptr : Found + 2;
}
3087 
3088 bool Lexer::lexEditorPlaceholder(Token &Result, const char *CurPtr) {
3089   assert(CurPtr[-1] == '<' && CurPtr[0] == '#' && "Not a placeholder!");
3090   if (!PP || !PP->getPreprocessorOpts().LexEditorPlaceholders || LexingRawMode)
3091     return false;
3092   const char *End = findPlaceholderEnd(CurPtr + 1, BufferEnd);
3093   if (!End)
3094     return false;
3095   const char *Start = CurPtr - 1;
3096   if (!LangOpts.AllowEditorPlaceholders)
3097     Diag(Start, diag::err_placeholder_in_source);
3098   Result.startToken();
3099   FormTokenWithChars(Result, End, tok::raw_identifier);
3100   Result.setRawIdentifierData(Start);
3101   PP->LookUpIdentifierInfo(Result);
3102   Result.setFlag(Token::IsEditorPlaceholder);
3103   BufferPtr = End;
3104   return true;
3105 }
3106 
3107 bool Lexer::isCodeCompletionPoint(const char *CurPtr) const {
3108   if (PP && PP->isCodeCompletionEnabled()) {
3109     SourceLocation Loc = FileLoc.getLocWithOffset(CurPtr-BufferStart);
3110     return Loc == PP->getCodeCompletionLoc();
3111   }
3112 
3113   return false;
3114 }
3115 
/// Try to lex a universal character name (UCN) such as \u00E9, \U0001F600, or
/// the delimited form \u{1F600}, with \p StartPtr pointing just past the
/// backslash.  On success returns the code point and advances StartPtr past
/// the escape.  Returns 0 on failure; note StartPtr may still have been
/// advanced if the escape was well formed but names a disallowed character.
/// \p SlashLoc is the backslash's location for diagnostics; \p Result, if
/// non-null, receives the HasUCN flag and enables most diagnostics.
uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
                           Token *Result) {
  unsigned CharSize;
  // The character after the backslash selects the escape kind ('u' or 'U').
  char Kind = getCharAndSize(StartPtr, CharSize);
  bool Delimited = false;         // Saw the '{' of a \u{...} form.
  bool FoundEndDelimiter = false; // Saw the matching '}'.
  unsigned Count = 0;             // Hex digits consumed so far.
  // Most diagnostics are suppressed in raw mode or without a result token.
  bool Diagnose = Result && !isLexingRawMode();

  // \u takes exactly 4 hex digits and \U exactly 8 (in undelimited form);
  // any other character means this is not a UCN at all.
  unsigned NumHexDigits;
  if (Kind == 'u')
    NumHexDigits = 4;
  else if (Kind == 'U')
    NumHexDigits = 8;
  else
    return 0;

  // UCNs require C99 or C++; warn and bail out otherwise.
  if (!LangOpts.CPlusPlus && !LangOpts.C99) {
    if (Diagnose)
      Diag(SlashLoc, diag::warn_ucn_not_valid_in_c89);
    return 0;
  }

  const char *CurPtr = StartPtr + CharSize;
  const char *KindLoc = &CurPtr[-1]; // Location of the 'u'/'U' itself.

  uint32_t CodePoint = 0;
  // Accumulate hex digits: a fixed count for \uXXXX / \UXXXXXXXX, or
  // until the closing '}' for the delimited \u{...} form.
  while (Count != NumHexDigits || Delimited) {
    char C = getCharAndSize(CurPtr, CharSize);
    if (!Delimited && C == '{') {
      Delimited = true;
      CurPtr += CharSize;
      continue;
    }

    if (Delimited && C == '}') {
      CurPtr += CharSize;
      FoundEndDelimiter = true;
      break;
    }

    unsigned Value = llvm::hexDigitValue(C);
    if (Value == -1U) {
      // Non-hex character: inside a delimited escape this is an error;
      // otherwise it simply ends a (possibly short) escape, handled below.
      if (!Delimited)
        break;
      if (Diagnose)
        Diag(BufferPtr, diag::warn_delimited_ucn_incomplete)
            << StringRef(&C, 1);
      return 0;
    }

    // Appending another nibble would overflow the 32-bit code point.
    if (CodePoint & 0xF000'0000) {
      if (Diagnose)
        Diag(KindLoc, diag::err_escape_too_large) << 0;
      return 0;
    }

    CodePoint <<= 4;
    CodePoint |= Value;
    CurPtr += CharSize;
    Count++;
  }

  // No digits at all: either an empty \u{} or a bare \u / \U.
  if (Count == 0) {
    if (Diagnose)
      Diag(StartPtr, FoundEndDelimiter ? diag::warn_delimited_ucn_empty
                                       : diag::warn_ucn_escape_no_digits)
          << StringRef(KindLoc, 1);
    return 0;
  }

  // Undelimited escapes must supply the exact digit count for their kind.
  if (!Delimited && Count != NumHexDigits) {
    if (Diagnose) {
      Diag(BufferPtr, diag::warn_ucn_escape_incomplete);
      // If the user wrote \U1234, suggest a fixit to \u.
      if (Count == 4 && NumHexDigits == 8) {
        CharSourceRange URange = makeCharRange(*this, KindLoc, KindLoc + 1);
        Diag(KindLoc, diag::note_ucn_four_not_eight)
            << FixItHint::CreateReplacement(URange, "u");
      }
    }
    return 0;
  }

  // The delimited form is an extension.  Note this is gated on PP rather
  // than Diagnose, unlike the diagnostics above.
  if (Delimited && PP) {
    Diag(BufferPtr, diag::ext_delimited_escape_sequence);
  }

  if (Result) {
    Result->setFlag(Token::HasUCN);
    // Fast path: if the escape's raw length equals slash + kind + digits
    // (+ braces), nothing in it needed cleaning; otherwise re-walk it char
    // by char so any cleaning is recorded on the token.
    if (CurPtr - StartPtr == (ptrdiff_t)(Count + 2 + (Delimited ? 2 : 0)))
      StartPtr = CurPtr;
    else
      while (StartPtr != CurPtr)
        (void)getAndAdvanceChar(StartPtr, *Result);
  } else {
    StartPtr = CurPtr;
  }

  // Don't apply C family restrictions to UCNs in assembly mode
  if (LangOpts.AsmPreprocessor)
    return CodePoint;

  // C99 6.4.3p2: A universal character name shall not specify a character whose
  //   short identifier is less than 00A0 other than 0024 ($), 0040 (@), or
  //   0060 (`), nor one in the range D800 through DFFF inclusive.)
  // C++11 [lex.charset]p2: If the hexadecimal value for a
  //   universal-character-name corresponds to a surrogate code point (in the
  //   range 0xD800-0xDFFF, inclusive), the program is ill-formed. Additionally,
  //   if the hexadecimal value for a universal-character-name outside the
  //   c-char-sequence, s-char-sequence, or r-char-sequence of a character or
  //   string literal corresponds to a control character (in either of the
  //   ranges 0x00-0x1F or 0x7F-0x9F, both inclusive) or to a character in the
  //   basic source character set, the program is ill-formed.
  if (CodePoint < 0xA0) {
    // '$', '@', and '`' are the explicitly permitted exceptions below U+00A0.
    if (CodePoint == 0x24 || CodePoint == 0x40 || CodePoint == 0x60)
      return CodePoint;

    // We don't use isLexingRawMode() here because we need to warn about bad
    // UCNs even when skipping preprocessing tokens in a #if block.
    if (Result && PP) {
      if (CodePoint < 0x20 || CodePoint >= 0x7F)
        Diag(BufferPtr, diag::err_ucn_control_character);
      else {
        char C = static_cast<char>(CodePoint);
        Diag(BufferPtr, diag::err_ucn_escape_basic_scs) << StringRef(&C, 1);
      }
    }

    return 0;
  } else if (CodePoint >= 0xD800 && CodePoint <= 0xDFFF) {
    // C++03 allows UCNs representing surrogate characters. C99 and C++11 don't.
    // We don't use isLexingRawMode() here because we need to diagnose bad
    // UCNs even when skipping preprocessing tokens in a #if block.
    if (Result && PP) {
      if (LangOpts.CPlusPlus && !LangOpts.CPlusPlus11)
        Diag(BufferPtr, diag::warn_ucn_escape_surrogate);
      else
        Diag(BufferPtr, diag::err_ucn_escape_invalid);
    }
    return 0;
  }

  return CodePoint;
}
3261 
3262 bool Lexer::CheckUnicodeWhitespace(Token &Result, uint32_t C,
3263                                    const char *CurPtr) {
3264   if (!isLexingRawMode() && !PP->isPreprocessedOutput() &&
3265       isUnicodeWhitespace(C)) {
3266     Diag(BufferPtr, diag::ext_unicode_whitespace)
3267       << makeCharRange(*this, BufferPtr, CurPtr);
3268 
3269     Result.setFlag(Token::LeadingSpace);
3270     return true;
3271   }
3272   return false;
3273 }
3274 
// Copy the whitespace-related flags from \p Result back into the lexer's own
// pending state, so they are applied to the next token this lexer produces.
void Lexer::PropagateLineStartLeadingSpaceInfo(Token &Result) {
  IsAtStartOfLine = Result.isAtStartOfLine();
  HasLeadingSpace = Result.hasLeadingSpace();
  HasLeadingEmptyMacro = Result.hasLeadingEmptyMacro();
  // Note that this doesn't affect IsAtPhysicalStartOfLine.
}
3281 
3282 bool Lexer::Lex(Token &Result) {
3283   // Start a new token.
3284   Result.startToken();
3285 
3286   // Set up misc whitespace flags for LexTokenInternal.
3287   if (IsAtStartOfLine) {
3288     Result.setFlag(Token::StartOfLine);
3289     IsAtStartOfLine = false;
3290   }
3291 
3292   if (HasLeadingSpace) {
3293     Result.setFlag(Token::LeadingSpace);
3294     HasLeadingSpace = false;
3295   }
3296 
3297   if (HasLeadingEmptyMacro) {
3298     Result.setFlag(Token::LeadingEmptyMacro);
3299     HasLeadingEmptyMacro = false;
3300   }
3301 
3302   bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
3303   IsAtPhysicalStartOfLine = false;
3304   bool isRawLex = isLexingRawMode();
3305   (void) isRawLex;
3306   bool returnedToken = LexTokenInternal(Result, atPhysicalStartOfLine);
3307   // (After the LexTokenInternal call, the lexer might be destroyed.)
3308   assert((returnedToken || !isRawLex) && "Raw lex must succeed");
3309   return returnedToken;
3310 }
3311 
3312 /// LexTokenInternal - This implements a simple C family lexer.  It is an
3313 /// extremely performance critical piece of code.  This assumes that the buffer
3314 /// has a null character at the end of the file.  This returns a preprocessing
3315 /// token, not a normal token, as such, it is an internal interface.  It assumes
3316 /// that the Flags of result have been cleared before calling this.
3317 bool Lexer::LexTokenInternal(Token &Result, bool TokAtPhysicalStartOfLine) {
3318 LexNextToken:
3319   // New token, can't need cleaning yet.
3320   Result.clearFlag(Token::NeedsCleaning);
3321   Result.setIdentifierInfo(nullptr);
3322 
3323   // CurPtr - Cache BufferPtr in an automatic variable.
3324   const char *CurPtr = BufferPtr;
3325 
3326   // Small amounts of horizontal whitespace is very common between tokens.
3327   if (isHorizontalWhitespace(*CurPtr)) {
3328     do {
3329       ++CurPtr;
3330     } while (isHorizontalWhitespace(*CurPtr));
3331 
3332     // If we are keeping whitespace and other tokens, just return what we just
3333     // skipped.  The next lexer invocation will return the token after the
3334     // whitespace.
3335     if (isKeepWhitespaceMode()) {
3336       FormTokenWithChars(Result, CurPtr, tok::unknown);
3337       // FIXME: The next token will not have LeadingSpace set.
3338       return true;
3339     }
3340 
3341     BufferPtr = CurPtr;
3342     Result.setFlag(Token::LeadingSpace);
3343   }
3344 
3345   unsigned SizeTmp, SizeTmp2;   // Temporaries for use in cases below.
3346 
3347   // Read a character, advancing over it.
3348   char Char = getAndAdvanceChar(CurPtr, Result);
3349   tok::TokenKind Kind;
3350 
3351   if (!isVerticalWhitespace(Char))
3352     NewLinePtr = nullptr;
3353 
3354   switch (Char) {
3355   case 0:  // Null.
3356     // Found end of file?
3357     if (CurPtr-1 == BufferEnd)
3358       return LexEndOfFile(Result, CurPtr-1);
3359 
3360     // Check if we are performing code completion.
3361     if (isCodeCompletionPoint(CurPtr-1)) {
3362       // Return the code-completion token.
3363       Result.startToken();
3364       FormTokenWithChars(Result, CurPtr, tok::code_completion);
3365       return true;
3366     }
3367 
3368     if (!isLexingRawMode())
3369       Diag(CurPtr-1, diag::null_in_file);
3370     Result.setFlag(Token::LeadingSpace);
3371     if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
3372       return true; // KeepWhitespaceMode
3373 
3374     // We know the lexer hasn't changed, so just try again with this lexer.
3375     // (We manually eliminate the tail call to avoid recursion.)
3376     goto LexNextToken;
3377 
3378   case 26:  // DOS & CP/M EOF: "^Z".
3379     // If we're in Microsoft extensions mode, treat this as end of file.
3380     if (LangOpts.MicrosoftExt) {
3381       if (!isLexingRawMode())
3382         Diag(CurPtr-1, diag::ext_ctrl_z_eof_microsoft);
3383       return LexEndOfFile(Result, CurPtr-1);
3384     }
3385 
3386     // If Microsoft extensions are disabled, this is just random garbage.
3387     Kind = tok::unknown;
3388     break;
3389 
3390   case '\r':
3391     if (CurPtr[0] == '\n')
3392       (void)getAndAdvanceChar(CurPtr, Result);
3393     LLVM_FALLTHROUGH;
3394   case '\n':
3395     // If we are inside a preprocessor directive and we see the end of line,
3396     // we know we are done with the directive, so return an EOD token.
3397     if (ParsingPreprocessorDirective) {
3398       // Done parsing the "line".
3399       ParsingPreprocessorDirective = false;
3400 
3401       // Restore comment saving mode, in case it was disabled for directive.
3402       if (PP)
3403         resetExtendedTokenMode();
3404 
3405       // Since we consumed a newline, we are back at the start of a line.
3406       IsAtStartOfLine = true;
3407       IsAtPhysicalStartOfLine = true;
3408       NewLinePtr = CurPtr - 1;
3409 
3410       Kind = tok::eod;
3411       break;
3412     }
3413 
3414     // No leading whitespace seen so far.
3415     Result.clearFlag(Token::LeadingSpace);
3416 
3417     if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
3418       return true; // KeepWhitespaceMode
3419 
3420     // We only saw whitespace, so just try again with this lexer.
3421     // (We manually eliminate the tail call to avoid recursion.)
3422     goto LexNextToken;
3423   case ' ':
3424   case '\t':
3425   case '\f':
3426   case '\v':
3427   SkipHorizontalWhitespace:
3428     Result.setFlag(Token::LeadingSpace);
3429     if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
3430       return true; // KeepWhitespaceMode
3431 
3432   SkipIgnoredUnits:
3433     CurPtr = BufferPtr;
3434 
3435     // If the next token is obviously a // or /* */ comment, skip it efficiently
3436     // too (without going through the big switch stmt).
3437     if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() &&
3438         LangOpts.LineComment &&
3439         (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP)) {
3440       if (SkipLineComment(Result, CurPtr+2, TokAtPhysicalStartOfLine))
3441         return true; // There is a token to return.
3442       goto SkipIgnoredUnits;
3443     } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) {
3444       if (SkipBlockComment(Result, CurPtr+2, TokAtPhysicalStartOfLine))
3445         return true; // There is a token to return.
3446       goto SkipIgnoredUnits;
3447     } else if (isHorizontalWhitespace(*CurPtr)) {
3448       goto SkipHorizontalWhitespace;
3449     }
3450     // We only saw whitespace, so just try again with this lexer.
3451     // (We manually eliminate the tail call to avoid recursion.)
3452     goto LexNextToken;
3453 
3454   // C99 6.4.4.1: Integer Constants.
3455   // C99 6.4.4.2: Floating Constants.
3456   case '0': case '1': case '2': case '3': case '4':
3457   case '5': case '6': case '7': case '8': case '9':
3458     // Notify MIOpt that we read a non-whitespace/non-comment token.
3459     MIOpt.ReadToken();
3460     return LexNumericConstant(Result, CurPtr);
3461 
3462   case 'u':   // Identifier (uber) or C11/C++11 UTF-8 or UTF-16 string literal
3463     // Notify MIOpt that we read a non-whitespace/non-comment token.
3464     MIOpt.ReadToken();
3465 
3466     if (LangOpts.CPlusPlus11 || LangOpts.C11) {
3467       Char = getCharAndSize(CurPtr, SizeTmp);
3468 
3469       // UTF-16 string literal
3470       if (Char == '"')
3471         return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3472                                 tok::utf16_string_literal);
3473 
3474       // UTF-16 character constant
3475       if (Char == '\'')
3476         return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3477                                tok::utf16_char_constant);
3478 
3479       // UTF-16 raw string literal
3480       if (Char == 'R' && LangOpts.CPlusPlus11 &&
3481           getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
3482         return LexRawStringLiteral(Result,
3483                                ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3484                                            SizeTmp2, Result),
3485                                tok::utf16_string_literal);
3486 
3487       if (Char == '8') {
3488         char Char2 = getCharAndSize(CurPtr + SizeTmp, SizeTmp2);
3489 
3490         // UTF-8 string literal
3491         if (Char2 == '"')
3492           return LexStringLiteral(Result,
3493                                ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3494                                            SizeTmp2, Result),
3495                                tok::utf8_string_literal);
3496         if (Char2 == '\'' && LangOpts.CPlusPlus17)
3497           return LexCharConstant(
3498               Result, ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3499                                   SizeTmp2, Result),
3500               tok::utf8_char_constant);
3501 
3502         if (Char2 == 'R' && LangOpts.CPlusPlus11) {
3503           unsigned SizeTmp3;
3504           char Char3 = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3);
3505           // UTF-8 raw string literal
3506           if (Char3 == '"') {
3507             return LexRawStringLiteral(Result,
3508                    ConsumeChar(ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3509                                            SizeTmp2, Result),
3510                                SizeTmp3, Result),
3511                    tok::utf8_string_literal);
3512           }
3513         }
3514       }
3515     }
3516 
3517     // treat u like the start of an identifier.
3518     return LexIdentifierContinue(Result, CurPtr);
3519 
3520   case 'U':   // Identifier (Uber) or C11/C++11 UTF-32 string literal
3521     // Notify MIOpt that we read a non-whitespace/non-comment token.
3522     MIOpt.ReadToken();
3523 
3524     if (LangOpts.CPlusPlus11 || LangOpts.C11) {
3525       Char = getCharAndSize(CurPtr, SizeTmp);
3526 
3527       // UTF-32 string literal
3528       if (Char == '"')
3529         return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3530                                 tok::utf32_string_literal);
3531 
3532       // UTF-32 character constant
3533       if (Char == '\'')
3534         return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3535                                tok::utf32_char_constant);
3536 
3537       // UTF-32 raw string literal
3538       if (Char == 'R' && LangOpts.CPlusPlus11 &&
3539           getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
3540         return LexRawStringLiteral(Result,
3541                                ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3542                                            SizeTmp2, Result),
3543                                tok::utf32_string_literal);
3544     }
3545 
3546     // treat U like the start of an identifier.
3547     return LexIdentifierContinue(Result, CurPtr);
3548 
3549   case 'R': // Identifier or C++0x raw string literal
3550     // Notify MIOpt that we read a non-whitespace/non-comment token.
3551     MIOpt.ReadToken();
3552 
3553     if (LangOpts.CPlusPlus11) {
3554       Char = getCharAndSize(CurPtr, SizeTmp);
3555 
3556       if (Char == '"')
3557         return LexRawStringLiteral(Result,
3558                                    ConsumeChar(CurPtr, SizeTmp, Result),
3559                                    tok::string_literal);
3560     }
3561 
3562     // treat R like the start of an identifier.
3563     return LexIdentifierContinue(Result, CurPtr);
3564 
3565   case 'L':   // Identifier (Loony) or wide literal (L'x' or L"xyz").
3566     // Notify MIOpt that we read a non-whitespace/non-comment token.
3567     MIOpt.ReadToken();
3568     Char = getCharAndSize(CurPtr, SizeTmp);
3569 
3570     // Wide string literal.
3571     if (Char == '"')
3572       return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3573                               tok::wide_string_literal);
3574 
3575     // Wide raw string literal.
3576     if (LangOpts.CPlusPlus11 && Char == 'R' &&
3577         getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
3578       return LexRawStringLiteral(Result,
3579                                ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3580                                            SizeTmp2, Result),
3581                                tok::wide_string_literal);
3582 
3583     // Wide character constant.
3584     if (Char == '\'')
3585       return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3586                              tok::wide_char_constant);
3587     // FALL THROUGH, treating L like the start of an identifier.
3588     LLVM_FALLTHROUGH;
3589 
3590   // C99 6.4.2: Identifiers.
3591   case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
3592   case 'H': case 'I': case 'J': case 'K':    /*'L'*/case 'M': case 'N':
3593   case 'O': case 'P': case 'Q':    /*'R'*/case 'S': case 'T':    /*'U'*/
3594   case 'V': case 'W': case 'X': case 'Y': case 'Z':
3595   case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
3596   case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
3597   case 'o': case 'p': case 'q': case 'r': case 's': case 't':    /*'u'*/
3598   case 'v': case 'w': case 'x': case 'y': case 'z':
3599   case '_':
3600     // Notify MIOpt that we read a non-whitespace/non-comment token.
3601     MIOpt.ReadToken();
3602     return LexIdentifierContinue(Result, CurPtr);
3603 
3604   case '$':   // $ in identifiers.
3605     if (LangOpts.DollarIdents) {
3606       if (!isLexingRawMode())
3607         Diag(CurPtr-1, diag::ext_dollar_in_identifier);
3608       // Notify MIOpt that we read a non-whitespace/non-comment token.
3609       MIOpt.ReadToken();
3610       return LexIdentifierContinue(Result, CurPtr);
3611     }
3612 
3613     Kind = tok::unknown;
3614     break;
3615 
3616   // C99 6.4.4: Character Constants.
3617   case '\'':
3618     // Notify MIOpt that we read a non-whitespace/non-comment token.
3619     MIOpt.ReadToken();
3620     return LexCharConstant(Result, CurPtr, tok::char_constant);
3621 
3622   // C99 6.4.5: String Literals.
3623   case '"':
3624     // Notify MIOpt that we read a non-whitespace/non-comment token.
3625     MIOpt.ReadToken();
3626     return LexStringLiteral(Result, CurPtr,
3627                             ParsingFilename ? tok::header_name
3628                                             : tok::string_literal);
3629 
3630   // C99 6.4.6: Punctuators.
3631   case '?':
3632     Kind = tok::question;
3633     break;
3634   case '[':
3635     Kind = tok::l_square;
3636     break;
3637   case ']':
3638     Kind = tok::r_square;
3639     break;
3640   case '(':
3641     Kind = tok::l_paren;
3642     break;
3643   case ')':
3644     Kind = tok::r_paren;
3645     break;
3646   case '{':
3647     Kind = tok::l_brace;
3648     break;
3649   case '}':
3650     Kind = tok::r_brace;
3651     break;
3652   case '.':
3653     Char = getCharAndSize(CurPtr, SizeTmp);
3654     if (Char >= '0' && Char <= '9') {
3655       // Notify MIOpt that we read a non-whitespace/non-comment token.
3656       MIOpt.ReadToken();
3657 
3658       return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
3659     } else if (LangOpts.CPlusPlus && Char == '*') {
3660       Kind = tok::periodstar;
3661       CurPtr += SizeTmp;
3662     } else if (Char == '.' &&
3663                getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
3664       Kind = tok::ellipsis;
3665       CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3666                            SizeTmp2, Result);
3667     } else {
3668       Kind = tok::period;
3669     }
3670     break;
3671   case '&':
3672     Char = getCharAndSize(CurPtr, SizeTmp);
3673     if (Char == '&') {
3674       Kind = tok::ampamp;
3675       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3676     } else if (Char == '=') {
3677       Kind = tok::ampequal;
3678       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3679     } else {
3680       Kind = tok::amp;
3681     }
3682     break;
3683   case '*':
3684     if (getCharAndSize(CurPtr, SizeTmp) == '=') {
3685       Kind = tok::starequal;
3686       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3687     } else {
3688       Kind = tok::star;
3689     }
3690     break;
3691   case '+':
3692     Char = getCharAndSize(CurPtr, SizeTmp);
3693     if (Char == '+') {
3694       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3695       Kind = tok::plusplus;
3696     } else if (Char == '=') {
3697       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3698       Kind = tok::plusequal;
3699     } else {
3700       Kind = tok::plus;
3701     }
3702     break;
3703   case '-':
3704     Char = getCharAndSize(CurPtr, SizeTmp);
3705     if (Char == '-') {      // --
3706       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3707       Kind = tok::minusminus;
3708     } else if (Char == '>' && LangOpts.CPlusPlus &&
3709                getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') {  // C++ ->*
3710       CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3711                            SizeTmp2, Result);
3712       Kind = tok::arrowstar;
3713     } else if (Char == '>') {   // ->
3714       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3715       Kind = tok::arrow;
3716     } else if (Char == '=') {   // -=
3717       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3718       Kind = tok::minusequal;
3719     } else {
3720       Kind = tok::minus;
3721     }
3722     break;
3723   case '~':
3724     Kind = tok::tilde;
3725     break;
3726   case '!':
3727     if (getCharAndSize(CurPtr, SizeTmp) == '=') {
3728       Kind = tok::exclaimequal;
3729       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3730     } else {
3731       Kind = tok::exclaim;
3732     }
3733     break;
3734   case '/':
3735     // 6.4.9: Comments
3736     Char = getCharAndSize(CurPtr, SizeTmp);
3737     if (Char == '/') {         // Line comment.
3738       // Even if Line comments are disabled (e.g. in C89 mode), we generally
3739       // want to lex this as a comment.  There is one problem with this though,
3740       // that in one particular corner case, this can change the behavior of the
3741       // resultant program.  For example, In  "foo //**/ bar", C89 would lex
3742       // this as "foo / bar" and languages with Line comments would lex it as
3743       // "foo".  Check to see if the character after the second slash is a '*'.
3744       // If so, we will lex that as a "/" instead of the start of a comment.
3745       // However, we never do this if we are just preprocessing.
3746       bool TreatAsComment = LangOpts.LineComment &&
3747                             (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP);
3748       if (!TreatAsComment)
3749         if (!(PP && PP->isPreprocessedOutput()))
3750           TreatAsComment = getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*';
3751 
3752       if (TreatAsComment) {
3753         if (SkipLineComment(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3754                             TokAtPhysicalStartOfLine))
3755           return true; // There is a token to return.
3756 
3757         // It is common for the tokens immediately after a // comment to be
3758         // whitespace (indentation for the next line).  Instead of going through
3759         // the big switch, handle it efficiently now.
3760         goto SkipIgnoredUnits;
3761       }
3762     }
3763 
3764     if (Char == '*') {  // /**/ comment.
3765       if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3766                            TokAtPhysicalStartOfLine))
3767         return true; // There is a token to return.
3768 
3769       // We only saw whitespace, so just try again with this lexer.
3770       // (We manually eliminate the tail call to avoid recursion.)
3771       goto LexNextToken;
3772     }
3773 
3774     if (Char == '=') {
3775       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3776       Kind = tok::slashequal;
3777     } else {
3778       Kind = tok::slash;
3779     }
3780     break;
3781   case '%':
3782     Char = getCharAndSize(CurPtr, SizeTmp);
3783     if (Char == '=') {
3784       Kind = tok::percentequal;
3785       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3786     } else if (LangOpts.Digraphs && Char == '>') {
3787       Kind = tok::r_brace;                             // '%>' -> '}'
3788       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3789     } else if (LangOpts.Digraphs && Char == ':') {
3790       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3791       Char = getCharAndSize(CurPtr, SizeTmp);
3792       if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
3793         Kind = tok::hashhash;                          // '%:%:' -> '##'
3794         CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3795                              SizeTmp2, Result);
3796       } else if (Char == '@' && LangOpts.MicrosoftExt) {// %:@ -> #@ -> Charize
3797         CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3798         if (!isLexingRawMode())
3799           Diag(BufferPtr, diag::ext_charize_microsoft);
3800         Kind = tok::hashat;
3801       } else {                                         // '%:' -> '#'
3802         // We parsed a # character.  If this occurs at the start of the line,
3803         // it's actually the start of a preprocessing directive.  Callback to
3804         // the preprocessor to handle it.
3805         // TODO: -fpreprocessed mode??
3806         if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer)
3807           goto HandleDirective;
3808 
3809         Kind = tok::hash;
3810       }
3811     } else {
3812       Kind = tok::percent;
3813     }
3814     break;
3815   case '<':
3816     Char = getCharAndSize(CurPtr, SizeTmp);
3817     if (ParsingFilename) {
3818       return LexAngledStringLiteral(Result, CurPtr);
3819     } else if (Char == '<') {
3820       char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
3821       if (After == '=') {
3822         Kind = tok::lesslessequal;
3823         CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3824                              SizeTmp2, Result);
3825       } else if (After == '<' && IsStartOfConflictMarker(CurPtr-1)) {
3826         // If this is actually a '<<<<<<<' version control conflict marker,
3827         // recognize it as such and recover nicely.
3828         goto LexNextToken;
3829       } else if (After == '<' && HandleEndOfConflictMarker(CurPtr-1)) {
3830         // If this is '<<<<' and we're in a Perforce-style conflict marker,
3831         // ignore it.
3832         goto LexNextToken;
3833       } else if (LangOpts.CUDA && After == '<') {
3834         Kind = tok::lesslessless;
3835         CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3836                              SizeTmp2, Result);
3837       } else {
3838         CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3839         Kind = tok::lessless;
3840       }
3841     } else if (Char == '=') {
3842       char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
3843       if (After == '>') {
3844         if (getLangOpts().CPlusPlus20) {
3845           if (!isLexingRawMode())
3846             Diag(BufferPtr, diag::warn_cxx17_compat_spaceship);
3847           CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3848                                SizeTmp2, Result);
3849           Kind = tok::spaceship;
3850           break;
3851         }
3852         // Suggest adding a space between the '<=' and the '>' to avoid a
3853         // change in semantics if this turns up in C++ <=17 mode.
3854         if (getLangOpts().CPlusPlus && !isLexingRawMode()) {
3855           Diag(BufferPtr, diag::warn_cxx20_compat_spaceship)
3856             << FixItHint::CreateInsertion(
3857                    getSourceLocation(CurPtr + SizeTmp, SizeTmp2), " ");
3858         }
3859       }
3860       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3861       Kind = tok::lessequal;
3862     } else if (LangOpts.Digraphs && Char == ':') {     // '<:' -> '['
3863       if (LangOpts.CPlusPlus11 &&
3864           getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == ':') {
3865         // C++0x [lex.pptoken]p3:
3866         //  Otherwise, if the next three characters are <:: and the subsequent
3867         //  character is neither : nor >, the < is treated as a preprocessor
3868         //  token by itself and not as the first character of the alternative
3869         //  token <:.
3870         unsigned SizeTmp3;
3871         char After = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3);
3872         if (After != ':' && After != '>') {
3873           Kind = tok::less;
3874           if (!isLexingRawMode())
3875             Diag(BufferPtr, diag::warn_cxx98_compat_less_colon_colon);
3876           break;
3877         }
3878       }
3879 
3880       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3881       Kind = tok::l_square;
3882     } else if (LangOpts.Digraphs && Char == '%') {     // '<%' -> '{'
3883       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3884       Kind = tok::l_brace;
3885     } else if (Char == '#' && /*Not a trigraph*/ SizeTmp == 1 &&
3886                lexEditorPlaceholder(Result, CurPtr)) {
3887       return true;
3888     } else {
3889       Kind = tok::less;
3890     }
3891     break;
3892   case '>':
3893     Char = getCharAndSize(CurPtr, SizeTmp);
3894     if (Char == '=') {
3895       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3896       Kind = tok::greaterequal;
3897     } else if (Char == '>') {
3898       char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
3899       if (After == '=') {
3900         CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3901                              SizeTmp2, Result);
3902         Kind = tok::greatergreaterequal;
3903       } else if (After == '>' && IsStartOfConflictMarker(CurPtr-1)) {
3904         // If this is actually a '>>>>' conflict marker, recognize it as such
3905         // and recover nicely.
3906         goto LexNextToken;
3907       } else if (After == '>' && HandleEndOfConflictMarker(CurPtr-1)) {
3908         // If this is '>>>>>>>' and we're in a conflict marker, ignore it.
3909         goto LexNextToken;
3910       } else if (LangOpts.CUDA && After == '>') {
3911         Kind = tok::greatergreatergreater;
3912         CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3913                              SizeTmp2, Result);
3914       } else {
3915         CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3916         Kind = tok::greatergreater;
3917       }
3918     } else {
3919       Kind = tok::greater;
3920     }
3921     break;
3922   case '^':
3923     Char = getCharAndSize(CurPtr, SizeTmp);
3924     if (Char == '=') {
3925       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3926       Kind = tok::caretequal;
3927     } else if (LangOpts.OpenCL && Char == '^') {
3928       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3929       Kind = tok::caretcaret;
3930     } else {
3931       Kind = tok::caret;
3932     }
3933     break;
3934   case '|':
3935     Char = getCharAndSize(CurPtr, SizeTmp);
3936     if (Char == '=') {
3937       Kind = tok::pipeequal;
3938       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3939     } else if (Char == '|') {
3940       // If this is '|||||||' and we're in a conflict marker, ignore it.
3941       if (CurPtr[1] == '|' && HandleEndOfConflictMarker(CurPtr-1))
3942         goto LexNextToken;
3943       Kind = tok::pipepipe;
3944       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3945     } else {
3946       Kind = tok::pipe;
3947     }
3948     break;
3949   case ':':
3950     Char = getCharAndSize(CurPtr, SizeTmp);
3951     if (LangOpts.Digraphs && Char == '>') {
3952       Kind = tok::r_square; // ':>' -> ']'
3953       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3954     } else if ((LangOpts.CPlusPlus ||
3955                 LangOpts.DoubleSquareBracketAttributes) &&
3956                Char == ':') {
3957       Kind = tok::coloncolon;
3958       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3959     } else {
3960       Kind = tok::colon;
3961     }
3962     break;
3963   case ';':
3964     Kind = tok::semi;
3965     break;
3966   case '=':
3967     Char = getCharAndSize(CurPtr, SizeTmp);
3968     if (Char == '=') {
3969       // If this is '====' and we're in a conflict marker, ignore it.
3970       if (CurPtr[1] == '=' && HandleEndOfConflictMarker(CurPtr-1))
3971         goto LexNextToken;
3972 
3973       Kind = tok::equalequal;
3974       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3975     } else {
3976       Kind = tok::equal;
3977     }
3978     break;
3979   case ',':
3980     Kind = tok::comma;
3981     break;
3982   case '#':
3983     Char = getCharAndSize(CurPtr, SizeTmp);
3984     if (Char == '#') {
3985       Kind = tok::hashhash;
3986       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3987     } else if (Char == '@' && LangOpts.MicrosoftExt) {  // #@ -> Charize
3988       Kind = tok::hashat;
3989       if (!isLexingRawMode())
3990         Diag(BufferPtr, diag::ext_charize_microsoft);
3991       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3992     } else {
3993       // We parsed a # character.  If this occurs at the start of the line,
3994       // it's actually the start of a preprocessing directive.  Callback to
3995       // the preprocessor to handle it.
3996       // TODO: -fpreprocessed mode??
3997       if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer)
3998         goto HandleDirective;
3999 
4000       Kind = tok::hash;
4001     }
4002     break;
4003 
4004   case '@':
4005     // Objective C support.
4006     if (CurPtr[-1] == '@' && LangOpts.ObjC)
4007       Kind = tok::at;
4008     else
4009       Kind = tok::unknown;
4010     break;
4011 
4012   // UCNs (C99 6.4.3, C++11 [lex.charset]p2)
4013   case '\\':
4014     if (!LangOpts.AsmPreprocessor) {
4015       if (uint32_t CodePoint = tryReadUCN(CurPtr, BufferPtr, &Result)) {
4016         if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) {
4017           if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
4018             return true; // KeepWhitespaceMode
4019 
4020           // We only saw whitespace, so just try again with this lexer.
4021           // (We manually eliminate the tail call to avoid recursion.)
4022           goto LexNextToken;
4023         }
4024 
4025         return LexUnicodeIdentifierStart(Result, CodePoint, CurPtr);
4026       }
4027     }
4028 
4029     Kind = tok::unknown;
4030     break;
4031 
4032   default: {
4033     if (isASCII(Char)) {
4034       Kind = tok::unknown;
4035       break;
4036     }
4037 
4038     llvm::UTF32 CodePoint;
4039 
4040     // We can't just reset CurPtr to BufferPtr because BufferPtr may point to
4041     // an escaped newline.
4042     --CurPtr;
4043     llvm::ConversionResult Status =
4044         llvm::convertUTF8Sequence((const llvm::UTF8 **)&CurPtr,
4045                                   (const llvm::UTF8 *)BufferEnd,
4046                                   &CodePoint,
4047                                   llvm::strictConversion);
4048     if (Status == llvm::conversionOK) {
4049       if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) {
4050         if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
4051           return true; // KeepWhitespaceMode
4052 
4053         // We only saw whitespace, so just try again with this lexer.
4054         // (We manually eliminate the tail call to avoid recursion.)
4055         goto LexNextToken;
4056       }
4057       return LexUnicodeIdentifierStart(Result, CodePoint, CurPtr);
4058     }
4059 
4060     if (isLexingRawMode() || ParsingPreprocessorDirective ||
4061         PP->isPreprocessedOutput()) {
4062       ++CurPtr;
4063       Kind = tok::unknown;
4064       break;
4065     }
4066 
4067     // Non-ASCII characters tend to creep into source code unintentionally.
4068     // Instead of letting the parser complain about the unknown token,
4069     // just diagnose the invalid UTF-8, then drop the character.
4070     Diag(CurPtr, diag::err_invalid_utf8);
4071 
4072     BufferPtr = CurPtr+1;
4073     // We're pretending the character didn't exist, so just try again with
4074     // this lexer.
4075     // (We manually eliminate the tail call to avoid recursion.)
4076     goto LexNextToken;
4077   }
4078   }
4079 
4080   // Notify MIOpt that we read a non-whitespace/non-comment token.
4081   MIOpt.ReadToken();
4082 
4083   // Update the location of token as well as BufferPtr.
4084   FormTokenWithChars(Result, CurPtr, Kind);
4085   return true;
4086 
4087 HandleDirective:
4088   // We parsed a # character and it's the start of a preprocessing directive.
4089 
4090   FormTokenWithChars(Result, CurPtr, tok::hash);
4091   PP->HandleDirective(Result);
4092 
4093   if (PP->hadModuleLoaderFatalFailure()) {
4094     // With a fatal failure in the module loader, we abort parsing.
4095     assert(Result.is(tok::eof) && "Preprocessor did not set tok:eof");
4096     return true;
4097   }
4098 
4099   // We parsed the directive; lex a token with the new state.
4100   return false;
4101 }
4102