1 //===- DependencyDirectivesScanner.cpp ------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This is the interface for scanning header and source files to get the
11 /// minimum necessary preprocessor directives for evaluating includes. It
12 /// reduces the source down to #define, #include, #import, @import, and any
13 /// conditional preprocessor logic that contains one of those.
14 ///
15 //===----------------------------------------------------------------------===//
16
17 #include "clang/Lex/DependencyDirectivesScanner.h"
18 #include "clang/Basic/CharInfo.h"
19 #include "clang/Basic/Diagnostic.h"
20 #include "clang/Lex/LexDiagnostic.h"
21 #include "clang/Lex/Lexer.h"
22 #include "clang/Lex/Pragma.h"
23 #include "llvm/ADT/ScopeExit.h"
24 #include "llvm/ADT/SmallString.h"
25 #include "llvm/ADT/StringMap.h"
26 #include "llvm/ADT/StringSwitch.h"
27 #include <optional>
28
29 using namespace clang;
30 using namespace clang::dependency_directives_scan;
31 using namespace llvm;
32
33 namespace {
34
35 struct DirectiveWithTokens {
36 DirectiveKind Kind;
37 unsigned NumTokens;
38
DirectiveWithTokens__anon1b39d32d0111::DirectiveWithTokens39 DirectiveWithTokens(DirectiveKind Kind, unsigned NumTokens)
40 : Kind(Kind), NumTokens(NumTokens) {}
41 };
42
43 /// Does an efficient "scan" of the sources to detect the presence of
44 /// preprocessor (or module import) directives and collects the raw lexed tokens
45 /// for those directives so that the \p Lexer can "replay" them when the file is
46 /// included.
47 ///
48 /// Note that the behavior of the raw lexer is affected by the language mode,
49 /// while at this point we want to do a scan and collect tokens once,
50 /// irrespective of the language mode that the file will get included in. To
51 /// compensate for that the \p Lexer, while "replaying", will adjust a token
52 /// where appropriate, when it could affect the preprocessor's state.
53 /// For example in a directive like
54 ///
55 /// \code
56 /// #if __has_cpp_attribute(clang::fallthrough)
57 /// \endcode
58 ///
59 /// The preprocessor needs to see '::' as 'tok::coloncolon' instead of 2
60 /// 'tok::colon'. The \p Lexer will adjust if it sees consecutive 'tok::colon'
61 /// while in C++ mode.
62 struct Scanner {
Scanner__anon1b39d32d0111::Scanner63 Scanner(StringRef Input,
64 SmallVectorImpl<dependency_directives_scan::Token> &Tokens,
65 DiagnosticsEngine *Diags, SourceLocation InputSourceLoc)
66 : Input(Input), Tokens(Tokens), Diags(Diags),
67 InputSourceLoc(InputSourceLoc), LangOpts(getLangOptsForDepScanning()),
68 TheLexer(InputSourceLoc, LangOpts, Input.begin(), Input.begin(),
69 Input.end()) {}
70
getLangOptsForDepScanning__anon1b39d32d0111::Scanner71 static LangOptions getLangOptsForDepScanning() {
72 LangOptions LangOpts;
73 // Set the lexer to use 'tok::at' for '@', instead of 'tok::unknown'.
74 LangOpts.ObjC = true;
75 LangOpts.LineComment = true;
76 LangOpts.RawStringLiterals = true;
77 // FIXME: we do not enable C11 or C++11, so we are missing u/u8/U"".
78 return LangOpts;
79 }
80
81 /// Lex the provided source and emit the directive tokens.
82 ///
83 /// \returns True on error.
84 bool scan(SmallVectorImpl<Directive> &Directives);
85
86 private:
87 /// Lexes next token and advances \p First and the \p Lexer.
88 [[nodiscard]] dependency_directives_scan::Token &
89 lexToken(const char *&First, const char *const End);
90
91 [[nodiscard]] dependency_directives_scan::Token &
92 lexIncludeFilename(const char *&First, const char *const End);
93
94 void skipLine(const char *&First, const char *const End);
95 void skipDirective(StringRef Name, const char *&First, const char *const End);
96
97 /// Returns the spelling of a string literal or identifier after performing
98 /// any processing needed to handle \c clang::Token::NeedsCleaning.
99 StringRef cleanStringIfNeeded(const dependency_directives_scan::Token &Tok);
100
101 /// Lexes next token and if it is identifier returns its string, otherwise
102 /// it skips the current line and returns \p std::nullopt.
103 ///
104 /// In any case (whatever the token kind) \p First and the \p Lexer will
105 /// advance beyond the token.
106 [[nodiscard]] std::optional<StringRef>
107 tryLexIdentifierOrSkipLine(const char *&First, const char *const End);
108
109 /// Used when it is certain that next token is an identifier.
110 [[nodiscard]] StringRef lexIdentifier(const char *&First,
111 const char *const End);
112
113 /// Lexes next token and returns true iff it is an identifier that matches \p
114 /// Id, otherwise it skips the current line and returns false.
115 ///
116 /// In any case (whatever the token kind) \p First and the \p Lexer will
117 /// advance beyond the token.
118 [[nodiscard]] bool isNextIdentifierOrSkipLine(StringRef Id,
119 const char *&First,
120 const char *const End);
121
122 /// Lexes next token and returns true iff it matches the kind \p K.
123 /// Otherwise it skips the current line and returns false.
124 ///
125 /// In any case (whatever the token kind) \p First and the \p Lexer will
126 /// advance beyond the token.
127 [[nodiscard]] bool isNextTokenOrSkipLine(tok::TokenKind K, const char *&First,
128 const char *const End);
129
130 /// Lexes next token and if it is string literal, returns its string.
131 /// Otherwise, it skips the current line and returns \p std::nullopt.
132 ///
133 /// In any case (whatever the token kind) \p First and the \p Lexer will
134 /// advance beyond the token.
135 [[nodiscard]] std::optional<StringRef>
136 tryLexStringLiteralOrSkipLine(const char *&First, const char *const End);
137
138 [[nodiscard]] bool scanImpl(const char *First, const char *const End);
139 [[nodiscard]] bool lexPPLine(const char *&First, const char *const End);
140 [[nodiscard]] bool lexAt(const char *&First, const char *const End);
141 [[nodiscard]] bool lexModule(const char *&First, const char *const End);
142 [[nodiscard]] bool lexDefine(const char *HashLoc, const char *&First,
143 const char *const End);
144 [[nodiscard]] bool lexPragma(const char *&First, const char *const End);
145 [[nodiscard]] bool lex_Pragma(const char *&First, const char *const End);
146 [[nodiscard]] bool lexEndif(const char *&First, const char *const End);
147 [[nodiscard]] bool lexDefault(DirectiveKind Kind, const char *&First,
148 const char *const End);
149 [[nodiscard]] bool lexModuleDirectiveBody(DirectiveKind Kind,
150 const char *&First,
151 const char *const End);
152 void lexPPDirectiveBody(const char *&First, const char *const End);
153
pushDirective__anon1b39d32d0111::Scanner154 DirectiveWithTokens &pushDirective(DirectiveKind Kind) {
155 Tokens.append(CurDirToks);
156 DirsWithToks.emplace_back(Kind, CurDirToks.size());
157 CurDirToks.clear();
158 return DirsWithToks.back();
159 }
popDirective__anon1b39d32d0111::Scanner160 void popDirective() {
161 Tokens.pop_back_n(DirsWithToks.pop_back_val().NumTokens);
162 }
topDirective__anon1b39d32d0111::Scanner163 DirectiveKind topDirective() const {
164 return DirsWithToks.empty() ? pp_none : DirsWithToks.back().Kind;
165 }
166
getOffsetAt__anon1b39d32d0111::Scanner167 unsigned getOffsetAt(const char *CurPtr) const {
168 return CurPtr - Input.data();
169 }
170
171 /// Reports a diagnostic if the diagnostic engine is provided. Always returns
172 /// true at the end.
173 bool reportError(const char *CurPtr, unsigned Err);
174
175 StringMap<char> SplitIds;
176 StringRef Input;
177 SmallVectorImpl<dependency_directives_scan::Token> &Tokens;
178 DiagnosticsEngine *Diags;
179 SourceLocation InputSourceLoc;
180
181 const char *LastTokenPtr = nullptr;
182 /// Keeps track of the tokens for the currently lexed directive. Once a
183 /// directive is fully lexed and "committed" then the tokens get appended to
184 /// \p Tokens and \p CurDirToks is cleared for the next directive.
185 SmallVector<dependency_directives_scan::Token, 32> CurDirToks;
186 /// The directives that were lexed along with the number of tokens that each
187 /// directive contains. The tokens of all the directives are kept in \p Tokens
188 /// vector, in the same order as the directives order in \p DirsWithToks.
189 SmallVector<DirectiveWithTokens, 64> DirsWithToks;
190 LangOptions LangOpts;
191 Lexer TheLexer;
192 };
193
194 } // end anonymous namespace
195
reportError(const char * CurPtr,unsigned Err)196 bool Scanner::reportError(const char *CurPtr, unsigned Err) {
197 if (!Diags)
198 return true;
199 assert(CurPtr >= Input.data() && "invalid buffer ptr");
200 Diags->Report(InputSourceLoc.getLocWithOffset(getOffsetAt(CurPtr)), Err);
201 return true;
202 }
203
skipOverSpaces(const char * & First,const char * const End)204 static void skipOverSpaces(const char *&First, const char *const End) {
205 while (First != End && isHorizontalWhitespace(*First))
206 ++First;
207 }
208
209 // Move back by one character, skipping escaped newlines (backslash + \n)
previousChar(const char * First,const char * & Current)210 static char previousChar(const char *First, const char *&Current) {
211 assert(Current > First);
212 --Current;
213 while (Current > First && isVerticalWhitespace(*Current)) {
214 // Check if the previous character is a backslash
215 if (Current > First && *(Current - 1) == '\\') {
216 // Use Lexer's getEscapedNewLineSize to get the size of the escaped
217 // newline
218 unsigned EscapeSize = Lexer::getEscapedNewLineSize(Current);
219 if (EscapeSize > 0) {
220 // Skip back over the entire escaped newline sequence (backslash +
221 // newline)
222 Current -= (1 + EscapeSize);
223 } else {
224 break;
225 }
226 } else {
227 break;
228 }
229 }
230 return *Current;
231 }
232
isRawStringLiteral(const char * First,const char * Current)233 [[nodiscard]] static bool isRawStringLiteral(const char *First,
234 const char *Current) {
235 assert(First <= Current);
236
237 // Check if we can even back up.
238 if (*Current != '"' || First == Current)
239 return false;
240
241 // Check for an "R".
242 if (previousChar(First, Current) != 'R')
243 return false;
244 if (First == Current ||
245 !isAsciiIdentifierContinue(previousChar(First, Current)))
246 return true;
247
248 // Check for a prefix of "u", "U", or "L".
249 if (*Current == 'u' || *Current == 'U' || *Current == 'L')
250 return First == Current ||
251 !isAsciiIdentifierContinue(previousChar(First, Current));
252
253 // Check for a prefix of "u8".
254 if (*Current != '8' || First == Current ||
255 previousChar(First, Current) != 'u')
256 return false;
257 return First == Current ||
258 !isAsciiIdentifierContinue(previousChar(First, Current));
259 }
260
skipRawString(const char * & First,const char * const End)261 static void skipRawString(const char *&First, const char *const End) {
262 assert(First[0] == '"');
263
264 const char *Last = ++First;
265 while (Last != End && *Last != '(')
266 ++Last;
267 if (Last == End) {
268 First = Last; // Hit the end... just give up.
269 return;
270 }
271
272 StringRef Terminator(First, Last - First);
273 for (;;) {
274 // Move First to just past the next ")".
275 First = Last;
276 while (First != End && *First != ')')
277 ++First;
278 if (First == End)
279 return;
280 ++First;
281
282 // Look ahead for the terminator sequence.
283 Last = First;
284 while (Last != End && size_t(Last - First) < Terminator.size() &&
285 Terminator[Last - First] == *Last)
286 ++Last;
287
288 // Check if we hit it (or the end of the file).
289 if (Last == End) {
290 First = Last;
291 return;
292 }
293 if (size_t(Last - First) < Terminator.size())
294 continue;
295 if (*Last != '"')
296 continue;
297 First = Last + 1;
298 return;
299 }
300 }
301
302 // Returns the length of EOL, either 0 (no end-of-line), 1 (\n) or 2 (\r\n)
isEOL(const char * First,const char * const End)303 static unsigned isEOL(const char *First, const char *const End) {
304 if (First == End)
305 return 0;
306 if (End - First > 1 && isVerticalWhitespace(First[0]) &&
307 isVerticalWhitespace(First[1]) && First[0] != First[1])
308 return 2;
309 return !!isVerticalWhitespace(First[0]);
310 }
311
skipString(const char * & First,const char * const End)312 static void skipString(const char *&First, const char *const End) {
313 assert(*First == '\'' || *First == '"' || *First == '<');
314 const char Terminator = *First == '<' ? '>' : *First;
315 for (++First; First != End && *First != Terminator; ++First) {
316 // String and character literals don't extend past the end of the line.
317 if (isVerticalWhitespace(*First))
318 return;
319 if (*First != '\\')
320 continue;
321 // Skip past backslash to the next character. This ensures that the
322 // character right after it is skipped as well, which matters if it's
323 // the terminator.
324 if (++First == End)
325 return;
326 if (!isWhitespace(*First))
327 continue;
328 // Whitespace after the backslash might indicate a line continuation.
329 const char *FirstAfterBackslashPastSpace = First;
330 skipOverSpaces(FirstAfterBackslashPastSpace, End);
331 if (unsigned NLSize = isEOL(FirstAfterBackslashPastSpace, End)) {
332 // Advance the character pointer to the next line for the next
333 // iteration.
334 First = FirstAfterBackslashPastSpace + NLSize - 1;
335 }
336 }
337 if (First != End)
338 ++First; // Finish off the string.
339 }
340
341 // Returns the length of the skipped newline
skipNewline(const char * & First,const char * End)342 static unsigned skipNewline(const char *&First, const char *End) {
343 if (First == End)
344 return 0;
345 assert(isVerticalWhitespace(*First));
346 unsigned Len = isEOL(First, End);
347 assert(Len && "expected newline");
348 First += Len;
349 return Len;
350 }
351
skipToNewlineRaw(const char * & First,const char * const End)352 static void skipToNewlineRaw(const char *&First, const char *const End) {
353 for (;;) {
354 if (First == End)
355 return;
356
357 unsigned Len = isEOL(First, End);
358 if (Len)
359 return;
360
361 char LastNonWhitespace = ' ';
362 do {
363 if (!isHorizontalWhitespace(*First))
364 LastNonWhitespace = *First;
365 if (++First == End)
366 return;
367 Len = isEOL(First, End);
368 } while (!Len);
369
370 if (LastNonWhitespace != '\\')
371 return;
372
373 First += Len;
374 // Keep skipping lines...
375 }
376 }
377
skipLineComment(const char * & First,const char * const End)378 static void skipLineComment(const char *&First, const char *const End) {
379 assert(First[0] == '/' && First[1] == '/');
380 First += 2;
381 skipToNewlineRaw(First, End);
382 }
383
skipBlockComment(const char * & First,const char * const End)384 static void skipBlockComment(const char *&First, const char *const End) {
385 assert(First[0] == '/' && First[1] == '*');
386 if (End - First < 4) {
387 First = End;
388 return;
389 }
390 for (First += 3; First != End; ++First)
391 if (First[-1] == '*' && First[0] == '/') {
392 ++First;
393 return;
394 }
395 }
396
397 /// \returns True if the current single quotation mark character is a C++14
398 /// digit separator.
isQuoteCppDigitSeparator(const char * const Start,const char * const Cur,const char * const End)399 static bool isQuoteCppDigitSeparator(const char *const Start,
400 const char *const Cur,
401 const char *const End) {
402 assert(*Cur == '\'' && "expected quotation character");
403 // skipLine called in places where we don't expect a valid number
404 // body before `start` on the same line, so always return false at the start.
405 if (Start == Cur)
406 return false;
407 // The previous character must be a valid PP number character.
408 // Make sure that the L, u, U, u8 prefixes don't get marked as a
409 // separator though.
410 char Prev = *(Cur - 1);
411 if (Prev == 'L' || Prev == 'U' || Prev == 'u')
412 return false;
413 if (Prev == '8' && (Cur - 1 != Start) && *(Cur - 2) == 'u')
414 return false;
415 if (!isPreprocessingNumberBody(Prev))
416 return false;
417 // The next character should be a valid identifier body character.
418 return (Cur + 1) < End && isAsciiIdentifierContinue(*(Cur + 1));
419 }
420
skipLine(const char * & First,const char * const End)421 void Scanner::skipLine(const char *&First, const char *const End) {
422 for (;;) {
423 assert(First <= End);
424 if (First == End)
425 return;
426
427 if (isVerticalWhitespace(*First)) {
428 skipNewline(First, End);
429 return;
430 }
431 const char *Start = First;
432 // Use `LastNonWhitespace`to track if a line-continuation has ever been seen
433 // before a new-line character:
434 char LastNonWhitespace = ' ';
435 while (First != End && !isVerticalWhitespace(*First)) {
436 // Iterate over strings correctly to avoid comments and newlines.
437 if (*First == '"' ||
438 (*First == '\'' && !isQuoteCppDigitSeparator(Start, First, End))) {
439 LastTokenPtr = First;
440 if (isRawStringLiteral(Start, First))
441 skipRawString(First, End);
442 else
443 skipString(First, End);
444 continue;
445 }
446
447 // Continue on the same line if an EOL is preceded with backslash
448 if (First + 1 < End && *First == '\\') {
449 if (unsigned Len = isEOL(First + 1, End)) {
450 First += 1 + Len;
451 continue;
452 }
453 }
454
455 // Iterate over comments correctly.
456 if (*First != '/' || End - First < 2) {
457 LastTokenPtr = First;
458 if (!isWhitespace(*First))
459 LastNonWhitespace = *First;
460 ++First;
461 continue;
462 }
463
464 if (First[1] == '/') {
465 // "//...".
466 skipLineComment(First, End);
467 continue;
468 }
469
470 if (First[1] != '*') {
471 LastTokenPtr = First;
472 if (!isWhitespace(*First))
473 LastNonWhitespace = *First;
474 ++First;
475 continue;
476 }
477
478 // "/*...*/".
479 skipBlockComment(First, End);
480 }
481 if (First == End)
482 return;
483
484 // Skip over the newline.
485 skipNewline(First, End);
486
487 if (LastNonWhitespace != '\\')
488 break;
489 }
490 }
491
skipDirective(StringRef Name,const char * & First,const char * const End)492 void Scanner::skipDirective(StringRef Name, const char *&First,
493 const char *const End) {
494 if (llvm::StringSwitch<bool>(Name)
495 .Case("warning", true)
496 .Case("error", true)
497 .Default(false))
498 // Do not process quotes or comments.
499 skipToNewlineRaw(First, End);
500 else
501 skipLine(First, End);
502 }
503
skipWhitespace(const char * & First,const char * const End)504 static void skipWhitespace(const char *&First, const char *const End) {
505 for (;;) {
506 assert(First <= End);
507 skipOverSpaces(First, End);
508
509 if (End - First < 2)
510 return;
511
512 if (*First == '\\') {
513 const char *Ptr = First + 1;
514 while (Ptr < End && isHorizontalWhitespace(*Ptr))
515 ++Ptr;
516 if (Ptr != End && isVerticalWhitespace(*Ptr)) {
517 skipNewline(Ptr, End);
518 First = Ptr;
519 continue;
520 }
521 return;
522 }
523
524 // Check for a non-comment character.
525 if (First[0] != '/')
526 return;
527
528 // "// ...".
529 if (First[1] == '/') {
530 skipLineComment(First, End);
531 return;
532 }
533
534 // Cannot be a comment.
535 if (First[1] != '*')
536 return;
537
538 // "/*...*/".
539 skipBlockComment(First, End);
540 }
541 }
542
lexModuleDirectiveBody(DirectiveKind Kind,const char * & First,const char * const End)543 bool Scanner::lexModuleDirectiveBody(DirectiveKind Kind, const char *&First,
544 const char *const End) {
545 const char *DirectiveLoc = Input.data() + CurDirToks.front().Offset;
546 for (;;) {
547 // Keep a copy of the First char incase it needs to be reset.
548 const char *Previous = First;
549 const dependency_directives_scan::Token &Tok = lexToken(First, End);
550 if ((Tok.is(tok::hash) || Tok.is(tok::at)) &&
551 (Tok.Flags & clang::Token::StartOfLine)) {
552 CurDirToks.pop_back();
553 First = Previous;
554 return false;
555 }
556 if (Tok.is(tok::eof))
557 return reportError(
558 DirectiveLoc,
559 diag::err_dep_source_scanner_missing_semi_after_at_import);
560 if (Tok.is(tok::semi))
561 break;
562 }
563 pushDirective(Kind);
564 skipWhitespace(First, End);
565 if (First == End)
566 return false;
567 if (!isVerticalWhitespace(*First))
568 return reportError(
569 DirectiveLoc, diag::err_dep_source_scanner_unexpected_tokens_at_import);
570 skipNewline(First, End);
571 return false;
572 }
573
lexToken(const char * & First,const char * const End)574 dependency_directives_scan::Token &Scanner::lexToken(const char *&First,
575 const char *const End) {
576 clang::Token Tok;
577 TheLexer.LexFromRawLexer(Tok);
578 First = Input.data() + TheLexer.getCurrentBufferOffset();
579 assert(First <= End);
580
581 unsigned Offset = TheLexer.getCurrentBufferOffset() - Tok.getLength();
582 CurDirToks.emplace_back(Offset, Tok.getLength(), Tok.getKind(),
583 Tok.getFlags());
584 return CurDirToks.back();
585 }
586
587 dependency_directives_scan::Token &
lexIncludeFilename(const char * & First,const char * const End)588 Scanner::lexIncludeFilename(const char *&First, const char *const End) {
589 clang::Token Tok;
590 TheLexer.LexIncludeFilename(Tok);
591 First = Input.data() + TheLexer.getCurrentBufferOffset();
592 assert(First <= End);
593
594 unsigned Offset = TheLexer.getCurrentBufferOffset() - Tok.getLength();
595 CurDirToks.emplace_back(Offset, Tok.getLength(), Tok.getKind(),
596 Tok.getFlags());
597 return CurDirToks.back();
598 }
599
lexPPDirectiveBody(const char * & First,const char * const End)600 void Scanner::lexPPDirectiveBody(const char *&First, const char *const End) {
601 while (true) {
602 const dependency_directives_scan::Token &Tok = lexToken(First, End);
603 if (Tok.is(tok::eod) || Tok.is(tok::eof))
604 break;
605 }
606 }
607
608 StringRef
cleanStringIfNeeded(const dependency_directives_scan::Token & Tok)609 Scanner::cleanStringIfNeeded(const dependency_directives_scan::Token &Tok) {
610 bool NeedsCleaning = Tok.Flags & clang::Token::NeedsCleaning;
611 if (LLVM_LIKELY(!NeedsCleaning))
612 return Input.slice(Tok.Offset, Tok.getEnd());
613
614 SmallString<64> Spelling;
615 Spelling.resize(Tok.Length);
616
617 // FIXME: C++11 raw string literals need special handling (see getSpellingSlow
618 // in the Lexer). Currently we cannot see them due to our LangOpts.
619
620 unsigned SpellingLength = 0;
621 const char *BufPtr = Input.begin() + Tok.Offset;
622 const char *AfterIdent = Input.begin() + Tok.getEnd();
623 while (BufPtr < AfterIdent) {
624 auto [Char, Size] = Lexer::getCharAndSizeNoWarn(BufPtr, LangOpts);
625 Spelling[SpellingLength++] = Char;
626 BufPtr += Size;
627 }
628
629 return SplitIds.try_emplace(StringRef(Spelling.begin(), SpellingLength), 0)
630 .first->first();
631 }
632
633 std::optional<StringRef>
tryLexIdentifierOrSkipLine(const char * & First,const char * const End)634 Scanner::tryLexIdentifierOrSkipLine(const char *&First, const char *const End) {
635 const dependency_directives_scan::Token &Tok = lexToken(First, End);
636 if (Tok.isNot(tok::raw_identifier)) {
637 if (!Tok.is(tok::eod))
638 skipLine(First, End);
639 return std::nullopt;
640 }
641
642 return cleanStringIfNeeded(Tok);
643 }
644
lexIdentifier(const char * & First,const char * const End)645 StringRef Scanner::lexIdentifier(const char *&First, const char *const End) {
646 std::optional<StringRef> Id = tryLexIdentifierOrSkipLine(First, End);
647 assert(Id && "expected identifier token");
648 return *Id;
649 }
650
isNextIdentifierOrSkipLine(StringRef Id,const char * & First,const char * const End)651 bool Scanner::isNextIdentifierOrSkipLine(StringRef Id, const char *&First,
652 const char *const End) {
653 if (std::optional<StringRef> FoundId =
654 tryLexIdentifierOrSkipLine(First, End)) {
655 if (*FoundId == Id)
656 return true;
657 skipLine(First, End);
658 }
659 return false;
660 }
661
isNextTokenOrSkipLine(tok::TokenKind K,const char * & First,const char * const End)662 bool Scanner::isNextTokenOrSkipLine(tok::TokenKind K, const char *&First,
663 const char *const End) {
664 const dependency_directives_scan::Token &Tok = lexToken(First, End);
665 if (Tok.is(K))
666 return true;
667 skipLine(First, End);
668 return false;
669 }
670
671 std::optional<StringRef>
tryLexStringLiteralOrSkipLine(const char * & First,const char * const End)672 Scanner::tryLexStringLiteralOrSkipLine(const char *&First,
673 const char *const End) {
674 const dependency_directives_scan::Token &Tok = lexToken(First, End);
675 if (!tok::isStringLiteral(Tok.Kind)) {
676 if (!Tok.is(tok::eod))
677 skipLine(First, End);
678 return std::nullopt;
679 }
680
681 return cleanStringIfNeeded(Tok);
682 }
683
lexAt(const char * & First,const char * const End)684 bool Scanner::lexAt(const char *&First, const char *const End) {
685 // Handle "@import".
686
687 // Lex '@'.
688 const dependency_directives_scan::Token &AtTok = lexToken(First, End);
689 assert(AtTok.is(tok::at));
690 (void)AtTok;
691
692 if (!isNextIdentifierOrSkipLine("import", First, End))
693 return false;
694 return lexModuleDirectiveBody(decl_at_import, First, End);
695 }
696
lexModule(const char * & First,const char * const End)697 bool Scanner::lexModule(const char *&First, const char *const End) {
698 StringRef Id = lexIdentifier(First, End);
699 bool Export = false;
700 if (Id == "export") {
701 Export = true;
702 std::optional<StringRef> NextId = tryLexIdentifierOrSkipLine(First, End);
703 if (!NextId)
704 return false;
705 Id = *NextId;
706 }
707
708 if (Id != "module" && Id != "import") {
709 skipLine(First, End);
710 return false;
711 }
712
713 skipWhitespace(First, End);
714
715 // Ignore this as a module directive if the next character can't be part of
716 // an import.
717
718 switch (*First) {
719 case ':': {
720 // `module :` is never the start of a valid module declaration.
721 if (Id == "module") {
722 skipLine(First, End);
723 return false;
724 }
725 // A module partition starts with exactly one ':'. If we have '::', this is
726 // a scope resolution instead and shouldn't be recognized as a directive
727 // per P1857R3.
728 if (First + 1 != End && First[1] == ':') {
729 skipLine(First, End);
730 return false;
731 }
732 // `import:(type)name` is a valid ObjC method decl, so check one more token.
733 (void)lexToken(First, End);
734 if (!tryLexIdentifierOrSkipLine(First, End))
735 return false;
736 break;
737 }
738 case '<':
739 case '"':
740 break;
741 default:
742 if (!isAsciiIdentifierContinue(*First)) {
743 skipLine(First, End);
744 return false;
745 }
746 }
747
748 TheLexer.seek(getOffsetAt(First), /*IsAtStartOfLine*/ false);
749
750 DirectiveKind Kind;
751 if (Id == "module")
752 Kind = Export ? cxx_export_module_decl : cxx_module_decl;
753 else
754 Kind = Export ? cxx_export_import_decl : cxx_import_decl;
755
756 return lexModuleDirectiveBody(Kind, First, End);
757 }
758
lex_Pragma(const char * & First,const char * const End)759 bool Scanner::lex_Pragma(const char *&First, const char *const End) {
760 if (!isNextTokenOrSkipLine(tok::l_paren, First, End))
761 return false;
762
763 std::optional<StringRef> Str = tryLexStringLiteralOrSkipLine(First, End);
764
765 if (!Str || !isNextTokenOrSkipLine(tok::r_paren, First, End))
766 return false;
767
768 SmallString<64> Buffer(*Str);
769 prepare_PragmaString(Buffer);
770
771 // Use a new scanner instance since the tokens will be inside the allocated
772 // string. We should already have captured all the relevant tokens in the
773 // current scanner.
774 SmallVector<dependency_directives_scan::Token> DiscardTokens;
775 const char *Begin = Buffer.c_str();
776 Scanner PragmaScanner{StringRef(Begin, Buffer.size()), DiscardTokens, Diags,
777 InputSourceLoc};
778
779 PragmaScanner.TheLexer.setParsingPreprocessorDirective(true);
780 if (PragmaScanner.lexPragma(Begin, Buffer.end()))
781 return true;
782
783 DirectiveKind K = PragmaScanner.topDirective();
784 if (K == pp_none) {
785 skipLine(First, End);
786 return false;
787 }
788
789 assert(Begin == Buffer.end());
790 pushDirective(K);
791 return false;
792 }
793
lexPragma(const char * & First,const char * const End)794 bool Scanner::lexPragma(const char *&First, const char *const End) {
795 std::optional<StringRef> FoundId = tryLexIdentifierOrSkipLine(First, End);
796 if (!FoundId)
797 return false;
798
799 StringRef Id = *FoundId;
800 auto Kind = llvm::StringSwitch<DirectiveKind>(Id)
801 .Case("once", pp_pragma_once)
802 .Case("push_macro", pp_pragma_push_macro)
803 .Case("pop_macro", pp_pragma_pop_macro)
804 .Case("include_alias", pp_pragma_include_alias)
805 .Default(pp_none);
806 if (Kind != pp_none) {
807 lexPPDirectiveBody(First, End);
808 pushDirective(Kind);
809 return false;
810 }
811
812 if (Id != "clang") {
813 skipLine(First, End);
814 return false;
815 }
816
817 FoundId = tryLexIdentifierOrSkipLine(First, End);
818 if (!FoundId)
819 return false;
820 Id = *FoundId;
821
822 // #pragma clang system_header
823 if (Id == "system_header") {
824 lexPPDirectiveBody(First, End);
825 pushDirective(pp_pragma_system_header);
826 return false;
827 }
828
829 if (Id != "module") {
830 skipLine(First, End);
831 return false;
832 }
833
834 // #pragma clang module.
835 if (!isNextIdentifierOrSkipLine("import", First, End))
836 return false;
837
838 // #pragma clang module import.
839 lexPPDirectiveBody(First, End);
840 pushDirective(pp_pragma_import);
841 return false;
842 }
843
lexEndif(const char * & First,const char * const End)844 bool Scanner::lexEndif(const char *&First, const char *const End) {
845 // Strip out "#else" if it's empty.
846 if (topDirective() == pp_else)
847 popDirective();
848
849 // If "#ifdef" is empty, strip it and skip the "#endif".
850 //
851 // FIXME: Once/if Clang starts disallowing __has_include in macro expansions,
852 // we can skip empty `#if` and `#elif` blocks as well after scanning for a
853 // literal __has_include in the condition. Even without that rule we could
854 // drop the tokens if we scan for identifiers in the condition and find none.
855 if (topDirective() == pp_ifdef || topDirective() == pp_ifndef) {
856 popDirective();
857 skipLine(First, End);
858 return false;
859 }
860
861 return lexDefault(pp_endif, First, End);
862 }
863
lexDefault(DirectiveKind Kind,const char * & First,const char * const End)864 bool Scanner::lexDefault(DirectiveKind Kind, const char *&First,
865 const char *const End) {
866 lexPPDirectiveBody(First, End);
867 pushDirective(Kind);
868 return false;
869 }
870
/// Quick filter: returns true if \p First could start a line the scanner cares
/// about ('#' directive, '@import', "import"/"export"/"module", or "_Pragma").
static bool isStartOfRelevantLine(char First) {
  switch (First) {
  case '#':
  case '@':
  case 'i':
  case 'e':
  case 'm':
  case '_':
    return true;
  }
  return false;
}
883
/// Scan one line of input. \p First points at the start of the line and is
/// advanced past everything this call consumed. Returns true if an error
/// occurred while lexing the line.
bool Scanner::lexPPLine(const char *&First, const char *const End) {
  assert(First != End);

  skipWhitespace(First, End);
  assert(First <= End);
  if (First == End)
    return false;

  // Fast reject: lines whose first character cannot begin a directive or a
  // module/import keyword are skipped wholesale without raw-lexing them.
  if (!isStartOfRelevantLine(*First)) {
    skipLine(First, End);
    assert(First <= End);
    return false;
  }

  // Remember where the last potentially-relevant line started; scan() uses
  // this to decide whether to emit tokens_present_before_eof.
  LastTokenPtr = First;

  // Re-position the raw lexer at this offset so lexed tokens carry correct
  // source offsets.
  TheLexer.seek(getOffsetAt(First), /*IsAtStartOfLine*/ true);

  auto ScEx1 = make_scope_exit([&]() {
    /// Clear Scanner's CurDirToks before returning, in case we didn't push a
    /// new directive.
    CurDirToks.clear();
  });

  // Handle "@import".
  if (*First == '@')
    return lexAt(First, End);

  // Handle module directives for C++20 modules.
  if (*First == 'i' || *First == 'e' || *First == 'm')
    return lexModule(First, End);

  // Handle "_Pragma(...)" occurring outside a '#' directive; any other line
  // starting with '_' is skipped by isNextIdentifierOrSkipLine.
  if (*First == '_') {
    if (isNextIdentifierOrSkipLine("_Pragma", First, End))
      return lex_Pragma(First, End);
    return false;
  }

  // Handle preprocessing directives.

  TheLexer.setParsingPreprocessorDirective(true);
  auto ScEx2 = make_scope_exit(
      [&]() { TheLexer.setParsingPreprocessorDirective(false); });

  // Lex '#'.
  const dependency_directives_scan::Token &HashTok = lexToken(First, End);
  if (HashTok.is(tok::hashhash)) {
    // A \p tok::hashhash at this location is passed by the preprocessor to the
    // parser to interpret, like any other token. So for dependency scanning
    // skip it like a normal token not affecting the preprocessor.
    skipLine(First, End);
    assert(First <= End);
    return false;
  }
  assert(HashTok.is(tok::hash));
  (void)HashTok;

  // Lex the directive name; if there is no identifier after '#', the helper
  // skips the rest of the line and we report no directive.
  std::optional<StringRef> FoundId = tryLexIdentifierOrSkipLine(First, End);
  if (!FoundId)
    return false;

  StringRef Id = *FoundId;

  if (Id == "pragma")
    return lexPragma(First, End);

  // Map the directive name to its DirectiveKind; unknown names map to pp_none.
  auto Kind = llvm::StringSwitch<DirectiveKind>(Id)
                  .Case("include", pp_include)
                  .Case("__include_macros", pp___include_macros)
                  .Case("define", pp_define)
                  .Case("undef", pp_undef)
                  .Case("import", pp_import)
                  .Case("include_next", pp_include_next)
                  .Case("if", pp_if)
                  .Case("ifdef", pp_ifdef)
                  .Case("ifndef", pp_ifndef)
                  .Case("elif", pp_elif)
                  .Case("elifdef", pp_elifdef)
                  .Case("elifndef", pp_elifndef)
                  .Case("else", pp_else)
                  .Case("endif", pp_endif)
                  .Default(pp_none);
  if (Kind == pp_none) {
    // Not a directive the scanner tracks: discard it entirely.
    skipDirective(Id, First, End);
    return false;
  }

  if (Kind == pp_endif)
    return lexEndif(First, End);

  switch (Kind) {
  case pp_include:
  case pp___include_macros:
  case pp_include_next:
  case pp_import:
    // Ignore missing filenames in include or import directives.
    if (lexIncludeFilename(First, End).is(tok::eod)) {
      return false;
    }
    break;
  default:
    break;
  }

  // Everything else.
  return lexDefault(Kind, First, End);
}
991
/// Advance \p First past a leading UTF-8 byte order mark (the three-byte
/// sequence EF BB BF), if present. Otherwise leave \p First untouched.
static void skipUTF8ByteOrderMark(const char *&First, const char *const End) {
  if (End - First < 3)
    return;
  if (First[0] != '\xef' || First[1] != '\xbb' || First[2] != '\xbf')
    return;
  First += 3;
}
997
scanImpl(const char * First,const char * const End)998 bool Scanner::scanImpl(const char *First, const char *const End) {
999 skipUTF8ByteOrderMark(First, End);
1000 while (First != End)
1001 if (lexPPLine(First, End))
1002 return true;
1003 return false;
1004 }
1005
/// Run the scan over the whole input and assemble the final list of
/// directives. Returns true on error; on success the directive stream is
/// terminated with an optional tokens_present_before_eof marker followed by
/// pp_eof.
bool Scanner::scan(SmallVectorImpl<Directive> &Directives) {
  bool Error = scanImpl(Input.begin(), Input.end());

  if (!Error) {
    // Add an EOF on success.
    // Emit tokens_present_before_eof when the last relevant line started
    // after the last recorded directive token, i.e. there was trailing text
    // that produced no directive.
    if (LastTokenPtr &&
        (Tokens.empty() || LastTokenPtr > Input.begin() + Tokens.back().Offset))
      pushDirective(tokens_present_before_eof);
    pushDirective(pp_eof);
  }

  // Slice the flat token buffer back into per-directive ranges: each
  // DirectiveWithTokens only records how many consecutive tokens it owns,
  // in order, so walk the buffer front-to-back handing out sub-ranges.
  ArrayRef<dependency_directives_scan::Token> RemainingTokens = Tokens;
  for (const DirectiveWithTokens &DirWithToks : DirsWithToks) {
    assert(RemainingTokens.size() >= DirWithToks.NumTokens);
    Directives.emplace_back(DirWithToks.Kind,
                            RemainingTokens.take_front(DirWithToks.NumTokens));
    RemainingTokens = RemainingTokens.drop_front(DirWithToks.NumTokens);
  }
  // Every token must have been claimed by exactly one directive.
  assert(RemainingTokens.empty());

  return Error;
}
1028
/// Public entry point: scan \p Input and fill \p Tokens and \p Directives
/// with the minimized directive stream. Returns true if an error occurred.
bool clang::scanSourceForDependencyDirectives(
    StringRef Input, SmallVectorImpl<dependency_directives_scan::Token> &Tokens,
    SmallVectorImpl<Directive> &Directives, DiagnosticsEngine *Diags,
    SourceLocation InputSourceLoc) {
  // The Scanner lexes tokens directly into \p Tokens; the produced Directives
  // reference sub-ranges of that buffer.
  return Scanner(Input, Tokens, Diags, InputSourceLoc).scan(Directives);
}
1035
printDependencyDirectivesAsSource(StringRef Source,ArrayRef<dependency_directives_scan::Directive> Directives,llvm::raw_ostream & OS)1036 void clang::printDependencyDirectivesAsSource(
1037 StringRef Source,
1038 ArrayRef<dependency_directives_scan::Directive> Directives,
1039 llvm::raw_ostream &OS) {
1040 // Add a space separator where it is convenient for testing purposes.
1041 auto needsSpaceSeparator =
1042 [](tok::TokenKind Prev,
1043 const dependency_directives_scan::Token &Tok) -> bool {
1044 if (Prev == Tok.Kind)
1045 return !Tok.isOneOf(tok::l_paren, tok::r_paren, tok::l_square,
1046 tok::r_square);
1047 if (Prev == tok::raw_identifier &&
1048 Tok.isOneOf(tok::hash, tok::numeric_constant, tok::string_literal,
1049 tok::char_constant, tok::header_name))
1050 return true;
1051 if (Prev == tok::r_paren &&
1052 Tok.isOneOf(tok::raw_identifier, tok::hash, tok::string_literal,
1053 tok::char_constant, tok::unknown))
1054 return true;
1055 if (Prev == tok::comma &&
1056 Tok.isOneOf(tok::l_paren, tok::string_literal, tok::less))
1057 return true;
1058 return false;
1059 };
1060
1061 for (const dependency_directives_scan::Directive &Directive : Directives) {
1062 if (Directive.Kind == tokens_present_before_eof)
1063 OS << "<TokBeforeEOF>";
1064 std::optional<tok::TokenKind> PrevTokenKind;
1065 for (const dependency_directives_scan::Token &Tok : Directive.Tokens) {
1066 if (PrevTokenKind && needsSpaceSeparator(*PrevTokenKind, Tok))
1067 OS << ' ';
1068 PrevTokenKind = Tok.Kind;
1069 OS << Source.slice(Tok.Offset, Tok.getEnd());
1070 }
1071 }
1072 }
1073