//===--- PPCaching.cpp - Handle caching lexed tokens ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements pieces of the Preprocessor interface that manage the
// caching of lexed tokens.
//
//===----------------------------------------------------------------------===//

#include "clang/Lex/Preprocessor.h"
using namespace clang;

// EnableBacktrackAtThisPos - From the point that this method is called, and
// until CommitBacktrackedTokens() or Backtrack() is called, the Preprocessor
// keeps track of the lexed tokens so that a subsequent Backtrack() call will
// make the Preprocessor re-lex the same tokens.
//
// Nested backtracks are allowed, meaning that EnableBacktrackAtThisPos can
// be called multiple times, and each CommitBacktrackedTokens/Backtrack call
// is matched with the EnableBacktrackAtThisPos calls in reverse order.
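//
// Illustrative caller-side sketch (the parser normally drives this through an
// RAII helper rather than calling these methods directly; the condition name
// "SpeculationSucceeded" is a placeholder):
//
//   PP.EnableBacktrackAtThisPos();
//   // ... speculatively consume tokens via PP.Lex() ...
//   if (SpeculationSucceeded)
//     PP.CommitBacktrackedTokens(); // Keep the consumed tokens.
//   else
//     PP.Backtrack();               // Re-lex from the saved position.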
void Preprocessor::EnableBacktrackAtThisPos() {
  assert(LexLevel == 0 && "cannot use lookahead while lexing");
  BacktrackPositions.push_back(CachedLexPos);
  EnterCachingLexMode();
}

// Disable the last EnableBacktrackAtThisPos call.
void Preprocessor::CommitBacktrackedTokens() {
  assert(!BacktrackPositions.empty()
         && "EnableBacktrackAtThisPos was not called!");
  BacktrackPositions.pop_back();
}

// Make Preprocessor re-lex the tokens that were lexed since
// EnableBacktrackAtThisPos() was previously called.
void Preprocessor::Backtrack() {
  assert(!BacktrackPositions.empty()
         && "EnableBacktrackAtThisPos was not called!");
  CachedLexPos = BacktrackPositions.back();
  BacktrackPositions.pop_back();
  recomputeCurLexerKind();
}

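// Lex a token for the caller while in caching lex mode. If there are tokens
// left in the cache, return the next one and mark it as re-injected.
// Otherwise lex a fresh token; if backtracking is enabled, the new token is
// also appended to the cache so that a later Backtrack() can replay it. When
// the cache is exhausted and no backtrack is pending, the cache is cleared
// and caching lex mode is left.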
void Preprocessor::CachingLex(Token &Result) {
  if (!InCachingLexMode())
    return;

  // The assert in EnterCachingLexMode should prevent this from happening.
  assert(LexLevel == 1 &&
         "should not use token caching within the preprocessor");

  if (CachedLexPos < CachedTokens.size()) {
    Result = CachedTokens[CachedLexPos++];
    Result.setFlag(Token::IsReinjected);
    return;
  }

  ExitCachingLexMode();
  Lex(Result);

  if (isBacktrackEnabled()) {
    // Cache the lexed token.
    EnterCachingLexModeUnchecked();
    CachedTokens.push_back(Result);
    ++CachedLexPos;
    return;
  }

  if (CachedLexPos < CachedTokens.size()) {
    EnterCachingLexModeUnchecked();
  } else {
    // All cached tokens were consumed.
    CachedTokens.clear();
    CachedLexPos = 0;
  }
}

void Preprocessor::EnterCachingLexMode() {
  // The caching layer sits on top of all the other lexers, so it's incorrect
  // to cache tokens while inside a nested lex action. The cached tokens would
  // be retained after returning to the enclosing lex action and, at best,
  // would appear at the wrong position in the token stream.
  assert(LexLevel == 0 &&
         "entered caching lex mode while lexing something else");

  if (InCachingLexMode()) {
    assert(CurLexerCallback == CLK_CachingLexer && "Unexpected lexer kind");
    return;
  }

  EnterCachingLexModeUnchecked();
}

void Preprocessor::EnterCachingLexModeUnchecked() {
  assert(CurLexerCallback != CLK_CachingLexer && "already in caching lex mode");
  PushIncludeMacroStack();
  CurLexerCallback = CLK_CachingLexer;
}

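// Lex and cache additional tokens until N tokens past CachedLexPos are
// available, then return a reference to the last of them (the token at index
// CachedLexPos + N - 1). LookAhead() calls this only when the requested token
// is not already in the cache, as the assert below checks.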
const Token &Preprocessor::PeekAhead(unsigned N) {
  assert(CachedLexPos + N > CachedTokens.size() && "Confused caching.");
  ExitCachingLexMode();
  for (size_t C = CachedLexPos + N - CachedTokens.size(); C > 0; --C) {
    CachedTokens.push_back(Token());
    Lex(CachedTokens.back());
  }
  EnterCachingLexMode();
  return CachedTokens.back();
}

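// Replace the run of cached tokens covered by the given annotation token
// (e.g. a C++ nested-name-specifier annotated as a single token) with the
// annotation token itself, so that a later backtrack replays the annotation
// instead of the original token sequence.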
void Preprocessor::AnnotatePreviousCachedTokens(const Token &Tok) {
  assert(Tok.isAnnotation() && "Expected annotation token");
  assert(CachedLexPos != 0 && "Expected to have some cached tokens");
  assert(CachedTokens[CachedLexPos-1].getLastLoc() == Tok.getAnnotationEndLoc()
         && "The annotation should be until the most recent cached token");

  // Start from the end of the cached tokens list and look for the token
  // that is the beginning of the annotation token.
  for (CachedTokensTy::size_type i = CachedLexPos; i != 0; --i) {
    CachedTokensTy::iterator AnnotBegin = CachedTokens.begin() + i - 1;
    if (AnnotBegin->getLocation() == Tok.getLocation()) {
      assert((BacktrackPositions.empty() || BacktrackPositions.back() <= i) &&
             "The backtrack pos points inside the annotated tokens!");
      // Replace the cached tokens with the single annotation token.
      if (i < CachedLexPos)
        CachedTokens.erase(AnnotBegin + 1, CachedTokens.begin() + CachedLexPos);
      *AnnotBegin = Tok;
      CachedLexPos = i;
      return;
    }
  }
}

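// Return true if the last cached token (the one just before CachedLexPos)
// has the same kind and the same source location as Tok.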
bool Preprocessor::IsPreviousCachedToken(const Token &Tok) const {
  // There's currently no cached token...
  if (!CachedLexPos)
    return false;

  const Token LastCachedTok = CachedTokens[CachedLexPos - 1];
  if (LastCachedTok.getKind() != Tok.getKind())
    return false;

  SourceLocation::IntTy RelOffset = 0;
  if ((!getSourceManager().isInSameSLocAddrSpace(
          Tok.getLocation(), getLastCachedTokenLocation(), &RelOffset)) ||
      RelOffset)
    return false;

  return true;
}

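// Replace the token that is currently just before CachedLexPos with the given
// sequence of tokens, leaving CachedLexPos pointing just past the newly
// inserted tokens.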
void Preprocessor::ReplacePreviousCachedToken(ArrayRef<Token> NewToks) {
  assert(CachedLexPos != 0 && "Expected to have some cached tokens");
  CachedTokens.insert(CachedTokens.begin() + CachedLexPos - 1, NewToks.begin(),
                      NewToks.end());
  CachedTokens.erase(CachedTokens.begin() + CachedLexPos - 1 + NewToks.size());
  CachedLexPos += NewToks.size() - 1;
}