//===- ScriptLexer.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a lexer for the linker script.
//
// The linker script's grammar is not complex but ambiguous due to the
// lack of a formal specification of the language. What we are trying to
// do in this and other files in LLD is to make a "reasonable" linker
// script processor.
//
// Of simplicity, compatibility, and efficiency, we put the most emphasis
// on simplicity when we wrote this lexer. Compatibility with the GNU
// linkers is important, but we did not try to clone every tiny corner
// case of their lexers, as even ld.bfd and ld.gold are subtly different
// in various corner cases. We do not care much about efficiency because
// the time spent in parsing linker scripts is usually negligible.
//
// Our linker script grammar is LL(2), meaning that it needs at most
// two-token lookahead to parse. The only place we need two-token
// lookahead is labels in version scripts, where we need to parse "local :"
// as if it were "local:".
//
// Overall, this lexer works fine for most linker scripts. There might
// be room for improving compatibility, but that's probably not at the
// top of our todo list.
//
//===----------------------------------------------------------------------===//

#include "ScriptLexer.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;
using namespace lld;
using namespace lld::elf;

// Returns the whole line containing the current token.
StringRef ScriptLexer::getLine() {
  StringRef s = getCurrentMB().getBuffer();
  StringRef tok = tokens[pos - 1];

  size_t pos = s.rfind('\n', tok.data() - s.data());
  if (pos != StringRef::npos)
    s = s.substr(pos + 1);
  return s.substr(0, s.find_first_of("\r\n"));
}

// Returns the 1-based line number of the current token.
size_t ScriptLexer::getLineNumber() {
  if (pos == 0)
    return 1;
  StringRef s = getCurrentMB().getBuffer();
  StringRef tok = tokens[pos - 1];
  const size_t tokOffset = tok.data() - s.data();

  // For the first token, or when going backwards, start from the beginning of
  // the buffer. If this token is after the previous token, start from the
  // previous token.
  size_t line = 1;
  size_t start = 0;
  if (lastLineNumberOffset > 0 && tokOffset >= lastLineNumberOffset) {
    start = lastLineNumberOffset;
    line = lastLineNumber;
  }

  line += s.substr(start, tokOffset - start).count('\n');

  // Store the line number of this token for reuse.
  lastLineNumberOffset = tokOffset;
  lastLineNumber = line;

  return line;
}

// Returns the 0-based column number of the current token.
size_t ScriptLexer::getColumnNumber() {
  StringRef tok = tokens[pos - 1];
  return tok.data() - getLine().data();
}

// Returns the current location as a "<file>:<line>" string for diagnostics.
std::string ScriptLexer::getCurrentLocation() {
  std::string filename = std::string(getCurrentMB().getBufferIdentifier());
  return (filename + ":" + Twine(getLineNumber())).str();
}

ScriptLexer::ScriptLexer(MemoryBufferRef mb) { tokenize(mb); }

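// A diagnostic reported via setError() has the form "<file>:<line>: <msg>",
// followed by the offending line and a caret under the current token. For
// example, a failed expect("{") might print something like:
//
//   foo.lds:2: { expected, but got [
//   >>> SECTIONS [
//   >>>          ^
//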
// We don't want to record cascading errors. Keep only the first one.
void ScriptLexer::setError(const Twine &msg) {
  if (errorCount())
    return;

  std::string s = (getCurrentLocation() + ": " + msg).str();
  if (pos)
    s += "\n>>> " + getLine().str() + "\n>>> " +
         std::string(getColumnNumber(), ' ') + "^";
  error(s);
}

// Split the contents of MB into linker script tokens.
void ScriptLexer::tokenize(MemoryBufferRef mb) {
  std::vector<StringRef> vec;
  mbs.push_back(mb);
  StringRef s = mb.getBuffer();
  StringRef begin = s;

  for (;;) {
    s = skipSpace(s);
    if (s.empty())
      break;

    // Quoted token. Note that double-quote characters are parts of a token
    // because, in a glob match context, only unquoted tokens are interpreted
    // as glob patterns. Double-quoted tokens are literal patterns in that
    // context.
    if (s.startswith("\"")) {
      size_t e = s.find("\"", 1);
      if (e == StringRef::npos) {
        StringRef filename = mb.getBufferIdentifier();
        size_t lineno = begin.substr(0, s.data() - begin.data()).count('\n');
        error(filename + ":" + Twine(lineno + 1) + ": unclosed quote");
        return;
      }

      vec.push_back(s.take_front(e + 1));
      s = s.substr(e + 1);
      continue;
    }

    // ">foo" is parsed to ">" and "foo", but ">>" is parsed to ">>".
    // "|", "||", "&" and "&&" are different operators.
    if (s.startswith("<<") || s.startswith("<=") || s.startswith(">>") ||
        s.startswith(">=") || s.startswith("||") || s.startswith("&&")) {
      vec.push_back(s.substr(0, 2));
      s = s.substr(2);
      continue;
    }

    // Unquoted token. This is more relaxed than tokens in C-like languages,
    // so that you can write "file-name.cpp" as one bare token, for example.
    size_t pos = s.find_first_not_of(
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
        "0123456789_.$/\\~=+[]*?-!^:");

    // A character that cannot start a word (which is usually punctuation)
    // forms a single-character token.
    if (pos == 0)
      pos = 1;
    vec.push_back(s.substr(0, pos));
    s = s.substr(pos);
  }

  tokens.insert(tokens.begin() + pos, vec.begin(), vec.end());
}

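// For illustration, tokenize() splits the line
//
//   . = ALIGN(16);
//
// into the tokens ".", "=", "ALIGN", "(", "16", ")" and ";", while in
//
//   INPUT("foo.o")
//
// the quoted name becomes the single token "foo.o" with the surrounding
// double quotes kept as part of the token.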

// Skip leading whitespace characters or comments.
StringRef ScriptLexer::skipSpace(StringRef s) {
  for (;;) {
    if (s.startswith("/*")) {
      size_t e = s.find("*/", 2);
      if (e == StringRef::npos) {
        setError("unclosed comment in a linker script");
        return "";
      }
      s = s.substr(e + 2);
      continue;
    }
    if (s.startswith("#")) {
      size_t e = s.find('\n', 1);
      if (e == StringRef::npos)
        e = s.size() - 1;
      s = s.substr(e + 1);
      continue;
    }
    size_t size = s.size();
    s = s.ltrim();
    if (s.size() == size)
      return s;
  }
}

// An erroneous token is handled as if it were the last token before EOF.
bool ScriptLexer::atEOF() { return errorCount() || tokens.size() == pos; }

// Split a given string as an expression.
// This function returns "3", "*" and "5" for "3*5", for example.
static std::vector<StringRef> tokenizeExpr(StringRef s) {
  StringRef ops = "+-*/:!~=<>"; // List of operators

  // Quoted strings are literal strings, so we don't want to split them.
  if (s.startswith("\""))
    return {s};

  // Split S with operators as separators.
  std::vector<StringRef> ret;
  while (!s.empty()) {
    size_t e = s.find_first_of(ops);

    // No need to split if there is no operator.
    if (e == StringRef::npos) {
      ret.push_back(s);
      break;
    }

    // Get a token before the operator.
    if (e != 0)
      ret.push_back(s.substr(0, e));

    // Get the operator as a token.
    // Keep the !=, ==, >=, <=, << and >> operators as single tokens.
    if (s.substr(e).startswith("!=") || s.substr(e).startswith("==") ||
        s.substr(e).startswith(">=") || s.substr(e).startswith("<=") ||
        s.substr(e).startswith("<<") || s.substr(e).startswith(">>")) {
      ret.push_back(s.substr(e, 2));
      s = s.substr(e + 2);
    } else {
      ret.push_back(s.substr(e, 1));
      s = s.substr(e + 1);
    }
  }
  return ret;
}

// In contexts where expressions are expected, the lexer should apply
// different tokenization rules than the default one. By default,
// arithmetic operator characters are regular characters, but in the
// expression context, they should be independent tokens.
//
// For example, "foo*3" should be tokenized to "foo", "*" and "3" only
// in the expression context.
//
// This function may split the current token into multiple tokens.
void ScriptLexer::maybeSplitExpr() {
  if (!inExpr || errorCount() || atEOF())
    return;

  std::vector<StringRef> v = tokenizeExpr(tokens[pos]);
  if (v.size() == 1)
    return;
  tokens.erase(tokens.begin() + pos);
  tokens.insert(tokens.begin() + pos, v.begin(), v.end());
}

// Returns the current token and advances to the next one.
StringRef ScriptLexer::next() {
  maybeSplitExpr();

  if (errorCount())
    return "";
  if (atEOF()) {
    setError("unexpected EOF");
    return "";
  }
  return tokens[pos++];
}

// Returns the current token without consuming it.
StringRef ScriptLexer::peek() {
  StringRef tok = next();
  if (errorCount())
    return "";
  pos = pos - 1;
  return tok;
}

// Returns the token after the current one without consuming anything.
StringRef ScriptLexer::peek2() {
  skip();
  StringRef tok = next();
  if (errorCount())
    return "";
  pos = pos - 2;
  return tok;
}

// If the current token is Tok, consumes it and returns true.
bool ScriptLexer::consume(StringRef tok) {
  if (peek() == tok) {
    skip();
    return true;
  }
  return false;
}

// Consumes Tok followed by ":". Space is allowed between Tok and ":".
bool ScriptLexer::consumeLabel(StringRef tok) {
  if (consume((tok + ":").str()))
    return true;
  if (tokens.size() >= pos + 2 && tokens[pos] == tok &&
      tokens[pos + 1] == ":") {
    pos += 2;
    return true;
  }
  return false;
}

void ScriptLexer::skip() { (void)next(); }

// Consumes the next token and reports an error if it does not match Expect.
void ScriptLexer::expect(StringRef expect) {
  if (errorCount())
    return;
  StringRef tok = next();
  if (tok != expect)
    setError(expect + " expected, but got " + tok);
}

// Returns true if S encloses T.
static bool encloses(StringRef s, StringRef t) {
  return s.bytes_begin() <= t.bytes_begin() && t.bytes_end() <= s.bytes_end();
}

MemoryBufferRef ScriptLexer::getCurrentMB() {
  // Find input buffer containing the current token.
  assert(!mbs.empty());
  if (pos == 0)
    return mbs.back();
  for (MemoryBufferRef mb : mbs)
    if (encloses(mb.getBuffer(), tokens[pos - 1]))
      return mb;
  llvm_unreachable("getCurrentMB: failed to find a token");
}