/* ntp_scanner.c
 *
 * The source code for a simple lexical analyzer.
 *
 * Written By: Sachin Kamboj
 *             University of Delaware
 *             Newark, DE 19711
 * Copyright (c) 2006
 */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdio.h>
#include <ctype.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <limits.h>	/* SCHAR_MAX, INT_MAX */

#include "ntpd.h"
#include "ntp_config.h"
#include "ntpsim.h"
#include "ntp_scanner.h"
#include "ntp_parser.h"

/* ntp_keyword.h declares finite state machine and token text */
#include "ntp_keyword.h"



/* SCANNER GLOBAL VARIABLES
 * ------------------------
 */

#define MAX_LEXEME (1024 + 1)	/* The maximum size of a lexeme */
char yytext[MAX_LEXEME];	/* Buffer for storing the input text/lexeme */
u_int32 conf_file_sum;		/* Simple sum of characters read */

static struct FILE_INFO * lex_stack = NULL;



/* CONSTANTS
 * ---------
 */
const char special_chars[] = "{}(),;|=";


/* FUNCTIONS
 * ---------
 */

static int is_keyword(char *lexeme, follby *pfollowedby);


/*
 * keyword() - Return the keyword associated with token T_ identifier.
 *             See also token_name() for the string-ized T_ identifier.
 *             Example: keyword(T_Server) returns "server"
 *                      token_name(T_Server) returns "T_Server"
 */
const char *
keyword(
    int token
    )
{
    size_t i;
    const char *text;

    i = token - LOWEST_KEYWORD_ID;

    if (i < COUNTOF(keyword_text))
        text = keyword_text[i];
    else
        text = NULL;

    return (text != NULL)
        ? text
        : "(keyword not found)";
}


/* FILE & STRING BUFFER INTERFACE
 * ------------------------------
 *
 * This started out as a couple of wrapper functions around the standard
 * C fgetc() and ungetc() functions in order to include positional
 * bookkeeping. Alas, that is no longer a good solution with nested
 * input files and the possibility to send configuration commands via
 * 'ntpdc' and 'ntpq'.
 *
 * Now there are a few functions to maintain a stack of nested input
 * sources (though nesting is only allowed for disk files), and from the
 * scanner / parser point of view there is no difference between the two
 * types of sources.
 *
 * The 'fgetc()' / 'ungetc()' replacements now operate on a FILE_INFO
 * structure. Instead of trying different 'ungetc()' strategies for file
 * and buffer based parsing, we keep the backup char in our own
 * FILE_INFO structure. This is sufficient, as the parser does *not*
 * jump around via 'seek' or the like, and there's no need to
 * check/clear the backup store in places other than 'lex_getch()'.
 */
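/* Illustrative sketch (not the authoritative declaration, which lives in
 * ntp_scanner.h): the code below relies on a FILE_INFO shaped roughly
 * like the following. Field types are inferred from usage in this file
 * and may differ in detail from the real header.
 *
 *   struct FILE_INFO {
 *       struct FILE_INFO *st_next;   // next (outer) input source on the stack
 *       FILE             *fpi;       // stdio stream, NULL for remote config buffers
 *       int               force_eof; // when set, lex_getch() yields only EOF
 *       int               backch;    // one-character push-back store, EOF if empty
 *       <position type>   curpos;    // current scan position (.nline / .ncol)
 *       <position type>   bakpos;    // position backup used when ungetting a '\n'
 *       <position type>   tokpos;    // position of the start of the current token
 *       char              fname[1];  // variable-sized copy of the input source name
 *   };
 */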
/*
 * Allocate an info structure and attach it to a file.
 *
 * Note: When 'mode' is NULL, then the INFO block will be set up to
 * contain a NULL file pointer, as suited for remote config command
 * parsing. Otherwise having a NULL file pointer is considered an error,
 * and a NULL info block pointer is returned to indicate failure!
 *
 * Note: We use a variable-sized structure to hold a copy of the file
 * name (or, more properly, the input source description). This is more
 * secure than keeping a reference to some other storage that might go
 * out of scope.
 */
static struct FILE_INFO *
lex_open(
    const char *path,
    const char *mode
    )
{
    struct FILE_INFO *stream;
    size_t nnambuf;

    nnambuf = strlen(path);
    stream = emalloc_zero(sizeof(*stream) + nnambuf);
    stream->curpos.nline = 1;
    stream->backch = EOF;
    /* copy name with memcpy -- trailing NUL already there! */
    memcpy(stream->fname, path, nnambuf);

    if (NULL != mode) {
        stream->fpi = fopen(path, mode);
        if (NULL == stream->fpi) {
            free(stream);
            stream = NULL;
        }
    }
    return stream;
}

/* Get the next character from buffer or file. This will return any
 * putback character first; it will also make sure the last line is at
 * least virtually terminated with a '\n'.
 */
static int
lex_getch(
    struct FILE_INFO *stream
    )
{
    int ch;

    if (NULL == stream || stream->force_eof)
        return EOF;

    if (EOF != stream->backch) {
        ch = stream->backch;
        stream->backch = EOF;
        if (stream->fpi)
            conf_file_sum += ch;
    } else if (stream->fpi) {
        /* fetch next 7-bit ASCII char (or EOF) from file */
        while ((ch = fgetc(stream->fpi)) != EOF && ch > SCHAR_MAX)
            stream->curpos.ncol++;
        if (EOF != ch) {
            conf_file_sum += ch;
            stream->curpos.ncol++;
        }
    } else {
        /* fetch next 7-bit ASCII char from buffer */
        const char * scan;

        scan = &remote_config.buffer[remote_config.pos];
        while ((ch = (u_char)*scan) > SCHAR_MAX) {
            scan++;
            stream->curpos.ncol++;
        }
        if ('\0' != ch) {
            scan++;
            stream->curpos.ncol++;
        } else {
            ch = EOF;
        }
        remote_config.pos = (int)(scan - remote_config.buffer);
    }

    /* If the last line ends without '\n', generate one. This
     * happens most likely on Windows, where editors often have a
     * sloppy concept of a line.
     */
    if (EOF == ch && stream->curpos.ncol != 0)
        ch = '\n';

    /* update scan position tallies */
    if (ch == '\n') {
        stream->bakpos = stream->curpos;
        stream->curpos.nline++;
        stream->curpos.ncol = 0;
    }

    return ch;
}

/* Note: lex_ungetch() will fail to track more than one line of push
 * back. But since it guarantees only one char of back storage anyway,
 * this should not be a problem.
 */
static int
lex_ungetch(
    int ch,
    struct FILE_INFO *stream
    )
{
    /* check preconditions */
    if (NULL == stream || stream->force_eof)
        return EOF;
    if (EOF != stream->backch || EOF == ch)
        return EOF;

    /* keep for later reference and update checksum */
    stream->backch = (u_char)ch;
    if (stream->fpi)
        conf_file_sum -= stream->backch;

    /* update position */
    if (stream->backch == '\n') {
        stream->curpos = stream->bakpos;
        stream->bakpos.ncol = -1;
    }
    stream->curpos.ncol--;
    return stream->backch;
}

/* Dispose of an input structure. If the file pointer is not NULL, close
 * the file. This function does not check the result of 'fclose()'.
 */
static void
lex_close(
    struct FILE_INFO *stream
    )
{
    if (NULL != stream) {
        if (NULL != stream->fpi)
            fclose(stream->fpi);
        free(stream);
    }
}
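/* Illustrative only (not part of the original source): together,
 * lex_getch() and lex_ungetch() give the scanner a one-character
 * look-ahead, which is exactly how yylex() below uses them, e.g.:
 *
 *   int ch = lex_getch(lex_stack);
 *   if (!is_special(ch))
 *       lex_ungetch(ch, lex_stack);   // push back at most one character
 *
 * Only a single character of push-back is guaranteed; a second
 * lex_ungetch() without an intervening lex_getch() returns EOF and
 * leaves the stored character untouched.
 */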
/* INPUT STACK
 * -----------
 *
 * Nested input sources are a bit tricky at first glance. We deal with
 * this problem using a stack of input sources, that is, a forward
 * linked list of FILE_INFO structs.
 *
 * This stack is never empty during parsing; while an encounter with EOF
 * can and will remove nested input sources, removing the last element
 * of the stack will not work during parsing, and the EOF condition of
 * the outermost input file remains until the parser folds up.
 */

static struct FILE_INFO *
_drop_stack_do(
    struct FILE_INFO * head
    )
{
    struct FILE_INFO * tail;

    while (NULL != head) {
        tail = head->st_next;
        lex_close(head);
        head = tail;
    }
    return head;
}



/* Create a singleton input source on an empty lexer stack. This will
 * fail if there is already an input source, or if the underlying disk
 * file cannot be opened.
 *
 * Returns TRUE if a new input object was successfully created.
 */
int/*BOOL*/
lex_init_stack(
    const char * path,
    const char * mode
    )
{
    if (NULL != lex_stack || NULL == path)
        return FALSE;

    lex_stack = lex_open(path, mode);
    return (NULL != lex_stack);
}

/* This removes *all* input sources from the stack, leaving the head
 * pointer as NULL. Any attempt to parse in that state is likely to bomb
 * with segmentation faults or the like.
 *
 * In other words: Use this to clean up after parsing, and do not parse
 * anything until the next 'lex_init_stack()' has succeeded.
 */
void
lex_drop_stack(void)
{
    lex_stack = _drop_stack_do(lex_stack);
}

/* Flush the lexer input stack: This drops all input objects on the
 * stack except the current top-of-stack, and marks the top-of-stack as
 * inactive. Any further calls to lex_getch() yield only EOF, and it's
 * no longer possible to push something back.
 *
 * Returns TRUE if there is a head element (top-of-stack) that was not
 * in force-eof mode before this call.
 */
int/*BOOL*/
lex_flush_stack(void)
{
    int retv = FALSE;

    if (NULL != lex_stack) {
        retv = !lex_stack->force_eof;
        lex_stack->force_eof = TRUE;
        lex_stack->st_next = _drop_stack_do(lex_stack->st_next);
    }
    return retv;
}

/* Push another file on the parsing stack. If the mode is NULL, create a
 * FILE_INFO suitable for in-memory parsing; otherwise, create a
 * FILE_INFO that is bound to a local disk file. Note that 'path' must
 * not be NULL, or the function will fail.
 *
 * Returns TRUE if a new info record was pushed onto the stack.
 */
int/*BOOL*/
lex_push_file(
    const char * path,
    const char * mode
    )
{
    struct FILE_INFO * next = NULL;

    if (NULL != path) {
        next = lex_open(path, mode);
        if (NULL != next) {
            next->st_next = lex_stack;
            lex_stack = next;
        }
    }
    return (NULL != next);
}

/* Pop, close & free the top of the include stack, unless the stack
 * contains only a singleton input object. In that case the function
 * fails, because the parser does not expect the input stack to be
 * empty.
 *
 * Returns TRUE if an object was successfully popped from the stack.
 */
int/*BOOL*/
lex_pop_file(void)
{
    struct FILE_INFO * head = lex_stack;
    struct FILE_INFO * tail = NULL;

    if (NULL != head) {
        tail = head->st_next;
        if (NULL != tail) {
            lex_stack = tail;
            lex_close(head);
        }
    }
    return (NULL != tail);
}
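/* Illustrative only (not part of the original source): a typical
 * lifetime of this stack, as driven by a configuration reader. The
 * surrounding logic, the "/etc/ntp.conf" path and the include_path
 * variable are hypothetical; only the lex_* and msyslog() calls are
 * real.
 *
 *   if (lex_init_stack("/etc/ntp.conf", "r")) {      // outermost source
 *       // ... on an "includefile" statement:
 *       if (!lex_push_file(include_path, "r"))
 *           msyslog(LOG_ERR, "couldn't open %s", include_path);
 *       // ... yylex() pops nested sources itself when they reach EOF,
 *       //     via lex_pop_file(); the outermost source stays put.
 *       lex_drop_stack();                            // cleanup after parsing
 *   }
 */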
/* Get the include nesting level. This currently loops over the stack
 * and counts elements; but since this is of concern only with an
 * include statement and the nesting depth has a small limit, there's no
 * bottleneck expected here.
 *
 * Returns the nesting level of includes, that is, the current depth of
 * the lexer input stack.
 */
size_t
lex_level(void)
{
    size_t cnt = 0;
    struct FILE_INFO *ipf = lex_stack;

    while (NULL != ipf) {
        cnt++;
        ipf = ipf->st_next;
    }
    return cnt;
}

/* check if the current input is from a file */
int/*BOOL*/
lex_from_file(void)
{
    return (NULL != lex_stack) && (NULL != lex_stack->fpi);
}

struct FILE_INFO *
lex_current(void)
{
    /* this became so simple, it could be a macro. But then,
     * lex_stack would need to be global...
     */
    return lex_stack;
}


/* STATE MACHINES
 * --------------
 */

/* Keywords */
static int
is_keyword(
    char *lexeme,
    follby *pfollowedby
    )
{
    follby fb;
    int curr_s;     /* current state index */
    int token;
    int i;

    curr_s = SCANNER_INIT_S;
    token = 0;

    for (i = 0; lexeme[i]; i++) {
        while (curr_s && (lexeme[i] != SS_CH(sst[curr_s])))
            curr_s = SS_OTHER_N(sst[curr_s]);

        if (curr_s && (lexeme[i] == SS_CH(sst[curr_s]))) {
            if ('\0' == lexeme[i + 1]
                && FOLLBY_NON_ACCEPTING != SS_FB(sst[curr_s])) {
                fb = SS_FB(sst[curr_s]);
                *pfollowedby = fb;
                token = curr_s;
                break;
            }
            curr_s = SS_MATCH_N(sst[curr_s]);
        } else
            break;
    }

    return token;
}


/* Integer */
static int
is_integer(
    char *lexeme
    )
{
    int i;
    int is_neg;
    u_int u_val;

    i = 0;

    /* Allow a leading minus sign */
    if (lexeme[i] == '-') {
        i++;
        is_neg = TRUE;
    } else {
        is_neg = FALSE;
    }

    /* Check that all the remaining characters are digits */
    for (; lexeme[i] != '\0'; i++) {
        if (!isdigit((u_char)lexeme[i]))
            return FALSE;
    }

    if (is_neg)
        return TRUE;

    /* Reject numbers that fit in unsigned but not in signed int */
    if (1 == sscanf(lexeme, "%u", &u_val))
        return (u_val <= INT_MAX);
    else
        return FALSE;
}


/* U_int -- assumes is_integer() has returned FALSE */
static int
is_u_int(
    char *lexeme
    )
{
    int i;
    int is_hex;

    i = 0;
    if ('0' == lexeme[i] && 'x' == tolower((u_char)lexeme[i + 1])) {
        i += 2;
        is_hex = TRUE;
    } else {
        is_hex = FALSE;
    }

    /* Check that all the remaining characters are (hex) digits */
    for (; lexeme[i] != '\0'; i++) {
        if (is_hex && !isxdigit((u_char)lexeme[i]))
            return FALSE;
        if (!is_hex && !isdigit((u_char)lexeme[i]))
            return FALSE;
    }

    return TRUE;
}


/* Double */
static int
is_double(
    char *lexeme
    )
{
    u_int num_digits = 0;   /* Number of digits read */
    u_int i;

    i = 0;

    /* Check for an optional '+' or '-' */
    if ('+' == lexeme[i] || '-' == lexeme[i])
        i++;

    /* Read the integer part */
    for (; lexeme[i] && isdigit((u_char)lexeme[i]); i++)
        num_digits++;

    /* Check for the optional decimal point */
    if ('.' == lexeme[i]) {
        i++;
        /* Check for any digits after the decimal point */
        for (; lexeme[i] && isdigit((u_char)lexeme[i]); i++)
            num_digits++;
    }

    /*
     * At least one digit must have appeared in the integer part
     * and/or the fraction part by this point.
     */
    if (!num_digits)
        return 0;

    /* Check if we are done */
    if (!lexeme[i])
        return 1;

    /* There is still more input, read the exponent */
    if ('e' == tolower((u_char)lexeme[i]))
        i++;
    else
        return 0;

    /* Read an optional sign */
    if ('+' == lexeme[i] || '-' == lexeme[i])
        i++;

    /* Now read the exponent part */
    while (lexeme[i] && isdigit((u_char)lexeme[i]))
        i++;

    /* Check if we are done */
    if (!lexeme[i])
        return 1;
    else
        return 0;
}
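/* Illustrative only (not part of the original source): worked examples
 * of what the three classifiers above accept, derived directly from
 * their code (assuming a 32-bit int).
 *
 *   is_integer("42")         -> TRUE     is_integer("-7")   -> TRUE
 *   is_integer("3000000000") -> FALSE    (fits unsigned, not signed int)
 *   is_u_int("3000000000")   -> TRUE     is_u_int("0x1f")   -> TRUE
 *   is_double("1.5")         -> 1        is_double("2e-3")  -> 1
 *   is_double(".")           -> 0        (no digits at all)
 *
 * yylex() tries them in exactly this order: keyword, integer, u_int,
 * double, and finally falls back to a string token.
 */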
/* is_special() - Test whether a character is a special-character token */
static inline int
is_special(
    int ch
    )
{
    return strchr(special_chars, ch) != NULL;
}


static int
is_EOC(
    int ch
    )
{
    if ((old_config_style && (ch == '\n')) ||
        (!old_config_style && (ch == ';')))
        return 1;
    return 0;
}


char *
quote_if_needed(char *str)
{
    char *ret;
    size_t len;
    size_t octets;

    len = strlen(str);
    octets = len + 2 + 1;
    ret = emalloc(octets);
    if ('"' != str[0]
        && (strcspn(str, special_chars) < len
            || strchr(str, ' ') != NULL)) {
        snprintf(ret, octets, "\"%s\"", str);
    } else
        strlcpy(ret, str, octets);

    return ret;
}


static int
create_string_token(
    char *lexeme
    )
{
    char *pch;

    /*
     * ignore end of line whitespace
     */
    pch = lexeme;
    while (*pch && isspace((u_char)*pch))
        pch++;

    if (!*pch) {
        yylval.Integer = T_EOC;
        return yylval.Integer;
    }

    yylval.String = estrdup(lexeme);
    return T_String;
}
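/* Illustrative only (not part of the original source): a minimal
 * debugging driver for the scanner defined below. yylex() returns 0 at
 * end of input and a token code otherwise; token_name(), yytext and the
 * lexer stack functions are real, the driver and "test.conf" are
 * hypothetical.
 *
 *   if (lex_init_stack("test.conf", "r")) {
 *       int tok;
 *       while ((tok = yylex()) != 0)
 *           printf("%-16s '%s'\n", token_name(tok), yytext);
 *       lex_drop_stack();
 *   }
 */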
/*
 * yylex() - function that does the actual scanning.
 * Bison expects this function to be called yylex and for it to take no
 * input and return an int.
 * Conceptually yylex "returns" yylval as well as the actual return
 * value representing the token or type.
 */
int
yylex(void)
{
    static follby followedby = FOLLBY_TOKEN;
    size_t i;
    int instring;
    int yylval_was_set;
    int converted;
    int token;      /* The return value */
    int ch;

    instring = FALSE;
    yylval_was_set = FALSE;

    do {
        /* Ignore whitespace at the beginning */
        while (EOF != (ch = lex_getch(lex_stack)) &&
               isspace(ch) &&
               !is_EOC(ch))
            ;   /* Null Statement */

        if (EOF == ch) {

            if (!lex_pop_file())
                return 0;
            token = T_EOC;
            goto normal_return;

        } else if (is_EOC(ch)) {

            /* end FOLLBY_STRINGS_TO_EOC effect */
            followedby = FOLLBY_TOKEN;
            token = T_EOC;
            goto normal_return;

        } else if (is_special(ch) && FOLLBY_TOKEN == followedby) {
            /* special chars are their own token values */
            token = ch;
            /*
             * '=' outside simulator configuration implies
             * a single string following as in:
             * setvar Owner = "The Boss" default
             */
            if ('=' == ch && old_config_style)
                followedby = FOLLBY_STRING;
            yytext[0] = (char)ch;
            yytext[1] = '\0';
            goto normal_return;
        } else
            lex_ungetch(ch, lex_stack);

        /* save the position of the start of the token */
        lex_stack->tokpos = lex_stack->curpos;

        /* Read in the lexeme */
        i = 0;
        while (EOF != (ch = lex_getch(lex_stack))) {

            yytext[i] = (char)ch;

            /* Break on whitespace or a special character */
            if (isspace(ch) || is_EOC(ch)
                || '"' == ch
                || (FOLLBY_TOKEN == followedby
                    && is_special(ch)))
                break;

            /* Read the rest of the line on reading a start
             * of comment character */
            if ('#' == ch) {
                while (EOF != (ch = lex_getch(lex_stack))
                       && '\n' != ch)
                    ;   /* Null Statement */
                break;
            }

            i++;
            if (i >= COUNTOF(yytext))
                goto lex_too_long;
        }
        /* Pick up everything between the " marks, up to the end of
         * the line. If we make it to EOL without a terminating "
         * assume it for them.
         *
         * XXX - HMS: I'm not sure we want to assume the closing "
         */
        if ('"' == ch) {
            instring = TRUE;
            while (EOF != (ch = lex_getch(lex_stack)) &&
                   ch != '"' && ch != '\n') {
                yytext[i++] = (char)ch;
                if (i >= COUNTOF(yytext))
                    goto lex_too_long;
            }
            /*
             * yytext[i] will be pushed back as not part of
             * this lexeme, but any closing quote should
             * not be pushed back, so we read another char.
             */
            if ('"' == ch)
                ch = lex_getch(lex_stack);
        }
        /* Push back the last character read that is not a part
         * of this lexeme. This fails silently if ch is EOF,
         * but then the EOF condition persists and is handled on
         * the next turn by the include stack mechanism.
         */
        lex_ungetch(ch, lex_stack);

        yytext[i] = '\0';
    } while (i == 0);
    /* Now return the desired token */

    /* First make sure that the parser is *not* expecting a string
     * as the next token (based on the previous token that was
     * returned) and that we haven't read a string.
     */

    if (followedby == FOLLBY_TOKEN && !instring) {
        token = is_keyword(yytext, &followedby);
        if (token) {
            /*
             * T_Server is exceptional as it forces the
             * following token to be a string in the
             * non-simulator parts of the configuration,
             * but in the simulator configuration section,
             * "server" is followed by "=" which must be
             * recognized as a token, not a string.
             */
            if (T_Server == token && !old_config_style)
                followedby = FOLLBY_TOKEN;
            goto normal_return;
        } else if (is_integer(yytext)) {
            yylval_was_set = TRUE;
            errno = 0;
            if ((yylval.Integer = strtol(yytext, NULL, 10)) == 0
                && ((errno == EINVAL) || (errno == ERANGE))) {
                msyslog(LOG_ERR,
                    "Integer cannot be represented: %s",
                    yytext);
                if (lex_from_file()) {
                    exit(1);
                } else {
                    /* force end of parsing */
                    yylval.Integer = 0;
                    return 0;
                }
            }
            token = T_Integer;
            goto normal_return;
        } else if (is_u_int(yytext)) {
            yylval_was_set = TRUE;
            if ('0' == yytext[0] &&
                'x' == tolower((u_char)yytext[1]))
                converted = sscanf(&yytext[2], "%x",
                                   &yylval.U_int);
            else
                converted = sscanf(yytext, "%u",
                                   &yylval.U_int);
            if (1 != converted) {
                msyslog(LOG_ERR,
                    "U_int cannot be represented: %s",
                    yytext);
                if (lex_from_file()) {
                    exit(1);
                } else {
                    /* force end of parsing */
                    yylval.Integer = 0;
                    return 0;
                }
            }
            token = T_U_int;
            goto normal_return;
        } else if (is_double(yytext)) {
            yylval_was_set = TRUE;
            errno = 0;
            if ((yylval.Double = strtod(yytext, NULL)) == 0
                && errno == ERANGE) {
                msyslog(LOG_ERR,
                    "Double too large to represent: %s",
                    yytext);
                exit(1);
            } else {
                token = T_Double;
                goto normal_return;
            }
        } else {
            /* Default: Everything is a string */
            yylval_was_set = TRUE;
            token = create_string_token(yytext);
            goto normal_return;
        }
    }

    /*
     * Either followedby is not FOLLBY_TOKEN or this lexeme is part
     * of a string. Hence, we need to return T_String.
     *
     * _Except_ we might have a -4 or -6 flag on an association
     * configuration line (server, peer, pool, etc.).
     *
     * This is a terrible hack, but the grammar is ambiguous so we
     * don't have a choice. [SK]
     *
     * The ambiguity is in the keyword scanner, not ntp_parser.y.
     * We do not require server addresses to be quoted in ntp.conf,
     * which complicates the scanner's job. To avoid trying (and
     * failing) to match an IP address or DNS name to a keyword, the
     * association keywords use FOLLBY_STRING in the keyword table,
     * which tells the scanner to force the next token to be a
     * T_String. So instead of trying to match a keyword, the scanner
     * expects a string; that is where the -4/-6 modifiers to server,
     * peer, etc. show up.
     * "restrict -4" and "restrict -6" parsing works correctly without
     * this hack, as restrict uses FOLLBY_TOKEN. [DH]
     */
    if ('-' == yytext[0]) {
        if ('4' == yytext[1]) {
            token = T_Ipv4_flag;
            goto normal_return;
        } else if ('6' == yytext[1]) {
            token = T_Ipv6_flag;
            goto normal_return;
        }
    }
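    /* Illustrative only (not part of the original source): with the
     * -4/-6 handling above, a (hypothetical) ntp.conf line such as
     *
     *     server -4 time.example.com
     *
     * tokenizes as T_Server, T_Ipv4_flag, T_String("time.example.com"),
     * followed by T_EOC at the end of the line, because "server" sets
     * followedby = FOLLBY_STRING and the "-4" lexeme is intercepted
     * here before the string fallthrough below.
     */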
    if (FOLLBY_STRING == followedby)
        followedby = FOLLBY_TOKEN;

    yylval_was_set = TRUE;
    token = create_string_token(yytext);

  normal_return:
    if (T_EOC == token)
        DPRINTF(4, ("\t<end of command>\n"));
    else
        DPRINTF(4, ("yylex: lexeme '%s' -> %s\n", yytext,
                    token_name(token)));

    if (!yylval_was_set)
        yylval.Integer = token;

    return token;

  lex_too_long:
    yytext[min(sizeof(yytext) - 1, 50)] = 0;
    msyslog(LOG_ERR,
        "configuration item on line %d longer than limit of %lu, began with '%s'",
        lex_stack->curpos.nline, (u_long)min(sizeof(yytext) - 1, 50),
        yytext);

    /*
     * If we hit the length limit reading the startup configuration
     * file, abort.
     */
    if (lex_from_file())
        exit(1);

    /*
     * If it's runtime configuration via "ntpq :config", treat it as
     * if the configuration text ended before the too-long lexeme,
     * hostname, or string.
     */
    yylval.Integer = 0;
    return 0;
}