/* ntp_scanner.c
 *
 * The source code for a simple lexical analyzer.
 *
 * Written By:	Sachin Kamboj
 *		University of Delaware
 *		Newark, DE 19711
 * Copyright (c) 2006
 */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdio.h>
#include <ctype.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>

#include "ntpd.h"
#include "ntp_config.h"
#include "ntpsim.h"
#include "ntp_scanner.h"
#include "ntp_parser.h"

/* ntp_keyword.h declares finite state machine and token text */
#include "ntp_keyword.h"



/* SCANNER GLOBAL VARIABLES
 * ------------------------
 */

#define MAX_LEXEME	128	/* The maximum size of a lexeme */
char yytext[MAX_LEXEME];	/* Buffer for storing the input text/lexeme */
u_int32 conf_file_sum;		/* Simple sum of characters read */

static struct FILE_INFO * lex_stack = NULL;



/* CONSTANTS
 * ---------
 */
const char special_chars[] = "{}(),;|=";


/* FUNCTIONS
 * ---------
 */

static int is_keyword(char *lexeme, follby *pfollowedby);


/*
 * keyword() - Return the keyword associated with a T_ token identifier.
 * See also token_name() for the string-ized T_ identifier.
 * Example: keyword(T_Server) returns "server"
 *	    token_name(T_Server) returns "T_Server"
 */
const char *
keyword(
	int token
	)
{
	size_t i;
	const char *text;
	static char sbuf[64];

	i = token - LOWEST_KEYWORD_ID;

	switch (token) {
	case T_ServerresponseFuzz:
		text = "serverresponse fuzz";
		break;

	default:
		if (i < COUNTOF(keyword_text)) {
			text = keyword_text[i];
		} else {
			snprintf(sbuf, sizeof sbuf,
				 "(keyword #%d not found)", token);
			text = sbuf;
		}
	}

	return text;
}


/* FILE & STRING BUFFER INTERFACE
 * ------------------------------
 *
 * This started out as a couple of wrapper functions around the standard
 * C fgetc and ungetc functions in order to include positional
 * bookkeeping. Alas, this is no longer a good solution with nested
 * input files and the possibility to send configuration commands via
 * 'ntpdc' and 'ntpq'.
 *
 * Now there are a few functions to maintain a stack of nested input
 * sources (though nesting is only allowed for disk files) and from the
 * scanner / parser point of view there's no difference between the two
 * types of sources.
 *
 * The 'fgetc()' / 'ungetc()' replacements now operate on a FILE_INFO
 * structure. Instead of trying different 'ungetc()' strategies for file
 * and buffer based parsing, we keep the backup char in our own
 * FILE_INFO structure. This is sufficient, as the parser does *not*
 * jump around via 'seek' or the like, and there's no need to
 * check/clear the backup store in places other than 'lex_getch()'.
 */
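
/*
 * Illustrative sketch (not part of the original source): a typical
 * consumer of this interface peeks at the next character and pushes it
 * back when it does not belong to the current lexeme, e.g.
 *
 *	int ch = lex_getch(lex_stack);
 *	if (EOF != ch && !isspace(ch))
 *		... consume ch as part of the lexeme ...
 *	else
 *		lex_ungetch(ch, lex_stack);
 *
 * Only a single character of pushback is guaranteed, which matches how
 * yylex() below uses the interface.
 */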

/*
 * Allocate an info structure and attach it to a file.
 *
 * Note: When 'mode' is NULL, then the INFO block will be set up to
 * contain a NULL file pointer, as suited for remote config command
 * parsing. Otherwise having a NULL file pointer is considered an error,
 * and a NULL info block pointer is returned to indicate failure!
 *
 * Note: We use a variable-sized structure to hold a copy of the file
 * name (or, more properly, the input source description). This is more
 * secure than keeping a reference to some other storage that might go
 * out of scope.
 */
static struct FILE_INFO *
lex_open(
	const char *path,
	const char *mode
	)
{
	struct FILE_INFO *stream;
	size_t            nnambuf;

	nnambuf = strlen(path);
	stream = emalloc_zero(sizeof(*stream) + nnambuf);
	stream->curpos.nline = 1;
	stream->backch = EOF;
	/* copy name with memcpy -- trailing NUL already there! */
	memcpy(stream->fname, path, nnambuf);

	if (NULL != mode) {
		stream->fpi = fopen(path, mode);
		if (NULL == stream->fpi) {
			free(stream);
			stream = NULL;
		}
	}
	return stream;
}

/* get next character from buffer or file. This will return any putback
 * character first; it will also make sure the last line is at least
 * virtually terminated with a '\n'.
 */
static int
lex_getch(
	struct FILE_INFO *stream
	)
{
	int ch;

	if (NULL == stream || stream->force_eof)
		return EOF;

	if (EOF != stream->backch) {
		ch = stream->backch;
		stream->backch = EOF;
		if (stream->fpi)
			conf_file_sum += ch;
		stream->curpos.ncol++;
	} else if (stream->fpi) {
		/* fetch next 7-bit ASCII char (or EOF) from file */
		while ((ch = fgetc(stream->fpi)) != EOF && ch > SCHAR_MAX)
			stream->curpos.ncol++;
		if (EOF != ch) {
			conf_file_sum += ch;
			stream->curpos.ncol++;
		}
	} else {
		/* fetch next 7-bit ASCII char from buffer */
		const char * scan;
		scan = &remote_config.buffer[remote_config.pos];
		while ((ch = (u_char)*scan) > SCHAR_MAX) {
			scan++;
			stream->curpos.ncol++;
		}
		if ('\0' != ch) {
			scan++;
			stream->curpos.ncol++;
		} else {
			ch = EOF;
		}
		remote_config.pos = (int)(scan - remote_config.buffer);
	}

	/* If the last line ends without '\n', generate one. This
	 * happens most likely on Windows, where editors often have a
	 * sloppy concept of a line.
	 */
	if (EOF == ch && stream->curpos.ncol != 0)
		ch = '\n';

	/* update scan position tallies */
	if (ch == '\n') {
		stream->bakpos = stream->curpos;
		stream->curpos.nline++;
		stream->curpos.ncol = 0;
	}

	return ch;
}

/* Note: lex_ungetch will fail to track more than one line of push
 * back. But since it guarantees only one char of back storage anyway,
 * this should not be a problem.
 */
static int
lex_ungetch(
	int ch,
	struct FILE_INFO *stream
	)
{
	/* check preconditions */
	if (NULL == stream || stream->force_eof)
		return EOF;
	if (EOF != stream->backch || EOF == ch)
		return EOF;

	/* keep for later reference and update checksum */
	stream->backch = (u_char)ch;
	if (stream->fpi)
		conf_file_sum -= stream->backch;

	/* update position */
	if (stream->backch == '\n') {
		stream->curpos = stream->bakpos;
		stream->bakpos.ncol = -1;
	}
	stream->curpos.ncol--;
	return stream->backch;
}

/* dispose of an input structure. If the file pointer is not NULL, close
 * the file. This function does not check the result of 'fclose()'.
 */
static void
lex_close(
	struct FILE_INFO *stream
	)
{
	if (NULL != stream) {
		if (NULL != stream->fpi)
			fclose(stream->fpi);
		free(stream);
	}
}
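
/*
 * Illustrative sketch (not part of the original source): the two ways
 * lex_open() is meant to be used, per the notes above.  The names are
 * placeholders.
 *
 *	struct FILE_INFO *from_disk = lex_open("ntp.conf", "r");
 *	struct FILE_INFO *from_buf  = lex_open("remote config", NULL);
 *
 * 'from_disk' is NULL if the file could not be opened; 'from_buf'
 * always gets an INFO block with a NULL file pointer and is read from
 * the remote_config buffer instead.
 */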

/* INPUT STACK
 * -----------
 *
 * Nested input sources are a bit tricky at first glance. We deal with
 * this problem using a stack of input sources, that is, a forward
 * linked list of FILE_INFO structs.
 *
 * This stack is never empty during parsing; while an encounter with EOF
 * can and will remove nested input sources, removing the last element
 * in the stack will not work during parsing, and the EOF condition of
 * the outermost input file remains until the parser folds up.
 */

static struct FILE_INFO *
drop_stack_do(
	struct FILE_INFO * head
	)
{
	struct FILE_INFO * tail;
	while (NULL != head) {
		tail = head->st_next;
		lex_close(head);
		head = tail;
	}
	return head;
}



/* Create a singleton input source on an empty lexer stack. This will
 * fail if there is already an input source, or if the underlying disk
 * file cannot be opened.
 *
 * Returns TRUE if a new input object was successfully created.
 */
int/*BOOL*/
lex_init_stack(
	const char * path,
	const char * mode
	)
{
	if (NULL != lex_stack || NULL == path)
		return FALSE;

	lex_stack = lex_open(path, mode);
	return (NULL != lex_stack);
}

/* This removes *all* input sources from the stack, leaving the head
 * pointer as NULL. Any attempt to parse in that state is likely to bomb
 * with segmentation faults or the like.
 *
 * In other words: Use this to clean up after parsing, and do not parse
 * anything until the next 'lex_init_stack()' succeeds.
 */
void
lex_drop_stack(void)
{
	lex_stack = drop_stack_do(lex_stack);
}

/* Flush the lexer input stack: This drops all nested input objects from
 * the stack (but keeps the current top-of-stack) and marks the
 * top-of-stack as inactive. Any further calls to lex_getch yield only
 * EOF, and it's no longer possible to push something back.
 *
 * Returns TRUE if there is a head element (top-of-stack) that was not
 * in the force-eof mode before this call.
 */
int/*BOOL*/
lex_flush_stack(void)
{
	int retv = FALSE;

	if (NULL != lex_stack) {
		retv = !lex_stack->force_eof;
		lex_stack->force_eof = TRUE;
		lex_stack->st_next = drop_stack_do(
					lex_stack->st_next);
	}
	return retv;
}

/* Push another file on the parsing stack. If the mode is NULL, create a
 * FILE_INFO suitable for in-memory parsing; otherwise, create a
 * FILE_INFO that is bound to a local/disk file. Note that 'path' must
 * not be NULL, or the function will fail.
 *
 * Returns TRUE if a new info record was pushed onto the stack.
 */
int/*BOOL*/ lex_push_file(
	const char * path,
	const char * mode
	)
{
	struct FILE_INFO * next = NULL;

	if (NULL != path) {
		next = lex_open(path, mode);
		if (NULL != next) {
			next->st_next = lex_stack;
			lex_stack = next;
		}
	}
	return (NULL != next);
}
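
/*
 * Illustrative sketch (not part of the original source): how the stack
 * primitives combine when the parser handles an "includefile"
 * statement.  The file name is a placeholder and MAXINCLUDELEVEL
 * stands in for whatever depth limit the configuration code enforces.
 *
 *	if (lex_level() > MAXINCLUDELEVEL)
 *		... reject the include ...
 *	else if (!lex_push_file("included.conf", "r"))
 *		... report the open failure ...
 *
 * The matching lex_pop_file() happens in yylex() when the included
 * file hits EOF.
 */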

/* Pop, close & free the top of the include stack, unless the stack
 * contains only a singleton input object. In that case the function
 * fails, because the parser does not expect the input stack to be
 * empty.
 *
 * Returns TRUE if an object was successfully popped from the stack.
 */
int/*BOOL*/
lex_pop_file(void)
{
	struct FILE_INFO * head = lex_stack;
	struct FILE_INFO * tail = NULL;

	if (NULL != head) {
		tail = head->st_next;
		if (NULL != tail) {
			lex_stack = tail;
			lex_close(head);
		}
	}
	return (NULL != tail);
}

/* Get include nesting level. This currently loops over the stack and
 * counts elements; but since this is of concern only with an include
 * statement and the nesting depth has a small limit, there's no
 * bottleneck expected here.
 *
 * Returns the nesting level of includes, that is, the current depth of
 * the lexer input stack.
 */
size_t
lex_level(void)
{
	size_t            cnt = 0;
	struct FILE_INFO *ipf = lex_stack;

	while (NULL != ipf) {
		cnt++;
		ipf = ipf->st_next;
	}
	return cnt;
}

/* check if the current input is from a file */
int/*BOOL*/
lex_from_file(void)
{
	return (NULL != lex_stack) && (NULL != lex_stack->fpi);
}

struct FILE_INFO *
lex_current(void)
{
	/* this became so simple, it could be a macro. But then,
	 * lex_stack would need to be global...
	 */
	return lex_stack;
}


/* STATE MACHINES
 * --------------
 */

/* Keywords */
static int
is_keyword(
	char *lexeme,
	follby *pfollowedby
	)
{
	follby fb;
	int curr_s;		/* current state index */
	int token;
	int i;

	curr_s = SCANNER_INIT_S;
	token = 0;

	for (i = 0; lexeme[i]; i++) {
		while (curr_s && (lexeme[i] != SS_CH(sst[curr_s])))
			curr_s = SS_OTHER_N(sst[curr_s]);

		if (curr_s && (lexeme[i] == SS_CH(sst[curr_s]))) {
			if ('\0' == lexeme[i + 1]
			    && FOLLBY_NON_ACCEPTING
			       != SS_FB(sst[curr_s])) {
				fb = SS_FB(sst[curr_s]);
				*pfollowedby = fb;
				token = curr_s;
				break;
			}
			curr_s = SS_MATCH_N(sst[curr_s]);
		} else
			break;
	}

	return token;
}


/* Integer */
static int
is_integer(
	char *lexeme
	)
{
	int	i;
	int	is_neg;
	u_int	u_val;

	i = 0;

	/* Allow a leading minus sign */
	if (lexeme[i] == '-') {
		i++;
		is_neg = TRUE;
	} else {
		is_neg = FALSE;
	}

	/* Check that all the remaining characters are digits */
	for (; lexeme[i] != '\0'; i++) {
		if (!isdigit((u_char)lexeme[i]))
			return FALSE;
	}

	if (is_neg)
		return TRUE;

	/* Reject numbers that fit in unsigned but not in signed int */
	if (1 == sscanf(lexeme, "%u", &u_val))
		return (u_val <= INT_MAX);
	else
		return FALSE;
}


/* U_int -- assumes is_integer() has returned FALSE */
static int
is_u_int(
	char *lexeme
	)
{
	int	i;
	int	is_hex;

	i = 0;
	if ('0' == lexeme[i] && 'x' == tolower((u_char)lexeme[i + 1])) {
		i += 2;
		is_hex = TRUE;
	} else {
		is_hex = FALSE;
	}

	/* Check that all the remaining characters are (hex) digits */
	for (; lexeme[i] != '\0'; i++) {
		if (is_hex && !isxdigit((u_char)lexeme[i]))
			return FALSE;
		if (!is_hex && !isdigit((u_char)lexeme[i]))
			return FALSE;
	}

	return TRUE;
}
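
/*
 * Illustrative examples (not part of the original source) of how the
 * classifiers partition lexemes, assuming a 32-bit int:
 *
 *	"-20"         -> is_integer()  (leading '-' allowed)
 *	"3000000000"  -> is_u_int()    (too large for signed int)
 *	"0x1f"        -> is_u_int()    (hex form)
 *	"2.5e-3"      -> is_double()
 *
 * After the keyword check, yylex() tries them in exactly this order:
 * integer, then u_int, then double, falling back to a T_String.
 */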

/* Double */
static int
is_double(
	char *lexeme
	)
{
	u_int num_digits = 0;	/* Number of digits read */
	u_int i;

	i = 0;

	/* Check for an optional '+' or '-' */
	if ('+' == lexeme[i] || '-' == lexeme[i])
		i++;

	/* Read the integer part */
	for (; lexeme[i] && isdigit((u_char)lexeme[i]); i++)
		num_digits++;

	/* Check for the optional decimal point */
	if ('.' == lexeme[i]) {
		i++;
		/* Check for any digits after the decimal point */
		for (; lexeme[i] && isdigit((u_char)lexeme[i]); i++)
			num_digits++;
	}

	/*
	 * The combined number of digits in the integer part and the
	 * fraction part must not be zero at this point
	 */
	if (!num_digits)
		return 0;

	/* Check if we are done */
	if (!lexeme[i])
		return 1;

	/* There is still more input, read the exponent */
	if ('e' == tolower((u_char)lexeme[i]))
		i++;
	else
		return 0;

	/* Read an optional sign */
	if ('+' == lexeme[i] || '-' == lexeme[i])
		i++;

	/* Now read the exponent part */
	while (lexeme[i] && isdigit((u_char)lexeme[i]))
		i++;

	/* Check if we are done */
	if (!lexeme[i])
		return 1;
	else
		return 0;
}


/* is_special() - Test whether a character is one of the special
 * single-character tokens.
 */
static inline int
is_special(
	int ch
	)
{
	return strchr(special_chars, ch) != NULL;
}


static int
is_EOC(
	int ch
	)
{
	if ((old_config_style && (ch == '\n')) ||
	    (!old_config_style && (ch == ';')))
		return 1;
	return 0;
}


char *
quote_if_needed(char *str)
{
	char *	ret;
	size_t	len;
	size_t	octets;

	len = strlen(str);
	octets = len + 2 + 1;
	ret = emalloc(octets);
	if ('"' != str[0]
	    && (strcspn(str, special_chars) < len
		|| strchr(str, ' ') != NULL)) {
		snprintf(ret, octets, "\"%s\"", str);
	} else
		strlcpy(ret, str, octets);

	return ret;
}


static int
create_string_token(
	char *lexeme
	)
{
	char *pch;

	/*
	 * ignore end of line whitespace
	 */
	pch = lexeme;
	while (*pch && isspace((u_char)*pch))
		pch++;

	if (!*pch) {
		yylval.Integer = T_EOC;
		return yylval.Integer;
	}

	yylval.String = estrdup(lexeme);
	return T_String;
}


/*
 * yylex() - function that does the actual scanning.
 * Bison expects this function to be called yylex and for it to take no
 * input and return an int.
 * Conceptually yylex "returns" yylval as well as the actual return
 * value representing the token or type.
 */
int
yylex(void)
{
	static follby	followedby = FOLLBY_TOKEN;
	size_t		i;
	int		instring;
	int		yylval_was_set;
	int		converted;
	int		token;		/* The return value */
	int		ch;

	instring = FALSE;
	yylval_was_set = FALSE;

	do {
		/* Ignore whitespace at the beginning */
		while (EOF != (ch = lex_getch(lex_stack)) &&
		       isspace(ch) &&
		       !is_EOC(ch))

			; /* Null Statement */

		if (EOF == ch) {

			if ( ! lex_pop_file())
				return 0;
			token = T_EOC;
			goto normal_return;

		} else if (is_EOC(ch)) {

			/* end FOLLBY_STRINGS_TO_EOC effect */
			followedby = FOLLBY_TOKEN;
			token = T_EOC;
			goto normal_return;

		} else if (is_special(ch) && FOLLBY_TOKEN == followedby) {
			/* special chars are their own token values */
			token = ch;
			/*
			 * '=' outside simulator configuration implies
			 * a single string following as in:
			 * setvar Owner = "The Boss" default
			 */
			if ('=' == ch && old_config_style)
				followedby = FOLLBY_STRING;
			yytext[0] = (char)ch;
			yytext[1] = '\0';
			goto normal_return;
		} else
			lex_ungetch(ch, lex_stack);

		/* save the position of start of the token */
		lex_stack->tokpos = lex_stack->curpos;

		/* Read in the lexeme */
		i = 0;
		while (EOF != (ch = lex_getch(lex_stack))) {

			yytext[i] = (char)ch;

			/* Break on whitespace or a special character */
			if (isspace(ch) || is_EOC(ch)
			    || '"' == ch
			    || (FOLLBY_TOKEN == followedby
				&& is_special(ch)))
				break;

			/* Read the rest of the line on reading a
			   start-of-comment character */
			if ('#' == ch) {
				while (EOF != (ch = lex_getch(lex_stack))
				       && '\n' != ch)
					; /* Null Statement */
				break;
			}

			i++;
			if (i >= COUNTOF(yytext))
				goto lex_too_long;
		}
		/* Pick up all of the string between " marks, to the
		 * end of the line. If we make it to EOL without a
		 * terminating ", assume it for them.
		 *
		 * XXX - HMS: I'm not sure we want to assume the closing "
		 */
		if ('"' == ch) {
			instring = TRUE;
			while (EOF != (ch = lex_getch(lex_stack)) &&
			       ch != '"' && ch != '\n') {
				yytext[i++] = (char)ch;
				if (i >= COUNTOF(yytext))
					goto lex_too_long;
			}
			/*
			 * yytext[i] will be pushed back as not part of
			 * this lexeme, but any closing quote should
			 * not be pushed back, so we read another char.
			 */
			if ('"' == ch)
				ch = lex_getch(lex_stack);
		}
		/* Pushback the last character read that is not a part
		 * of this lexeme. This fails silently if ch is EOF,
		 * but then the EOF condition persists and is handled on
		 * the next turn by the include stack mechanism.
		 */
		lex_ungetch(ch, lex_stack);

		yytext[i] = '\0';
	} while (i == 0);

	/* Now return the desired token */

	/* First make sure that the parser is *not* expecting a string
	 * as the next token (based on the previous token that was
	 * returned) and that we haven't read a string.
	 */

	if (followedby == FOLLBY_TOKEN && !instring) {
		token = is_keyword(yytext, &followedby);
		if (token) {
			/*
			 * T_Server is exceptional as it forces the
			 * following token to be a string in the
			 * non-simulator parts of the configuration,
			 * but in the simulator configuration section,
			 * "server" is followed by "=" which must be
			 * recognized as a token not a string.
			 */
			if (T_Server == token && !old_config_style)
				followedby = FOLLBY_TOKEN;
			goto normal_return;
		} else if (is_integer(yytext)) {
			yylval_was_set = TRUE;
			errno = 0;
			if ((yylval.Integer = strtol(yytext, NULL, 10)) == 0
			    && ((errno == EINVAL) || (errno == ERANGE))) {
				msyslog(LOG_ERR,
					"Integer cannot be represented: %s",
					yytext);
				if (lex_from_file()) {
					exit(1);
				} else {
					/* force end of parsing */
					yylval.Integer = 0;
					return 0;
				}
			}
			token = T_Integer;
			goto normal_return;
		} else if (is_u_int(yytext)) {
			yylval_was_set = TRUE;
			if ('0' == yytext[0] &&
			    'x' == tolower((u_char)yytext[1]))
				converted = sscanf(&yytext[2], "%x",
						   &yylval.U_int);
			else
				converted = sscanf(yytext, "%u",
						   &yylval.U_int);
			if (1 != converted) {
				msyslog(LOG_ERR,
					"U_int cannot be represented: %s",
					yytext);
				if (lex_from_file()) {
					exit(1);
				} else {
					/* force end of parsing */
					yylval.Integer = 0;
					return 0;
				}
			}
			token = T_U_int;
			goto normal_return;
		} else if (is_double(yytext)) {
			yylval_was_set = TRUE;
			errno = 0;
			if ((yylval.Double = atof(yytext)) == 0 && errno == ERANGE) {
				msyslog(LOG_ERR,
					"Double too large to represent: %s",
					yytext);
				exit(1);
			} else {
				token = T_Double;
				goto normal_return;
			}
		} else {
			/* Default: Everything is a string */
			yylval_was_set = TRUE;
			token = create_string_token(yytext);
			goto normal_return;
		}
	}

	/*
	 * Either followedby is not FOLLBY_TOKEN or this lexeme is part
	 * of a string. Hence, we need to return T_String.
	 *
	 * _Except_ we might have a -4 or -6 flag on an association
	 * configuration line (server, peer, pool, etc.).
	 *
	 * This is a terrible hack, but the grammar is ambiguous so we
	 * don't have a choice. [SK]
	 *
	 * The ambiguity is in the keyword scanner, not ntp_parser.y.
	 * We do not require server addresses be quoted in ntp.conf,
	 * complicating the scanner's job. To avoid trying (and
	 * failing) to match an IP address or DNS name to a keyword,
	 * the association keywords use FOLLBY_STRING in the keyword
	 * table, which tells the scanner to force the next token to be
	 * a T_String, so it does not try to match a keyword but rather
	 * expects a string when -4/-6 modifiers to server, peer, etc.
	 * are encountered.
	 * restrict -4 and restrict -6 parsing works correctly without
	 * this hack, as restrict uses FOLLBY_TOKEN. [DH]
	 */
	if ('-' == yytext[0]) {
		if ('4' == yytext[1]) {
			token = T_Ipv4_flag;
			goto normal_return;
		} else if ('6' == yytext[1]) {
			token = T_Ipv6_flag;
			goto normal_return;
		}
	}

	if (FOLLBY_STRING == followedby)
		followedby = FOLLBY_TOKEN;

	yylval_was_set = TRUE;
	token = create_string_token(yytext);

  normal_return:
	if (T_EOC == token)
		DPRINTF(10, ("\t<end of command>\n"));
	else
		DPRINTF(10, ("yylex: lexeme '%s' -> %s\n", yytext,
			     token_name(token)));

	if (!yylval_was_set)
		yylval.Integer = token;

	return token;

  lex_too_long:
	/*
	 * DLH: What is the purpose of the limit of 50?
	 * Is there any reason for yytext[] to be bigger?
	 */
	yytext[min(sizeof(yytext) - 1, 50)] = 0;
	msyslog(LOG_ERR,
		"configuration item on line %d longer than limit of %lu, began with '%s'",
		lex_stack->curpos.nline, (u_long)min(sizeof(yytext) - 1, 50),
		yytext);

	/*
	 * If we hit the length limit reading the startup configuration
	 * file, abort.
	 */
	if (lex_from_file())
		exit(sizeof(yytext) - 1);

	/*
	 * If it's runtime configuration via ntpq :config, treat it as
	 * if the configuration text ended before the too-long lexeme,
	 * hostname, or string.
	 */
	yylval.Integer = 0;
	return 0;
}
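
/*
 * Illustrative sketch (not part of the original source): the Bison
 * interaction described in the yylex() header comment boils down to a
 * driver loop roughly like the one below; the parser generated from
 * ntp_parser.y calls yylex() repeatedly and reads the semantic value
 * from the global yylval union.  The file name is a placeholder.
 *
 *	if (lex_init_stack("ntp.conf", "r")) {
 *		yyparse();		(calls yylex() until it returns 0)
 *		lex_drop_stack();
 *	}
 */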