/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * esclex.c -- lexer for esc
 *
 * this module provides the lexical analysis and error handling routines
 * expected by the yacc-generated parser (i.e. yylex() and yyerror()).
 * it also does lots of tracking of things like filenames, line numbers,
 * and what tokens are seen on a line up to the point where a syntax error
 * was found.  this module also arranges for the input source files to
 * be run through cpp.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <stdio.h>
#include <ctype.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#include <errno.h>
#include "out.h"
#include "alloc.h"
#include "stats.h"
#include "stable.h"
#include "lut.h"
#include "literals.h"
#include "tree.h"
#include "esclex.h"
#include "eftread.h"
#include "check.h"
#include "y.tab.h"

/* ridiculously long token buffer -- disallow any token longer than this */
#define	MAXTOK	8192
static char Tok[MAXTOK];

/* some misc stats we keep on the lexer & parser */
static struct stats *Tokcount;
static struct stats *Lexelapse;
struct stats *Filecount;
struct filestats {
	struct filestats *next;
	struct stats *stats;
} *Fstats;

static int Errcount;

/* input file state */
static char **Files;
static const char *Fileopened;
static FILE *Fp;
static int Line;
static const char *File;
static const char *Cpp = "/usr/bin/cpp";
#ifdef	ESC
static const char *Cppargs;
static const char *Cppstdargs = "-undef -Y.";
#endif	/* ESC */

/* for debugging */
static int Lexecho;	/* echo tokens as we read them */

/* forward declarations of our internal routines */
static int record(int tok, const char *s);
static void dumpline(int flags);
static void doident();
static void dopragma(const char *tok);

/*
 * table of reserved words.  this table is only used by lex_init()
 * to initialize the Rwords lookup table.
 */
static const struct {
	const char *word;
	const int val;
} Rwords[] = {
	{ "asru", ASRU },
	{ "div", DIV },
	{ "engine", ENGINE },
	{ "event", EVENT },
	{ "fru", FRU },
	{ "if", IF },
	{ "mask", MASK },
	{ "prop", PROP },
	{ "config", CONFIG },
	/*
	 * PATHFUNC indicates functions that operate only on paths
	 * and quotes
	 */
	{ "is_connected", PATHFUNC },
	{ "is_under", PATHFUNC },
};
/*
 * Rwordslut is a lookup table of reserved words.  the lhs is the word
 * (in the string table) and the rhs is the token value returned
 * by yylex() for that word.
 */
static struct lut *Rwordslut;

static const struct {
	const char *suffix;
	const unsigned long long nsec;
} Timesuffix[] = {
	{ "nanosecond", 1ULL },
	{ "nanoseconds", 1ULL },
	{ "nsec", 1ULL },
	{ "nsecs", 1ULL },
	{ "ns", 1ULL },
	{ "microsecond", 1000ULL },
	{ "microseconds", 1000ULL },
	{ "usec", 1000ULL },
	{ "usecs", 1000ULL },
	{ "us", 1000ULL },
	{ "millisecond", 1000000ULL },
	{ "milliseconds", 1000000ULL },
	{ "msec", 1000000ULL },
	{ "msecs", 1000000ULL },
	{ "ms", 1000000ULL },
	{ "second", 1000000000ULL },
	{ "seconds", 1000000000ULL },
	{ "s", 1000000000ULL },
	{ "minute", 1000000000ULL * 60 },
	{ "minutes", 1000000000ULL * 60 },
	{ "min", 1000000000ULL * 60 },
	{ "mins", 1000000000ULL * 60 },
	{ "m", 1000000000ULL * 60 },
	{ "hour", 1000000000ULL * 60 * 60 },
	{ "hours", 1000000000ULL * 60 * 60 },
	{ "hr", 1000000000ULL * 60 * 60 },
	{ "hrs", 1000000000ULL * 60 * 60 },
	{ "h", 1000000000ULL * 60 * 60 },
	{ "day", 1000000000ULL * 60 * 60 * 24 },
	{ "days", 1000000000ULL * 60 * 60 * 24 },
	{ "d", 1000000000ULL * 60 * 60 * 24 },
	{ "week", 1000000000ULL * 60 * 60 * 24 * 7 },
	{ "weeks", 1000000000ULL * 60 * 60 * 24 * 7 },
	{ "wk", 1000000000ULL * 60 * 60 * 24 * 7 },
	{ "wks", 1000000000ULL * 60 * 60 * 24 * 7 },
	{ "month", 1000000000ULL * 60 * 60 * 24 * 30 },
	{ "months", 1000000000ULL * 60 * 60 * 24 * 30 },
	{ "year", 1000000000ULL * 60 * 60 * 24 * 365 },
	{ "years", 1000000000ULL * 60 * 60 * 24 * 365 },
	{ "yr", 1000000000ULL * 60 * 60 * 24 * 365 },
	{ "yrs", 1000000000ULL * 60 * 60 * 24 * 365 },
};
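
/*
 * lex_init() enters each suffix above into Timesuffixlut (keyed by the
 * string-table pointer), so lex_s2ullp_lut_lookup() maps a suffix back
 * to a pointer to its nanosecond multiplier.  for example,
 * stable("msec") yields a pointer to 1000000ULL.
 */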

/*
 * some wrappers around the general lut functions to provide type checking...
 */

static struct lut *
lex_s2i_lut_add(struct lut *root, const char *s, int i)
{
	return (lut_add(root, (void *)s, (void *)i, NULL));
}

static int
lex_s2i_lut_lookup(struct lut *root, const char *s)
{
	return ((int)lut_lookup(root, (void *)s, NULL));
}

static struct lut *
lex_s2ullp_lut_add(struct lut *root, const char *s,
    const unsigned long long *ullp)
{
	return (lut_add(root, (void *)s, (void *)ullp, NULL));
}

const unsigned long long *
lex_s2ullp_lut_lookup(struct lut *root, const char *s)
{
	return ((unsigned long long *)lut_lookup(root, (void *)s, NULL));
}

/*
 * lex_init -- initialize the lexer with appropriate filenames & debug flags
 */

/*ARGSUSED*/
void
lex_init(char **av, const char *cppargs, int lexecho)
{
	int i;
#ifdef	ESC
	const char *ptr;
#endif	/* ESC */

	Lexecho = lexecho;
	Tokcount = stats_new_counter("lex.tokens", "total tokens in", 1);
	Filecount = stats_new_counter("lex.files", "total files read", 0);
	Lexelapse = stats_new_elapse("lex.time", "elapsed lex/parse time", 1);

#ifdef	ESC
	Cppargs = cppargs;

	/* allow user to tell us where cpp is if it is in some weird place */
	if (ptr = getenv("_ESC_CPP"))
		Cpp = ptr;

	/* and in case it takes some special stdargs */
	if (ptr = getenv("_ESC_CPP_STDARGS"))
		Cppstdargs = ptr;

	/* verify we can find cpp */
	if (access(Cpp, X_OK) < 0) {
		Cpp = "/usr/lib/cpp";
		if (access(Cpp, X_OK) < 0)
			out(O_DIE, "can't locate cpp");
	}
#endif	/* ESC */

	Files = av;

	/* verify we can find all the input files */
	while (*av) {
		if (strlen(*av) >= MAXTOK - strlen(Cpp) - 3)
			out(O_DIE, "filename too long: %.100s...", *av);
		if (access(*av, R_OK) < 0)
			out(O_DIE|O_SYS, "%s", *av);
		av++;
		stats_counter_bump(Filecount);
	}

	/* put reserved words into the string table & a lookup table */
	for (i = 0; i < sizeof (Rwords) / sizeof (*Rwords); i++)
		Rwordslut = lex_s2i_lut_add(Rwordslut,
		    stable(Rwords[i].word), Rwords[i].val);

	/* initialize table of timeval suffixes */
	for (i = 0; i < sizeof (Timesuffix) / sizeof (*Timesuffix); i++) {
		Timesuffixlut = lex_s2ullp_lut_add(Timesuffixlut,
		    stable(Timesuffix[i].suffix), &Timesuffix[i].nsec);
	}

	/* record start time */
	stats_elapse_start(Lexelapse);
}

void
closefile(void)
{
	if (Fp != NULL) {
#ifdef	ESC
		if (pclose(Fp) > 0)
			out(O_DIE, "cpp errors while reading \"%s\", "
			    "bailing out.", Fileopened);
#else
		(void) fclose(Fp);
#endif	/* ESC */
	}
	Fp = NULL;
}

/*
 * yylex -- the lexer, called yylex() because that's what yacc wants
 */
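/*
 * single-character tokens are returned as the character itself;
 * multi-character operators, numbers, strings, identifiers and reserved
 * words are returned as the token values from y.tab.h.  in every case
 * record() fills in yylval.tok (string, filename and line number)
 * before the token is handed to the parser.
 */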

int
yylex()
{
	int c;
	int nextc;
	char *ptr = Tok;
	char *eptr = &Tok[MAXTOK];
	const char *cptr;
	int startline;
	int val;
	static int bol = 1;	/* true if we're at beginning of line */

	for (;;) {
		while (Fp == NULL) {
			if (*Files == NULL)
				return (record(EOF, NULL));
			Fileopened = stable(*Files++);
#ifdef	ESC
			sprintf(Tok, "%s %s %s %s",
			    Cpp, Cppstdargs, Cppargs, Fileopened);
			if ((Fp = popen(Tok, "r")) == NULL)
				out(O_DIE|O_SYS, "%s", Tok);
#else
			Fp = eftread_fopen(Fileopened);
#endif	/* ESC */
			Line = 1;
			bol = 1;

			/* add name to stats for visibility */
			if (Fp != NULL) {
				static int fnum;
				char nbuf[100];
				struct filestats *nfs = MALLOC(sizeof (*nfs));

				(void) sprintf(nbuf, "lex.file%d", fnum++);
				nfs->stats = stats_new_string(nbuf, "", 0);
				stats_string_set(nfs->stats, Fileopened);
				nfs->next = Fstats;
				Fstats = nfs;
			}
		}

		switch (c = getc(Fp)) {
		case '#':
			/* enforce that we're at beginning of line */
			if (!bol)
				return (record(c, NULL));

			while ((c = getc(Fp)) != EOF &&
			    (c == ' ' || c == '\t'))
				;
			if (!isdigit(c)) {
				/*
				 * three cases here:
				 *	#pragma
				 *	#ident
				 *	#something-we-don't-understand
				 * anything we don't expect we just ignore.
				 */
				*ptr++ = c;
				while ((c = getc(Fp)) != EOF && isalnum(c))
					if (ptr < eptr - 1)
						*ptr++ = c;
				*ptr++ = '\0';
				if (strcmp(Tok, "pragma") == 0) {
					/* skip white space */
					while ((c = getc(Fp)) != EOF &&
					    (c == ' ' || c == '\t'))
						;

					if (c == EOF || c == '\n')
						outfl(O_DIE, File, Line,
						    "bad #pragma");

					/* pull in next token */
					ptr = Tok;
					*ptr++ = c;
					while ((c = getc(Fp)) != EOF &&
					    !isspace(c))
						if (ptr < eptr - 1)
							*ptr++ = c;
					*ptr++ = '\0';
					(void) ungetc(c, Fp);

					dopragma(Tok);
				} else if (strcmp(Tok, "ident") == 0)
					doident();
			} else {
				/* handle file & line info from cpp */
				Line = 0;
				do {
					if (!isdigit(c))
						break;
					Line = Line * 10 + c - '0';
				} while ((c = getc(Fp)) != EOF);
				Line--;		/* newline will increment it */
				while (c != EOF && isspace(c))
					c = getc(Fp);
				if (c != '"')
					outfl(O_DIE, File, Line,
					    "bad # statement (file name)");
				while ((c = getc(Fp)) != EOF && c != '"')
					if (ptr < eptr - 1)
						*ptr++ = c;
				*ptr++ = '\0';
				if (c != '"')
					outfl(O_DIE, File, Line,
					    "bad # statement (quotes)");
				File = stable(Tok);
			}
			/* skip the rest of the cpp line */
			while ((c = getc(Fp)) != EOF && c != '\n' && c != '\r')
				;
			if (c == EOF)
				return (record(c, NULL));
			else
				(void) ungetc(c, Fp);
			ptr = Tok;
			break;

		case EOF:
			closefile();
			continue;

		case '\n':
			Line++;
			bol = 1;
			break;

		case '\r':
		case ' ':
		case '\t':
			bol = 0;
			break;

		case '/':
			bol = 0;
			/* comment handling */
			if ((nextc = getc(Fp)) == EOF)
				outfl(O_DIE, File, Line, "unexpected EOF");
			else if (nextc == '*') {
				startline = Line;
				while ((c = getc(Fp)) != EOF) {
					if (c == '\n')
						Line++;
					else if (c == '*' &&
					    (((c = getc(Fp)) == EOF) ||
					    (c == '/')))
						break;
				}
				if (c == EOF) {
					outfl(O_DIE, File, Line,
					    "end of comment not seen "
					    "(started on line %d)",
					    startline);
				}
			} else {
				/* wasn't a comment, return the '/' token */
				(void) ungetc(nextc, Fp);
				return (record(c, NULL));
			}
			break;

		case '"': {
			int prevc;

			bol = 0;
			prevc = '\0';
			/* quoted string handling */
			startline = Line;
			for (;;) {
				c = getc(Fp);
				if (c == EOF)
					outfl(O_DIE, File, Line,
					    "end of string not seen "
					    "(started on line %d)",
					    startline);
				else if (c == '\n')
					Line++;
				else if (c == '"' && prevc != '\\')
					break;
				else if (ptr < eptr)
					*ptr++ = c;
				prevc = c;
			}
			if (ptr >= eptr)
				outfl(O_DIE, File, Line, "string too long");
			*ptr++ = '\0';
			return (record(QUOTE, stable(Tok)));
		}

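		/*
		 * the cases below handle operators that may be one or two
		 * characters long (&&, ||, !=, ==, ->, <=, <<, >= and >>).
		 * when the second character doesn't complete an operator it
		 * is pushed back and the first character is returned alone.
		 */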
		case '&':
			bol = 0;
			/* && */
			if ((nextc = getc(Fp)) == '&')
				return (record(AND, NULL));
			else {
				(void) ungetc(nextc, Fp);
				return (record(c, NULL));
			}
			/*NOTREACHED*/
			break;

		case '|':
			bol = 0;
			/* || */
			if ((nextc = getc(Fp)) == '|')
				return (record(OR, NULL));
			else {
				(void) ungetc(nextc, Fp);
				return (record(c, NULL));
			}
			/*NOTREACHED*/
			break;

		case '!':
			bol = 0;
			/* ! or != */
			if ((nextc = getc(Fp)) == '=')
				return (record(NE, NULL));
			else {
				(void) ungetc(nextc, Fp);
				return (record(c, NULL));
			}
			/*NOTREACHED*/
			break;

		case '=':
			bol = 0;
			/* == */
			if ((nextc = getc(Fp)) == '=')
				return (record(EQ, NULL));
			else {
				(void) ungetc(nextc, Fp);
				return (record(c, NULL));
			}
			/*NOTREACHED*/
			break;

		case '-':
			bol = 0;
			/* -> */
			if ((nextc = getc(Fp)) == '>')
				return (record(ARROW, stable(Tok)));
			else {
				(void) ungetc(nextc, Fp);
				return (record(c, NULL));
			}
			/*NOTREACHED*/
			break;

		case '<':
			bol = 0;
			if ((nextc = getc(Fp)) == '=')
				/* <= */
				return (record(LE, NULL));
			else if (nextc == '<')
				/* << */
				return (record(LSHIFT, NULL));
			else {
				(void) ungetc(nextc, Fp);
				return (record(c, NULL));
			}
			/*NOTREACHED*/
			break;

		case '>':
			bol = 0;
			if ((nextc = getc(Fp)) == '=')
				/* >= */
				return (record(GE, NULL));
			else if (nextc == '>')
				/* >> */
				return (record(RSHIFT, NULL));
			else {
				(void) ungetc(nextc, Fp);
				return (record(c, NULL));
			}
			/*NOTREACHED*/
			break;

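		/*
		 * anything else is a number, an identifier/reserved word,
		 * or a single-character token.  numbers are collected
		 * according to their base ("0x"/"0X" prefix for hex, a
		 * leading zero for octal, decimal otherwise); the switch
		 * below relies on falling through from the hex case to the
		 * decimal and octal digit ranges, and the first character
		 * not valid for the base ends the token.
		 */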
		default:
			bol = 0;
			if (isdigit(c)) {
				int base;

				/* collect rest of number */
				if (c == '0') {
					*ptr++ = c;
					if ((c = getc(Fp)) == EOF) {
						*ptr++ = '\0';
						return (record(NUMBER,
						    stable(Tok)));
					} else if (c == 'x' || c == 'X') {
						*ptr++ = c;
						base = 16;
					} else {
						(void) ungetc(c, Fp);
						base = 8;
					}
				} else {
					*ptr++ = c;
					base = 10;
				}
				while ((c = getc(Fp)) != EOF) {
					if (ptr >= eptr)
						outfl(O_DIE, File, Line,
						    "number too long");

					switch (base) {
					case 16:
						if (c >= 'a' && c <= 'f' ||
						    c >= 'A' && c <= 'F') {
							*ptr++ = c;
							continue;
						}
						/*FALLTHRU*/
					case 10:
						if (c >= '8' && c <= '9') {
							*ptr++ = c;
							continue;
						}
						/*FALLTHRU*/
					case 8:
						if (c >= '0' && c <= '7') {
							*ptr++ = c;
							continue;
						}
						/* not valid for this base */
						*ptr++ = '\0';
						(void) ungetc(c, Fp);
						return (record(NUMBER,
						    stable(Tok)));
					}
				}
				*ptr++ = '\0';
				return (record(NUMBER, stable(Tok)));
			} else if (isalpha(c)) {
				/* collect identifier */
				*ptr++ = c;
				for (;;) {
					c = getc(Fp);
					if ((isalnum(c) || c == '_') &&
					    ptr < eptr)
						*ptr++ = c;
					else {
						(void) ungetc(c, Fp);
						break;
					}
				}
				if (ptr >= eptr)
					outfl(O_DIE, File, Line,
					    "identifier too long");
				*ptr++ = '\0';
				cptr = stable(Tok);
				if (val = lex_s2i_lut_lookup(Rwordslut, cptr)) {
					return (record(val, cptr));
				}
				return (record(ID, cptr));
			} else
				return (record(c, NULL));
		}
		/*NOTREACHED*/
	}
}

/*
 * the record()/dumpline() routines are used to track & report
 * the list of tokens seen on a given line.  this is used in two ways.
 * first, syntax errors found by the parser are reported by us (via
 * yyerror()) and we tack on the tokens processed so far on the current
 * line to help indicate exactly where the error is.  second, if "lexecho"
 * debugging is turned on, these routines provide it.
 */
#define	MAXRECORD 1000
static int Recordedline;
static struct {
	int tok;
	const char *s;
} Recorded[MAXRECORD];
static int Recordnext;

static int
record(int tok, const char *s)
{
	stats_counter_bump(Tokcount);
	if (Line != Recordedline) {
		/* starting new line, dump out the previous line */
		if (Lexecho && Recordedline) {
			outfl(O_NONL, File, Recordedline, "lex: ");
			dumpline(O_OK);
		}
		Recordedline = Line;
		Recordnext = 0;
	}
	if (Recordnext >= MAXRECORD)
		outfl(O_DIE, File, Line, "line too long, bailing out");
	Recorded[Recordnext].tok = tok;
	Recorded[Recordnext++].s = s;

	yylval.tok.s = s;
	yylval.tok.file = File;
	yylval.tok.line = Line;
	return (tok);
}

/*ARGSUSED*/
static void
dumpline(int flags)
{
	int i;

	for (i = 0; i < Recordnext; i++)
		if (Recorded[i].s && Recorded[i].tok != ARROW)
			switch (Recorded[i].tok) {
			case T_QUOTE:
				out(flags|O_NONL, " \"%s\"",
				    Recorded[i].s);
				break;

			default:
				out(flags|O_NONL, " %s",
				    Recorded[i].s);
				break;
			}
		else
			switch (Recorded[i].tok) {
			case EOF:
				out(flags|O_NONL, " EOF");
				break;
			case ARROW:
				out(flags|O_NONL, " ->%s",
				    Recorded[i].s);
				break;
			case EQ:
				out(flags|O_NONL, " ==");
				break;
			case NE:
				out(flags|O_NONL, " !=");
				break;
			case OR:
				out(flags|O_NONL, " ||");
				break;
			case AND:
				out(flags|O_NONL, " &&");
				break;
			case LE:
				out(flags|O_NONL, " <=");
				break;
			case GE:
				out(flags|O_NONL, " >=");
				break;
			case LSHIFT:
				out(flags|O_NONL, " <<");
				break;
			case RSHIFT:
				out(flags|O_NONL, " >>");
				break;
			default:
				if (isprint(Recorded[i].tok))
					out(flags|O_NONL, " %c",
					    Recorded[i].tok);
				else
					out(flags|O_NONL, " '\\%03o'",
					    Recorded[i].tok);
				break;
			}
	out(flags, NULL);
}

/*
 * yyerror -- report a parser error, called yyerror because yacc wants it
 */

void
yyerror(const char *s)
{
	Errcount++;
	outfl(O_ERR|O_NONL, File, Line, "%s, tokens: ", s);
	dumpline(O_ERR);
}

/*
 * doident -- handle "#pragma ident" directives
 */
static void
doident()
{
	int c;
	char *ptr = Tok;
	char *eptr = &Tok[MAXTOK];

	/* skip white space and quotes */
	while ((c = getc(Fp)) != EOF &&
	    (c == ' ' || c == '\t' || c == '"'))
		;

	if (c == EOF || c == '\n')
		outfl(O_DIE, File, Line, "bad ident");

	/* pull in next token */
	ptr = Tok;
	*ptr++ = c;
	while ((c = getc(Fp)) != EOF && c != '"' && c != '\n')
		if (ptr < eptr - 1)
			*ptr++ = c;
	*ptr++ = '\0';
	if (c != '\n') {
		/* skip to end of line (including close quote, if any) */
		while ((c = getc(Fp)) != EOF && c != '\n')
			;
	}
	(void) ungetc(c, Fp);
	Ident = lut_add(Ident, (void *)stable(Tok), (void *)0, NULL);

	outfl(O_VERB, File, Line, "pragma set: ident \"%s\"", Tok);
}

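/*
 * dodictionary() and doallow_cycles() below follow the same pattern as
 * doident(): skip leading white space and quotes, collect a single token,
 * then consume the rest of the line.
 */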
/*
 * dodictionary -- handle "#pragma dictionary" directives
 */
static void
dodictionary()
{
	int c;
	char *ptr = Tok;
	char *eptr = &Tok[MAXTOK];

	/* skip white space and quotes */
	while ((c = getc(Fp)) != EOF &&
	    (c == ' ' || c == '\t' || c == '"'))
		;

	if (c == EOF || c == '\n')
		outfl(O_DIE, File, Line, "bad dictionary");

	/* pull in next token */
	ptr = Tok;
	*ptr++ = c;
	while ((c = getc(Fp)) != EOF && c != '"' && c != '\n')
		if (ptr < eptr - 1)
			*ptr++ = c;
	*ptr++ = '\0';
	if (c != '\n') {
		/* skip to end of line (including close quote, if any) */
		while ((c = getc(Fp)) != EOF && c != '\n')
			;
	}
	(void) ungetc(c, Fp);
	Dicts = lut_add(Dicts, (void *)stable(Tok), (void *)0, NULL);

	outfl(O_VERB, File, Line, "pragma set: dictionary \"%s\"", Tok);
}

/*
 * doallow_cycles -- handle "#pragma allow_cycles" directives
 */
static void
doallow_cycles()
{
	int c;
	char *ptr = Tok;
	char *eptr = &Tok[MAXTOK];
	unsigned long long newlevel;

	/*
	 * by default the compiler does not allow cycles or loops
	 * in propagations.  when cycles are encountered, the
	 * compiler prints out an error message.
	 *
	 *	"#pragma allow_cycles" and
	 *	"#pragma allow_cycles 0"
	 * allow cycles, but any such cycle will produce a warning
	 * message.
	 *
	 *	"#pragma allow_cycles N"
	 * with N > 0 will allow cycles and not produce any
	 * warning messages.
	 */

	/* skip white space and quotes */
	while ((c = getc(Fp)) != EOF &&
	    (c == ' ' || c == '\t' || c == '"'))
		;

	if (c == EOF || c == '\n')
		newlevel = 0ULL;
	else {
		/* pull in next token */
		ptr = Tok;
		*ptr++ = c;
		while ((c = getc(Fp)) != EOF && c != '"' && c != '\n')
			if (ptr < eptr - 1)
				*ptr++ = c;
		*ptr++ = '\0';
		if (c != '\n') {
			/* skip to end of line */
			while ((c = getc(Fp)) != EOF && c != '\n')
				;
		}
		newlevel = strtoll(Tok, NULL, 0);
	}
	(void) ungetc(c, Fp);

	(void) check_cycle_level(newlevel);
	outfl(O_VERB, File, Line,
	    "pragma set: allow_cycles (%s)",
	    newlevel ? "no warnings" : "with warnings");
}

/*
 * dopragma -- handle #pragma directives
 */
static void
dopragma(const char *tok)
{
	if (strcmp(tok, "ident") == 0)
		doident();
	else if (strcmp(tok, "dictionary") == 0)
		dodictionary();
	else if (strcmp(tok, "new_errors_only") == 0) {
		if (Pragma_new_errors_only++ == 0)
			outfl(O_VERB, File, Line,
			    "pragma set: new_errors_only");
	} else if (strcmp(tok, "trust_ereports") == 0) {
		if (Pragma_trust_ereports++ == 0)
			outfl(O_VERB, File, Line,
			    "pragma set: trust_ereports");
	} else if (strcmp(tok, "allow_cycles") == 0)
		doallow_cycles();
	else
		outfl(O_VERB, File, Line,
		    "unknown pragma ignored: \"%s\"", tok);
}

/*
 * lex_fini -- finalize the lexer
 */

int
lex_fini(void)
{
	stats_elapse_stop(Lexelapse);
	closefile();
	if (Lexecho) {
		outfl(O_OK, File, Line, "lex: ");
		dumpline(O_OK);
	}
	return (Errcount);
}

void
lex_free(void)
{
	struct filestats *nfstats = Fstats;

	/*
	 * Free up memory consumed by the lexer
	 */
	stats_delete(Tokcount);
	stats_delete(Filecount);
	stats_delete(Lexelapse);
	while (nfstats != NULL) {
		Fstats = nfstats->next;
		stats_delete(nfstats->stats);
		FREE(nfstats);
		nfstats = Fstats;
	}
	lut_free(Timesuffixlut, NULL, NULL);
	lut_free(Rwordslut, NULL, NULL);
	lut_free(Ident, NULL, NULL);
	lut_free(Dicts, NULL, NULL);
}