/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * esclex.c -- lexer for esc
 *
 * this module provides the lexical analysis and error handling routines
 * expected by the yacc-generated parser (i.e. yylex() and yyerror()).
 * it also does lots of tracking of things like filenames, line numbers,
 * and what tokens are seen on a line up to the point where a syntax error
 * was found.  this module also arranges for the input source files to
 * be run through cpp.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <stdio.h>
#include <ctype.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#include <errno.h>
#include "out.h"
#include "alloc.h"
#include "stats.h"
#include "stable.h"
#include "lut.h"
#include "literals.h"
#include "tree.h"
#include "esclex.h"
#include "eftread.h"
#include "check.h"
#include "y.tab.h"

/* ridiculously long token buffer -- disallow any token longer than this */
#define	MAXTOK	8192
static char Tok[MAXTOK];

/* some misc stats we keep on the lexer & parser */
static struct stats *Tokcount;
static struct stats *Lexelapse;
struct stats *Filecount;
struct filestats {
	struct filestats *next;
	struct stats *stats;
} *Fstats;

static int Errcount;

/* input file state */
static char **Files;
static const char *Fileopened;
static FILE *Fp;
static int Line;
static const char *File;
static const char *Cpp = "/usr/bin/cpp";
#ifdef	ESC
static const char *Cppargs;
static const char *Cppstdargs = "-undef -Y.";
#endif	/* ESC */

/* for debugging */
static int Lexecho;	/* echo tokens as we read them */

/* forward declarations of our internal routines */
static int record(int tok, const char *s);
static void dumpline(int flags);
static void doident();
static void dopragma(const char *tok);

/*
 * table of reserved words.  this table is only used by lex_init()
 * to initialize the Rwordslut lookup table.
 */
static const struct {
	const char *word;
	const int val;
} Rwords[] = {
	{ "asru", ASRU },
	{ "count", COUNT },
	{ "div", DIV },
	{ "engine", ENGINE },
	{ "event", EVENT },
	{ "fru", FRU },
	{ "if", IF },
	{ "mask", MASK },
	{ "prop", PROP },
	{ "config", CONFIG },
	/*
	 * PATHFUNC indicates functions that operate only on paths
	 * and quotes
	 */
	{ "is_connected", PATHFUNC },
	{ "is_under", PATHFUNC },
};

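/*
 * Note (descriptive, from the code below): reserved words are recognized
 * in yylex() by interning each identifier with stable() and then probing
 * Rwordslut; identifiers that miss in the table come back as plain ID
 * tokens.
 */
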
/*
 * Rwordslut is a lookup table of reserved words.  The lhs is the word
 * (in the string table) and the rhs is the token value returned
 * by yylex() for that word.
 */
static struct lut *Rwordslut;

static const struct {
	const char *suffix;
	const unsigned long long nsec;
} Timesuffix[] = {
	{ "nanosecond", 1ULL },
	{ "nanoseconds", 1ULL },
	{ "nsec", 1ULL },
	{ "nsecs", 1ULL },
	{ "ns", 1ULL },
	{ "microsecond", 1000ULL },
	{ "microseconds", 1000ULL },
	{ "usec", 1000ULL },
	{ "usecs", 1000ULL },
	{ "us", 1000ULL },
	{ "millisecond", 1000000ULL },
	{ "milliseconds", 1000000ULL },
	{ "msec", 1000000ULL },
	{ "msecs", 1000000ULL },
	{ "ms", 1000000ULL },
	{ "second", 1000000000ULL },
	{ "seconds", 1000000000ULL },
	{ "s", 1000000000ULL },
	{ "minute", 1000000000ULL * 60 },
	{ "minutes", 1000000000ULL * 60 },
	{ "min", 1000000000ULL * 60 },
	{ "mins", 1000000000ULL * 60 },
	{ "m", 1000000000ULL * 60 },
	{ "hour", 1000000000ULL * 60 * 60 },
	{ "hours", 1000000000ULL * 60 * 60 },
	{ "hr", 1000000000ULL * 60 * 60 },
	{ "hrs", 1000000000ULL * 60 * 60 },
	{ "h", 1000000000ULL * 60 * 60 },
	{ "day", 1000000000ULL * 60 * 60 * 24 },
	{ "days", 1000000000ULL * 60 * 60 * 24 },
	{ "d", 1000000000ULL * 60 * 60 * 24 },
	{ "week", 1000000000ULL * 60 * 60 * 24 * 7 },
	{ "weeks", 1000000000ULL * 60 * 60 * 24 * 7 },
	{ "wk", 1000000000ULL * 60 * 60 * 24 * 7 },
	{ "wks", 1000000000ULL * 60 * 60 * 24 * 7 },
	{ "month", 1000000000ULL * 60 * 60 * 24 * 30 },
	{ "months", 1000000000ULL * 60 * 60 * 24 * 30 },
	{ "year", 1000000000ULL * 60 * 60 * 24 * 365 },
	{ "years", 1000000000ULL * 60 * 60 * 24 * 365 },
	{ "yr", 1000000000ULL * 60 * 60 * 24 * 365 },
	{ "yrs", 1000000000ULL * 60 * 60 * 24 * 365 },
};

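/*
 * Note (descriptive): lex_init() below loads each entry into Timesuffixlut
 * (declared elsewhere, presumably in esclex.h), keyed by the stable()'d
 * suffix string and valued with a pointer to the nanosecond multiplier
 * above.  lex_s2ullp_lut_lookup() is non-static so code outside this file
 * can map a timeval suffix such as "ms" to its nanosecond multiplier.
 */
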
/*
 * some wrappers around the general lut functions to provide type checking...
 */

static struct lut *
lex_s2i_lut_add(struct lut *root, const char *s, int i)
{
	return (lut_add(root, (void *)s, (void *)i, NULL));
}

static int
lex_s2i_lut_lookup(struct lut *root, const char *s)
{
	return ((int)lut_lookup(root, (void *)s, NULL));
}

static struct lut *
lex_s2ullp_lut_add(struct lut *root, const char *s,
    const unsigned long long *ullp)
{
	return (lut_add(root, (void *)s, (void *)ullp, NULL));
}

const unsigned long long *
lex_s2ullp_lut_lookup(struct lut *root, const char *s)
{
	return ((unsigned long long *)lut_lookup(root, (void *)s, NULL));
}

/*
 * lex_init -- initialize the lexer with appropriate filenames & debug flags
 */

/*ARGSUSED*/
void
lex_init(char **av, const char *cppargs, int lexecho)
{
	int i;
#ifdef	ESC
	const char *ptr;
#endif	/* ESC */

	Lexecho = lexecho;
	Tokcount = stats_new_counter("lex.tokens", "total tokens in", 1);
	Filecount = stats_new_counter("lex.files", "total files read", 0);
	Lexelapse = stats_new_elapse("lex.time", "elapsed lex/parse time", 1);

#ifdef	ESC
	Cppargs = cppargs;

	/* allow user to tell us where cpp is if it is some weird place */
	if (ptr = getenv("_ESC_CPP"))
		Cpp = ptr;

	/* and in case it takes some special stdargs */
	if (ptr = getenv("_ESC_CPP_STDARGS"))
		Cppstdargs = ptr;

	/* verify we can find cpp */
	if (access(Cpp, X_OK) < 0) {
		Cpp = "/usr/lib/cpp";
		if (access(Cpp, X_OK) < 0)
			out(O_DIE, "can't locate cpp");
	}
#endif	/* ESC */

	Files = av;

	/* verify we can find all the input files */
	while (*av) {
		if (strlen(*av) >= MAXTOK - strlen(Cpp) - 3)
			out(O_DIE, "filename too long: %.100s...", *av);
		if (access(*av, R_OK) < 0)
			out(O_DIE|O_SYS, "%s", *av);
		av++;
		stats_counter_bump(Filecount);
	}

	/* put reserved words into the string table & a lookup table */
	for (i = 0; i < sizeof (Rwords) / sizeof (*Rwords); i++)
		Rwordslut = lex_s2i_lut_add(Rwordslut,
		    stable(Rwords[i].word), Rwords[i].val);

	/* initialize table of timeval suffixes */
	for (i = 0; i < sizeof (Timesuffix) / sizeof (*Timesuffix); i++) {
		Timesuffixlut = lex_s2ullp_lut_add(Timesuffixlut,
		    stable(Timesuffix[i].suffix), &Timesuffix[i].nsec);
	}

	/* record start time */
	stats_elapse_start(Lexelapse);
}

void
closefile(void)
{
	if (Fp != NULL) {
#ifdef	ESC
		if (pclose(Fp) > 0)
			out(O_DIE, "cpp errors while reading \"%s\", "
			    "bailing out.", Fileopened);
#else
		(void) fclose(Fp);
#endif	/* ESC */
	}
	Fp = NULL;
}

/*
 * yylex -- the lexer, called yylex() because that's what yacc wants
 */

int
yylex()
{
	int c;
	int nextc;
	char *ptr = Tok;
	char *eptr = &Tok[MAXTOK];
	const char *cptr;
	int startline;
	int val;
	static int bol = 1;	/* true if we're at beginning of line */

	for (;;) {
		while (Fp == NULL) {
			if (*Files == NULL)
				return (record(EOF, NULL));
			Fileopened = stable(*Files++);
#ifdef	ESC
			sprintf(Tok, "%s %s %s %s",
			    Cpp, Cppstdargs, Cppargs, Fileopened);
			if ((Fp = popen(Tok, "r")) == NULL)
				out(O_DIE|O_SYS, "%s", Tok);
#else
			Fp = eftread_fopen(Fileopened);
#endif	/* ESC */
			Line = 1;
			bol = 1;

			/* add name to stats for visibility */
			if (Fp != NULL) {
				static int fnum;
				char nbuf[100];
				struct filestats *nfs = MALLOC(sizeof (*nfs));
				(void) sprintf(nbuf, "lex.file%d", fnum++);
				nfs->stats = stats_new_string(nbuf, "", 0);
				stats_string_set(nfs->stats, Fileopened);
				nfs->next = Fstats;
				Fstats = nfs;
			}
		}

		switch (c = getc(Fp)) {
		case '#':
			/* enforce that we're at beginning of line */
			if (!bol)
				return (record(c, NULL));

			while ((c = getc(Fp)) != EOF &&
			    (c == ' ' || c == '\t'))
				;
			if (!isdigit(c)) {
				/*
				 * three cases here:
				 *	#pragma
				 *	#ident
				 *	#something-we-don't-understand
				 * anything we don't expect we just ignore.
				 */
				*ptr++ = c;
				while ((c = getc(Fp)) != EOF && isalnum(c))
					if (ptr < eptr - 1)
						*ptr++ = c;
				*ptr++ = '\0';
				if (strcmp(Tok, "pragma") == 0) {
					/* skip white space */
					while ((c = getc(Fp)) != EOF &&
					    (c == ' ' || c == '\t'))
						;

					if (c == EOF || c == '\n')
						outfl(O_DIE, File, Line,
						    "bad #pragma");

					/* pull in next token */
					ptr = Tok;
					*ptr++ = c;
					while ((c = getc(Fp)) != EOF &&
					    !isspace(c))
						if (ptr < eptr - 1)
							*ptr++ = c;
					*ptr++ = '\0';
					(void) ungetc(c, Fp);

					dopragma(Tok);
				} else if (strcmp(Tok, "ident") == 0)
					doident();
			} else {
				/* handle file & line info from cpp */
				Line = 0;
				do {
					if (!isdigit(c))
						break;
					Line = Line * 10 + c - '0';
				} while ((c = getc(Fp)) != EOF);
				Line--;		/* newline will increment it */
				while (c != EOF && isspace(c))
					c = getc(Fp);
				if (c != '"')
					outfl(O_DIE, File, Line,
					    "bad # statement (file name)");
				while ((c = getc(Fp)) != EOF && c != '"')
					if (ptr < eptr - 1)
						*ptr++ = c;
				*ptr++ = '\0';
				if (c != '"')
					outfl(O_DIE, File, Line,
					    "bad # statement (quotes)");
				File = stable(Tok);
			}
			/* skip the rest of the cpp line */
			while ((c = getc(Fp)) != EOF && c != '\n' && c != '\r')
				;
			if (c == EOF)
				return (record(c, NULL));
			else
				(void) ungetc(c, Fp);
			ptr = Tok;
			break;

		case EOF:
			closefile();
			continue;

		case '\n':
			Line++;
			bol = 1;
			break;

		case '\r':
		case ' ':
		case '\t':
			bol = 0;
			break;

		case '/':
			bol = 0;
			/* comment handling */
			if ((nextc = getc(Fp)) == EOF)
				outfl(O_DIE, File, Line, "unexpected EOF");
			else if (nextc == '*') {
				startline = Line;
				while ((c = getc(Fp)) != EOF) {
					if (c == '\n')
						Line++;
					else if (c == '*' &&
					    (((c = getc(Fp)) == EOF) ||
					    (c == '/')))
						break;
				}
				if (c == EOF) {
					outfl(O_DIE, File, Line,
					    "end of comment not seen "
					    "(started on line %d)",
					    startline);
				}
			} else {
				/* wasn't a comment, return the '/' token */
				(void) ungetc(nextc, Fp);
				return (record(c, NULL));
			}
			break;

		case '"': {
			int prevc;

			bol = 0;
			prevc = '\0';
			/* quoted string handling */
			startline = Line;
			for (;;) {
				c = getc(Fp);
				if (c == EOF)
					outfl(O_DIE, File, Line,
					    "end of string not seen "
					    "(started on line %d)",
					    startline);
				else if (c == '\n')
					Line++;
				else if (c == '"' && prevc != '\\')
					break;
				else if (ptr < eptr)
					*ptr++ = c;
				prevc = c;
			}
			if (ptr >= eptr)
				outfl(O_DIE, File, Line, "string too long");
			*ptr++ = '\0';
			return (record(QUOTE, stable(Tok)));
		}
		case '&':
			bol = 0;
			/* && */
			if ((nextc = getc(Fp)) == '&')
				return (record(AND, NULL));
			else {
				(void) ungetc(nextc, Fp);
				return (record(c, NULL));
			}
			/*NOTREACHED*/
			break;

		case '|':
			bol = 0;
			/* || */
			if ((nextc = getc(Fp)) == '|')
				return (record(OR, NULL));
			else {
				(void) ungetc(nextc, Fp);
				return (record(c, NULL));
			}
			/*NOTREACHED*/
			break;

		case '!':
			bol = 0;
			/* ! or != */
			if ((nextc = getc(Fp)) == '=')
				return (record(NE, NULL));
			else {
				(void) ungetc(nextc, Fp);
				return (record(c, NULL));
			}
			/*NOTREACHED*/
			break;

		case '=':
			bol = 0;
			/* == */
			if ((nextc = getc(Fp)) == '=')
				return (record(EQ, NULL));
			else {
				(void) ungetc(nextc, Fp);
				return (record(c, NULL));
			}
			/*NOTREACHED*/
			break;

		case '-':
			bol = 0;
			/* -> */
			if ((nextc = getc(Fp)) == '>')
				return (record(ARROW, stable(Tok)));
			else {
				(void) ungetc(nextc, Fp);
				return (record(c, NULL));
			}
			/*NOTREACHED*/
			break;

		case '<':
			bol = 0;
			if ((nextc = getc(Fp)) == '=')
				/* <= */
				return (record(LE, NULL));
			else if (nextc == '<')
				/* << */
				return (record(LSHIFT, NULL));
			else {
				(void) ungetc(nextc, Fp);
				return (record(c, NULL));
			}
			/*NOTREACHED*/
			break;

		case '>':
			bol = 0;
			if ((nextc = getc(Fp)) == '=')
				/* >= */
				return (record(GE, NULL));
			else if (nextc == '>')
				/* >> */
				return (record(RSHIFT, NULL));
			else {
				(void) ungetc(nextc, Fp);
				return (record(c, NULL));
			}
			/*NOTREACHED*/
			break;

		default:
			bol = 0;
			if (isdigit(c)) {
				int base;

				/* collect rest of number */
				if (c == '0') {
					*ptr++ = c;
					if ((c = getc(Fp)) == EOF) {
						*ptr++ = '\0';
						return (record(NUMBER,
						    stable(Tok)));
					} else if (c == 'x' || c == 'X') {
						*ptr++ = c;
						base = 16;
					} else {
						(void) ungetc(c, Fp);
						base = 8;
					}
				} else {
					*ptr++ = c;
					base = 10;
				}
				while ((c = getc(Fp)) != EOF) {
					if (ptr >= eptr)
						outfl(O_DIE, File, Line,
						    "number too long");

					switch (base) {
					case 16:
						if (c >= 'a' && c <= 'f' ||
						    c >= 'A' && c <= 'F') {
							*ptr++ = c;
							continue;
						}
						/*FALLTHRU*/
					case 10:
						if (c >= '8' && c <= '9') {
							*ptr++ = c;
							continue;
						}
						/*FALLTHRU*/
					case 8:
						if (c >= '0' && c <= '7') {
							*ptr++ = c;
							continue;
						}
						/* not valid for this base */
						*ptr++ = '\0';
						(void) ungetc(c, Fp);
						return (record(NUMBER,
						    stable(Tok)));
					}
				}
				*ptr++ = '\0';
				return (record(NUMBER, stable(Tok)));
			} else if (isalpha(c)) {
				/* collect identifier */
				*ptr++ = c;
				for (;;) {
					c = getc(Fp);
					if ((isalnum(c) || c == '_') &&
					    ptr < eptr)
						*ptr++ = c;
					else {
						(void) ungetc(c, Fp);
						break;
					}
				}
				if (ptr >= eptr)
					outfl(O_DIE, File, Line,
					    "identifier too long");
				*ptr++ = '\0';
				cptr = stable(Tok);
				if (val = lex_s2i_lut_lookup(Rwordslut, cptr)) {
					return (record(val, cptr));
				}
				return (record(ID, cptr));
			} else
				return (record(c, NULL));
		}
		/*NOTREACHED*/
	}
}

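/*
 * Summary (descriptive) of what yylex() above can return: EOF once all
 * input files are exhausted, QUOTE for string constants, NUMBER for
 * numeric constants (the token text is kept in yylval.tok.s; no numeric
 * conversion is done here), ID for identifiers, the reserved-word tokens
 * from Rwords[], the multi-character operators AND, OR, EQ, NE, LE, GE,
 * LSHIFT, RSHIFT and ARROW, and the literal character for anything else.
 */
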
/*
 * the record()/dumpline() routines are used to track & report
 * the list of tokens seen on a given line.  this is used in two ways.
 * first, syntax errors found by the parser are reported by us (via
 * yyerror()) and we tack on the tokens processed so far on the current
 * line to help indicate exactly where the error is.  second, if "lexecho"
 * debugging is turned on, these routines provide it.
 */
#define	MAXRECORD 1000
static int Recordedline;
static struct {
	int tok;
	const char *s;
} Recorded[MAXRECORD];
static int Recordnext;

static int
record(int tok, const char *s)
{
	stats_counter_bump(Tokcount);
	if (Line != Recordedline) {
		/* starting new line, dump out the previous line */
		if (Lexecho && Recordedline) {
			outfl(O_NONL, File, Recordedline, "lex: ");
			dumpline(O_OK);
		}
		Recordedline = Line;
		Recordnext = 0;
	}
	if (Recordnext >= MAXRECORD)
		outfl(O_DIE, File, Line, "line too long, bailing out");
	Recorded[Recordnext].tok = tok;
	Recorded[Recordnext++].s = s;

	yylval.tok.s = s;
	yylval.tok.file = File;
	yylval.tok.line = Line;
	return (tok);
}

/*ARGSUSED*/
static void
dumpline(int flags)
{
	int i;

	for (i = 0; i < Recordnext; i++)
		if (Recorded[i].s && Recorded[i].tok != ARROW)
			switch (Recorded[i].tok) {
			case T_QUOTE:
				out(flags|O_NONL, " \"%s\"",
				    Recorded[i].s);
				break;

			default:
				out(flags|O_NONL, " %s",
				    Recorded[i].s);
				break;
			}
		else
			switch (Recorded[i].tok) {
			case EOF:
				out(flags|O_NONL, " EOF");
				break;
			case ARROW:
				out(flags|O_NONL, " ->%s",
				    Recorded[i].s);
				break;
			case EQ:
				out(flags|O_NONL, " ==");
				break;
			case NE:
				out(flags|O_NONL, " !=");
				break;
			case OR:
				out(flags|O_NONL, " ||");
				break;
			case AND:
				out(flags|O_NONL, " &&");
				break;
			case LE:
				out(flags|O_NONL, " <=");
				break;
			case GE:
				out(flags|O_NONL, " >=");
				break;
			case LSHIFT:
				out(flags|O_NONL, " <<");
				break;
			case RSHIFT:
				out(flags|O_NONL, " >>");
				break;
			default:
				if (isprint(Recorded[i].tok))
					out(flags|O_NONL, " %c",
					    Recorded[i].tok);
				else
					out(flags|O_NONL, " '\\%03o'",
					    Recorded[i].tok);
				break;
			}
	out(flags, NULL);
}

/*
 * yyerror -- report a parser error, called yyerror because yacc wants it
 */

void
yyerror(const char *s)
{
	Errcount++;
	outfl(O_ERR|O_NONL, File, Line, "%s, tokens: ", s);
	dumpline(O_ERR);
}

/*
 * doident -- handle "#pragma ident" directives
 */
static void
doident()
{
	int c;
	char *ptr = Tok;
	char *eptr = &Tok[MAXTOK];

	/* skip white space and quotes */
	while ((c = getc(Fp)) != EOF &&
	    (c == ' ' || c == '\t' || c == '"'))
		;

	if (c == EOF || c == '\n')
		outfl(O_DIE, File, Line, "bad ident");

	/* pull in next token */
	ptr = Tok;
	*ptr++ = c;
	while ((c = getc(Fp)) != EOF && c != '"' && c != '\n')
		if (ptr < eptr - 1)
			*ptr++ = c;
	*ptr++ = '\0';
	if (c != '\n') {
		/* skip to end of line (including close quote, if any) */
		while ((c = getc(Fp)) != EOF && c != '\n')
			;
	}
	(void) ungetc(c, Fp);
	Ident = lut_add(Ident, (void *)stable(Tok), (void *)0, NULL);

	outfl(O_VERB, File, Line, "pragma set: ident \"%s\"", Tok);
}

/*
 * dodictionary -- handle "#pragma dictionary" directives
 */
static void
dodictionary()
{
	int c;
	char *ptr = Tok;
	char *eptr = &Tok[MAXTOK];

	/* skip white space and quotes */
	while ((c = getc(Fp)) != EOF &&
	    (c == ' ' || c == '\t' || c == '"'))
		;

	if (c == EOF || c == '\n')
		outfl(O_DIE, File, Line, "bad dictionary");

	/* pull in next token */
	ptr = Tok;
	*ptr++ = c;
	while ((c = getc(Fp)) != EOF && c != '"' && c != '\n')
		if (ptr < eptr - 1)
			*ptr++ = c;
	*ptr++ = '\0';
	if (c != '\n') {
		/* skip to end of line (including close quote, if any) */
		while ((c = getc(Fp)) != EOF && c != '\n')
			;
	}
	(void) ungetc(c, Fp);
	Dicts = lut_add(Dicts, (void *)stable(Tok), (void *)0, NULL);

	outfl(O_VERB, File, Line, "pragma set: dictionary \"%s\"", Tok);
}

/*
 * doallow_cycles -- handle "#pragma allow_cycles" directives
 */
static void
doallow_cycles()
{
	int c;
	char *ptr = Tok;
	char *eptr = &Tok[MAXTOK];
	unsigned long long newlevel;

	/*
	 * by default the compiler does not allow cycles or loops
	 * in propagations.  when cycles are encountered, the
	 * compiler prints out an error message.
	 *
	 *	"#pragma allow_cycles" and
	 *	"#pragma allow_cycles 0"
	 * allow cycles, but any such cycle will produce a warning
	 * message.
	 *
	 *	"#pragma allow_cycles N"
	 * with N > 0 will allow cycles and not produce any
	 * warning messages.
	 */

	/* skip white space and quotes */
	while ((c = getc(Fp)) != EOF &&
	    (c == ' ' || c == '\t' || c == '"'))
		;

	if (c == EOF || c == '\n')
		newlevel = 0ULL;
	else {

		/* pull in next token */
		ptr = Tok;
		*ptr++ = c;
		while ((c = getc(Fp)) != EOF && c != '"' && c != '\n')
			if (ptr < eptr - 1)
				*ptr++ = c;
		*ptr++ = '\0';
		if (c != '\n') {
			/* skip to end of line */
			while ((c = getc(Fp)) != EOF && c != '\n')
				;
		}
		newlevel = strtoll(Tok, NULL, 0);
	}
	(void) ungetc(c, Fp);

	(void) check_cycle_level(newlevel);
	outfl(O_VERB, File, Line,
	    "pragma set: allow_cycles (%s)",
	    newlevel ? "no warnings" : "with warnings");
}

/*
 * dopragma -- handle #pragma directives
 */
static void
dopragma(const char *tok)
{
	if (strcmp(tok, "ident") == 0)
		doident();
	else if (strcmp(tok, "dictionary") == 0)
		dodictionary();
	else if (strcmp(tok, "new_errors_only") == 0) {
		if (Pragma_new_errors_only++ == 0)
			outfl(O_VERB, File, Line,
			    "pragma set: new_errors_only");
	} else if (strcmp(tok, "trust_ereports") == 0) {
		if (Pragma_trust_ereports++ == 0)
			outfl(O_VERB, File, Line,
			    "pragma set: trust_ereports");
	} else if (strcmp(tok, "allow_cycles") == 0)
		doallow_cycles();
	else
		outfl(O_VERB, File, Line,
		    "unknown pragma ignored: \"%s\"", tok);
}

/*
 * lex_fini -- finalize the lexer
 */

int
lex_fini(void)
{
	stats_elapse_stop(Lexelapse);
	closefile();
	if (Lexecho) {
		outfl(O_OK, File, Line, "lex: ");
		dumpline(O_OK);
	}
	return (Errcount);
}

void
lex_free(void)
{
	struct filestats *nfstats = Fstats;

	/*
	 * Free up memory consumed by the lexer
	 */
	stats_delete(Tokcount);
	stats_delete(Filecount);
	stats_delete(Lexelapse);
	while (nfstats != NULL) {
		Fstats = nfstats->next;
		stats_delete(nfstats->stats);
		FREE(nfstats);
		nfstats = Fstats;
	}
	lut_free(Timesuffixlut, NULL, NULL);
	lut_free(Rwordslut, NULL, NULL);
	lut_free(Ident, NULL, NULL);
	lut_free(Dicts, NULL, NULL);
}