/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*      Copyright (c) 1988 AT&T */
/*        All Rights Reserved   */

#pragma ident   "%Z%%M% %I%     %E% SMI"

#include "lint.h"
#include "mtlib.h"
#include "file64.h"
#include <sys/types.h>
#include <stdlib.h>
#include <stdio.h>
#include <thread.h>
#include <synch.h>
#include <unistd.h>
#include <string.h>
#include "stdiom.h"
#include <wchar.h>
#include <sys/stat.h>
#include <stddef.h>
#include <errno.h>
#include <fcntl.h>

#define _iob    __iob

#undef end

#define FILE_ARY_SZ     8 /* a nice size for FILE array & end_buffer_ptrs */

#ifdef  _LP64

/*
 * Macros to declare and loop over a fp or fp/xfp combo to
 * avoid some of the _LP64 ifdef hell.
 */

#define FPDECL(fp)              FILE *fp
#define FIRSTFP(lp, fp)         fp = lp->iobp
#define NEXTFP(fp)              fp++
#define FPLOCK(fp)              &fp->_lock
#define FPSTATE(fp)             &fp->_state

#define xFILE                   FILE

#else

#define FPDECL(fp)              FILE *fp; xFILE *x##fp
#define FIRSTFP(lp, fp)         x##fp = lp->iobp; \
                                fp = x##fp ? &x##fp->_iob : &_iob[0]
#define NEXTFP(fp)              (x##fp ? fp = &(++x##fp)->_iob : ++fp)
#define FPLOCK(fp)              x##fp ? \
                                    &x##fp->xlock : &_xftab[IOPIND(fp)]._lock
#define FPSTATE(fp)             x##fp ? \
                                    &x##fp->xstate : &_xftab[IOPIND(fp)]._state

/* The extended 32-bit file structure for use in link buffers */
typedef struct xFILE {
        FILE    _iob;           /* must be first! */
        struct xFILEdata _xdat;
} xFILE;

#define xmagic  _xdat._magic
#define xend    _xdat._end
#define xlock   _xdat._lock
#define xstate  _xdat._state

#define FILEx(fp)       ((struct xFILE *)(uintptr_t)fp)

/*
 * The magic number stored is actually the pointer scrambled with
 * a magic number.  Pointers to data items live everywhere in memory
 * so we scramble the pointer in order to avoid accidental collisions.
 */
#define XFILEMAGIC              0x63687367
#define XMAGIC(xfp)             ((uintptr_t)(xfp) ^ XFILEMAGIC)
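
/*
 * For illustration (not part of the original source): an xFILE at
 * address 0x08061234 would store 0x08061234 ^ 0x63687367 in its
 * xmagic field.  A stray word that merely happens to contain a
 * pointer value therefore does not pass the xmagic == XMAGIC(xfp)
 * comparison, which looks for the scrambled form of the structure's
 * own address.
 */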

#endif  /* _LP64 */

struct _link_   /* manages a list of streams */
{
        xFILE *iobp;            /* the array of (x)FILE's */
                                /* NULL for the __first_link in ILP32 */
        int     niob;           /* length of the arrays */
        struct _link_   *next;  /* next in the list */
};

/*
 * With dynamic linking, iob may be in either the library or in the user's
 * a.out, so the run time linker fixes up the first entry in __first_link at
 * process startup time.
 *
 * In 32-bit processes, we don't have xFILE[FILE_ARY_SZ] but FILE[],
 * and _xftab[] instead; this is denoted by having iobp set to NULL in
 * 32-bit mode for the first link entry.
 */
struct _link_ __first_link =    /* first in linked list */
{
#if !defined(_LP64)
        NULL,
#else
        &_iob[0],
#endif
        _NFILE,
        NULL
};

/*
 * Information cached to speed up searches.  We remember where we
 * last found a free FILE* and we remember whether we saw any fcloses
 * in between.  We also count the number of chunks we allocated, see
 * _findiop() for an explanation.
 * These variables are all protected by _first_link_lock.
 */
static struct _link_ *lastlink = NULL;
static int fcloses;
static int nchunks;

static mutex_t _first_link_lock = DEFAULTMUTEX;

static int _fflush_l_iops(void);
static FILE *getiop(FILE *, rmutex_t *, mbstate_t *);

/*
 * All functions that understand the linked list of iob's follow.
 */
#pragma weak _cleanup = __cleanup
void
__cleanup(void)         /* called at process end to flush output streams */
{
        (void) fflush(NULL);
}

/*
 * For fork1-safety (see libc_prepare_atfork(), etc).
 */
void
stdio_locks()
{
        (void) mutex_lock(&_first_link_lock);
        /*
         * XXX: We should acquire all of the iob locks here.
         */
}

void
stdio_unlocks()
{
        /*
         * XXX: We should release all of the iob locks here.
         */
        (void) mutex_unlock(&_first_link_lock);
}

void
_flushlbf(void)         /* fflush() all line-buffered streams */
{
        FPDECL(fp);
        int i;
        struct _link_ *lp;
        /* Allow compiler to optimize the loop */
        int threaded = __libc_threaded;

        if (threaded)
                cancel_safe_mutex_lock(&_first_link_lock);

        lp = &__first_link;
        do {
                FIRSTFP(lp, fp);
                for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
                        /*
                         * The additional _IONBF check guards against
                         * allocated but uninitialized iops (see _findiop).
                         * We also automatically skip non-allocated iops.
                         * Don't block on locks.
                         */
                        if ((fp->_flag & (_IOLBF | _IOWRT | _IONBF)) ==
                            (_IOLBF | _IOWRT)) {
                                if (threaded) {
                                        rmutex_t *lk = FPLOCK(fp);
                                        if (cancel_safe_mutex_trylock(lk) != 0)
                                                continue;
                                        /* Recheck after locking */
                                        if ((fp->_flag & (_IOLBF | _IOWRT)) ==
                                            (_IOLBF | _IOWRT)) {
                                                (void) _fflush_u(fp);
                                        }
                                        cancel_safe_mutex_unlock(lk);
                                } else {
                                        (void) _fflush_u(fp);
                                }
                        }
                }
        } while ((lp = lp->next) != NULL);

        if (threaded)
                cancel_safe_mutex_unlock(&_first_link_lock);
}
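
/*
 * Note (illustrative, not in the original source): the trylock plus
 * recheck in _flushlbf() is a double-checked pattern.  The flag test
 * is first done without the lock to skip uninteresting streams
 * cheaply; a stream that passes is then acquired with a *trylock*,
 * so _flushlbf() can never block behind a thread that holds an iop
 * lock, and the flags are tested again because another thread may
 * have changed the stream between the unlocked test and the lock
 * acquisition.
 */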

/* allocate an unused stream; NULL if cannot */
FILE *
_findiop(void)
{
        struct _link_ *lp, **prev;

        /* used so there only needs to be one malloc() */
#ifdef _LP64
        typedef struct {
                struct _link_   hdr;
                FILE    iob[FILE_ARY_SZ];
        } Pkg;
#else
        typedef union {
                struct {                                /* Normal */
                        struct _link_   hdr;
                        xFILE   iob[FILE_ARY_SZ];
                } Pkgn;
                struct {                                /* Reversed */
                        xFILE   iob[FILE_ARY_SZ];
                        struct _link_   hdr;
                } Pkgr;
        } Pkg;
        uintptr_t delta;
#endif
        Pkg *pkgp;
        struct _link_ *hdr;
        FPDECL(fp);
        int i;
        int threaded = __libc_threaded;

        if (threaded)
                cancel_safe_mutex_lock(&_first_link_lock);

        if (lastlink == NULL) {
rescan:
                fcloses = 0;
                lastlink = &__first_link;
        }

        lp = lastlink;

        /*
         * Lock to make the testing of fp->_flag == 0 and acquiring the
         * fp atomic, and for allocation of new links.
         * Low contention is expected on _findiop(), hence the coarse
         * locking.  For finer granularity, use fp->_lock for allocating
         * an iop and make the testing of lp->next and allocation of new
         * links atomic using lp->_lock.
         */

        do {
                prev = &lp->next;
                FIRSTFP(lp, fp);

                for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
                        FILE *ret;
                        if (threaded) {
                                ret = getiop(fp, FPLOCK(fp), FPSTATE(fp));
                                if (ret != NULL) {
                                        cancel_safe_mutex_unlock(
                                            &_first_link_lock);
                                        return (ret);
                                }
                        } else {
                                ret = getiop(fp, NULL, FPSTATE(fp));
                                if (ret != NULL)
                                        return (ret);
                        }
                }
        } while ((lastlink = lp = lp->next) != NULL);

        /*
         * If there was a sufficient number of fcloses since we last started
         * at __first_link, we rescan all fp's again.  We do not rescan for
         * all fcloses; that would simplify the algorithm but would make
         * search times near O(n) again.
         * Worst case behaviour would still be pretty bad (open a full set,
         * then continuously opening and closing one FILE * gets you a full
         * scan each time).  That's why we overallocate 1 FILE for each
         * 32 chunks.  More overallocation is better; this is a nice
         * empirical value which doesn't cost a lot of memory, doesn't
         * overallocate until we reach 256 FILE *s and keeps the performance
         * pretty close to the optimum.
         */
        if (fcloses > nchunks/32)
                goto rescan;
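
        /*
         * Worked example (illustrative, not in the original source):
         * with nchunks == 64 the chunks hold 64 * FILE_ARY_SZ == 512
         * FILE structures, and a full rescan is only taken once more
         * than 64/32 == 2 fcloses have accumulated, so one FILE being
         * opened and closed in a loop cannot force an O(n) scan on
         * every open.
         */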

        /*
         * Need to allocate another and put it in the linked list.
         */
        if ((pkgp = malloc(sizeof (Pkg))) == NULL) {
                if (threaded)
                        cancel_safe_mutex_unlock(&_first_link_lock);
                return (NULL);
        }

        (void) memset(pkgp, 0, sizeof (Pkg));

#ifdef _LP64
        hdr = &pkgp->hdr;
        hdr->iobp = &pkgp->iob[0];
#else
        /*
         * The problem with referencing a word after a FILE* is the
         * possibility of a SIGSEGV if a non-stdio-issued FILE structure
         * ends on a page boundary.  We run this check so we never need
         * to run an expensive check like mincore() in order to know
         * whether it is safe to dereference ((xFILE*)fp)->xmagic.
         * We allocate the block with two alternative layouts; if one
         * layout is not properly aligned for our purposes, the other
         * layout will be, because the size of _link_ is small compared
         * to sizeof (xFILE).
         * The check performed is this:
         *      If the distance from pkgp to the end of the page is
         *      less than the offset of the last xmagic field in the
         *      xFILE structure (i.e., the 0x1000 boundary is inside our
         *      just allocated structure), and the distance modulo the
         *      size of xFILE is identical to the offset of the first
         *      xmagic in the structure (i.e., XXXXXX000 points to an
         *      xmagic field), we need to use the reversed structure.
         */
        if ((delta = 0x1000 - ((uintptr_t)pkgp & 0xfff)) <=
            offsetof(Pkg, Pkgn.iob[FILE_ARY_SZ-1].xmagic) &&
            delta % sizeof (struct xFILE) ==
            offsetof(Pkg, Pkgn.iob[0].xmagic)) {
                /* Use reversed structure */
                hdr = &pkgp->Pkgr.hdr;
                hdr->iobp = &pkgp->Pkgr.iob[0];
        } else {
                /* Use normal structure */
                hdr = &pkgp->Pkgn.hdr;
                hdr->iobp = &pkgp->Pkgn.iob[0];
        }
#endif /* _LP64 */
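
        /*
         * Note (illustrative, not in the original source): the two
         * layouts differ only in whether struct _link_ precedes or
         * follows the xFILE array.  Choosing the reversed layout
         * shifts every xmagic field by sizeof (struct _link_), which
         * is smaller than sizeof (xFILE), so a 4K boundary that would
         * have coincided with an xmagic field in the normal layout can
         * no longer do so, keeping the VALIDXFILE() test below safe.
         */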

        hdr->niob = FILE_ARY_SZ;
        nchunks++;

#ifdef  _LP64
        fp = hdr->iobp;
        for (i = 0; i < FILE_ARY_SZ; i++)
                (void) mutex_init(&fp[i]._lock,
                    USYNC_THREAD | LOCK_RECURSIVE, NULL);
#else
        xfp = hdr->iobp;
        fp = &xfp->_iob;

        for (i = 0; i < FILE_ARY_SZ; i++) {
                xfp[i].xmagic = XMAGIC(&xfp[i]);
                (void) mutex_init(&xfp[i].xlock,
                    USYNC_THREAD | LOCK_RECURSIVE, NULL);
        }
#endif  /* _LP64 */

        lastlink = *prev = hdr;
        fp->_ptr = 0;
        fp->_base = 0;
        fp->_flag = 0377;       /* claim the fp by setting low 8 bits */
        if (threaded)
                cancel_safe_mutex_unlock(&_first_link_lock);

        return (fp);
}

static void
isseekable(FILE *iop)
{
        struct stat64 fstatbuf;
        int save_errno;

        save_errno = errno;

        if (fstat64(GET_FD(iop), &fstatbuf) != 0) {
                /*
                 * When we don't know what it is, we'll do the old
                 * behaviour and flush the stream.
                 */
                SET_SEEKABLE(iop);
                errno = save_errno;
                return;
        }

        /*
         * Check for what is non-SEEKABLE; otherwise assume it's
         * SEEKABLE so we get the old behaviour and flush the stream.
         */

        if (S_ISFIFO(fstatbuf.st_mode) || S_ISCHR(fstatbuf.st_mode) ||
            S_ISSOCK(fstatbuf.st_mode) || S_ISDOOR(fstatbuf.st_mode)) {
                CLEAR_SEEKABLE(iop);
        } else {
                SET_SEEKABLE(iop);
        }

        errno = save_errno;
}

#ifdef  _LP64
void
_setbufend(FILE *iop, Uchar *end)       /* set the end pointer for this iop */
{
        iop->_end = end;

        isseekable(iop);
}

#undef _realbufend

Uchar *
_realbufend(FILE *iop)          /* get the end pointer for this iop */
{
        return (iop->_end);
}

#else   /* _LP64 */

/*
 * Awkward functions not needed for the sane 64-bit environment.
 */
/*
 * xmagic must not be aligned on a 4K boundary.  We guarantee this in
 * _findiop().
 */
#define VALIDXFILE(xfp) \
        (((uintptr_t)&(xfp)->xmagic & 0xfff) && \
            (xfp)->xmagic == XMAGIC(FILEx(xfp)))

static struct xFILEdata *
getxfdat(FILE *iop)
{
        if (STDIOP(iop))
                return (&_xftab[IOPIND(iop)]);
        else if (VALIDXFILE(FILEx(iop)))
                return (&FILEx(iop)->_xdat);
        else
                return (NULL);
}

void
_setbufend(FILE *iop, Uchar *end)       /* set the end pointer for this iop */
{
        struct xFILEdata *dat = getxfdat(iop);

        if (dat != NULL)
                dat->_end = end;

        isseekable(iop);

        /*
         * For binary compatibility with user programs using the
         * old _bufend macro.  This is *so* broken, fileno()
         * is not the proper index.
         */
        if (iop->_magic < _NFILE)
                _bufendtab[iop->_magic] = end;
}

Uchar *
_realbufend(FILE *iop)          /* get the end pointer for this iop */
{
        struct xFILEdata *dat = getxfdat(iop);

        if (dat != NULL)
                return (dat->_end);

        return (NULL);
}

/*
 * _reallock() is invoked in each stdio call through the IOB_LCK() macro;
 * it is therefore extremely performance-sensitive.  We get better
 * performance by inlining the STDIOP check in IOB_LCK and inlining a
 * custom version of getxfdat() here.
 */
rmutex_t *
_reallock(FILE *iop)
{
        if (VALIDXFILE(FILEx(iop)))
                return (&FILEx(iop)->xlock);

        return (NULL);
}

#endif  /* _LP64 */

/* make sure _cnt, _ptr are correct */
void
_bufsync(FILE *iop, Uchar *bufend)
{
        ssize_t spaceleft;

        spaceleft = bufend - iop->_ptr;
        if (bufend < iop->_ptr) {
                iop->_ptr = bufend;
                iop->_cnt = 0;
        } else if (spaceleft < iop->_cnt)
                iop->_cnt = spaceleft;
}

/* really write out current buffer contents */
int
_xflsbuf(FILE *iop)
{
        ssize_t n;
        Uchar *base = iop->_base;
        Uchar *bufend;
        ssize_t num_wrote;

        /*
         * Hopefully, be stable with respect to interrupts...
         */
        n = iop->_ptr - base;
        iop->_ptr = base;
        bufend = _bufend(iop);
        if (iop->_flag & (_IOLBF | _IONBF))
                iop->_cnt = 0;          /* always go to a flush */
        else
                iop->_cnt = bufend - base;

        if (_needsync(iop, bufend))     /* recover from interrupts */
                _bufsync(iop, bufend);

        if (n > 0) {
                int fd = GET_FD(iop);
                while ((num_wrote = write(fd, base, (size_t)n)) != n) {
                        if (num_wrote <= 0) {
                                if (!cancel_active())
                                        iop->_flag |= _IOERR;
                                return (EOF);
                        }
                        n -= num_wrote;
                        base += num_wrote;
                }
        }
        return (0);
}
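
/*
 * Note (illustrative, not in the original source): write() may
 * legitimately return a short count, e.g. when a signal handler runs
 * after some bytes have been transferred or when a pipe fills up.
 * The loop in _xflsbuf() therefore keeps advancing base and shrinking
 * n until the whole buffer is written, and only treats a return of
 * 0 or -1 as an error.
 */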

/* flush (write) buffer */
int
fflush(FILE *iop)
{
        int res;
        rmutex_t *lk;

        if (iop) {
                FLOCKFILE(lk, iop);
                res = _fflush_u(iop);
                FUNLOCKFILE(lk);
        } else {
                res = _fflush_l_iops();         /* flush all iops */
        }
        return (res);
}

static int
_fflush_l_iops(void)            /* flush all buffers */
{
        FPDECL(iop);

        int i;
        struct _link_ *lp;
        int res = 0;
        rmutex_t *lk;
        /* Allow the compiler to optimize the load out of the loop */
        int threaded = __libc_threaded;

        if (threaded)
                cancel_safe_mutex_lock(&_first_link_lock);

        lp = &__first_link;

        do {
                /*
                 * We need to grab the file locks or file corruption
                 * will happen.  But we first check the flags field
                 * knowing that when it is 0, it isn't allocated and
                 * cannot be allocated while we're holding the
                 * _first_link_lock.  And when _IONBF is set (also the
                 * case when _flag is 0377, i.e., an alloc in progress),
                 * we also ignore it.
                 *
                 * Ignore locked streams; it will appear as if
                 * concurrent updates happened after fflush(NULL).  Note
                 * that we even attempt to lock if the locking is set to
                 * "by caller".  We don't want to penalize callers of
                 * __fsetlocking() by not flushing their files.  Note: if
                 * __fsetlocking() callers don't employ any locking, they
                 * may still face corruption in fflush(NULL); but that's
                 * no change from earlier releases.
                 */
                FIRSTFP(lp, iop);
                for (i = lp->niob; --i >= 0; NEXTFP(iop)) {
                        unsigned int flag = iop->_flag;

                        /* flag 0, flag 0377, or _IONBF set */
                        if (flag == 0 || (flag & _IONBF) != 0)
                                continue;

                        if (threaded) {
                                lk = FPLOCK(iop);
                                if (cancel_safe_mutex_trylock(lk) != 0)
                                        continue;
                        }

                        if (!(iop->_flag & _IONBF)) {
                                /*
                                 * We don't need to worry about the _IORW
                                 * case, since the iop will also be marked
                                 * with _IOREAD or _IOWRT, whichever we are
                                 * really doing.
                                 */
                                if (iop->_flag & _IOWRT) {
                                        /* Flush write buffers */
                                        res |= _fflush_u(iop);
                                } else if (iop->_flag & _IOREAD) {
                                        /*
                                         * Flush seekable read buffers;
                                         * don't flush non-seekable read
                                         * buffers.
                                         */
                                        if (GET_SEEKABLE(iop)) {
                                                res |= _fflush_u(iop);
                                        }
                                }
                        }
                        if (threaded)
                                cancel_safe_mutex_unlock(lk);
                }
        } while ((lp = lp->next) != NULL);
        if (threaded)
                cancel_safe_mutex_unlock(&_first_link_lock);
        return (res);
}

/* flush buffer */
int
_fflush_u(FILE *iop)
{
        int res = 0;

        /* this portion is always assumed locked */
        if (!(iop->_flag & _IOWRT)) {
                (void) lseek64(GET_FD(iop), -iop->_cnt, SEEK_CUR);
                iop->_cnt = 0;
                /* needed for ungetc & multibyte pushbacks */
                iop->_ptr = iop->_base;
                if (iop->_flag & _IORW) {
                        iop->_flag &= ~_IOREAD;
                }
                return (0);
        }
        if (iop->_base != NULL && iop->_ptr > iop->_base) {
                res = _xflsbuf(iop);
        }
        if (iop->_flag & _IORW) {
                iop->_flag &= ~_IOWRT;
                iop->_cnt = 0;
        }
        return (res);
}
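
/*
 * Note (illustrative, not in the original source): for a stream last
 * used for reading, "flushing" in _fflush_u() means discarding the
 * buffered but unconsumed input and seeking the underlying file
 * descriptor back by _cnt bytes, so that the kernel file offset again
 * matches what the application has actually consumed.
 */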

/* flush buffer and close stream */
int
fclose(FILE *iop)
{
        int res = 0;
        rmutex_t *lk;

        if (iop == NULL) {
                return (EOF);           /* avoid passing zero to FLOCKFILE */
        }

        FLOCKFILE(lk, iop);
        if (iop->_flag == 0) {
                FUNLOCKFILE(lk);
                return (EOF);
        }
        /* Is it not unbuffered and opened for read and/or write? */
        if (!(iop->_flag & _IONBF) && (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
                res = _fflush_u(iop);
        if (close(GET_FD(iop)) < 0)
                res = EOF;
        if (iop->_flag & _IOMYBUF) {
                (void) free((char *)iop->_base - PUSHBACK);
        }
        iop->_base = NULL;
        iop->_ptr = NULL;
        iop->_cnt = 0;
        iop->_flag = 0;                 /* marks it as available */
        FUNLOCKFILE(lk);

        if (__libc_threaded)
                cancel_safe_mutex_lock(&_first_link_lock);
        fcloses++;
        if (__libc_threaded)
                cancel_safe_mutex_unlock(&_first_link_lock);

        return (res);
}

/* flush buffer and close fd, but keep the stream; used by freopen() */
int
close_fd(FILE *iop)
{
        int res = 0;
        mbstate_t *mb;

        if (iop == NULL || iop->_flag == 0)
                return (EOF);
        /* Is it not unbuffered and opened for read and/or write? */
        if (!(iop->_flag & _IONBF) && (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
                res = _fflush_u(iop);
        if (close(GET_FD(iop)) < 0)
                res = EOF;
        if (iop->_flag & _IOMYBUF) {
                (void) free((char *)iop->_base - PUSHBACK);
        }
        iop->_base = NULL;
        iop->_ptr = NULL;
        mb = _getmbstate(iop);
        if (mb != NULL)
                (void) memset(mb, 0, sizeof (mbstate_t));
        iop->_cnt = 0;
        _setorientation(iop, _NO_MODE);
        return (res);
}

static FILE *
getiop(FILE *fp, rmutex_t *lk, mbstate_t *mb)
{
        if (lk != NULL && cancel_safe_mutex_trylock(lk) != 0)
                return (NULL);  /* locked: fp in use */

        if (fp->_flag == 0) {   /* unused */
#ifndef _LP64
                fp->__orientation = 0;
#endif /* _LP64 */
                fp->_cnt = 0;
                fp->_ptr = NULL;
                fp->_base = NULL;
                fp->_flag = 0377;       /* claim the fp by setting low 8 bits */
                (void) memset(mb, 0, sizeof (mbstate_t));
                FUNLOCKFILE(lk);
                return (fp);
        }
        FUNLOCKFILE(lk);
        return (NULL);
}
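
/*
 * Note (illustrative, not in the original source): _flag == 0377 is
 * the "claimed but not yet initialized" state.  With every bit of the
 * low byte set, the scans above all leave such a stream alone:
 * _findiop() looks for _flag == 0, while _flushlbf() and
 * _fflush_l_iops() skip anything with _IONBF set.
 */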

#ifndef _LP64
/*
 * DESCRIPTION:
 * This function gets the pointer to the mbstate_t structure associated
 * with the specified iop.
 *
 * RETURNS:
 * If the associated mbstate_t is found, the pointer to the mbstate_t
 * is returned.  Otherwise, NULL is returned.
 */
mbstate_t *
_getmbstate(FILE *iop)
{
        struct xFILEdata *dat = getxfdat(iop);

        if (dat != NULL)
                return (&dat->_state);

        return (NULL);
}

/*
 * More 32-bit only functions.
 * They lookup/set large fd's for extended FILE support.
 */

/*
 * The negative value indicates that extended fd FILEs have not
 * been enabled by the user.
 */
static int bad_fd = -1;

int
_file_get(FILE *iop)
{
        int altfd;

        /*
         * Failure indicates a FILE * not allocated through stdio;
         * it means the flag values are probably bogus and that if
         * a file descriptor is set, it's in _magic.
         * Inline getxfdat() for performance reasons.
         */
        if (STDIOP(iop))
                altfd = _xftab[IOPIND(iop)]._altfd;
        else if (VALIDXFILE(FILEx(iop)))
                altfd = FILEx(iop)->_xdat._altfd;
        else
                return (iop->_magic);
        /*
         * If this is not an internal extended FILE, then check
         * whether _file is being changed from underneath us.
         * It should not be, because if it is, then we lose our
         * ability to guard against silent data corruption.
         */
        if (!iop->__xf_nocheck && bad_fd > -1 && iop->_magic != bad_fd) {
                (void) fprintf(stderr,
                    "Application violated extended FILE safety mechanism.\n"
                    "Please read the man page for extendedFILE.\nAborting\n");
                abort();
        }
        return (altfd);
}

int
_file_set(FILE *iop, int fd, const char *type)
{
        struct xFILEdata *dat;
        int Fflag;

        /* Already known to contain at least one byte */
        while (*++type != '\0')
                ;

        Fflag = type[-1] == 'F';
        if (!Fflag && bad_fd < 0) {
                errno = EMFILE;
                return (-1);
        }

        dat = getxfdat(iop);
        iop->__extendedfd = 1;
        iop->__xf_nocheck = Fflag;
        dat->_altfd = fd;
        iop->_magic = (unsigned char)bad_fd;
        return (0);
}

/*
 * Activates extended fd's in FILE's
 */

static const int tries[] = {196, 120, 60, 3};
#define NTRIES  (sizeof (tries)/sizeof (int))

int
enable_extended_FILE_stdio(int fd, int action)
{
        int i;

        if (action < 0)
                action = SIGABRT;       /* default signal */

        if (fd < 0) {
                /*
                 * Search for an available fd and make it the badfd.
                 */
                for (i = 0; i < NTRIES; i++) {
                        fd = fcntl(tries[i], F_BADFD, action);
                        if (fd >= 0)
                                break;
                }
                if (fd < 0)     /* failed to find an available fd */
                        return (-1);
        } else {
                /* caller requests that fd be the chosen badfd */
                int nfd = fcntl(fd, F_BADFD, action);
                if (nfd < 0 || nfd != fd)
                        return (-1);
        }
        bad_fd = fd;
        return (0);
}
#endif
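
/*
 * Usage sketch (illustrative, not part of the original source): a
 * 32-bit application typically has enable_extended_FILE_stdio()
 * called on its behalf by preloading the extendedFILE library, which
 * reserves an unused descriptor, marks it with F_BADFD, and stores it
 * in bad_fd; from then on _magic holds that sentinel and the real
 * (possibly > 255) descriptor lives in the _altfd slot set by
 * _file_set().
 */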