/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved	*/


#include "synonyms.h"
#include "mtlib.h"
#include "file64.h"

#define	_iob	__iob

#include <sys/types.h>
#include <stdlib.h>
#include <stdio.h>
#include <thread.h>
#include <synch.h>
#include <unistd.h>
#include <string.h>
#include "stdiom.h"
#include <wchar.h>
#include <sys/stat.h>
#include <stddef.h>
#include <errno.h>
#include <fcntl.h>

#undef end

#define	FILE_ARY_SZ	8 /* a nice size for FILE array & end_buffer_ptrs */

#ifdef	_LP64

/*
 * Macros to declare and loop over a fp or fp/xfp combo to
 * avoid some of the _LP64 ifdef hell.
 *
 * In 64-bit mode a FILE carries its own lock and state, so fp alone
 * suffices.  In 32-bit mode the historical FILE layout is frozen for
 * binary compatibility, so lock/state live either in the parallel
 * _xftab[] table (for the static _iob[] entries) or in the trailing
 * _xdat member of an xFILE (for dynamically allocated entries); the
 * x##fp shadow pointer selects between the two.
 */

#define	FPDECL(fp)		FILE *fp
#define	FIRSTFP(lp, fp)		fp = lp->iobp
#define	NEXTFP(fp)		fp++
#define	FPLOCK(fp)		&fp->_lock
#define	FPSTATE(fp)		&fp->_state

#define	xFILE		FILE

#else

#define	FPDECL(fp)		FILE *fp; xFILE *x##fp
#define	FIRSTFP(lp, fp)		x##fp = lp->iobp; \
				fp = x##fp ? &x##fp->_iob : &_iob[0]
#define	NEXTFP(fp)		(x##fp ? fp = &(++x##fp)->_iob : ++fp)
#define	FPLOCK(fp)		x##fp ? \
				    &x##fp->xlock : &_xftab[IOPIND(fp)]._lock
#define	FPSTATE(fp)		x##fp ? \
				    &x##fp->xstate : &_xftab[IOPIND(fp)]._state

/* The extended 32-bit file structure for use in link buffers */
typedef struct xFILE {
	FILE	_iob;		/* must be first! */
	struct xFILEdata	_xdat;
} xFILE;

#define	xmagic	_xdat._magic
#define	xend	_xdat._end
#define	xlock	_xdat._lock
#define	xstate	_xdat._state

#define	FILEx(fp)	((struct xFILE *)(uintptr_t)fp)

/*
 * The magic number stored is actually the pointer scrambled with
 * a magic number.  Pointers to data items live everywhere in memory
 * so we scramble the pointer in order to avoid accidental collisions.
 */
#define	XFILEMAGIC		0x63687367
#define	XMAGIC(xfp)		((uintptr_t)(xfp) ^ XFILEMAGIC)

#endif /* _LP64 */

struct _link_	/* manages a list of streams */
{
	xFILE *iobp;		/* the array of (x)FILE's */
				/* NULL for the __first_link in ILP32 */
	int	niob;		/* length of the arrays */
	struct _link_	*next;	/* next in the list */
};

/*
 * With dynamic linking, iob may be in either the library or in the user's
 * a.out, so the run time linker fixes up the first entry in __first_link at
 * process startup time.
 *
 * In 32 bit processes, we don't have xFILE[FILE_ARY_SZ] but FILE[],
 * and _xftab[] instead; this is denoted by having iobp set to NULL in
 * 32 bit mode for the first link entry.
 */
struct _link_ __first_link =	/* first in linked list */
{
#if !defined(_LP64)
	NULL,
#else
	&_iob[0],
#endif
	_NFILE,
	NULL
};

/*
 * Information cached to speed up searches.  We remember where we
 * last found a free FILE* and we remember whether we saw any fcloses
 * in between.  We also count the number of chunks we allocated, see
 * _findiop() for an explanation.
 * These variables are all protected by _first_link_lock.
 */
static struct _link_ *lastlink = NULL;
static int fcloses;
static int nchunks;

static mutex_t _first_link_lock = DEFAULTMUTEX;

static int _fflush_l_iops(void);
static FILE *getiop(FILE *, rmutex_t *, mbstate_t *);

/*
 * All functions that understand the linked list of iob's follow.
 */
#pragma weak _cleanup = __cleanup
void
__cleanup(void)		/* called at process end to flush output streams */
{
	(void) fflush(NULL);
}

/*
 * For fork1-safety (see libc_prepare_atfork(), etc).
 * Acquire/release the list lock around fork so the child does not
 * inherit it in a locked state.
 */
void
stdio_locks()
{
	(void) mutex_lock(&_first_link_lock);
	/*
	 * XXX: We should acquire all of the iob locks here.
	 */
}

void
stdio_unlocks()
{
	/*
	 * XXX: We should release all of the iob locks here.
	 */
	(void) mutex_unlock(&_first_link_lock);
}

/*
 * Walk every chunk in the linked list and fflush() each stream that is
 * both line-buffered and being written.  Used (presumably by the stdio
 * read path) to flush pending line-buffered output before input is read.
 */
void
_flushlbf(void)		/* fflush() all line-buffered streams */
{
	FPDECL(fp);
	int i;
	struct _link_ *lp;
	/* Allow compiler to optimize the loop */
	int threaded = __libc_threaded;

	if (threaded)
		cancel_safe_mutex_lock(&_first_link_lock);

	lp = &__first_link;
	do {
		FIRSTFP(lp, fp);
		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
			/*
			 * The additional _IONBF check guards against
			 * allocated but uninitialized iops (see _findiop).
			 * We also automatically skip non allocated iop's.
			 * Don't block on locks.
			 */
			if ((fp->_flag & (_IOLBF | _IOWRT | _IONBF)) ==
			    (_IOLBF | _IOWRT)) {
				if (threaded) {
					rmutex_t *lk = FPLOCK(fp);
					if (cancel_safe_mutex_trylock(lk) != 0)
						continue;
					/* Recheck after locking */
					if ((fp->_flag & (_IOLBF | _IOWRT)) ==
					    (_IOLBF | _IOWRT)) {
						(void) _fflush_u(fp);
					}
					cancel_safe_mutex_unlock(lk);
				} else {
					(void) _fflush_u(fp);
				}
			}
		}
	} while ((lp = lp->next) != NULL);

	if (threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);
}

/* allocate an unused stream; NULL if cannot */
FILE *
_findiop(void)
{
	struct _link_ *lp, **prev;

	/* used so there only needs to be one malloc() */
#ifdef _LP64
	typedef	struct	{
		struct _link_	hdr;
		FILE	iob[FILE_ARY_SZ];
	} Pkg;
#else
	typedef union {
		struct {				/* Normal */
			struct _link_	hdr;
			xFILE	iob[FILE_ARY_SZ];
		} Pkgn;
		struct {				/* Reversed */
			xFILE	iob[FILE_ARY_SZ];
			struct _link_	hdr;
		} Pkgr;
	} Pkg;
	uintptr_t delta;
#endif
	Pkg *pkgp;
	struct _link_ *hdr;
	FPDECL(fp);
	int i;
	int threaded = __libc_threaded;

	if (threaded)
		cancel_safe_mutex_lock(&_first_link_lock);

	if (lastlink == NULL) {
rescan:
		fcloses = 0;
		lastlink = &__first_link;
	}

	lp = lastlink;

	/*
	 * lock to make testing of fp->_flag == 0 and acquiring the fp atomic
	 * and for allocation of new links
	 * low contention expected on _findiop(), hence coarse locking.
	 * for finer granularity, use fp->_lock for allocating an iop
	 * and make the testing of lp->next and allocation of new link atomic
	 * using lp->_lock
	 */

	do {
		prev = &lp->next;
		FIRSTFP(lp, fp);

		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
			FILE *ret;
			if (threaded) {
				ret = getiop(fp, FPLOCK(fp), FPSTATE(fp));
				if (ret != NULL) {
					cancel_safe_mutex_unlock(
					    &_first_link_lock);
					return (ret);
				}
			} else {
				ret = getiop(fp, NULL, FPSTATE(fp));
				if (ret != NULL)
					return (ret);
			}
		}
	} while ((lastlink = lp = lp->next) != NULL);

	/*
	 * If there was a sufficient number of fcloses since we last started
	 * at __first_link, we rescan all fp's again.  We do not rescan for
	 * all fcloses; that would simplify the algorithm but would make
	 * search times near O(n) again.
	 * Worst case behaviour would still be pretty bad (open a full set,
	 * then continuously opening and closing one FILE * gets you a full
	 * scan each time).  That's why we over allocate 1 FILE for each
	 * 32 chunks.  More over allocation is better; this is a nice
	 * empirical value which doesn't cost a lot of memory, doesn't
	 * overallocate until we reach 256 FILE *s and keeps the performance
	 * pretty close to the optimum.
	 */
	if (fcloses > nchunks/32)
		goto rescan;

	/*
	 * Need to allocate another and put it in the linked list.
	 */
	if ((pkgp = malloc(sizeof (Pkg))) == NULL) {
		if (threaded)
			cancel_safe_mutex_unlock(&_first_link_lock);
		return (NULL);
	}

	(void) memset(pkgp, 0, sizeof (Pkg));

#ifdef _LP64
	hdr = &pkgp->hdr;
	hdr->iobp = &pkgp->iob[0];
#else
	/*
	 * The problem with referencing a word after a FILE* is the possibility
	 * of a SIGSEGV if a non-stdio issue FILE structure ends on a page
	 * boundary.  We run this check so we never need to run an expensive
	 * check like mincore() in order to know whether it is
	 * safe to dereference ((xFILE*)fp)->xmagic.
	 * We allocate the block with two alternative layouts; if one
	 * layout is not properly aligned for our purposes, the other layout
	 * will be because the size of _link_ is small compared to
	 * sizeof (xFILE).
	 * The check performed is this:
	 *	If the distance from pkgp to the end of the page is
	 *	less than the offset of the last xmagic field in the
	 *	xFILE structure, (the 0x1000 boundary is inside our just
	 *	allocated structure) and the distance modulo the size of xFILE
	 *	is identical to the offset of the first xmagic in the
	 *	structure (i.e., XXXXXX000 points to an xmagic field),
	 *	we need to use the reverse structure.
	 */
	if ((delta = 0x1000 - ((uintptr_t)pkgp & 0xfff)) <=
	    offsetof(Pkg, Pkgn.iob[FILE_ARY_SZ-1].xmagic) &&
	    delta % sizeof (struct xFILE) ==
	    offsetof(Pkg, Pkgn.iob[0].xmagic)) {
		/* Use reversed structure */
		hdr = &pkgp->Pkgr.hdr;
		hdr->iobp = &pkgp->Pkgr.iob[0];
	} else {
		/* Use normal structure */
		hdr = &pkgp->Pkgn.hdr;
		hdr->iobp = &pkgp->Pkgn.iob[0];
	}
#endif /* _LP64 */

	hdr->niob = FILE_ARY_SZ;
	nchunks++;

#ifdef	_LP64
	fp = hdr->iobp;
	for (i = 0; i < FILE_ARY_SZ; i++)
		mutex_init(&fp[i]._lock, USYNC_THREAD | LOCK_RECURSIVE, NULL);
#else
	xfp = hdr->iobp;
	fp = &xfp->_iob;

	for (i = 0; i < FILE_ARY_SZ; i++) {
		xfp[i].xmagic = XMAGIC(&xfp[i]);
		mutex_init(&xfp[i].xlock, USYNC_THREAD | LOCK_RECURSIVE, NULL);
	}
#endif	/*	_LP64	*/

	lastlink = *prev = hdr;
	fp->_ptr = 0;
	fp->_base = 0;
	fp->_flag = 0377; /* claim the fp by setting low 8 bits */
	if (threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);

	return (fp);
}

/*
 * Determine from fstat64() whether the stream's fd refers to a seekable
 * object and record the result with SET_SEEKABLE/CLEAR_SEEKABLE.
 * errno is preserved across the probe.  On fstat64() failure we default
 * to "seekable" so callers keep the historical flush behaviour.
 */
static void
isseekable(FILE *iop)
{
	struct stat64 fstatbuf;
	int save_errno;

	save_errno = errno;

	if (fstat64(GET_FD(iop), &fstatbuf) != 0) {
		/*
		 * when we don't know what it is we'll
		 * do the old behaviour and flush
		 * the stream
		 */
		SET_SEEKABLE(iop);
		errno = save_errno;
		return;
	}

	/*
	 * check for what is non-SEEKABLE
	 * otherwise assume it's SEEKABLE so we get the old
	 * behaviour and flush the stream
	 */

	if (S_ISFIFO(fstatbuf.st_mode) || S_ISCHR(fstatbuf.st_mode) ||
	    S_ISSOCK(fstatbuf.st_mode) || S_ISDOOR(fstatbuf.st_mode)) {
		CLEAR_SEEKABLE(iop);
	} else {
		SET_SEEKABLE(iop);
	}

	errno = save_errno;
}

#ifdef	_LP64
void
_setbufend(FILE *iop, Uchar *end)	/* set the end pointer for this iop */
{
	iop->_end = end;

	isseekable(iop);
}

#undef _realbufend

Uchar *
_realbufend(FILE *iop)		/* get the end pointer for this iop */
{
	return (iop->_end);
}

#else /* _LP64 */

/*
 * Awkward functions not needed for the sane 64 bit environment.
 */
/*
 * xmagic must not be aligned on a 4K boundary. We guarantee this in
 * _findiop().
 */
#define	VALIDXFILE(xfp) \
	(((uintptr_t)&(xfp)->xmagic & 0xfff) && \
	    (xfp)->xmagic == XMAGIC(FILEx(xfp)))

/*
 * Return the extended data for iop: the _xftab[] slot for one of the
 * static _iob[] streams, the trailing _xdat for a validated xFILE, or
 * NULL for a FILE that did not come from this stdio implementation.
 */
static struct xFILEdata *
getxfdat(FILE *iop)
{
	if (STDIOP(iop))
		return (&_xftab[IOPIND(iop)]);
	else if (VALIDXFILE(FILEx(iop)))
		return (&FILEx(iop)->_xdat);
	else
		return (NULL);
}

void
_setbufend(FILE *iop, Uchar *end)	/* set the end pointer for this iop */
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		dat->_end = end;

	isseekable(iop);

	/*
	 * For binary compatibility with user programs using the
	 * old _bufend macro.  This is *so* broken, fileno()
	 * is not the proper index.
	 */
	if (iop->_magic < _NFILE)
		_bufendtab[iop->_magic] = end;

}

Uchar *
_realbufend(FILE *iop)		/* get the end pointer for this iop */
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		return (dat->_end);

	return (NULL);
}

/*
 * _reallock() is invoked in each stdio call through the IOB_LCK() macro,
 * it is therefore extremely performance sensitive.  We get better performance
 * by inlining the STDIOP check in IOB_LCK and inlining a custom version
 * of getfxdat() here.
 */
rmutex_t *
_reallock(FILE *iop)
{
	if (VALIDXFILE(FILEx(iop)))
		return (&FILEx(iop)->xlock);

	return (NULL);
}

#endif	/*	_LP64	*/

/* make sure _cnt, _ptr are correct */
void
_bufsync(FILE *iop, Uchar *bufend)
{
	ssize_t spaceleft;

	spaceleft = bufend - iop->_ptr;
	if (bufend < iop->_ptr) {
		iop->_ptr = bufend;
		iop->_cnt = 0;
	} else if (spaceleft < iop->_cnt)
		iop->_cnt = spaceleft;
}

/* really write out current buffer contents */
int
_xflsbuf(FILE *iop)
{
	ssize_t n;
	Uchar *base = iop->_base;
	Uchar *bufend;
	ssize_t num_wrote;

	/*
	 * Hopefully, be stable with respect to interrupts...
	 * Reset the buffer pointers before writing; a partial write
	 * loops until all n bytes are out or write() fails.
	 */
	n = iop->_ptr - base;
	iop->_ptr = base;
	bufend = _bufend(iop);
	if (iop->_flag & (_IOLBF | _IONBF))
		iop->_cnt = 0;		/* always go to a flush */
	else
		iop->_cnt = bufend - base;

	if (_needsync(iop, bufend))	/* recover from interrupts */
		_bufsync(iop, bufend);

	if (n > 0) {
		int fd = GET_FD(iop);
		while ((num_wrote = write(fd, base, (size_t)n)) != n) {
			if (num_wrote <= 0) {
				if (!cancel_active())
					iop->_flag |= _IOERR;
				return (EOF);
			}
			n -= num_wrote;
			base += num_wrote;
		}
	}
	return (0);
}

/* flush (write) buffer */
int
fflush(FILE *iop)
{
	int res;
	rmutex_t *lk;

	if (iop) {
		FLOCKFILE(lk, iop);
		res = _fflush_u(iop);
		FUNLOCKFILE(lk);
	} else {
		res = _fflush_l_iops();		/* flush all iops */
	}
	return (res);
}

static int
_fflush_l_iops(void)		/* flush all buffers */
{
	FPDECL(iop);

	int i;
	struct _link_ *lp;
	int res = 0;
	rmutex_t *lk;
	/* Allow the compiler to optimize the load out of the loop */
	int threaded = __libc_threaded;

	if (threaded)
		cancel_safe_mutex_lock(&_first_link_lock);

	lp = &__first_link;

	do {
		/*
		 * We need to grab the file locks or file corruption
		 * will happen.  But we first check the flags field
		 * knowing that when it is 0, it isn't allocated and
		 * cannot be allocated while we're holding the
		 * _first_link_lock.  And when _IONBF is set (also the
		 * case when _flag is 0377, or alloc in progress), we
		 * also ignore it.
		 *
		 * Ignore locked streams; it will appear as if
		 * concurrent updates happened after fflush(NULL).  Note
		 * that we even attempt to lock if the locking is set to
		 * "by caller".  We don't want to penalize callers of
		 * __fsetlocking() by not flushing their files.  Note: if
		 * __fsetlocking() callers don't employ any locking, they
		 * may still face corruption in fflush(NULL); but that's
		 * no change from earlier releases.
		 */
		FIRSTFP(lp, iop);
		for (i = lp->niob; --i >= 0; NEXTFP(iop)) {
			unsigned int flag = iop->_flag;

			/* flag 0, flag 0377, or _IONBF set */
			if (flag == 0 || (flag & _IONBF) != 0)
				continue;

			if (threaded) {
				lk = FPLOCK(iop);
				if (cancel_safe_mutex_trylock(lk) != 0)
					continue;
			}

			if (!(iop->_flag & _IONBF)) {
				/*
				 * don't need to worry about the _IORW case
				 * since the iop will also marked with _IOREAD
				 * or _IOWRT whichever we are really doing
				 */
				if (iop->_flag & _IOWRT) {
					/* Flush write buffers */
					res |= _fflush_u(iop);
				} else if (iop->_flag & _IOREAD) {
					/*
					 * flush seekable read buffers
					 * don't flush non-seekable read buffers
					 */
					if (GET_SEEKABLE(iop)) {
						res |= _fflush_u(iop);
					}
				}
			}
			if (threaded)
				cancel_safe_mutex_unlock(lk);
		}
	} while ((lp = lp->next) != NULL);
	if (threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);
	return (res);
}

/* flush buffer */
int
_fflush_u(FILE *iop)
{
	int res = 0;

	/* this portion is always assumed locked */
	if (!(iop->_flag & _IOWRT)) {
		/*
		 * Read stream: back the fd up over the unread buffered
		 * bytes and discard them.
		 */
		(void) lseek64(GET_FD(iop), -iop->_cnt, SEEK_CUR);
		iop->_cnt = 0;
		/* needed for ungetc & multibyte pushbacks */
		iop->_ptr = iop->_base;
		if (iop->_flag & _IORW) {
			iop->_flag &= ~_IOREAD;
		}
		return (0);
	}
	if (iop->_base != NULL && iop->_ptr > iop->_base) {
		res = _xflsbuf(iop);
	}
	if (iop->_flag & _IORW) {
		iop->_flag &= ~_IOWRT;
		iop->_cnt = 0;
	}
	return (res);
}

/* flush buffer and close stream */
int
fclose(FILE *iop)
{
	int res = 0;
	rmutex_t *lk;

	if (iop == NULL) {
		return (EOF);		/* avoid passing zero to FLOCKFILE */
	}

	FLOCKFILE(lk, iop);
	if (iop->_flag == 0) {
		FUNLOCKFILE(lk);
		return (EOF);
	}
	/* Is not unbuffered and opened for read and/or write ? */
	if (!(iop->_flag & _IONBF) && (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
		res = _fflush_u(iop);
	if (close(GET_FD(iop)) < 0)
		res = EOF;
	if (iop->_flag & _IOMYBUF) {
		/* buffer was allocated by stdio; PUSHBACK bytes precede it */
		(void) free((char *)iop->_base - PUSHBACK);
	}
	iop->_base = NULL;
	iop->_ptr = NULL;
	iop->_cnt = 0;
	iop->_flag = 0;			/* marks it as available */
	FUNLOCKFILE(lk);

	/* bump the fclose count used by _findiop()'s rescan heuristic */
	if (__libc_threaded)
		cancel_safe_mutex_lock(&_first_link_lock);
	fcloses++;
	if (__libc_threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);

	return (res);
}

/* flush buffer, close fd but keep the stream used by freopen() */
int
close_fd(FILE *iop)
{
	int res = 0;
	mbstate_t *mb;

	if (iop == NULL || iop->_flag == 0)
		return (EOF);
	/* Is not unbuffered and opened for read and/or write ? */
	if (!(iop->_flag & _IONBF) && (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
		res = _fflush_u(iop);
	if (close(GET_FD(iop)) < 0)
		res = EOF;
	if (iop->_flag & _IOMYBUF) {
		(void) free((char *)iop->_base - PUSHBACK);
	}
	iop->_base = NULL;
	iop->_ptr = NULL;
	mb = _getmbstate(iop);
	if (mb != NULL)
		(void) memset(mb, 0, sizeof (mbstate_t));
	iop->_cnt = 0;
	_setorientation(iop, _NO_MODE);
	return (res);
}

/*
 * Try to claim fp for _findiop().  With lk != NULL the per-iop lock is
 * trylock'd first; an already-locked fp is in use and is skipped.  A free
 * fp (_flag == 0) is reset, its mbstate cleared, and claimed by setting
 * the low 8 bits of _flag (0377) before the lock is dropped.
 */
static FILE *
getiop(FILE *fp, rmutex_t *lk, mbstate_t *mb)
{
	if (lk != NULL && cancel_safe_mutex_trylock(lk) != 0)
		return (NULL);	/* locked: fp in use */

	if (fp->_flag == 0) {	/* unused */
#ifndef	_LP64
		fp->__orientation = 0;
#endif /* _LP64 */
		fp->_cnt = 0;
		fp->_ptr = NULL;
		fp->_base = NULL;
		fp->_flag = 0377;	/* claim the fp by setting low 8 bits */
		(void) memset(mb, 0, sizeof (mbstate_t));
		FUNLOCKFILE(lk);
		return (fp);
	}
	FUNLOCKFILE(lk);
	return (NULL);
}

#ifndef	_LP64
/*
 * DESCRIPTION:
 * This function gets the pointer to the mbstate_t structure associated
 * with the specified iop.
 *
 * RETURNS:
 * If the associated mbstate_t found, the pointer to the mbstate_t is
 * returned.  Otherwise, NULL is returned.
 */
mbstate_t *
_getmbstate(FILE *iop)
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		return (&dat->_state);

	return (NULL);
}

/*
 * More 32-bit only functions.
 * They lookup/set large fd's for extended FILE support.
 */

/*
 * The negative value indicates that Extended fd FILE's has not
 * been enabled by the user.
 */
static int bad_fd = -1;

/*
 * Return the file descriptor for iop.  When extended FILE's are in use
 * the real (possibly > 255) fd lives in the _altfd extended-data slot;
 * otherwise the historical one-byte _magic field holds it.  If the
 * _magic byte of an extended FILE no longer equals bad_fd, something
 * wrote to it behind our back and we abort rather than risk silent
 * data corruption on the wrong fd.
 */
int
_file_get(FILE *iop)
{
	int altfd;

	/*
	 * Failure indicates a FILE * not allocated through stdio;
	 * it means the flag values are probably bogus and that if
	 * a file descriptor is set, it's in _magic.
	 * Inline getxfdat() for performance reasons.
	 */
	if (STDIOP(iop))
		altfd = _xftab[IOPIND(iop)]._altfd;
	else if (VALIDXFILE(FILEx(iop)))
		altfd = FILEx(iop)->_xdat._altfd;
	else
		return (iop->_magic);
	/*
	 * if this is not an internal extended FILE then check
	 * if _file is being changed from underneath us.
	 * It should not be because if
	 * it is then we lose our ability to guard against
	 * silent data corruption.
	 */
	if (!iop->__xf_nocheck && bad_fd > -1 && iop->_magic != bad_fd) {
		(void) fprintf(stderr,
		    "Application violated extended FILE safety mechanism.\n"
		    "Please read the man page for extendedFILE.\nAborting\n");
		abort();
	}
	return (altfd);
}

/*
 * Store fd for iop as an extended fd: the real fd goes into the
 * extended-data _altfd slot and _magic is set to the sentinel bad_fd.
 * "type" is the fopen()-style mode string; a trailing 'F' marks an
 * internal extended FILE that bypasses the safety check above.
 * Fails with EMFILE if extended FILE's have not been enabled.
 */
int
_file_set(FILE *iop, int fd, const char *type)
{
	struct xFILEdata *dat;
	int Fflag;

	/* Already known to contain at least one byte */
	while (*++type != '\0')
		;

	Fflag = type[-1] == 'F';
	if (!Fflag && bad_fd < 0) {
		errno = EMFILE;
		return (-1);
	}

	dat = getxfdat(iop);
	iop->__extendedfd = 1;
	iop->__xf_nocheck = Fflag;
	dat->_altfd = fd;
	iop->_magic = (unsigned char)bad_fd;
	return (0);
}

/*
 * Activates extended fd's in FILE's
 */

static const int tries[] = {196, 120, 60, 3};
#define	NTRIES	(sizeof (tries)/sizeof (int))

/*
 * Reserve a "bad fd" via fcntl(F_BADFD) and record it in bad_fd,
 * enabling extended fd support.  With fd < 0 we probe the candidates
 * in tries[] for an available descriptor; otherwise the caller's fd
 * must be granted exactly.  action is the signal delivered on misuse
 * (SIGABRT by default).  Returns 0 on success, -1 on failure.
 */
int
enable_extended_FILE_stdio(int fd, int action)
{
	int i;

	if (action < 0)
		action = SIGABRT;	/* default signal */

	if (fd < 0) {
		/*
		 * search for an available fd and make it the badfd
		 */
		for (i = 0; i < NTRIES; i++) {
			fd = fcntl(tries[i], F_BADFD, action);
			if (fd >= 0)
				break;
		}
		if (fd < 0)	/* failed to find an available fd */
			return (-1);
	} else {
		/* caller requests that fd be the chosen badfd */
		int nfd = fcntl(fd, F_BADFD, action);
		if (nfd < 0 || nfd != fd)
			return (-1);
	}
	bad_fd = fd;
	return (0);
}
#endif