/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved  	*/


#include "synonyms.h"
#include "mtlib.h"
#include "file64.h"

#define	_iob	__iob

#include <sys/types.h>
#include <stdlib.h>
#include <stdio.h>
#include <thread.h>
#include <synch.h>
#include <unistd.h>
#include <string.h>
#include "stdiom.h"
#include <wchar.h>
#include <sys/stat.h>
#include <stddef.h>
#include <errno.h>
#include <fcntl.h>

#undef end

#define	FILE_ARY_SZ	8 /* a nice size for FILE array & end_buffer_ptrs */

#ifdef	_LP64

/*
 * Macros to declare and loop over a fp or fp/xfp combo to
 * avoid some of the _LP64 ifdef hell.
 */

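/*
 * Illustrative sketch (mirrors the loops in _flushlbf() and
 * _fflush_l_iops() below): generic code walks every stream with
 * these macros so that one loop body serves both data models:
 *
 *	FPDECL(fp);
 *	lp = &__first_link;
 *	do {
 *		FIRSTFP(lp, fp);
 *		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
 *			... use fp, FPLOCK(fp), FPSTATE(fp) ...
 *		}
 *	} while ((lp = lp->next) != NULL);
 */
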
#define	FPDECL(fp)		FILE *fp
#define	FIRSTFP(lp, fp)		fp = lp->iobp
#define	NEXTFP(fp)		fp++
#define	FPLOCK(fp)		&fp->_lock
#define	FPSTATE(fp)		&fp->_state

#define	xFILE			FILE

#else

#define	FPDECL(fp)		FILE *fp; xFILE *x##fp
#define	FIRSTFP(lp, fp)		x##fp = lp->iobp; \
				fp = x##fp ? &x##fp->_iob : &_iob[0]
#define	NEXTFP(fp)		(x##fp ? fp = &(++x##fp)->_iob : ++fp)
#define	FPLOCK(fp)		x##fp ? \
				    &x##fp->xlock : &_xftab[IOPIND(fp)]._lock
#define	FPSTATE(fp)		x##fp ? \
				    &x##fp->xstate : &_xftab[IOPIND(fp)]._state

/* The extended 32-bit file structure for use in link buffers */
typedef struct xFILE {
	FILE			_iob;		/* must be first! */
	struct xFILEdata	_xdat;
} xFILE;

#define	xmagic			_xdat._magic
#define	xend			_xdat._end
#define	xlock			_xdat._lock
#define	xstate			_xdat._state

#define	FILEx(fp)		((struct xFILE *)(uintptr_t)fp)

/*
 * The magic number stored is actually the pointer scrambled with
 * a magic number.  Pointers to data items live everywhere in memory
 * so we scramble the pointer in order to avoid accidental collisions.
 */
#define	XFILEMAGIC		0x63687367
#define	XMAGIC(xfp)		((uintptr_t)(xfp) ^ XFILEMAGIC)
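/*
 * Example (illustrative): an xFILE at address xfp stores
 * (uintptr_t)xfp ^ 0x63687367 in its xmagic field; VALIDXFILE()
 * (32-bit code further below) recomputes XMAGIC(xfp) and compares,
 * so a random data word is unlikely to masquerade as a valid
 * extended FILE.
 */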

#endif /* _LP64 */

struct _link_	/* manages a list of streams */
{
	xFILE *iobp;		/* the array of (x)FILE's */
				/* NULL for the __first_link in ILP32 */
	int	niob;		/* length of the arrays */
	struct _link_	*next;	/* next in the list */
};

/*
 * With dynamic linking, iob may be in either the library or in the user's
 * a.out, so the run time linker fixes up the first entry in __first_link at
 * process startup time.
 *
 * In 32 bit processes, we don't have xFILE[FILE_ARY_SZ] but FILE[],
 * and _xftab[] instead; this is denoted by having iobp set to NULL in
 * 32 bit mode for the first link entry.
 */
struct _link_ __first_link =	/* first in linked list */
{
#if !defined(_LP64)
	NULL,
#else
	&_iob[0],
#endif
	_NFILE,
	NULL
};

/*
 * Information cached to speed up searches.  We remember where we
 * last found a free FILE* and we remember whether we saw any fcloses
 * in between.  We also count the number of chunks we allocated, see
 * _findiop() for an explanation.
 * These variables are all protected by _first_link_lock.
 */
static struct _link_ *lastlink = NULL;
static int fcloses;
static int nchunks;

static mutex_t _first_link_lock = DEFAULTMUTEX;

static int _fflush_l_iops(void);
static FILE *getiop(FILE *, rmutex_t *, mbstate_t *);

/*
 * All functions that understand the linked list of iob's follow.
 */
#pragma weak _cleanup = __cleanup
void
__cleanup(void)		/* called at process end to flush output streams */
{
	(void) fflush(NULL);
}

/*
 * For fork1-safety (see libc_prepare_atfork(), etc).
 */
void
stdio_locks()
{
	(void) _private_mutex_lock(&_first_link_lock);
	/*
	 * XXX: We should acquire all of the iob locks here.
	 */
}

void
stdio_unlocks()
{
	/*
	 * XXX: We should release all of the iob locks here.
	 */
	(void) _private_mutex_unlock(&_first_link_lock);
}

void
_flushlbf(void)		/* fflush() all line-buffered streams */
{
	FPDECL(fp);
	int i;
	struct _link_ *lp;
	/* Allow compiler to optimize the loop */
	int threaded = __libc_threaded;

	if (threaded)
		(void) _private_mutex_lock(&_first_link_lock);

	lp = &__first_link;
	do {
		FIRSTFP(lp, fp);
		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
			/*
			 * The additional _IONBF check guards against
			 * allocated but uninitialized iops (see _findiop).
			 * We also automatically skip non-allocated iops.
			 * Don't block on locks.
			 */
			if ((fp->_flag & (_IOLBF | _IOWRT | _IONBF)) ==
			    (_IOLBF | _IOWRT)) {
				if (threaded) {
					rmutex_t *lk = FPLOCK(fp);
					if (rmutex_trylock(lk) != 0)
						continue;
					/* Recheck after locking */
					if ((fp->_flag & (_IOLBF | _IOWRT)) ==
					    (_IOLBF | _IOWRT)) {
						(void) _fflush_u(fp);
					}
					(void) rmutex_unlock(lk);
				} else {
					(void) _fflush_u(fp);
				}
			}
		}
	} while ((lp = lp->next) != NULL);

	if (threaded)
		(void) _private_mutex_unlock(&_first_link_lock);
}

/* allocate an unused stream; NULL if cannot */
FILE *
_findiop(void)
{
	struct _link_ *lp, **prev;

	/* used so there only needs to be one malloc() */
#ifdef _LP64
	typedef	struct	{
		struct _link_	hdr;
		FILE	iob[FILE_ARY_SZ];
	} Pkg;
#else
	typedef union {
		struct {				/* Normal */
			struct _link_	hdr;
			xFILE	iob[FILE_ARY_SZ];
		} Pkgn;
		struct {				/* Reversed */
			xFILE	iob[FILE_ARY_SZ];
			struct _link_	hdr;
		} Pkgr;
	} Pkg;
	uintptr_t delta;
#endif
	Pkg *pkgp;
	struct _link_ *hdr;
	FPDECL(fp);
	int i;
	int threaded = __libc_threaded;

	if (threaded)
		(void) _private_mutex_lock(&_first_link_lock);

	if (lastlink == NULL) {
rescan:
		fcloses = 0;
		lastlink = &__first_link;
	}

	lp = lastlink;

	/*
	 * Lock to make the testing of fp->_flag == 0 and the acquisition
	 * of the fp atomic, and to protect the allocation of new links.
	 * Low contention is expected on _findiop(), hence the coarse
	 * locking.  For finer granularity, use fp->_lock for allocating
	 * an iop and make the testing of lp->next and the allocation of
	 * new links atomic using lp->_lock.
	 */

	do {
		prev = &lp->next;
		FIRSTFP(lp, fp);

		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
			FILE *ret;
			if (threaded) {
				ret = getiop(fp, FPLOCK(fp), FPSTATE(fp));
				if (ret != NULL) {
					(void) _private_mutex_unlock(
					    &_first_link_lock);
					return (ret);
				}
			} else {
				ret = getiop(fp, NULL, FPSTATE(fp));
				if (ret != NULL)
					return (ret);
			}
		}
	} while ((lastlink = lp = lp->next) != NULL);

	/*
	 * If there was a sufficient number of fcloses since we last started
	 * at __first_link, we rescan all fp's again.  We do not rescan for
	 * all fcloses; that would simplify the algorithm but would make
	 * search times near O(n) again.
	 * Worst case behaviour would still be pretty bad (open a full set,
	 * then continuously opening and closing one FILE * gets you a full
	 * scan each time).  That's why we overallocate 1 FILE for each
	 * 32 chunks.  More overallocation is better; this is a nice
	 * empirical value which doesn't cost a lot of memory, doesn't
	 * overallocate until we reach 256 FILE *s and keeps the performance
	 * pretty close to the optimum.
	 */
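	/*
	 * Worked example (illustrative): with FILE_ARY_SZ == 8, 32
	 * chunks hold 256 FILEs and nchunks/32 == 1, so a single
	 * fclose() no longer forces a rescan and we may allocate a
	 * fresh chunk while one slot is still free.  Below 32 chunks,
	 * nchunks/32 == 0 and any fclose() triggers a rescan, so no
	 * overallocation happens yet.
	 */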
	if (fcloses > nchunks/32)
		goto rescan;

	/*
	 * Need to allocate another and put it in the linked list.
	 */
	if ((pkgp = malloc(sizeof (Pkg))) == NULL) {
		if (threaded)
			(void) _private_mutex_unlock(&_first_link_lock);
		return (NULL);
	}

	(void) memset(pkgp, 0, sizeof (Pkg));

#ifdef _LP64
	hdr = &pkgp->hdr;
	hdr->iobp = &pkgp->iob[0];
#else
	/*
	 * The problem with referencing a word after a FILE* is the possibility
	 * of a SIGSEGV if a non-stdio issued FILE structure ends on a page
	 * boundary.  We run this check so we never need to run an expensive
	 * check like mincore() in order to know whether it is
	 * safe to dereference ((xFILE*)fp)->xmagic.
	 * We allocate the block with two alternative layouts; if one
	 * layout is not properly aligned for our purposes, the other layout
	 * will be, because the size of _link_ is small compared to
	 * sizeof (xFILE).
	 * The check performed is this:
	 *	If the distance from pkgp to the end of the page is
	 *	less than the offset of the last xmagic field in the
	 *	xFILE structure (i.e., the 0x1000 boundary is inside our
	 *	just allocated structure), and the distance modulo the size
	 *	of xFILE is identical to the offset of the first xmagic in
	 *	the structure (i.e., XXXXXX000 points to an xmagic field),
	 *	we need to use the reversed structure.
	 */
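	/*
	 * Worked example with hypothetical sizes: say
	 * sizeof (struct _link_) == 12, sizeof (xFILE) == 48 and
	 * xmagic sits 16 bytes into each xFILE, so normal-layout
	 * xmagic fields live at pkgp + 28 + 48 * k.  A chunk with
	 * delta == 268 (== 48 * 5 + 28) would put iob[5].xmagic
	 * exactly on the page boundary, so we pick the reversed
	 * layout, whose xmagic fields live at pkgp + 16 + 48 * k
	 * and miss the boundary (268 - 16 is not a multiple of 48).
	 */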
	if ((delta = 0x1000 - ((uintptr_t)pkgp & 0xfff)) <=
				offsetof(Pkg, Pkgn.iob[FILE_ARY_SZ-1].xmagic) &&
	    delta % sizeof (struct xFILE) ==
		    offsetof(Pkg, Pkgn.iob[0].xmagic)) {
		/* Use reversed structure */
		hdr = &pkgp->Pkgr.hdr;
		hdr->iobp = &pkgp->Pkgr.iob[0];
	} else {
		/* Use normal structure */
		hdr = &pkgp->Pkgn.hdr;
		hdr->iobp = &pkgp->Pkgn.iob[0];
	}
#endif /* _LP64 */

	hdr->niob = FILE_ARY_SZ;
	nchunks++;

#ifdef	_LP64
	fp = hdr->iobp;
	for (i = 0; i < FILE_ARY_SZ; i++)
		_private_mutex_init(&fp[i]._lock,
			USYNC_THREAD|LOCK_RECURSIVE, NULL);
#else
	xfp = hdr->iobp;
	fp = &xfp->_iob;

	for (i = 0; i < FILE_ARY_SZ; i++) {
		xfp[i].xmagic = XMAGIC(&xfp[i]);
		_private_mutex_init(&xfp[i].xlock,
			USYNC_THREAD|LOCK_RECURSIVE, NULL);
	}
#endif	/*	_LP64	*/

	lastlink = *prev = hdr;
	fp->_ptr = 0;
	fp->_base = 0;
	fp->_flag = 0377; /* claim the fp by setting low 8 bits */
	if (threaded)
		(void) _private_mutex_unlock(&_first_link_lock);

	return (fp);
}

static void
isseekable(FILE *iop)
{
	struct stat64 fstatbuf;
	int save_errno;

	save_errno = errno;

	if (fstat64(GET_FD(iop), &fstatbuf) != 0) {
		/*
		 * When we don't know what it is, we'll
		 * do the old behaviour and flush
		 * the stream.
		 */
		SET_SEEKABLE(iop);
		errno = save_errno;
		return;
	}

	/*
	 * Check for what is non-SEEKABLE;
	 * otherwise assume it's SEEKABLE so we get the old
	 * behaviour and flush the stream.
	 */

	if (S_ISFIFO(fstatbuf.st_mode) || S_ISCHR(fstatbuf.st_mode) ||
	    S_ISSOCK(fstatbuf.st_mode) || S_ISDOOR(fstatbuf.st_mode)) {
		CLEAR_SEEKABLE(iop);
	} else {
		SET_SEEKABLE(iop);
	}

	errno = save_errno;
}

#ifdef	_LP64
void
_setbufend(FILE *iop, Uchar *end)	/* set the end pointer for this iop */
{
	iop->_end = end;

	isseekable(iop);
}

#undef _realbufend

Uchar *
_realbufend(FILE *iop)		/* get the end pointer for this iop */
{
	return (iop->_end);
}

#else /* _LP64 */

/*
 * Awkward functions not needed for the sane 64 bit environment.
 */
/*
 * xmagic must not be aligned on a 4K boundary. We guarantee this in
 * _findiop().
 */
#define	VALIDXFILE(xfp) \
	(((uintptr_t)&(xfp)->xmagic & 0xfff) && \
	    (xfp)->xmagic == XMAGIC(FILEx(xfp)))

static struct xFILEdata *
getxfdat(FILE *iop)
{
	if (STDIOP(iop))
		return (&_xftab[IOPIND(iop)]);
	else if (VALIDXFILE(FILEx(iop)))
		return (&FILEx(iop)->_xdat);
	else
		return (NULL);
}

void
_setbufend(FILE *iop, Uchar *end)	/* set the end pointer for this iop */
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		dat->_end = end;

	isseekable(iop);

	/*
	 * For binary compatibility with user programs using the
	 * old _bufend macro.  This is *so* broken, fileno()
	 * is not the proper index.
	 */
	if (iop->_magic < _NFILE)
		_bufendtab[iop->_magic] = end;

}

Uchar *
_realbufend(FILE *iop)		/* get the end pointer for this iop */
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		return (dat->_end);

	return (NULL);
}

/*
 * _reallock() is invoked in each stdio call through the IOB_LCK() macro;
 * it is therefore extremely performance sensitive.  We get better performance
 * by inlining the STDIOP check in IOB_LCK and inlining a custom version
 * of getxfdat() here.
 */
rmutex_t *
_reallock(FILE *iop)
{
	if (VALIDXFILE(FILEx(iop)))
		return (&FILEx(iop)->xlock);

	return (NULL);
}

#endif	/*	_LP64	*/

/* make sure _cnt, _ptr are correct */
void
_bufsync(FILE *iop, Uchar *bufend)
{
	ssize_t spaceleft;

	spaceleft = bufend - iop->_ptr;
	if (bufend < iop->_ptr) {
		iop->_ptr = bufend;
		iop->_cnt = 0;
	} else if (spaceleft < iop->_cnt)
		iop->_cnt = spaceleft;
}

/* really write out current buffer contents */
int
_xflsbuf(FILE *iop)
{
	ssize_t n;
	Uchar *base = iop->_base;
	Uchar *bufend;
	ssize_t num_wrote;

	/*
	 * Hopefully, be stable with respect to interrupts...
	 */
	n = iop->_ptr - base;
	iop->_ptr = base;
	bufend = _bufend(iop);
	if (iop->_flag & (_IOLBF | _IONBF))
		iop->_cnt = 0;		/* always go to a flush */
	else
		iop->_cnt = bufend - base;

	if (_needsync(iop, bufend))	/* recover from interrupts */
		_bufsync(iop, bufend);

	if (n > 0) {
		int fd = GET_FD(iop);
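		/*
		 * write() may transfer fewer than n bytes (short
		 * write); keep writing the remainder.  A return of
		 * zero or an error puts the stream in the error
		 * state instead of looping forever.
		 */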
		while ((num_wrote =
			write(fd, base, (size_t)n)) != n) {
			if (num_wrote <= 0) {
				iop->_flag |= _IOERR;
				return (EOF);
			}
			n -= num_wrote;
			base += num_wrote;
		}
	}
	return (0);
}

/* flush (write) buffer */
int
fflush(FILE *iop)
{
	int res;
	rmutex_t *lk;

	if (iop) {
		FLOCKFILE(lk, iop);
		res = _fflush_u(iop);
		FUNLOCKFILE(lk);
	} else {
		res = _fflush_l_iops();		/* flush all iops */
	}
	return (res);
}

static int
_fflush_l_iops(void)		/* flush all buffers */
{
	FPDECL(iop);

	int i;
	struct _link_ *lp;
	int res = 0;
	rmutex_t *lk;
	/* Allow the compiler to optimize the load out of the loop */
	int threaded = __libc_threaded;

	if (threaded)
		(void) _private_mutex_lock(&_first_link_lock);

	lp = &__first_link;

	do {
		/*
		 * We need to grab the file locks or file corruption
		 * will happen.  But we first check the flags field,
		 * knowing that when it is 0, the iop isn't allocated and
		 * cannot become allocated while we're holding the
		 * _first_link_lock.  We also skip an iop when _IONBF is
		 * set (which is also the case while _flag is 0377, i.e.,
		 * an allocation is in progress).
		 *
		 * Ignore locked streams; it will appear as if
		 * concurrent updates happened after fflush(NULL).  Note
		 * that we even attempt to lock if the locking is set to
		 * "by caller".  We don't want to penalize callers of
		 * __fsetlocking() by not flushing their files.  Note: if
		 * __fsetlocking() callers don't employ any locking, they
		 * may still face corruption in fflush(NULL); but that's
		 * no change from earlier releases.
		 */
		FIRSTFP(lp, iop);
		for (i = lp->niob; --i >= 0; NEXTFP(iop)) {
			unsigned int flag = iop->_flag;

			/* flag 0, flag 0377, or _IONBF set */
			if (flag == 0 || (flag & _IONBF) != 0)
				continue;

			if (threaded) {
				lk = FPLOCK(iop);
				if (rmutex_trylock(lk) != 0)
					continue;
			}

			if (!(iop->_flag & _IONBF)) {
				/*
				 * Don't need to worry about the _IORW case
				 * since the iop will also be marked with
				 * _IOREAD or _IOWRT, whichever we are
				 * really doing.
				 */
				if (iop->_flag & _IOWRT) {
					/* Flush write buffers */
					res |= _fflush_u(iop);
				} else if (iop->_flag & _IOREAD) {
					/*
					 * flush seekable read buffers
					 * don't flush non-seekable read buffers
					 */
					if (GET_SEEKABLE(iop)) {
						res |= _fflush_u(iop);
					}
				}
			}
			if (threaded)
				(void) rmutex_unlock(lk);
		}
	} while ((lp = lp->next) != NULL);
	if (threaded)
		(void) _private_mutex_unlock(&_first_link_lock);
	return (res);
}

/* flush buffer */
int
_fflush_u(FILE *iop)
{
	int res = 0;

	/* this portion is always assumed locked */
	if (!(iop->_flag & _IOWRT)) {
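		/*
		 * Read side: discard any buffered but unconsumed
		 * input and seek the underlying fd back over it so
		 * the file offset agrees with the stream position.
		 */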
		(void) lseek64(GET_FD(iop), -iop->_cnt, SEEK_CUR);
		iop->_cnt = 0;
		/* needed for ungetc & multibyte pushbacks */
		iop->_ptr = iop->_base;
		if (iop->_flag & _IORW) {
			iop->_flag &= ~_IOREAD;
		}
		return (0);
	}
	if (iop->_base != NULL && iop->_ptr > iop->_base) {
		res = _xflsbuf(iop);
	}
	if (iop->_flag & _IORW) {
		iop->_flag &= ~_IOWRT;
		iop->_cnt = 0;
	}
	return (res);
}

/* flush buffer and close stream */
int
fclose(FILE *iop)
{
	int res = 0;
	rmutex_t *lk;

	if (iop == NULL) {
		return (EOF);		/* avoid passing zero to FLOCKFILE */
	}

	FLOCKFILE(lk, iop);
	if (iop->_flag == 0) {
		FUNLOCKFILE(lk);
		return (EOF);
	}
	/* Not unbuffered, and opened for read and/or write? */
	if (!(iop->_flag & _IONBF) && (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
		res = _fflush_u(iop);
	if (close(GET_FD(iop)) < 0)
		res = EOF;
	if (iop->_flag & _IOMYBUF) {
		(void) free((char *)iop->_base - PUSHBACK);
	}
	iop->_base = NULL;
	iop->_ptr = NULL;
	iop->_cnt = 0;
	iop->_flag = 0;			/* marks it as available */
	FUNLOCKFILE(lk);

	if (__libc_threaded)
		(void) _private_mutex_lock(&_first_link_lock);
	fcloses++;
	if (__libc_threaded)
		(void) _private_mutex_unlock(&_first_link_lock);

	return (res);
}

/* flush buffer, close fd but keep the stream used by freopen() */
int
close_fd(FILE *iop)
{
	int res = 0;
	mbstate_t *mb;

	if (iop == NULL || iop->_flag == 0)
		return (EOF);
	/* Not unbuffered, and opened for read and/or write? */
	if (!(iop->_flag & _IONBF) && (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
		res = _fflush_u(iop);
	if (close(GET_FD(iop)) < 0)
		res = EOF;
	if (iop->_flag & _IOMYBUF) {
		(void) free((char *)iop->_base - PUSHBACK);
	}
	iop->_base = NULL;
	iop->_ptr = NULL;
	mb = _getmbstate(iop);
	if (mb != NULL)
		(void) memset(mb, 0, sizeof (mbstate_t));
	iop->_cnt = 0;
	_setorientation(iop, _NO_MODE);
	return (res);
}

static FILE *
getiop(FILE *fp, rmutex_t *lk, mbstate_t *mb)
{
	if (lk != NULL && rmutex_trylock(lk))
		return (NULL);	/* locked: fp in use */

	if (fp->_flag == 0) {	/* unused */
#ifndef	_LP64
		fp->__orientation = 0;
#endif /* _LP64 */
		fp->_cnt = 0;
		fp->_ptr = NULL;
		fp->_base = NULL;
		fp->_flag = 0377;	/* claim the fp by setting low 8 bits */
		(void) memset(mb, 0, sizeof (mbstate_t));
		FUNLOCKFILE(lk);
		return (fp);
	}
	FUNLOCKFILE(lk);
	return (NULL);
}

#ifndef	_LP64
/*
 * DESCRIPTION:
 * This function gets the pointer to the mbstate_t structure associated
 * with the specified iop.
 *
 * RETURNS:
 * If the associated mbstate_t is found, a pointer to the mbstate_t is
 * returned.  Otherwise, NULL is returned.
 */
mbstate_t *
_getmbstate(FILE *iop)
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		return (&dat->_state);

	return (NULL);
}

/*
 * More 32-bit only functions.
 * They look up and set large fd's for extended FILE support.
 */

/*
 * A negative value indicates that extended fd FILE's have not
 * been enabled by the user.
 */
static int bad_fd = -1;

int
_file_get(FILE *iop)
{
	int altfd;

	/*
	 * Failure indicates a FILE * not allocated through stdio;
	 * it means the flag values are probably bogus and that if
	 * a file descriptor is set, it's in _magic.
	 * Inline getxfdat() for performance reasons.
	 */
	if (STDIOP(iop))
		altfd = _xftab[IOPIND(iop)]._altfd;
	else if (VALIDXFILE(FILEx(iop)))
		altfd = FILEx(iop)->_xdat._altfd;
	else
		return (iop->_magic);
	/*
	 * If this is not an internal extended FILE, check whether
	 * _file is being changed from underneath us.
	 * It should not be, because if it is
	 * then we lose our ability to guard against
	 * silent data corruption.
	 */
	if (!iop->__xf_nocheck && bad_fd > -1 && iop->_magic != bad_fd) {
		(void) fprintf(stderr,
		    "Application violated extended FILE safety mechanism.\n"
		    "Please read the man page for extendedFILE.\nAborting\n");
		abort();
	}
	return (altfd);
}

int
_file_set(FILE *iop, int fd, const char *type)
{
	struct xFILEdata *dat;
	int Fflag;

	/* Already known to contain at least one byte */
	while (*++type != '\0')
		;

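	/*
	 * type now points at the terminating NUL; a mode string whose
	 * last character is 'F' (e.g. "w+F", see fopen(3C)) explicitly
	 * requests extended FILE behaviour for this stream.
	 */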
	Fflag = type[-1] == 'F';
	if (!Fflag && bad_fd < 0) {
		errno = EMFILE;
		return (-1);
	}

	dat = getxfdat(iop);
	iop->__extendedfd = 1;
	iop->__xf_nocheck = Fflag;
	dat->_altfd = fd;
	iop->_magic = (unsigned char)bad_fd;
	return (0);
}

/*
 * Activates extended fd's in FILE's
 */

static const int tries[] = {196, 120, 60, 3};
#define	NTRIES	(sizeof (tries)/sizeof (int))

int
enable_extended_FILE_stdio(int fd, int action)
{
	int i;

	if (action < 0)
		action = SIGABRT;	/* default signal */

	if (fd < 0) {
		/*
		 * search for an available fd and make it the badfd
		 */
		for (i = 0; i < NTRIES; i++) {
			fd = fcntl(tries[i], F_BADFD, action);
			if (fd >= 0)
				break;
		}
		if (fd < 0)	/* failed to find an available fd */
			return (-1);
	} else {
		/* caller requests that fd be the chosen badfd */
		int nfd = fcntl(fd, F_BADFD, action);
		if (nfd < 0 || nfd != fd)
			return (-1);
	}
	bad_fd = fd;
	return (0);
}
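
/*
 * Usage sketch (illustrative): a 32-bit application that needs stdio
 * streams over file descriptors >= 256 might call, early in main():
 *
 *	if (enable_extended_FILE_stdio(-1, -1) == 0)
 *		... extended FILE facility is active ...
 *
 * Here -1, -1 means "pick any available fd as the badfd" and "use the
 * default signal (SIGABRT)"; see also the extendedFILE(5) man page.
 */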
#endif