/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved  	*/


#include "synonyms.h"
#include "mtlib.h"
#include "file64.h"
#include "../gen/_libc_gettext.h"

#define	_iob	__iob

#include <sys/types.h>
#include <stdlib.h>
#include <stdio.h>
#include <thread.h>
#include <synch.h>
#include <unistd.h>
#include <string.h>
#include "stdiom.h"
#include <wchar.h>
#include <sys/stat.h>
#include <stddef.h>
#include <errno.h>
#include <fcntl.h>

#undef end

#define	FILE_ARY_SZ	8 /* a nice size for FILE array & end_buffer_ptrs */

#ifdef	_LP64

/*
 * Macros to declare and loop over a fp or fp/xfp combo to
 * avoid some of the _LP64 ifdef hell.
 */

#define	FPDECL(fp)		FILE *fp
#define	FIRSTFP(lp, fp)		fp = lp->iobp
#define	NEXTFP(fp)		fp++
#define	FPLOCK(fp)		&fp->_lock
#define	FPSTATE(fp)		&fp->_state

#define	xFILE			FILE

#else

#define	FPDECL(fp)		FILE *fp; xFILE *x##fp
#define	FIRSTFP(lp, fp)		x##fp = lp->iobp; \
				fp = x##fp ? &x##fp->_iob : &_iob[0]
#define	NEXTFP(fp)		(x##fp ? fp = &(++x##fp)->_iob : ++fp)
#define	FPLOCK(fp)		x##fp ? \
				    &x##fp->xlock : &_xftab[IOPIND(fp)]._lock
#define	FPSTATE(fp)		x##fp ? \
				    &x##fp->xstate : &_xftab[IOPIND(fp)]._state
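
/*
 * In ILP32 the first link's iobp is NULL (see __first_link below),
 * so FIRSTFP/FPLOCK/FPSTATE fall back to the legacy _iob[] and
 * _xftab[] arrays for that link and use the xFILE array for all
 * subsequently allocated links.
 */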

/* The extended 32-bit file structure for use in link buffers */
typedef struct xFILE {
	FILE			_iob;		/* must be first! */
	struct xFILEdata	_xdat;
} xFILE;

#define	xmagic			_xdat._magic
#define	xend			_xdat._end
#define	xlock			_xdat._lock
#define	xstate			_xdat._state

#define	FILEx(fp)		((struct xFILE *)(uintptr_t)fp)

/*
 * The magic number stored is actually the pointer scrambled with
 * a magic number.  Pointers to data items live everywhere in memory
 * so we scramble the pointer in order to avoid accidental collisions.
 */
#define	XFILEMAGIC		0x63687367
#define	XMAGIC(xfp)		((uintptr_t)(xfp) ^ XFILEMAGIC)
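/*
 * For example (address made up for illustration): an xFILE at
 * 0x08047a40 stores 0x08047a40 ^ 0x63687367 == 0x6b6c0927 in
 * xmagic, so a stray word that merely equals XFILEMAGIC, or an
 * xmagic value copied to a different address, will not validate.
 */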

#endif /* _LP64 */

struct _link_	/* manages a list of streams */
{
	xFILE *iobp;		/* the array of (x)FILE's */
				/* NULL for the __first_link in ILP32 */
	int	niob;		/* length of the arrays */
	struct _link_	*next;	/* next in the list */
};

/*
 * With dynamic linking, iob may be in either the library or in the user's
 * a.out, so the run time linker fixes up the first entry in __first_link at
 * process startup time.
 *
 * In 32 bit processes, we don't have xFILE[FILE_ARY_SZ] but FILE[],
 * and _xftab[] instead; this is denoted by having iobp set to NULL in
 * 32 bit mode for the first link entry.
 */
struct _link_ __first_link =	/* first in linked list */
{
#if !defined(_LP64)
	NULL,
#else
	&_iob[0],
#endif
	_NFILE,
	NULL
};

/*
 * Information cached to speed up searches.  We remember where we
 * last found a free FILE* and we remember whether we saw any fcloses
 * in between.  We also count the number of chunks we allocated, see
 * _findiop() for an explanation.
 * These variables are all protected by _first_link_lock.
 */
static struct _link_ *lastlink = NULL;
static int fcloses;
static int nchunks;

static mutex_t _first_link_lock = DEFAULTMUTEX;

static int _fflush_l_iops(void);
static FILE *getiop(FILE *, rmutex_t *, mbstate_t *);

/*
 * All functions that understand the linked list of iob's follow.
 */
#pragma weak _cleanup = __cleanup
void
__cleanup(void)		/* called at process end to flush output streams */
{
	(void) fflush(NULL);
}

/*
 * For fork1-safety (see libc_prepare_atfork(), etc).
 */
void
stdio_locks()
{
	(void) _private_mutex_lock(&_first_link_lock);
	/*
	 * XXX: We should acquire all of the iob locks here.
	 */
}

void
stdio_unlocks()
{
	/*
	 * XXX: We should release all of the iob locks here.
	 */
	(void) _private_mutex_unlock(&_first_link_lock);
}

void
_flushlbf(void)		/* fflush() all line-buffered streams */
{
	FPDECL(fp);
	int i;
	struct _link_ *lp;
	/* Allow compiler to optimize the loop */
	int threaded = __libc_threaded;

	if (threaded)
		(void) _private_mutex_lock(&_first_link_lock);

	lp = &__first_link;
	do {
		FIRSTFP(lp, fp);
		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
			/*
			 * The additional _IONBF check guards against
			 * allocated but uninitialized iops (see _findiop).
			 * We also automatically skip non-allocated iops.
			 * Don't block on locks.
			 */
			if ((fp->_flag & (_IOLBF | _IOWRT | _IONBF)) ==
			    (_IOLBF | _IOWRT)) {
				if (threaded) {
					rmutex_t *lk = FPLOCK(fp);
					if (rmutex_trylock(lk) != 0)
						continue;
					/* Recheck after locking */
					if ((fp->_flag & (_IOLBF | _IOWRT)) ==
					    (_IOLBF | _IOWRT)) {
						(void) _fflush_u(fp);
					}
					(void) rmutex_unlock(lk);
				} else {
					(void) _fflush_u(fp);
				}
			}
		}
	} while ((lp = lp->next) != NULL);

	if (threaded)
		(void) _private_mutex_unlock(&_first_link_lock);
}

/* allocate an unused stream; NULL if cannot */
FILE *
_findiop(void)
{
	struct _link_ *lp, **prev;

	/* used so there only needs to be one malloc() */
#ifdef _LP64
	typedef	struct	{
		struct _link_	hdr;
		FILE	iob[FILE_ARY_SZ];
	} Pkg;
#else
	typedef union {
		struct {				/* Normal */
			struct _link_	hdr;
			xFILE	iob[FILE_ARY_SZ];
		} Pkgn;
		struct {				/* Reversed */
			xFILE	iob[FILE_ARY_SZ];
			struct _link_	hdr;
		} Pkgr;
	} Pkg;
	uintptr_t delta;
#endif
	Pkg *pkgp;
	struct _link_ *hdr;
	FPDECL(fp);
	int i;
	int threaded = __libc_threaded;

	if (threaded)
		(void) _private_mutex_lock(&_first_link_lock);

	if (lastlink == NULL) {
rescan:
		fcloses = 0;
		lastlink = &__first_link;
	}

	lp = lastlink;

	/*
	 * We lock to make the test of fp->_flag == 0 and the acquisition
	 * of the fp atomic, and also for the allocation of new links.
	 * Low contention is expected on _findiop(), hence the coarse
	 * locking.  For finer granularity, use fp->_lock to allocate an
	 * iop and make the test of lp->next and the allocation of a new
	 * link atomic using lp->_lock.
	 */

	do {
		prev = &lp->next;
		FIRSTFP(lp, fp);

		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
			FILE *ret;
			if (threaded) {
				ret = getiop(fp, FPLOCK(fp), FPSTATE(fp));
				if (ret != NULL) {
					(void) _private_mutex_unlock(
					    &_first_link_lock);
					return (ret);
				}
			} else {
				ret = getiop(fp, NULL, FPSTATE(fp));
				if (ret != NULL)
					return (ret);
			}
		}
	} while ((lastlink = lp = lp->next) != NULL);

	/*
	 * If there was a sufficient number of fcloses since we last started
	 * at __first_link, we rescan all fp's again.  We do not rescan for
	 * all fcloses; that would simplify the algorithm but would make
	 * search times near O(n) again.
	 * Worst case behaviour would still be pretty bad (open a full set,
	 * then continuously opening and closing one FILE * gets you a full
	 * scan each time).  That's why we over-allocate 1 FILE for each
	 * 32 chunks.  More over-allocation is better; this is a nice
	 * empirical value which doesn't cost a lot of memory, doesn't
	 * over-allocate until we reach 256 FILE *s and keeps the performance
	 * pretty close to the optimum.
	 */
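	/*
	 * Concretely: while fewer than 32 chunks (256 FILEs at
	 * FILE_ARY_SZ == 8) exist, nchunks/32 == 0 and a single
	 * intervening fclose() already triggers a rescan; from 32
	 * chunks on it takes at least two, and so on.
	 */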
	if (fcloses > nchunks/32)
		goto rescan;

	/*
	 * Need to allocate another and put it in the linked list.
	 */
	if ((pkgp = malloc(sizeof (Pkg))) == NULL) {
		if (threaded)
			(void) _private_mutex_unlock(&_first_link_lock);
		return (NULL);
	}

	(void) memset(pkgp, 0, sizeof (Pkg));

#ifdef _LP64
	hdr = &pkgp->hdr;
	hdr->iobp = &pkgp->iob[0];
#else
	/*
	 * The problem with referencing a word after a FILE* is the
	 * possibility of a SIGSEGV if a non-stdio-issued FILE structure
	 * ends on a page boundary.  We run this check so we never need
	 * to run an expensive check like mincore() in order to know
	 * whether it is safe to dereference ((xFILE*)fp)->xmagic.
	 * We allocate the block with two alternative layouts; if one
	 * layout is not properly aligned for our purposes, the other
	 * layout will be, because the size of _link_ is small compared
	 * to sizeof (xFILE).
	 * The check performed is this:
	 *	If the distance from pkgp to the end of the page is
	 *	no greater than the offset of the last xmagic field in
	 *	the xFILE structure (the 0x1000 boundary is inside our
	 *	just allocated structure) and the distance modulo the
	 *	size of xFILE is identical to the offset of the first
	 *	xmagic in the structure (i.e., XXXXXX000 points to an
	 *	xmagic field), we need to use the reversed structure.
	 */
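	/*
	 * Worked example (sizes are hypothetical, for illustration
	 * only): if sizeof (xFILE) were 0x100 with Pkgn.iob[0].xmagic
	 * at offset 0x90, a pkgp of 0x...f70 gives delta == 0x90, and
	 * the page boundary lands exactly on iob[0].xmagic.  That
	 * xmagic would be 4K-aligned and thus never dereferenceable by
	 * VALIDXFILE(), so the reversed layout, which moves hdr to the
	 * end and shifts every xmagic, is used instead.
	 */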
	if ((delta = 0x1000 - ((uintptr_t)pkgp & 0xfff)) <=
				offsetof(Pkg, Pkgn.iob[FILE_ARY_SZ-1].xmagic) &&
	    delta % sizeof (struct xFILE) ==
		    offsetof(Pkg, Pkgn.iob[0].xmagic)) {
		/* Use reversed structure */
		hdr = &pkgp->Pkgr.hdr;
		hdr->iobp = &pkgp->Pkgr.iob[0];
	} else {
		/* Use normal structure */
		hdr = &pkgp->Pkgn.hdr;
		hdr->iobp = &pkgp->Pkgn.iob[0];
	}
#endif /* _LP64 */

	hdr->niob = FILE_ARY_SZ;
	nchunks++;

#ifdef	_LP64
	fp = hdr->iobp;
	for (i = 0; i < FILE_ARY_SZ; i++)
		_private_mutex_init(&fp[i]._lock,
			USYNC_THREAD|LOCK_RECURSIVE, NULL);
#else
	xfp = hdr->iobp;
	fp = &xfp->_iob;

	for (i = 0; i < FILE_ARY_SZ; i++) {
		xfp[i].xmagic = XMAGIC(&xfp[i]);
		_private_mutex_init(&xfp[i].xlock,
			USYNC_THREAD|LOCK_RECURSIVE, NULL);
	}
#endif	/*	_LP64	*/

	lastlink = *prev = hdr;
	fp->_ptr = 0;
	fp->_base = 0;
	fp->_flag = 0377; /* claim the fp by setting low 8 bits */
	if (threaded)
		(void) _private_mutex_unlock(&_first_link_lock);

	return (fp);
}

static void
isseekable(FILE *iop)
{
	struct stat64 fstatbuf;
	int save_errno;

	save_errno = errno;

	if (fstat64(GET_FD(iop), &fstatbuf) != 0) {
		/*
		 * when we don't know what it is we'll
		 * do the old behaviour and flush
		 * the stream
		 */
		SET_SEEKABLE(iop);
		errno = save_errno;
		return;
	}

	/*
	 * check for what is non-SEEKABLE
	 * otherwise assume it's SEEKABLE so we get the old
	 * behaviour and flush the stream
	 */

	if (S_ISFIFO(fstatbuf.st_mode) || S_ISCHR(fstatbuf.st_mode) ||
	    S_ISSOCK(fstatbuf.st_mode) || S_ISDOOR(fstatbuf.st_mode)) {
		CLEAR_SEEKABLE(iop);
	} else {
		SET_SEEKABLE(iop);
	}

	errno = save_errno;
}

#ifdef	_LP64
void
_setbufend(FILE *iop, Uchar *end)	/* set the end pointer for this iop */
{
	iop->_end = end;

	isseekable(iop);
}

#undef _realbufend

Uchar *
_realbufend(FILE *iop)		/* get the end pointer for this iop */
{
	return (iop->_end);
}

#else /* _LP64 */

/*
 * Awkward functions not needed for the sane 64 bit environment.
 */
/*
 * xmagic must not be aligned on a 4K boundary. We guarantee this in
 * _findiop().
 */
#define	VALIDXFILE(xfp) \
	(((uintptr_t)&(xfp)->xmagic & 0xfff) && \
	    (xfp)->xmagic == XMAGIC(FILEx(xfp)))
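
/*
 * The order of the tests matters: the alignment test short-circuits
 * the && so xmagic is never dereferenced when it sits exactly on a
 * 4K boundary, where it could be on an unmapped page just past a
 * FILE structure that stdio did not allocate.
 */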

static struct xFILEdata *
getxfdat(FILE *iop)
{
	if (STDIOP(iop))
		return (&_xftab[IOPIND(iop)]);
	else if (VALIDXFILE(FILEx(iop)))
		return (&FILEx(iop)->_xdat);
	else
		return (NULL);
}

void
_setbufend(FILE *iop, Uchar *end)	/* set the end pointer for this iop */
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		dat->_end = end;

	isseekable(iop);

	/*
	 * For binary compatibility with user programs using the
	 * old _bufend macro.  This is *so* broken, fileno()
	 * is not the proper index.
	 */
	if (iop->_magic < _NFILE)
		_bufendtab[iop->_magic] = end;

}

Uchar *
_realbufend(FILE *iop)		/* get the end pointer for this iop */
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		return (dat->_end);

	return (NULL);
}

/*
 * _reallock() is invoked in each stdio call through the IOB_LCK() macro;
 * it is therefore extremely performance sensitive.  We get better
 * performance by inlining the STDIOP check in IOB_LCK and inlining a
 * custom version of getxfdat() here.
 */
rmutex_t *
_reallock(FILE *iop)
{
	if (VALIDXFILE(FILEx(iop)))
		return (&FILEx(iop)->xlock);

	return (NULL);
}

#endif	/*	_LP64	*/

/* make sure _cnt, _ptr are correct */
void
_bufsync(FILE *iop, Uchar *bufend)
{
	ssize_t spaceleft;

	spaceleft = bufend - iop->_ptr;
	if (bufend < iop->_ptr) {
		iop->_ptr = bufend;
		iop->_cnt = 0;
	} else if (spaceleft < iop->_cnt)
		iop->_cnt = spaceleft;
}

/* really write out current buffer contents */
int
_xflsbuf(FILE *iop)
{
	ssize_t n;
	Uchar *base = iop->_base;
	Uchar *bufend;
	ssize_t num_wrote;

	/*
	 * Hopefully, be stable with respect to interrupts...
	 */
	n = iop->_ptr - base;
	iop->_ptr = base;
	bufend = _bufend(iop);
	if (iop->_flag & (_IOLBF | _IONBF))
		iop->_cnt = 0;		/* always go to a flush */
	else
		iop->_cnt = bufend - base;

	if (_needsync(iop, bufend))	/* recover from interrupts */
		_bufsync(iop, bufend);

	if (n > 0) {
		int fd = GET_FD(iop);
		/*
		 * write() may push out fewer bytes than requested;
		 * keep writing until the whole buffer is out, and
		 * treat no progress (0 or -1) as an error.
		 */
		while ((num_wrote = write(fd, base, (size_t)n)) != n) {
			if (num_wrote <= 0) {
				iop->_flag |= _IOERR;
				return (EOF);
			}
			/* partial write; advance past the bytes written */
			n -= num_wrote;
			base += num_wrote;
		}
	}
	return (0);
}

/* flush (write) buffer */
int
fflush(FILE *iop)
{
	int res;
	rmutex_t *lk;

	if (iop) {
		FLOCKFILE(lk, iop);
		res = _fflush_u(iop);
		FUNLOCKFILE(lk);
	} else {
		res = _fflush_l_iops();		/* flush all iops */
	}
	return (res);
}

static int
_fflush_l_iops(void)		/* flush all buffers */
{
	FPDECL(iop);

	int i;
	struct _link_ *lp;
	int res = 0;
	rmutex_t *lk;
	/* Allow the compiler to optimize the load out of the loop */
	int threaded = __libc_threaded;

	if (threaded)
		(void) _private_mutex_lock(&_first_link_lock);

	lp = &__first_link;

	do {
		/*
		 * We need to grab the file locks or file corruption
		 * will happen.  But we first check the flags field
		 * knowing that when it is 0, it isn't allocated and
		 * cannot be allocated while we're holding the
		 * _first_link_lock.  And when _IONBF is set (also the
		 * case when _flag is 0377, or alloc in progress), we
		 * also ignore it.
		 *
		 * Ignore locked streams; it will appear as if
		 * concurrent updates happened after fflush(NULL).  Note
		 * that we even attempt to lock if the locking is set to
		 * "by caller".  We don't want to penalize callers of
		 * __fsetlocking() by not flushing their files.  Note: if
		 * __fsetlocking() callers don't employ any locking, they
		 * may still face corruption in fflush(NULL); but that's
		 * no change from earlier releases.
		 */
		FIRSTFP(lp, iop);
		for (i = lp->niob; --i >= 0; NEXTFP(iop)) {
			unsigned int flag = iop->_flag;

			/* flag 0, flag 0377, or _IONBF set */
			if (flag == 0 || (flag & _IONBF) != 0)
				continue;

			if (threaded) {
				lk = FPLOCK(iop);
				if (rmutex_trylock(lk) != 0)
					continue;
			}

			if (!(iop->_flag & _IONBF)) {
				/*
				 * don't need to worry about the _IORW case
				 * since the iop will also be marked with
				 * _IOREAD or _IOWRT, whichever we are
				 * really doing
				 */
				if (iop->_flag & _IOWRT) {
					/* Flush write buffers */
					res |= _fflush_u(iop);
				} else if (iop->_flag & _IOREAD) {
					/*
					 * flush seekable read buffers
					 * don't flush non-seekable read buffers
					 */
					if (GET_SEEKABLE(iop)) {
						res |= _fflush_u(iop);
					}
				}
			}
			if (threaded)
				(void) rmutex_unlock(lk);
		}
	} while ((lp = lp->next) != NULL);
	if (threaded)
		(void) _private_mutex_unlock(&_first_link_lock);
	return (res);
}

/* flush buffer */
int
_fflush_u(FILE *iop)
{
	int res = 0;

	/* this portion is always assumed locked */
	if (!(iop->_flag & _IOWRT)) {
		(void) lseek64(GET_FD(iop), -iop->_cnt, SEEK_CUR);
		iop->_cnt = 0;
		/* needed for ungetc & multibyte pushbacks */
		iop->_ptr = iop->_base;
		if (iop->_flag & _IORW) {
			iop->_flag &= ~_IOREAD;
		}
		return (0);
	}
	if (iop->_base != NULL && iop->_ptr > iop->_base) {
		res = _xflsbuf(iop);
	}
	if (iop->_flag & _IORW) {
		iop->_flag &= ~_IOWRT;
		iop->_cnt = 0;
	}
	return (res);
}

/* flush buffer and close stream */
int
fclose(FILE *iop)
{
	int res = 0;
	rmutex_t *lk;

	if (iop == NULL) {
		return (EOF);		/* avoid passing zero to FLOCKFILE */
	}

	FLOCKFILE(lk, iop);
	if (iop->_flag == 0) {
		FUNLOCKFILE(lk);
		return (EOF);
	}
	/* Not unbuffered, and opened for read and/or write? */
	if (!(iop->_flag & _IONBF) && (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
		res = _fflush_u(iop);
	if (close(GET_FD(iop)) < 0)
		res = EOF;
	if (iop->_flag & _IOMYBUF) {
		(void) free((char *)iop->_base - PUSHBACK);
	}
	iop->_base = NULL;
	iop->_ptr = NULL;
	iop->_cnt = 0;
	iop->_flag = 0;			/* marks it as available */
	FUNLOCKFILE(lk);

	if (__libc_threaded)
		(void) _private_mutex_lock(&_first_link_lock);
	fcloses++;
	if (__libc_threaded)
		(void) _private_mutex_unlock(&_first_link_lock);

	return (res);
}

/* flush buffer, close fd but keep the stream used by freopen() */
int
close_fd(FILE *iop)
{
	int res = 0;
	mbstate_t *mb;

	if (iop == NULL || iop->_flag == 0)
		return (EOF);
	/* Not unbuffered, and opened for read and/or write? */
	if (!(iop->_flag & _IONBF) && (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
		res = _fflush_u(iop);
	if (close(GET_FD(iop)) < 0)
		res = EOF;
	if (iop->_flag & _IOMYBUF) {
		(void) free((char *)iop->_base - PUSHBACK);
	}
	iop->_base = NULL;
	iop->_ptr = NULL;
	mb = _getmbstate(iop);
	if (mb != NULL)
		(void) memset(mb, 0, sizeof (mbstate_t));
	iop->_cnt = 0;
	_setorientation(iop, _NO_MODE);
	return (res);
}

static FILE *
getiop(FILE *fp, rmutex_t *lk, mbstate_t *mb)
{
	if (lk != NULL && rmutex_trylock(lk))
		return (NULL);	/* locked: fp in use */

	if (fp->_flag == 0) {	/* unused */
#ifndef	_LP64
		fp->__orientation = 0;
#endif /* _LP64 */
		fp->_cnt = 0;
		fp->_ptr = NULL;
		fp->_base = NULL;
		fp->_flag = 0377;	/* claim the fp by setting low 8 bits */
		(void) memset(mb, 0, sizeof (mbstate_t));
		FUNLOCKFILE(lk);
		return (fp);
	}
	FUNLOCKFILE(lk);
	return (NULL);
}
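
/*
 * Note that 0377 sets all eight low flag bits, _IONBF among them, so
 * an fp that has been claimed but not yet initialized is skipped by
 * the flush-all paths (_flushlbf(), _fflush_l_iops()) until its real
 * flags are stored.
 */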

#ifndef	_LP64
/*
 * DESCRIPTION:
 * This function gets the pointer to the mbstate_t structure associated
 * with the specified iop.
 *
 * RETURNS:
 * If the associated mbstate_t is found, the pointer to the mbstate_t is
 * returned.  Otherwise, NULL is returned.
 */
mbstate_t *
_getmbstate(FILE *iop)
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		return (&dat->_state);

	return (NULL);
}

/*
 * More 32-bit only functions.
 * They lookup/set large fd's for extended FILE support.
 */

/*
 * A negative value indicates that extended fd FILEs have not
 * been enabled by the user.
 */
static int bad_fd = -1;
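
/*
 * Background (summarizing the mechanism visible below): in the 32-bit
 * FILE the descriptor lives in the single byte _magic, so fds above
 * 255 cannot be stored there.  With extended FILE enabled, the real
 * descriptor goes into _altfd and _magic is set to bad_fd, an fd
 * reserved via fcntl(..., F_BADFD, action); code that bypasses stdio
 * and uses _magic directly then fails on the bad fd rather than
 * silently operating on the wrong file.
 */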

int
_file_get(FILE *iop)
{
	int altfd;

	/*
	 * Failure indicates a FILE * not allocated through stdio;
	 * it means the flag values are probably bogus and that if
	 * a file descriptor is set, it's in _magic.
	 * Inline getxfdat() for performance reasons.
	 */
	if (STDIOP(iop))
		altfd = _xftab[IOPIND(iop)]._altfd;
	else if (VALIDXFILE(FILEx(iop)))
		altfd = FILEx(iop)->_xdat._altfd;
	else
		return (iop->_magic);
	/*
	 * If this is not an internal extended FILE, check whether _file
	 * is being changed from underneath us.  It should not be,
	 * because then we lose our ability to guard against silent
	 * data corruption.
	 */
	if (!iop->__xf_nocheck && bad_fd > -1 && iop->_magic != bad_fd) {
		/* LINTED: variable format specifier */
		(void) fprintf(stderr, _libc_gettext(
		    "Application violated extended FILE safety mechanism.\n"
		    "Please read the man page for extendedFILE.\nAborting\n"));
		abort();
	}
	return (altfd);
}

int
_file_set(FILE *iop, int fd, const char *type)
{
	struct xFILEdata *dat;
	int Fflag;

	/* Already known to contain at least one byte */
	while (*++type != '\0')
		;

	Fflag = type[-1] == 'F';
	if (!Fflag && bad_fd < 0) {
		errno = EMFILE;
		return (-1);
	}

	dat = getxfdat(iop);
	iop->__extendedfd = 1;
	iop->__xf_nocheck = Fflag;
	dat->_altfd = fd;
	iop->_magic = (unsigned char)bad_fd;
	return (0);
}

/*
 * Activates extended fds in FILEs.
 */

static const int tries[] = {196, 120, 60, 3};
#define	NTRIES	(sizeof (tries)/sizeof (int))

int
enable_extended_FILE_stdio(int fd, int action)
{
	int i;

	if (action < 0)
		action = SIGABRT;	/* default signal */

	if (fd < 0) {
		/*
		 * search for an available fd and make it the badfd
		 */
		for (i = 0; i < NTRIES; i++) {
			fd = fcntl(tries[i], F_BADFD, action);
			if (fd >= 0)
				break;
		}
		if (fd < 0)	/* failed to find an available fd */
			return (-1);
	} else {
		/* caller requests that fd be the chosen badfd */
		int nfd = fcntl(fd, F_BADFD, action);
		if (nfd < 0 || nfd != fd)
			return (-1);
	}
	bad_fd = fd;
	return (0);
}
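
/*
 * Illustrative usage (application code, not part of this file): a
 * 32-bit program that wants stdio to handle fds above 255 can call
 *
 *	(void) enable_extended_FILE_stdio(-1, -1);
 *
 * early in main(); fd == -1 lets the library pick the fd to reserve
 * as the badfd and action == -1 keeps the default SIGABRT.
 */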
#endif