xref: /illumos-gate/usr/src/lib/libc/port/stdio/flush.c (revision 66582b606a8194f7f3ba5b3a3a6dca5b0d346361)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved  	*/

#include "lint.h"
#include "mtlib.h"
#include "file64.h"
#include <sys/types.h>
#include <stdlib.h>
#include <stdio.h>
#include <thread.h>
#include <synch.h>
#include <unistd.h>
#include <string.h>
#include "stdiom.h"
#include <wchar.h>
#include <sys/stat.h>
#include <stddef.h>
#include <errno.h>
#include <fcntl.h>

#define	_iob	__iob

#undef end

#define	FILE_ARY_SZ	8 /* a nice size for FILE array & end_buffer_ptrs */

#ifdef	_LP64

/*
 * Macros to declare and loop over a fp or fp/xfp combo to
 * avoid some of the _LP64 ifdef hell.
 */

#define	FPDECL(fp)		FILE *fp
#define	FIRSTFP(lp, fp)		fp = lp->iobp
#define	NEXTFP(fp)		fp++
#define	FPLOCK(fp)		&fp->_lock
#define	FPSTATE(fp)		&fp->_state

#define	xFILE			FILE

#else

#define	FPDECL(fp)		FILE *fp; xFILE *x##fp
#define	FIRSTFP(lp, fp)		x##fp = lp->iobp; \
				fp = x##fp ? &x##fp->_iob : &_iob[0]
#define	NEXTFP(fp)		(x##fp ? fp = &(++x##fp)->_iob : ++fp)
#define	FPLOCK(fp)		x##fp ? \
				    &x##fp->xlock : &_xftab[IOPIND(fp)]._lock
#define	FPSTATE(fp)		x##fp ? \
				    &x##fp->xstate : &_xftab[IOPIND(fp)]._state
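
/*
 * For the first link in 32-bit mode, iobp is NULL (see __first_link
 * below), so FIRSTFP/NEXTFP fall back to walking the static _iob[]
 * array and FPLOCK/FPSTATE index the parallel _xftab[] array; every
 * later link walks an array of xFILE instead.
 */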

/* The extended 32-bit file structure for use in link buffers */
typedef struct xFILE {
	FILE			_iob;		/* must be first! */
	struct xFILEdata	_xdat;
} xFILE;

#define	xmagic			_xdat._magic
#define	xend			_xdat._end
#define	xlock			_xdat._lock
#define	xstate			_xdat._state

#define	FILEx(fp)		((struct xFILE *)(uintptr_t)fp)

/*
 * The value stored in xmagic is actually the pointer scrambled
 * (XORed) with a magic constant.  Pointers to data items live
 * everywhere in memory, so we scramble the pointer in order to
 * avoid accidental collisions.
 */
#define	XFILEMAGIC		0x63687367
#define	XMAGIC(xfp)		((uintptr_t)(xfp) ^ XFILEMAGIC)

#endif /* _LP64 */

struct _link_	/* manages a list of streams */
{
	xFILE *iobp;		/* the array of (x)FILE's */
				/* NULL for the __first_link in ILP32 */
	int	niob;		/* length of the arrays */
	struct _link_	*next;	/* next in the list */
};

/*
 * With dynamic linking, iob may be in either the library or in the user's
 * a.out, so the run time linker fixes up the first entry in __first_link at
 * process startup time.
 *
 * In 32-bit processes the first chunk is not an xFILE[FILE_ARY_SZ] array
 * but the static FILE[] and _xftab[] arrays; this is denoted by having
 * iobp set to NULL in 32-bit mode for the first link entry.
 */
struct _link_ __first_link =	/* first in linked list */
{
#if !defined(_LP64)
	NULL,
#else
	&_iob[0],
#endif
	_NFILE,
	NULL
};

/*
 * Information cached to speed up searches.  We remember where we
 * last found a free FILE* and we remember whether we saw any fcloses
 * in between.  We also count the number of chunks we allocated; see
 * _findiop() for an explanation.
 * These variables are all protected by _first_link_lock.
 */
static struct _link_ *lastlink = NULL;
static int fcloses;
static int nchunks;

static mutex_t _first_link_lock = DEFAULTMUTEX;

static int _fflush_l_iops(void);
static FILE *getiop(FILE *, rmutex_t *, mbstate_t *);

/*
 * All functions that understand the linked list of iob's follow.
 */
#pragma weak _cleanup = __cleanup
void
__cleanup(void)		/* called at process end to flush output streams */
{
	(void) fflush(NULL);
}

/*
 * For fork1-safety (see libc_prepare_atfork(), etc).
 */
void
stdio_locks()
{
	(void) mutex_lock(&_first_link_lock);
	/*
	 * XXX: We should acquire all of the iob locks here.
	 */
}

void
stdio_unlocks()
{
	/*
	 * XXX: We should release all of the iob locks here.
	 */
	(void) mutex_unlock(&_first_link_lock);
}

void
_flushlbf(void)		/* fflush() all line-buffered streams */
{
	FPDECL(fp);
	int i;
	struct _link_ *lp;
	/* Allow compiler to optimize the loop */
	int threaded = __libc_threaded;

	if (threaded)
		cancel_safe_mutex_lock(&_first_link_lock);

	lp = &__first_link;
	do {
		FIRSTFP(lp, fp);
		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
			/*
			 * The additional _IONBF check guards against
			 * allocated but uninitialized iops (see _findiop).
			 * We also automatically skip non-allocated iops.
			 * Don't block on locks.
			 */
			if ((fp->_flag & (_IOLBF | _IOWRT | _IONBF)) ==
			    (_IOLBF | _IOWRT)) {
				if (threaded) {
					rmutex_t *lk = FPLOCK(fp);
					if (cancel_safe_mutex_trylock(lk) != 0)
						continue;
					/* Recheck after locking */
					if ((fp->_flag & (_IOLBF | _IOWRT)) ==
					    (_IOLBF | _IOWRT)) {
						(void) _fflush_u(fp);
					}
					cancel_safe_mutex_unlock(lk);
				} else {
					(void) _fflush_u(fp);
				}
			}
		}
	} while ((lp = lp->next) != NULL);

	if (threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);
}

/* allocate an unused stream; NULL if cannot */
FILE *
_findiop(void)
{
	struct _link_ *lp, **prev;

	/* used so there only needs to be one malloc() */
#ifdef _LP64
	typedef	struct	{
		struct _link_	hdr;
		FILE	iob[FILE_ARY_SZ];
	} Pkg;
#else
	typedef union {
		struct {				/* Normal */
			struct _link_	hdr;
			xFILE	iob[FILE_ARY_SZ];
		} Pkgn;
		struct {				/* Reversed */
			xFILE	iob[FILE_ARY_SZ];
			struct _link_	hdr;
		} Pkgr;
	} Pkg;
	uintptr_t delta;
#endif
	Pkg *pkgp;
	struct _link_ *hdr;
	FPDECL(fp);
	int i;
	int threaded = __libc_threaded;

	if (threaded)
		cancel_safe_mutex_lock(&_first_link_lock);

	if (lastlink == NULL) {
rescan:
		fcloses = 0;
		lastlink = &__first_link;
	}

	lp = lastlink;

	/*
	 * We hold _first_link_lock to make the test of fp->_flag == 0
	 * and the claiming of the fp atomic, and also to serialize the
	 * allocation of new links.  Contention on _findiop() is expected
	 * to be low, hence the coarse locking.  For finer granularity,
	 * one could use fp->_lock to allocate an iop and make the test
	 * of lp->next and the allocation of a new link atomic using
	 * lp->_lock.
	 */

	do {
		prev = &lp->next;
		FIRSTFP(lp, fp);

		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
			FILE *ret;
			if (threaded) {
				ret = getiop(fp, FPLOCK(fp), FPSTATE(fp));
				if (ret != NULL) {
					cancel_safe_mutex_unlock(
					    &_first_link_lock);
					return (ret);
				}
			} else {
				ret = getiop(fp, NULL, FPSTATE(fp));
				if (ret != NULL)
					return (ret);
			}
		}
	} while ((lastlink = lp = lp->next) != NULL);

	/*
	 * If there was a sufficient number of fcloses since we last started
	 * at __first_link, we rescan all fp's again.  We do not rescan for
	 * all fcloses; that would simplify the algorithm but would make
	 * search times near O(n) again.
	 * Worst-case behaviour would still be pretty bad (open a full set,
	 * then continuously opening and closing one FILE * gets you a full
	 * scan each time).  That's why we overallocate 1 FILE for each
	 * 32 chunks.  More overallocation is better; this is a nice
	 * empirical value which doesn't cost a lot of memory, doesn't
	 * overallocate until we reach 256 FILE *s and keeps the performance
	 * pretty close to the optimum.
	 */
	if (fcloses > nchunks/32)
		goto rescan;

	/*
	 * Need to allocate another and put it in the linked list.
	 */
	if ((pkgp = malloc(sizeof (Pkg))) == NULL) {
		if (threaded)
			cancel_safe_mutex_unlock(&_first_link_lock);
		return (NULL);
	}

	(void) memset(pkgp, 0, sizeof (Pkg));

#ifdef _LP64
	hdr = &pkgp->hdr;
	hdr->iobp = &pkgp->iob[0];
#else
	/*
	 * The problem with referencing a word after a FILE* is the possibility
	 * of a SIGSEGV if a non-stdio-issued FILE structure ends on a page
	 * boundary.  We run this check so we never need to run an expensive
	 * check like mincore() in order to know whether it is
	 * safe to dereference ((xFILE*)fp)->xmagic.
	 * We allocate the block with two alternative layouts; if one
	 * layout is not properly aligned for our purposes, the other layout
	 * will be, because the size of _link_ is small compared to
	 * sizeof (xFILE).
	 * The check performed is this:
	 *	If the distance from pkgp to the end of the page is
	 *	less than the offset of the last xmagic field in the
	 *	xFILE structure (the 0x1000 boundary is inside our just
	 *	allocated structure), and the distance modulo the size of
	 *	xFILE is identical to the offset of the first xmagic in the
	 *	structure (i.e., XXXXXX000 points to an xmagic field),
	 *	we need to use the reverse structure.
	 */
	if ((delta = 0x1000 - ((uintptr_t)pkgp & 0xfff)) <=
	    offsetof(Pkg, Pkgn.iob[FILE_ARY_SZ-1].xmagic) &&
	    delta % sizeof (struct xFILE) ==
	    offsetof(Pkg, Pkgn.iob[0].xmagic)) {
		/* Use reversed structure */
		hdr = &pkgp->Pkgr.hdr;
		hdr->iobp = &pkgp->Pkgr.iob[0];
	} else {
		/* Use normal structure */
		hdr = &pkgp->Pkgn.hdr;
		hdr->iobp = &pkgp->Pkgn.iob[0];
	}
#endif /* _LP64 */

	hdr->niob = FILE_ARY_SZ;
	nchunks++;

#ifdef	_LP64
	fp = hdr->iobp;
	for (i = 0; i < FILE_ARY_SZ; i++)
		(void) mutex_init(&fp[i]._lock,
		    USYNC_THREAD | LOCK_RECURSIVE, NULL);
#else
	xfp = hdr->iobp;
	fp = &xfp->_iob;

	for (i = 0; i < FILE_ARY_SZ; i++) {
		xfp[i].xmagic = XMAGIC(&xfp[i]);
		(void) mutex_init(&xfp[i].xlock,
		    USYNC_THREAD | LOCK_RECURSIVE, NULL);
	}
#endif	/*	_LP64	*/

	lastlink = *prev = hdr;
	fp->_ptr = 0;
	fp->_base = 0;
	fp->_flag = 0377; /* claim the fp by setting low 8 bits */
	if (threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);

	return (fp);
}

static void
isseekable(FILE *iop)
{
	struct stat64 fstatbuf;
	int save_errno;

	save_errno = errno;

	if (fstat64(GET_FD(iop), &fstatbuf) != 0) {
		/*
		 * When we don't know what it is, we'll
		 * do the old behaviour and flush
		 * the stream.
		 */
		SET_SEEKABLE(iop);
		errno = save_errno;
		return;
	}

	/*
	 * Check for what is non-SEEKABLE;
	 * otherwise assume it's SEEKABLE so we get the old
	 * behaviour and flush the stream.
	 */

	if (S_ISFIFO(fstatbuf.st_mode) || S_ISCHR(fstatbuf.st_mode) ||
	    S_ISSOCK(fstatbuf.st_mode) || S_ISDOOR(fstatbuf.st_mode)) {
		CLEAR_SEEKABLE(iop);
	} else {
		SET_SEEKABLE(iop);
	}

	errno = save_errno;
}

#ifdef	_LP64
void
_setbufend(FILE *iop, Uchar *end)	/* set the end pointer for this iop */
{
	iop->_end = end;

	isseekable(iop);
}

#undef _realbufend

Uchar *
_realbufend(FILE *iop)		/* get the end pointer for this iop */
{
	return (iop->_end);
}

#else /* _LP64 */

/*
 * Awkward functions not needed for the sane 64-bit environment.
 */
/*
 * xmagic must not be aligned on a 4K boundary.  We guarantee this in
 * _findiop().
 */
#define	VALIDXFILE(xfp) \
	(((uintptr_t)&(xfp)->xmagic & 0xfff) && \
	    (xfp)->xmagic == XMAGIC(FILEx(xfp)))

static struct xFILEdata *
getxfdat(FILE *iop)
{
	if (STDIOP(iop))
		return (&_xftab[IOPIND(iop)]);
	else if (VALIDXFILE(FILEx(iop)))
		return (&FILEx(iop)->_xdat);
	else
		return (NULL);
}

void
_setbufend(FILE *iop, Uchar *end)	/* set the end pointer for this iop */
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		dat->_end = end;

	isseekable(iop);

	/*
	 * For binary compatibility with user programs using the
	 * old _bufend macro.  This is *so* broken, fileno()
	 * is not the proper index.
	 */
	if (iop->_magic < _NFILE)
		_bufendtab[iop->_magic] = end;
}

Uchar *
_realbufend(FILE *iop)		/* get the end pointer for this iop */
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		return (dat->_end);

	return (NULL);
}

/*
 * _reallock() is invoked in each stdio call through the IOB_LCK() macro;
 * it is therefore extremely performance sensitive.  We get better performance
 * by inlining the STDIOP check in IOB_LCK and inlining a custom version
 * of getxfdat() here.
 */
rmutex_t *
_reallock(FILE *iop)
{
	if (VALIDXFILE(FILEx(iop)))
		return (&FILEx(iop)->xlock);

	return (NULL);
}

#endif	/*	_LP64	*/

/* make sure _cnt, _ptr are correct */
void
_bufsync(FILE *iop, Uchar *bufend)
{
	ssize_t spaceleft;

	spaceleft = bufend - iop->_ptr;
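	/*
	 * An interrupted write may have left _ptr and _cnt inconsistent:
	 * pull _ptr back if it ran past the end of the buffer, otherwise
	 * clamp _cnt so it never counts space beyond bufend.
	 */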
	if (bufend < iop->_ptr) {
		iop->_ptr = bufend;
		iop->_cnt = 0;
	} else if (spaceleft < iop->_cnt)
		iop->_cnt = spaceleft;
}

/* really write out current buffer contents */
int
_xflsbuf(FILE *iop)
{
	ssize_t n;
	Uchar *base = iop->_base;
	Uchar *bufend;
	ssize_t num_wrote;

	/*
	 * Hopefully, be stable with respect to interrupts...
	 */
	n = iop->_ptr - base;
	iop->_ptr = base;
	bufend = _bufend(iop);
	if (iop->_flag & (_IOLBF | _IONBF))
		iop->_cnt = 0;		/* always go to a flush */
	else
		iop->_cnt = bufend - base;

	if (_needsync(iop, bufend))	/* recover from interrupts */
		_bufsync(iop, bufend);

	if (n > 0) {
		int fd = GET_FD(iop);
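		/*
		 * write() may return a short count; loop until the whole
		 * buffer is out.  A return of 0 or -1 fails the flush; the
		 * error flag is only set when no cancellation is active.
		 */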
		while ((num_wrote = write(fd, base, (size_t)n)) != n) {
			if (num_wrote <= 0) {
				if (!cancel_active())
					iop->_flag |= _IOERR;
				return (EOF);
			}
			n -= num_wrote;
			base += num_wrote;
		}
	}
	return (0);
}

/* flush (write) buffer */
int
fflush(FILE *iop)
{
	int res;
	rmutex_t *lk;

	if (iop) {
		FLOCKFILE(lk, iop);
		res = _fflush_u(iop);
		FUNLOCKFILE(lk);
	} else {
		res = _fflush_l_iops();		/* flush all iops */
	}
	return (res);
}

static int
_fflush_l_iops(void)		/* flush all buffers */
{
	FPDECL(iop);

	int i;
	struct _link_ *lp;
	int res = 0;
	rmutex_t *lk;
	/* Allow the compiler to optimize the load out of the loop */
	int threaded = __libc_threaded;

	if (threaded)
		cancel_safe_mutex_lock(&_first_link_lock);

	lp = &__first_link;

	do {
		/*
		 * We need to grab the file locks or file corruption
		 * will happen.  But we first check the flags field
		 * knowing that when it is 0, it isn't allocated and
		 * cannot be allocated while we're holding the
		 * _first_link_lock.  And when _IONBF is set (also the
		 * case when _flag is 0377, or alloc in progress), we
		 * also ignore it.
		 *
		 * Ignore locked streams; it will appear as if
		 * concurrent updates happened after fflush(NULL).  Note
		 * that we even attempt to lock if the locking is set to
		 * "by caller".  We don't want to penalize callers of
		 * __fsetlocking() by not flushing their files.  Note: if
		 * __fsetlocking() callers don't employ any locking, they
		 * may still face corruption in fflush(NULL); but that's
		 * no change from earlier releases.
		 */
		FIRSTFP(lp, iop);
		for (i = lp->niob; --i >= 0; NEXTFP(iop)) {
			unsigned int flag = iop->_flag;

			/* flag 0, flag 0377, or _IONBF set */
			if (flag == 0 || (flag & _IONBF) != 0)
				continue;

			if (threaded) {
				lk = FPLOCK(iop);
				if (cancel_safe_mutex_trylock(lk) != 0)
					continue;
			}

			if (!(iop->_flag & _IONBF)) {
				/*
				 * We don't need to worry about the _IORW case
				 * since the iop will also be marked with
				 * _IOREAD or _IOWRT, whichever we are really
				 * doing.
				 */
				if (iop->_flag & _IOWRT) {
					/* Flush write buffers */
					res |= _fflush_u(iop);
				} else if (iop->_flag & _IOREAD) {
					/*
					 * flush seekable read buffers
					 * don't flush non-seekable read buffers
					 */
					if (GET_SEEKABLE(iop)) {
						res |= _fflush_u(iop);
					}
				}
			}
			if (threaded)
				cancel_safe_mutex_unlock(lk);
		}
	} while ((lp = lp->next) != NULL);
	if (threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);
	return (res);
}

/* flush buffer */
int
_fflush_u(FILE *iop)
{
	int res = 0;

	/* this portion is always assumed locked */
	if (!(iop->_flag & _IOWRT)) {
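		/*
		 * Read side: seek the underlying fd back over the
		 * buffered but unconsumed bytes so its offset matches
		 * the logical stream position.
		 */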
		(void) lseek64(GET_FD(iop), -iop->_cnt, SEEK_CUR);
		iop->_cnt = 0;
		/* needed for ungetc & multibyte pushbacks */
		iop->_ptr = iop->_base;
		if (iop->_flag & _IORW) {
			iop->_flag &= ~_IOREAD;
		}
		return (0);
	}
	if (iop->_base != NULL && iop->_ptr > iop->_base) {
		res = _xflsbuf(iop);
	}
	if (iop->_flag & _IORW) {
		iop->_flag &= ~_IOWRT;
		iop->_cnt = 0;
	}
	return (res);
}

/* flush buffer and close stream */
int
fclose(FILE *iop)
{
	int res = 0;
	rmutex_t *lk;

	if (iop == NULL) {
		return (EOF);		/* avoid passing zero to FLOCKFILE */
	}

	FLOCKFILE(lk, iop);
	if (iop->_flag == 0) {
		FUNLOCKFILE(lk);
		return (EOF);
	}
	/* Not unbuffered and opened for read and/or write? */
	if (!(iop->_flag & _IONBF) && (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
		res = _fflush_u(iop);
	if (close(GET_FD(iop)) < 0)
		res = EOF;
	if (iop->_flag & _IOMYBUF) {
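		/*
		 * The buffer allocation starts PUSHBACK bytes before
		 * _base, leaving room for ungetc() pushback.
		 */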
		(void) free((char *)iop->_base - PUSHBACK);
	}
	iop->_base = NULL;
	iop->_ptr = NULL;
	iop->_cnt = 0;
	iop->_flag = 0;			/* marks it as available */
	FUNLOCKFILE(lk);

	if (__libc_threaded)
		cancel_safe_mutex_lock(&_first_link_lock);
	fcloses++;
	if (__libc_threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);

	return (res);
}

/* close all open streams */
int
fcloseall(void)
{
	FPDECL(iop);

	struct _link_ *lp;
	rmutex_t *lk;

	if (__libc_threaded)
		cancel_safe_mutex_lock(&_first_link_lock);

	lp = &__first_link;

	do {
		int i;

		FIRSTFP(lp, iop);
		for (i = lp->niob; --i >= 0; NEXTFP(iop)) {
			/* code stolen from fclose(), above */

			FLOCKFILE(lk, iop);
			if (iop->_flag == 0) {
				FUNLOCKFILE(lk);
				continue;
			}

			/* Not unbuffered and opened for read and/or write? */
			if (!(iop->_flag & _IONBF) &&
			    (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
				(void) _fflush_u(iop);
			(void) close(GET_FD(iop));
			if (iop->_flag & _IOMYBUF)
				free((char *)iop->_base - PUSHBACK);
			iop->_base = NULL;
			iop->_ptr = NULL;
			iop->_cnt = 0;
			iop->_flag = 0;		/* marks it as available */
			FUNLOCKFILE(lk);
			fcloses++;
		}
	} while ((lp = lp->next) != NULL);

	if (__libc_threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);

	return (0);
}

771 
772 /* flush buffer, close fd but keep the stream used by freopen() */
773 int
774 close_fd(FILE *iop)
775 {
776 	int res = 0;
777 	mbstate_t *mb;
778 
779 	if (iop == NULL || iop->_flag == 0)
780 		return (EOF);
781 	/* Is not unbuffered and opened for read and/or write ? */
782 	if (!(iop->_flag & _IONBF) && (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
783 		res = _fflush_u(iop);
784 	if (close(GET_FD(iop)) < 0)
785 		res = EOF;
786 	if (iop->_flag & _IOMYBUF) {
787 		(void) free((char *)iop->_base - PUSHBACK);
788 	}
789 	iop->_base = NULL;
790 	iop->_ptr = NULL;
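	/* Reset any multibyte conversion state left over from the old use. */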
	mb = _getmbstate(iop);
	if (mb != NULL)
		(void) memset(mb, 0, sizeof (mbstate_t));
	iop->_cnt = 0;
	_setorientation(iop, _NO_MODE);
	return (res);
}

static FILE *
getiop(FILE *fp, rmutex_t *lk, mbstate_t *mb)
{
	if (lk != NULL && cancel_safe_mutex_trylock(lk) != 0)
		return (NULL);	/* locked: fp in use */

	if (fp->_flag == 0) {	/* unused */
#ifndef	_LP64
		fp->__orientation = 0;
#endif /* _LP64 */
		fp->_cnt = 0;
		fp->_ptr = NULL;
		fp->_base = NULL;
		fp->_flag = 0377;	/* claim the fp by setting low 8 bits */
		(void) memset(mb, 0, sizeof (mbstate_t));
		FUNLOCKFILE(lk);
		return (fp);
	}
	FUNLOCKFILE(lk);
	return (NULL);
}

#ifndef	_LP64
/*
 * DESCRIPTION:
 * This function gets the pointer to the mbstate_t structure associated
 * with the specified iop.
 *
 * RETURNS:
 * If the associated mbstate_t is found, a pointer to it is returned.
 * Otherwise, NULL is returned.
 */
mbstate_t *
_getmbstate(FILE *iop)
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		return (&dat->_state);

	return (NULL);
}

/*
 * More 32-bit only functions.
 * They look up/set large fd's for extended FILE support.
 */

/*
 * A negative value indicates that extended fd FILEs have not been
 * enabled by the user.
 */
static int bad_fd = -1;

int
_file_get(FILE *iop)
{
	int altfd;

	/*
	 * Failure indicates a FILE * not allocated through stdio;
	 * it means the flag values are probably bogus and that if
	 * a file descriptor is set, it's in _magic.
	 * Inline getxfdat() for performance reasons.
	 */
	if (STDIOP(iop))
		altfd = _xftab[IOPIND(iop)]._altfd;
	else if (VALIDXFILE(FILEx(iop)))
		altfd = FILEx(iop)->_xdat._altfd;
	else
		return (iop->_magic);
	/*
	 * If this is not an internal extended FILE, check whether _file
	 * is being changed from underneath us.  It should not be,
	 * because if it is we lose our ability to guard against silent
	 * data corruption.
	 */
	if (!iop->__xf_nocheck && bad_fd > -1 && iop->_magic != bad_fd) {
		(void) fprintf(stderr,
		    "Application violated extended FILE safety mechanism.\n"
		    "Please read the man page for extendedFILE.\nAborting\n");
		abort();
	}
	return (altfd);
}

int
_file_set(FILE *iop, int fd, const char *type)
{
	struct xFILEdata *dat;
	int Fflag;

	/* Already known to contain at least one byte */
	while (*++type != '\0')
		;

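	/*
	 * Fflag is set when the open mode string ends in 'F', i.e. the
	 * caller explicitly requested an extended (large fd) FILE.
	 */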
	Fflag = type[-1] == 'F';
	if (!Fflag && bad_fd < 0) {
		errno = EMFILE;
		return (-1);
	}

	dat = getxfdat(iop);
	iop->__extendedfd = 1;
	iop->__xf_nocheck = Fflag;
	dat->_altfd = fd;
	iop->_magic = (unsigned char)bad_fd;
	return (0);
}

/*
 * Activates extended fd's in FILE's.
 */

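/*
 * Candidate fd numbers probed with F_BADFD when the caller does not
 * supply one; the specific values are presumably chosen to stay clear
 * of commonly used descriptor ranges.
 */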
static const int tries[] = {196, 120, 60, 3};
#define	NTRIES	(sizeof (tries)/sizeof (int))

int
enable_extended_FILE_stdio(int fd, int action)
{
	int i;

	if (action < 0)
		action = SIGABRT;	/* default signal */

	if (fd < 0) {
		/*
		 * Search for an available fd and make it the badfd.
		 */
		for (i = 0; i < NTRIES; i++) {
			fd = fcntl(tries[i], F_BADFD, action);
			if (fd >= 0)
				break;
		}
		if (fd < 0)	/* failed to find an available fd */
			return (-1);
	} else {
		/* caller requests that fd be the chosen badfd */
		int nfd = fcntl(fd, F_BADFD, action);
		if (nfd < 0 || nfd != fd)
			return (-1);
	}
	bad_fd = fd;
	return (0);
}
#endif
