/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved  	*/


#include "synonyms.h"
#include "mtlib.h"
#include "file64.h"

#define	_iob	__iob

#include <sys/types.h>
#include <stdlib.h>
#include <stdio.h>
#include <thread.h>
#include <synch.h>
#include <unistd.h>
#include <string.h>
#include "stdiom.h"
#include <wchar.h>
#include <sys/stat.h>
#include <stddef.h>
#include <errno.h>

#undef end

#define	FILE_ARY_SZ	8 /* a nice size for FILE array & end_buffer_ptrs */

#ifdef	_LP64

/*
 * Macros to declare and loop over a fp or fp/xfp combo to
 * avoid some of the _LP64 ifdef hell.
 */

#define	FPDECL(fp)		FILE *fp
#define	FIRSTFP(lp, fp)		fp = lp->iobp
#define	NEXTFP(fp)		fp++

#define	xFILE			FILE

#else

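/*
 * In the 32-bit case the first link's streams live in the legacy _iob[]
 * array (iobp is NULL) while later links hold xFILE's, so the walking
 * macros carry both a FILE * and an xFILE * and use whichever applies.
 */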
#define	FPDECL(fp)		FILE *fp; xFILE *x##fp
#define	FIRSTFP(lp, fp)		x##fp = lp->iobp; \
				fp = x##fp ? &x##fp->_iob : &_iob[0]
#define	NEXTFP(fp)		(x##fp ? fp = &(++x##fp)->_iob : ++fp)

/* The extended 32-bit file structure for use in link buffers */
typedef struct xFILE {
	FILE			_iob;		/* must be first! */
	struct xFILEdata	_xdat;
} xFILE;

#define	xmagic			_xdat._magic
#define	xend			_xdat._end
#define	xlock			_xdat._lock
#define	xstate			_xdat._state

#define	FILEx(fp)		((struct xFILE *)(uintptr_t)fp)

/*
 * The magic number stored is actually the pointer scrambled with
 * a magic number.  Pointers to data items live everywhere in memory
 * so we scramble the pointer in order to avoid accidental collisions.
 */
#define	XFILEMAGIC		0x63687367
#define	XMAGIC(xfp)		((uintptr_t)(xfp) ^ XFILEMAGIC)

#endif /* _LP64 */

struct _link_	/* manages a list of streams */
{
	xFILE *iobp;		/* the array of (x)FILE's */
				/* NULL for the __first_link in ILP32 */
	int	niob;		/* length of the arrays */
	struct _link_	*next;	/* next in the list */
};

/*
 * With dynamic linking, iob may be in either the library or in the user's
 * a.out, so the runtime linker fixes up the first entry in __first_link at
 * process startup time.
 *
 * In 32-bit processes, we don't have xFILE[FILE_ARY_SZ] but FILE[],
 * and _xftab[] instead; this is denoted by having iobp set to NULL in
 * 32-bit mode for the first link entry.
 */
struct _link_ __first_link =	/* first in linked list */
{
#if !defined(_LP64)
	NULL,
#else
	&_iob[0],
#endif
	_NFILE,
	NULL
};

/*
 * Information cached to speed up searches.  We remember where we
 * last found a free FILE* and we remember whether we saw any fcloses
 * in between.  We also count the number of chunks we allocated, see
 * _findiop() for an explanation.
 * These variables are all protected by _first_link_lock.
 */
static struct _link_ *lastlink = NULL;
static int fcloses;
static int nchunks;

static rwlock_t _first_link_lock = DEFAULTRWLOCK;

static int _fflush_u_iops(void);
static FILE *getiop(FILE *, rmutex_t *, mbstate_t *);

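/*
 * GETIOP() is used by _findiop() while it walks the list of links: it
 * tries to claim the given fp via getiop(), passing the per-stream lock
 * only in threaded processes, and on success releases _first_link_lock
 * and returns the stream directly from _findiop().
 */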
#define	GETIOP(fp, lk, mb)	{FILE *ret; \
	if ((ret = getiop((fp), __libc_threaded? (lk): NULL, (mb))) != NULL) { \
		if (__libc_threaded) \
			(void) __rw_unlock(&_first_link_lock); \
		return (ret); \
	}; \
	}

/*
 * All functions that understand the linked list of iob's follow.
 */
#pragma weak _cleanup = __cleanup
void
__cleanup(void)		/* called at process end to flush output streams */
{
	(void) fflush(NULL);
}

/*
 * For fork1-safety (see libc_prepare_atfork(), etc).
 */
void
stdio_locks()
{
	(void) __rw_wrlock(&_first_link_lock);
	/*
	 * XXX: We should acquire all of the iob locks here.
	 */
}

void
stdio_unlocks()
{
	/*
	 * XXX: We should release all of the iob locks here.
	 */
	(void) __rw_unlock(&_first_link_lock);
}

void
_flushlbf(void)		/* fflush() all line-buffered streams */
{
	FPDECL(fp);
	int i;
	struct _link_ *lp;

	if (__libc_threaded)
		(void) __rw_rdlock(&_first_link_lock);

	lp = &__first_link;
	do {
		FIRSTFP(lp, fp);
		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
			if ((fp->_flag & (_IOLBF | _IOWRT)) ==
			    (_IOLBF | _IOWRT))
				(void) _fflush_u(fp);
		}
	} while ((lp = lp->next) != NULL);

	if (__libc_threaded)
		(void) __rw_unlock(&_first_link_lock);
}

/* allocate an unused stream; NULL if cannot */
FILE *
_findiop(void)
{
	struct _link_ *lp, **prev;

	/* used so there only needs to be one malloc() */
#ifdef _LP64
	typedef	struct	{
		struct _link_	hdr;
		FILE	iob[FILE_ARY_SZ];
	} Pkg;
#else
	typedef union {
		struct {				/* Normal */
			struct _link_	hdr;
			xFILE	iob[FILE_ARY_SZ];
		} Pkgn;
		struct {				/* Reversed */
			xFILE	iob[FILE_ARY_SZ];
			struct _link_	hdr;
		} Pkgr;
	} Pkg;
	uintptr_t delta;
#endif
	Pkg *pkgp;
	struct _link_ *hdr;
	FPDECL(fp);
	int i;

	if (__libc_threaded)
		(void) __rw_wrlock(&_first_link_lock);

	if (lastlink == NULL) {
rescan:
		fcloses = 0;
		lastlink = &__first_link;
	}

	lp = lastlink;

	/*
	 * Lock to make the test of fp->_flag == 0 and the acquisition of
	 * the fp atomic, and to serialize the allocation of new links.
	 * Low contention is expected on _findiop(), hence the coarse
	 * locking.  For finer granularity, use fp->_lock for allocating
	 * an iop and make the test of lp->next and the allocation of a
	 * new link atomic using lp->_lock.
	 */

	do {
		prev = &lp->next;
		FIRSTFP(lp, fp);

		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
#ifdef	_LP64
			GETIOP(fp, &fp->_lock, &fp->_state);
#else
			GETIOP(fp,
			    xfp ? &xfp->xlock : &_xftab[IOPIND(fp)]._lock,
			    xfp ? &xfp->xstate : &_xftab[IOPIND(fp)]._state);
#endif	/*	_LP64	*/
		}
	} while ((lastlink = lp = lp->next) != NULL);

	/*
	 * If there was a sufficient number of fcloses since we last started
	 * at __first_link, we rescan all fp's again.  We do not rescan for
	 * all fcloses; that would simplify the algorithm but would make
	 * search times near O(n) again.
	 * Worst-case behaviour would still be pretty bad (open a full set,
	 * then continuously opening and closing one FILE * gets you a full
	 * scan each time).  That's why we overallocate 1 FILE for each
	 * 32 chunks.  More overallocation is better; this is a nice
	 * empirical value which doesn't cost a lot of memory, doesn't
	 * overallocate until we reach 256 FILE *s and keeps the performance
	 * pretty close to the optimum.
	 */
	if (fcloses > nchunks/32)
		goto rescan;

	/*
	 * Need to allocate another and put it in the linked list.
	 */
	if ((pkgp = malloc(sizeof (Pkg))) == NULL) {
		if (__libc_threaded)
			(void) __rw_unlock(&_first_link_lock);
		return (NULL);
	}

	(void) memset(pkgp, 0, sizeof (Pkg));

#ifdef _LP64
	hdr = &pkgp->hdr;
	hdr->iobp = &pkgp->iob[0];
#else
	/*
	 * The problem with referencing a word after a FILE* is the
	 * possibility of a SIGSEGV if a non-stdio-issued FILE structure
	 * ends on a page boundary.  We run this check so we never need
	 * to run an expensive check like mincore() in order to know
	 * whether it is safe to dereference ((xFILE*)fp)->xmagic.
	 * We allocate the block with two alternative layouts; if one
	 * layout is not properly aligned for our purposes, the other
	 * layout will be, because the size of _link_ is small compared
	 * to sizeof (xFILE).
	 * The check performed is this:
	 *	If the distance from pkgp to the end of the page is
	 *	less than the offset of the last xmagic field in the
	 *	xFILE structure (the 0x1000 boundary is inside our just
	 *	allocated structure) and the distance modulo the size of
	 *	xFILE is identical to the offset of the first xmagic in
	 *	the structure (i.e., XXXXXX000 points to an xmagic field),
	 *	we need to use the reversed structure.
	 */
	if ((delta = 0x1000 - ((uintptr_t)pkgp & 0xfff)) <=
				offsetof(Pkg, Pkgn.iob[FILE_ARY_SZ-1].xmagic) &&
	    delta % sizeof (struct xFILE) ==
		    offsetof(Pkg, Pkgn.iob[0].xmagic)) {
		/* Use reversed structure */
		hdr = &pkgp->Pkgr.hdr;
		hdr->iobp = &pkgp->Pkgr.iob[0];
	} else {
		/* Use normal structure */
		hdr = &pkgp->Pkgn.hdr;
		hdr->iobp = &pkgp->Pkgn.iob[0];
	}
#endif /* _LP64 */

	hdr->niob = FILE_ARY_SZ;
	nchunks++;

#ifdef	_LP64
	fp = hdr->iobp;
	for (i = 0; i < FILE_ARY_SZ; i++)
		_private_mutex_init(&fp[i]._lock,
			USYNC_THREAD|LOCK_RECURSIVE, NULL);
#else
	xfp = hdr->iobp;
	fp = &xfp->_iob;

	for (i = 0; i < FILE_ARY_SZ; i++) {
		xfp[i].xmagic = XMAGIC(&xfp[i]);
		_private_mutex_init(&xfp[i].xlock,
			USYNC_THREAD|LOCK_RECURSIVE, NULL);
	}
#endif	/*	_LP64	*/

	lastlink = *prev = hdr;
	fp->_ptr = 0;
	fp->_base = 0;
	fp->_flag = 0377; /* claim the fp by setting low 8 bits */
	if (__libc_threaded)
		(void) __rw_unlock(&_first_link_lock);

	return (fp);
}

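/*
 * Record whether the stream's underlying object supports seeking, so that
 * later flushes of read buffers can be skipped for pipes, character
 * devices, sockets and doors.
 */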
static void
isseekable(FILE *iop)
{
	struct stat64 fstatbuf;
	int save_errno;

	save_errno = errno;

	if (fstat64(iop->_file, &fstatbuf) != 0) {
		/*
		 * When we don't know what it is, fall back to the
		 * old behaviour and flush the stream.
		 */
		SET_SEEKABLE(iop);
		errno = save_errno;
		return;
	}

	/*
	 * Check for the file types that are not seekable; otherwise
	 * assume the stream is seekable, so we get the old behaviour
	 * and flush the stream.
	 */

	if (S_ISFIFO(fstatbuf.st_mode) || S_ISCHR(fstatbuf.st_mode) ||
	    S_ISSOCK(fstatbuf.st_mode) || S_ISDOOR(fstatbuf.st_mode)) {
		CLEAR_SEEKABLE(iop);
	} else {
		SET_SEEKABLE(iop);
	}

	errno = save_errno;
}

#ifdef	_LP64
void
_setbufend(FILE *iop, Uchar *end)	/* set the end pointer for this iop */
{
	iop->_end = end;

	isseekable(iop);
}

#undef _realbufend

Uchar *
_realbufend(FILE *iop)		/* get the end pointer for this iop */
{
	return (iop->_end);
}

#else /* _LP64 */

/*
 * Awkward functions not needed for the sane 64 bit environment.
 */
/*
 * xmagic must not be aligned on a 4K boundary. We guarantee this in
 * _findiop().
 */
#define	VALIDXFILE(xfp) \
	(((uintptr_t)&(xfp)->xmagic & 0xfff) && \
	    (xfp)->xmagic == XMAGIC(FILEx(xfp)))

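/*
 * Map a FILE * to its extended data: the parallel _xftab[] entry for the
 * original _iob[] streams, the trailing _xdat member for an xFILE, or
 * NULL for a FILE that stdio did not hand out.
 */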
static struct xFILEdata *
getxfdat(FILE *iop)
{
	if (STDIOP(iop))
		return (&_xftab[IOPIND(iop)]);
	else if (VALIDXFILE(FILEx(iop)))
		return (&FILEx(iop)->_xdat);
	else
		return (NULL);
}

void
_setbufend(FILE *iop, Uchar *end)	/* set the end pointer for this iop */
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		dat->_end = end;

	isseekable(iop);

	/*
	 * For binary compatibility with user programs using the
	 * old _bufend macro.  This is *so* broken, fileno()
	 * is not the proper index.
	 */
	if (iop->_file < _NFILE)
		_bufendtab[iop->_file] = end;

}

Uchar *
_realbufend(FILE *iop)		/* get the end pointer for this iop */
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		return (dat->_end);

	return (NULL);
}

/*
 * _reallock() is invoked in each stdio call through the IOB_LCK() macro;
 * it is therefore extremely performance sensitive.  We get better
 * performance by inlining the STDIOP check in IOB_LCK and inlining a
 * custom version of getxfdat() here.
 */
rmutex_t *
_reallock(FILE *iop)
{
	if (VALIDXFILE(FILEx(iop)))
		return (&FILEx(iop)->xlock);

	return (NULL);
}

#endif	/*	_LP64	*/

/* make sure _cnt, _ptr are correct */
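/*
 * An interrupt (e.g. a signal handler doing stdio output on the same
 * stream) can leave _ptr and _cnt inconsistent with the buffer end
 * recorded before the interruption; clamp _ptr to bufend and _cnt to
 * the space actually left.
 */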
void
_bufsync(FILE *iop, Uchar *bufend)
{
	ssize_t spaceleft;

	spaceleft = bufend - iop->_ptr;
	if (bufend < iop->_ptr) {
		iop->_ptr = bufend;
		iop->_cnt = 0;
	} else if (spaceleft < iop->_cnt)
		iop->_cnt = spaceleft;
}

/* really write out current buffer contents */
int
_xflsbuf(FILE *iop)
{
	ssize_t n;
	Uchar *base = iop->_base;
	Uchar *bufend;
	ssize_t num_wrote;

	/*
	 * Hopefully this is stable with respect to interrupts...
	 */
	n = iop->_ptr - base;
	iop->_ptr = base;
	bufend = _bufend(iop);
	if (iop->_flag & (_IOLBF | _IONBF))
		iop->_cnt = 0;		/* always go to a flush */
	else
		iop->_cnt = bufend - base;

	if (_needsync(iop, bufend))	/* recover from interrupts */
		_bufsync(iop, bufend);

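	/*
	 * write() may return a short count; keep writing the remainder
	 * until the whole buffer has been pushed out, and give up with
	 * _IOERR/EOF on a zero or negative return.
	 */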
	if (n > 0) {
		while ((num_wrote =
			write(iop->_file, base, (size_t)n)) != n) {
			if (num_wrote <= 0) {
				iop->_flag |= _IOERR;
				return (EOF);
			}
			n -= num_wrote;
			base += num_wrote;
		}
	}
	return (0);
}

/* flush (write) buffer */
int
fflush(FILE *iop)
{
	int res;
	rmutex_t *lk;

	if (iop) {
		FLOCKFILE(lk, iop);
		res = _fflush_u(iop);
		FUNLOCKFILE(lk);
	} else {
		res = _fflush_u_iops();		/* flush all iops */
	}
	return (res);
}

static int
_fflush_u_iops(void)		/* flush all buffers */
{
	FPDECL(iop);

	int i;
	struct _link_ *lp;
	int res = 0;

	if (__libc_threaded)
		(void) __rw_rdlock(&_first_link_lock);

	lp = &__first_link;

	do {
		/*
		 * Don't grab the locks for these file pointers
		 * since they are supposed to be flushed anyway.
		 * It could also be the case that the 2nd portion
		 * (base and lock) is not initialized.
		 */
		FIRSTFP(lp, iop);
		for (i = lp->niob; --i >= 0; NEXTFP(iop)) {
		    if (!(iop->_flag & _IONBF)) {
			/*
			 * We don't need to worry about the _IORW case
			 * since the iop will also be marked with _IOREAD
			 * or _IOWRT, whichever we are really doing.
			 */
			if (iop->_flag & _IOWRT) {    /* flush write buffers */
			    res |= _fflush_u(iop);
			} else if (iop->_flag & _IOREAD) {
				/*
				 * Flush seekable read buffers;
				 * don't flush non-seekable read buffers.
				 */
			    if (GET_SEEKABLE(iop)) {
				res |= _fflush_u(iop);
			    }
			}
		    }
		}
	} while ((lp = lp->next) != NULL);
	if (__libc_threaded)
		(void) __rw_unlock(&_first_link_lock);
	return (res);
}

/* flush buffer */
int
_fflush_u(FILE *iop)
{
	int res = 0;

	/* this portion is always assumed locked */
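	/*
	 * For a stream last used for reading, "flushing" means discarding
	 * the buffered but not yet consumed input: seek the underlying fd
	 * back by the unread count so its offset matches what the caller
	 * has actually read, and reset the buffer pointers.
	 */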
	if (!(iop->_flag & _IOWRT)) {
		(void) lseek64(iop->_file, -iop->_cnt, SEEK_CUR);
		iop->_cnt = 0;
		/* needed for ungetc & multibyte pushbacks */
		iop->_ptr = iop->_base;
		if (iop->_flag & _IORW) {
			iop->_flag &= ~_IOREAD;
		}
		return (0);
	}
	if (iop->_base != NULL && iop->_ptr > iop->_base) {
		res = _xflsbuf(iop);
	}
	if (iop->_flag & _IORW) {
		iop->_flag &= ~_IOWRT;
		iop->_cnt = 0;
	}
	return (res);
}

/* flush buffer and close stream */
int
fclose(FILE *iop)
{
	int res = 0;
	rmutex_t *lk;

	if (iop == NULL) {
		return (EOF);		/* avoid passing zero to FLOCKFILE */
	}

	FLOCKFILE(lk, iop);
	if (iop->_flag == 0) {
		FUNLOCKFILE(lk);
		return (EOF);
	}
	/* Flush only if not unbuffered and opened for read and/or write. */
	if (!(iop->_flag & _IONBF) && (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
		res = _fflush_u(iop);
	if (close(iop->_file) < 0)
		res = EOF;
	if (iop->_flag & _IOMYBUF) {
		(void) free((char *)iop->_base - PUSHBACK);
	}
	iop->_base = NULL;
	iop->_ptr = NULL;
	iop->_cnt = 0;
	iop->_flag = 0;			/* marks it as available */
	FUNLOCKFILE(lk);

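	/*
	 * Bump the fcloses count under _first_link_lock; _findiop() uses
	 * it to decide when a rescan for reusable FILE slots is worthwhile.
	 */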
	if (__libc_threaded)
		(void) __rw_wrlock(&_first_link_lock);
	fcloses++;
	if (__libc_threaded)
		(void) __rw_unlock(&_first_link_lock);

	return (res);
}

/* flush buffer, close fd but keep the stream used by freopen() */
int
close_fd(FILE *iop)
{
	int res = 0;
	mbstate_t *mb;

	if (iop == NULL || iop->_flag == 0)
		return (EOF);
	/* Flush only if not unbuffered and opened for read and/or write. */
	if (!(iop->_flag & _IONBF) && (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
		res = _fflush_u(iop);
	if (close(iop->_file) < 0)
		res = EOF;
	if (iop->_flag & _IOMYBUF) {
		(void) free((char *)iop->_base - PUSHBACK);
	}
	iop->_base = NULL;
	iop->_ptr = NULL;
	mb = _getmbstate(iop);
	if (mb != NULL)
		(void) memset(mb, 0, sizeof (mbstate_t));
	iop->_cnt = 0;
	_setorientation(iop, _NO_MODE);
	return (res);
}

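/*
 * Attempt to claim a FILE slot.  If a lock is supplied, try it without
 * blocking; a stream whose lock is held is in use.  If the slot is free
 * (_flag == 0), reset its fields, claim it by setting the low 8 bits of
 * _flag, and return it; otherwise return NULL.
 */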
static FILE *
getiop(FILE *fp, rmutex_t *lk, mbstate_t *mb)
{
	if (lk != NULL && rmutex_trylock(lk))
		return (NULL);	/* locked: fp in use */

	if (fp->_flag == 0) {	/* unused */
#ifndef	_LP64
		fp->__orientation = 0;
#endif /* _LP64 */
		fp->_cnt = 0;
		fp->_ptr = NULL;
		fp->_base = NULL;
		fp->_flag = 0377;	/* claim the fp by setting low 8 bits */
		(void) memset(mb, 0, sizeof (mbstate_t));
		FUNLOCKFILE(lk);
		return (fp);
	}
	FUNLOCKFILE(lk);
	return (NULL);
}

#ifndef	_LP64
/*
 * DESCRIPTION:
 * This function gets the pointer to the mbstate_t structure associated
 * with the specified iop.
 *
 * RETURNS:
 * If the associated mbstate_t is found, the pointer to the mbstate_t is
 * returned.  Otherwise, NULL is returned.
 */
mbstate_t *
_getmbstate(FILE *iop)
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		return (&dat->_state);

	return (NULL);
}
#endif