xref: /illumos-gate/usr/src/uts/common/fs/tmpfs/tmp_vnops.c (revision 48edc7cf07b5dccc3ad84bf2dafe4150bd666d60)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright (c) 2015, Joyent, Inc. All rights reserved.
29  * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
30  * Copyright 2016 RackTop Systems.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/param.h>
35 #include <sys/t_lock.h>
36 #include <sys/systm.h>
37 #include <sys/sysmacros.h>
38 #include <sys/user.h>
39 #include <sys/time.h>
40 #include <sys/vfs.h>
41 #include <sys/vfs_opreg.h>
42 #include <sys/vnode.h>
43 #include <sys/file.h>
44 #include <sys/fcntl.h>
45 #include <sys/flock.h>
46 #include <sys/kmem.h>
47 #include <sys/uio.h>
48 #include <sys/errno.h>
49 #include <sys/stat.h>
50 #include <sys/cred.h>
51 #include <sys/dirent.h>
52 #include <sys/pathname.h>
53 #include <sys/vmsystm.h>
54 #include <sys/fs/tmp.h>
55 #include <sys/fs/tmpnode.h>
56 #include <sys/mman.h>
57 #include <vm/hat.h>
58 #include <vm/seg_vn.h>
59 #include <vm/seg_map.h>
60 #include <vm/seg.h>
61 #include <vm/anon.h>
62 #include <vm/as.h>
63 #include <vm/page.h>
64 #include <vm/pvn.h>
65 #include <sys/cmn_err.h>
66 #include <sys/debug.h>
67 #include <sys/swap.h>
68 #include <sys/buf.h>
69 #include <sys/vm.h>
70 #include <sys/vtrace.h>
71 #include <sys/policy.h>
72 #include <fs/fs_subr.h>
73 
74 static int	tmp_getapage(struct vnode *, u_offset_t, size_t, uint_t *,
75 	page_t **, size_t, struct seg *, caddr_t, enum seg_rw, struct cred *);
76 static int 	tmp_putapage(struct vnode *, page_t *, u_offset_t *, size_t *,
77 	int, struct cred *);
78 
79 /* ARGSUSED1 */
80 static int
81 tmp_open(struct vnode **vpp, int flag, struct cred *cred, caller_context_t *ct)
82 {
83 	/*
84 	 * swapon to a tmpfs file is not supported so access
85 	 * is denied on open if VISSWAP is set.
86 	 */
87 	if ((*vpp)->v_flag & VISSWAP)
88 		return (EINVAL);
89 	return (0);
90 }
91 
92 /* ARGSUSED1 */
93 static int
94 tmp_close(
95 	struct vnode *vp,
96 	int flag,
97 	int count,
98 	offset_t offset,
99 	struct cred *cred,
100 	caller_context_t *ct)
101 {
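	/*
	 * A close is per file descriptor, so just drop any record locks and
	 * share reservations that the closing process holds on this vnode.
	 */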
102 	cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
103 	cleanshares(vp, ttoproc(curthread)->p_pid);
104 	return (0);
105 }
106 
107 /*
108  * wrtmp does the real work of write requests for tmpfs.
109  */
110 static int
111 wrtmp(
112 	struct tmount *tm,
113 	struct tmpnode *tp,
114 	struct uio *uio,
115 	struct cred *cr,
116 	struct caller_context *ct)
117 {
118 	pgcnt_t pageoffset;	/* byte offset within the page */
119 	ulong_t segmap_offset;	/* pagesize byte offset into segmap */
120 	caddr_t base;		/* base of segmap */
121 	ssize_t bytes;		/* bytes to uiomove */
122 	pfn_t pagenumber;	/* offset in pages into tmp file */
123 	struct vnode *vp;
124 	int error = 0;
125 	int	pagecreate;	/* == 1 if we allocated a page */
126 	int	newpage;
127 	rlim64_t limit = uio->uio_llimit;
128 	long oresid = uio->uio_resid;
129 	timestruc_t now;
130 
131 	long tn_size_changed = 0;
132 	long old_tn_size;
133 	long new_tn_size;
134 
135 	vp = TNTOV(tp);
136 	ASSERT(vp->v_type == VREG);
137 
138 	TRACE_1(TR_FAC_TMPFS, TR_TMPFS_RWTMP_START,
139 	    "tmp_wrtmp_start:vp %p", vp);
140 
141 	ASSERT(RW_WRITE_HELD(&tp->tn_contents));
142 	ASSERT(RW_WRITE_HELD(&tp->tn_rwlock));
143 
144 	if (MANDLOCK(vp, tp->tn_mode)) {
145 		rw_exit(&tp->tn_contents);
146 		/*
147 		 * tmp_getattr ends up being called by chklock
148 		 */
149 		error = chklock(vp, FWRITE, uio->uio_loffset, uio->uio_resid,
150 		    uio->uio_fmode, ct);
151 		rw_enter(&tp->tn_contents, RW_WRITER);
152 		if (error != 0) {
153 			TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
154 			    "tmp_wrtmp_end:vp %p error %d", vp, error);
155 			return (error);
156 		}
157 	}
158 
159 	if (uio->uio_loffset < 0)
160 		return (EINVAL);
161 
162 	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
163 		limit = MAXOFFSET_T;
164 
165 	if (uio->uio_loffset >= limit) {
166 		proc_t *p = ttoproc(curthread);
167 
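		/*
		 * The write starts at or beyond the file size limit:
		 * trigger the RLIMIT_FSIZE resource control (which may
		 * post SIGXFSZ to the process) and fail with EFBIG.
		 */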
168 		mutex_enter(&p->p_lock);
169 		(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE], p->p_rctls,
170 		    p, RCA_UNSAFE_SIGINFO);
171 		mutex_exit(&p->p_lock);
172 		return (EFBIG);
173 	}
174 
175 	if (uio->uio_loffset >= MAXOFF_T) {
176 		TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
177 		    "tmp_wrtmp_end:vp %p error %d", vp, EINVAL);
178 		return (EFBIG);
179 	}
180 
181 	if (uio->uio_resid == 0) {
182 		TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
183 		    "tmp_wrtmp_end:vp %p error %d", vp, 0);
184 		return (0);
185 	}
186 
187 	if (limit > MAXOFF_T)
188 		limit = MAXOFF_T;
189 
190 	do {
191 		long	offset;
192 		long	delta;
193 
194 		offset = (long)uio->uio_offset;
195 		pageoffset = offset & PAGEOFFSET;
196 		/*
197 		 * A maximum of PAGESIZE bytes of data is transferred
198 		 * each pass through this loop
199 		 */
200 		bytes = MIN(PAGESIZE - pageoffset, uio->uio_resid);
201 
202 		if (offset + bytes >= limit) {
203 			if (offset >= limit) {
204 				error = EFBIG;
205 				goto out;
206 			}
207 			bytes = limit - offset;
208 		}
209 		pagenumber = btop(offset);
210 
211 		/*
212 		 * delta is the amount of anonymous memory
213 		 * to reserve for the file.
214 		 * We always reserve in pagesize increments so
215 		 * unless we're extending the file into a new page,
216 		 * we don't need to call tmp_resv.
217 		 */
218 		delta = offset + bytes -
219 		    P2ROUNDUP_TYPED(tp->tn_size, PAGESIZE, u_offset_t);
220 		if (delta > 0) {
221 			pagecreate = 1;
222 			if (tmp_resv(tm, tp, delta, pagecreate)) {
223 				/*
224 				 * Log file system full in the zone that owns
225 				 * the tmpfs mount, as well as in the global
226 				 * zone if necessary.
227 				 */
228 				zcmn_err(tm->tm_vfsp->vfs_zone->zone_id,
229 				    CE_WARN, "%s: File system full, "
230 				    "swap space limit exceeded",
231 				    tm->tm_mntpath);
232 
233 				if (tm->tm_vfsp->vfs_zone->zone_id !=
234 				    GLOBAL_ZONEID) {
235 
236 					vfs_t *vfs = tm->tm_vfsp;
237 
238 					zcmn_err(GLOBAL_ZONEID,
239 					    CE_WARN, "%s: File system full, "
240 					    "swap space limit exceeded",
241 					    vfs->vfs_vnodecovered->v_path);
242 				}
243 				error = ENOSPC;
244 				break;
245 			}
246 			tmpnode_growmap(tp, (ulong_t)offset + bytes);
247 		}
248 		/* grow the file to the new length */
249 		if (offset + bytes > tp->tn_size) {
250 			tn_size_changed = 1;
251 			old_tn_size = tp->tn_size;
252 			/*
253 			 * Postpone updating tp->tn_size until uiomove() is
254 			 * done.
255 			 */
256 			new_tn_size = offset + bytes;
257 		}
258 		if (bytes == PAGESIZE) {
259 			/*
260 			 * Writing whole page so reading from disk
261 			 * is a waste
262 			 */
263 			pagecreate = 1;
264 		} else {
265 			pagecreate = 0;
266 		}
267 		/*
268 		 * If writing past EOF or filling in a hole
269 		 * we need to allocate an anon slot.
270 		 */
271 		if (anon_get_ptr(tp->tn_anon, pagenumber) == NULL) {
272 			(void) anon_set_ptr(tp->tn_anon, pagenumber,
273 			    anon_alloc(vp, ptob(pagenumber)), ANON_SLEEP);
274 			pagecreate = 1;
275 			tp->tn_nblocks++;
276 		}
277 
278 		/*
279 		 * We have to drop the contents lock to allow the VM
280 		 * system to reacquire it in tmp_getpage()
281 		 */
282 		rw_exit(&tp->tn_contents);
283 
284 		/*
285 		 * Touch the page and fault it in if it is not in core
286 		 * before segmap_getmapflt or vpm_data_copy can lock it.
287 		 * This is to avoid a deadlock when the buffer is mapped
288 		 * through mmap to the same file that we are writing.
289 		 */
290 		uio_prefaultpages((long)bytes, uio);
291 
292 		newpage = 0;
293 		if (vpm_enable) {
294 			/*
295 			 * Copy data. If new pages are created, part of
296 			 * the page that is not written will be initialized
297 			 * with zeros.
298 			 */
299 			error = vpm_data_copy(vp, offset, bytes, uio,
300 			    !pagecreate, &newpage, 1, S_WRITE);
301 		} else {
302 			/* Get offset within the segmap mapping */
303 			segmap_offset = (offset & PAGEMASK) & MAXBOFFSET;
304 			base = segmap_getmapflt(segkmap, vp,
305 			    (offset & MAXBMASK), PAGESIZE, !pagecreate,
306 			    S_WRITE);
307 		}
308 
309 
310 		if (!vpm_enable && pagecreate) {
311 			/*
312 			 * segmap_pagecreate() returns 1 if it calls
313 			 * page_create_va() to allocate any pages.
314 			 */
315 			newpage = segmap_pagecreate(segkmap,
316 			    base + segmap_offset, (size_t)PAGESIZE, 0);
317 			/*
318 			 * Clear from the beginning of the page to the starting
319 			 * offset of the data.
320 			 */
321 			if (pageoffset != 0)
322 				(void) kzero(base + segmap_offset,
323 				    (size_t)pageoffset);
324 		}
325 
326 		if (!vpm_enable) {
327 			error = uiomove(base + segmap_offset + pageoffset,
328 			    (long)bytes, UIO_WRITE, uio);
329 		}
330 
331 		if (!vpm_enable && pagecreate &&
332 		    uio->uio_offset < P2ROUNDUP(offset + bytes, PAGESIZE)) {
333 			long	zoffset; /* zero from offset into page */
334 			/*
335 			 * We created pages w/o initializing them completely,
336 			 * thus we need to zero the part that wasn't set up.
337 			 * This happens on most EOF write cases and if
338 			 * we had some sort of error during the uiomove.
339 			 */
340 			long nmoved;
341 
342 			nmoved = uio->uio_offset - offset;
343 			ASSERT((nmoved + pageoffset) <= PAGESIZE);
344 
345 			/*
346 			 * Zero from the end of data in the page to the
347 			 * end of the page.
348 			 */
349 			if ((zoffset = pageoffset + nmoved) < PAGESIZE)
350 				(void) kzero(base + segmap_offset + zoffset,
351 				    (size_t)PAGESIZE - zoffset);
352 		}
353 
354 		/*
355 		 * Unlock the pages which have been allocated by
356 		 * page_create_va() in segmap_pagecreate()
357 		 */
358 		if (!vpm_enable && newpage) {
359 			segmap_pageunlock(segkmap, base + segmap_offset,
360 			    (size_t)PAGESIZE, S_WRITE);
361 		}
362 
363 		if (error) {
364 			/*
365 			 * If we failed on a write, we must
366 			 * be sure to invalidate any pages that may have
367 			 * been allocated.
368 			 */
369 			if (vpm_enable) {
370 				(void) vpm_sync_pages(vp, offset, PAGESIZE,
371 				    SM_INVAL);
372 			} else {
373 				(void) segmap_release(segkmap, base, SM_INVAL);
374 			}
375 		} else {
376 			if (vpm_enable) {
377 				error = vpm_sync_pages(vp, offset, PAGESIZE,
378 				    0);
379 			} else {
380 				error = segmap_release(segkmap, base, 0);
381 			}
382 		}
383 
384 		/*
385 		 * Re-acquire contents lock.
386 		 */
387 		rw_enter(&tp->tn_contents, RW_WRITER);
388 
389 		/*
390 		 * Update tn_size.
391 		 */
392 		if (tn_size_changed)
393 			tp->tn_size = new_tn_size;
394 
395 		/*
396 		 * If the uiomove failed, fix up tn_size.
397 		 */
398 		if (error) {
399 			if (tn_size_changed) {
400 				/*
401 				 * The uiomove failed, and we
402 				 * allocated blocks, so get rid
403 				 * of them.
404 				 */
405 				(void) tmpnode_trunc(tm, tp,
406 				    (ulong_t)old_tn_size);
407 			}
408 		} else {
409 			/*
410 			 * XXX - Can this be out of the loop?
411 			 */
412 			if ((tp->tn_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) &&
413 			    (tp->tn_mode & (S_ISUID | S_ISGID)) &&
414 			    secpolicy_vnode_setid_retain(cr,
415 			    (tp->tn_mode & S_ISUID) != 0 && tp->tn_uid == 0)) {
416 				/*
417 				 * Clear Set-UID & Set-GID bits on
418 				 * successful write if not privileged
419 				 * and at least one of the execute bits
420 				 * is set.  If we always clear Set-GID,
421 				 * mandatory file and record locking is
422 				 * unusable.
423 				 */
424 				tp->tn_mode &= ~(S_ISUID | S_ISGID);
425 			}
426 			gethrestime(&now);
427 			tp->tn_mtime = now;
428 			tp->tn_ctime = now;
429 		}
430 	} while (error == 0 && uio->uio_resid > 0 && bytes != 0);
431 
432 out:
433 	/*
434 	 * If we've already done a partial-write, terminate
435 	 * the write but return no error.
436 	 */
437 	if (oresid != uio->uio_resid)
438 		error = 0;
439 	TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
440 	    "tmp_wrtmp_end:vp %p error %d", vp, error);
441 	return (error);
442 }
443 
444 /*
445  * rdtmp does the real work of read requests for tmpfs.
446  */
447 static int
448 rdtmp(
449 	struct tmount *tm,
450 	struct tmpnode *tp,
451 	struct uio *uio,
452 	struct caller_context *ct)
453 {
454 	ulong_t pageoffset;	/* byte offset within the page */
455 	ulong_t segmap_offset;	/* pagesize byte offset into segmap */
456 	caddr_t base;		/* base of segmap */
457 	ssize_t bytes;		/* bytes to uiomove */
458 	struct vnode *vp;
459 	int error;
460 	long oresid = uio->uio_resid;
461 
462 #if defined(lint)
463 	tm = tm;
464 #endif
465 	vp = TNTOV(tp);
466 
467 	TRACE_1(TR_FAC_TMPFS, TR_TMPFS_RWTMP_START, "tmp_rdtmp_start:vp %p",
468 	    vp);
469 
470 	ASSERT(RW_LOCK_HELD(&tp->tn_contents));
471 
472 	if (MANDLOCK(vp, tp->tn_mode)) {
473 		rw_exit(&tp->tn_contents);
474 		/*
475 		 * tmp_getattr ends up being called by chklock
476 		 */
477 		error = chklock(vp, FREAD, uio->uio_loffset, uio->uio_resid,
478 		    uio->uio_fmode, ct);
479 		rw_enter(&tp->tn_contents, RW_READER);
480 		if (error != 0) {
481 			TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
482 			    "tmp_rdtmp_end:vp %p error %d", vp, error);
483 			return (error);
484 		}
485 	}
486 	ASSERT(tp->tn_type == VREG);
487 
488 	if (uio->uio_loffset >= MAXOFF_T) {
489 		TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
490 		    "tmp_rdtmp_end:vp %p error %d", vp, EINVAL);
491 		return (0);
492 	}
493 	if (uio->uio_loffset < 0)
494 		return (EINVAL);
495 	if (uio->uio_resid == 0) {
496 		TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
497 		    "tmp_rdtmp_end:vp %p error %d", vp, 0);
498 		return (0);
499 	}
500 
501 	vp = TNTOV(tp);
502 
503 	do {
504 		long diff;
505 		long offset;
506 
507 		offset = uio->uio_offset;
508 		pageoffset = offset & PAGEOFFSET;
509 		bytes = MIN(PAGESIZE - pageoffset, uio->uio_resid);
510 
511 		diff = tp->tn_size - offset;
512 
513 		if (diff <= 0) {
514 			error = 0;
515 			goto out;
516 		}
517 		if (diff < bytes)
518 			bytes = diff;
519 
520 		/*
521 		 * We have to drop the contents lock to allow the VM system
522 		 * to reacquire it in tmp_getpage() should the uiomove cause a
523 		 * pagefault.
524 		 */
525 		rw_exit(&tp->tn_contents);
526 
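		/*
		 * With VPM enabled, copy through the kernel's vnode page
		 * mapping interface; otherwise map the file window through
		 * segmap and uiomove from there.
		 */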
527 		if (vpm_enable) {
528 			/*
529 			 * Copy data.
530 			 */
531 			error = vpm_data_copy(vp, offset, bytes, uio, 1, NULL,
532 			    0, S_READ);
533 		} else {
534 			segmap_offset = (offset & PAGEMASK) & MAXBOFFSET;
535 			base = segmap_getmapflt(segkmap, vp, offset & MAXBMASK,
536 			    bytes, 1, S_READ);
537 
538 			error = uiomove(base + segmap_offset + pageoffset,
539 			    (long)bytes, UIO_READ, uio);
540 		}
541 
542 		if (error) {
543 			if (vpm_enable) {
544 				(void) vpm_sync_pages(vp, offset, PAGESIZE, 0);
545 			} else {
546 				(void) segmap_release(segkmap, base, 0);
547 			}
548 		} else {
549 			if (vpm_enable) {
550 				error = vpm_sync_pages(vp, offset, PAGESIZE,
551 				    0);
552 			} else {
553 				error = segmap_release(segkmap, base, 0);
554 			}
555 		}
556 
557 		/*
558 		 * Re-acquire contents lock.
559 		 */
560 		rw_enter(&tp->tn_contents, RW_READER);
561 
562 	} while (error == 0 && uio->uio_resid > 0);
563 
564 out:
565 	gethrestime(&tp->tn_atime);
566 
567 	/*
568 	 * If we've already done a partial read, terminate
569 	 * the read but return no error.
570 	 */
571 	if (oresid != uio->uio_resid)
572 		error = 0;
573 
574 	TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
575 	    "tmp_rdtmp_end:vp %p error %d", vp, error);
576 	return (error);
577 }
578 
579 /* ARGSUSED2 */
580 static int
581 tmp_read(struct vnode *vp, struct uio *uiop, int ioflag, cred_t *cred,
582     struct caller_context *ct)
583 {
584 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
585 	struct tmount *tm = (struct tmount *)VTOTM(vp);
586 	int error;
587 
588 	/*
589 	 * We don't currently support reading non-regular files
590 	 */
591 	if (vp->v_type == VDIR)
592 		return (EISDIR);
593 	if (vp->v_type != VREG)
594 		return (EINVAL);
595 	/*
596 	 * tmp_rwlock should have already been called from layers above
597 	 */
598 	ASSERT(RW_READ_HELD(&tp->tn_rwlock));
599 
600 	rw_enter(&tp->tn_contents, RW_READER);
601 
602 	error = rdtmp(tm, tp, uiop, ct);
603 
604 	rw_exit(&tp->tn_contents);
605 
606 	return (error);
607 }
608 
609 static int
610 tmp_write(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cred,
611     struct caller_context *ct)
612 {
613 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
614 	struct tmount *tm = (struct tmount *)VTOTM(vp);
615 	int error;
616 
617 	/*
618 	 * We don't currently support writing to non-regular files
619 	 */
620 	if (vp->v_type != VREG)
621 		return (EINVAL);	/* XXX EISDIR? */
622 
623 	/*
624 	 * tmp_rwlock should have already been called from layers above
625 	 */
626 	ASSERT(RW_WRITE_HELD(&tp->tn_rwlock));
627 
628 	rw_enter(&tp->tn_contents, RW_WRITER);
629 
630 	if (ioflag & FAPPEND) {
631 		/*
632 		 * In append mode start at end of file.
633 		 */
634 		uiop->uio_loffset = tp->tn_size;
635 	}
636 
637 	error = wrtmp(tm, tp, uiop, cred, ct);
638 
639 	rw_exit(&tp->tn_contents);
640 
641 	return (error);
642 }
643 
644 /* ARGSUSED */
645 static int
646 tmp_ioctl(
647 	struct vnode *vp,
648 	int com,
649 	intptr_t data,
650 	int flag,
651 	struct cred *cred,
652 	int *rvalp,
653 	caller_context_t *ct)
654 {
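	/* tmpfs defines no ioctls of its own. */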
655 	return (ENOTTY);
656 }
657 
658 /* ARGSUSED2 */
659 static int
660 tmp_getattr(
661 	struct vnode *vp,
662 	struct vattr *vap,
663 	int flags,
664 	struct cred *cred,
665 	caller_context_t *ct)
666 {
667 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
668 	struct vnode *mvp;
669 	struct vattr va;
670 	int attrs = 1;
671 
672 	/*
673 	 * A special case to handle the root tnode on a diskless nfs
674 	 * client that may have had its uid and gid inherited
675 	 * from an nfs vnode with nobody ownership.  Likely the
676 	 * root filesystem. After nfs is fully functional the uid/gid
677 	 * may be mappable so ask again.
678 	 * vfsp can't get unmounted because we hold vp.
679 	 */
680 	if (vp->v_flag & VROOT &&
681 	    (mvp = vp->v_vfsp->vfs_vnodecovered) != NULL) {
682 		mutex_enter(&tp->tn_tlock);
683 		if (tp->tn_uid == UID_NOBODY || tp->tn_gid == GID_NOBODY) {
684 			mutex_exit(&tp->tn_tlock);
685 			bzero(&va, sizeof (struct vattr));
686 			va.va_mask = AT_UID|AT_GID;
687 			attrs = VOP_GETATTR(mvp, &va, 0, cred, ct);
688 		} else {
689 			mutex_exit(&tp->tn_tlock);
690 		}
691 	}
692 	mutex_enter(&tp->tn_tlock);
693 	if (attrs == 0) {
694 		tp->tn_uid = va.va_uid;
695 		tp->tn_gid = va.va_gid;
696 	}
697 	vap->va_type = vp->v_type;
698 	vap->va_mode = tp->tn_mode & MODEMASK;
699 	vap->va_uid = tp->tn_uid;
700 	vap->va_gid = tp->tn_gid;
701 	vap->va_fsid = tp->tn_fsid;
702 	vap->va_nodeid = (ino64_t)tp->tn_nodeid;
703 	vap->va_nlink = tp->tn_nlink;
704 	vap->va_size = (u_offset_t)tp->tn_size;
705 	vap->va_atime = tp->tn_atime;
706 	vap->va_mtime = tp->tn_mtime;
707 	vap->va_ctime = tp->tn_ctime;
708 	vap->va_blksize = PAGESIZE;
709 	vap->va_rdev = tp->tn_rdev;
710 	vap->va_seq = tp->tn_seq;
711 
712 	/*
713 	 * XXX Holes are not taken into account.  We could take the time to
714 	 * run through the anon array looking for allocated slots...
715 	 */
716 	vap->va_nblocks = (fsblkcnt64_t)btodb(ptob(btopr(vap->va_size)));
717 	mutex_exit(&tp->tn_tlock);
718 	return (0);
719 }
720 
721 /*ARGSUSED4*/
722 static int
723 tmp_setattr(
724 	struct vnode *vp,
725 	struct vattr *vap,
726 	int flags,
727 	struct cred *cred,
728 	caller_context_t *ct)
729 {
730 	struct tmount *tm = (struct tmount *)VTOTM(vp);
731 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
732 	int error = 0;
733 	struct vattr *get;
734 	long mask;
735 
736 	/*
737 	 * Cannot set these attributes
738 	 */
739 	if ((vap->va_mask & AT_NOSET) || (vap->va_mask & AT_XVATTR))
740 		return (EINVAL);
741 
742 	mutex_enter(&tp->tn_tlock);
743 
744 	get = &tp->tn_attr;
745 	/*
746 	 * Change file access modes. Must be owner or have sufficient
747 	 * privileges.
748 	 */
749 	error = secpolicy_vnode_setattr(cred, vp, vap, get, flags, tmp_taccess,
750 	    tp);
751 
752 	if (error)
753 		goto out;
754 
755 	mask = vap->va_mask;
756 
757 	if (mask & AT_MODE) {
758 		get->va_mode &= S_IFMT;
759 		get->va_mode |= vap->va_mode & ~S_IFMT;
760 	}
761 
762 	if (mask & AT_UID)
763 		get->va_uid = vap->va_uid;
764 	if (mask & AT_GID)
765 		get->va_gid = vap->va_gid;
766 	if (mask & AT_ATIME)
767 		get->va_atime = vap->va_atime;
768 	if (mask & AT_MTIME)
769 		get->va_mtime = vap->va_mtime;
770 
771 	if (mask & (AT_UID | AT_GID | AT_MODE | AT_MTIME))
772 		gethrestime(&tp->tn_ctime);
773 
774 	if (mask & AT_SIZE) {
775 		ASSERT(vp->v_type != VDIR);
776 
777 		/* Don't support large files. */
778 		if (vap->va_size > MAXOFF_T) {
779 			error = EFBIG;
780 			goto out;
781 		}
782 		mutex_exit(&tp->tn_tlock);
783 
784 		rw_enter(&tp->tn_rwlock, RW_WRITER);
785 		rw_enter(&tp->tn_contents, RW_WRITER);
786 		error = tmpnode_trunc(tm, tp, (ulong_t)vap->va_size);
787 		rw_exit(&tp->tn_contents);
788 		rw_exit(&tp->tn_rwlock);
789 
790 		if (error == 0 && vap->va_size == 0)
791 			vnevent_truncate(vp, ct);
792 
793 		goto out1;
794 	}
795 out:
796 	mutex_exit(&tp->tn_tlock);
797 out1:
798 	return (error);
799 }
800 
801 /* ARGSUSED2 */
802 static int
803 tmp_access(
804 	struct vnode *vp,
805 	int mode,
806 	int flags,
807 	struct cred *cred,
808 	caller_context_t *ct)
809 {
810 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
811 	int error;
812 
813 	mutex_enter(&tp->tn_tlock);
814 	error = tmp_taccess(tp, mode, cred);
815 	mutex_exit(&tp->tn_tlock);
816 	return (error);
817 }
818 
819 /* ARGSUSED3 */
820 static int
821 tmp_lookup(
822 	struct vnode *dvp,
823 	char *nm,
824 	struct vnode **vpp,
825 	struct pathname *pnp,
826 	int flags,
827 	struct vnode *rdir,
828 	struct cred *cred,
829 	caller_context_t *ct,
830 	int *direntflags,
831 	pathname_t *realpnp)
832 {
833 	struct tmpnode *tp = (struct tmpnode *)VTOTN(dvp);
834 	struct tmpnode *ntp = NULL;
835 	int error;
836 
837 
838 	/* allow cd into @ dir */
839 	if (flags & LOOKUP_XATTR) {
840 		struct tmpnode *xdp;
841 		struct tmount *tm;
842 
843 		/*
844 		 * don't allow attributes unless mounted with XATTR support
845 		 */
846 		if (!(dvp->v_vfsp->vfs_flag & VFS_XATTR))
847 			return (EINVAL);
848 
849 		if (tp->tn_flags & ISXATTR)
850 			/* No attributes on attributes */
851 			return (EINVAL);
852 
853 		rw_enter(&tp->tn_rwlock, RW_WRITER);
854 		if (tp->tn_xattrdp == NULL) {
855 			if (!(flags & CREATE_XATTR_DIR)) {
856 				rw_exit(&tp->tn_rwlock);
857 				return (ENOENT);
858 			}
859 
860 			/*
861 			 * No attribute directory exists for this
862 			 * node - create the attr dir as a side effect
863 			 * of this lookup.
864 			 */
865 
866 			/*
867 			 * Make sure we have adequate permission...
868 			 */
869 
870 			if ((error = tmp_taccess(tp, VWRITE, cred)) != 0) {
871 				rw_exit(&tp->tn_rwlock);
872 				return (error);
873 			}
874 
875 			xdp = tmp_memalloc(sizeof (struct tmpnode),
876 			    TMP_MUSTHAVE);
877 			tm = VTOTM(dvp);
878 			tmpnode_init(tm, xdp, &tp->tn_attr, NULL);
879 			/*
880 			 * Fix-up fields unique to attribute directories.
881 			 */
882 			xdp->tn_flags = ISXATTR;
883 			xdp->tn_type = VDIR;
884 			if (tp->tn_type == VDIR) {
885 				xdp->tn_mode = tp->tn_attr.va_mode;
886 			} else {
887 				xdp->tn_mode = 0700;
888 				if (tp->tn_attr.va_mode & 0040)
889 					xdp->tn_mode |= 0750;
890 				if (tp->tn_attr.va_mode & 0004)
891 					xdp->tn_mode |= 0705;
892 			}
893 			xdp->tn_vnode->v_type = VDIR;
894 			xdp->tn_vnode->v_flag |= V_XATTRDIR;
895 			tdirinit(tp, xdp);
896 			tp->tn_xattrdp = xdp;
897 		} else {
898 			VN_HOLD(tp->tn_xattrdp->tn_vnode);
899 		}
900 		*vpp = TNTOV(tp->tn_xattrdp);
901 		rw_exit(&tp->tn_rwlock);
902 		return (0);
903 	}
904 
905 	/*
906 	 * Null component name is a synonym for directory being searched.
907 	 */
908 	if (*nm == '\0') {
909 		VN_HOLD(dvp);
910 		*vpp = dvp;
911 		return (0);
912 	}
913 	ASSERT(tp);
914 
915 	error = tdirlookup(tp, nm, &ntp, cred);
916 
917 	if (error == 0) {
918 		ASSERT(ntp);
919 		*vpp = TNTOV(ntp);
920 		/*
921 		 * If vnode is a device return special vnode instead
922 		 */
923 		if (IS_DEVVP(*vpp)) {
924 			struct vnode *newvp;
925 
926 			newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type,
927 			    cred);
928 			VN_RELE(*vpp);
929 			*vpp = newvp;
930 		}
931 	}
932 	TRACE_4(TR_FAC_TMPFS, TR_TMPFS_LOOKUP,
933 	    "tmpfs lookup:vp %p name %s vpp %p error %d",
934 	    dvp, nm, vpp, error);
935 	return (error);
936 }
937 
938 /*ARGSUSED7*/
939 static int
940 tmp_create(
941 	struct vnode *dvp,
942 	char *nm,
943 	struct vattr *vap,
944 	enum vcexcl exclusive,
945 	int mode,
946 	struct vnode **vpp,
947 	struct cred *cred,
948 	int flag,
949 	caller_context_t *ct,
950 	vsecattr_t *vsecp)
951 {
952 	struct tmpnode *parent;
953 	struct tmount *tm;
954 	struct tmpnode *self;
955 	int error;
956 	struct tmpnode *oldtp;
957 
958 again:
959 	parent = (struct tmpnode *)VTOTN(dvp);
960 	tm = (struct tmount *)VTOTM(dvp);
961 	self = NULL;
962 	error = 0;
963 	oldtp = NULL;
964 
965 	/* device files not allowed in ext. attr dirs */
966 	if ((parent->tn_flags & ISXATTR) &&
967 	    (vap->va_type == VBLK || vap->va_type == VCHR ||
968 	    vap->va_type == VFIFO || vap->va_type == VDOOR ||
969 	    vap->va_type == VSOCK || vap->va_type == VPORT))
970 			return (EINVAL);
971 
972 	if (vap->va_type == VREG && (vap->va_mode & VSVTX)) {
973 		/* Must be privileged to set sticky bit */
974 		if (secpolicy_vnode_stky_modify(cred))
975 			vap->va_mode &= ~VSVTX;
976 	} else if (vap->va_type == VNON) {
977 		return (EINVAL);
978 	}
979 
980 	/*
981 	 * Null component name is a synonym for directory being searched.
982 	 */
983 	if (*nm == '\0') {
984 		VN_HOLD(dvp);
985 		oldtp = parent;
986 	} else {
987 		error = tdirlookup(parent, nm, &oldtp, cred);
988 	}
989 
990 	if (error == 0) {	/* name found */
991 		boolean_t trunc = B_FALSE;
992 
993 		ASSERT(oldtp);
994 
995 		rw_enter(&oldtp->tn_rwlock, RW_WRITER);
996 
997 		/*
998 		 * if creating an existing directory
999 		 * read-only, allow it
1000 		 */
1001 		if (exclusive == EXCL)
1002 			error = EEXIST;
1003 		else if ((oldtp->tn_type == VDIR) && (mode & VWRITE))
1004 			error = EISDIR;
1005 		else {
1006 			error = tmp_taccess(oldtp, mode, cred);
1007 		}
1008 
1009 		if (error) {
1010 			rw_exit(&oldtp->tn_rwlock);
1011 			tmpnode_rele(oldtp);
1012 			return (error);
1013 		}
1014 		*vpp = TNTOV(oldtp);
1015 		if ((*vpp)->v_type == VREG && (vap->va_mask & AT_SIZE) &&
1016 		    vap->va_size == 0) {
1017 			rw_enter(&oldtp->tn_contents, RW_WRITER);
1018 			(void) tmpnode_trunc(tm, oldtp, 0);
1019 			rw_exit(&oldtp->tn_contents);
1020 			trunc = B_TRUE;
1021 		}
1022 		rw_exit(&oldtp->tn_rwlock);
1023 		if (IS_DEVVP(*vpp)) {
1024 			struct vnode *newvp;
1025 
1026 			newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type,
1027 			    cred);
1028 			VN_RELE(*vpp);
1029 			if (newvp == NULL) {
1030 				return (ENOSYS);
1031 			}
1032 			*vpp = newvp;
1033 		}
1034 
1035 		if (trunc)
1036 			vnevent_create(*vpp, ct);
1037 
1038 		return (0);
1039 	}
1040 
1041 	if (error != ENOENT)
1042 		return (error);
1043 
1044 	rw_enter(&parent->tn_rwlock, RW_WRITER);
1045 	error = tdirenter(tm, parent, nm, DE_CREATE,
1046 	    (struct tmpnode *)NULL, (struct tmpnode *)NULL,
1047 	    vap, &self, cred, ct);
1048 	rw_exit(&parent->tn_rwlock);
1049 
1050 	if (error) {
1051 		if (self)
1052 			tmpnode_rele(self);
1053 
1054 		if (error == EEXIST) {
1055 			/*
1056 			 * This means that the file was created sometime
1057 			 * between when we checked and did not find it and
1058 			 * when we went to create it.
1059 			 * Since creat() is supposed to truncate a file
1060 			 * that already exists, go back to the beginning
1061 			 * of the function. This time we will find it
1062 			 * and go down the tmpnode_trunc() path.
1063 			 */
1064 			goto again;
1065 		}
1066 		return (error);
1067 	}
1068 
1069 	*vpp = TNTOV(self);
1070 
1071 	if (!error && IS_DEVVP(*vpp)) {
1072 		struct vnode *newvp;
1073 
1074 		newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cred);
1075 		VN_RELE(*vpp);
1076 		if (newvp == NULL)
1077 			return (ENOSYS);
1078 		*vpp = newvp;
1079 	}
1080 	TRACE_3(TR_FAC_TMPFS, TR_TMPFS_CREATE,
1081 	    "tmpfs create:dvp %p nm %s vpp %p", dvp, nm, vpp);
1082 	return (0);
1083 }
1084 
1085 /* ARGSUSED3 */
1086 static int
1087 tmp_remove(
1088 	struct vnode *dvp,
1089 	char *nm,
1090 	struct cred *cred,
1091 	caller_context_t *ct,
1092 	int flags)
1093 {
1094 	struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
1095 	int error;
1096 	struct tmpnode *tp = NULL;
1097 
1098 	error = tdirlookup(parent, nm, &tp, cred);
1099 	if (error)
1100 		return (error);
1101 
1102 	ASSERT(tp);
1103 	rw_enter(&parent->tn_rwlock, RW_WRITER);
1104 	rw_enter(&tp->tn_rwlock, RW_WRITER);
1105 
1106 	if (tp->tn_type != VDIR ||
1107 	    (error = secpolicy_fs_linkdir(cred, dvp->v_vfsp)) == 0)
1108 		error = tdirdelete(parent, tp, nm, tp->tn_type == VDIR ?
1109 		    DR_RMDIR : DR_REMOVE, cred);
1110 
1111 	rw_exit(&tp->tn_rwlock);
1112 	rw_exit(&parent->tn_rwlock);
1113 	vnevent_remove(TNTOV(tp), dvp, nm, ct);
1114 	tmpnode_rele(tp);
1115 
1116 	TRACE_3(TR_FAC_TMPFS, TR_TMPFS_REMOVE,
1117 	    "tmpfs remove:dvp %p nm %s error %d", dvp, nm, error);
1118 	return (error);
1119 }
1120 
1121 /* ARGSUSED4 */
1122 static int
1123 tmp_link(
1124 	struct vnode *dvp,
1125 	struct vnode *srcvp,
1126 	char *tnm,
1127 	struct cred *cred,
1128 	caller_context_t *ct,
1129 	int flags)
1130 {
1131 	struct tmpnode *parent;
1132 	struct tmpnode *from;
1133 	struct tmount *tm = (struct tmount *)VTOTM(dvp);
1134 	int error;
1135 	struct tmpnode *found = NULL;
1136 	struct vnode *realvp;
1137 
1138 	if (VOP_REALVP(srcvp, &realvp, ct) == 0)
1139 		srcvp = realvp;
1140 
1141 	parent = (struct tmpnode *)VTOTN(dvp);
1142 	from = (struct tmpnode *)VTOTN(srcvp);
1143 
1144 	if ((srcvp->v_type == VDIR &&
1145 	    secpolicy_fs_linkdir(cred, dvp->v_vfsp)) ||
1146 	    (from->tn_uid != crgetuid(cred) && secpolicy_basic_link(cred)))
1147 		return (EPERM);
1148 
1149 	/*
1150 	 * Make sure link for extended attributes is valid
1151 	 * We only support hard linking of xattr's in xattrdir to an xattrdir
1152 	 */
1153 	if ((from->tn_flags & ISXATTR) != (parent->tn_flags & ISXATTR))
1154 		return (EINVAL);
1155 
1156 	error = tdirlookup(parent, tnm, &found, cred);
1157 	if (error == 0) {
1158 		ASSERT(found);
1159 		tmpnode_rele(found);
1160 		return (EEXIST);
1161 	}
1162 
1163 	if (error != ENOENT)
1164 		return (error);
1165 
1166 	rw_enter(&parent->tn_rwlock, RW_WRITER);
1167 	error = tdirenter(tm, parent, tnm, DE_LINK, (struct tmpnode *)NULL,
1168 	    from, NULL, (struct tmpnode **)NULL, cred, ct);
1169 	rw_exit(&parent->tn_rwlock);
1170 	if (error == 0) {
1171 		vnevent_link(srcvp, ct);
1172 	}
1173 	return (error);
1174 }
1175 
1176 /* ARGSUSED5 */
1177 static int
1178 tmp_rename(
1179 	struct vnode *odvp,	/* source parent vnode */
1180 	char *onm,		/* source name */
1181 	struct vnode *ndvp,	/* destination parent vnode */
1182 	char *nnm,		/* destination name */
1183 	struct cred *cred,
1184 	caller_context_t *ct,
1185 	int flags)
1186 {
1187 	struct tmpnode *fromparent;
1188 	struct tmpnode *toparent;
1189 	struct tmpnode *fromtp = NULL;	/* source tmpnode */
1190 	struct tmpnode *totp;		/* target tmpnode */
1191 	struct tmount *tm = (struct tmount *)VTOTM(odvp);
1192 	int error;
1193 	int samedir = 0;	/* set if odvp == ndvp */
1194 	struct vnode *realvp;
1195 
1196 	if (VOP_REALVP(ndvp, &realvp, ct) == 0)
1197 		ndvp = realvp;
1198 
1199 	fromparent = (struct tmpnode *)VTOTN(odvp);
1200 	toparent = (struct tmpnode *)VTOTN(ndvp);
1201 
1202 	if ((fromparent->tn_flags & ISXATTR) != (toparent->tn_flags & ISXATTR))
1203 		return (EINVAL);
1204 
1205 	mutex_enter(&tm->tm_renamelck);
1206 
1207 	/*
1208 	 * Look up tmpnode of file we're supposed to rename.
1209 	 */
1210 	error = tdirlookup(fromparent, onm, &fromtp, cred);
1211 	if (error) {
1212 		mutex_exit(&tm->tm_renamelck);
1213 		return (error);
1214 	}
1215 
1216 	/*
1217 	 * Make sure we can delete the old (source) entry.  This
1218 	 * requires write permission on the containing directory.  If
1219 	 * that directory is "sticky" it requires further checks.
1220 	 */
1221 	if (((error = tmp_taccess(fromparent, VWRITE, cred)) != 0) ||
1222 	    (error = tmp_sticky_remove_access(fromparent, fromtp, cred)) != 0)
1223 		goto done;
1224 
1225 	/*
1226 	 * Check for renaming to or from '.' or '..' or that
1227 	 * fromtp == fromparent
1228 	 */
1229 	if ((onm[0] == '.' &&
1230 	    (onm[1] == '\0' || (onm[1] == '.' && onm[2] == '\0'))) ||
1231 	    (nnm[0] == '.' &&
1232 	    (nnm[1] == '\0' || (nnm[1] == '.' && nnm[2] == '\0'))) ||
1233 	    (fromparent == fromtp)) {
1234 		error = EINVAL;
1235 		goto done;
1236 	}
1237 
1238 	samedir = (fromparent == toparent);
1239 	/*
1240 	 * Make sure we can search and rename into the new
1241 	 * (destination) directory.
1242 	 */
1243 	if (!samedir) {
1244 		error = tmp_taccess(toparent, VEXEC|VWRITE, cred);
1245 		if (error)
1246 			goto done;
1247 	}
1248 
1249 	if (tdirlookup(toparent, nnm, &totp, cred) == 0) {
1250 		vnevent_pre_rename_dest(TNTOV(totp), ndvp, nnm, ct);
1251 		tmpnode_rele(totp);
1252 	}
1253 
1254 	/* Notify the target dir. if not the same as the source dir. */
1255 	if (ndvp != odvp) {
1256 		vnevent_pre_rename_dest_dir(ndvp, TNTOV(fromtp), nnm, ct);
1257 	}
1258 
1259 	vnevent_pre_rename_src(TNTOV(fromtp), odvp, onm, ct);
1260 
1261 	/*
1262 	 * Link source to new target
1263 	 */
1264 	rw_enter(&toparent->tn_rwlock, RW_WRITER);
1265 	error = tdirenter(tm, toparent, nnm, DE_RENAME,
1266 	    fromparent, fromtp, (struct vattr *)NULL,
1267 	    (struct tmpnode **)NULL, cred, ct);
1268 	rw_exit(&toparent->tn_rwlock);
1269 
1270 	if (error) {
1271 		/*
1272 		 * ESAME isn't really an error; it indicates that the
1273 		 * operation should not be done because the source and target
1274 		 * are the same file, but that no error should be reported.
1275 		 */
1276 		if (error == ESAME)
1277 			error = 0;
1278 		goto done;
1279 	}
1280 
1281 	/*
1282 	 * Unlink from source.
1283 	 */
1284 	rw_enter(&fromparent->tn_rwlock, RW_WRITER);
1285 	rw_enter(&fromtp->tn_rwlock, RW_WRITER);
1286 
1287 	error = tdirdelete(fromparent, fromtp, onm, DR_RENAME, cred);
1288 
1289 	/*
1290 	 * The following handles the case where our source tmpnode was
1291 	 * removed before we got to it.
1292 	 *
1293 	 * XXX We should also clean up properly in the case where tdirdelete
1294 	 * fails for some other reason.  Currently this case shouldn't happen.
1295 	 * (see 1184991).
1296 	 */
1297 	if (error == ENOENT)
1298 		error = 0;
1299 
1300 	rw_exit(&fromtp->tn_rwlock);
1301 	rw_exit(&fromparent->tn_rwlock);
1302 
1303 	if (error == 0) {
1304 		vnevent_rename_src(TNTOV(fromtp), odvp, onm, ct);
1305 		/*
1306 		 * vnevent_rename_dest is called in tdirenter().
1307 		 * Notify the target dir if not same as source dir.
1308 		 */
1309 		if (ndvp != odvp)
1310 			vnevent_rename_dest_dir(ndvp, ct);
1311 	}
1312 
1313 done:
1314 	tmpnode_rele(fromtp);
1315 	mutex_exit(&tm->tm_renamelck);
1316 
1317 	TRACE_5(TR_FAC_TMPFS, TR_TMPFS_RENAME,
1318 	    "tmpfs rename:ovp %p onm %s nvp %p nnm %s error %d", odvp, onm,
1319 	    ndvp, nnm, error);
1320 	return (error);
1321 }
1322 
1323 /* ARGSUSED5 */
1324 static int
1325 tmp_mkdir(
1326 	struct vnode *dvp,
1327 	char *nm,
1328 	struct vattr *va,
1329 	struct vnode **vpp,
1330 	struct cred *cred,
1331 	caller_context_t *ct,
1332 	int flags,
1333 	vsecattr_t *vsecp)
1334 {
1335 	struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
1336 	struct tmpnode *self = NULL;
1337 	struct tmount *tm = (struct tmount *)VTOTM(dvp);
1338 	int error;
1339 
1340 	/* no new dirs allowed in xattr dirs */
1341 	if (parent->tn_flags & ISXATTR)
1342 		return (EINVAL);
1343 
1344 	/*
1345 	 * Might be a dangling directory.  Catch it here,
1346 	 * because an ENOENT return from tdirlookup() is
1347 	 * an "o.k. return".
1348 	 */
1349 	if (parent->tn_nlink == 0)
1350 		return (ENOENT);
1351 
1352 	error = tdirlookup(parent, nm, &self, cred);
1353 	if (error == 0) {
1354 		ASSERT(self);
1355 		tmpnode_rele(self);
1356 		return (EEXIST);
1357 	}
1358 	if (error != ENOENT)
1359 		return (error);
1360 
1361 	rw_enter(&parent->tn_rwlock, RW_WRITER);
1362 	error = tdirenter(tm, parent, nm, DE_MKDIR, (struct tmpnode *)NULL,
1363 	    (struct tmpnode *)NULL, va, &self, cred, ct);
1364 	if (error) {
1365 		rw_exit(&parent->tn_rwlock);
1366 		if (self)
1367 			tmpnode_rele(self);
1368 		return (error);
1369 	}
1370 	rw_exit(&parent->tn_rwlock);
1371 	*vpp = TNTOV(self);
1372 	return (0);
1373 }
1374 
1375 /* ARGSUSED4 */
1376 static int
1377 tmp_rmdir(
1378 	struct vnode *dvp,
1379 	char *nm,
1380 	struct vnode *cdir,
1381 	struct cred *cred,
1382 	caller_context_t *ct,
1383 	int flags)
1384 {
1385 	struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
1386 	struct tmpnode *self = NULL;
1387 	struct vnode *vp;
1388 	int error = 0;
1389 
1390 	/*
1391 	 * Return error when removing . and ..
1392 	 */
1393 	if (strcmp(nm, ".") == 0)
1394 		return (EINVAL);
1395 	if (strcmp(nm, "..") == 0)
1396 		return (EEXIST); /* Should be ENOTEMPTY */
1397 	error = tdirlookup(parent, nm, &self, cred);
1398 	if (error)
1399 		return (error);
1400 
1401 	rw_enter(&parent->tn_rwlock, RW_WRITER);
1402 	rw_enter(&self->tn_rwlock, RW_WRITER);
1403 
1404 	vp = TNTOV(self);
1405 	if (vp == dvp || vp == cdir) {
1406 		error = EINVAL;
1407 		goto done1;
1408 	}
1409 	if (self->tn_type != VDIR) {
1410 		error = ENOTDIR;
1411 		goto done1;
1412 	}
1413 
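	/*
	 * A directory with no subdirectories has a link count of 2
	 * ("." plus its entry in the parent); a larger count means
	 * subdirectories remain.
	 */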
1414 	mutex_enter(&self->tn_tlock);
1415 	if (self->tn_nlink > 2) {
1416 		mutex_exit(&self->tn_tlock);
1417 		error = EEXIST;
1418 		goto done1;
1419 	}
1420 	mutex_exit(&self->tn_tlock);
1421 
1422 	if (vn_vfswlock(vp)) {
1423 		error = EBUSY;
1424 		goto done1;
1425 	}
1426 	if (vn_mountedvfs(vp) != NULL) {
1427 		error = EBUSY;
1428 		goto done;
1429 	}
1430 
1431 	/*
1432 	 * Check for an empty directory
1433 	 * i.e. one that only includes entries for "." and ".."
1434 	 */
1435 	if (self->tn_dirents > 2) {
1436 		error = EEXIST;		/* SIGH should be ENOTEMPTY */
1437 		/*
1438 		 * Update atime because checking tn_dirents is logically
1439 		 * equivalent to reading the directory
1440 		 */
1441 		gethrestime(&self->tn_atime);
1442 		goto done;
1443 	}
1444 
1445 	error = tdirdelete(parent, self, nm, DR_RMDIR, cred);
1446 done:
1447 	vn_vfsunlock(vp);
1448 done1:
1449 	rw_exit(&self->tn_rwlock);
1450 	rw_exit(&parent->tn_rwlock);
1451 	vnevent_rmdir(TNTOV(self), dvp, nm, ct);
1452 	tmpnode_rele(self);
1453 
1454 	return (error);
1455 }
1456 
1457 /* ARGSUSED2 */
1458 static int
1459 tmp_readdir(
1460 	struct vnode *vp,
1461 	struct uio *uiop,
1462 	struct cred *cred,
1463 	int *eofp,
1464 	caller_context_t *ct,
1465 	int flags)
1466 {
1467 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1468 	struct tdirent *tdp;
1469 	int error = 0;
1470 	size_t namelen;
1471 	struct dirent64 *dp;
1472 	ulong_t offset;
1473 	ulong_t total_bytes_wanted;
1474 	long outcount = 0;
1475 	long bufsize;
1476 	int reclen;
1477 	caddr_t outbuf;
1478 
1479 	if (uiop->uio_loffset >= MAXOFF_T) {
1480 		if (eofp)
1481 			*eofp = 1;
1482 		return (0);
1483 	}
1484 	/*
1485 	 * assuming system call has already called tmp_rwlock
1486 	 */
1487 	ASSERT(RW_READ_HELD(&tp->tn_rwlock));
1488 
1489 	if (uiop->uio_iovcnt != 1)
1490 		return (EINVAL);
1491 
1492 	if (vp->v_type != VDIR)
1493 		return (ENOTDIR);
1494 
1495 	/*
1496 	 * There's a window here where someone could have removed
1497 	 * all the entries in the directory after we put a hold on the
1498 	 * vnode but before we grabbed the rwlock.  Just return.
1499 	 */
1500 	if (tp->tn_dir == NULL) {
1501 		if (tp->tn_nlink) {
1502 			panic("empty directory 0x%p", (void *)tp);
1503 			/*NOTREACHED*/
1504 		}
1505 		return (0);
1506 	}
1507 
1508 	/*
1509 	 * Get space for multiple directory entries
1510 	 */
1511 	total_bytes_wanted = uiop->uio_iov->iov_len;
1512 	bufsize = total_bytes_wanted + sizeof (struct dirent64);
1513 	outbuf = kmem_alloc(bufsize, KM_SLEEP);
1514 
1515 	dp = (struct dirent64 *)outbuf;
1516 
1517 
1518 	offset = 0;
1519 	tdp = tp->tn_dir;
1520 	while (tdp) {
1521 		namelen = strlen(tdp->td_name);	/* no +1 needed */
1522 		offset = tdp->td_offset;
1523 		if (offset >= uiop->uio_offset) {
1524 			reclen = (int)DIRENT64_RECLEN(namelen);
1525 			if (outcount + reclen > total_bytes_wanted) {
1526 				if (!outcount)
1527 					/*
1528 					 * Buffer too small for any entries.
1529 					 */
1530 					error = EINVAL;
1531 				break;
1532 			}
1533 			ASSERT(tdp->td_tmpnode != NULL);
1534 
1535 			/* use strncpy(9f) to zero out uninitialized bytes */
1536 
1537 			(void) strncpy(dp->d_name, tdp->td_name,
1538 			    DIRENT64_NAMELEN(reclen));
1539 			dp->d_reclen = (ushort_t)reclen;
1540 			dp->d_ino = (ino64_t)tdp->td_tmpnode->tn_nodeid;
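			/*
			 * d_off is the offset of the next entry; a later
			 * readdir starting there will skip this one.
			 */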
1541 			dp->d_off = (offset_t)tdp->td_offset + 1;
1542 			dp = (struct dirent64 *)
1543 			    ((uintptr_t)dp + dp->d_reclen);
1544 			outcount += reclen;
1545 			ASSERT(outcount <= bufsize);
1546 		}
1547 		tdp = tdp->td_next;
1548 	}
1549 
1550 	if (!error)
1551 		error = uiomove(outbuf, outcount, UIO_READ, uiop);
1552 
1553 	if (!error) {
1554 		/* If we reached the end of the list our offset */
1555 		/* should now be just past the end. */
1556 		if (!tdp) {
1557 			offset += 1;
1558 			if (eofp)
1559 				*eofp = 1;
1560 		} else if (eofp)
1561 			*eofp = 0;
1562 		uiop->uio_offset = offset;
1563 	}
1564 	gethrestime(&tp->tn_atime);
1565 	kmem_free(outbuf, bufsize);
1566 	return (error);
1567 }
1568 
1569 /* ARGSUSED5 */
1570 static int
1571 tmp_symlink(
1572 	struct vnode *dvp,
1573 	char *lnm,
1574 	struct vattr *tva,
1575 	char *tnm,
1576 	struct cred *cred,
1577 	caller_context_t *ct,
1578 	int flags)
1579 {
1580 	struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
1581 	struct tmpnode *self = (struct tmpnode *)NULL;
1582 	struct tmount *tm = (struct tmount *)VTOTM(dvp);
1583 	char *cp = NULL;
1584 	int error;
1585 	size_t len;
1586 
1587 	/* no symlinks allowed to files in xattr dirs */
1588 	if (parent->tn_flags & ISXATTR)
1589 		return (EINVAL);
1590 
1591 	error = tdirlookup(parent, lnm, &self, cred);
1592 	if (error == 0) {
1593 		/*
1594 		 * The entry already exists
1595 		 */
1596 		tmpnode_rele(self);
1597 		return (EEXIST);	/* was 0 */
1598 	}
1599 
1600 	if (error != ENOENT) {
1601 		if (self != NULL)
1602 			tmpnode_rele(self);
1603 		return (error);
1604 	}
1605 
1606 	rw_enter(&parent->tn_rwlock, RW_WRITER);
1607 	error = tdirenter(tm, parent, lnm, DE_CREATE, (struct tmpnode *)NULL,
1608 	    (struct tmpnode *)NULL, tva, &self, cred, ct);
1609 	rw_exit(&parent->tn_rwlock);
1610 
1611 	if (error) {
1612 		if (self)
1613 			tmpnode_rele(self);
1614 		return (error);
1615 	}
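	/*
	 * Copy the link target into tmpfs-allocated memory; tn_size is
	 * the target length excluding the terminating NUL.
	 */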
1616 	len = strlen(tnm) + 1;
1617 	cp = tmp_memalloc(len, 0);
1618 	if (cp == NULL) {
1619 		tmpnode_rele(self);
1620 		return (ENOSPC);
1621 	}
1622 	(void) strcpy(cp, tnm);
1623 
1624 	self->tn_symlink = cp;
1625 	self->tn_size = len - 1;
1626 	tmpnode_rele(self);
1627 	return (error);
1628 }
1629 
1630 /* ARGSUSED2 */
1631 static int
1632 tmp_readlink(
1633 	struct vnode *vp,
1634 	struct uio *uiop,
1635 	struct cred *cred,
1636 	caller_context_t *ct)
1637 {
1638 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1639 	int error = 0;
1640 
1641 	if (vp->v_type != VLNK)
1642 		return (EINVAL);
1643 
1644 	rw_enter(&tp->tn_rwlock, RW_READER);
1645 	rw_enter(&tp->tn_contents, RW_READER);
1646 	error = uiomove(tp->tn_symlink, tp->tn_size, UIO_READ, uiop);
1647 	gethrestime(&tp->tn_atime);
1648 	rw_exit(&tp->tn_contents);
1649 	rw_exit(&tp->tn_rwlock);
1650 	return (error);
1651 }
1652 
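/*
 * tmpfs file data lives only in memory and anonymous swap, so there is no
 * stable backing store to flush; fsync is therefore a no-op.
 */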
1653 /* ARGSUSED */
1654 static int
1655 tmp_fsync(
1656 	struct vnode *vp,
1657 	int syncflag,
1658 	struct cred *cred,
1659 	caller_context_t *ct)
1660 {
1661 	return (0);
1662 }
1663 
1664 /* ARGSUSED */
1665 static void
1666 tmp_inactive(struct vnode *vp, struct cred *cred, caller_context_t *ct)
1667 {
1668 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1669 	struct tmount *tm = (struct tmount *)VFSTOTM(vp->v_vfsp);
1670 
1671 	rw_enter(&tp->tn_rwlock, RW_WRITER);
1672 top:
1673 	mutex_enter(&tp->tn_tlock);
1674 	mutex_enter(&vp->v_lock);
1675 	ASSERT(vp->v_count >= 1);
1676 
1677 	/*
1678 	 * If we don't have the last hold or the link count is non-zero,
1679 	 * there's little to do -- just drop our hold.
1680 	 */
1681 	if (vp->v_count > 1 || tp->tn_nlink != 0) {
1682 		vp->v_count--;
1683 		mutex_exit(&vp->v_lock);
1684 		mutex_exit(&tp->tn_tlock);
1685 		rw_exit(&tp->tn_rwlock);
1686 		return;
1687 	}
1688 
1689 	/*
1690 	 * We have the last hold *and* the link count is zero, so this
1691 	 * tmpnode is dead from the filesystem's viewpoint.  However,
1692 	 * if the tmpnode has any pages associated with it (i.e. if it's
1693 	 * a normal file with non-zero size), the tmpnode can still be
1694 	 * discovered by pageout or fsflush via the page vnode pointers.
1695 	 * In this case we must drop all our locks, truncate the tmpnode,
1696 	 * and try the whole dance again.
1697 	 */
1698 	if (tp->tn_size != 0) {
1699 		if (tp->tn_type == VREG) {
1700 			mutex_exit(&vp->v_lock);
1701 			mutex_exit(&tp->tn_tlock);
1702 			rw_enter(&tp->tn_contents, RW_WRITER);
1703 			(void) tmpnode_trunc(tm, tp, 0);
1704 			rw_exit(&tp->tn_contents);
1705 			ASSERT(tp->tn_size == 0);
1706 			ASSERT(tp->tn_nblocks == 0);
1707 			goto top;
1708 		}
1709 		if (tp->tn_type == VLNK)
1710 			tmp_memfree(tp->tn_symlink, tp->tn_size + 1);
1711 	}
1712 
1713 	/*
1714 	 * Remove normal file/dir's xattr dir and xattrs.
1715 	 */
1716 	if (tp->tn_xattrdp) {
1717 		struct tmpnode *xtp = tp->tn_xattrdp;
1718 
1719 		ASSERT(xtp->tn_flags & ISXATTR);
1720 		tmpnode_hold(xtp);
1721 		rw_enter(&xtp->tn_rwlock, RW_WRITER);
1722 		tdirtrunc(xtp);
1723 		DECR_COUNT(&xtp->tn_nlink, &xtp->tn_tlock);
1724 		tp->tn_xattrdp = NULL;
1725 		rw_exit(&xtp->tn_rwlock);
1726 		tmpnode_rele(xtp);
1727 	}
1728 
1729 	mutex_exit(&vp->v_lock);
1730 	mutex_exit(&tp->tn_tlock);
1731 	/* Here's our chance to send invalid event while we're between locks */
1732 	vn_invalid(TNTOV(tp));
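	/*
	 * Unlink the tmpnode from the mount's list of tmpnodes, then free
	 * the vnode and the tmpnode itself.
	 */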
1733 	mutex_enter(&tm->tm_contents);
1734 	if (tp->tn_forw == NULL)
1735 		tm->tm_rootnode->tn_back = tp->tn_back;
1736 	else
1737 		tp->tn_forw->tn_back = tp->tn_back;
1738 	tp->tn_back->tn_forw = tp->tn_forw;
1739 	mutex_exit(&tm->tm_contents);
1740 	rw_exit(&tp->tn_rwlock);
1741 	rw_destroy(&tp->tn_rwlock);
1742 	mutex_destroy(&tp->tn_tlock);
1743 	vn_free(TNTOV(tp));
1744 	tmp_memfree(tp, sizeof (struct tmpnode));
1745 }
1746 
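/*
 * Pack the tmpnode's node id and generation number into a file identifier;
 * the generation number lets a stale handle be detected if the node id is
 * ever reused.
 */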
1747 /* ARGSUSED2 */
1748 static int
1749 tmp_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
1750 {
1751 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1752 	struct tfid *tfid;
1753 
1754 	if (fidp->fid_len < (sizeof (struct tfid) - sizeof (ushort_t))) {
1755 		fidp->fid_len = sizeof (struct tfid) - sizeof (ushort_t);
1756 		return (ENOSPC);
1757 	}
1758 
1759 	tfid = (struct tfid *)fidp;
1760 	bzero(tfid, sizeof (struct tfid));
1761 	tfid->tfid_len = (int)sizeof (struct tfid) - sizeof (ushort_t);
1762 
1763 	tfid->tfid_ino = tp->tn_nodeid;
1764 	tfid->tfid_gen = tp->tn_gen;
1765 
1766 	return (0);
1767 }
1768 
1769 
1770 /*
1771  * Return all the pages from [off..off+len] in given file
1772  */
1773 /* ARGSUSED */
1774 static int
1775 tmp_getpage(
1776 	struct vnode *vp,
1777 	offset_t off,
1778 	size_t len,
1779 	uint_t *protp,
1780 	page_t *pl[],
1781 	size_t plsz,
1782 	struct seg *seg,
1783 	caddr_t addr,
1784 	enum seg_rw rw,
1785 	struct cred *cr,
1786 	caller_context_t *ct)
1787 {
1788 	int err = 0;
1789 	struct tmpnode *tp = VTOTN(vp);
1790 	anoff_t toff = (anoff_t)off;
1791 	size_t tlen = len;
1792 	u_offset_t tmpoff;
1793 	timestruc_t now;
1794 
1795 	rw_enter(&tp->tn_contents, RW_READER);
1796 
1797 	if (off + len  > tp->tn_size + PAGEOFFSET) {
1798 		err = EFAULT;
1799 		goto out;
1800 	}
1801 	/*
1802 	 * Look for holes (no anon slot) in faulting range. If there are
1803 	 * holes we have to switch to a write lock and fill them in. Swap
1804 	 * space for holes was already reserved when the file was grown.
1805 	 */
1806 	tmpoff = toff;
1807 	if (non_anon(tp->tn_anon, btop(off), &tmpoff, &tlen)) {
1808 		if (!rw_tryupgrade(&tp->tn_contents)) {
1809 			rw_exit(&tp->tn_contents);
1810 			rw_enter(&tp->tn_contents, RW_WRITER);
1811 			/* Size may have changed when lock was dropped */
1812 			if (off + len  > tp->tn_size + PAGEOFFSET) {
1813 				err = EFAULT;
1814 				goto out;
1815 			}
1816 		}
1817 		for (toff = (anoff_t)off; toff < (anoff_t)off + len;
1818 		    toff += PAGESIZE) {
1819 			if (anon_get_ptr(tp->tn_anon, btop(toff)) == NULL) {
1820 				/* XXX - may allocate mem w. write lock held */
1821 				(void) anon_set_ptr(tp->tn_anon, btop(toff),
1822 				    anon_alloc(vp, toff), ANON_SLEEP);
1823 				tp->tn_nblocks++;
1824 			}
1825 		}
1826 		rw_downgrade(&tp->tn_contents);
1827 	}
1828 
1829 
1830 	err = pvn_getpages(tmp_getapage, vp, (u_offset_t)off, len, protp,
1831 	    pl, plsz, seg, addr, rw, cr);
1832 
1833 	gethrestime(&now);
1834 	tp->tn_atime = now;
1835 	if (rw == S_WRITE)
1836 		tp->tn_mtime = now;
1837 
1838 out:
1839 	rw_exit(&tp->tn_contents);
1840 	return (err);
1841 }
1842 
1843 /*
1844  * Called from pvn_getpages to get a particular page.
1845  */
1846 /*ARGSUSED*/
1847 static int
1848 tmp_getapage(
1849 	struct vnode *vp,
1850 	u_offset_t off,
1851 	size_t len,
1852 	uint_t *protp,
1853 	page_t *pl[],
1854 	size_t plsz,
1855 	struct seg *seg,
1856 	caddr_t addr,
1857 	enum seg_rw rw,
1858 	struct cred *cr)
1859 {
1860 	struct page *pp;
1861 	int flags;
1862 	int err = 0;
1863 	struct vnode *pvp;
1864 	u_offset_t poff;
1865 
1866 	if (protp != NULL)
1867 		*protp = PROT_ALL;
1868 again:
1869 	if (pp = page_lookup(vp, off, rw == S_CREATE ? SE_EXCL : SE_SHARED)) {
1870 		if (pl) {
1871 			pl[0] = pp;
1872 			pl[1] = NULL;
1873 		} else {
1874 			page_unlock(pp);
1875 		}
1876 	} else {
1877 		pp = page_create_va(vp, off, PAGESIZE,
1878 		    PG_WAIT | PG_EXCL, seg, addr);
1879 		/*
1880 		 * Someone raced in and created the page after we did the
1881 		 * lookup but before we did the create, so go back and
1882 		 * try to look it up again.
1883 		 */
1884 		if (pp == NULL)
1885 			goto again;
1886 		/*
1887 		 * Fill page from backing store, if any. If none, then
1888 		 * either this is a newly filled hole or page must have
1889 		 * been unmodified and freed so just zero it out.
1890 		 */
1891 		err = swap_getphysname(vp, off, &pvp, &poff);
1892 		if (err) {
1893 			panic("tmp_getapage: no anon slot vp %p "
1894 			    "off %llx pp %p\n", (void *)vp, off, (void *)pp);
1895 		}
1896 		if (pvp) {
1897 			flags = (pl == NULL ? B_ASYNC|B_READ : B_READ);
1898 			err = VOP_PAGEIO(pvp, pp, (u_offset_t)poff, PAGESIZE,
1899 			    flags, cr, NULL);
1900 			if (flags & B_ASYNC)
1901 				pp = NULL;
1902 		} else if (rw != S_CREATE) {
1903 			pagezero(pp, 0, PAGESIZE);
1904 		}
1905 		if (err && pp)
1906 			pvn_read_done(pp, B_ERROR);
1907 		if (err == 0) {
1908 			if (pl)
1909 				pvn_plist_init(pp, pl, plsz, off, PAGESIZE, rw);
1910 			else
1911 				pvn_io_done(pp);
1912 		}
1913 	}
1914 	return (err);
1915 }
1916 
1917 
1918 /*
1919  * Flags are composed of {B_INVAL, B_DIRTY, B_FREE, B_DONTNEED}.
1920  * If len == 0, do from off to EOF.
1921  */
1922 static int tmp_nopage = 0;	/* Don't do tmp_putpage's if set */
1923 
1924 /* ARGSUSED */
1925 int
1926 tmp_putpage(
1927 	register struct vnode *vp,
1928 	offset_t off,
1929 	size_t len,
1930 	int flags,
1931 	struct cred *cr,
1932 	caller_context_t *ct)
1933 {
1934 	register page_t *pp;
1935 	u_offset_t io_off;
1936 	size_t io_len = 0;
1937 	int err = 0;
1938 	struct tmpnode *tp = VTOTN(vp);
1939 	int dolock;
1940 
1941 	if (tmp_nopage)
1942 		return (0);
1943 
1944 	ASSERT(vp->v_count != 0);
1945 
1946 	if (vp->v_flag & VNOMAP)
1947 		return (ENOSYS);
1948 
1949 	/*
1950 	 * This being tmpfs, we don't ever do i/o unless we really
1951 	 * have to (when we're low on memory and pageout calls us
1952 	 * with B_ASYNC | B_FREE or the user explicitly asks for it with
1953 	 * B_DONTNEED).
1954 	 * XXX to approximately track the mod time like ufs we should
1955 	 * update the times here. The problem is, once someone does a
1956 	 * store we never clear the mod bit and do i/o, thus fsflush
1957 	 * will keep calling us every 30 seconds to do the i/o and we'll
1958 	 * continually update the mod time. At least we update the mod
1959 	 * time on the first store because this results in a call to getpage.
1960 	 */
1961 	if (flags != (B_ASYNC | B_FREE) && (flags & B_INVAL) == 0 &&
1962 	    (flags & B_DONTNEED) == 0)
1963 		return (0);
1964 	/*
1965 	 * If this thread owns the lock, i.e., this thread grabbed it
1966 	 * as writer somewhere above, then we don't need to grab the
1967 	 * lock as reader in this routine.
1968 	 */
1969 	dolock = (rw_owner(&tp->tn_contents) != curthread);
1970 
1971 	/*
1972 	 * If this is pageout don't block on the lock as you could deadlock
1973 	 * when freemem == 0 (another thread has the read lock and is blocked
1974 	 * creating a page, and a third thread is waiting to get the writers
1975 	 * lock - waiting writers priority blocks us from getting the read
1976 	 * lock). Of course, if the only freeable pages are on this tmpnode
1977 	 * we're hosed anyways. A better solution might be a new lock type.
1978 	 * Note: ufs has the same problem.
1979 	 */
1980 	if (curproc == proc_pageout) {
1981 		if (!rw_tryenter(&tp->tn_contents, RW_READER))
1982 			return (ENOMEM);
1983 	} else if (dolock)
1984 		rw_enter(&tp->tn_contents, RW_READER);
1985 
1986 	if (!vn_has_cached_data(vp))
1987 		goto out;
1988 
1989 	if (len == 0) {
1990 		if (curproc == proc_pageout) {
1991 			panic("tmp: pageout can't block");
1992 			/*NOTREACHED*/
1993 		}
1994 
1995 		/* Search the entire vp list for pages >= off. */
1996 		err = pvn_vplist_dirty(vp, (u_offset_t)off, tmp_putapage,
1997 		    flags, cr);
1998 	} else {
1999 		u_offset_t eoff;
2000 
2001 		/*
2002 		 * Loop over all offsets in the range [off...off + len]
2003 		 * looking for pages to deal with.
2004 		 */
2005 		eoff = MIN(off + len, tp->tn_size);
2006 		for (io_off = off; io_off < eoff; io_off += io_len) {
2007 			/*
2008 			 * If we are not invalidating, synchronously
2009 			 * freeing, or writing pages, use the routine
2010 			 * page_lookup_nowait() to prevent reclaiming
2011 			 * them from the free list.
2012 			 */
2013 			if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
2014 				pp = page_lookup(vp, io_off,
2015 				    (flags & (B_INVAL | B_FREE)) ?
2016 				    SE_EXCL : SE_SHARED);
2017 			} else {
2018 				pp = page_lookup_nowait(vp, io_off,
2019 				    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
2020 			}
2021 
2022 			if (pp == NULL || pvn_getdirty(pp, flags) == 0)
2023 				io_len = PAGESIZE;
2024 			else {
2025 				err = tmp_putapage(vp, pp, &io_off, &io_len,
2026 				    flags, cr);
2027 				if (err != 0)
2028 					break;
2029 			}
2030 		}
2031 	}
2032 	/* If invalidating, verify all pages on vnode list are gone. */
2033 	if (err == 0 && off == 0 && len == 0 &&
2034 	    (flags & B_INVAL) && vn_has_cached_data(vp)) {
2035 		panic("tmp_putpage: B_INVAL, pages not gone");
2036 		/*NOTREACHED*/
2037 	}
2038 out:
2039 	if ((curproc == proc_pageout) || dolock)
2040 		rw_exit(&tp->tn_contents);
2041 	/*
2042 	 * The only reason putapage is going to give us SE_NOSWAP as error
2043 	 * is when we ask a page to be written to physical backing store
2044 	 * and there is none. Ignore this because we might be dealing
2045 	 * with a swap page which does not have any backing store
2046 	 * on disk. In any other case we won't get this error over here.
2047 	 */
2048 	if (err == SE_NOSWAP)
2049 		err = 0;
2050 	return (err);
2051 }
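The flag filtering and lock-avoidance logic at the top of tmp_putpage() is driven by how VOP_PUTPAGE is invoked. The fragment below is a hedged, illustrative sketch of two common calling patterns and is not part of this file; the helper name is hypothetical, vp and cr are assumed to be supplied by the caller, and the headers needed are already included at the top of this file.

/* Illustrative only: invoking the tmpfs putpage entry point. */
static void
example_putpage_callers(struct vnode *vp, cred_t *cr)
{
	/*
	 * Pageout-style request: write back and free dirty pages
	 * asynchronously.  B_ASYNC | B_FREE passes the flag filter at
	 * the top of tmp_putpage(); B_ASYNC alone is ignored there.
	 */
	(void) VOP_PUTPAGE(vp, (offset_t)0, 0, B_ASYNC | B_FREE, cr, NULL);

	/* Synchronously write back and invalidate the first 64K. */
	(void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)(64 * 1024), B_INVAL,
	    cr, NULL);
}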
2052 
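/*
 * Diagnostic counters, updated at the bottom of tmp_putapage(): the number
 * of successful putapage calls and the total number of pages pushed to swap.
 */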
2053 long tmp_putpagecnt, tmp_pagespushed;
2054 
2055 /*
2056  * Write out a single page.
2057  * For tmpfs this means choose a physical swap slot and write the page
2058  * out using VOP_PAGEIO. For performance, we attempt to kluster; i.e.,
2059  * we try to find a bunch of other dirty pages adjacent in the file
2060  * and a bunch of contiguous swap slots, and then write all the pages
2061  * out in a single i/o.
2062  */
2063 /*ARGSUSED*/
2064 static int
2065 tmp_putapage(
2066 	struct vnode *vp,
2067 	page_t *pp,
2068 	u_offset_t *offp,
2069 	size_t *lenp,
2070 	int flags,
2071 	struct cred *cr)
2072 {
2073 	int err;
2074 	ulong_t klstart, kllen;
2075 	page_t *pplist, *npplist;
2076 	extern int klustsize;
2077 	long tmp_klustsize;
2078 	struct tmpnode *tp;
2079 	size_t pp_off, pp_len;
2080 	u_offset_t io_off;
2081 	size_t io_len;
2082 	struct vnode *pvp;
2083 	u_offset_t pstart;
2084 	u_offset_t offset;
2085 	u_offset_t tmpoff;
2086 
2087 	ASSERT(PAGE_LOCKED(pp));
2088 
2089 	/* Kluster in tmp_klustsize chunks */
2090 	tp = VTOTN(vp);
2091 	tmp_klustsize = klustsize;
2092 	offset = pp->p_offset;
2093 	klstart = (offset / tmp_klustsize) * tmp_klustsize;
2094 	kllen = MIN(tmp_klustsize, tp->tn_size - klstart);
2095 
2096 	/* Get a kluster of pages */
2097 	pplist =
2098 	    pvn_write_kluster(vp, pp, &tmpoff, &pp_len, klstart, kllen, flags);
2099 
2100 	pp_off = (size_t)tmpoff;
2101 
2102 	/*
2103 	 * Get a cluster of physical offsets for the pages; the amount we
2104 	 * get may be some subrange of what we ask for (io_off, io_len).
2105 	 */
2106 	io_off = pp_off;
2107 	io_len = pp_len;
2108 	err = swap_newphysname(vp, offset, &io_off, &io_len, &pvp, &pstart);
2109 	ASSERT(err != SE_NOANON); /* anon slot must have been filled */
2110 	if (err) {
2111 		pvn_write_done(pplist, B_ERROR | B_WRITE | flags);
2112 		/*
2113 		 * If this routine is called as a result of a segvn_sync
2114 		 * operation and we have no physical swap, then we can get an
2115 		 * error here. In such a case the error will be SE_NOSWAP;
2116 		 * at this point we expect only SE_NOSWAP.
2117 		 */
2118 		ASSERT(err == SE_NOSWAP);
2119 		if (flags & B_INVAL)
2120 			err = ENOMEM;
2121 		goto out;
2122 	}
2123 	ASSERT(pp_off <= io_off && io_off + io_len <= pp_off + pp_len);
2124 	ASSERT(io_off <= offset && offset < io_off + io_len);
2125 
2126 	/* Toss pages at front/rear that we couldn't get physical backing for */
2127 	if (io_off != pp_off) {
2128 		npplist = NULL;
2129 		page_list_break(&pplist, &npplist, btop(io_off - pp_off));
2130 		ASSERT(pplist->p_offset == pp_off);
2131 		ASSERT(pplist->p_prev->p_offset == io_off - PAGESIZE);
2132 		pvn_write_done(pplist, B_ERROR | B_WRITE | flags);
2133 		pplist = npplist;
2134 	}
2135 	if (io_off + io_len < pp_off + pp_len) {
2136 		npplist = NULL;
2137 		page_list_break(&pplist, &npplist, btop(io_len));
2138 		ASSERT(npplist->p_offset == io_off + io_len);
2139 		ASSERT(npplist->p_prev->p_offset == pp_off + pp_len - PAGESIZE);
2140 		pvn_write_done(npplist, B_ERROR | B_WRITE | flags);
2141 	}
2142 
2143 	ASSERT(pplist->p_offset == io_off);
2144 	ASSERT(pplist->p_prev->p_offset == io_off + io_len - PAGESIZE);
2145 	ASSERT(btopr(io_len) <= btopr(kllen));
2146 
2147 	/* Do i/o on the remaining kluster */
2148 	err = VOP_PAGEIO(pvp, pplist, (u_offset_t)pstart, io_len,
2149 	    B_WRITE | flags, cr, NULL);
2150 
2151 	if ((flags & B_ASYNC) == 0) {
2152 		pvn_write_done(pplist, ((err) ? B_ERROR : 0) | B_WRITE | flags);
2153 	}
2154 out:
2155 	if (!err) {
2156 		if (offp)
2157 			*offp = io_off;
2158 		if (lenp)
2159 			*lenp = io_len;
2160 		tmp_putpagecnt++;
2161 		tmp_pagespushed += btop(io_len);
2162 	}
2163 	if (err && err != ENOMEM && err != SE_NOSWAP)
2164 		cmn_err(CE_WARN, "tmp_putapage: err %d\n", err);
2165 	return (err);
2166 }
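The kluster window that tmp_putapage() derives from the faulting page's offset is plain integer arithmetic. The userland program below is an illustrative mirror of that computation, useful for sanity-checking the window bounds; the klustsize and file-size values are made-up numbers, not tmpfs defaults.

#include <stdio.h>

/* Illustrative mirror of the klstart/kllen computation in tmp_putapage(). */
static void
kluster_window(unsigned long long offset, unsigned long long klustsize,
    unsigned long long tn_size)
{
	unsigned long long klstart = (offset / klustsize) * klustsize;
	unsigned long long kllen = tn_size - klstart;

	if (kllen > klustsize)
		kllen = klustsize;	/* MIN(klustsize, tn_size - klstart) */

	(void) printf("offset 0x%llx -> window [0x%llx, 0x%llx)\n",
	    offset, klstart, klstart + kllen);
}

int
main(void)
{
	/* Hypothetical numbers: 64K kluster, dirty page at 0x23000, 192K file. */
	kluster_window(0x23000ULL, 0x10000ULL, 0x30000ULL);
	return (0);
}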
2167 
2168 /* ARGSUSED */
2169 static int
2170 tmp_map(
2171 	struct vnode *vp,
2172 	offset_t off,
2173 	struct as *as,
2174 	caddr_t *addrp,
2175 	size_t len,
2176 	uchar_t prot,
2177 	uchar_t maxprot,
2178 	uint_t flags,
2179 	struct cred *cred,
2180 	caller_context_t *ct)
2181 {
2182 	struct segvn_crargs vn_a;
2183 	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
2184 	int error;
2185 
2186 #ifdef _ILP32
2187 	if (len > MAXOFF_T)
2188 		return (ENOMEM);
2189 #endif
2190 
2191 	if (vp->v_flag & VNOMAP)
2192 		return (ENOSYS);
2193 
2194 	if (off < 0 || (offset_t)(off + len) < 0 ||
2195 	    off > MAXOFF_T || (off + len) > MAXOFF_T)
2196 		return (ENXIO);
2197 
2198 	if (vp->v_type != VREG)
2199 		return (ENODEV);
2200 
2201 	/*
2202 	 * Don't allow mapping to locked file
2203 	 */
2204 	if (vn_has_mandatory_locks(vp, tp->tn_mode)) {
2205 		return (EAGAIN);
2206 	}
2207 
2208 	as_rangelock(as);
2209 	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
2210 	if (error != 0) {
2211 		as_rangeunlock(as);
2212 		return (error);
2213 	}
2214 
2215 	vn_a.vp = vp;
2216 	vn_a.offset = (u_offset_t)off;
2217 	vn_a.type = flags & MAP_TYPE;
2218 	vn_a.prot = prot;
2219 	vn_a.maxprot = maxprot;
2220 	vn_a.flags = flags & ~MAP_TYPE;
2221 	vn_a.cred = cred;
2222 	vn_a.amp = NULL;
2223 	vn_a.szc = 0;
2224 	vn_a.lgrp_mem_policy_flags = 0;
2225 
2226 	error = as_map(as, *addrp, len, segvn_create, &vn_a);
2227 	as_rangeunlock(as);
2228 	return (error);
2229 }
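From userland, the checks in tmp_map() are exercised through mmap(2): only regular files can be mapped, offsets must fit in MAXOFF_T, and a file with mandatory locks fails with EAGAIN. A minimal sketch, assuming /tmp is tmpfs-mounted and using a hypothetical path:

#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/tmp/example", O_RDWR | O_CREAT, 0644);

	if (fd < 0 || ftruncate(fd, 8192) != 0) {
		perror("setup");
		return (1);
	}

	/* A shared mapping of a regular tmpfs file goes through tmp_map(). */
	char *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return (1);
	}

	p[0] = 'x';		/* dirties an anonymous tmpfs page */
	(void) munmap(p, 8192);
	(void) close(fd);
	return (0);
}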
2230 
2231 /*
2232  * tmp_addmap and tmp_delmap can't be called since the vp
2233  * maintained in the segvn mapping is NULL.
2234  */
2235 /* ARGSUSED */
2236 static int
2237 tmp_addmap(
2238 	struct vnode *vp,
2239 	offset_t off,
2240 	struct as *as,
2241 	caddr_t addr,
2242 	size_t len,
2243 	uchar_t prot,
2244 	uchar_t maxprot,
2245 	uint_t flags,
2246 	struct cred *cred,
2247 	caller_context_t *ct)
2248 {
2249 	return (0);
2250 }
2251 
2252 /* ARGSUSED */
2253 static int
2254 tmp_delmap(
2255 	struct vnode *vp,
2256 	offset_t off,
2257 	struct as *as,
2258 	caddr_t addr,
2259 	size_t len,
2260 	uint_t prot,
2261 	uint_t maxprot,
2262 	uint_t flags,
2263 	struct cred *cred,
2264 	caller_context_t *ct)
2265 {
2266 	return (0);
2267 }
2268 
2269 static int
2270 tmp_freesp(struct vnode *vp, struct flock64 *lp, int flag)
2271 {
2272 	register int i;
2273 	register struct tmpnode *tp = VTOTN(vp);
2274 	int error;
2275 
2276 	ASSERT(vp->v_type == VREG);
2277 	ASSERT(lp->l_start >= 0);
2278 
2279 	if (lp->l_len != 0)
2280 		return (EINVAL);
2281 
2282 	rw_enter(&tp->tn_rwlock, RW_WRITER);
2283 	if (tp->tn_size == lp->l_start) {
2284 		rw_exit(&tp->tn_rwlock);
2285 		return (0);
2286 	}
2287 
2288 	/*
2289 	 * Check for any mandatory locks on the range
2290 	 */
2291 	if (MANDLOCK(vp, tp->tn_mode)) {
2292 		long save_start;
2293 
2294 		save_start = lp->l_start;
2295 
2296 		if (tp->tn_size < lp->l_start) {
2297 			/*
2298 			 * "Truncate up" case: need to make sure there
2299 			 * is no lock beyond current end-of-file. To
2300 			 * do so, we need to set l_start to the size
2301 			 * of the file temporarily.
2302 			 */
2303 			lp->l_start = tp->tn_size;
2304 		}
2305 		lp->l_type = F_WRLCK;
2306 		lp->l_sysid = 0;
2307 		lp->l_pid = ttoproc(curthread)->p_pid;
2308 		i = (flag & (FNDELAY|FNONBLOCK)) ? 0 : SLPFLCK;
2309 		if ((i = reclock(vp, lp, i, 0, lp->l_start, NULL)) != 0 ||
2310 		    lp->l_type != F_UNLCK) {
2311 			rw_exit(&tp->tn_rwlock);
2312 			return (i ? i : EAGAIN);
2313 		}
2314 
2315 		lp->l_start = save_start;
2316 	}
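	/* This bare VFSTOTM() call has no side effects and appears vestigial. */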
2317 	VFSTOTM(vp->v_vfsp);
2318 
2319 	rw_enter(&tp->tn_contents, RW_WRITER);
2320 	error = tmpnode_trunc((struct tmount *)VFSTOTM(vp->v_vfsp),
2321 	    tp, (ulong_t)lp->l_start);
2322 	rw_exit(&tp->tn_contents);
2323 	rw_exit(&tp->tn_rwlock);
2324 	return (error);
2325 }
2326 
2327 /* ARGSUSED */
2328 static int
2329 tmp_space(
2330 	struct vnode *vp,
2331 	int cmd,
2332 	struct flock64 *bfp,
2333 	int flag,
2334 	offset_t offset,
2335 	cred_t *cred,
2336 	caller_context_t *ct)
2337 {
2338 	int error;
2339 
2340 	if (cmd != F_FREESP)
2341 		return (EINVAL);
2342 	if ((error = convoff(vp, bfp, 0, (offset_t)offset)) == 0) {
2343 		if ((bfp->l_start > MAXOFF_T) || (bfp->l_len > MAXOFF_T))
2344 			return (EFBIG);
2345 		error = tmp_freesp(vp, bfp, flag);
2346 
2347 		if (error == 0 && bfp->l_start == 0)
2348 			vnevent_truncate(vp, ct);
2349 	}
2350 	return (error);
2351 }
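tmp_space() accepts only F_FREESP, and tmp_freesp() further requires l_len == 0, so the only operation reachable here is truncation of the file to l_start. A hedged userland sketch (F_FREESP is an illumos/Solaris fcntl command; the path is hypothetical):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct flock fl;
	int fd = open("/tmp/example", O_RDWR);

	if (fd < 0) {
		perror("open");
		return (1);
	}

	(void) memset(&fl, 0, sizeof (fl));
	fl.l_whence = SEEK_SET;	/* convoff() folds this into l_start */
	fl.l_start = 4096;	/* new end-of-file */
	fl.l_len = 0;		/* required: free from l_start to end of file */

	/* Dispatched as VOP_SPACE() -> tmp_space() -> tmp_freesp(). */
	if (fcntl(fd, F_FREESP, &fl) != 0)
		perror("F_FREESP");

	(void) close(fd);
	return (0);
}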
2352 
2353 /* ARGSUSED */
2354 static int
2355 tmp_seek(
2356 	struct vnode *vp,
2357 	offset_t ooff,
2358 	offset_t *noffp,
2359 	caller_context_t *ct)
2360 {
2361 	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
2362 }
2363 
2364 /* ARGSUSED2 */
2365 static int
2366 tmp_rwlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
2367 {
2368 	struct tmpnode *tp = VTOTN(vp);
2369 
2370 	if (write_lock) {
2371 		rw_enter(&tp->tn_rwlock, RW_WRITER);
2372 	} else {
2373 		rw_enter(&tp->tn_rwlock, RW_READER);
2374 	}
2375 	return (write_lock);
2376 }
2377 
2378 /* ARGSUSED1 */
2379 static void
2380 tmp_rwunlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
2381 {
2382 	struct tmpnode *tp = VTOTN(vp);
2383 
2384 	rw_exit(&tp->tn_rwlock);
2385 }
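tmp_rwlock() and tmp_rwunlock() simply take and drop tn_rwlock; the VFS layer brackets read and write requests with them. The fragment below is a hedged, illustrative sketch of that pattern (the helper name is hypothetical, and the real read(2) path does considerably more); the headers needed are already included at the top of this file.

/* Illustrative only: bracketing VOP_READ with the rwlock entry points. */
static int
example_locked_read(struct vnode *vp, struct uio *uiop, cred_t *cr)
{
	int err;

	(void) VOP_RWLOCK(vp, V_WRITELOCK_FALSE, NULL);
	err = VOP_READ(vp, uiop, 0, cr, NULL);
	VOP_RWUNLOCK(vp, V_WRITELOCK_FALSE, NULL);

	return (err);
}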
2386 
2387 static int
2388 tmp_pathconf(
2389 	struct vnode *vp,
2390 	int cmd,
2391 	ulong_t *valp,
2392 	cred_t *cr,
2393 	caller_context_t *ct)
2394 {
2395 	struct tmpnode *tp = NULL;
2396 	int error;
2397 
2398 	switch (cmd) {
2399 	case _PC_XATTR_EXISTS:
2400 		if (vp->v_vfsp->vfs_flag & VFS_XATTR) {
2401 			*valp = 0;	/* assume no attributes */
2402 			error = 0;	/* okay to ask */
2403 			tp = VTOTN(vp);
2404 			rw_enter(&tp->tn_rwlock, RW_READER);
2405 			if (tp->tn_xattrdp) {
2406 				rw_enter(&tp->tn_xattrdp->tn_rwlock, RW_READER);
2407 				/* do not count "." and ".." */
2408 				if (tp->tn_xattrdp->tn_dirents > 2)
2409 					*valp = 1;
2410 				rw_exit(&tp->tn_xattrdp->tn_rwlock);
2411 			}
2412 			rw_exit(&tp->tn_rwlock);
2413 		} else {
2414 			error = EINVAL;
2415 		}
2416 		break;
2417 	case _PC_SATTR_ENABLED:
2418 	case _PC_SATTR_EXISTS:
2419 		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
2420 		    (vp->v_type == VREG || vp->v_type == VDIR);
2421 		error = 0;
2422 		break;
2423 	case _PC_TIMESTAMP_RESOLUTION:
2424 		/* nanosecond timestamp resolution */
2425 		*valp = 1L;
2426 		error = 0;
2427 		break;
2428 	default:
2429 		error = fs_pathconf(vp, cmd, valp, cr, ct);
2430 	}
2431 	return (error);
2432 }
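The _PC_XATTR_EXISTS, _PC_SATTR_*, and _PC_TIMESTAMP_RESOLUTION names handled above are illumos/Solaris pathconf(2) extensions. A hedged userland sketch (hypothetical path) that queries two of them:

#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	/* 1 if the file has extended attributes other than "." and "..". */
	long xattr = pathconf("/tmp/example", _PC_XATTR_EXISTS);

	/* tmpfs reports a 1-nanosecond timestamp resolution. */
	long res = pathconf("/tmp/example", _PC_TIMESTAMP_RESOLUTION);

	(void) printf("xattr_exists=%ld resolution=%ldns\n", xattr, res);
	return (0);
}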
2433 
2434 
2435 struct vnodeops *tmp_vnodeops;
2436 
2437 const fs_operation_def_t tmp_vnodeops_template[] = {
2438 	VOPNAME_OPEN,		{ .vop_open = tmp_open },
2439 	VOPNAME_CLOSE,		{ .vop_close = tmp_close },
2440 	VOPNAME_READ,		{ .vop_read = tmp_read },
2441 	VOPNAME_WRITE,		{ .vop_write = tmp_write },
2442 	VOPNAME_IOCTL,		{ .vop_ioctl = tmp_ioctl },
2443 	VOPNAME_GETATTR,	{ .vop_getattr = tmp_getattr },
2444 	VOPNAME_SETATTR,	{ .vop_setattr = tmp_setattr },
2445 	VOPNAME_ACCESS,		{ .vop_access = tmp_access },
2446 	VOPNAME_LOOKUP,		{ .vop_lookup = tmp_lookup },
2447 	VOPNAME_CREATE,		{ .vop_create = tmp_create },
2448 	VOPNAME_REMOVE,		{ .vop_remove = tmp_remove },
2449 	VOPNAME_LINK,		{ .vop_link = tmp_link },
2450 	VOPNAME_RENAME,		{ .vop_rename = tmp_rename },
2451 	VOPNAME_MKDIR,		{ .vop_mkdir = tmp_mkdir },
2452 	VOPNAME_RMDIR,		{ .vop_rmdir = tmp_rmdir },
2453 	VOPNAME_READDIR,	{ .vop_readdir = tmp_readdir },
2454 	VOPNAME_SYMLINK,	{ .vop_symlink = tmp_symlink },
2455 	VOPNAME_READLINK,	{ .vop_readlink = tmp_readlink },
2456 	VOPNAME_FSYNC,		{ .vop_fsync = tmp_fsync },
2457 	VOPNAME_INACTIVE,	{ .vop_inactive = tmp_inactive },
2458 	VOPNAME_FID,		{ .vop_fid = tmp_fid },
2459 	VOPNAME_RWLOCK,		{ .vop_rwlock = tmp_rwlock },
2460 	VOPNAME_RWUNLOCK,	{ .vop_rwunlock = tmp_rwunlock },
2461 	VOPNAME_SEEK,		{ .vop_seek = tmp_seek },
2462 	VOPNAME_SPACE,		{ .vop_space = tmp_space },
2463 	VOPNAME_GETPAGE,	{ .vop_getpage = tmp_getpage },
2464 	VOPNAME_PUTPAGE,	{ .vop_putpage = tmp_putpage },
2465 	VOPNAME_MAP,		{ .vop_map = tmp_map },
2466 	VOPNAME_ADDMAP,		{ .vop_addmap = tmp_addmap },
2467 	VOPNAME_DELMAP,		{ .vop_delmap = tmp_delmap },
2468 	VOPNAME_PATHCONF,	{ .vop_pathconf = tmp_pathconf },
2469 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
2470 	NULL,			NULL
2471 };
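The operation template above is not used directly; at filesystem initialization it is handed to vn_make_ops(), which builds the live tmp_vnodeops vector. That registration lives in the tmpfs VFS code rather than in this file; the fragment below is an illustrative sketch of the call, not a copy of it, and relies on headers already included above.

/* Illustrative only: turning the template into a vnodeops vector. */
static int
example_register_tmp_vnodeops(void)
{
	int error;

	error = vn_make_ops("tmpfs", tmp_vnodeops_template, &tmp_vnodeops);
	if (error != 0)
		cmn_err(CE_WARN, "example: bad tmpfs vnode ops template");

	return (error);
}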
2472