/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/user.h>
#include <sys/time.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/cred.h>
#include <sys/dirent.h>
#include <sys/pathname.h>
#include <sys/vmsystm.h>
#include <sys/fs/tmp.h>
#include <sys/fs/tmpnode.h>
#include <sys/mman.h>
#include <vm/hat.h>
#include <vm/seg_vn.h>
#include <vm/seg_map.h>
#include <vm/seg.h>
#include <vm/anon.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/swap.h>
#include <sys/buf.h>
#include <sys/vm.h>
#include <sys/vtrace.h>
#include <sys/policy.h>
#include <fs/fs_subr.h>

static int tmp_getapage(struct vnode *, u_offset_t, size_t, uint_t *,
    page_t **, size_t, struct seg *, caddr_t, enum seg_rw, struct cred *);
static int tmp_putapage(struct vnode *, page_t *, u_offset_t *, size_t *,
    int, struct cred *);

/* ARGSUSED1 */
static int
tmp_open(struct vnode **vpp, int flag, struct cred *cred, caller_context_t *ct)
{
	/*
	 * swapon to a tmpfs file is not supported so access
	 * is denied on open if VISSWAP is set.
	 */
	if ((*vpp)->v_flag & VISSWAP)
		return (EINVAL);
	return (0);
}

/* ARGSUSED1 */
static int
tmp_close(
	struct vnode *vp,
	int flag,
	int count,
	offset_t offset,
	struct cred *cred,
	caller_context_t *ct)
{
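	/*
	 * Release any file/record locks and share reservations held on
	 * this vnode by the closing process.
	 */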
	cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
	cleanshares(vp, ttoproc(curthread)->p_pid);
	return (0);
}

/*
 * wrtmp does the real work of write requests for tmpfs.
 */
static int
wrtmp(
	struct tmount *tm,
	struct tmpnode *tp,
	struct uio *uio,
	struct cred *cr,
	struct caller_context *ct)
{
	pgcnt_t pageoffset;	/* offset in pages */
	ulong_t segmap_offset;	/* pagesize byte offset into segmap */
	caddr_t base;		/* base of segmap */
	ssize_t bytes;		/* bytes to uiomove */
	pfn_t pagenumber;	/* offset in pages into tmp file */
	struct vnode *vp;
	int error = 0;
	int pagecreate;		/* == 1 if we allocated a page */
	int newpage;
	rlim64_t limit = uio->uio_llimit;
	long oresid = uio->uio_resid;
	timestruc_t now;

	long tn_size_changed = 0;
	long old_tn_size;
	long new_tn_size;

	vp = TNTOV(tp);
	ASSERT(vp->v_type == VREG);

	TRACE_1(TR_FAC_TMPFS, TR_TMPFS_RWTMP_START,
	    "tmp_wrtmp_start:vp %p", vp);

	ASSERT(RW_WRITE_HELD(&tp->tn_contents));
	ASSERT(RW_WRITE_HELD(&tp->tn_rwlock));

	if (MANDLOCK(vp, tp->tn_mode)) {
		rw_exit(&tp->tn_contents);
		/*
		 * tmp_getattr ends up being called by chklock
		 */
		error = chklock(vp, FWRITE, uio->uio_loffset, uio->uio_resid,
		    uio->uio_fmode, ct);
		rw_enter(&tp->tn_contents, RW_WRITER);
		if (error != 0) {
			TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
			    "tmp_wrtmp_end:vp %p error %d", vp, error);
			return (error);
		}
	}

	if (uio->uio_loffset < 0)
		return (EINVAL);

	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
		limit = MAXOFFSET_T;

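	/*
	 * A write starting at or beyond the process file-size limit fails
	 * with EFBIG; rctl_action() fires the action associated with the
	 * legacy RLIMIT_FSIZE resource control (SIGXFSZ delivery).
	 */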
	if (uio->uio_loffset >= limit) {
		proc_t *p = ttoproc(curthread);

		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE], p->p_rctls,
		    p, RCA_UNSAFE_SIGINFO);
		mutex_exit(&p->p_lock);
		return (EFBIG);
	}

	if (uio->uio_loffset >= MAXOFF_T) {
		TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
		    "tmp_wrtmp_end:vp %p error %d", vp, EINVAL);
		return (EFBIG);
	}

	if (uio->uio_resid == 0) {
		TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
		    "tmp_wrtmp_end:vp %p error %d", vp, 0);
		return (0);
	}

	if (limit > MAXOFF_T)
		limit = MAXOFF_T;

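	/*
	 * Main write loop: each iteration transfers at most one page.
	 * For each chunk we reserve swap if the file is being extended,
	 * allocate an anon slot for new pages, map the page through
	 * vpm/segmap, and copy the caller's data with uiomove().
	 */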
	do {
		long offset;
		long delta;

		offset = (long)uio->uio_offset;
		pageoffset = offset & PAGEOFFSET;
		/*
		 * A maximum of PAGESIZE bytes of data is transferred
		 * each pass through this loop
		 */
		bytes = MIN(PAGESIZE - pageoffset, uio->uio_resid);

		if (offset + bytes >= limit) {
			if (offset >= limit) {
				error = EFBIG;
				goto out;
			}
			bytes = limit - offset;
		}
		pagenumber = btop(offset);

		/*
		 * delta is the amount of anonymous memory
		 * to reserve for the file.
		 * We always reserve in pagesize increments so
		 * unless we're extending the file into a new page,
		 * we don't need to call tmp_resv.
		 */
		delta = offset + bytes -
		    P2ROUNDUP_TYPED(tp->tn_size, PAGESIZE, u_offset_t);
		if (delta > 0) {
			pagecreate = 1;
			if (tmp_resv(tm, tp, delta, pagecreate)) {
				/*
				 * Log file system full in the zone that owns
				 * the tmpfs mount, as well as in the global
				 * zone if necessary.
				 */
				zcmn_err(tm->tm_vfsp->vfs_zone->zone_id,
				    CE_WARN, "%s: File system full, "
				    "swap space limit exceeded",
				    tm->tm_mntpath);

				if (tm->tm_vfsp->vfs_zone->zone_id !=
				    GLOBAL_ZONEID) {

					vfs_t *vfs = tm->tm_vfsp;

					zcmn_err(GLOBAL_ZONEID,
					    CE_WARN, "%s: File system full, "
					    "swap space limit exceeded",
					    vfs->vfs_vnodecovered->v_path);
				}
				error = ENOSPC;
				break;
			}
			tmpnode_growmap(tp, (ulong_t)offset + bytes);
		}
		/* grow the file to the new length */
		if (offset + bytes > tp->tn_size) {
			tn_size_changed = 1;
			old_tn_size = tp->tn_size;
			/*
			 * Postpone updating tp->tn_size until uiomove() is
			 * done.
			 */
			new_tn_size = offset + bytes;
		}
		if (bytes == PAGESIZE) {
			/*
			 * Writing whole page so reading from disk
			 * is a waste
			 */
			pagecreate = 1;
		} else {
			pagecreate = 0;
		}
		/*
		 * If writing past EOF or filling in a hole
		 * we need to allocate an anon slot.
		 */
		if (anon_get_ptr(tp->tn_anon, pagenumber) == NULL) {
			(void) anon_set_ptr(tp->tn_anon, pagenumber,
			    anon_alloc(vp, ptob(pagenumber)), ANON_SLEEP);
			pagecreate = 1;
			tp->tn_nblocks++;
		}

		/*
		 * We have to drop the contents lock to allow the VM
		 * system to reacquire it in tmp_getpage()
		 */
		rw_exit(&tp->tn_contents);

		/*
		 * Touch the page and fault it in if it is not in core
		 * before segmap_getmapflt or vpm_data_copy can lock it.
		 * This is to avoid the deadlock if the buffer is mapped
		 * to the same file through mmap which we want to write.
		 */
		uio_prefaultpages((long)bytes, uio);

		newpage = 0;
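		/*
		 * Two copy paths: when vpm_enable is set, the vnode page
		 * mapping (VPM) interface copies the data directly;
		 * otherwise we map the page through segkmap and copy
		 * into the mapping ourselves.
		 */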
		if (vpm_enable) {
			/*
			 * Copy data. If new pages are created, part of
			 * the page that is not written will be initialized
			 * with zeros.
			 */
			error = vpm_data_copy(vp, offset, bytes, uio,
			    !pagecreate, &newpage, 1, S_WRITE);
		} else {
			/* Get offset within the segmap mapping */
			segmap_offset = (offset & PAGEMASK) & MAXBOFFSET;
			base = segmap_getmapflt(segkmap, vp,
			    (offset & MAXBMASK), PAGESIZE, !pagecreate,
			    S_WRITE);
		}


		if (!vpm_enable && pagecreate) {
			/*
			 * segmap_pagecreate() returns 1 if it calls
			 * page_create_va() to allocate any pages.
			 */
			newpage = segmap_pagecreate(segkmap,
			    base + segmap_offset, (size_t)PAGESIZE, 0);
			/*
			 * Clear from the beginning of the page to the starting
			 * offset of the data.
			 */
			if (pageoffset != 0)
				(void) kzero(base + segmap_offset,
				    (size_t)pageoffset);
		}

		if (!vpm_enable) {
			error = uiomove(base + segmap_offset + pageoffset,
			    (long)bytes, UIO_WRITE, uio);
		}

		if (!vpm_enable && pagecreate &&
		    uio->uio_offset < P2ROUNDUP(offset + bytes, PAGESIZE)) {
			long zoffset;	/* zero from offset into page */
			/*
			 * We created pages w/o initializing them completely,
			 * thus we need to zero the part that wasn't set up.
			 * This happens on most EOF write cases and if
			 * we had some sort of error during the uiomove.
			 */
			long nmoved;

			nmoved = uio->uio_offset - offset;
			ASSERT((nmoved + pageoffset) <= PAGESIZE);

			/*
			 * Zero from the end of data in the page to the
			 * end of the page.
			 */
			if ((zoffset = pageoffset + nmoved) < PAGESIZE)
				(void) kzero(base + segmap_offset + zoffset,
				    (size_t)PAGESIZE - zoffset);
		}

		/*
		 * Unlock the pages which have been allocated by
		 * page_create_va() in segmap_pagecreate()
		 */
		if (!vpm_enable && newpage) {
			segmap_pageunlock(segkmap, base + segmap_offset,
			    (size_t)PAGESIZE, S_WRITE);
		}

		if (error) {
			/*
			 * If we failed on a write, we must
			 * be sure to invalidate any pages that may have
			 * been allocated.
			 */
			if (vpm_enable) {
				(void) vpm_sync_pages(vp, offset, PAGESIZE,
				    SM_INVAL);
			} else {
				(void) segmap_release(segkmap, base, SM_INVAL);
			}
		} else {
			if (vpm_enable) {
				error = vpm_sync_pages(vp, offset, PAGESIZE,
				    0);
			} else {
				error = segmap_release(segkmap, base, 0);
			}
		}

		/*
		 * Re-acquire contents lock.
		 */
		rw_enter(&tp->tn_contents, RW_WRITER);

		/*
		 * Update tn_size.
		 */
		if (tn_size_changed)
			tp->tn_size = new_tn_size;

		/*
		 * If the uiomove failed, fix up tn_size.
		 */
		if (error) {
			if (tn_size_changed) {
				/*
				 * The uiomove failed, and we
				 * allocated blocks, so get rid
				 * of them.
				 */
				(void) tmpnode_trunc(tm, tp,
				    (ulong_t)old_tn_size);
			}
		} else {
			/*
			 * XXX - Can this be out of the loop?
			 */
			if ((tp->tn_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) &&
			    (tp->tn_mode & (S_ISUID | S_ISGID)) &&
			    secpolicy_vnode_setid_retain(cr,
			    (tp->tn_mode & S_ISUID) != 0 && tp->tn_uid == 0)) {
				/*
				 * Clear Set-UID & Set-GID bits on
				 * successful write if not privileged
				 * and at least one of the execute bits
				 * is set. If we always clear Set-GID,
				 * mandatory file and record locking is
				 * unusable.
				 */
				tp->tn_mode &= ~(S_ISUID | S_ISGID);
			}
			gethrestime(&now);
			tp->tn_mtime = now;
			tp->tn_ctime = now;
		}
	} while (error == 0 && uio->uio_resid > 0 && bytes != 0);

out:
	/*
	 * If we've already done a partial-write, terminate
	 * the write but return no error.
	 */
	if (oresid != uio->uio_resid)
		error = 0;
	TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
	    "tmp_wrtmp_end:vp %p error %d", vp, error);
	return (error);
}

/*
 * rdtmp does the real work of read requests for tmpfs.
 */
static int
rdtmp(
	struct tmount *tm,
	struct tmpnode *tp,
	struct uio *uio,
	struct caller_context *ct)
{
	ulong_t pageoffset;	/* offset in tmpfs file (uio_offset) */
	ulong_t segmap_offset;	/* pagesize byte offset into segmap */
	caddr_t base;		/* base of segmap */
	ssize_t bytes;		/* bytes to uiomove */
	struct vnode *vp;
	int error;
	long oresid = uio->uio_resid;

#if defined(lint)
	tm = tm;
#endif
	vp = TNTOV(tp);

	TRACE_1(TR_FAC_TMPFS, TR_TMPFS_RWTMP_START, "tmp_rdtmp_start:vp %p",
	    vp);

	ASSERT(RW_LOCK_HELD(&tp->tn_contents));

	if (MANDLOCK(vp, tp->tn_mode)) {
		rw_exit(&tp->tn_contents);
		/*
		 * tmp_getattr ends up being called by chklock
		 */
		error = chklock(vp, FREAD, uio->uio_loffset, uio->uio_resid,
		    uio->uio_fmode, ct);
		rw_enter(&tp->tn_contents, RW_READER);
		if (error != 0) {
			TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
			    "tmp_rdtmp_end:vp %p error %d", vp, error);
			return (error);
		}
	}
	ASSERT(tp->tn_type == VREG);

	if (uio->uio_loffset >= MAXOFF_T) {
		TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
		    "tmp_rdtmp_end:vp %p error %d", vp, EINVAL);
		return (0);
	}
	if (uio->uio_loffset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0) {
		TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
		    "tmp_rdtmp_end:vp %p error %d", vp, 0);
		return (0);
	}

	vp = TNTOV(tp);

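	/*
	 * Read loop: copy at most one page per iteration.  diff is the
	 * number of bytes left before EOF; the transfer is clipped to it
	 * and the loop ends once the request or the file is exhausted.
	 */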
	do {
		long diff;
		long offset;

		offset = uio->uio_offset;
		pageoffset = offset & PAGEOFFSET;
		bytes = MIN(PAGESIZE - pageoffset, uio->uio_resid);

		diff = tp->tn_size - offset;

		if (diff <= 0) {
			error = 0;
			goto out;
		}
		if (diff < bytes)
			bytes = diff;

		/*
		 * We have to drop the contents lock to allow the VM system
		 * to reacquire it in tmp_getpage() should the uiomove cause a
		 * pagefault.
		 */
		rw_exit(&tp->tn_contents);

		if (vpm_enable) {
			/*
			 * Copy data.
			 */
			error = vpm_data_copy(vp, offset, bytes, uio, 1, NULL,
			    0, S_READ);
		} else {
			segmap_offset = (offset & PAGEMASK) & MAXBOFFSET;
			base = segmap_getmapflt(segkmap, vp, offset & MAXBMASK,
			    bytes, 1, S_READ);

			error = uiomove(base + segmap_offset + pageoffset,
			    (long)bytes, UIO_READ, uio);
		}

		if (error) {
			if (vpm_enable) {
				(void) vpm_sync_pages(vp, offset, PAGESIZE, 0);
			} else {
				(void) segmap_release(segkmap, base, 0);
			}
		} else {
			if (vpm_enable) {
				error = vpm_sync_pages(vp, offset, PAGESIZE,
				    0);
			} else {
				error = segmap_release(segkmap, base, 0);
			}
		}

		/*
		 * Re-acquire contents lock.
		 */
		rw_enter(&tp->tn_contents, RW_READER);

	} while (error == 0 && uio->uio_resid > 0);

out:
	gethrestime(&tp->tn_atime);

	/*
	 * If we've already done a partial read, terminate
	 * the read but return no error.
	 */
	if (oresid != uio->uio_resid)
		error = 0;

	TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
	    "tmp_rdtmp_end:vp %x error %d", vp, error);
	return (error);
}

/* ARGSUSED2 */
static int
tmp_read(struct vnode *vp, struct uio *uiop, int ioflag, cred_t *cred,
    struct caller_context *ct)
{
	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
	struct tmount *tm = (struct tmount *)VTOTM(vp);
	int error;

	/*
	 * We don't currently support reading non-regular files
	 */
	if (vp->v_type == VDIR)
		return (EISDIR);
	if (vp->v_type != VREG)
		return (EINVAL);
	/*
	 * tmp_rwlock should have already been called from layers above
	 */
	ASSERT(RW_READ_HELD(&tp->tn_rwlock));

	rw_enter(&tp->tn_contents, RW_READER);

	error = rdtmp(tm, tp, uiop, ct);

	rw_exit(&tp->tn_contents);

	return (error);
}

static int
tmp_write(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cred,
    struct caller_context *ct)
{
	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
	struct tmount *tm = (struct tmount *)VTOTM(vp);
	int error;

	/*
	 * We don't currently support writing to non-regular files
	 */
	if (vp->v_type != VREG)
		return (EINVAL);	/* XXX EISDIR? */

	/*
	 * tmp_rwlock should have already been called from layers above
	 */
	ASSERT(RW_WRITE_HELD(&tp->tn_rwlock));

	rw_enter(&tp->tn_contents, RW_WRITER);

	if (ioflag & FAPPEND) {
		/*
		 * In append mode start at end of file.
		 */
		uiop->uio_loffset = tp->tn_size;
	}

	error = wrtmp(tm, tp, uiop, cred, ct);

	rw_exit(&tp->tn_contents);

	return (error);
}

/* ARGSUSED */
static int
tmp_ioctl(
	struct vnode *vp,
	int com,
	intptr_t data,
	int flag,
	struct cred *cred,
	int *rvalp,
	caller_context_t *ct)
{
	return (ENOTTY);
}

/* ARGSUSED2 */
static int
tmp_getattr(
	struct vnode *vp,
	struct vattr *vap,
	int flags,
	struct cred *cred,
	caller_context_t *ct)
{
	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
	struct vnode *mvp;
	struct vattr va;
	int attrs = 1;

	/*
	 * A special case to handle the root tnode on a diskless nfs
	 * client that may have had its uid and gid inherited
	 * from an nfs vnode with nobody ownership.  Likely the
	 * root filesystem.  After nfs is fully functional the uid/gid
	 * may be mappable so ask again.
	 * vfsp can't get unmounted because we hold vp.
	 */
	if (vp->v_flag & VROOT &&
	    (mvp = vp->v_vfsp->vfs_vnodecovered) != NULL) {
		mutex_enter(&tp->tn_tlock);
		if (tp->tn_uid == UID_NOBODY || tp->tn_gid == GID_NOBODY) {
			mutex_exit(&tp->tn_tlock);
			bzero(&va, sizeof (struct vattr));
			va.va_mask = AT_UID|AT_GID;
			attrs = VOP_GETATTR(mvp, &va, 0, cred, ct);
		} else {
			mutex_exit(&tp->tn_tlock);
		}
	}
	mutex_enter(&tp->tn_tlock);
	if (attrs == 0) {
		tp->tn_uid = va.va_uid;
		tp->tn_gid = va.va_gid;
	}
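	/*
	 * Copy the attributes cached in the tmpnode out to the caller;
	 * tn_tlock keeps them consistent while we read them.
	 */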
	vap->va_type = vp->v_type;
	vap->va_mode = tp->tn_mode & MODEMASK;
	vap->va_uid = tp->tn_uid;
	vap->va_gid = tp->tn_gid;
	vap->va_fsid = tp->tn_fsid;
	vap->va_nodeid = (ino64_t)tp->tn_nodeid;
	vap->va_nlink = tp->tn_nlink;
	vap->va_size = (u_offset_t)tp->tn_size;
	vap->va_atime = tp->tn_atime;
	vap->va_mtime = tp->tn_mtime;
	vap->va_ctime = tp->tn_ctime;
	vap->va_blksize = PAGESIZE;
	vap->va_rdev = tp->tn_rdev;
	vap->va_seq = tp->tn_seq;

	/*
	 * XXX Holes are not taken into account.  We could take the time to
	 * run through the anon array looking for allocated slots...
	 */
	vap->va_nblocks = (fsblkcnt64_t)btodb(ptob(btopr(vap->va_size)));
	mutex_exit(&tp->tn_tlock);
	return (0);
}

/*ARGSUSED4*/
static int
tmp_setattr(
	struct vnode *vp,
	struct vattr *vap,
	int flags,
	struct cred *cred,
	caller_context_t *ct)
{
	struct tmount *tm = (struct tmount *)VTOTM(vp);
	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
	int error = 0;
	struct vattr *get;
	long mask;

	/*
	 * Cannot set these attributes
	 */
	if ((vap->va_mask & AT_NOSET) || (vap->va_mask & AT_XVATTR))
		return (EINVAL);

	mutex_enter(&tp->tn_tlock);

	get = &tp->tn_attr;
	/*
	 * Change file access modes.  Must be owner or have sufficient
	 * privileges.
	 */
	error = secpolicy_vnode_setattr(cred, vp, vap, get, flags, tmp_taccess,
	    tp);

	if (error)
		goto out;

	mask = vap->va_mask;

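	/*
	 * secpolicy_vnode_setattr() has vetted the request; apply each
	 * attribute selected in va_mask to the cached tmpnode attributes.
	 */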
	if (mask & AT_MODE) {
		get->va_mode &= S_IFMT;
		get->va_mode |= vap->va_mode & ~S_IFMT;
	}

	if (mask & AT_UID)
		get->va_uid = vap->va_uid;
	if (mask & AT_GID)
		get->va_gid = vap->va_gid;
	if (mask & AT_ATIME)
		get->va_atime = vap->va_atime;
	if (mask & AT_MTIME)
		get->va_mtime = vap->va_mtime;

	if (mask & (AT_UID | AT_GID | AT_MODE | AT_MTIME))
		gethrestime(&tp->tn_ctime);

	if (mask & AT_SIZE) {
		ASSERT(vp->v_type != VDIR);

		/* Don't support large files. */
		if (vap->va_size > MAXOFF_T) {
			error = EFBIG;
			goto out;
		}
		mutex_exit(&tp->tn_tlock);

		rw_enter(&tp->tn_rwlock, RW_WRITER);
		rw_enter(&tp->tn_contents, RW_WRITER);
		error = tmpnode_trunc(tm, tp, (ulong_t)vap->va_size);
		rw_exit(&tp->tn_contents);
		rw_exit(&tp->tn_rwlock);

		if (error == 0 && vap->va_size == 0)
			vnevent_truncate(vp, ct);

		goto out1;
	}
out:
	mutex_exit(&tp->tn_tlock);
out1:
	return (error);
}

/* ARGSUSED2 */
static int
tmp_access(
	struct vnode *vp,
	int mode,
	int flags,
	struct cred *cred,
	caller_context_t *ct)
{
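	/*
	 * Defer the permission check to tmp_taccess(); tn_tlock protects
	 * the tmpnode attributes it examines.
	 */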
	struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
	int error;

	mutex_enter(&tp->tn_tlock);
	error = tmp_taccess(tp, mode, cred);
	mutex_exit(&tp->tn_tlock);
	return (error);
}

/* ARGSUSED3 */
static int
tmp_lookup(
	struct vnode *dvp,
	char *nm,
	struct vnode **vpp,
	struct pathname *pnp,
	int flags,
	struct vnode *rdir,
	struct cred *cred,
	caller_context_t *ct,
	int *direntflags,
	pathname_t *realpnp)
{
	struct tmpnode *tp = (struct tmpnode *)VTOTN(dvp);
	struct tmpnode *ntp = NULL;
	int error;


	/* allow cd into @ dir */
	if (flags & LOOKUP_XATTR) {
		struct tmpnode *xdp;
		struct tmount *tm;

		/*
		 * don't allow attributes if the filesystem wasn't
		 * mounted with XATTR support
		 */
		if (!(dvp->v_vfsp->vfs_flag & VFS_XATTR))
			return (EINVAL);

		if (tp->tn_flags & ISXATTR)
			/* No attributes on attributes */
			return (EINVAL);

		rw_enter(&tp->tn_rwlock, RW_WRITER);
		if (tp->tn_xattrdp == NULL) {
			if (!(flags & CREATE_XATTR_DIR)) {
				rw_exit(&tp->tn_rwlock);
				return (ENOENT);
			}

			/*
			 * No attribute directory exists for this
			 * node - create the attr dir as a side effect
			 * of this lookup.
			 */

			/*
			 * Make sure we have adequate permission...
			 */

			if ((error = tmp_taccess(tp, VWRITE, cred)) != 0) {
				rw_exit(&tp->tn_rwlock);
				return (error);
			}

			xdp = tmp_memalloc(sizeof (struct tmpnode),
			    TMP_MUSTHAVE);
			tm = VTOTM(dvp);
			tmpnode_init(tm, xdp, &tp->tn_attr, NULL);
			/*
			 * Fix-up fields unique to attribute directories.
			 */
			xdp->tn_flags = ISXATTR;
			xdp->tn_type = VDIR;
			if (tp->tn_type == VDIR) {
				xdp->tn_mode = tp->tn_attr.va_mode;
			} else {
				xdp->tn_mode = 0700;
				if (tp->tn_attr.va_mode & 0040)
					xdp->tn_mode |= 0750;
				if (tp->tn_attr.va_mode & 0004)
					xdp->tn_mode |= 0705;
			}
			xdp->tn_vnode->v_type = VDIR;
			xdp->tn_vnode->v_flag |= V_XATTRDIR;
			tdirinit(tp, xdp);
			tp->tn_xattrdp = xdp;
		} else {
			VN_HOLD(tp->tn_xattrdp->tn_vnode);
		}
		*vpp = TNTOV(tp->tn_xattrdp);
		rw_exit(&tp->tn_rwlock);
		return (0);
	}

	/*
	 * Null component name is a synonym for directory being searched.
	 */
	if (*nm == '\0') {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}
	ASSERT(tp);

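	/*
	 * Ordinary lookup: tdirlookup() searches the directory for nm and
	 * returns a held tmpnode on success.
	 */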
	error = tdirlookup(tp, nm, &ntp, cred);

	if (error == 0) {
		ASSERT(ntp);
		*vpp = TNTOV(ntp);
		/*
		 * If vnode is a device return special vnode instead
		 */
		if (IS_DEVVP(*vpp)) {
			struct vnode *newvp;

			newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type,
			    cred);
			VN_RELE(*vpp);
			*vpp = newvp;
		}
	}
	TRACE_4(TR_FAC_TMPFS, TR_TMPFS_LOOKUP,
	    "tmpfs lookup:vp %p name %s vpp %p error %d",
	    dvp, nm, vpp, error);
	return (error);
}

/*ARGSUSED7*/
static int
tmp_create(
	struct vnode *dvp,
	char *nm,
	struct vattr *vap,
	enum vcexcl exclusive,
	int mode,
	struct vnode **vpp,
	struct cred *cred,
	int flag,
	caller_context_t *ct,
	vsecattr_t *vsecp)
{
	struct tmpnode *parent;
	struct tmount *tm;
	struct tmpnode *self;
	int error;
	struct tmpnode *oldtp;

again:
	parent = (struct tmpnode *)VTOTN(dvp);
	tm = (struct tmount *)VTOTM(dvp);
	self = NULL;
	error = 0;
	oldtp = NULL;

	/* device files not allowed in ext. attr dirs */
	if ((parent->tn_flags & ISXATTR) &&
	    (vap->va_type == VBLK || vap->va_type == VCHR ||
	    vap->va_type == VFIFO || vap->va_type == VDOOR ||
	    vap->va_type == VSOCK || vap->va_type == VPORT))
		return (EINVAL);

	if (vap->va_type == VREG && (vap->va_mode & VSVTX)) {
		/* Must be privileged to set sticky bit */
		if (secpolicy_vnode_stky_modify(cred))
			vap->va_mode &= ~VSVTX;
	} else if (vap->va_type == VNON) {
		return (EINVAL);
	}

	/*
	 * Null component name is a synonym for directory being searched.
	 */
	if (*nm == '\0') {
		VN_HOLD(dvp);
		oldtp = parent;
	} else {
		error = tdirlookup(parent, nm, &oldtp, cred);
	}

	if (error == 0) {	/* name found */
		boolean_t trunc = B_FALSE;

		ASSERT(oldtp);

		rw_enter(&oldtp->tn_rwlock, RW_WRITER);

		/*
		 * Creating an existing directory is allowed when it is
		 * being opened read-only.
		 */
		if (exclusive == EXCL)
			error = EEXIST;
		else if ((oldtp->tn_type == VDIR) && (mode & VWRITE))
			error = EISDIR;
		else {
			error = tmp_taccess(oldtp, mode, cred);
		}

		if (error) {
			rw_exit(&oldtp->tn_rwlock);
			tmpnode_rele(oldtp);
			return (error);
		}
		*vpp = TNTOV(oldtp);
		if ((*vpp)->v_type == VREG && (vap->va_mask & AT_SIZE) &&
		    vap->va_size == 0) {
			rw_enter(&oldtp->tn_contents, RW_WRITER);
			(void) tmpnode_trunc(tm, oldtp, 0);
			rw_exit(&oldtp->tn_contents);
			trunc = B_TRUE;
		}
		rw_exit(&oldtp->tn_rwlock);
		if (IS_DEVVP(*vpp)) {
			struct vnode *newvp;

			newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type,
			    cred);
			VN_RELE(*vpp);
			if (newvp == NULL) {
				return (ENOSYS);
			}
			*vpp = newvp;
		}

		if (trunc)
			vnevent_create(*vpp, ct);

		return (0);
	}

	if (error != ENOENT)
		return (error);

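	/*
	 * Name not found: create the entry.  tdirenter() allocates and
	 * initializes the new tmpnode and links it into the parent
	 * directory.
	 */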
	rw_enter(&parent->tn_rwlock, RW_WRITER);
	error = tdirenter(tm, parent, nm, DE_CREATE,
	    (struct tmpnode *)NULL, (struct tmpnode *)NULL,
	    vap, &self, cred, ct);
	rw_exit(&parent->tn_rwlock);

	if (error) {
		if (self)
			tmpnode_rele(self);

		if (error == EEXIST) {
			/*
			 * This means that the file was created sometime
			 * after we checked and did not find it and when
			 * we went to create it.
			 * Since creat() is supposed to truncate a file
			 * that already exists go back to the beginning
			 * of the function. This time we will find it
			 * and go down the tmp_trunc() path
			 */
			goto again;
		}
		return (error);
	}

	*vpp = TNTOV(self);

	if (!error && IS_DEVVP(*vpp)) {
		struct vnode *newvp;

		newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cred);
		VN_RELE(*vpp);
		if (newvp == NULL)
			return (ENOSYS);
		*vpp = newvp;
	}
	TRACE_3(TR_FAC_TMPFS, TR_TMPFS_CREATE,
	    "tmpfs create:dvp %p nm %s vpp %p", dvp, nm, vpp);
	return (0);
}

/* ARGSUSED3 */
static int
tmp_remove(
	struct vnode *dvp,
	char *nm,
	struct cred *cred,
	caller_context_t *ct,
	int flags)
{
	struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
	int error;
	struct tmpnode *tp = NULL;

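	/*
	 * Look up the entry to be removed, then take the parent and target
	 * locks before deleting the directory entry.
	 */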
10977c478bd9Sstevel@tonic-gate error = tdirlookup(parent, nm, &tp, cred);
10987c478bd9Sstevel@tonic-gate if (error)
10997c478bd9Sstevel@tonic-gate return (error);
11007c478bd9Sstevel@tonic-gate
11017c478bd9Sstevel@tonic-gate ASSERT(tp);
11027c478bd9Sstevel@tonic-gate rw_enter(&parent->tn_rwlock, RW_WRITER);
11037c478bd9Sstevel@tonic-gate rw_enter(&tp->tn_rwlock, RW_WRITER);
11047c478bd9Sstevel@tonic-gate
11057c478bd9Sstevel@tonic-gate if (tp->tn_type != VDIR ||
11067c478bd9Sstevel@tonic-gate (error = secpolicy_fs_linkdir(cred, dvp->v_vfsp)) == 0)
11077c478bd9Sstevel@tonic-gate error = tdirdelete(parent, tp, nm, DR_REMOVE, cred);
11087c478bd9Sstevel@tonic-gate
11097c478bd9Sstevel@tonic-gate rw_exit(&tp->tn_rwlock);
11107c478bd9Sstevel@tonic-gate rw_exit(&parent->tn_rwlock);
1111da6c28aaSamw vnevent_remove(TNTOV(tp), dvp, nm, ct);
11127c478bd9Sstevel@tonic-gate tmpnode_rele(tp);
11137c478bd9Sstevel@tonic-gate
11147c478bd9Sstevel@tonic-gate TRACE_3(TR_FAC_TMPFS, TR_TMPFS_REMOVE,
11157c478bd9Sstevel@tonic-gate "tmpfs remove:dvp %p nm %s error %d", dvp, nm, error);
11167c478bd9Sstevel@tonic-gate return (error);
11177c478bd9Sstevel@tonic-gate }
11187c478bd9Sstevel@tonic-gate
1119da6c28aaSamw /* ARGSUSED4 */
11207c478bd9Sstevel@tonic-gate static int
tmp_link(struct vnode * dvp,struct vnode * srcvp,char * tnm,struct cred * cred,caller_context_t * ct,int flags)1121da6c28aaSamw tmp_link(
1122da6c28aaSamw struct vnode *dvp,
1123da6c28aaSamw struct vnode *srcvp,
1124da6c28aaSamw char *tnm,
1125da6c28aaSamw struct cred *cred,
1126da6c28aaSamw caller_context_t *ct,
1127da6c28aaSamw int flags)
11287c478bd9Sstevel@tonic-gate {
11297c478bd9Sstevel@tonic-gate struct tmpnode *parent;
11307c478bd9Sstevel@tonic-gate struct tmpnode *from;
11317c478bd9Sstevel@tonic-gate struct tmount *tm = (struct tmount *)VTOTM(dvp);
11327c478bd9Sstevel@tonic-gate int error;
11337c478bd9Sstevel@tonic-gate struct tmpnode *found = NULL;
11347c478bd9Sstevel@tonic-gate struct vnode *realvp;
11357c478bd9Sstevel@tonic-gate
1136da6c28aaSamw if (VOP_REALVP(srcvp, &realvp, ct) == 0)
11377c478bd9Sstevel@tonic-gate srcvp = realvp;
11387c478bd9Sstevel@tonic-gate
11397c478bd9Sstevel@tonic-gate parent = (struct tmpnode *)VTOTN(dvp);
11407c478bd9Sstevel@tonic-gate from = (struct tmpnode *)VTOTN(srcvp);
11417c478bd9Sstevel@tonic-gate
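	/*
	 * Hard links to directories require fs_linkdir privilege, and
	 * linking to a file owned by someone else requires the basic
	 * link privilege.
	 */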
11427c478bd9Sstevel@tonic-gate if ((srcvp->v_type == VDIR &&
11437c478bd9Sstevel@tonic-gate secpolicy_fs_linkdir(cred, dvp->v_vfsp)) ||
11447c478bd9Sstevel@tonic-gate (from->tn_uid != crgetuid(cred) && secpolicy_basic_link(cred)))
11457c478bd9Sstevel@tonic-gate return (EPERM);
11467c478bd9Sstevel@tonic-gate
11477c478bd9Sstevel@tonic-gate /*
11487c478bd9Sstevel@tonic-gate  * Make sure the link for extended attributes is valid.
11497c478bd9Sstevel@tonic-gate  * We only support hard linking of xattrs in an xattrdir to an xattrdir.
11507c478bd9Sstevel@tonic-gate */
11517c478bd9Sstevel@tonic-gate if ((from->tn_flags & ISXATTR) != (parent->tn_flags & ISXATTR))
11527c478bd9Sstevel@tonic-gate return (EINVAL);
11537c478bd9Sstevel@tonic-gate
11547c478bd9Sstevel@tonic-gate error = tdirlookup(parent, tnm, &found, cred);
11557c478bd9Sstevel@tonic-gate if (error == 0) {
11567c478bd9Sstevel@tonic-gate ASSERT(found);
11577c478bd9Sstevel@tonic-gate tmpnode_rele(found);
11587c478bd9Sstevel@tonic-gate return (EEXIST);
11597c478bd9Sstevel@tonic-gate }
11607c478bd9Sstevel@tonic-gate
11617c478bd9Sstevel@tonic-gate if (error != ENOENT)
11627c478bd9Sstevel@tonic-gate return (error);
11637c478bd9Sstevel@tonic-gate
11647c478bd9Sstevel@tonic-gate rw_enter(&parent->tn_rwlock, RW_WRITER);
11657c478bd9Sstevel@tonic-gate error = tdirenter(tm, parent, tnm, DE_LINK, (struct tmpnode *)NULL,
1166da6c28aaSamw from, NULL, (struct tmpnode **)NULL, cred, ct);
11677c478bd9Sstevel@tonic-gate rw_exit(&parent->tn_rwlock);
1168df2381bfSpraks if (error == 0) {
1169da6c28aaSamw vnevent_link(srcvp, ct);
1170df2381bfSpraks }
11717c478bd9Sstevel@tonic-gate return (error);
11727c478bd9Sstevel@tonic-gate }
11737c478bd9Sstevel@tonic-gate
1174da6c28aaSamw /* ARGSUSED5 */
11757c478bd9Sstevel@tonic-gate static int
11767c478bd9Sstevel@tonic-gate tmp_rename(
11777c478bd9Sstevel@tonic-gate struct vnode *odvp, /* source parent vnode */
11787c478bd9Sstevel@tonic-gate char *onm, /* source name */
11797c478bd9Sstevel@tonic-gate struct vnode *ndvp, /* destination parent vnode */
11807c478bd9Sstevel@tonic-gate char *nnm, /* destination name */
1181da6c28aaSamw struct cred *cred,
1182da6c28aaSamw caller_context_t *ct,
1183da6c28aaSamw int flags)
11847c478bd9Sstevel@tonic-gate {
11857c478bd9Sstevel@tonic-gate struct tmpnode *fromparent;
11867c478bd9Sstevel@tonic-gate struct tmpnode *toparent;
11877c478bd9Sstevel@tonic-gate struct tmpnode *fromtp = NULL; /* source tmpnode */
11887c478bd9Sstevel@tonic-gate struct tmount *tm = (struct tmount *)VTOTM(odvp);
11897c478bd9Sstevel@tonic-gate int error;
11907c478bd9Sstevel@tonic-gate int samedir = 0; /* set if odvp == ndvp */
11917c478bd9Sstevel@tonic-gate struct vnode *realvp;
11927c478bd9Sstevel@tonic-gate
1193da6c28aaSamw if (VOP_REALVP(ndvp, &realvp, ct) == 0)
11947c478bd9Sstevel@tonic-gate ndvp = realvp;
11957c478bd9Sstevel@tonic-gate
11967c478bd9Sstevel@tonic-gate fromparent = (struct tmpnode *)VTOTN(odvp);
11977c478bd9Sstevel@tonic-gate toparent = (struct tmpnode *)VTOTN(ndvp);
11987c478bd9Sstevel@tonic-gate
11997c478bd9Sstevel@tonic-gate if ((fromparent->tn_flags & ISXATTR) != (toparent->tn_flags & ISXATTR))
12007c478bd9Sstevel@tonic-gate return (EINVAL);
12017c478bd9Sstevel@tonic-gate
12027c478bd9Sstevel@tonic-gate mutex_enter(&tm->tm_renamelck);
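	/*
	 * tm_renamelck serializes renames within this mount, so the source
	 * entry cannot be renamed away between the lookup below and the
	 * directory updates that follow.
	 */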
12037c478bd9Sstevel@tonic-gate
12047c478bd9Sstevel@tonic-gate /*
12057c478bd9Sstevel@tonic-gate * Look up tmpnode of file we're supposed to rename.
12067c478bd9Sstevel@tonic-gate */
12077c478bd9Sstevel@tonic-gate error = tdirlookup(fromparent, onm, &fromtp, cred);
12087c478bd9Sstevel@tonic-gate if (error) {
12097c478bd9Sstevel@tonic-gate mutex_exit(&tm->tm_renamelck);
12107c478bd9Sstevel@tonic-gate return (error);
12117c478bd9Sstevel@tonic-gate }
12127c478bd9Sstevel@tonic-gate
12137c478bd9Sstevel@tonic-gate /*
12147c478bd9Sstevel@tonic-gate * Make sure we can delete the old (source) entry. This
12157c478bd9Sstevel@tonic-gate * requires write permission on the containing directory. If
12167c478bd9Sstevel@tonic-gate  * that directory is "sticky", it requires further checks.
12177c478bd9Sstevel@tonic-gate */
12187c478bd9Sstevel@tonic-gate if (((error = tmp_taccess(fromparent, VWRITE, cred)) != 0) ||
12197c478bd9Sstevel@tonic-gate (error = tmp_sticky_remove_access(fromparent, fromtp, cred)) != 0)
12207c478bd9Sstevel@tonic-gate goto done;
12217c478bd9Sstevel@tonic-gate
12227c478bd9Sstevel@tonic-gate /*
12237c478bd9Sstevel@tonic-gate * Check for renaming to or from '.' or '..' or that
12247c478bd9Sstevel@tonic-gate * fromtp == fromparent
12257c478bd9Sstevel@tonic-gate */
12267c478bd9Sstevel@tonic-gate if ((onm[0] == '.' &&
12277c478bd9Sstevel@tonic-gate (onm[1] == '\0' || (onm[1] == '.' && onm[2] == '\0'))) ||
12287c478bd9Sstevel@tonic-gate (nnm[0] == '.' &&
12297c478bd9Sstevel@tonic-gate (nnm[1] == '\0' || (nnm[1] == '.' && nnm[2] == '\0'))) ||
12307c478bd9Sstevel@tonic-gate (fromparent == fromtp)) {
12317c478bd9Sstevel@tonic-gate error = EINVAL;
12327c478bd9Sstevel@tonic-gate goto done;
12337c478bd9Sstevel@tonic-gate }
12347c478bd9Sstevel@tonic-gate
12357c478bd9Sstevel@tonic-gate samedir = (fromparent == toparent);
12367c478bd9Sstevel@tonic-gate /*
12377c478bd9Sstevel@tonic-gate * Make sure we can search and rename into the new
12387c478bd9Sstevel@tonic-gate * (destination) directory.
12397c478bd9Sstevel@tonic-gate */
12407c478bd9Sstevel@tonic-gate if (!samedir) {
12417c478bd9Sstevel@tonic-gate error = tmp_taccess(toparent, VEXEC|VWRITE, cred);
12427c478bd9Sstevel@tonic-gate if (error)
12437c478bd9Sstevel@tonic-gate goto done;
12447c478bd9Sstevel@tonic-gate }
12457c478bd9Sstevel@tonic-gate
12467c478bd9Sstevel@tonic-gate /*
12477c478bd9Sstevel@tonic-gate * Link source to new target
12487c478bd9Sstevel@tonic-gate */
12497c478bd9Sstevel@tonic-gate rw_enter(&toparent->tn_rwlock, RW_WRITER);
12507c478bd9Sstevel@tonic-gate error = tdirenter(tm, toparent, nnm, DE_RENAME,
12517c478bd9Sstevel@tonic-gate fromparent, fromtp, (struct vattr *)NULL,
1252da6c28aaSamw (struct tmpnode **)NULL, cred, ct);
12537c478bd9Sstevel@tonic-gate rw_exit(&toparent->tn_rwlock);
12547c478bd9Sstevel@tonic-gate
12557c478bd9Sstevel@tonic-gate if (error) {
12567c478bd9Sstevel@tonic-gate /*
12577c478bd9Sstevel@tonic-gate * ESAME isn't really an error; it indicates that the
12587c478bd9Sstevel@tonic-gate * operation should not be done because the source and target
12597c478bd9Sstevel@tonic-gate * are the same file, but that no error should be reported.
12607c478bd9Sstevel@tonic-gate */
12617c478bd9Sstevel@tonic-gate if (error == ESAME)
12627c478bd9Sstevel@tonic-gate error = 0;
12637c478bd9Sstevel@tonic-gate goto done;
12647c478bd9Sstevel@tonic-gate }
1265da6c28aaSamw vnevent_rename_src(TNTOV(fromtp), odvp, onm, ct);
1266df2381bfSpraks
1267df2381bfSpraks /*
1268df2381bfSpraks  * Notify the target directory if it is not the same as
1269df2381bfSpraks  * the source directory.
1270df2381bfSpraks */
1271df2381bfSpraks if (ndvp != odvp) {
1272da6c28aaSamw vnevent_rename_dest_dir(ndvp, ct);
1273df2381bfSpraks }
12747c478bd9Sstevel@tonic-gate
12757c478bd9Sstevel@tonic-gate /*
12767c478bd9Sstevel@tonic-gate * Unlink from source.
12777c478bd9Sstevel@tonic-gate */
12787c478bd9Sstevel@tonic-gate rw_enter(&fromparent->tn_rwlock, RW_WRITER);
12797c478bd9Sstevel@tonic-gate rw_enter(&fromtp->tn_rwlock, RW_WRITER);
12807c478bd9Sstevel@tonic-gate
12817c478bd9Sstevel@tonic-gate error = tdirdelete(fromparent, fromtp, onm, DR_RENAME, cred);
12827c478bd9Sstevel@tonic-gate
12837c478bd9Sstevel@tonic-gate /*
12847c478bd9Sstevel@tonic-gate * The following handles the case where our source tmpnode was
12857c478bd9Sstevel@tonic-gate * removed before we got to it.
12867c478bd9Sstevel@tonic-gate *
12877c478bd9Sstevel@tonic-gate * XXX We should also cleanup properly in the case where tdirdelete
12887c478bd9Sstevel@tonic-gate * fails for some other reason. Currently this case shouldn't happen.
12897c478bd9Sstevel@tonic-gate * (see 1184991).
12907c478bd9Sstevel@tonic-gate */
12917c478bd9Sstevel@tonic-gate if (error == ENOENT)
12927c478bd9Sstevel@tonic-gate error = 0;
12937c478bd9Sstevel@tonic-gate
12947c478bd9Sstevel@tonic-gate rw_exit(&fromtp->tn_rwlock);
12957c478bd9Sstevel@tonic-gate rw_exit(&fromparent->tn_rwlock);
12967c478bd9Sstevel@tonic-gate done:
12977c478bd9Sstevel@tonic-gate tmpnode_rele(fromtp);
12987c478bd9Sstevel@tonic-gate mutex_exit(&tm->tm_renamelck);
12997c478bd9Sstevel@tonic-gate
13007c478bd9Sstevel@tonic-gate TRACE_5(TR_FAC_TMPFS, TR_TMPFS_RENAME,
1301c6f08383Sjj204856 "tmpfs rename:ovp %p onm %s nvp %p nnm %s error %d", odvp, onm,
1302c6f08383Sjj204856 ndvp, nnm, error);
13037c478bd9Sstevel@tonic-gate return (error);
13047c478bd9Sstevel@tonic-gate }
13057c478bd9Sstevel@tonic-gate
1306da6c28aaSamw /* ARGSUSED5 */
13077c478bd9Sstevel@tonic-gate static int
13087c478bd9Sstevel@tonic-gate tmp_mkdir(
13097c478bd9Sstevel@tonic-gate struct vnode *dvp,
13107c478bd9Sstevel@tonic-gate char *nm,
13117c478bd9Sstevel@tonic-gate struct vattr *va,
13127c478bd9Sstevel@tonic-gate struct vnode **vpp,
1313da6c28aaSamw struct cred *cred,
1314da6c28aaSamw caller_context_t *ct,
1315da6c28aaSamw int flags,
1316da6c28aaSamw vsecattr_t *vsecp)
13177c478bd9Sstevel@tonic-gate {
13187c478bd9Sstevel@tonic-gate struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
13197c478bd9Sstevel@tonic-gate struct tmpnode *self = NULL;
13207c478bd9Sstevel@tonic-gate struct tmount *tm = (struct tmount *)VTOTM(dvp);
13217c478bd9Sstevel@tonic-gate int error;
13227c478bd9Sstevel@tonic-gate
13237c478bd9Sstevel@tonic-gate /* no new dirs allowed in xattr dirs */
13247c478bd9Sstevel@tonic-gate if (parent->tn_flags & ISXATTR)
13257c478bd9Sstevel@tonic-gate return (EINVAL);
13267c478bd9Sstevel@tonic-gate
13277c478bd9Sstevel@tonic-gate /*
13287c478bd9Sstevel@tonic-gate * Might be dangling directory. Catch it here,
13297c478bd9Sstevel@tonic-gate * because a ENOENT return from tdirlookup() is
13307c478bd9Sstevel@tonic-gate * an "o.k. return".
13317c478bd9Sstevel@tonic-gate */
13327c478bd9Sstevel@tonic-gate if (parent->tn_nlink == 0)
13337c478bd9Sstevel@tonic-gate return (ENOENT);
13347c478bd9Sstevel@tonic-gate
13357c478bd9Sstevel@tonic-gate error = tdirlookup(parent, nm, &self, cred);
13367c478bd9Sstevel@tonic-gate if (error == 0) {
13377c478bd9Sstevel@tonic-gate ASSERT(self);
13387c478bd9Sstevel@tonic-gate tmpnode_rele(self);
13397c478bd9Sstevel@tonic-gate return (EEXIST);
13407c478bd9Sstevel@tonic-gate }
13417c478bd9Sstevel@tonic-gate if (error != ENOENT)
13427c478bd9Sstevel@tonic-gate return (error);
13437c478bd9Sstevel@tonic-gate
13447c478bd9Sstevel@tonic-gate rw_enter(&parent->tn_rwlock, RW_WRITER);
1345c6f08383Sjj204856 error = tdirenter(tm, parent, nm, DE_MKDIR, (struct tmpnode *)NULL,
1346c6f08383Sjj204856 (struct tmpnode *)NULL, va, &self, cred, ct);
13477c478bd9Sstevel@tonic-gate if (error) {
13487c478bd9Sstevel@tonic-gate rw_exit(&parent->tn_rwlock);
13497c478bd9Sstevel@tonic-gate if (self)
13507c478bd9Sstevel@tonic-gate tmpnode_rele(self);
13517c478bd9Sstevel@tonic-gate return (error);
13527c478bd9Sstevel@tonic-gate }
13537c478bd9Sstevel@tonic-gate rw_exit(&parent->tn_rwlock);
13547c478bd9Sstevel@tonic-gate *vpp = TNTOV(self);
13557c478bd9Sstevel@tonic-gate return (0);
13567c478bd9Sstevel@tonic-gate }
13577c478bd9Sstevel@tonic-gate
1358da6c28aaSamw /* ARGSUSED4 */
13597c478bd9Sstevel@tonic-gate static int
13607c478bd9Sstevel@tonic-gate tmp_rmdir(
13617c478bd9Sstevel@tonic-gate struct vnode *dvp,
13627c478bd9Sstevel@tonic-gate char *nm,
13637c478bd9Sstevel@tonic-gate struct vnode *cdir,
1364da6c28aaSamw struct cred *cred,
1365da6c28aaSamw caller_context_t *ct,
1366da6c28aaSamw int flags)
13677c478bd9Sstevel@tonic-gate {
13687c478bd9Sstevel@tonic-gate struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
13697c478bd9Sstevel@tonic-gate struct tmpnode *self = NULL;
13707c478bd9Sstevel@tonic-gate struct vnode *vp;
13717c478bd9Sstevel@tonic-gate int error = 0;
13727c478bd9Sstevel@tonic-gate
13737c478bd9Sstevel@tonic-gate /*
13747c478bd9Sstevel@tonic-gate * Return error when removing . and ..
13757c478bd9Sstevel@tonic-gate */
13767c478bd9Sstevel@tonic-gate if (strcmp(nm, ".") == 0)
13777c478bd9Sstevel@tonic-gate return (EINVAL);
13787c478bd9Sstevel@tonic-gate if (strcmp(nm, "..") == 0)
13797c478bd9Sstevel@tonic-gate return (EEXIST); /* Should be ENOTEMPTY */
13807c478bd9Sstevel@tonic-gate error = tdirlookup(parent, nm, &self, cred);
13817c478bd9Sstevel@tonic-gate if (error)
13827c478bd9Sstevel@tonic-gate return (error);
13837c478bd9Sstevel@tonic-gate
13847c478bd9Sstevel@tonic-gate rw_enter(&parent->tn_rwlock, RW_WRITER);
13857c478bd9Sstevel@tonic-gate rw_enter(&self->tn_rwlock, RW_WRITER);
13867c478bd9Sstevel@tonic-gate
13877c478bd9Sstevel@tonic-gate vp = TNTOV(self);
13887c478bd9Sstevel@tonic-gate if (vp == dvp || vp == cdir) {
13897c478bd9Sstevel@tonic-gate error = EINVAL;
13907c478bd9Sstevel@tonic-gate goto done1;
13917c478bd9Sstevel@tonic-gate }
13927c478bd9Sstevel@tonic-gate if (self->tn_type != VDIR) {
13937c478bd9Sstevel@tonic-gate error = ENOTDIR;
13947c478bd9Sstevel@tonic-gate goto done1;
13957c478bd9Sstevel@tonic-gate }
13967c478bd9Sstevel@tonic-gate
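	/*
	 * An empty directory has a link count of 2 (the entry in its
	 * parent plus "."); a higher count means it still contains
	 * subdirectories and cannot be removed.
	 */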
13977c478bd9Sstevel@tonic-gate mutex_enter(&self->tn_tlock);
13987c478bd9Sstevel@tonic-gate if (self->tn_nlink > 2) {
13997c478bd9Sstevel@tonic-gate mutex_exit(&self->tn_tlock);
14007c478bd9Sstevel@tonic-gate error = EEXIST;
14017c478bd9Sstevel@tonic-gate goto done1;
14027c478bd9Sstevel@tonic-gate }
14037c478bd9Sstevel@tonic-gate mutex_exit(&self->tn_tlock);
14047c478bd9Sstevel@tonic-gate
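	/*
	 * Take the vfs lock and make sure nothing is mounted on this
	 * directory before it goes away.
	 */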
1405d5dbd18dSbatschul if (vn_vfswlock(vp)) {
14067c478bd9Sstevel@tonic-gate error = EBUSY;
14077c478bd9Sstevel@tonic-gate goto done1;
14087c478bd9Sstevel@tonic-gate }
14097c478bd9Sstevel@tonic-gate if (vn_mountedvfs(vp) != NULL) {
14107c478bd9Sstevel@tonic-gate error = EBUSY;
14117c478bd9Sstevel@tonic-gate goto done;
14127c478bd9Sstevel@tonic-gate }
14137c478bd9Sstevel@tonic-gate
14147c478bd9Sstevel@tonic-gate /*
14157c478bd9Sstevel@tonic-gate * Check for an empty directory
14167c478bd9Sstevel@tonic-gate * i.e. only includes entries for "." and ".."
14177c478bd9Sstevel@tonic-gate */
14187c478bd9Sstevel@tonic-gate if (self->tn_dirents > 2) {
14197c478bd9Sstevel@tonic-gate error = EEXIST; /* SIGH should be ENOTEMPTY */
14207c478bd9Sstevel@tonic-gate /*
14217c478bd9Sstevel@tonic-gate * Update atime because checking tn_dirents is logically
14227c478bd9Sstevel@tonic-gate * equivalent to reading the directory
14237c478bd9Sstevel@tonic-gate */
14247c478bd9Sstevel@tonic-gate gethrestime(&self->tn_atime);
14257c478bd9Sstevel@tonic-gate goto done;
14267c478bd9Sstevel@tonic-gate }
14277c478bd9Sstevel@tonic-gate
14287c478bd9Sstevel@tonic-gate error = tdirdelete(parent, self, nm, DR_RMDIR, cred);
14297c478bd9Sstevel@tonic-gate done:
14307c478bd9Sstevel@tonic-gate vn_vfsunlock(vp);
14317c478bd9Sstevel@tonic-gate done1:
14327c478bd9Sstevel@tonic-gate rw_exit(&self->tn_rwlock);
14337c478bd9Sstevel@tonic-gate rw_exit(&parent->tn_rwlock);
1434da6c28aaSamw vnevent_rmdir(TNTOV(self), dvp, nm, ct);
14357c478bd9Sstevel@tonic-gate tmpnode_rele(self);
14367c478bd9Sstevel@tonic-gate
14377c478bd9Sstevel@tonic-gate return (error);
14387c478bd9Sstevel@tonic-gate }
14397c478bd9Sstevel@tonic-gate
14407c478bd9Sstevel@tonic-gate /* ARGSUSED2 */
14417c478bd9Sstevel@tonic-gate static int
1442da6c28aaSamw tmp_readdir(
1443da6c28aaSamw struct vnode *vp,
1444da6c28aaSamw struct uio *uiop,
1445da6c28aaSamw struct cred *cred,
1446da6c28aaSamw int *eofp,
1447da6c28aaSamw caller_context_t *ct,
1448da6c28aaSamw int flags)
14497c478bd9Sstevel@tonic-gate {
14507c478bd9Sstevel@tonic-gate struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
14517c478bd9Sstevel@tonic-gate struct tdirent *tdp;
14527c478bd9Sstevel@tonic-gate int error = 0;
14537c478bd9Sstevel@tonic-gate size_t namelen;
14547c478bd9Sstevel@tonic-gate struct dirent64 *dp;
14557c478bd9Sstevel@tonic-gate ulong_t offset;
14567c478bd9Sstevel@tonic-gate ulong_t total_bytes_wanted;
14577c478bd9Sstevel@tonic-gate long outcount = 0;
14587c478bd9Sstevel@tonic-gate long bufsize;
14597c478bd9Sstevel@tonic-gate int reclen;
14607c478bd9Sstevel@tonic-gate caddr_t outbuf;
14617c478bd9Sstevel@tonic-gate
14627c478bd9Sstevel@tonic-gate if (uiop->uio_loffset >= MAXOFF_T) {
14637c478bd9Sstevel@tonic-gate if (eofp)
14647c478bd9Sstevel@tonic-gate *eofp = 1;
14657c478bd9Sstevel@tonic-gate return (0);
14667c478bd9Sstevel@tonic-gate }
14677c478bd9Sstevel@tonic-gate /*
14687c478bd9Sstevel@tonic-gate  * assuming the system call has already called tmp_rwlock()
14697c478bd9Sstevel@tonic-gate */
14707c478bd9Sstevel@tonic-gate ASSERT(RW_READ_HELD(&tp->tn_rwlock));
14717c478bd9Sstevel@tonic-gate
14727c478bd9Sstevel@tonic-gate if (uiop->uio_iovcnt != 1)
14737c478bd9Sstevel@tonic-gate return (EINVAL);
14747c478bd9Sstevel@tonic-gate
14757c478bd9Sstevel@tonic-gate if (vp->v_type != VDIR)
14767c478bd9Sstevel@tonic-gate return (ENOTDIR);
14777c478bd9Sstevel@tonic-gate
14787c478bd9Sstevel@tonic-gate /*
14797c478bd9Sstevel@tonic-gate * There's a window here where someone could have removed
14807c478bd9Sstevel@tonic-gate * all the entries in the directory after we put a hold on the
14817c478bd9Sstevel@tonic-gate * vnode but before we grabbed the rwlock. Just return.
14827c478bd9Sstevel@tonic-gate */
14837c478bd9Sstevel@tonic-gate if (tp->tn_dir == NULL) {
14847c478bd9Sstevel@tonic-gate if (tp->tn_nlink) {
14857c478bd9Sstevel@tonic-gate panic("empty directory 0x%p", (void *)tp);
14867c478bd9Sstevel@tonic-gate /*NOTREACHED*/
14877c478bd9Sstevel@tonic-gate }
14887c478bd9Sstevel@tonic-gate return (0);
14897c478bd9Sstevel@tonic-gate }
14907c478bd9Sstevel@tonic-gate
14917c478bd9Sstevel@tonic-gate /*
14927c478bd9Sstevel@tonic-gate * Get space for multiple directory entries
14937c478bd9Sstevel@tonic-gate */
14947c478bd9Sstevel@tonic-gate total_bytes_wanted = uiop->uio_iov->iov_len;
14957c478bd9Sstevel@tonic-gate bufsize = total_bytes_wanted + sizeof (struct dirent64);
14967c478bd9Sstevel@tonic-gate outbuf = kmem_alloc(bufsize, KM_SLEEP);
14977c478bd9Sstevel@tonic-gate
14987c478bd9Sstevel@tonic-gate dp = (struct dirent64 *)outbuf;
14997c478bd9Sstevel@tonic-gate
15007c478bd9Sstevel@tonic-gate
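	/*
	 * Walk the in-core tdirent list, copying out every entry whose
	 * offset is at or beyond uio_offset.  d_off is set to the entry's
	 * offset + 1 so a later readdir resumes just past it.
	 */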
15017c478bd9Sstevel@tonic-gate offset = 0;
15027c478bd9Sstevel@tonic-gate tdp = tp->tn_dir;
15037c478bd9Sstevel@tonic-gate while (tdp) {
15047c478bd9Sstevel@tonic-gate namelen = strlen(tdp->td_name); /* no +1 needed */
15057c478bd9Sstevel@tonic-gate offset = tdp->td_offset;
15067c478bd9Sstevel@tonic-gate if (offset >= uiop->uio_offset) {
15077c478bd9Sstevel@tonic-gate reclen = (int)DIRENT64_RECLEN(namelen);
15087c478bd9Sstevel@tonic-gate if (outcount + reclen > total_bytes_wanted) {
15097c478bd9Sstevel@tonic-gate if (!outcount)
15107c478bd9Sstevel@tonic-gate /*
15117c478bd9Sstevel@tonic-gate * Buffer too small for any entries.
15127c478bd9Sstevel@tonic-gate */
15137c478bd9Sstevel@tonic-gate error = EINVAL;
15147c478bd9Sstevel@tonic-gate break;
15157c478bd9Sstevel@tonic-gate }
15167c478bd9Sstevel@tonic-gate ASSERT(tdp->td_tmpnode != NULL);
15177c478bd9Sstevel@tonic-gate
15187c478bd9Sstevel@tonic-gate /* use strncpy(9f) to zero out uninitialized bytes */
15197c478bd9Sstevel@tonic-gate
15207c478bd9Sstevel@tonic-gate (void) strncpy(dp->d_name, tdp->td_name,
15217c478bd9Sstevel@tonic-gate DIRENT64_NAMELEN(reclen));
15227c478bd9Sstevel@tonic-gate dp->d_reclen = (ushort_t)reclen;
15237c478bd9Sstevel@tonic-gate dp->d_ino = (ino64_t)tdp->td_tmpnode->tn_nodeid;
15247c478bd9Sstevel@tonic-gate dp->d_off = (offset_t)tdp->td_offset + 1;
15257c478bd9Sstevel@tonic-gate dp = (struct dirent64 *)
15267c478bd9Sstevel@tonic-gate ((uintptr_t)dp + dp->d_reclen);
15277c478bd9Sstevel@tonic-gate outcount += reclen;
15287c478bd9Sstevel@tonic-gate ASSERT(outcount <= bufsize);
15297c478bd9Sstevel@tonic-gate }
15307c478bd9Sstevel@tonic-gate tdp = tdp->td_next;
15317c478bd9Sstevel@tonic-gate }
15327c478bd9Sstevel@tonic-gate
15337c478bd9Sstevel@tonic-gate if (!error)
15347c478bd9Sstevel@tonic-gate error = uiomove(outbuf, outcount, UIO_READ, uiop);
15357c478bd9Sstevel@tonic-gate
15367c478bd9Sstevel@tonic-gate if (!error) {
15377c478bd9Sstevel@tonic-gate /* If we reached the end of the list our offset */
15387c478bd9Sstevel@tonic-gate /* should now be just past the end. */
15397c478bd9Sstevel@tonic-gate if (!tdp) {
15407c478bd9Sstevel@tonic-gate offset += 1;
15417c478bd9Sstevel@tonic-gate if (eofp)
15427c478bd9Sstevel@tonic-gate *eofp = 1;
15437c478bd9Sstevel@tonic-gate } else if (eofp)
15447c478bd9Sstevel@tonic-gate *eofp = 0;
15457c478bd9Sstevel@tonic-gate uiop->uio_offset = offset;
15467c478bd9Sstevel@tonic-gate }
15477c478bd9Sstevel@tonic-gate gethrestime(&tp->tn_atime);
15487c478bd9Sstevel@tonic-gate kmem_free(outbuf, bufsize);
15497c478bd9Sstevel@tonic-gate return (error);
15507c478bd9Sstevel@tonic-gate }
15517c478bd9Sstevel@tonic-gate
1552da6c28aaSamw /* ARGSUSED5 */
15537c478bd9Sstevel@tonic-gate static int
15547c478bd9Sstevel@tonic-gate tmp_symlink(
15557c478bd9Sstevel@tonic-gate struct vnode *dvp,
15567c478bd9Sstevel@tonic-gate char *lnm,
15577c478bd9Sstevel@tonic-gate struct vattr *tva,
15587c478bd9Sstevel@tonic-gate char *tnm,
1559da6c28aaSamw struct cred *cred,
1560da6c28aaSamw caller_context_t *ct,
1561da6c28aaSamw int flags)
15627c478bd9Sstevel@tonic-gate {
15637c478bd9Sstevel@tonic-gate struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
15647c478bd9Sstevel@tonic-gate struct tmpnode *self = (struct tmpnode *)NULL;
15657c478bd9Sstevel@tonic-gate struct tmount *tm = (struct tmount *)VTOTM(dvp);
15667c478bd9Sstevel@tonic-gate char *cp = NULL;
15677c478bd9Sstevel@tonic-gate int error;
15687c478bd9Sstevel@tonic-gate size_t len;
15697c478bd9Sstevel@tonic-gate
15707c478bd9Sstevel@tonic-gate /* no symlinks allowed to files in xattr dirs */
15717c478bd9Sstevel@tonic-gate if (parent->tn_flags & ISXATTR)
15727c478bd9Sstevel@tonic-gate return (EINVAL);
15737c478bd9Sstevel@tonic-gate
15747c478bd9Sstevel@tonic-gate error = tdirlookup(parent, lnm, &self, cred);
15757c478bd9Sstevel@tonic-gate if (error == 0) {
15767c478bd9Sstevel@tonic-gate /*
15777c478bd9Sstevel@tonic-gate * The entry already exists
15787c478bd9Sstevel@tonic-gate */
15797c478bd9Sstevel@tonic-gate tmpnode_rele(self);
15807c478bd9Sstevel@tonic-gate return (EEXIST); /* was 0 */
15817c478bd9Sstevel@tonic-gate }
15827c478bd9Sstevel@tonic-gate
15837c478bd9Sstevel@tonic-gate if (error != ENOENT) {
15847c478bd9Sstevel@tonic-gate if (self != NULL)
15857c478bd9Sstevel@tonic-gate tmpnode_rele(self);
15867c478bd9Sstevel@tonic-gate return (error);
15877c478bd9Sstevel@tonic-gate }
15887c478bd9Sstevel@tonic-gate
15897c478bd9Sstevel@tonic-gate rw_enter(&parent->tn_rwlock, RW_WRITER);
15907c478bd9Sstevel@tonic-gate error = tdirenter(tm, parent, lnm, DE_CREATE, (struct tmpnode *)NULL,
1591da6c28aaSamw (struct tmpnode *)NULL, tva, &self, cred, ct);
15927c478bd9Sstevel@tonic-gate rw_exit(&parent->tn_rwlock);
15937c478bd9Sstevel@tonic-gate
15947c478bd9Sstevel@tonic-gate if (error) {
15957c478bd9Sstevel@tonic-gate if (self)
15967c478bd9Sstevel@tonic-gate tmpnode_rele(self);
15977c478bd9Sstevel@tonic-gate return (error);
15987c478bd9Sstevel@tonic-gate }
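	/*
	 * Stash the link target in tmpfs-allocated memory; tn_size is the
	 * string length, not counting the terminating NUL.
	 */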
15997c478bd9Sstevel@tonic-gate len = strlen(tnm) + 1;
16007c478bd9Sstevel@tonic-gate cp = tmp_memalloc(len, 0);
16017c478bd9Sstevel@tonic-gate if (cp == NULL) {
16027c478bd9Sstevel@tonic-gate tmpnode_rele(self);
16037c478bd9Sstevel@tonic-gate return (ENOSPC);
16047c478bd9Sstevel@tonic-gate }
16057c478bd9Sstevel@tonic-gate (void) strcpy(cp, tnm);
16067c478bd9Sstevel@tonic-gate
16077c478bd9Sstevel@tonic-gate self->tn_symlink = cp;
16087c478bd9Sstevel@tonic-gate self->tn_size = len - 1;
16097c478bd9Sstevel@tonic-gate tmpnode_rele(self);
16107c478bd9Sstevel@tonic-gate return (error);
16117c478bd9Sstevel@tonic-gate }
16127c478bd9Sstevel@tonic-gate
16137c478bd9Sstevel@tonic-gate /* ARGSUSED2 */
16147c478bd9Sstevel@tonic-gate static int
1615da6c28aaSamw tmp_readlink(
1616da6c28aaSamw struct vnode *vp,
1617da6c28aaSamw struct uio *uiop,
1618da6c28aaSamw struct cred *cred,
1619da6c28aaSamw caller_context_t *ct)
16207c478bd9Sstevel@tonic-gate {
16217c478bd9Sstevel@tonic-gate struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
16227c478bd9Sstevel@tonic-gate int error = 0;
16237c478bd9Sstevel@tonic-gate
16247c478bd9Sstevel@tonic-gate if (vp->v_type != VLNK)
16257c478bd9Sstevel@tonic-gate return (EINVAL);
16267c478bd9Sstevel@tonic-gate
16277c478bd9Sstevel@tonic-gate rw_enter(&tp->tn_rwlock, RW_READER);
16287c478bd9Sstevel@tonic-gate rw_enter(&tp->tn_contents, RW_READER);
16297c478bd9Sstevel@tonic-gate error = uiomove(tp->tn_symlink, tp->tn_size, UIO_READ, uiop);
16307c478bd9Sstevel@tonic-gate gethrestime(&tp->tn_atime);
16317c478bd9Sstevel@tonic-gate rw_exit(&tp->tn_contents);
16327c478bd9Sstevel@tonic-gate rw_exit(&tp->tn_rwlock);
16337c478bd9Sstevel@tonic-gate return (error);
16347c478bd9Sstevel@tonic-gate }
16357c478bd9Sstevel@tonic-gate
16367c478bd9Sstevel@tonic-gate /* ARGSUSED */
16377c478bd9Sstevel@tonic-gate static int
1638da6c28aaSamw tmp_fsync(
1639da6c28aaSamw struct vnode *vp,
1640da6c28aaSamw int syncflag,
1641da6c28aaSamw struct cred *cred,
1642da6c28aaSamw caller_context_t *ct)
16437c478bd9Sstevel@tonic-gate {
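	/* tmpfs is memory-based; there is nothing to flush to stable storage. */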
16447c478bd9Sstevel@tonic-gate return (0);
16457c478bd9Sstevel@tonic-gate }
16467c478bd9Sstevel@tonic-gate
16477c478bd9Sstevel@tonic-gate /* ARGSUSED */
16487c478bd9Sstevel@tonic-gate static void
1649da6c28aaSamw tmp_inactive(struct vnode *vp, struct cred *cred, caller_context_t *ct)
16507c478bd9Sstevel@tonic-gate {
16517c478bd9Sstevel@tonic-gate struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
16527c478bd9Sstevel@tonic-gate struct tmount *tm = (struct tmount *)VFSTOTM(vp->v_vfsp);
16537c478bd9Sstevel@tonic-gate
16547c478bd9Sstevel@tonic-gate rw_enter(&tp->tn_rwlock, RW_WRITER);
16557c478bd9Sstevel@tonic-gate top:
16567c478bd9Sstevel@tonic-gate mutex_enter(&tp->tn_tlock);
16577c478bd9Sstevel@tonic-gate mutex_enter(&vp->v_lock);
16587c478bd9Sstevel@tonic-gate ASSERT(vp->v_count >= 1);
16597c478bd9Sstevel@tonic-gate
16607c478bd9Sstevel@tonic-gate /*
16617c478bd9Sstevel@tonic-gate * If we don't have the last hold or the link count is non-zero,
16627c478bd9Sstevel@tonic-gate * there's little to do -- just drop our hold.
16637c478bd9Sstevel@tonic-gate */
16647c478bd9Sstevel@tonic-gate if (vp->v_count > 1 || tp->tn_nlink != 0) {
16657c478bd9Sstevel@tonic-gate vp->v_count--;
16667c478bd9Sstevel@tonic-gate mutex_exit(&vp->v_lock);
16677c478bd9Sstevel@tonic-gate mutex_exit(&tp->tn_tlock);
16687c478bd9Sstevel@tonic-gate rw_exit(&tp->tn_rwlock);
16697c478bd9Sstevel@tonic-gate return;
16707c478bd9Sstevel@tonic-gate }
16717c478bd9Sstevel@tonic-gate
16727c478bd9Sstevel@tonic-gate /*
16737c478bd9Sstevel@tonic-gate * We have the last hold *and* the link count is zero, so this
16747c478bd9Sstevel@tonic-gate * tmpnode is dead from the filesystem's viewpoint. However,
16757c478bd9Sstevel@tonic-gate * if the tmpnode has any pages associated with it (i.e. if it's
16767c478bd9Sstevel@tonic-gate * a normal file with non-zero size), the tmpnode can still be
16777c478bd9Sstevel@tonic-gate * discovered by pageout or fsflush via the page vnode pointers.
16787c478bd9Sstevel@tonic-gate * In this case we must drop all our locks, truncate the tmpnode,
16797c478bd9Sstevel@tonic-gate * and try the whole dance again.
16807c478bd9Sstevel@tonic-gate */
16817c478bd9Sstevel@tonic-gate if (tp->tn_size != 0) {
16827c478bd9Sstevel@tonic-gate if (tp->tn_type == VREG) {
16837c478bd9Sstevel@tonic-gate mutex_exit(&vp->v_lock);
16847c478bd9Sstevel@tonic-gate mutex_exit(&tp->tn_tlock);
16857c478bd9Sstevel@tonic-gate rw_enter(&tp->tn_contents, RW_WRITER);
16867c478bd9Sstevel@tonic-gate (void) tmpnode_trunc(tm, tp, 0);
16877c478bd9Sstevel@tonic-gate rw_exit(&tp->tn_contents);
16887c478bd9Sstevel@tonic-gate ASSERT(tp->tn_size == 0);
16897c478bd9Sstevel@tonic-gate ASSERT(tp->tn_nblocks == 0);
16907c478bd9Sstevel@tonic-gate goto top;
16917c478bd9Sstevel@tonic-gate }
16927c478bd9Sstevel@tonic-gate if (tp->tn_type == VLNK)
16937c478bd9Sstevel@tonic-gate tmp_memfree(tp->tn_symlink, tp->tn_size + 1);
16947c478bd9Sstevel@tonic-gate }
16957c478bd9Sstevel@tonic-gate
16967c478bd9Sstevel@tonic-gate /*
16977c478bd9Sstevel@tonic-gate * Remove normal file/dir's xattr dir and xattrs.
16987c478bd9Sstevel@tonic-gate */
16997c478bd9Sstevel@tonic-gate if (tp->tn_xattrdp) {
17007c478bd9Sstevel@tonic-gate struct tmpnode *xtp = tp->tn_xattrdp;
17017c478bd9Sstevel@tonic-gate
17027c478bd9Sstevel@tonic-gate ASSERT(xtp->tn_flags & ISXATTR);
17037c478bd9Sstevel@tonic-gate tmpnode_hold(xtp);
17047c478bd9Sstevel@tonic-gate rw_enter(&xtp->tn_rwlock, RW_WRITER);
17057c478bd9Sstevel@tonic-gate tdirtrunc(xtp);
17067c478bd9Sstevel@tonic-gate DECR_COUNT(&xtp->tn_nlink, &xtp->tn_tlock);
17077c478bd9Sstevel@tonic-gate tp->tn_xattrdp = NULL;
17087c478bd9Sstevel@tonic-gate rw_exit(&xtp->tn_rwlock);
17097c478bd9Sstevel@tonic-gate tmpnode_rele(xtp);
17107c478bd9Sstevel@tonic-gate }
17117c478bd9Sstevel@tonic-gate
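	/*
	 * Final teardown: invalidate the vnode, unlink the tmpnode from
	 * the per-mount list under tm_contents, then free the vnode and
	 * the tmpnode itself.
	 */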
17127c478bd9Sstevel@tonic-gate mutex_exit(&vp->v_lock);
17137c478bd9Sstevel@tonic-gate mutex_exit(&tp->tn_tlock);
17147c478bd9Sstevel@tonic-gate /* Here's our chance to send invalid event while we're between locks */
17157c478bd9Sstevel@tonic-gate vn_invalid(TNTOV(tp));
17167c478bd9Sstevel@tonic-gate mutex_enter(&tm->tm_contents);
17177c478bd9Sstevel@tonic-gate if (tp->tn_forw == NULL)
17187c478bd9Sstevel@tonic-gate tm->tm_rootnode->tn_back = tp->tn_back;
17197c478bd9Sstevel@tonic-gate else
17207c478bd9Sstevel@tonic-gate tp->tn_forw->tn_back = tp->tn_back;
17217c478bd9Sstevel@tonic-gate tp->tn_back->tn_forw = tp->tn_forw;
17227c478bd9Sstevel@tonic-gate mutex_exit(&tm->tm_contents);
17237c478bd9Sstevel@tonic-gate rw_exit(&tp->tn_rwlock);
17247c478bd9Sstevel@tonic-gate rw_destroy(&tp->tn_rwlock);
17257c478bd9Sstevel@tonic-gate mutex_destroy(&tp->tn_tlock);
17267c478bd9Sstevel@tonic-gate vn_free(TNTOV(tp));
17277c478bd9Sstevel@tonic-gate tmp_memfree(tp, sizeof (struct tmpnode));
17287c478bd9Sstevel@tonic-gate }
17297c478bd9Sstevel@tonic-gate
1730da6c28aaSamw /* ARGSUSED2 */
17317c478bd9Sstevel@tonic-gate static int
1732da6c28aaSamw tmp_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
17337c478bd9Sstevel@tonic-gate {
17347c478bd9Sstevel@tonic-gate struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
17357c478bd9Sstevel@tonic-gate struct tfid *tfid;
17367c478bd9Sstevel@tonic-gate
17377c478bd9Sstevel@tonic-gate if (fidp->fid_len < (sizeof (struct tfid) - sizeof (ushort_t))) {
17387c478bd9Sstevel@tonic-gate fidp->fid_len = sizeof (struct tfid) - sizeof (ushort_t);
17397c478bd9Sstevel@tonic-gate return (ENOSPC);
17407c478bd9Sstevel@tonic-gate }
17417c478bd9Sstevel@tonic-gate
17427c478bd9Sstevel@tonic-gate tfid = (struct tfid *)fidp;
17437c478bd9Sstevel@tonic-gate bzero(tfid, sizeof (struct tfid));
17447c478bd9Sstevel@tonic-gate tfid->tfid_len = (int)sizeof (struct tfid) - sizeof (ushort_t);
17457c478bd9Sstevel@tonic-gate
17467c478bd9Sstevel@tonic-gate tfid->tfid_ino = tp->tn_nodeid;
17477c478bd9Sstevel@tonic-gate tfid->tfid_gen = tp->tn_gen;
17487c478bd9Sstevel@tonic-gate
17497c478bd9Sstevel@tonic-gate return (0);
17507c478bd9Sstevel@tonic-gate }
17517c478bd9Sstevel@tonic-gate
17527c478bd9Sstevel@tonic-gate
17537c478bd9Sstevel@tonic-gate /*
17547c478bd9Sstevel@tonic-gate * Return all the pages from [off..off+len] in given file
17557c478bd9Sstevel@tonic-gate */
1756da6c28aaSamw /* ARGSUSED */
17577c478bd9Sstevel@tonic-gate static int
17587c478bd9Sstevel@tonic-gate tmp_getpage(
17597c478bd9Sstevel@tonic-gate struct vnode *vp,
17607c478bd9Sstevel@tonic-gate offset_t off,
17617c478bd9Sstevel@tonic-gate size_t len,
17627c478bd9Sstevel@tonic-gate uint_t *protp,
17637c478bd9Sstevel@tonic-gate page_t *pl[],
17647c478bd9Sstevel@tonic-gate size_t plsz,
17657c478bd9Sstevel@tonic-gate struct seg *seg,
17667c478bd9Sstevel@tonic-gate caddr_t addr,
17677c478bd9Sstevel@tonic-gate enum seg_rw rw,
1768da6c28aaSamw struct cred *cr,
1769da6c28aaSamw caller_context_t *ct)
17707c478bd9Sstevel@tonic-gate {
17717c478bd9Sstevel@tonic-gate int err = 0;
17727c478bd9Sstevel@tonic-gate struct tmpnode *tp = VTOTN(vp);
17737c478bd9Sstevel@tonic-gate anoff_t toff = (anoff_t)off;
17747c478bd9Sstevel@tonic-gate size_t tlen = len;
17757c478bd9Sstevel@tonic-gate u_offset_t tmpoff;
17767c478bd9Sstevel@tonic-gate timestruc_t now;
17777c478bd9Sstevel@tonic-gate
17787c478bd9Sstevel@tonic-gate rw_enter(&tp->tn_contents, RW_READER);
17797c478bd9Sstevel@tonic-gate
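	/* Faulting past the last page of the file is an error. */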
17807c478bd9Sstevel@tonic-gate if (off + len > tp->tn_size + PAGEOFFSET) {
17817c478bd9Sstevel@tonic-gate err = EFAULT;
17827c478bd9Sstevel@tonic-gate goto out;
17837c478bd9Sstevel@tonic-gate }
17847c478bd9Sstevel@tonic-gate /*
17857c478bd9Sstevel@tonic-gate * Look for holes (no anon slot) in faulting range. If there are
17867c478bd9Sstevel@tonic-gate * holes we have to switch to a write lock and fill them in. Swap
17877c478bd9Sstevel@tonic-gate * space for holes was already reserved when the file was grown.
17887c478bd9Sstevel@tonic-gate */
17897c478bd9Sstevel@tonic-gate tmpoff = toff;
17907c478bd9Sstevel@tonic-gate if (non_anon(tp->tn_anon, btop(off), &tmpoff, &tlen)) {
17917c478bd9Sstevel@tonic-gate if (!rw_tryupgrade(&tp->tn_contents)) {
17927c478bd9Sstevel@tonic-gate rw_exit(&tp->tn_contents);
17937c478bd9Sstevel@tonic-gate rw_enter(&tp->tn_contents, RW_WRITER);
17947c478bd9Sstevel@tonic-gate /* Size may have changed when lock was dropped */
17957c478bd9Sstevel@tonic-gate if (off + len > tp->tn_size + PAGEOFFSET) {
17967c478bd9Sstevel@tonic-gate err = EFAULT;
17977c478bd9Sstevel@tonic-gate goto out;
17987c478bd9Sstevel@tonic-gate }
17997c478bd9Sstevel@tonic-gate }
18007c478bd9Sstevel@tonic-gate for (toff = (anoff_t)off; toff < (anoff_t)off + len;
18017c478bd9Sstevel@tonic-gate toff += PAGESIZE) {
18027c478bd9Sstevel@tonic-gate if (anon_get_ptr(tp->tn_anon, btop(toff)) == NULL) {
18037c478bd9Sstevel@tonic-gate /* XXX - may allocate mem w. write lock held */
18047c478bd9Sstevel@tonic-gate (void) anon_set_ptr(tp->tn_anon, btop(toff),
1805c6f08383Sjj204856 anon_alloc(vp, toff), ANON_SLEEP);
18067c478bd9Sstevel@tonic-gate tp->tn_nblocks++;
18077c478bd9Sstevel@tonic-gate }
18087c478bd9Sstevel@tonic-gate }
18097c478bd9Sstevel@tonic-gate rw_downgrade(&tp->tn_contents);
18107c478bd9Sstevel@tonic-gate }
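	/*
	 * Every anon slot in the range now exists; pvn_getpages() calls
	 * back into tmp_getapage() for each page.
	 */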
18117c478bd9Sstevel@tonic-gate
18127c478bd9Sstevel@tonic-gate
1813*06e6833aSJosef 'Jeff' Sipek err = pvn_getpages(tmp_getapage, vp, (u_offset_t)off, len, protp,
1814*06e6833aSJosef 'Jeff' Sipek pl, plsz, seg, addr, rw, cr);
18157c478bd9Sstevel@tonic-gate
18167c478bd9Sstevel@tonic-gate gethrestime(&now);
18177c478bd9Sstevel@tonic-gate tp->tn_atime = now;
18187c478bd9Sstevel@tonic-gate if (rw == S_WRITE)
18197c478bd9Sstevel@tonic-gate tp->tn_mtime = now;
18207c478bd9Sstevel@tonic-gate
18217c478bd9Sstevel@tonic-gate out:
18227c478bd9Sstevel@tonic-gate rw_exit(&tp->tn_contents);
18237c478bd9Sstevel@tonic-gate return (err);
18247c478bd9Sstevel@tonic-gate }
18257c478bd9Sstevel@tonic-gate
18267c478bd9Sstevel@tonic-gate /*
1827*06e6833aSJosef 'Jeff' Sipek * Called from pvn_getpages to get a particular page.
18287c478bd9Sstevel@tonic-gate */
18297c478bd9Sstevel@tonic-gate /*ARGSUSED*/
18307c478bd9Sstevel@tonic-gate static int
18317c478bd9Sstevel@tonic-gate tmp_getapage(
18327c478bd9Sstevel@tonic-gate struct vnode *vp,
18337c478bd9Sstevel@tonic-gate u_offset_t off,
18347c478bd9Sstevel@tonic-gate size_t len,
18357c478bd9Sstevel@tonic-gate uint_t *protp,
18367c478bd9Sstevel@tonic-gate page_t *pl[],
18377c478bd9Sstevel@tonic-gate size_t plsz,
18387c478bd9Sstevel@tonic-gate struct seg *seg,
18397c478bd9Sstevel@tonic-gate caddr_t addr,
18407c478bd9Sstevel@tonic-gate enum seg_rw rw,
18417c478bd9Sstevel@tonic-gate struct cred *cr)
18427c478bd9Sstevel@tonic-gate {
18437c478bd9Sstevel@tonic-gate struct page *pp;
18447c478bd9Sstevel@tonic-gate int flags;
18457c478bd9Sstevel@tonic-gate int err = 0;
18467c478bd9Sstevel@tonic-gate struct vnode *pvp;
18477c478bd9Sstevel@tonic-gate u_offset_t poff;
18487c478bd9Sstevel@tonic-gate
18497c478bd9Sstevel@tonic-gate if (protp != NULL)
18507c478bd9Sstevel@tonic-gate *protp = PROT_ALL;
18517c478bd9Sstevel@tonic-gate again:
18527c478bd9Sstevel@tonic-gate if (pp = page_lookup(vp, off, rw == S_CREATE ? SE_EXCL : SE_SHARED)) {
18537c478bd9Sstevel@tonic-gate if (pl) {
18547c478bd9Sstevel@tonic-gate pl[0] = pp;
18557c478bd9Sstevel@tonic-gate pl[1] = NULL;
18567c478bd9Sstevel@tonic-gate } else {
18577c478bd9Sstevel@tonic-gate page_unlock(pp);
18587c478bd9Sstevel@tonic-gate }
18597c478bd9Sstevel@tonic-gate } else {
18607c478bd9Sstevel@tonic-gate pp = page_create_va(vp, off, PAGESIZE,
18617c478bd9Sstevel@tonic-gate PG_WAIT | PG_EXCL, seg, addr);
18627c478bd9Sstevel@tonic-gate /*
18637c478bd9Sstevel@tonic-gate * Someone raced in and created the page after we did the
18647c478bd9Sstevel@tonic-gate * lookup but before we did the create, so go back and
18657c478bd9Sstevel@tonic-gate * try to look it up again.
18667c478bd9Sstevel@tonic-gate */
18677c478bd9Sstevel@tonic-gate if (pp == NULL)
18687c478bd9Sstevel@tonic-gate goto again;
18697c478bd9Sstevel@tonic-gate /*
18707c478bd9Sstevel@tonic-gate * Fill page from backing store, if any. If none, then
18717c478bd9Sstevel@tonic-gate  * either this is a newly filled hole or the page must have
18727c478bd9Sstevel@tonic-gate  * been unmodified and freed, so just zero it out.
18737c478bd9Sstevel@tonic-gate */
18747c478bd9Sstevel@tonic-gate err = swap_getphysname(vp, off, &pvp, &poff);
18757c478bd9Sstevel@tonic-gate if (err) {
18767c478bd9Sstevel@tonic-gate panic("tmp_getapage: no anon slot vp %p "
18777c478bd9Sstevel@tonic-gate "off %llx pp %p\n", (void *)vp, off, (void *)pp);
18787c478bd9Sstevel@tonic-gate }
18797c478bd9Sstevel@tonic-gate if (pvp) {
18807c478bd9Sstevel@tonic-gate flags = (pl == NULL ? B_ASYNC|B_READ : B_READ);
18817c478bd9Sstevel@tonic-gate err = VOP_PAGEIO(pvp, pp, (u_offset_t)poff, PAGESIZE,
1882da6c28aaSamw flags, cr, NULL);
18837c478bd9Sstevel@tonic-gate if (flags & B_ASYNC)
18847c478bd9Sstevel@tonic-gate pp = NULL;
18857c478bd9Sstevel@tonic-gate } else if (rw != S_CREATE) {
18867c478bd9Sstevel@tonic-gate pagezero(pp, 0, PAGESIZE);
18877c478bd9Sstevel@tonic-gate }
18887c478bd9Sstevel@tonic-gate if (err && pp)
18897c478bd9Sstevel@tonic-gate pvn_read_done(pp, B_ERROR);
18907c478bd9Sstevel@tonic-gate if (err == 0) {
18917c478bd9Sstevel@tonic-gate if (pl)
18927c478bd9Sstevel@tonic-gate pvn_plist_init(pp, pl, plsz, off, PAGESIZE, rw);
18937c478bd9Sstevel@tonic-gate else
18947c478bd9Sstevel@tonic-gate pvn_io_done(pp);
18957c478bd9Sstevel@tonic-gate }
18967c478bd9Sstevel@tonic-gate }
18977c478bd9Sstevel@tonic-gate return (err);
18987c478bd9Sstevel@tonic-gate }
18997c478bd9Sstevel@tonic-gate
19007c478bd9Sstevel@tonic-gate
19017c478bd9Sstevel@tonic-gate /*
19027c478bd9Sstevel@tonic-gate  * Flags are composed of {B_INVAL, B_DIRTY, B_FREE, B_DONTNEED}.
19037c478bd9Sstevel@tonic-gate * If len == 0, do from off to EOF.
19047c478bd9Sstevel@tonic-gate */
19057c478bd9Sstevel@tonic-gate static int tmp_nopage = 0;	/* Don't do tmp_putpage() work if set */
19067c478bd9Sstevel@tonic-gate
19077c478bd9Sstevel@tonic-gate /* ARGSUSED */
19087c478bd9Sstevel@tonic-gate int
19097c478bd9Sstevel@tonic-gate tmp_putpage(
19107c478bd9Sstevel@tonic-gate register struct vnode *vp,
19117c478bd9Sstevel@tonic-gate offset_t off,
19127c478bd9Sstevel@tonic-gate size_t len,
19137c478bd9Sstevel@tonic-gate int flags,
1914da6c28aaSamw struct cred *cr,
1915da6c28aaSamw caller_context_t *ct)
19167c478bd9Sstevel@tonic-gate {
19177c478bd9Sstevel@tonic-gate register page_t *pp;
19187c478bd9Sstevel@tonic-gate u_offset_t io_off;
19197c478bd9Sstevel@tonic-gate size_t io_len = 0;
19207c478bd9Sstevel@tonic-gate int err = 0;
19217c478bd9Sstevel@tonic-gate struct tmpnode *tp = VTOTN(vp);
19227c478bd9Sstevel@tonic-gate int dolock;
19237c478bd9Sstevel@tonic-gate
19247c478bd9Sstevel@tonic-gate if (tmp_nopage)
19257c478bd9Sstevel@tonic-gate return (0);
19267c478bd9Sstevel@tonic-gate
19277c478bd9Sstevel@tonic-gate ASSERT(vp->v_count != 0);
19287c478bd9Sstevel@tonic-gate
19297c478bd9Sstevel@tonic-gate if (vp->v_flag & VNOMAP)
19307c478bd9Sstevel@tonic-gate return (ENOSYS);
19317c478bd9Sstevel@tonic-gate
19327c478bd9Sstevel@tonic-gate /*
19337c478bd9Sstevel@tonic-gate * This being tmpfs, we don't ever do i/o unless we really
19347c478bd9Sstevel@tonic-gate * have to (when we're low on memory and pageout calls us
19357c478bd9Sstevel@tonic-gate * with B_ASYNC | B_FREE or the user explicitly asks for it with
19367c478bd9Sstevel@tonic-gate * B_DONTNEED).
19377c478bd9Sstevel@tonic-gate * XXX to approximately track the mod time like ufs we should
19387c478bd9Sstevel@tonic-gate * update the times here. The problem is, once someone does a
19397c478bd9Sstevel@tonic-gate * store we never clear the mod bit and do i/o, thus fsflush
19407c478bd9Sstevel@tonic-gate * will keep calling us every 30 seconds to do the i/o and we'll
19417c478bd9Sstevel@tonic-gate * continually update the mod time. At least we update the mod
19427c478bd9Sstevel@tonic-gate * time on the first store because this results in a call to getpage.
19437c478bd9Sstevel@tonic-gate */
19447c478bd9Sstevel@tonic-gate if (flags != (B_ASYNC | B_FREE) && (flags & B_INVAL) == 0 &&
19457c478bd9Sstevel@tonic-gate (flags & B_DONTNEED) == 0)
19467c478bd9Sstevel@tonic-gate return (0);
19477c478bd9Sstevel@tonic-gate /*
19487c478bd9Sstevel@tonic-gate * If this thread owns the lock, i.e., this thread grabbed it
19497c478bd9Sstevel@tonic-gate * as writer somewhere above, then we don't need to grab the
19507c478bd9Sstevel@tonic-gate * lock as reader in this routine.
19517c478bd9Sstevel@tonic-gate */
19527c478bd9Sstevel@tonic-gate dolock = (rw_owner(&tp->tn_contents) != curthread);
19537c478bd9Sstevel@tonic-gate
19547c478bd9Sstevel@tonic-gate /*
19557c478bd9Sstevel@tonic-gate * If this is pageout don't block on the lock as you could deadlock
19567c478bd9Sstevel@tonic-gate * when freemem == 0 (another thread has the read lock and is blocked
19577c478bd9Sstevel@tonic-gate * creating a page, and a third thread is waiting to get the writers
19587c478bd9Sstevel@tonic-gate * lock - waiting writers priority blocks us from getting the read
19597c478bd9Sstevel@tonic-gate * lock). Of course, if the only freeable pages are on this tmpnode
19607c478bd9Sstevel@tonic-gate  * we're hosed anyway. A better solution might be a new lock type.
19617c478bd9Sstevel@tonic-gate * Note: ufs has the same problem.
19627c478bd9Sstevel@tonic-gate */
19637c478bd9Sstevel@tonic-gate if (curproc == proc_pageout) {
19647c478bd9Sstevel@tonic-gate if (!rw_tryenter(&tp->tn_contents, RW_READER))
19657c478bd9Sstevel@tonic-gate return (ENOMEM);
19667c478bd9Sstevel@tonic-gate } else if (dolock)
19677c478bd9Sstevel@tonic-gate rw_enter(&tp->tn_contents, RW_READER);
19687c478bd9Sstevel@tonic-gate
19697c478bd9Sstevel@tonic-gate if (!vn_has_cached_data(vp))
19707c478bd9Sstevel@tonic-gate goto out;
19717c478bd9Sstevel@tonic-gate
19727c478bd9Sstevel@tonic-gate if (len == 0) {
19737c478bd9Sstevel@tonic-gate if (curproc == proc_pageout) {
19747c478bd9Sstevel@tonic-gate panic("tmp: pageout can't block");
19757c478bd9Sstevel@tonic-gate /*NOTREACHED*/
19767c478bd9Sstevel@tonic-gate }
19777c478bd9Sstevel@tonic-gate
19787c478bd9Sstevel@tonic-gate /* Search the entire vp list for pages >= off. */
19797c478bd9Sstevel@tonic-gate err = pvn_vplist_dirty(vp, (u_offset_t)off, tmp_putapage,
19807c478bd9Sstevel@tonic-gate flags, cr);
19817c478bd9Sstevel@tonic-gate } else {
19827c478bd9Sstevel@tonic-gate u_offset_t eoff;
19837c478bd9Sstevel@tonic-gate
19847c478bd9Sstevel@tonic-gate /*
19857c478bd9Sstevel@tonic-gate * Loop over all offsets in the range [off...off + len]
19867c478bd9Sstevel@tonic-gate * looking for pages to deal with.
19877c478bd9Sstevel@tonic-gate */
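		/*
		 * The range is clamped at tn_size so we never push pages
		 * beyond EOF.
		 */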
19887c478bd9Sstevel@tonic-gate eoff = MIN(off + len, tp->tn_size);
19897c478bd9Sstevel@tonic-gate for (io_off = off; io_off < eoff; io_off += io_len) {
19907c478bd9Sstevel@tonic-gate /*
19917c478bd9Sstevel@tonic-gate * If we are not invalidating, synchronously
19927c478bd9Sstevel@tonic-gate * freeing or writing pages use the routine
19937c478bd9Sstevel@tonic-gate * page_lookup_nowait() to prevent reclaiming
19947c478bd9Sstevel@tonic-gate * them from the free list.
19957c478bd9Sstevel@tonic-gate */
19967c478bd9Sstevel@tonic-gate if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
19977c478bd9Sstevel@tonic-gate pp = page_lookup(vp, io_off,
19987c478bd9Sstevel@tonic-gate (flags & (B_INVAL | B_FREE)) ?
19997c478bd9Sstevel@tonic-gate SE_EXCL : SE_SHARED);
20007c478bd9Sstevel@tonic-gate } else {
20017c478bd9Sstevel@tonic-gate pp = page_lookup_nowait(vp, io_off,
20027c478bd9Sstevel@tonic-gate (flags & B_FREE) ? SE_EXCL : SE_SHARED);
20037c478bd9Sstevel@tonic-gate }
20047c478bd9Sstevel@tonic-gate
20057c478bd9Sstevel@tonic-gate if (pp == NULL || pvn_getdirty(pp, flags) == 0)
20067c478bd9Sstevel@tonic-gate io_len = PAGESIZE;
20077c478bd9Sstevel@tonic-gate else {
20087c478bd9Sstevel@tonic-gate err = tmp_putapage(vp, pp, &io_off, &io_len,
20097c478bd9Sstevel@tonic-gate flags, cr);
20107c478bd9Sstevel@tonic-gate if (err != 0)
20117c478bd9Sstevel@tonic-gate break;
20127c478bd9Sstevel@tonic-gate }
20137c478bd9Sstevel@tonic-gate }
20147c478bd9Sstevel@tonic-gate }
20157c478bd9Sstevel@tonic-gate /* If invalidating, verify all pages on vnode list are gone. */
20167c478bd9Sstevel@tonic-gate if (err == 0 && off == 0 && len == 0 &&
20177c478bd9Sstevel@tonic-gate (flags & B_INVAL) && vn_has_cached_data(vp)) {
20187c478bd9Sstevel@tonic-gate panic("tmp_putpage: B_INVAL, pages not gone");
20197c478bd9Sstevel@tonic-gate /*NOTREACHED*/
20207c478bd9Sstevel@tonic-gate }
20217c478bd9Sstevel@tonic-gate out:
20227c478bd9Sstevel@tonic-gate if ((curproc == proc_pageout) || dolock)
20237c478bd9Sstevel@tonic-gate rw_exit(&tp->tn_contents);
20247c478bd9Sstevel@tonic-gate /*
20257c478bd9Sstevel@tonic-gate  * The only reason putapage will give us SE_NOSWAP as an error
20267c478bd9Sstevel@tonic-gate  * is when we ask for a page to be written to physical backing store
20277c478bd9Sstevel@tonic-gate  * and there is none. Ignore this because we might be dealing
20287c478bd9Sstevel@tonic-gate  * with a swap page which does not have any backing store
20297c478bd9Sstevel@tonic-gate  * on disk. In any other case we won't get this error here.
20307c478bd9Sstevel@tonic-gate */
20317c478bd9Sstevel@tonic-gate if (err == SE_NOSWAP)
20327c478bd9Sstevel@tonic-gate err = 0;
20337c478bd9Sstevel@tonic-gate return (err);
20347c478bd9Sstevel@tonic-gate }
20357c478bd9Sstevel@tonic-gate
20367c478bd9Sstevel@tonic-gate long tmp_putpagecnt, tmp_pagespushed;
20377c478bd9Sstevel@tonic-gate
20387c478bd9Sstevel@tonic-gate /*
20397c478bd9Sstevel@tonic-gate * Write out a single page.
20407c478bd9Sstevel@tonic-gate * For tmpfs this means choose a physical swap slot and write the page
20417c478bd9Sstevel@tonic-gate * out using VOP_PAGEIO. For performance, we attempt to kluster; i.e.,
20427c478bd9Sstevel@tonic-gate * we try to find a bunch of other dirty pages adjacent in the file
20437c478bd9Sstevel@tonic-gate * and a bunch of contiguous swap slots, and then write all the pages
20447c478bd9Sstevel@tonic-gate * out in a single i/o.
20457c478bd9Sstevel@tonic-gate */
20467c478bd9Sstevel@tonic-gate /*ARGSUSED*/
20477c478bd9Sstevel@tonic-gate static int
20487c478bd9Sstevel@tonic-gate tmp_putapage(
20497c478bd9Sstevel@tonic-gate struct vnode *vp,
20507c478bd9Sstevel@tonic-gate page_t *pp,
20517c478bd9Sstevel@tonic-gate u_offset_t *offp,
20527c478bd9Sstevel@tonic-gate size_t *lenp,
20537c478bd9Sstevel@tonic-gate int flags,
20547c478bd9Sstevel@tonic-gate struct cred *cr)
20557c478bd9Sstevel@tonic-gate {
20567c478bd9Sstevel@tonic-gate int err;
20577c478bd9Sstevel@tonic-gate ulong_t klstart, kllen;
20587c478bd9Sstevel@tonic-gate page_t *pplist, *npplist;
20597c478bd9Sstevel@tonic-gate extern int klustsize;
20607c478bd9Sstevel@tonic-gate long tmp_klustsize;
20617c478bd9Sstevel@tonic-gate struct tmpnode *tp;
20627c478bd9Sstevel@tonic-gate size_t pp_off, pp_len;
20637c478bd9Sstevel@tonic-gate u_offset_t io_off;
20647c478bd9Sstevel@tonic-gate size_t io_len;
20657c478bd9Sstevel@tonic-gate struct vnode *pvp;
20667c478bd9Sstevel@tonic-gate u_offset_t pstart;
20677c478bd9Sstevel@tonic-gate u_offset_t offset;
20687c478bd9Sstevel@tonic-gate u_offset_t tmpoff;
20697c478bd9Sstevel@tonic-gate
20707c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp));
20717c478bd9Sstevel@tonic-gate
20727c478bd9Sstevel@tonic-gate /* Kluster in tmp_klustsize chunks */
20737c478bd9Sstevel@tonic-gate tp = VTOTN(vp);
20747c478bd9Sstevel@tonic-gate tmp_klustsize = klustsize;
20757c478bd9Sstevel@tonic-gate offset = pp->p_offset;
20767c478bd9Sstevel@tonic-gate klstart = (offset / tmp_klustsize) * tmp_klustsize;
20777c478bd9Sstevel@tonic-gate kllen = MIN(tmp_klustsize, tp->tn_size - klstart);
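	/*
	 * klstart/kllen bound the klustsize-aligned window containing this
	 * page, clipped at end of file; pvn_write_kluster() gathers any
	 * dirty pages it finds inside that window.
	 */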
20787c478bd9Sstevel@tonic-gate
20797c478bd9Sstevel@tonic-gate /* Get a kluster of pages */
20807c478bd9Sstevel@tonic-gate pplist =
20817c478bd9Sstevel@tonic-gate pvn_write_kluster(vp, pp, &tmpoff, &pp_len, klstart, kllen, flags);
20827c478bd9Sstevel@tonic-gate
20837c478bd9Sstevel@tonic-gate pp_off = (size_t)tmpoff;
20847c478bd9Sstevel@tonic-gate
20857c478bd9Sstevel@tonic-gate /*
20867c478bd9Sstevel@tonic-gate * Get a cluster of physical offsets for the pages; the amount we
20877c478bd9Sstevel@tonic-gate * get may be some subrange of what we ask for (io_off, io_len).
20887c478bd9Sstevel@tonic-gate */
20897c478bd9Sstevel@tonic-gate io_off = pp_off;
20907c478bd9Sstevel@tonic-gate io_len = pp_len;
20917c478bd9Sstevel@tonic-gate err = swap_newphysname(vp, offset, &io_off, &io_len, &pvp, &pstart);
20927c478bd9Sstevel@tonic-gate ASSERT(err != SE_NOANON); /* anon slot must have been filled */
20937c478bd9Sstevel@tonic-gate if (err) {
20947c478bd9Sstevel@tonic-gate pvn_write_done(pplist, B_ERROR | B_WRITE | flags);
20957c478bd9Sstevel@tonic-gate /*
20967c478bd9Sstevel@tonic-gate * If this routine is called as a result of segvn_sync
20977c478bd9Sstevel@tonic-gate * operation and we have no physical swap then we can get an
20987c478bd9Sstevel@tonic-gate * error here. In such case we would return SE_NOSWAP as error.
20997c478bd9Sstevel@tonic-gate * At this point, we expect only SE_NOSWAP.
21007c478bd9Sstevel@tonic-gate */
21017c478bd9Sstevel@tonic-gate ASSERT(err == SE_NOSWAP);
21027c478bd9Sstevel@tonic-gate if (flags & B_INVAL)
21037c478bd9Sstevel@tonic-gate err = ENOMEM;
21047c478bd9Sstevel@tonic-gate goto out;
21057c478bd9Sstevel@tonic-gate }
21067c478bd9Sstevel@tonic-gate ASSERT(pp_off <= io_off && io_off + io_len <= pp_off + pp_len);
21077c478bd9Sstevel@tonic-gate ASSERT(io_off <= offset && offset < io_off + io_len);
21087c478bd9Sstevel@tonic-gate
21097c478bd9Sstevel@tonic-gate /* Toss pages at front/rear that we couldn't get physical backing for */
21107c478bd9Sstevel@tonic-gate if (io_off != pp_off) {
21117c478bd9Sstevel@tonic-gate npplist = NULL;
21127c478bd9Sstevel@tonic-gate page_list_break(&pplist, &npplist, btop(io_off - pp_off));
21137c478bd9Sstevel@tonic-gate ASSERT(pplist->p_offset == pp_off);
21147c478bd9Sstevel@tonic-gate ASSERT(pplist->p_prev->p_offset == io_off - PAGESIZE);
21157c478bd9Sstevel@tonic-gate pvn_write_done(pplist, B_ERROR | B_WRITE | flags);
21167c478bd9Sstevel@tonic-gate pplist = npplist;
21177c478bd9Sstevel@tonic-gate }
21187c478bd9Sstevel@tonic-gate if (io_off + io_len < pp_off + pp_len) {
21197c478bd9Sstevel@tonic-gate npplist = NULL;
21207c478bd9Sstevel@tonic-gate page_list_break(&pplist, &npplist, btop(io_len));
21217c478bd9Sstevel@tonic-gate ASSERT(npplist->p_offset == io_off + io_len);
21227c478bd9Sstevel@tonic-gate ASSERT(npplist->p_prev->p_offset == pp_off + pp_len - PAGESIZE);
21237c478bd9Sstevel@tonic-gate pvn_write_done(npplist, B_ERROR | B_WRITE | flags);
21247c478bd9Sstevel@tonic-gate }
21257c478bd9Sstevel@tonic-gate
21267c478bd9Sstevel@tonic-gate ASSERT(pplist->p_offset == io_off);
21277c478bd9Sstevel@tonic-gate ASSERT(pplist->p_prev->p_offset == io_off + io_len - PAGESIZE);
21287c478bd9Sstevel@tonic-gate ASSERT(btopr(io_len) <= btopr(kllen));
21297c478bd9Sstevel@tonic-gate
21307c478bd9Sstevel@tonic-gate /* Do i/o on the remaining kluster */
21317c478bd9Sstevel@tonic-gate err = VOP_PAGEIO(pvp, pplist, (u_offset_t)pstart, io_len,
2132da6c28aaSamw B_WRITE | flags, cr, NULL);
21337c478bd9Sstevel@tonic-gate
21347c478bd9Sstevel@tonic-gate if ((flags & B_ASYNC) == 0) {
21357c478bd9Sstevel@tonic-gate pvn_write_done(pplist, ((err) ? B_ERROR : 0) | B_WRITE | flags);
21367c478bd9Sstevel@tonic-gate }
21377c478bd9Sstevel@tonic-gate out:
21387c478bd9Sstevel@tonic-gate if (!err) {
21397c478bd9Sstevel@tonic-gate if (offp)
21407c478bd9Sstevel@tonic-gate *offp = io_off;
21417c478bd9Sstevel@tonic-gate if (lenp)
21427c478bd9Sstevel@tonic-gate *lenp = io_len;
21437c478bd9Sstevel@tonic-gate tmp_putpagecnt++;
21447c478bd9Sstevel@tonic-gate tmp_pagespushed += btop(io_len);
21457c478bd9Sstevel@tonic-gate }
21467c478bd9Sstevel@tonic-gate if (err && err != ENOMEM && err != SE_NOSWAP)
21477c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "tmp_putapage: err %d\n", err);
21487c478bd9Sstevel@tonic-gate return (err);
21497c478bd9Sstevel@tonic-gate }
21507c478bd9Sstevel@tonic-gate
2151da6c28aaSamw /* ARGSUSED */
21527c478bd9Sstevel@tonic-gate static int
21537c478bd9Sstevel@tonic-gate tmp_map(
21547c478bd9Sstevel@tonic-gate struct vnode *vp,
21557c478bd9Sstevel@tonic-gate offset_t off,
21567c478bd9Sstevel@tonic-gate struct as *as,
21577c478bd9Sstevel@tonic-gate caddr_t *addrp,
21587c478bd9Sstevel@tonic-gate size_t len,
21597c478bd9Sstevel@tonic-gate uchar_t prot,
21607c478bd9Sstevel@tonic-gate uchar_t maxprot,
21617c478bd9Sstevel@tonic-gate uint_t flags,
2162da6c28aaSamw struct cred *cred,
2163da6c28aaSamw caller_context_t *ct)
21647c478bd9Sstevel@tonic-gate {
21657c478bd9Sstevel@tonic-gate struct segvn_crargs vn_a;
21667c478bd9Sstevel@tonic-gate struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
21677c478bd9Sstevel@tonic-gate int error;
21687c478bd9Sstevel@tonic-gate
21697c478bd9Sstevel@tonic-gate #ifdef _ILP32
21707c478bd9Sstevel@tonic-gate if (len > MAXOFF_T)
21717c478bd9Sstevel@tonic-gate return (ENOMEM);
21727c478bd9Sstevel@tonic-gate #endif
21737c478bd9Sstevel@tonic-gate
21747c478bd9Sstevel@tonic-gate if (vp->v_flag & VNOMAP)
21757c478bd9Sstevel@tonic-gate return (ENOSYS);
21767c478bd9Sstevel@tonic-gate
2177ae115bc7Smrj if (off < 0 || (offset_t)(off + len) < 0 ||
217840c009a9Speterte off > MAXOFF_T || (off + len) > MAXOFF_T)
217940c009a9Speterte return (ENXIO);
21807c478bd9Sstevel@tonic-gate
21817c478bd9Sstevel@tonic-gate if (vp->v_type != VREG)
21827c478bd9Sstevel@tonic-gate return (ENODEV);
21837c478bd9Sstevel@tonic-gate
21847c478bd9Sstevel@tonic-gate /*
21857c478bd9Sstevel@tonic-gate * Don't allow mapping to a locked file
21867c478bd9Sstevel@tonic-gate */
21877c478bd9Sstevel@tonic-gate if (vn_has_mandatory_locks(vp, tp->tn_mode)) {
21887c478bd9Sstevel@tonic-gate return (EAGAIN);
21897c478bd9Sstevel@tonic-gate }
21907c478bd9Sstevel@tonic-gate
21917c478bd9Sstevel@tonic-gate as_rangelock(as);
219260946fe0Smec error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
219360946fe0Smec if (error != 0) {
21947c478bd9Sstevel@tonic-gate as_rangeunlock(as);
219560946fe0Smec return (error);
21967c478bd9Sstevel@tonic-gate }
21977c478bd9Sstevel@tonic-gate
21987c478bd9Sstevel@tonic-gate vn_a.vp = vp;
21997c478bd9Sstevel@tonic-gate vn_a.offset = (u_offset_t)off;
22007c478bd9Sstevel@tonic-gate vn_a.type = flags & MAP_TYPE;
22017c478bd9Sstevel@tonic-gate vn_a.prot = prot;
22027c478bd9Sstevel@tonic-gate vn_a.maxprot = maxprot;
22037c478bd9Sstevel@tonic-gate vn_a.flags = flags & ~MAP_TYPE;
22047c478bd9Sstevel@tonic-gate vn_a.cred = cred;
22057c478bd9Sstevel@tonic-gate vn_a.amp = NULL;
22067c478bd9Sstevel@tonic-gate vn_a.szc = 0;
22077c478bd9Sstevel@tonic-gate vn_a.lgrp_mem_policy_flags = 0;
22087c478bd9Sstevel@tonic-gate
22097c478bd9Sstevel@tonic-gate error = as_map(as, *addrp, len, segvn_create, &vn_a);
22107c478bd9Sstevel@tonic-gate as_rangeunlock(as);
22117c478bd9Sstevel@tonic-gate return (error);
22127c478bd9Sstevel@tonic-gate }
22137c478bd9Sstevel@tonic-gate
22147c478bd9Sstevel@tonic-gate /*
22157c478bd9Sstevel@tonic-gate * tmp_addmap and tmp_delmap can't be called since the vp
22167c478bd9Sstevel@tonic-gate * maintained in the segvn mapping is NULL.
22177c478bd9Sstevel@tonic-gate */
22187c478bd9Sstevel@tonic-gate /* ARGSUSED */
22197c478bd9Sstevel@tonic-gate static int
22207c478bd9Sstevel@tonic-gate tmp_addmap(
22217c478bd9Sstevel@tonic-gate struct vnode *vp,
22227c478bd9Sstevel@tonic-gate offset_t off,
22237c478bd9Sstevel@tonic-gate struct as *as,
22247c478bd9Sstevel@tonic-gate caddr_t addr,
22257c478bd9Sstevel@tonic-gate size_t len,
22267c478bd9Sstevel@tonic-gate uchar_t prot,
22277c478bd9Sstevel@tonic-gate uchar_t maxprot,
22287c478bd9Sstevel@tonic-gate uint_t flags,
2229da6c28aaSamw struct cred *cred,
2230da6c28aaSamw caller_context_t *ct)
22317c478bd9Sstevel@tonic-gate {
22327c478bd9Sstevel@tonic-gate return (0);
22337c478bd9Sstevel@tonic-gate }
22347c478bd9Sstevel@tonic-gate
22357c478bd9Sstevel@tonic-gate /* ARGSUSED */
22367c478bd9Sstevel@tonic-gate static int
22377c478bd9Sstevel@tonic-gate tmp_delmap(
22387c478bd9Sstevel@tonic-gate struct vnode *vp,
22397c478bd9Sstevel@tonic-gate offset_t off,
22407c478bd9Sstevel@tonic-gate struct as *as,
22417c478bd9Sstevel@tonic-gate caddr_t addr,
22427c478bd9Sstevel@tonic-gate size_t len,
22437c478bd9Sstevel@tonic-gate uint_t prot,
22447c478bd9Sstevel@tonic-gate uint_t maxprot,
22457c478bd9Sstevel@tonic-gate uint_t flags,
2246da6c28aaSamw struct cred *cred,
2247da6c28aaSamw caller_context_t *ct)
22487c478bd9Sstevel@tonic-gate {
22497c478bd9Sstevel@tonic-gate return (0);
22507c478bd9Sstevel@tonic-gate }
22517c478bd9Sstevel@tonic-gate
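/*
 * Implement F_FREESP for tmp_space(): truncate the file to l_start,
 * honoring any mandatory locks on the affected range.  l_len must be
 * zero (free to end of file).
 */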
22527c478bd9Sstevel@tonic-gate static int
22537c478bd9Sstevel@tonic-gate tmp_freesp(struct vnode *vp, struct flock64 *lp, int flag)
22547c478bd9Sstevel@tonic-gate {
22557c478bd9Sstevel@tonic-gate register int i;
22567c478bd9Sstevel@tonic-gate register struct tmpnode *tp = VTOTN(vp);
22577c478bd9Sstevel@tonic-gate int error;
22587c478bd9Sstevel@tonic-gate
22597c478bd9Sstevel@tonic-gate ASSERT(vp->v_type == VREG);
22607c478bd9Sstevel@tonic-gate ASSERT(lp->l_start >= 0);
22617c478bd9Sstevel@tonic-gate
22627c478bd9Sstevel@tonic-gate if (lp->l_len != 0)
22637c478bd9Sstevel@tonic-gate return (EINVAL);
22647c478bd9Sstevel@tonic-gate
22657c478bd9Sstevel@tonic-gate rw_enter(&tp->tn_rwlock, RW_WRITER);
22667c478bd9Sstevel@tonic-gate if (tp->tn_size == lp->l_start) {
22677c478bd9Sstevel@tonic-gate rw_exit(&tp->tn_rwlock);
22687c478bd9Sstevel@tonic-gate return (0);
22697c478bd9Sstevel@tonic-gate }
22707c478bd9Sstevel@tonic-gate
22717c478bd9Sstevel@tonic-gate /*
22727c478bd9Sstevel@tonic-gate * Check for any mandatory locks on the range
22737c478bd9Sstevel@tonic-gate */
22747c478bd9Sstevel@tonic-gate if (MANDLOCK(vp, tp->tn_mode)) {
22757c478bd9Sstevel@tonic-gate long save_start;
22767c478bd9Sstevel@tonic-gate
22777c478bd9Sstevel@tonic-gate save_start = lp->l_start;
22787c478bd9Sstevel@tonic-gate
22797c478bd9Sstevel@tonic-gate if (tp->tn_size < lp->l_start) {
22807c478bd9Sstevel@tonic-gate /*
22817c478bd9Sstevel@tonic-gate * "Truncate up" case: need to make sure there
22827c478bd9Sstevel@tonic-gate * is no lock beyond current end-of-file. To
22837c478bd9Sstevel@tonic-gate * do so, we need to set l_start to the size
22847c478bd9Sstevel@tonic-gate * of the file temporarily.
22857c478bd9Sstevel@tonic-gate */
22867c478bd9Sstevel@tonic-gate lp->l_start = tp->tn_size;
22877c478bd9Sstevel@tonic-gate }
22887c478bd9Sstevel@tonic-gate lp->l_type = F_WRLCK;
22897c478bd9Sstevel@tonic-gate lp->l_sysid = 0;
22907c478bd9Sstevel@tonic-gate lp->l_pid = ttoproc(curthread)->p_pid;
22917c478bd9Sstevel@tonic-gate i = (flag & (FNDELAY|FNONBLOCK)) ? 0 : SLPFLCK;
22927c478bd9Sstevel@tonic-gate if ((i = reclock(vp, lp, i, 0, lp->l_start, NULL)) != 0 ||
22937c478bd9Sstevel@tonic-gate lp->l_type != F_UNLCK) {
22947c478bd9Sstevel@tonic-gate rw_exit(&tp->tn_rwlock);
22957c478bd9Sstevel@tonic-gate return (i ? i : EAGAIN);
22967c478bd9Sstevel@tonic-gate }
22977c478bd9Sstevel@tonic-gate
22987c478bd9Sstevel@tonic-gate lp->l_start = save_start;
22997c478bd9Sstevel@tonic-gate }
23007c478bd9Sstevel@tonic-gate VFSTOTM(vp->v_vfsp);
23017c478bd9Sstevel@tonic-gate
23027c478bd9Sstevel@tonic-gate rw_enter(&tp->tn_contents, RW_WRITER);
23037c478bd9Sstevel@tonic-gate error = tmpnode_trunc((struct tmount *)VFSTOTM(vp->v_vfsp),
23047c478bd9Sstevel@tonic-gate tp, (ulong_t)lp->l_start);
23057c478bd9Sstevel@tonic-gate rw_exit(&tp->tn_contents);
23067c478bd9Sstevel@tonic-gate rw_exit(&tp->tn_rwlock);
23077c478bd9Sstevel@tonic-gate return (error);
23087c478bd9Sstevel@tonic-gate }
23097c478bd9Sstevel@tonic-gate
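/*
 * VOP_SPACE entry point.  Only F_FREESP is supported; the request is
 * normalized with convoff() and handed to tmp_freesp().
 */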
23107c478bd9Sstevel@tonic-gate /* ARGSUSED */
23117c478bd9Sstevel@tonic-gate static int
23127c478bd9Sstevel@tonic-gate tmp_space(
23137c478bd9Sstevel@tonic-gate struct vnode *vp,
23147c478bd9Sstevel@tonic-gate int cmd,
23157c478bd9Sstevel@tonic-gate struct flock64 *bfp,
23167c478bd9Sstevel@tonic-gate int flag,
23177c478bd9Sstevel@tonic-gate offset_t offset,
23187c478bd9Sstevel@tonic-gate cred_t *cred,
23197c478bd9Sstevel@tonic-gate caller_context_t *ct)
23207c478bd9Sstevel@tonic-gate {
23217c478bd9Sstevel@tonic-gate int error;
23227c478bd9Sstevel@tonic-gate
23237c478bd9Sstevel@tonic-gate if (cmd != F_FREESP)
23247c478bd9Sstevel@tonic-gate return (EINVAL);
23257c478bd9Sstevel@tonic-gate if ((error = convoff(vp, bfp, 0, (offset_t)offset)) == 0) {
23267c478bd9Sstevel@tonic-gate if ((bfp->l_start > MAXOFF_T) || (bfp->l_len > MAXOFF_T))
23277c478bd9Sstevel@tonic-gate return (EFBIG);
23287c478bd9Sstevel@tonic-gate error = tmp_freesp(vp, bfp, flag);
232972102e74SBryan Cantrill
233072102e74SBryan Cantrill if (error == 0 && bfp->l_start == 0)
233172102e74SBryan Cantrill vnevent_truncate(vp, ct);
23327c478bd9Sstevel@tonic-gate }
23337c478bd9Sstevel@tonic-gate return (error);
23347c478bd9Sstevel@tonic-gate }
23357c478bd9Sstevel@tonic-gate
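/*
 * VOP_SEEK: reject offsets outside the range [0, MAXOFFSET_T].
 */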
23367c478bd9Sstevel@tonic-gate /* ARGSUSED */
23377c478bd9Sstevel@tonic-gate static int
2338da6c28aaSamw tmp_seek(
2339da6c28aaSamw struct vnode *vp,
2340da6c28aaSamw offset_t ooff,
2341da6c28aaSamw offset_t *noffp,
2342da6c28aaSamw caller_context_t *ct)
23437c478bd9Sstevel@tonic-gate {
23447c478bd9Sstevel@tonic-gate return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
23457c478bd9Sstevel@tonic-gate }
23467c478bd9Sstevel@tonic-gate
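/*
 * VOP_RWLOCK/VOP_RWUNLOCK: serialize access to the tmpnode through
 * tn_rwlock, taken as writer for writes and as reader otherwise.
 */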
23477c478bd9Sstevel@tonic-gate /* ARGSUSED2 */
23487c478bd9Sstevel@tonic-gate static int
23497c478bd9Sstevel@tonic-gate tmp_rwlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
23507c478bd9Sstevel@tonic-gate {
23517c478bd9Sstevel@tonic-gate struct tmpnode *tp = VTOTN(vp);
23527c478bd9Sstevel@tonic-gate
23537c478bd9Sstevel@tonic-gate if (write_lock) {
23547c478bd9Sstevel@tonic-gate rw_enter(&tp->tn_rwlock, RW_WRITER);
23557c478bd9Sstevel@tonic-gate } else {
23567c478bd9Sstevel@tonic-gate rw_enter(&tp->tn_rwlock, RW_READER);
23577c478bd9Sstevel@tonic-gate }
23587c478bd9Sstevel@tonic-gate return (write_lock);
23597c478bd9Sstevel@tonic-gate }
23607c478bd9Sstevel@tonic-gate
23617c478bd9Sstevel@tonic-gate /* ARGSUSED1 */
23627c478bd9Sstevel@tonic-gate static void
23637c478bd9Sstevel@tonic-gate tmp_rwunlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
23647c478bd9Sstevel@tonic-gate {
23657c478bd9Sstevel@tonic-gate struct tmpnode *tp = VTOTN(vp);
23667c478bd9Sstevel@tonic-gate
23677c478bd9Sstevel@tonic-gate rw_exit(&tp->tn_rwlock);
23687c478bd9Sstevel@tonic-gate }
23697c478bd9Sstevel@tonic-gate
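/*
 * VOP_PATHCONF: answer _PC_XATTR_EXISTS, _PC_SATTR_ENABLED/EXISTS and
 * _PC_TIMESTAMP_RESOLUTION locally; everything else is passed on to
 * the generic fs_pathconf().
 */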
23707c478bd9Sstevel@tonic-gate static int
2371da6c28aaSamw tmp_pathconf(
2372da6c28aaSamw struct vnode *vp,
2373da6c28aaSamw int cmd,
2374da6c28aaSamw ulong_t *valp,
2375da6c28aaSamw cred_t *cr,
2376da6c28aaSamw caller_context_t *ct)
23777c478bd9Sstevel@tonic-gate {
23787c478bd9Sstevel@tonic-gate struct tmpnode *tp = NULL;
23797c478bd9Sstevel@tonic-gate int error;
23807c478bd9Sstevel@tonic-gate
23817c478bd9Sstevel@tonic-gate switch (cmd) {
23827c478bd9Sstevel@tonic-gate case _PC_XATTR_EXISTS:
23837c478bd9Sstevel@tonic-gate if (vp->v_vfsp->vfs_flag & VFS_XATTR) {
23847c478bd9Sstevel@tonic-gate *valp = 0; /* assume no attributes */
23857c478bd9Sstevel@tonic-gate error = 0; /* okay to ask */
23867c478bd9Sstevel@tonic-gate tp = VTOTN(vp);
23877c478bd9Sstevel@tonic-gate rw_enter(&tp->tn_rwlock, RW_READER);
23887c478bd9Sstevel@tonic-gate if (tp->tn_xattrdp) {
23897c478bd9Sstevel@tonic-gate rw_enter(&tp->tn_xattrdp->tn_rwlock, RW_READER);
23907c478bd9Sstevel@tonic-gate /* do not count "." and ".." */
23917c478bd9Sstevel@tonic-gate if (tp->tn_xattrdp->tn_dirents > 2)
23927c478bd9Sstevel@tonic-gate *valp = 1;
23937c478bd9Sstevel@tonic-gate rw_exit(&tp->tn_xattrdp->tn_rwlock);
23947c478bd9Sstevel@tonic-gate }
23957c478bd9Sstevel@tonic-gate rw_exit(&tp->tn_rwlock);
23967c478bd9Sstevel@tonic-gate } else {
23977c478bd9Sstevel@tonic-gate error = EINVAL;
23987c478bd9Sstevel@tonic-gate }
23997c478bd9Sstevel@tonic-gate break;
2400da6c28aaSamw case _PC_SATTR_ENABLED:
2401da6c28aaSamw case _PC_SATTR_EXISTS:
24029660e5cbSJanice Chang *valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
2403da6c28aaSamw (vp->v_type == VREG || vp->v_type == VDIR);
2404da6c28aaSamw error = 0;
2405da6c28aaSamw break;
24063b862e9aSRoger A. Faulkner case _PC_TIMESTAMP_RESOLUTION:
24073b862e9aSRoger A. Faulkner /* nanosecond timestamp resolution */
24083b862e9aSRoger A. Faulkner *valp = 1L;
24093b862e9aSRoger A. Faulkner error = 0;
24103b862e9aSRoger A. Faulkner break;
24117c478bd9Sstevel@tonic-gate default:
2412da6c28aaSamw error = fs_pathconf(vp, cmd, valp, cr, ct);
24137c478bd9Sstevel@tonic-gate }
24147c478bd9Sstevel@tonic-gate return (error);
24157c478bd9Sstevel@tonic-gate }
24167c478bd9Sstevel@tonic-gate
24177c478bd9Sstevel@tonic-gate
24187c478bd9Sstevel@tonic-gate struct vnodeops *tmp_vnodeops;
24197c478bd9Sstevel@tonic-gate
24207c478bd9Sstevel@tonic-gate const fs_operation_def_t tmp_vnodeops_template[] = {
2421aa59c4cbSrsb VOPNAME_OPEN, { .vop_open = tmp_open },
2422aa59c4cbSrsb VOPNAME_CLOSE, { .vop_close = tmp_close },
2423aa59c4cbSrsb VOPNAME_READ, { .vop_read = tmp_read },
2424aa59c4cbSrsb VOPNAME_WRITE, { .vop_write = tmp_write },
2425aa59c4cbSrsb VOPNAME_IOCTL, { .vop_ioctl = tmp_ioctl },
2426aa59c4cbSrsb VOPNAME_GETATTR, { .vop_getattr = tmp_getattr },
2427aa59c4cbSrsb VOPNAME_SETATTR, { .vop_setattr = tmp_setattr },
2428aa59c4cbSrsb VOPNAME_ACCESS, { .vop_access = tmp_access },
2429aa59c4cbSrsb VOPNAME_LOOKUP, { .vop_lookup = tmp_lookup },
2430aa59c4cbSrsb VOPNAME_CREATE, { .vop_create = tmp_create },
2431aa59c4cbSrsb VOPNAME_REMOVE, { .vop_remove = tmp_remove },
2432aa59c4cbSrsb VOPNAME_LINK, { .vop_link = tmp_link },
2433aa59c4cbSrsb VOPNAME_RENAME, { .vop_rename = tmp_rename },
2434aa59c4cbSrsb VOPNAME_MKDIR, { .vop_mkdir = tmp_mkdir },
2435aa59c4cbSrsb VOPNAME_RMDIR, { .vop_rmdir = tmp_rmdir },
2436aa59c4cbSrsb VOPNAME_READDIR, { .vop_readdir = tmp_readdir },
2437aa59c4cbSrsb VOPNAME_SYMLINK, { .vop_symlink = tmp_symlink },
2438aa59c4cbSrsb VOPNAME_READLINK, { .vop_readlink = tmp_readlink },
2439aa59c4cbSrsb VOPNAME_FSYNC, { .vop_fsync = tmp_fsync },
2440aa59c4cbSrsb VOPNAME_INACTIVE, { .vop_inactive = tmp_inactive },
2441aa59c4cbSrsb VOPNAME_FID, { .vop_fid = tmp_fid },
2442aa59c4cbSrsb VOPNAME_RWLOCK, { .vop_rwlock = tmp_rwlock },
2443aa59c4cbSrsb VOPNAME_RWUNLOCK, { .vop_rwunlock = tmp_rwunlock },
2444aa59c4cbSrsb VOPNAME_SEEK, { .vop_seek = tmp_seek },
2445aa59c4cbSrsb VOPNAME_SPACE, { .vop_space = tmp_space },
2446aa59c4cbSrsb VOPNAME_GETPAGE, { .vop_getpage = tmp_getpage },
2447aa59c4cbSrsb VOPNAME_PUTPAGE, { .vop_putpage = tmp_putpage },
2448aa59c4cbSrsb VOPNAME_MAP, { .vop_map = tmp_map },
2449aa59c4cbSrsb VOPNAME_ADDMAP, { .vop_addmap = tmp_addmap },
2450aa59c4cbSrsb VOPNAME_DELMAP, { .vop_delmap = tmp_delmap },
2451aa59c4cbSrsb VOPNAME_PATHCONF, { .vop_pathconf = tmp_pathconf },
2452aa59c4cbSrsb VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
24537c478bd9Sstevel@tonic-gate NULL, NULL
24547c478bd9Sstevel@tonic-gate };
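/*
 * The template above is consumed at file system initialization time.
 * A minimal sketch of how such a template is typically installed (the
 * real call lives in the tmpfs VFS initialization code, not in this
 * file, and the "tmpfs" name below is illustrative):
 *
 *	int error;
 *
 *	error = vn_make_ops("tmpfs", tmp_vnodeops_template, &tmp_vnodeops);
 *	if (error != 0)
 *		cmn_err(CE_WARN, "tmpfs: bad vnode ops template");
 *
 * vn_make_ops() fills in tmp_vnodeops; operations not named in the
 * template receive generic defaults from the VFS layer.
 */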