xref: /titanic_51/usr/src/uts/common/fs/zfs/zfs_vnops.c (revision 8276d7915abc7c44934b5701e6ae35ec26eba740)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 /* Portions Copyright 2007 Jeremy Teo */
26 /* Portions Copyright 2010 Robert Milkowski */
27 
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/time.h>
31 #include <sys/systm.h>
32 #include <sys/sysmacros.h>
33 #include <sys/resource.h>
34 #include <sys/vfs.h>
35 #include <sys/vfs_opreg.h>
36 #include <sys/vnode.h>
37 #include <sys/file.h>
38 #include <sys/stat.h>
39 #include <sys/kmem.h>
40 #include <sys/taskq.h>
41 #include <sys/uio.h>
42 #include <sys/vmsystm.h>
43 #include <sys/atomic.h>
44 #include <sys/vm.h>
45 #include <vm/seg_vn.h>
46 #include <vm/pvn.h>
47 #include <vm/as.h>
48 #include <vm/kpm.h>
49 #include <vm/seg_kpm.h>
50 #include <sys/mman.h>
51 #include <sys/pathname.h>
52 #include <sys/cmn_err.h>
53 #include <sys/errno.h>
54 #include <sys/unistd.h>
55 #include <sys/zfs_dir.h>
56 #include <sys/zfs_acl.h>
57 #include <sys/zfs_ioctl.h>
58 #include <sys/fs/zfs.h>
59 #include <sys/dmu.h>
60 #include <sys/dmu_objset.h>
61 #include <sys/spa.h>
62 #include <sys/txg.h>
63 #include <sys/dbuf.h>
64 #include <sys/zap.h>
65 #include <sys/sa.h>
66 #include <sys/dirent.h>
67 #include <sys/policy.h>
68 #include <sys/sunddi.h>
69 #include <sys/filio.h>
70 #include <sys/sid.h>
71 #include "fs/fs_subr.h"
72 #include <sys/zfs_ctldir.h>
73 #include <sys/zfs_fuid.h>
74 #include <sys/zfs_sa.h>
75 #include <sys/dnlc.h>
76 #include <sys/zfs_rlock.h>
77 #include <sys/extdirent.h>
78 #include <sys/kidmap.h>
79 #include <sys/cred.h>
80 #include <sys/attr.h>
81 
82 /*
83  * Programming rules.
84  *
85  * Each vnode op performs some logical unit of work.  To do this, the ZPL must
86  * properly lock its in-core state, create a DMU transaction, do the work,
87  * record this work in the intent log (ZIL), commit the DMU transaction,
88  * and wait for the intent log to commit if it is a synchronous operation.
89  * Moreover, the vnode ops must work in both normal and log replay context.
90  * The ordering of events is important to avoid deadlocks and references
91  * to freed memory.  The example below illustrates the following Big Rules:
92  *
93  *  (1) A check must be made in each zfs thread for a mounted file system.
94  *	This is done avoiding races using ZFS_ENTER(zfsvfs).
95  *      A ZFS_EXIT(zfsvfs) is needed before all returns.  Any znodes
96  *      must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
97  *      can return EIO from the calling function.
98  *
99  *  (2)	VN_RELE() should always be the last thing except for zil_commit()
100  *	(if necessary) and ZFS_EXIT(). This is for 3 reasons:
101  *	First, if it's the last reference, the vnode/znode
102  *	can be freed, so the zp may point to freed memory.  Second, the last
103  *	reference will call zfs_zinactive(), which may induce a lot of work --
104  *	pushing cached pages (which acquires range locks) and syncing out
105  *	cached atime changes.  Third, zfs_zinactive() may require a new tx,
106  *	which could deadlock the system if you were already holding one.
107  *	If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
108  *
109  *  (3)	All range locks must be grabbed before calling dmu_tx_assign(),
110  *	as they can span dmu_tx_assign() calls.
111  *
112  *  (4)	Always pass TXG_NOWAIT as the second argument to dmu_tx_assign().
113  *	This is critical because we don't want to block while holding locks.
114  *	Note, in particular, that if a lock is sometimes acquired before
115  *	the tx assigns, and sometimes after (e.g. z_lock), then failing to
116  *	use a non-blocking assign can deadlock the system.  The scenario:
117  *
118  *	Thread A has grabbed a lock before calling dmu_tx_assign().
119  *	Thread B is in an already-assigned tx, and blocks for this lock.
120  *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
121  *	forever, because the previous txg can't quiesce until B's tx commits.
122  *
123  *	If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
124  *	then drop all locks, call dmu_tx_wait(), and try again.
125  *
126  *  (5)	If the operation succeeded, generate the intent log entry for it
127  *	before dropping locks.  This ensures that the ordering of events
128  *	in the intent log matches the order in which they actually occurred.
129  *      During ZIL replay the zfs_log_* functions will update the sequence
130  *	number to indicate the zil transaction has replayed.
131  *
132  *  (6)	At the end of each vnode op, the DMU tx must always commit,
133  *	regardless of whether there were any errors.
134  *
135  *  (7)	After dropping all locks, invoke zil_commit(zilog, foid)
136  *	to ensure that synchronous semantics are provided when necessary.
137  *
138  * In general, this is how things should be ordered in each vnode op:
139  *
140  *	ZFS_ENTER(zfsvfs);		// exit if unmounted
141  * top:
142  *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may VN_HOLD())
143  *	rw_enter(...);			// grab any other locks you need
144  *	tx = dmu_tx_create(...);	// get DMU tx
145  *	dmu_tx_hold_*();		// hold each object you might modify
146  *	error = dmu_tx_assign(tx, TXG_NOWAIT);	// try to assign
147  *	if (error) {
148  *		rw_exit(...);		// drop locks
149  *		zfs_dirent_unlock(dl);	// unlock directory entry
150  *		VN_RELE(...);		// release held vnodes
151  *		if (error == ERESTART) {
152  *			dmu_tx_wait(tx);
153  *			dmu_tx_abort(tx);
154  *			goto top;
155  *		}
156  *		dmu_tx_abort(tx);	// abort DMU tx
157  *		ZFS_EXIT(zfsvfs);	// finished in zfs
158  *		return (error);		// really out of space
159  *	}
160  *	error = do_real_work();		// do whatever this VOP does
161  *	if (error == 0)
162  *		zfs_log_*(...);		// on success, make ZIL entry
163  *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
164  *	rw_exit(...);			// drop locks
165  *	zfs_dirent_unlock(dl);		// unlock directory entry
166  *	VN_RELE(...);			// release held vnodes
167  *	zil_commit(zilog, foid);	// synchronous when necessary
168  *	ZFS_EXIT(zfsvfs);		// finished in zfs
169  *	return (error);			// done, report error
170  */
171 
172 /* ARGSUSED */
173 static int
174 zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
175 {
176 	znode_t	*zp = VTOZ(*vpp);
177 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
178 
179 	ZFS_ENTER(zfsvfs);
180 	ZFS_VERIFY_ZP(zp);
181 
182 	if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
183 	    ((flag & FAPPEND) == 0)) {
184 		ZFS_EXIT(zfsvfs);
185 		return (EPERM);
186 	}
187 
188 	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
189 	    ZTOV(zp)->v_type == VREG &&
190 	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
191 		if (fs_vscan(*vpp, cr, 0) != 0) {
192 			ZFS_EXIT(zfsvfs);
193 			return (EACCES);
194 		}
195 	}
196 
197 	/* Keep a count of the synchronous opens in the znode */
198 	if (flag & (FSYNC | FDSYNC))
199 		atomic_inc_32(&zp->z_sync_cnt);
200 
201 	ZFS_EXIT(zfsvfs);
202 	return (0);
203 }
204 
205 /* ARGSUSED */
206 static int
207 zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
208     caller_context_t *ct)
209 {
210 	znode_t	*zp = VTOZ(vp);
211 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
212 
213 	/*
214 	 * Clean up any locks held by this process on the vp.
215 	 */
216 	cleanlocks(vp, ddi_get_pid(), 0);
217 	cleanshares(vp, ddi_get_pid());
218 
219 	ZFS_ENTER(zfsvfs);
220 	ZFS_VERIFY_ZP(zp);
221 
222 	/* Decrement the synchronous opens in the znode */
223 	if ((flag & (FSYNC | FDSYNC)) && (count == 1))
224 		atomic_dec_32(&zp->z_sync_cnt);
225 
226 	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
227 	    ZTOV(zp)->v_type == VREG &&
228 	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
229 		VERIFY(fs_vscan(vp, cr, 1) == 0);
230 
231 	ZFS_EXIT(zfsvfs);
232 	return (0);
233 }
234 
235 /*
236  * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
237  * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter.
238  */
239 static int
240 zfs_holey(vnode_t *vp, int cmd, offset_t *off)
241 {
242 	znode_t	*zp = VTOZ(vp);
243 	uint64_t noff = (uint64_t)*off; /* new offset */
244 	uint64_t file_sz;
245 	int error;
246 	boolean_t hole;
247 
248 	file_sz = zp->z_size;
249 	if (noff >= file_sz)  {
250 		return (ENXIO);
251 	}
252 
253 	if (cmd == _FIO_SEEK_HOLE)
254 		hole = B_TRUE;
255 	else
256 		hole = B_FALSE;
257 
258 	error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);
259 
260 	/* end of file? */
261 	if ((error == ESRCH) || (noff > file_sz)) {
262 		/*
263 		 * Handle the virtual hole at the end of file.
264 		 */
265 		if (hole) {
266 			*off = file_sz;
267 			return (0);
268 		}
269 		return (ENXIO);
270 	}
271 
272 	if (noff < *off)
273 		return (error);
274 	*off = noff;
275 	return (error);
276 }
277 
278 /* ARGSUSED */
279 static int
280 zfs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred,
281     int *rvalp, caller_context_t *ct)
282 {
283 	offset_t off;
284 	int error;
285 	zfsvfs_t *zfsvfs;
286 	znode_t *zp;
287 
288 	switch (com) {
289 	case _FIOFFS:
290 		return (zfs_sync(vp->v_vfsp, 0, cred));
291 
292 		/*
293 		 * The following two ioctls are used by bfu.  Faking out,
294 		 * necessary to avoid bfu errors.
295 		 */
296 	case _FIOGDIO:
297 	case _FIOSDIO:
298 		return (0);
299 
300 	case _FIO_SEEK_DATA:
301 	case _FIO_SEEK_HOLE:
302 		if (ddi_copyin((void *)data, &off, sizeof (off), flag))
303 			return (EFAULT);
304 
305 		zp = VTOZ(vp);
306 		zfsvfs = zp->z_zfsvfs;
307 		ZFS_ENTER(zfsvfs);
308 		ZFS_VERIFY_ZP(zp);
309 
310 		/* offset parameter is in/out */
311 		error = zfs_holey(vp, com, &off);
312 		ZFS_EXIT(zfsvfs);
313 		if (error)
314 			return (error);
315 		if (ddi_copyout(&off, (void *)data, sizeof (off), flag))
316 			return (EFAULT);
317 		return (0);
318 	}
319 	return (ENOTTY);
320 }
321 
322 /*
323  * Utility functions to map and unmap a single physical page.  These
324  * are used to manage the mappable copies of ZFS file data, and therefore
325  * do not update ref/mod bits.
326  */
327 caddr_t
328 zfs_map_page(page_t *pp, enum seg_rw rw)
329 {
330 	if (kpm_enable)
331 		return (hat_kpm_mapin(pp, 0));
332 	ASSERT(rw == S_READ || rw == S_WRITE);
333 	return (ppmapin(pp, PROT_READ | ((rw == S_WRITE) ? PROT_WRITE : 0),
334 	    (caddr_t)-1));
335 }
336 
337 void
338 zfs_unmap_page(page_t *pp, caddr_t addr)
339 {
340 	if (kpm_enable) {
341 		hat_kpm_mapout(pp, 0, addr);
342 	} else {
343 		ppmapout(addr);
344 	}
345 }
346 
347 /*
348  * When a file is memory mapped, we must keep the IO data synchronized
349  * between the DMU cache and the memory mapped pages.  What this means:
350  *
351  * On Write:	If we find a memory mapped page, we write to *both*
352  *		the page and the dmu buffer.
353  */
354 static void
355 update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid)
356 {
357 	int64_t	off;
358 
359 	off = start & PAGEOFFSET;
360 	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
361 		page_t *pp;
362 		uint64_t nbytes = MIN(PAGESIZE - off, len);
363 
364 		if (pp = page_lookup(vp, start, SE_SHARED)) {
365 			caddr_t va;
366 
367 			va = zfs_map_page(pp, S_WRITE);
368 			(void) dmu_read(os, oid, start+off, nbytes, va+off,
369 			    DMU_READ_PREFETCH);
370 			zfs_unmap_page(pp, va);
371 			page_unlock(pp);
372 		}
373 		len -= nbytes;
374 		off = 0;
375 	}
376 }
377 
378 /*
379  * When a file is memory mapped, we must keep the IO data synchronized
380  * between the DMU cache and the memory mapped pages.  What this means:
381  *
382  * On Read:	We "read" preferentially from memory mapped pages,
383  *		else we default from the dmu buffer.
384  *
385  * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
386  *	the file is memory mapped.
387  */
388 static int
389 mappedread(vnode_t *vp, int nbytes, uio_t *uio)
390 {
391 	znode_t *zp = VTOZ(vp);
392 	objset_t *os = zp->z_zfsvfs->z_os;
393 	int64_t	start, off;
394 	int len = nbytes;
395 	int error = 0;
396 
397 	start = uio->uio_loffset;
398 	off = start & PAGEOFFSET;
399 	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
400 		page_t *pp;
401 		uint64_t bytes = MIN(PAGESIZE - off, len);
402 
403 		if (pp = page_lookup(vp, start, SE_SHARED)) {
404 			caddr_t va;
405 
406 			va = zfs_map_page(pp, S_READ);
407 			error = uiomove(va + off, bytes, UIO_READ, uio);
408 			zfs_unmap_page(pp, va);
409 			page_unlock(pp);
410 		} else {
411 			error = dmu_read_uio(os, zp->z_id, uio, bytes);
412 		}
413 		len -= bytes;
414 		off = 0;
415 		if (error)
416 			break;
417 	}
418 	return (error);
419 }
420 
421 offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
422 
423 /*
424  * Read bytes from specified file into supplied buffer.
425  *
426  *	IN:	vp	- vnode of file to be read from.
427  *		uio	- structure supplying read location, range info,
428  *			  and return buffer.
429  *		ioflag	- SYNC flags; used to provide FRSYNC semantics.
430  *		cr	- credentials of caller.
431  *		ct	- caller context
432  *
433  *	OUT:	uio	- updated offset and range, buffer filled.
434  *
435  *	RETURN:	0 if success
436  *		error code if failure
437  *
438  * Side Effects:
439  *	vp - atime updated if byte count > 0
440  */
441 /* ARGSUSED */
442 static int
443 zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
444 {
445 	znode_t		*zp = VTOZ(vp);
446 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
447 	objset_t	*os;
448 	ssize_t		n, nbytes;
449 	int		error;
450 	rl_t		*rl;
451 	xuio_t		*xuio = NULL;
452 
453 	ZFS_ENTER(zfsvfs);
454 	ZFS_VERIFY_ZP(zp);
455 	os = zfsvfs->z_os;
456 
457 	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
458 		ZFS_EXIT(zfsvfs);
459 		return (EACCES);
460 	}
461 
462 	/*
463 	 * Validate file offset
464 	 */
465 	if (uio->uio_loffset < (offset_t)0) {
466 		ZFS_EXIT(zfsvfs);
467 		return (EINVAL);
468 	}
469 
470 	/*
471 	 * Fasttrack empty reads
472 	 */
473 	if (uio->uio_resid == 0) {
474 		ZFS_EXIT(zfsvfs);
475 		return (0);
476 	}
477 
478 	/*
479 	 * Check for mandatory locks
480 	 */
481 	if (MANDMODE(zp->z_mode)) {
482 		if (error = chklock(vp, FREAD,
483 		    uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {
484 			ZFS_EXIT(zfsvfs);
485 			return (error);
486 		}
487 	}
488 
489 	/*
490 	 * If we're in FRSYNC mode, sync out this znode before reading it.
491 	 */
492 	if (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
493 		zil_commit(zfsvfs->z_log, zp->z_id);
494 
495 	/*
496 	 * Lock the range against changes.
497 	 */
498 	rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);
499 
500 	/*
501 	 * If we are reading past end-of-file we can skip
502 	 * to the end; but we might still need to set atime.
503 	 */
504 	if (uio->uio_loffset >= zp->z_size) {
505 		error = 0;
506 		goto out;
507 	}
508 
509 	ASSERT(uio->uio_loffset < zp->z_size);
510 	n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
511 
512 	if ((uio->uio_extflg == UIO_XUIO) &&
513 	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
514 		int nblk;
515 		int blksz = zp->z_blksz;
516 		uint64_t offset = uio->uio_loffset;
517 
518 		xuio = (xuio_t *)uio;
519 		if ((ISP2(blksz))) {
520 			nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
521 			    blksz)) / blksz;
522 		} else {
523 			ASSERT(offset + n <= blksz);
524 			nblk = 1;
525 		}
526 		(void) dmu_xuio_init(xuio, nblk);
527 
528 		if (vn_has_cached_data(vp)) {
529 			/*
530 			 * For simplicity, we always allocate a full buffer
531 			 * even if we only expect to read a portion of a block.
532 			 */
533 			while (--nblk >= 0) {
534 				(void) dmu_xuio_add(xuio,
535 				    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
536 				    blksz), 0, blksz);
537 			}
538 		}
539 	}
540 
541 	while (n > 0) {
542 		nbytes = MIN(n, zfs_read_chunk_size -
543 		    P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
544 
545 		if (vn_has_cached_data(vp))
546 			error = mappedread(vp, nbytes, uio);
547 		else
548 			error = dmu_read_uio(os, zp->z_id, uio, nbytes);
549 		if (error) {
550 			/* convert checksum errors into IO errors */
551 			if (error == ECKSUM)
552 				error = EIO;
553 			break;
554 		}
555 
556 		n -= nbytes;
557 	}
558 out:
559 	zfs_range_unlock(rl);
560 
561 	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
562 	ZFS_EXIT(zfsvfs);
563 	return (error);
564 }
565 
566 /*
567  * Write the bytes to a file.
568  *
569  *	IN:	vp	- vnode of file to be written to.
570  *		uio	- structure supplying write location, range info,
571  *			  and data buffer.
572  *		ioflag	- FAPPEND flag set if in append mode.
573  *		cr	- credentials of caller.
574  *		ct	- caller context (NFS/CIFS fem monitor only)
575  *
576  *	OUT:	uio	- updated offset and range.
577  *
578  *	RETURN:	0 if success
579  *		error code if failure
580  *
581  * Timestamps:
582  *	vp - ctime|mtime updated if byte count > 0
583  */
584 
585 /* ARGSUSED */
586 static int
587 zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
588 {
589 	znode_t		*zp = VTOZ(vp);
590 	rlim64_t	limit = uio->uio_llimit;
591 	ssize_t		start_resid = uio->uio_resid;
592 	ssize_t		tx_bytes;
593 	uint64_t	end_size;
594 	dmu_tx_t	*tx;
595 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
596 	zilog_t		*zilog;
597 	offset_t	woff;
598 	ssize_t		n, nbytes;
599 	rl_t		*rl;
600 	int		max_blksz = zfsvfs->z_max_blksz;
601 	int		error;
602 	arc_buf_t	*abuf;
603 	iovec_t		*aiov;
604 	xuio_t		*xuio = NULL;
605 	int		i_iov = 0;
606 	int		iovcnt = uio->uio_iovcnt;
607 	iovec_t		*iovp = uio->uio_iov;
608 	int		write_eof;
609 	int		count = 0;
610 	sa_bulk_attr_t	bulk[4];
611 	uint64_t	mtime[2], ctime[2];
612 
613 	/*
614 	 * Fasttrack empty write
615 	 */
616 	n = start_resid;
617 	if (n == 0)
618 		return (0);
619 
620 	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
621 		limit = MAXOFFSET_T;
622 
623 	ZFS_ENTER(zfsvfs);
624 	ZFS_VERIFY_ZP(zp);
625 
626 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
627 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
628 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
629 	    &zp->z_size, 8);
630 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
631 	    &zp->z_pflags, 8);
632 
633 	/*
634 	 * If immutable or not appending then return EPERM
635 	 */
636 	if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
637 	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
638 	    (uio->uio_loffset < zp->z_size))) {
639 		ZFS_EXIT(zfsvfs);
640 		return (EPERM);
641 	}
642 
643 	zilog = zfsvfs->z_log;
644 
645 	/*
646 	 * Validate file offset
647 	 */
648 	woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
649 	if (woff < 0) {
650 		ZFS_EXIT(zfsvfs);
651 		return (EINVAL);
652 	}
653 
654 	/*
655 	 * Check for mandatory locks before calling zfs_range_lock()
656 	 * in order to prevent a deadlock with locks set via fcntl().
657 	 */
658 	if (MANDMODE((mode_t)zp->z_mode) &&
659 	    (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {
660 		ZFS_EXIT(zfsvfs);
661 		return (error);
662 	}
663 
664 	/*
665 	 * Pre-fault the pages to ensure slow (eg NFS) pages
666 	 * don't hold up txg.
667 	 * Skip this if uio contains loaned arc_buf.
668 	 */
669 	if ((uio->uio_extflg == UIO_XUIO) &&
670 	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
671 		xuio = (xuio_t *)uio;
672 	else
673 		uio_prefaultpages(MIN(n, max_blksz), uio);
674 
675 	/*
676 	 * If in append mode, set the io offset pointer to eof.
677 	 */
678 	if (ioflag & FAPPEND) {
679 		/*
680 		 * Obtain an appending range lock to guarantee file append
681 		 * semantics.  We reset the write offset once we have the lock.
682 		 */
683 		rl = zfs_range_lock(zp, 0, n, RL_APPEND);
684 		woff = rl->r_off;
685 		if (rl->r_len == UINT64_MAX) {
686 			/*
687 			 * We overlocked the file because this write will cause
688 			 * the file block size to increase.
689 			 * Note that zp_size cannot change with this lock held.
690 			 */
691 			woff = zp->z_size;
692 		}
693 		uio->uio_loffset = woff;
694 	} else {
695 		/*
696 		 * Note that if the file block size will change as a result of
697 		 * this write, then this range lock will lock the entire file
698 		 * so that we can re-write the block safely.
699 		 */
700 		rl = zfs_range_lock(zp, woff, n, RL_WRITER);
701 	}
702 
703 	if (woff >= limit) {
704 		zfs_range_unlock(rl);
705 		ZFS_EXIT(zfsvfs);
706 		return (EFBIG);
707 	}
708 
709 	if ((woff + n) > limit || woff > (limit - n))
710 		n = limit - woff;
711 
712 	/* Will this write extend the file length? */
713 	write_eof = (woff + n > zp->z_size);
714 
715 	end_size = MAX(zp->z_size, woff + n);
716 
717 	/*
718 	 * Write the file in reasonable size chunks.  Each chunk is written
719 	 * in a separate transaction; this keeps the intent log records small
720 	 * and allows us to do more fine-grained space accounting.
721 	 */
722 	while (n > 0) {
723 		abuf = NULL;
724 		woff = uio->uio_loffset;
725 again:
726 		if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
727 		    zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
728 			if (abuf != NULL)
729 				dmu_return_arcbuf(abuf);
730 			error = EDQUOT;
731 			break;
732 		}
733 
734 		if (xuio && abuf == NULL) {
735 			ASSERT(i_iov < iovcnt);
736 			aiov = &iovp[i_iov];
737 			abuf = dmu_xuio_arcbuf(xuio, i_iov);
738 			dmu_xuio_clear(xuio, i_iov);
739 			DTRACE_PROBE3(zfs_cp_write, int, i_iov,
740 			    iovec_t *, aiov, arc_buf_t *, abuf);
741 			ASSERT((aiov->iov_base == abuf->b_data) ||
742 			    ((char *)aiov->iov_base - (char *)abuf->b_data +
743 			    aiov->iov_len == arc_buf_size(abuf)));
744 			i_iov++;
745 		} else if (abuf == NULL && n >= max_blksz &&
746 		    woff >= zp->z_size &&
747 		    P2PHASE(woff, max_blksz) == 0 &&
748 		    zp->z_blksz == max_blksz) {
749 			/*
750 			 * This write covers a full block.  "Borrow" a buffer
751 			 * from the dmu so that we can fill it before we enter
752 			 * a transaction.  This avoids the possibility of
753 			 * holding up the transaction if the data copy hangs
754 			 * up on a pagefault (e.g., from an NFS server mapping).
755 			 */
756 			size_t cbytes;
757 
758 			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
759 			    max_blksz);
760 			ASSERT(abuf != NULL);
761 			ASSERT(arc_buf_size(abuf) == max_blksz);
762 			if (error = uiocopy(abuf->b_data, max_blksz,
763 			    UIO_WRITE, uio, &cbytes)) {
764 				dmu_return_arcbuf(abuf);
765 				break;
766 			}
767 			ASSERT(cbytes == max_blksz);
768 		}
769 
770 		/*
771 		 * Start a transaction.
772 		 */
773 		tx = dmu_tx_create(zfsvfs->z_os);
774 		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
775 		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
776 		zfs_sa_upgrade_txholds(tx, zp);
777 		error = dmu_tx_assign(tx, TXG_NOWAIT);
778 		if (error) {
779 			if (error == ERESTART) {
780 				dmu_tx_wait(tx);
781 				dmu_tx_abort(tx);
782 				goto again;
783 			}
784 			dmu_tx_abort(tx);
785 			if (abuf != NULL)
786 				dmu_return_arcbuf(abuf);
787 			break;
788 		}
789 
790 		/*
791 		 * If zfs_range_lock() over-locked we grow the blocksize
792 		 * and then reduce the lock range.  This will only happen
793 		 * on the first iteration since zfs_range_reduce() will
794 		 * shrink down r_len to the appropriate size.
795 		 */
796 		if (rl->r_len == UINT64_MAX) {
797 			uint64_t new_blksz;
798 
799 			if (zp->z_blksz > max_blksz) {
800 				ASSERT(!ISP2(zp->z_blksz));
801 				new_blksz = MIN(end_size, SPA_MAXBLOCKSIZE);
802 			} else {
803 				new_blksz = MIN(end_size, max_blksz);
804 			}
805 			zfs_grow_blocksize(zp, new_blksz, tx);
806 			zfs_range_reduce(rl, woff, n);
807 		}
808 
809 		/*
810 		 * XXX - should we really limit each write to z_max_blksz?
811 		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
812 		 */
813 		nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
814 
815 		if (abuf == NULL) {
816 			tx_bytes = uio->uio_resid;
817 			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
818 			    uio, nbytes, tx);
819 			tx_bytes -= uio->uio_resid;
820 		} else {
821 			tx_bytes = nbytes;
822 			ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
823 			/*
824 			 * If this is not a full block write, but we are
825 			 * extending the file past EOF and this data starts
826 			 * block-aligned, use assign_arcbuf().  Otherwise,
827 			 * write via dmu_write().
828 			 */
829 			if (tx_bytes < max_blksz && (!write_eof ||
830 			    aiov->iov_base != abuf->b_data)) {
831 				ASSERT(xuio);
832 				dmu_write(zfsvfs->z_os, zp->z_id, woff,
833 				    aiov->iov_len, aiov->iov_base, tx);
834 				dmu_return_arcbuf(abuf);
835 				xuio_stat_wbuf_copied();
836 			} else {
837 				ASSERT(xuio || tx_bytes == max_blksz);
838 				dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
839 				    woff, abuf, tx);
840 			}
841 			ASSERT(tx_bytes <= uio->uio_resid);
842 			uioskip(uio, tx_bytes);
843 		}
844 		if (tx_bytes && vn_has_cached_data(vp)) {
845 			update_pages(vp, woff,
846 			    tx_bytes, zfsvfs->z_os, zp->z_id);
847 		}
848 
849 		/*
850 		 * If we made no progress, we're done.  If we made even
851 		 * partial progress, update the znode and ZIL accordingly.
852 		 */
853 		if (tx_bytes == 0) {
854 			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
855 			    (void *)&zp->z_size, sizeof (uint64_t), tx);
856 			dmu_tx_commit(tx);
857 			ASSERT(error != 0);
858 			break;
859 		}
860 
861 		/*
862 		 * Clear Set-UID/Set-GID bits on successful write if not
863 		 * privileged and at least one of the excute bits is set.
864 		 *
865 		 * It would be nice to to this after all writes have
866 		 * been done, but that would still expose the ISUID/ISGID
867 		 * to another app after the partial write is committed.
868 		 *
869 		 * Note: we don't call zfs_fuid_map_id() here because
870 		 * user 0 is not an ephemeral uid.
871 		 */
872 		mutex_enter(&zp->z_acl_lock);
873 		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
874 		    (S_IXUSR >> 6))) != 0 &&
875 		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
876 		    secpolicy_vnode_setid_retain(cr,
877 		    (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {
878 			uint64_t newmode;
879 			zp->z_mode &= ~(S_ISUID | S_ISGID);
880 			newmode = zp->z_mode;
881 			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
882 			    (void *)&newmode, sizeof (uint64_t), tx);
883 		}
884 		mutex_exit(&zp->z_acl_lock);
885 
886 		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
887 		    B_TRUE);
888 
889 		/*
890 		 * Update the file size (zp_size) if it has changed;
891 		 * account for possible concurrent updates.
892 		 */
893 		while ((end_size = zp->z_size) < uio->uio_loffset) {
894 			(void) atomic_cas_64(&zp->z_size, end_size,
895 			    uio->uio_loffset);
896 			ASSERT(error == 0);
897 		}
898 		/*
899 		 * If we are replaying and eof is non zero then force
900 		 * the file size to the specified eof. Note, there's no
901 		 * concurrency during replay.
902 		 */
903 		if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
904 			zp->z_size = zfsvfs->z_replay_eof;
905 
906 		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
907 
908 		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag);
909 		dmu_tx_commit(tx);
910 
911 		if (error != 0)
912 			break;
913 		ASSERT(tx_bytes == nbytes);
914 		n -= nbytes;
915 
916 		if (!xuio && n > 0)
917 			uio_prefaultpages(MIN(n, max_blksz), uio);
918 	}
919 
920 	zfs_range_unlock(rl);
921 
922 	/*
923 	 * If we're in replay mode, or we made no progress, return error.
924 	 * Otherwise, it's at least a partial write, so it's successful.
925 	 */
926 	if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
927 		ZFS_EXIT(zfsvfs);
928 		return (error);
929 	}
930 
931 	if (ioflag & (FSYNC | FDSYNC) ||
932 	    zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
933 		zil_commit(zilog, zp->z_id);
934 
935 	ZFS_EXIT(zfsvfs);
936 	return (0);
937 }
938 
939 void
940 zfs_get_done(zgd_t *zgd, int error)
941 {
942 	znode_t *zp = zgd->zgd_private;
943 	objset_t *os = zp->z_zfsvfs->z_os;
944 
945 	if (zgd->zgd_db)
946 		dmu_buf_rele(zgd->zgd_db, zgd);
947 
948 	zfs_range_unlock(zgd->zgd_rl);
949 
950 	/*
951 	 * Release the vnode asynchronously as we currently have the
952 	 * txg stopped from syncing.
953 	 */
954 	VN_RELE_ASYNC(ZTOV(zp), dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
955 
956 	if (error == 0 && zgd->zgd_bp)
957 		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
958 
959 	kmem_free(zgd, sizeof (zgd_t));
960 }
961 
962 #ifdef DEBUG
963 static int zil_fault_io = 0;
964 #endif
965 
966 /*
967  * Get data to generate a TX_WRITE intent log record.
968  */
969 int
970 zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
971 {
972 	zfsvfs_t *zfsvfs = arg;
973 	objset_t *os = zfsvfs->z_os;
974 	znode_t *zp;
975 	uint64_t object = lr->lr_foid;
976 	uint64_t offset = lr->lr_offset;
977 	uint64_t size = lr->lr_length;
978 	blkptr_t *bp = &lr->lr_blkptr;
979 	dmu_buf_t *db;
980 	zgd_t *zgd;
981 	int error = 0;
982 
983 	ASSERT(zio != NULL);
984 	ASSERT(size != 0);
985 
986 	/*
987 	 * Nothing to do if the file has been removed
988 	 */
989 	if (zfs_zget(zfsvfs, object, &zp) != 0)
990 		return (ENOENT);
991 	if (zp->z_unlinked) {
992 		/*
993 		 * Release the vnode asynchronously as we currently have the
994 		 * txg stopped from syncing.
995 		 */
996 		VN_RELE_ASYNC(ZTOV(zp),
997 		    dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
998 		return (ENOENT);
999 	}
1000 
1001 	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1002 	zgd->zgd_zilog = zfsvfs->z_log;
1003 	zgd->zgd_private = zp;
1004 
1005 	/*
1006 	 * Write records come in two flavors: immediate and indirect.
1007 	 * For small writes it's cheaper to store the data with the
1008 	 * log record (immediate); for large writes it's cheaper to
1009 	 * sync the data and get a pointer to it (indirect) so that
1010 	 * we don't have to write the data twice.
1011 	 */
1012 	if (buf != NULL) { /* immediate write */
1013 		zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
1014 		/* test for truncation needs to be done while range locked */
1015 		if (offset >= zp->z_size) {
1016 			error = ENOENT;
1017 		} else {
1018 			error = dmu_read(os, object, offset, size, buf,
1019 			    DMU_READ_NO_PREFETCH);
1020 		}
1021 		ASSERT(error == 0 || error == ENOENT);
1022 	} else { /* indirect write */
1023 		/*
1024 		 * Have to lock the whole block to ensure when it's
1025 		 * written out and it's checksum is being calculated
1026 		 * that no one can change the data. We need to re-check
1027 		 * blocksize after we get the lock in case it's changed!
1028 		 */
1029 		for (;;) {
1030 			uint64_t blkoff;
1031 			size = zp->z_blksz;
1032 			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
1033 			offset -= blkoff;
1034 			zgd->zgd_rl = zfs_range_lock(zp, offset, size,
1035 			    RL_READER);
1036 			if (zp->z_blksz == size)
1037 				break;
1038 			offset += blkoff;
1039 			zfs_range_unlock(zgd->zgd_rl);
1040 		}
1041 		/* test for truncation needs to be done while range locked */
1042 		if (lr->lr_offset >= zp->z_size)
1043 			error = ENOENT;
1044 #ifdef DEBUG
1045 		if (zil_fault_io) {
1046 			error = EIO;
1047 			zil_fault_io = 0;
1048 		}
1049 #endif
1050 		if (error == 0)
1051 			error = dmu_buf_hold(os, object, offset, zgd, &db,
1052 			    DMU_READ_NO_PREFETCH);
1053 
1054 		if (error == 0) {
1055 			zgd->zgd_db = db;
1056 			zgd->zgd_bp = bp;
1057 
1058 			ASSERT(db->db_offset == offset);
1059 			ASSERT(db->db_size == size);
1060 
1061 			error = dmu_sync(zio, lr->lr_common.lrc_txg,
1062 			    zfs_get_done, zgd);
1063 			ASSERT(error || lr->lr_length <= zp->z_blksz);
1064 
1065 			/*
1066 			 * On success, we need to wait for the write I/O
1067 			 * initiated by dmu_sync() to complete before we can
1068 			 * release this dbuf.  We will finish everything up
1069 			 * in the zfs_get_done() callback.
1070 			 */
1071 			if (error == 0)
1072 				return (0);
1073 
1074 			if (error == EALREADY) {
1075 				lr->lr_common.lrc_txtype = TX_WRITE2;
1076 				error = 0;
1077 			}
1078 		}
1079 	}
1080 
1081 	zfs_get_done(zgd, error);
1082 
1083 	return (error);
1084 }
1085 
1086 /*ARGSUSED*/
1087 static int
1088 zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr,
1089     caller_context_t *ct)
1090 {
1091 	znode_t *zp = VTOZ(vp);
1092 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1093 	int error;
1094 
1095 	ZFS_ENTER(zfsvfs);
1096 	ZFS_VERIFY_ZP(zp);
1097 
1098 	if (flag & V_ACE_MASK)
1099 		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
1100 	else
1101 		error = zfs_zaccess_rwx(zp, mode, flag, cr);
1102 
1103 	ZFS_EXIT(zfsvfs);
1104 	return (error);
1105 }
1106 
1107 /*
1108  * If vnode is for a device return a specfs vnode instead.
1109  */
1110 static int
1111 specvp_check(vnode_t **vpp, cred_t *cr)
1112 {
1113 	int error = 0;
1114 
1115 	if (IS_DEVVP(*vpp)) {
1116 		struct vnode *svp;
1117 
1118 		svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
1119 		VN_RELE(*vpp);
1120 		if (svp == NULL)
1121 			error = ENOSYS;
1122 		*vpp = svp;
1123 	}
1124 	return (error);
1125 }
1126 
1127 
1128 /*
1129  * Lookup an entry in a directory, or an extended attribute directory.
1130  * If it exists, return a held vnode reference for it.
1131  *
1132  *	IN:	dvp	- vnode of directory to search.
1133  *		nm	- name of entry to lookup.
1134  *		pnp	- full pathname to lookup [UNUSED].
1135  *		flags	- LOOKUP_XATTR set if looking for an attribute.
1136  *		rdir	- root directory vnode [UNUSED].
1137  *		cr	- credentials of caller.
1138  *		ct	- caller context
1139  *		direntflags - directory lookup flags
1140  *		realpnp - returned pathname.
1141  *
1142  *	OUT:	vpp	- vnode of located entry, NULL if not found.
1143  *
1144  *	RETURN:	0 if success
1145  *		error code if failure
1146  *
1147  * Timestamps:
1148  *	NA
1149  */
1150 /* ARGSUSED */
1151 static int
1152 zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
1153     int flags, vnode_t *rdir, cred_t *cr,  caller_context_t *ct,
1154     int *direntflags, pathname_t *realpnp)
1155 {
1156 	znode_t *zdp = VTOZ(dvp);
1157 	zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
1158 	int	error = 0;
1159 
1160 	/* fast path */
1161 	if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
1162 
1163 		if (dvp->v_type != VDIR) {
1164 			return (ENOTDIR);
1165 		} else if (zdp->z_sa_hdl == NULL) {
1166 			return (EIO);
1167 		}
1168 
1169 		if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
1170 			error = zfs_fastaccesschk_execute(zdp, cr);
1171 			if (!error) {
1172 				*vpp = dvp;
1173 				VN_HOLD(*vpp);
1174 				return (0);
1175 			}
1176 			return (error);
1177 		} else {
1178 			vnode_t *tvp = dnlc_lookup(dvp, nm);
1179 
1180 			if (tvp) {
1181 				error = zfs_fastaccesschk_execute(zdp, cr);
1182 				if (error) {
1183 					VN_RELE(tvp);
1184 					return (error);
1185 				}
1186 				if (tvp == DNLC_NO_VNODE) {
1187 					VN_RELE(tvp);
1188 					return (ENOENT);
1189 				} else {
1190 					*vpp = tvp;
1191 					return (specvp_check(vpp, cr));
1192 				}
1193 			}
1194 		}
1195 	}
1196 
1197 	DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp, char *, nm);
1198 
1199 	ZFS_ENTER(zfsvfs);
1200 	ZFS_VERIFY_ZP(zdp);
1201 
1202 	*vpp = NULL;
1203 
1204 	if (flags & LOOKUP_XATTR) {
1205 		/*
1206 		 * If the xattr property is off, refuse the lookup request.
1207 		 */
1208 		if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) {
1209 			ZFS_EXIT(zfsvfs);
1210 			return (EINVAL);
1211 		}
1212 
1213 		/*
1214 		 * We don't allow recursive attributes..
1215 		 * Maybe someday we will.
1216 		 */
1217 		if (zdp->z_pflags & ZFS_XATTR) {
1218 			ZFS_EXIT(zfsvfs);
1219 			return (EINVAL);
1220 		}
1221 
1222 		if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) {
1223 			ZFS_EXIT(zfsvfs);
1224 			return (error);
1225 		}
1226 
1227 		/*
1228 		 * Do we have permission to get into attribute directory?
1229 		 */
1230 
1231 		if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, 0,
1232 		    B_FALSE, cr)) {
1233 			VN_RELE(*vpp);
1234 			*vpp = NULL;
1235 		}
1236 
1237 		ZFS_EXIT(zfsvfs);
1238 		return (error);
1239 	}
1240 
1241 	if (dvp->v_type != VDIR) {
1242 		ZFS_EXIT(zfsvfs);
1243 		return (ENOTDIR);
1244 	}
1245 
1246 	/*
1247 	 * Check accessibility of directory.
1248 	 */
1249 
1250 	if (error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr)) {
1251 		ZFS_EXIT(zfsvfs);
1252 		return (error);
1253 	}
1254 
1255 	if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
1256 	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1257 		ZFS_EXIT(zfsvfs);
1258 		return (EILSEQ);
1259 	}
1260 
1261 	error = zfs_dirlook(zdp, nm, vpp, flags, direntflags, realpnp);
1262 	if (error == 0)
1263 		error = specvp_check(vpp, cr);
1264 
1265 	ZFS_EXIT(zfsvfs);
1266 	return (error);
1267 }
1268 
1269 /*
1270  * Attempt to create a new entry in a directory.  If the entry
1271  * already exists, truncate the file if permissible, else return
1272  * an error.  Return the vp of the created or trunc'd file.
1273  *
1274  *	IN:	dvp	- vnode of directory to put new file entry in.
1275  *		name	- name of new file entry.
1276  *		vap	- attributes of new file.
1277  *		excl	- flag indicating exclusive or non-exclusive mode.
1278  *		mode	- mode to open file with.
1279  *		cr	- credentials of caller.
1280  *		flag	- large file flag [UNUSED].
1281  *		ct	- caller context
1282  *		vsecp 	- ACL to be set
1283  *
1284  *	OUT:	vpp	- vnode of created or trunc'd entry.
1285  *
1286  *	RETURN:	0 if success
1287  *		error code if failure
1288  *
1289  * Timestamps:
1290  *	dvp - ctime|mtime updated if new entry created
1291  *	 vp - ctime|mtime always, atime if new
1292  */
1293 
1294 /* ARGSUSED */
1295 static int
1296 zfs_create(vnode_t *dvp, char *name, vattr_t *vap, vcexcl_t excl,
1297     int mode, vnode_t **vpp, cred_t *cr, int flag, caller_context_t *ct,
1298     vsecattr_t *vsecp)
1299 {
1300 	znode_t		*zp, *dzp = VTOZ(dvp);
1301 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1302 	zilog_t		*zilog;
1303 	objset_t	*os;
1304 	zfs_dirlock_t	*dl;
1305 	dmu_tx_t	*tx;
1306 	int		error;
1307 	ksid_t		*ksid;
1308 	uid_t		uid;
1309 	gid_t		gid = crgetgid(cr);
1310 	zfs_acl_ids_t   acl_ids;
1311 	boolean_t	fuid_dirtied;
1312 	boolean_t	have_acl = B_FALSE;
1313 
1314 	/*
1315 	 * If we have an ephemeral id, ACL, or XVATTR then
1316 	 * make sure file system is at proper version
1317 	 */
1318 
1319 	ksid = crgetsid(cr, KSID_OWNER);
1320 	if (ksid)
1321 		uid = ksid_getid(ksid);
1322 	else
1323 		uid = crgetuid(cr);
1324 
1325 	if (zfsvfs->z_use_fuids == B_FALSE &&
1326 	    (vsecp || (vap->va_mask & AT_XVATTR) ||
1327 	    IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1328 		return (EINVAL);
1329 
1330 	ZFS_ENTER(zfsvfs);
1331 	ZFS_VERIFY_ZP(dzp);
1332 	os = zfsvfs->z_os;
1333 	zilog = zfsvfs->z_log;
1334 
1335 	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
1336 	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1337 		ZFS_EXIT(zfsvfs);
1338 		return (EILSEQ);
1339 	}
1340 
1341 	if (vap->va_mask & AT_XVATTR) {
1342 		if ((error = secpolicy_xvattr((xvattr_t *)vap,
1343 		    crgetuid(cr), cr, vap->va_type)) != 0) {
1344 			ZFS_EXIT(zfsvfs);
1345 			return (error);
1346 		}
1347 	}
1348 top:
1349 	*vpp = NULL;
1350 
1351 	if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr))
1352 		vap->va_mode &= ~VSVTX;
1353 
1354 	if (*name == '\0') {
1355 		/*
1356 		 * Null component name refers to the directory itself.
1357 		 */
1358 		VN_HOLD(dvp);
1359 		zp = dzp;
1360 		dl = NULL;
1361 		error = 0;
1362 	} else {
1363 		/* possible VN_HOLD(zp) */
1364 		int zflg = 0;
1365 
1366 		if (flag & FIGNORECASE)
1367 			zflg |= ZCILOOK;
1368 
1369 		error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1370 		    NULL, NULL);
1371 		if (error) {
1372 			if (strcmp(name, "..") == 0)
1373 				error = EISDIR;
1374 			ZFS_EXIT(zfsvfs);
1375 			return (error);
1376 		}
1377 	}
1378 
1379 	if (zp == NULL) {
1380 		uint64_t txtype;
1381 
1382 		/*
1383 		 * Create a new file object and update the directory
1384 		 * to reference it.
1385 		 */
1386 		if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
1387 			goto out;
1388 		}
1389 
1390 		/*
1391 		 * We only support the creation of regular files in
1392 		 * extended attribute directories.
1393 		 */
1394 
1395 		if ((dzp->z_pflags & ZFS_XATTR) &&
1396 		    (vap->va_type != VREG)) {
1397 			error = EINVAL;
1398 			goto out;
1399 		}
1400 
1401 		if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
1402 		    cr, vsecp, &acl_ids)) != 0)
1403 			goto out;
1404 		have_acl = B_TRUE;
1405 
1406 		if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1407 			zfs_acl_ids_free(&acl_ids);
1408 			error = EDQUOT;
1409 			goto out;
1410 		}
1411 
1412 		tx = dmu_tx_create(os);
1413 
1414 		dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1415 		    ZFS_SA_BASE_ATTR_SIZE);
1416 
1417 		fuid_dirtied = zfsvfs->z_fuid_dirty;
1418 		if (fuid_dirtied)
1419 			zfs_fuid_txhold(zfsvfs, tx);
1420 		dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
1421 		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
1422 		if (!zfsvfs->z_use_sa &&
1423 		    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1424 			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1425 			    0, acl_ids.z_aclp->z_acl_bytes);
1426 		}
1427 		error = dmu_tx_assign(tx, TXG_NOWAIT);
1428 		if (error) {
1429 			zfs_dirent_unlock(dl);
1430 			if (error == ERESTART) {
1431 				dmu_tx_wait(tx);
1432 				dmu_tx_abort(tx);
1433 				goto top;
1434 			}
1435 			zfs_acl_ids_free(&acl_ids);
1436 			dmu_tx_abort(tx);
1437 			ZFS_EXIT(zfsvfs);
1438 			return (error);
1439 		}
1440 		zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
1441 
1442 		if (fuid_dirtied)
1443 			zfs_fuid_sync(zfsvfs, tx);
1444 
1445 		(void) zfs_link_create(dl, zp, tx, ZNEW);
1446 		txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
1447 		if (flag & FIGNORECASE)
1448 			txtype |= TX_CI;
1449 		zfs_log_create(zilog, tx, txtype, dzp, zp, name,
1450 		    vsecp, acl_ids.z_fuidp, vap);
1451 		zfs_acl_ids_free(&acl_ids);
1452 		dmu_tx_commit(tx);
1453 	} else {
1454 		int aflags = (flag & FAPPEND) ? V_APPEND : 0;
1455 
1456 		/*
1457 		 * A directory entry already exists for this name.
1458 		 */
1459 		/*
1460 		 * Can't truncate an existing file if in exclusive mode.
1461 		 */
1462 		if (excl == EXCL) {
1463 			error = EEXIST;
1464 			goto out;
1465 		}
1466 		/*
1467 		 * Can't open a directory for writing.
1468 		 */
1469 		if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) {
1470 			error = EISDIR;
1471 			goto out;
1472 		}
1473 		/*
1474 		 * Verify requested access to file.
1475 		 */
1476 		if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
1477 			goto out;
1478 		}
1479 
1480 		mutex_enter(&dzp->z_lock);
1481 		dzp->z_seq++;
1482 		mutex_exit(&dzp->z_lock);
1483 
1484 		/*
1485 		 * Truncate regular files if requested.
1486 		 */
1487 		if ((ZTOV(zp)->v_type == VREG) &&
1488 		    (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) {
1489 			/* we can't hold any locks when calling zfs_freesp() */
1490 			zfs_dirent_unlock(dl);
1491 			dl = NULL;
1492 			error = zfs_freesp(zp, 0, 0, mode, TRUE);
1493 			if (error == 0) {
1494 				vnevent_create(ZTOV(zp), ct);
1495 			}
1496 		}
1497 	}
1498 out:
1499 
1500 	if (dl)
1501 		zfs_dirent_unlock(dl);
1502 
1503 	if (error) {
1504 		if (zp)
1505 			VN_RELE(ZTOV(zp));
1506 	} else {
1507 		*vpp = ZTOV(zp);
1508 		error = specvp_check(vpp, cr);
1509 	}
1510 
1511 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1512 		zil_commit(zilog, 0);
1513 
1514 	ZFS_EXIT(zfsvfs);
1515 	return (error);
1516 }
1517 
1518 /*
1519  * Remove an entry from a directory.
1520  *
1521  *	IN:	dvp	- vnode of directory to remove entry from.
1522  *		name	- name of entry to remove.
1523  *		cr	- credentials of caller.
1524  *		ct	- caller context
1525  *		flags	- case flags
1526  *
1527  *	RETURN:	0 if success
1528  *		error code if failure
1529  *
1530  * Timestamps:
1531  *	dvp - ctime|mtime
1532  *	 vp - ctime (if nlink > 0)
1533  */
1534 
1535 uint64_t null_xattr = 0;
1536 
1537 /*ARGSUSED*/
1538 static int
1539 zfs_remove(vnode_t *dvp, char *name, cred_t *cr, caller_context_t *ct,
1540     int flags)
1541 {
1542 	znode_t		*zp, *dzp = VTOZ(dvp);
1543 	znode_t		*xzp = NULL;
1544 	vnode_t		*vp;
1545 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1546 	zilog_t		*zilog;
1547 	uint64_t	acl_obj, xattr_obj = 0;
1548 	uint64_t 	xattr_obj_unlinked = 0;
1549 	uint64_t	obj = 0;
1550 	zfs_dirlock_t	*dl;
1551 	dmu_tx_t	*tx;
1552 	boolean_t	may_delete_now, delete_now = FALSE;
1553 	boolean_t	unlinked, toobig = FALSE;
1554 	uint64_t	txtype;
1555 	pathname_t	*realnmp = NULL;
1556 	pathname_t	realnm;
1557 	int		error;
1558 	int		zflg = ZEXISTS;
1559 
1560 	ZFS_ENTER(zfsvfs);
1561 	ZFS_VERIFY_ZP(dzp);
1562 	zilog = zfsvfs->z_log;
1563 
1564 	if (flags & FIGNORECASE) {
1565 		zflg |= ZCILOOK;
1566 		pn_alloc(&realnm);
1567 		realnmp = &realnm;
1568 	}
1569 
1570 top:
1571 	/*
1572 	 * Attempt to lock directory; fail if entry doesn't exist.
1573 	 */
1574 	if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1575 	    NULL, realnmp)) {
1576 		if (realnmp)
1577 			pn_free(realnmp);
1578 		ZFS_EXIT(zfsvfs);
1579 		return (error);
1580 	}
1581 
1582 	vp = ZTOV(zp);
1583 
1584 	if (error = zfs_zaccess_delete(dzp, zp, cr)) {
1585 		goto out;
1586 	}
1587 
1588 	/*
1589 	 * Need to use rmdir for removing directories.
1590 	 */
1591 	if (vp->v_type == VDIR) {
1592 		error = EPERM;
1593 		goto out;
1594 	}
1595 
1596 	vnevent_remove(vp, dvp, name, ct);
1597 
1598 	if (realnmp)
1599 		dnlc_remove(dvp, realnmp->pn_buf);
1600 	else
1601 		dnlc_remove(dvp, name);
1602 
1603 	mutex_enter(&vp->v_lock);
1604 	may_delete_now = vp->v_count == 1 && !vn_has_cached_data(vp);
1605 	mutex_exit(&vp->v_lock);
1606 
1607 	/*
1608 	 * We may delete the znode now, or we may put it in the unlinked set;
1609 	 * it depends on whether we're the last link, and on whether there are
1610 	 * other holds on the vnode.  So we dmu_tx_hold() the right things to
1611 	 * allow for either case.
1612 	 */
1613 	obj = zp->z_id;
1614 	tx = dmu_tx_create(zfsvfs->z_os);
1615 	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1616 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1617 	zfs_sa_upgrade_txholds(tx, zp);
1618 	zfs_sa_upgrade_txholds(tx, dzp);
1619 	if (may_delete_now) {
1620 		toobig =
1621 		    zp->z_size > zp->z_blksz * DMU_MAX_DELETEBLKCNT;
1622 		/* if the file is too big, only hold_free a token amount */
1623 		dmu_tx_hold_free(tx, zp->z_id, 0,
1624 		    (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
1625 	}
1626 
1627 	/* are there any extended attributes? */
1628 	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1629 	    &xattr_obj, sizeof (xattr_obj));
1630 	if (xattr_obj) {
1631 		error = zfs_zget(zfsvfs, xattr_obj, &xzp);
1632 		ASSERT3U(error, ==, 0);
1633 		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
1634 		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
1635 	}
1636 
1637 	mutex_enter(&zp->z_lock);
1638 	if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
1639 		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
1640 	mutex_exit(&zp->z_lock);
1641 
1642 	/* charge as an update -- would be nice not to charge at all */
1643 	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1644 
1645 	error = dmu_tx_assign(tx, TXG_NOWAIT);
1646 	if (error) {
1647 		zfs_dirent_unlock(dl);
1648 		VN_RELE(vp);
1649 		if (error == ERESTART) {
1650 			dmu_tx_wait(tx);
1651 			dmu_tx_abort(tx);
1652 			goto top;
1653 		}
1654 		if (realnmp)
1655 			pn_free(realnmp);
1656 		dmu_tx_abort(tx);
1657 		ZFS_EXIT(zfsvfs);
1658 		return (error);
1659 	}
1660 
1661 	/*
1662 	 * Remove the directory entry.
1663 	 */
1664 	error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
1665 
1666 	if (error) {
1667 		dmu_tx_commit(tx);
1668 		goto out;
1669 	}
1670 
1671 	if (unlinked) {
1672 
1673 		/*
1674 		 * Hold z_lock so that we can make sure that the ACL obj
1675 		 * hasn't changed.  Could have been deleted due to
1676 		 * zfs_sa_upgrade().
1677 		 */
1678 		mutex_enter(&zp->z_lock);
1679 		mutex_enter(&vp->v_lock);
1680 		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1681 		    &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
1682 		delete_now = may_delete_now && !toobig &&
1683 		    vp->v_count == 1 && !vn_has_cached_data(vp) &&
1684 		    xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) ==
1685 		    acl_obj;
1686 		mutex_exit(&vp->v_lock);
1687 	}
1688 
1689 	if (delete_now) {
1690 		if (xattr_obj_unlinked) {
1691 			ASSERT3U(xzp->z_links, ==, 2);
1692 			mutex_enter(&xzp->z_lock);
1693 			xzp->z_unlinked = 1;
1694 			xzp->z_links = 0;
1695 			error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
1696 			    &xzp->z_links, sizeof (xzp->z_links), tx);
1697 			ASSERT3U(error,  ==,  0);
1698 			mutex_exit(&xzp->z_lock);
1699 			zfs_unlinked_add(xzp, tx);
1700 
1701 			if (zp->z_is_sa)
1702 				error = sa_remove(zp->z_sa_hdl,
1703 				    SA_ZPL_XATTR(zfsvfs), tx);
1704 			else
1705 				error = sa_update(zp->z_sa_hdl,
1706 				    SA_ZPL_XATTR(zfsvfs), &null_xattr,
1707 				    sizeof (uint64_t), tx);
1708 			ASSERT3U(error, ==, 0);
1709 		}
1710 		mutex_enter(&vp->v_lock);
1711 		vp->v_count--;
1712 		ASSERT3U(vp->v_count, ==, 0);
1713 		mutex_exit(&vp->v_lock);
1714 		mutex_exit(&zp->z_lock);
1715 		zfs_znode_delete(zp, tx);
1716 	} else if (unlinked) {
1717 		mutex_exit(&zp->z_lock);
1718 		zfs_unlinked_add(zp, tx);
1719 	}
1720 
1721 	txtype = TX_REMOVE;
1722 	if (flags & FIGNORECASE)
1723 		txtype |= TX_CI;
1724 	zfs_log_remove(zilog, tx, txtype, dzp, name, obj);
1725 
1726 	dmu_tx_commit(tx);
1727 out:
1728 	if (realnmp)
1729 		pn_free(realnmp);
1730 
1731 	zfs_dirent_unlock(dl);
1732 
1733 	if (!delete_now)
1734 		VN_RELE(vp);
1735 	if (xzp)
1736 		VN_RELE(ZTOV(xzp));
1737 
1738 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1739 		zil_commit(zilog, 0);
1740 
1741 	ZFS_EXIT(zfsvfs);
1742 	return (error);
1743 }
1744 
1745 /*
1746  * Create a new directory and insert it into dvp using the name
1747  * provided.  Return a pointer to the inserted directory.
1748  *
1749  *	IN:	dvp	- vnode of directory to add subdir to.
1750  *		dirname	- name of new directory.
1751  *		vap	- attributes of new directory.
1752  *		cr	- credentials of caller.
1753  *		ct	- caller context
1754  *		vsecp	- ACL to be set
1755  *
1756  *	OUT:	vpp	- vnode of created directory.
1757  *
1758  *	RETURN:	0 if success
1759  *		error code if failure
1760  *
1761  * Timestamps:
1762  *	dvp - ctime|mtime updated
1763  *	 vp - ctime|mtime|atime updated
1764  */
1765 /*ARGSUSED*/
1766 static int
1767 zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr,
1768     caller_context_t *ct, int flags, vsecattr_t *vsecp)
1769 {
1770 	znode_t		*zp, *dzp = VTOZ(dvp);
1771 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1772 	zilog_t		*zilog;
1773 	zfs_dirlock_t	*dl;
1774 	uint64_t	txtype;
1775 	dmu_tx_t	*tx;
1776 	int		error;
1777 	int		zf = ZNEW;
1778 	ksid_t		*ksid;
1779 	uid_t		uid;
1780 	gid_t		gid = crgetgid(cr);
1781 	zfs_acl_ids_t   acl_ids;
1782 	boolean_t	fuid_dirtied;
1783 
1784 	ASSERT(vap->va_type == VDIR);
1785 
1786 	/*
1787 	 * If we have an ephemeral id, ACL, or XVATTR then
1788 	 * make sure file system is at proper version
1789 	 */
1790 
1791 	ksid = crgetsid(cr, KSID_OWNER);
1792 	if (ksid)
1793 		uid = ksid_getid(ksid);
1794 	else
1795 		uid = crgetuid(cr);
1796 	if (zfsvfs->z_use_fuids == B_FALSE &&
1797 	    (vsecp || (vap->va_mask & AT_XVATTR) ||
1798 	    IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1799 		return (EINVAL);
1800 
1801 	ZFS_ENTER(zfsvfs);
1802 	ZFS_VERIFY_ZP(dzp);
1803 	zilog = zfsvfs->z_log;
1804 
1805 	if (dzp->z_pflags & ZFS_XATTR) {
1806 		ZFS_EXIT(zfsvfs);
1807 		return (EINVAL);
1808 	}
1809 
1810 	if (zfsvfs->z_utf8 && u8_validate(dirname,
1811 	    strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1812 		ZFS_EXIT(zfsvfs);
1813 		return (EILSEQ);
1814 	}
1815 	if (flags & FIGNORECASE)
1816 		zf |= ZCILOOK;
1817 
1818 	if (vap->va_mask & AT_XVATTR) {
1819 		if ((error = secpolicy_xvattr((xvattr_t *)vap,
1820 		    crgetuid(cr), cr, vap->va_type)) != 0) {
1821 			ZFS_EXIT(zfsvfs);
1822 			return (error);
1823 		}
1824 	}
1825 
1826 	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
1827 	    vsecp, &acl_ids)) != 0) {
1828 		ZFS_EXIT(zfsvfs);
1829 		return (error);
1830 	}
1831 	/*
1832 	 * First make sure the new directory doesn't exist.
1833 	 *
1834 	 * Existence is checked first to make sure we don't return
1835 	 * EACCES instead of EEXIST which can cause some applications
1836 	 * to fail.
1837 	 */
1838 top:
1839 	*vpp = NULL;
1840 
1841 	if (error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
1842 	    NULL, NULL)) {
1843 		zfs_acl_ids_free(&acl_ids);
1844 		ZFS_EXIT(zfsvfs);
1845 		return (error);
1846 	}
1847 
1848 	if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr)) {
1849 		zfs_acl_ids_free(&acl_ids);
1850 		zfs_dirent_unlock(dl);
1851 		ZFS_EXIT(zfsvfs);
1852 		return (error);
1853 	}
1854 
1855 	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1856 		zfs_acl_ids_free(&acl_ids);
1857 		zfs_dirent_unlock(dl);
1858 		ZFS_EXIT(zfsvfs);
1859 		return (EDQUOT);
1860 	}
1861 
1862 	/*
1863 	 * Add a new entry to the directory.
1864 	 */
1865 	tx = dmu_tx_create(zfsvfs->z_os);
1866 	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
1867 	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
1868 	fuid_dirtied = zfsvfs->z_fuid_dirty;
1869 	if (fuid_dirtied)
1870 		zfs_fuid_txhold(zfsvfs, tx);
1871 	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1872 		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
1873 		    acl_ids.z_aclp->z_acl_bytes);
1874 	}
1875 
1876 	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1877 	    ZFS_SA_BASE_ATTR_SIZE);
1878 
1879 	error = dmu_tx_assign(tx, TXG_NOWAIT);
1880 	if (error) {
1881 		zfs_dirent_unlock(dl);
1882 		if (error == ERESTART) {
1883 			dmu_tx_wait(tx);
1884 			dmu_tx_abort(tx);
1885 			goto top;
1886 		}
1887 		zfs_acl_ids_free(&acl_ids);
1888 		dmu_tx_abort(tx);
1889 		ZFS_EXIT(zfsvfs);
1890 		return (error);
1891 	}
1892 
1893 	/*
1894 	 * Create new node.
1895 	 */
1896 	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
1897 
1898 	if (fuid_dirtied)
1899 		zfs_fuid_sync(zfsvfs, tx);
1900 
1901 	/*
1902 	 * Now put new name in parent dir.
1903 	 */
1904 	(void) zfs_link_create(dl, zp, tx, ZNEW);
1905 
1906 	*vpp = ZTOV(zp);
1907 
1908 	txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
1909 	if (flags & FIGNORECASE)
1910 		txtype |= TX_CI;
1911 	zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
1912 	    acl_ids.z_fuidp, vap);
1913 
1914 	zfs_acl_ids_free(&acl_ids);
1915 
1916 	dmu_tx_commit(tx);
1917 
1918 	zfs_dirent_unlock(dl);
1919 
1920 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1921 		zil_commit(zilog, 0);
1922 
1923 	ZFS_EXIT(zfsvfs);
1924 	return (0);
1925 }
1926 
1927 /*
1928  * Remove a directory subdir entry.  If the current working
1929  * directory is the same as the subdir to be removed, the
1930  * remove will fail.
1931  *
1932  *	IN:	dvp	- vnode of directory to remove from.
1933  *		name	- name of directory to be removed.
1934  *		cwd	- vnode of current working directory.
1935  *		cr	- credentials of caller.
1936  *		ct	- caller context
1937  *		flags	- case flags
1938  *
1939  *	RETURN:	0 if success
1940  *		error code if failure
1941  *
1942  * Timestamps:
1943  *	dvp - ctime|mtime updated
1944  */
1945 /*ARGSUSED*/
1946 static int
1947 zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
1948     caller_context_t *ct, int flags)
1949 {
1950 	znode_t		*dzp = VTOZ(dvp);
1951 	znode_t		*zp;
1952 	vnode_t		*vp;
1953 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1954 	zilog_t		*zilog;
1955 	zfs_dirlock_t	*dl;
1956 	dmu_tx_t	*tx;
1957 	int		error;
1958 	int		zflg = ZEXISTS;
1959 
1960 	ZFS_ENTER(zfsvfs);
1961 	ZFS_VERIFY_ZP(dzp);
1962 	zilog = zfsvfs->z_log;
1963 
1964 	if (flags & FIGNORECASE)
1965 		zflg |= ZCILOOK;
1966 top:
1967 	zp = NULL;
1968 
1969 	/*
1970 	 * Attempt to lock directory; fail if entry doesn't exist.
1971 	 */
1972 	if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1973 	    NULL, NULL)) {
1974 		ZFS_EXIT(zfsvfs);
1975 		return (error);
1976 	}
1977 
1978 	vp = ZTOV(zp);
1979 
1980 	if (error = zfs_zaccess_delete(dzp, zp, cr)) {
1981 		goto out;
1982 	}
1983 
1984 	if (vp->v_type != VDIR) {
1985 		error = ENOTDIR;
1986 		goto out;
1987 	}
1988 
1989 	if (vp == cwd) {
1990 		error = EINVAL;
1991 		goto out;
1992 	}
1993 
1994 	vnevent_rmdir(vp, dvp, name, ct);
1995 
1996 	/*
1997 	 * Grab a lock on the directory to make sure that noone is
1998 	 * trying to add (or lookup) entries while we are removing it.
1999 	 */
2000 	rw_enter(&zp->z_name_lock, RW_WRITER);
2001 
2002 	/*
2003 	 * Grab a lock on the parent pointer to make sure we play well
2004 	 * with the treewalk and directory rename code.
2005 	 */
2006 	rw_enter(&zp->z_parent_lock, RW_WRITER);
2007 
2008 	tx = dmu_tx_create(zfsvfs->z_os);
2009 	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
2010 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2011 	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
2012 	zfs_sa_upgrade_txholds(tx, zp);
2013 	zfs_sa_upgrade_txholds(tx, dzp);
2014 	error = dmu_tx_assign(tx, TXG_NOWAIT);
2015 	if (error) {
2016 		rw_exit(&zp->z_parent_lock);
2017 		rw_exit(&zp->z_name_lock);
2018 		zfs_dirent_unlock(dl);
2019 		VN_RELE(vp);
2020 		if (error == ERESTART) {
2021 			dmu_tx_wait(tx);
2022 			dmu_tx_abort(tx);
2023 			goto top;
2024 		}
2025 		dmu_tx_abort(tx);
2026 		ZFS_EXIT(zfsvfs);
2027 		return (error);
2028 	}
2029 
2030 	error = zfs_link_destroy(dl, zp, tx, zflg, NULL);
2031 
2032 	if (error == 0) {
2033 		uint64_t txtype = TX_RMDIR;
2034 		if (flags & FIGNORECASE)
2035 			txtype |= TX_CI;
2036 		zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
2037 	}
2038 
2039 	dmu_tx_commit(tx);
2040 
2041 	rw_exit(&zp->z_parent_lock);
2042 	rw_exit(&zp->z_name_lock);
2043 out:
2044 	zfs_dirent_unlock(dl);
2045 
2046 	VN_RELE(vp);
2047 
2048 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2049 		zil_commit(zilog, 0);
2050 
2051 	ZFS_EXIT(zfsvfs);
2052 	return (error);
2053 }
2054 
2055 /*
2056  * Read as many directory entries as will fit into the provided
2057  * buffer from the given directory cursor position (specified in
2058  * the uio structure.
2059  *
2060  *	IN:	vp	- vnode of directory to read.
2061  *		uio	- structure supplying read location, range info,
2062  *			  and return buffer.
2063  *		cr	- credentials of caller.
2064  *		ct	- caller context
2065  *		flags	- case flags
2066  *
2067  *	OUT:	uio	- updated offset and range, buffer filled.
2068  *		eofp	- set to true if end-of-file detected.
2069  *
2070  *	RETURN:	0 if success
2071  *		error code if failure
2072  *
2073  * Timestamps:
2074  *	vp - atime updated
2075  *
2076  * Note that the low 4 bits of the cookie returned by zap is always zero.
2077  * This allows us to use the low range for "special" directory entries:
2078  * We use 0 for '.', and 1 for '..'.  If this is the root of the filesystem,
2079  * we use the offset 2 for the '.zfs' directory.
2080  */
2081 /* ARGSUSED */
2082 static int
2083 zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
2084     caller_context_t *ct, int flags)
2085 {
2086 	znode_t		*zp = VTOZ(vp);
2087 	iovec_t		*iovp;
2088 	edirent_t	*eodp;
2089 	dirent64_t	*odp;
2090 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
2091 	objset_t	*os;
2092 	caddr_t		outbuf;
2093 	size_t		bufsize;
2094 	zap_cursor_t	zc;
2095 	zap_attribute_t	zap;
2096 	uint_t		bytes_wanted;
2097 	uint64_t	offset; /* must be unsigned; checks for < 1 */
2098 	uint64_t	parent;
2099 	int		local_eof;
2100 	int		outcount;
2101 	int		error;
2102 	uint8_t		prefetch;
2103 	boolean_t	check_sysattrs;
2104 
2105 	ZFS_ENTER(zfsvfs);
2106 	ZFS_VERIFY_ZP(zp);
2107 
2108 	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
2109 	    &parent, sizeof (parent))) != 0) {
2110 		ZFS_EXIT(zfsvfs);
2111 		return (error);
2112 	}
2113 
2114 	/*
2115 	 * If we are not given an eof variable,
2116 	 * use a local one.
2117 	 */
2118 	if (eofp == NULL)
2119 		eofp = &local_eof;
2120 
2121 	/*
2122 	 * Check for valid iov_len.
2123 	 */
2124 	if (uio->uio_iov->iov_len <= 0) {
2125 		ZFS_EXIT(zfsvfs);
2126 		return (EINVAL);
2127 	}
2128 
2129 	/*
2130 	 * Quit if directory has been removed (posix)
2131 	 */
2132 	if ((*eofp = zp->z_unlinked) != 0) {
2133 		ZFS_EXIT(zfsvfs);
2134 		return (0);
2135 	}
2136 
2137 	error = 0;
2138 	os = zfsvfs->z_os;
2139 	offset = uio->uio_loffset;
2140 	prefetch = zp->z_zn_prefetch;
2141 
2142 	/*
2143 	 * Initialize the iterator cursor.
2144 	 */
2145 	if (offset <= 3) {
2146 		/*
2147 		 * Start iteration from the beginning of the directory.
2148 		 */
2149 		zap_cursor_init(&zc, os, zp->z_id);
2150 	} else {
2151 		/*
2152 		 * The offset is a serialized cursor.
2153 		 */
2154 		zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
2155 	}
2156 
2157 	/*
2158 	 * Get space to change directory entries into fs independent format.
2159 	 */
2160 	iovp = uio->uio_iov;
2161 	bytes_wanted = iovp->iov_len;
2162 	if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) {
2163 		bufsize = bytes_wanted;
2164 		outbuf = kmem_alloc(bufsize, KM_SLEEP);
2165 		odp = (struct dirent64 *)outbuf;
2166 	} else {
2167 		bufsize = bytes_wanted;
2168 		odp = (struct dirent64 *)iovp->iov_base;
2169 	}
2170 	eodp = (struct edirent *)odp;
2171 
2172 	/*
2173 	 * If this VFS supports the system attribute view interface; and
2174 	 * we're looking at an extended attribute directory; and we care
2175 	 * about normalization conflicts on this vfs; then we must check
2176 	 * for normalization conflicts with the sysattr name space.
2177 	 */
2178 	check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
2179 	    (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm &&
2180 	    (flags & V_RDDIR_ENTFLAGS);
2181 
2182 	/*
2183 	 * Transform to file-system independent format
2184 	 */
2185 	outcount = 0;
2186 	while (outcount < bytes_wanted) {
2187 		ino64_t objnum;
2188 		ushort_t reclen;
2189 		off64_t *next = NULL;
2190 
2191 		/*
2192 		 * Special case `.', `..', and `.zfs'.
2193 		 */
2194 		if (offset == 0) {
2195 			(void) strcpy(zap.za_name, ".");
2196 			zap.za_normalization_conflict = 0;
2197 			objnum = zp->z_id;
2198 		} else if (offset == 1) {
2199 			(void) strcpy(zap.za_name, "..");
2200 			zap.za_normalization_conflict = 0;
2201 			objnum = parent;
2202 		} else if (offset == 2 && zfs_show_ctldir(zp)) {
2203 			(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
2204 			zap.za_normalization_conflict = 0;
2205 			objnum = ZFSCTL_INO_ROOT;
2206 		} else {
2207 			/*
2208 			 * Grab next entry.
2209 			 */
2210 			if (error = zap_cursor_retrieve(&zc, &zap)) {
2211 				if ((*eofp = (error == ENOENT)) != 0)
2212 					break;
2213 				else
2214 					goto update;
2215 			}
2216 
2217 			if (zap.za_integer_length != 8 ||
2218 			    zap.za_num_integers != 1) {
2219 				cmn_err(CE_WARN, "zap_readdir: bad directory "
2220 				    "entry, obj = %lld, offset = %lld\n",
2221 				    (u_longlong_t)zp->z_id,
2222 				    (u_longlong_t)offset);
2223 				error = ENXIO;
2224 				goto update;
2225 			}
2226 
2227 			objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
2228 			/*
2229 			 * MacOS X can extract the object type here such as:
2230 			 * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer);
2231 			 */
2232 
2233 			if (check_sysattrs && !zap.za_normalization_conflict) {
2234 				zap.za_normalization_conflict =
2235 				    xattr_sysattr_casechk(zap.za_name);
2236 			}
2237 		}
2238 
2239 		if (flags & V_RDDIR_ACCFILTER) {
2240 			/*
2241 			 * If we have no access at all, don't include
2242 			 * this entry in the returned information
2243 			 */
2244 			znode_t	*ezp;
2245 			if (zfs_zget(zp->z_zfsvfs, objnum, &ezp) != 0)
2246 				goto skip_entry;
2247 			if (!zfs_has_access(ezp, cr)) {
2248 				VN_RELE(ZTOV(ezp));
2249 				goto skip_entry;
2250 			}
2251 			VN_RELE(ZTOV(ezp));
2252 		}
2253 
2254 		if (flags & V_RDDIR_ENTFLAGS)
2255 			reclen = EDIRENT_RECLEN(strlen(zap.za_name));
2256 		else
2257 			reclen = DIRENT64_RECLEN(strlen(zap.za_name));
2258 
2259 		/*
2260 		 * Will this entry fit in the buffer?
2261 		 */
2262 		if (outcount + reclen > bufsize) {
2263 			/*
2264 			 * Did we manage to fit anything in the buffer?
2265 			 */
2266 			if (!outcount) {
2267 				error = EINVAL;
2268 				goto update;
2269 			}
2270 			break;
2271 		}
2272 		if (flags & V_RDDIR_ENTFLAGS) {
2273 			/*
2274 			 * Add extended flag entry:
2275 			 */
2276 			eodp->ed_ino = objnum;
2277 			eodp->ed_reclen = reclen;
2278 			/* NOTE: ed_off is the offset for the *next* entry */
2279 			next = &(eodp->ed_off);
2280 			eodp->ed_eflags = zap.za_normalization_conflict ?
2281 			    ED_CASE_CONFLICT : 0;
2282 			(void) strncpy(eodp->ed_name, zap.za_name,
2283 			    EDIRENT_NAMELEN(reclen));
2284 			eodp = (edirent_t *)((intptr_t)eodp + reclen);
2285 		} else {
2286 			/*
2287 			 * Add normal entry:
2288 			 */
2289 			odp->d_ino = objnum;
2290 			odp->d_reclen = reclen;
2291 			/* NOTE: d_off is the offset for the *next* entry */
2292 			next = &(odp->d_off);
2293 			(void) strncpy(odp->d_name, zap.za_name,
2294 			    DIRENT64_NAMELEN(reclen));
2295 			odp = (dirent64_t *)((intptr_t)odp + reclen);
2296 		}
2297 		outcount += reclen;
2298 
2299 		ASSERT(outcount <= bufsize);
2300 
2301 		/* Prefetch znode */
2302 		if (prefetch)
2303 			dmu_prefetch(os, objnum, 0, 0);
2304 
2305 	skip_entry:
2306 		/*
2307 		 * Move to the next entry, fill in the previous offset.
2308 		 */
2309 		if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
2310 			zap_cursor_advance(&zc);
2311 			offset = zap_cursor_serialize(&zc);
2312 		} else {
2313 			offset += 1;
2314 		}
2315 		if (next)
2316 			*next = offset;
2317 	}
2318 	zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
2319 
2320 	if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) {
2321 		iovp->iov_base += outcount;
2322 		iovp->iov_len -= outcount;
2323 		uio->uio_resid -= outcount;
2324 	} else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) {
2325 		/*
2326 		 * Reset the pointer.
2327 		 */
2328 		offset = uio->uio_loffset;
2329 	}
2330 
2331 update:
2332 	zap_cursor_fini(&zc);
2333 	if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
2334 		kmem_free(outbuf, bufsize);
2335 
2336 	if (error == ENOENT)
2337 		error = 0;
2338 
2339 	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
2340 
2341 	uio->uio_loffset = offset;
2342 	ZFS_EXIT(zfsvfs);
2343 	return (error);
2344 }
2345 
2346 ulong_t zfs_fsync_sync_cnt = 4;
2347 
2348 static int
2349 zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
2350 {
2351 	znode_t	*zp = VTOZ(vp);
2352 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2353 
2354 	/*
2355 	 * Regardless of whether this is required for standards conformance,
2356 	 * this is the logical behavior when fsync() is called on a file with
2357 	 * dirty pages.  We use B_ASYNC since the ZIL transactions are already
2358 	 * going to be pushed out as part of the zil_commit().
2359 	 */
2360 	if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) &&
2361 	    (vp->v_type == VREG) && !(IS_SWAPVP(vp)))
2362 		(void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0, B_ASYNC, cr, ct);
2363 
2364 	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
2365 
2366 	if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
2367 		ZFS_ENTER(zfsvfs);
2368 		ZFS_VERIFY_ZP(zp);
2369 		zil_commit(zfsvfs->z_log, zp->z_id);
2370 		ZFS_EXIT(zfsvfs);
2371 	}
2372 	return (0);
2373 }
2374 
2375 
2376 /*
2377  * Get the requested file attributes and place them in the provided
2378  * vattr structure.
2379  *
2380  *	IN:	vp	- vnode of file.
2381  *		vap	- va_mask identifies requested attributes.
2382  *			  If AT_XVATTR set, then optional attrs are requested
2383  *		flags	- ATTR_NOACLCHECK (CIFS server context)
2384  *		cr	- credentials of caller.
2385  *		ct	- caller context
2386  *
2387  *	OUT:	vap	- attribute values.
2388  *
2389  *	RETURN:	0 (always succeeds)
2390  */
2391 /* ARGSUSED */
2392 static int
2393 zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2394     caller_context_t *ct)
2395 {
2396 	znode_t *zp = VTOZ(vp);
2397 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2398 	int	error = 0;
2399 	uint64_t links;
2400 	uint64_t mtime[2], ctime[2];
2401 	xvattr_t *xvap = (xvattr_t *)vap;	/* vap may be an xvattr_t * */
2402 	xoptattr_t *xoap = NULL;
2403 	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2404 	sa_bulk_attr_t bulk[2];
2405 	int count = 0;
2406 
2407 	ZFS_ENTER(zfsvfs);
2408 	ZFS_VERIFY_ZP(zp);
2409 
2410 	zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
2411 
2412 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
2413 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
2414 
2415 	if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
2416 		ZFS_EXIT(zfsvfs);
2417 		return (error);
2418 	}
2419 
2420 	/*
2421 	 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
2422 	 * Also, if we are the owner don't bother, since owner should
2423 	 * always be allowed to read basic attributes of file.
2424 	 */
2425 	if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
2426 	    (vap->va_uid != crgetuid(cr))) {
2427 		if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
2428 		    skipaclchk, cr)) {
2429 			ZFS_EXIT(zfsvfs);
2430 			return (error);
2431 		}
2432 	}
2433 
2434 	/*
2435 	 * Return all attributes.  It's cheaper to provide the answer
2436 	 * than to determine whether we were asked the question.
2437 	 */
2438 
2439 	mutex_enter(&zp->z_lock);
2440 	vap->va_type = vp->v_type;
2441 	vap->va_mode = zp->z_mode & MODEMASK;
2442 	vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev;
2443 	vap->va_nodeid = zp->z_id;
2444 	if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp))
2445 		links = zp->z_links + 1;
2446 	else
2447 		links = zp->z_links;
2448 	vap->va_nlink = MIN(links, UINT32_MAX);	/* nlink_t limit! */
2449 	vap->va_size = zp->z_size;
2450 	vap->va_rdev = vp->v_rdev;
2451 	vap->va_seq = zp->z_seq;
2452 
2453 	/*
2454 	 * Add in any requested optional attributes and the create time.
2455 	 * Also set the corresponding bits in the returned attribute bitmap.
2456 	 */
2457 	if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
2458 		if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
2459 			xoap->xoa_archive =
2460 			    ((zp->z_pflags & ZFS_ARCHIVE) != 0);
2461 			XVA_SET_RTN(xvap, XAT_ARCHIVE);
2462 		}
2463 
2464 		if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
2465 			xoap->xoa_readonly =
2466 			    ((zp->z_pflags & ZFS_READONLY) != 0);
2467 			XVA_SET_RTN(xvap, XAT_READONLY);
2468 		}
2469 
2470 		if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
2471 			xoap->xoa_system =
2472 			    ((zp->z_pflags & ZFS_SYSTEM) != 0);
2473 			XVA_SET_RTN(xvap, XAT_SYSTEM);
2474 		}
2475 
2476 		if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
2477 			xoap->xoa_hidden =
2478 			    ((zp->z_pflags & ZFS_HIDDEN) != 0);
2479 			XVA_SET_RTN(xvap, XAT_HIDDEN);
2480 		}
2481 
2482 		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2483 			xoap->xoa_nounlink =
2484 			    ((zp->z_pflags & ZFS_NOUNLINK) != 0);
2485 			XVA_SET_RTN(xvap, XAT_NOUNLINK);
2486 		}
2487 
2488 		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2489 			xoap->xoa_immutable =
2490 			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
2491 			XVA_SET_RTN(xvap, XAT_IMMUTABLE);
2492 		}
2493 
2494 		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2495 			xoap->xoa_appendonly =
2496 			    ((zp->z_pflags & ZFS_APPENDONLY) != 0);
2497 			XVA_SET_RTN(xvap, XAT_APPENDONLY);
2498 		}
2499 
2500 		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2501 			xoap->xoa_nodump =
2502 			    ((zp->z_pflags & ZFS_NODUMP) != 0);
2503 			XVA_SET_RTN(xvap, XAT_NODUMP);
2504 		}
2505 
2506 		if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
2507 			xoap->xoa_opaque =
2508 			    ((zp->z_pflags & ZFS_OPAQUE) != 0);
2509 			XVA_SET_RTN(xvap, XAT_OPAQUE);
2510 		}
2511 
2512 		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2513 			xoap->xoa_av_quarantined =
2514 			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
2515 			XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
2516 		}
2517 
2518 		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2519 			xoap->xoa_av_modified =
2520 			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
2521 			XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
2522 		}
2523 
2524 		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
2525 		    vp->v_type == VREG) {
2526 			zfs_sa_get_scanstamp(zp, xvap);
2527 		}
2528 
2529 		if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
2530 			uint64_t times[2];
2531 
2532 			(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
2533 			    times, sizeof (times));
2534 			ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
2535 			XVA_SET_RTN(xvap, XAT_CREATETIME);
2536 		}
2537 
2538 		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2539 			xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
2540 			XVA_SET_RTN(xvap, XAT_REPARSE);
2541 		}
2542 		if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
2543 			xoap->xoa_generation = zp->z_gen;
2544 			XVA_SET_RTN(xvap, XAT_GEN);
2545 		}
2546 	}
2547 
2548 	ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
2549 	ZFS_TIME_DECODE(&vap->va_mtime, mtime);
2550 	ZFS_TIME_DECODE(&vap->va_ctime, ctime);
2551 
2552 	mutex_exit(&zp->z_lock);
2553 
2554 	sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);
2555 
2556 	if (zp->z_blksz == 0) {
2557 		/*
2558 		 * Block size hasn't been set; suggest maximal I/O transfers.
2559 		 */
2560 		vap->va_blksize = zfsvfs->z_max_blksz;
2561 	}
2562 
2563 	ZFS_EXIT(zfsvfs);
2564 	return (0);
2565 }
2566 
2567 /*
2568  * Set the file attributes to the values contained in the
2569  * vattr structure.
2570  *
2571  *	IN:	vp	- vnode of file to be modified.
2572  *		vap	- new attribute values.
2573  *			  If AT_XVATTR set, then optional attrs are being set
2574  *		flags	- ATTR_UTIME set if non-default time values provided.
2575  *			- ATTR_NOACLCHECK (CIFS context only).
2576  *		cr	- credentials of caller.
2577  *		ct	- caller context
2578  *
2579  *	RETURN:	0 if success
2580  *		error code if failure
2581  *
2582  * Timestamps:
2583  *	vp - ctime updated, mtime updated if size changed.
2584  */
2585 /* ARGSUSED */
2586 static int
2587 zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2588 	caller_context_t *ct)
2589 {
2590 	znode_t		*zp = VTOZ(vp);
2591 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
2592 	zilog_t		*zilog;
2593 	dmu_tx_t	*tx;
2594 	vattr_t		oldva;
2595 	xvattr_t	tmpxvattr;
2596 	uint_t		mask = vap->va_mask;
2597 	uint_t		saved_mask;
2598 	int		trim_mask = 0;
2599 	uint64_t	new_mode;
2600 	uint64_t	new_uid, new_gid;
2601 	uint64_t	xattr_obj = 0;
2602 	uint64_t	mtime[2], ctime[2];
2603 	znode_t		*attrzp;
2604 	int		need_policy = FALSE;
2605 	int		err, err2;
2606 	zfs_fuid_info_t *fuidp = NULL;
2607 	xvattr_t *xvap = (xvattr_t *)vap;	/* vap may be an xvattr_t * */
2608 	xoptattr_t	*xoap;
2609 	zfs_acl_t	*aclp = NULL;
2610 	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2611 	boolean_t	fuid_dirtied = B_FALSE;
2612 	sa_bulk_attr_t	bulk[7], xattr_bulk[7];
2613 	int		count = 0, xattr_count = 0;
2614 
2615 	if (mask == 0)
2616 		return (0);
2617 
2618 	if (mask & AT_NOSET)
2619 		return (EINVAL);
2620 
2621 	ZFS_ENTER(zfsvfs);
2622 	ZFS_VERIFY_ZP(zp);
2623 
2624 	zilog = zfsvfs->z_log;
2625 
2626 	/*
2627 	 * Make sure that if we have ephemeral uid/gid or xvattr specified
2628 	 * that file system is at proper version level
2629 	 */
2630 
2631 	if (zfsvfs->z_use_fuids == B_FALSE &&
2632 	    (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) ||
2633 	    ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) ||
2634 	    (mask & AT_XVATTR))) {
2635 		ZFS_EXIT(zfsvfs);
2636 		return (EINVAL);
2637 	}
2638 
2639 	if (mask & AT_SIZE && vp->v_type == VDIR) {
2640 		ZFS_EXIT(zfsvfs);
2641 		return (EISDIR);
2642 	}
2643 
2644 	if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) {
2645 		ZFS_EXIT(zfsvfs);
2646 		return (EINVAL);
2647 	}
2648 
2649 	/*
2650 	 * If this is an xvattr_t, then get a pointer to the structure of
2651 	 * optional attributes.  If this is NULL, then we have a vattr_t.
2652 	 */
2653 	xoap = xva_getxoptattr(xvap);
2654 
2655 	xva_init(&tmpxvattr);
2656 
2657 	/*
2658 	 * Immutable files can only alter immutable bit and atime
2659 	 */
2660 	if ((zp->z_pflags & ZFS_IMMUTABLE) &&
2661 	    ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) ||
2662 	    ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
2663 		ZFS_EXIT(zfsvfs);
2664 		return (EPERM);
2665 	}
2666 
2667 	if ((mask & AT_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
2668 		ZFS_EXIT(zfsvfs);
2669 		return (EPERM);
2670 	}
2671 
2672 	/*
2673 	 * Verify timestamps doesn't overflow 32 bits.
2674 	 * ZFS can handle large timestamps, but 32bit syscalls can't
2675 	 * handle times greater than 2039.  This check should be removed
2676 	 * once large timestamps are fully supported.
2677 	 */
2678 	if (mask & (AT_ATIME | AT_MTIME)) {
2679 		if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2680 		    ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2681 			ZFS_EXIT(zfsvfs);
2682 			return (EOVERFLOW);
2683 		}
2684 	}
2685 
2686 top:
2687 	attrzp = NULL;
2688 
2689 	/* Can this be moved to before the top label? */
2690 	if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
2691 		ZFS_EXIT(zfsvfs);
2692 		return (EROFS);
2693 	}
2694 
2695 	/*
2696 	 * First validate permissions
2697 	 */
2698 
2699 	if (mask & AT_SIZE) {
2700 		err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
2701 		if (err) {
2702 			ZFS_EXIT(zfsvfs);
2703 			return (err);
2704 		}
2705 		/*
2706 		 * XXX - Note, we are not providing any open
2707 		 * mode flags here (like FNDELAY), so we may
2708 		 * block if there are locks present... this
2709 		 * should be addressed in openat().
2710 		 */
2711 		/* XXX - would it be OK to generate a log record here? */
2712 		err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
2713 		if (err) {
2714 			ZFS_EXIT(zfsvfs);
2715 			return (err);
2716 		}
2717 	}
2718 
2719 	if (mask & (AT_ATIME|AT_MTIME) ||
2720 	    ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
2721 	    XVA_ISSET_REQ(xvap, XAT_READONLY) ||
2722 	    XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
2723 	    XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
2724 	    XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
2725 		need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
2726 		    skipaclchk, cr);
2727 	}
2728 
2729 	if (mask & (AT_UID|AT_GID)) {
2730 		int	idmask = (mask & (AT_UID|AT_GID));
2731 		int	take_owner;
2732 		int	take_group;
2733 
2734 		/*
2735 		 * NOTE: even if a new mode is being set,
2736 		 * we may clear S_ISUID/S_ISGID bits.
2737 		 */
2738 
2739 		if (!(mask & AT_MODE))
2740 			vap->va_mode = zp->z_mode;
2741 
2742 		/*
2743 		 * Take ownership or chgrp to group we are a member of
2744 		 */
2745 
2746 		take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
2747 		take_group = (mask & AT_GID) &&
2748 		    zfs_groupmember(zfsvfs, vap->va_gid, cr);
2749 
2750 		/*
2751 		 * If both AT_UID and AT_GID are set then take_owner and
2752 		 * take_group must both be set in order to allow taking
2753 		 * ownership.
2754 		 *
2755 		 * Otherwise, send the check through secpolicy_vnode_setattr()
2756 		 *
2757 		 */
2758 
2759 		if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
2760 		    ((idmask == AT_UID) && take_owner) ||
2761 		    ((idmask == AT_GID) && take_group)) {
2762 			if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
2763 			    skipaclchk, cr) == 0) {
2764 				/*
2765 				 * Remove setuid/setgid for non-privileged users
2766 				 */
2767 				secpolicy_setid_clear(vap, cr);
2768 				trim_mask = (mask & (AT_UID|AT_GID));
2769 			} else {
2770 				need_policy =  TRUE;
2771 			}
2772 		} else {
2773 			need_policy =  TRUE;
2774 		}
2775 	}
2776 
2777 	mutex_enter(&zp->z_lock);
2778 	oldva.va_mode = zp->z_mode;
2779 	zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
2780 	if (mask & AT_XVATTR) {
2781 		/*
2782 		 * Update xvattr mask to include only those attributes
2783 		 * that are actually changing.
2784 		 *
2785 		 * the bits will be restored prior to actually setting
2786 		 * the attributes so the caller thinks they were set.
2787 		 */
2788 		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2789 			if (xoap->xoa_appendonly !=
2790 			    ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
2791 				need_policy = TRUE;
2792 			} else {
2793 				XVA_CLR_REQ(xvap, XAT_APPENDONLY);
2794 				XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
2795 			}
2796 		}
2797 
2798 		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2799 			if (xoap->xoa_nounlink !=
2800 			    ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
2801 				need_policy = TRUE;
2802 			} else {
2803 				XVA_CLR_REQ(xvap, XAT_NOUNLINK);
2804 				XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
2805 			}
2806 		}
2807 
2808 		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2809 			if (xoap->xoa_immutable !=
2810 			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
2811 				need_policy = TRUE;
2812 			} else {
2813 				XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
2814 				XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
2815 			}
2816 		}
2817 
2818 		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2819 			if (xoap->xoa_nodump !=
2820 			    ((zp->z_pflags & ZFS_NODUMP) != 0)) {
2821 				need_policy = TRUE;
2822 			} else {
2823 				XVA_CLR_REQ(xvap, XAT_NODUMP);
2824 				XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
2825 			}
2826 		}
2827 
2828 		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2829 			if (xoap->xoa_av_modified !=
2830 			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
2831 				need_policy = TRUE;
2832 			} else {
2833 				XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
2834 				XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
2835 			}
2836 		}
2837 
2838 		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2839 			if ((vp->v_type != VREG &&
2840 			    xoap->xoa_av_quarantined) ||
2841 			    xoap->xoa_av_quarantined !=
2842 			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
2843 				need_policy = TRUE;
2844 			} else {
2845 				XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
2846 				XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
2847 			}
2848 		}
2849 
2850 		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2851 			mutex_exit(&zp->z_lock);
2852 			ZFS_EXIT(zfsvfs);
2853 			return (EPERM);
2854 		}
2855 
2856 		if (need_policy == FALSE &&
2857 		    (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
2858 		    XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
2859 			need_policy = TRUE;
2860 		}
2861 	}
2862 
2863 	mutex_exit(&zp->z_lock);
2864 
2865 	if (mask & AT_MODE) {
2866 		if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
2867 			err = secpolicy_setid_setsticky_clear(vp, vap,
2868 			    &oldva, cr);
2869 			if (err) {
2870 				ZFS_EXIT(zfsvfs);
2871 				return (err);
2872 			}
2873 			trim_mask |= AT_MODE;
2874 		} else {
2875 			need_policy = TRUE;
2876 		}
2877 	}
2878 
2879 	if (need_policy) {
2880 		/*
2881 		 * If trim_mask is set then take ownership
2882 		 * has been granted or write_acl is present and user
2883 		 * has the ability to modify mode.  In that case remove
2884 		 * UID|GID and or MODE from mask so that
2885 		 * secpolicy_vnode_setattr() doesn't revoke it.
2886 		 */
2887 
2888 		if (trim_mask) {
2889 			saved_mask = vap->va_mask;
2890 			vap->va_mask &= ~trim_mask;
2891 		}
2892 		err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
2893 		    (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
2894 		if (err) {
2895 			ZFS_EXIT(zfsvfs);
2896 			return (err);
2897 		}
2898 
2899 		if (trim_mask)
2900 			vap->va_mask |= saved_mask;
2901 	}
2902 
2903 	/*
2904 	 * secpolicy_vnode_setattr, or take ownership may have
2905 	 * changed va_mask
2906 	 */
2907 	mask = vap->va_mask;
2908 
2909 	if ((mask & (AT_UID | AT_GID))) {
2910 		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xattr_obj,
2911 		    sizeof (xattr_obj));
2912 
2913 		if (xattr_obj) {
2914 			err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp);
2915 			if (err)
2916 				goto out2;
2917 		}
2918 		if (mask & AT_UID) {
2919 			new_uid = zfs_fuid_create(zfsvfs,
2920 			    (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
2921 			if (new_uid != zp->z_uid &&
2922 			    zfs_fuid_overquota(zfsvfs, B_FALSE, new_uid)) {
2923 				err = EDQUOT;
2924 				goto out2;
2925 			}
2926 		}
2927 
2928 		if (mask & AT_GID) {
2929 			new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
2930 			    cr, ZFS_GROUP, &fuidp);
2931 			if (new_gid != zp->z_gid &&
2932 			    zfs_fuid_overquota(zfsvfs, B_TRUE, new_gid)) {
2933 				err = EDQUOT;
2934 				goto out2;
2935 			}
2936 		}
2937 	}
2938 	tx = dmu_tx_create(zfsvfs->z_os);
2939 
2940 	if (mask & AT_MODE) {
2941 		uint64_t pmode = zp->z_mode;
2942 		uint64_t acl_obj;
2943 		new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
2944 
2945 		if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode))
2946 			goto out;
2947 
2948 		mutex_enter(&zp->z_lock);
2949 		if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
2950 			/*
2951 			 * Are we upgrading ACL from old V0 format
2952 			 * to V1 format?
2953 			 */
2954 			if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
2955 			    zfs_znode_acl_version(zp) ==
2956 			    ZFS_ACL_VERSION_INITIAL) {
2957 				dmu_tx_hold_free(tx, acl_obj, 0,
2958 				    DMU_OBJECT_END);
2959 				dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
2960 				    0, aclp->z_acl_bytes);
2961 			} else {
2962 				dmu_tx_hold_write(tx, acl_obj, 0,
2963 				    aclp->z_acl_bytes);
2964 			}
2965 		} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
2966 			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
2967 			    0, aclp->z_acl_bytes);
2968 		}
2969 		mutex_exit(&zp->z_lock);
2970 		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
2971 	} else {
2972 		if ((mask & AT_XVATTR) &&
2973 		    XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
2974 			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
2975 		else
2976 			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2977 	}
2978 
2979 	if (attrzp) {
2980 		dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
2981 	}
2982 
2983 	fuid_dirtied = zfsvfs->z_fuid_dirty;
2984 	if (fuid_dirtied)
2985 		zfs_fuid_txhold(zfsvfs, tx);
2986 
2987 	zfs_sa_upgrade_txholds(tx, zp);
2988 
2989 	err = dmu_tx_assign(tx, TXG_NOWAIT);
2990 	if (err) {
2991 		if (err == ERESTART)
2992 			dmu_tx_wait(tx);
2993 		goto out;
2994 	}
2995 
2996 	count = 0;
2997 	/*
2998 	 * Set each attribute requested.
2999 	 * We group settings according to the locks they need to acquire.
3000 	 *
3001 	 * Note: you cannot set ctime directly, although it will be
3002 	 * updated as a side-effect of calling this function.
3003 	 */
3004 
3005 
3006 	if (mask & (AT_UID|AT_GID|AT_MODE))
3007 		mutex_enter(&zp->z_acl_lock);
3008 	mutex_enter(&zp->z_lock);
3009 
3010 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
3011 	    &zp->z_pflags, sizeof (zp->z_pflags));
3012 
3013 	if (attrzp) {
3014 		if (mask & (AT_UID|AT_GID|AT_MODE))
3015 			mutex_enter(&attrzp->z_acl_lock);
3016 		mutex_enter(&attrzp->z_lock);
3017 		SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3018 		    SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
3019 		    sizeof (attrzp->z_pflags));
3020 	}
3021 
3022 	if (mask & (AT_UID|AT_GID)) {
3023 
3024 		if (mask & AT_UID) {
3025 			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
3026 			    &new_uid, sizeof (new_uid));
3027 			zp->z_uid = new_uid;
3028 			if (attrzp) {
3029 				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3030 				    SA_ZPL_UID(zfsvfs), NULL, &new_uid,
3031 				    sizeof (new_uid));
3032 				attrzp->z_uid = new_uid;
3033 			}
3034 		}
3035 
3036 		if (mask & AT_GID) {
3037 			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
3038 			    NULL, &new_gid, sizeof (new_gid));
3039 			zp->z_gid = new_gid;
3040 			if (attrzp) {
3041 				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3042 				    SA_ZPL_GID(zfsvfs), NULL, &new_gid,
3043 				    sizeof (new_gid));
3044 				attrzp->z_gid = new_gid;
3045 			}
3046 		}
3047 		if (!(mask & AT_MODE)) {
3048 			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
3049 			    NULL, &new_mode, sizeof (new_mode));
3050 			new_mode = zp->z_mode;
3051 		}
3052 		err = zfs_acl_chown_setattr(zp);
3053 		ASSERT(err == 0);
3054 		if (attrzp) {
3055 			err = zfs_acl_chown_setattr(attrzp);
3056 			ASSERT(err == 0);
3057 		}
3058 	}
3059 
3060 	if (mask & AT_MODE) {
3061 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
3062 		    &new_mode, sizeof (new_mode));
3063 		zp->z_mode = new_mode;
3064 		ASSERT3U((uintptr_t)aclp, !=, NULL);
3065 		err = zfs_aclset_common(zp, aclp, cr, tx);
3066 		ASSERT3U(err, ==, 0);
3067 		zp->z_acl_cached = aclp;
3068 		aclp = NULL;
3069 	}
3070 
3071 
3072 	if (mask & AT_ATIME) {
3073 		ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
3074 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
3075 		    &zp->z_atime, sizeof (zp->z_atime));
3076 	}
3077 
3078 	if (mask & AT_MTIME) {
3079 		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
3080 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
3081 		    mtime, sizeof (mtime));
3082 	}
3083 
3084 	/* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
3085 	if (mask & AT_SIZE && !(mask & AT_MTIME)) {
3086 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
3087 		    NULL, mtime, sizeof (mtime));
3088 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3089 		    &ctime, sizeof (ctime));
3090 		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
3091 		    B_TRUE);
3092 	} else if (mask != 0) {
3093 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3094 		    &ctime, sizeof (ctime));
3095 		zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
3096 		    B_TRUE);
3097 		if (attrzp) {
3098 			SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3099 			    SA_ZPL_CTIME(zfsvfs), NULL,
3100 			    &ctime, sizeof (ctime));
3101 			zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
3102 			    mtime, ctime, B_TRUE);
3103 		}
3104 	}
3105 	/*
3106 	 * Do this after setting timestamps to prevent timestamp
3107 	 * update from toggling bit
3108 	 */
3109 
3110 	if (xoap && (mask & AT_XVATTR)) {
3111 
3112 		/*
3113 		 * restore trimmed off masks
3114 		 * so that return masks can be set for caller.
3115 		 */
3116 
3117 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
3118 			XVA_SET_REQ(xvap, XAT_APPENDONLY);
3119 		}
3120 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
3121 			XVA_SET_REQ(xvap, XAT_NOUNLINK);
3122 		}
3123 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
3124 			XVA_SET_REQ(xvap, XAT_IMMUTABLE);
3125 		}
3126 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
3127 			XVA_SET_REQ(xvap, XAT_NODUMP);
3128 		}
3129 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
3130 			XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
3131 		}
3132 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
3133 			XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
3134 		}
3135 
3136 		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3137 			ASSERT(vp->v_type == VREG);
3138 
3139 		zfs_xvattr_set(zp, xvap, tx);
3140 	}
3141 
3142 	if (fuid_dirtied)
3143 		zfs_fuid_sync(zfsvfs, tx);
3144 
3145 	if (mask != 0)
3146 		zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
3147 
3148 	mutex_exit(&zp->z_lock);
3149 	if (mask & (AT_UID|AT_GID|AT_MODE))
3150 		mutex_exit(&zp->z_acl_lock);
3151 
3152 	if (attrzp) {
3153 		if (mask & (AT_UID|AT_GID|AT_MODE))
3154 			mutex_exit(&attrzp->z_acl_lock);
3155 		mutex_exit(&attrzp->z_lock);
3156 	}
3157 out:
3158 	if (err == 0 && attrzp) {
3159 		err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
3160 		    xattr_count, tx);
3161 		ASSERT(err2 == 0);
3162 	}
3163 
3164 	if (attrzp)
3165 		VN_RELE(ZTOV(attrzp));
3166 	if (aclp)
3167 		zfs_acl_free(aclp);
3168 
3169 	if (fuidp) {
3170 		zfs_fuid_info_free(fuidp);
3171 		fuidp = NULL;
3172 	}
3173 
3174 	if (err) {
3175 		dmu_tx_abort(tx);
3176 		if (err == ERESTART)
3177 			goto top;
3178 	} else {
3179 		err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
3180 		dmu_tx_commit(tx);
3181 	}
3182 
3183 
3184 out2:
3185 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3186 		zil_commit(zilog, 0);
3187 
3188 	ZFS_EXIT(zfsvfs);
3189 	return (err);
3190 }
3191 
3192 typedef struct zfs_zlock {
3193 	krwlock_t	*zl_rwlock;	/* lock we acquired */
3194 	znode_t		*zl_znode;	/* znode we held */
3195 	struct zfs_zlock *zl_next;	/* next in list */
3196 } zfs_zlock_t;
3197 
3198 /*
3199  * Drop locks and release vnodes that were held by zfs_rename_lock().
3200  */
3201 static void
3202 zfs_rename_unlock(zfs_zlock_t **zlpp)
3203 {
3204 	zfs_zlock_t *zl;
3205 
3206 	while ((zl = *zlpp) != NULL) {
3207 		if (zl->zl_znode != NULL)
3208 			VN_RELE(ZTOV(zl->zl_znode));
3209 		rw_exit(zl->zl_rwlock);
3210 		*zlpp = zl->zl_next;
3211 		kmem_free(zl, sizeof (*zl));
3212 	}
3213 }
3214 
3215 /*
3216  * Search back through the directory tree, using the ".." entries.
3217  * Lock each directory in the chain to prevent concurrent renames.
3218  * Fail any attempt to move a directory into one of its own descendants.
3219  * XXX - z_parent_lock can overlap with map or grow locks
3220  */
3221 static int
3222 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
3223 {
3224 	zfs_zlock_t	*zl;
3225 	znode_t		*zp = tdzp;
3226 	uint64_t	rootid = zp->z_zfsvfs->z_root;
3227 	uint64_t	oidp = zp->z_id;
3228 	krwlock_t	*rwlp = &szp->z_parent_lock;
3229 	krw_t		rw = RW_WRITER;
3230 
3231 	/*
3232 	 * First pass write-locks szp and compares to zp->z_id.
3233 	 * Later passes read-lock zp and compare to zp->z_parent.
3234 	 */
3235 	do {
3236 		if (!rw_tryenter(rwlp, rw)) {
3237 			/*
3238 			 * Another thread is renaming in this path.
3239 			 * Note that if we are a WRITER, we don't have any
3240 			 * parent_locks held yet.
3241 			 */
3242 			if (rw == RW_READER && zp->z_id > szp->z_id) {
3243 				/*
3244 				 * Drop our locks and restart
3245 				 */
3246 				zfs_rename_unlock(&zl);
3247 				*zlpp = NULL;
3248 				zp = tdzp;
3249 				oidp = zp->z_id;
3250 				rwlp = &szp->z_parent_lock;
3251 				rw = RW_WRITER;
3252 				continue;
3253 			} else {
3254 				/*
3255 				 * Wait for other thread to drop its locks
3256 				 */
3257 				rw_enter(rwlp, rw);
3258 			}
3259 		}
3260 
3261 		zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
3262 		zl->zl_rwlock = rwlp;
3263 		zl->zl_znode = NULL;
3264 		zl->zl_next = *zlpp;
3265 		*zlpp = zl;
3266 
3267 		if (oidp == szp->z_id)		/* We're a descendant of szp */
3268 			return (EINVAL);
3269 
3270 		if (oidp == rootid)		/* We've hit the top */
3271 			return (0);
3272 
3273 		if (rw == RW_READER) {		/* i.e. not the first pass */
3274 			int error = zfs_zget(zp->z_zfsvfs, oidp, &zp);
3275 			if (error)
3276 				return (error);
3277 			zl->zl_znode = zp;
3278 		}
3279 		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zp->z_zfsvfs),
3280 		    &oidp, sizeof (oidp));
3281 		rwlp = &zp->z_parent_lock;
3282 		rw = RW_READER;
3283 
3284 	} while (zp->z_id != sdzp->z_id);
3285 
3286 	return (0);
3287 }
3288 
3289 /*
3290  * Move an entry from the provided source directory to the target
3291  * directory.  Change the entry name as indicated.
3292  *
3293  *	IN:	sdvp	- Source directory containing the "old entry".
3294  *		snm	- Old entry name.
3295  *		tdvp	- Target directory to contain the "new entry".
3296  *		tnm	- New entry name.
3297  *		cr	- credentials of caller.
3298  *		ct	- caller context
3299  *		flags	- case flags
3300  *
3301  *	RETURN:	0 if success
3302  *		error code if failure
3303  *
3304  * Timestamps:
3305  *	sdvp,tdvp - ctime|mtime updated
3306  */
3307 /*ARGSUSED*/
3308 static int
3309 zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
3310     caller_context_t *ct, int flags)
3311 {
3312 	znode_t		*tdzp, *szp, *tzp;
3313 	znode_t		*sdzp = VTOZ(sdvp);
3314 	zfsvfs_t	*zfsvfs = sdzp->z_zfsvfs;
3315 	zilog_t		*zilog;
3316 	vnode_t		*realvp;
3317 	zfs_dirlock_t	*sdl, *tdl;
3318 	dmu_tx_t	*tx;
3319 	zfs_zlock_t	*zl;
3320 	int		cmp, serr, terr;
3321 	int		error = 0;
3322 	int		zflg = 0;
3323 
3324 	ZFS_ENTER(zfsvfs);
3325 	ZFS_VERIFY_ZP(sdzp);
3326 	zilog = zfsvfs->z_log;
3327 
3328 	/*
3329 	 * Make sure we have the real vp for the target directory.
3330 	 */
3331 	if (VOP_REALVP(tdvp, &realvp, ct) == 0)
3332 		tdvp = realvp;
3333 
3334 	if (tdvp->v_vfsp != sdvp->v_vfsp || zfsctl_is_node(tdvp)) {
3335 		ZFS_EXIT(zfsvfs);
3336 		return (EXDEV);
3337 	}
3338 
3339 	tdzp = VTOZ(tdvp);
3340 	ZFS_VERIFY_ZP(tdzp);
3341 	if (zfsvfs->z_utf8 && u8_validate(tnm,
3342 	    strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3343 		ZFS_EXIT(zfsvfs);
3344 		return (EILSEQ);
3345 	}
3346 
3347 	if (flags & FIGNORECASE)
3348 		zflg |= ZCILOOK;
3349 
3350 top:
3351 	szp = NULL;
3352 	tzp = NULL;
3353 	zl = NULL;
3354 
3355 	/*
3356 	 * This is to prevent the creation of links into attribute space
3357 	 * by renaming a linked file into/outof an attribute directory.
3358 	 * See the comment in zfs_link() for why this is considered bad.
3359 	 */
3360 	if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3361 		ZFS_EXIT(zfsvfs);
3362 		return (EINVAL);
3363 	}
3364 
3365 	/*
3366 	 * Lock source and target directory entries.  To prevent deadlock,
3367 	 * a lock ordering must be defined.  We lock the directory with
3368 	 * the smallest object id first, or if it's a tie, the one with
3369 	 * the lexically first name.
3370 	 */
3371 	if (sdzp->z_id < tdzp->z_id) {
3372 		cmp = -1;
3373 	} else if (sdzp->z_id > tdzp->z_id) {
3374 		cmp = 1;
3375 	} else {
3376 		/*
3377 		 * First compare the two name arguments without
3378 		 * considering any case folding.
3379 		 */
3380 		int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
3381 
3382 		cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
3383 		ASSERT(error == 0 || !zfsvfs->z_utf8);
3384 		if (cmp == 0) {
3385 			/*
3386 			 * POSIX: "If the old argument and the new argument
3387 			 * both refer to links to the same existing file,
3388 			 * the rename() function shall return successfully
3389 			 * and perform no other action."
3390 			 */
3391 			ZFS_EXIT(zfsvfs);
3392 			return (0);
3393 		}
3394 		/*
3395 		 * If the file system is case-folding, then we may
3396 		 * have some more checking to do.  A case-folding file
3397 		 * system is either supporting mixed case sensitivity
3398 		 * access or is completely case-insensitive.  Note
3399 		 * that the file system is always case preserving.
3400 		 *
3401 		 * In mixed sensitivity mode case sensitive behavior
3402 		 * is the default.  FIGNORECASE must be used to
3403 		 * explicitly request case insensitive behavior.
3404 		 *
3405 		 * If the source and target names provided differ only
3406 		 * by case (e.g., a request to rename 'tim' to 'Tim'),
3407 		 * we will treat this as a special case in the
3408 		 * case-insensitive mode: as long as the source name
3409 		 * is an exact match, we will allow this to proceed as
3410 		 * a name-change request.
3411 		 */
3412 		if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
3413 		    (zfsvfs->z_case == ZFS_CASE_MIXED &&
3414 		    flags & FIGNORECASE)) &&
3415 		    u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
3416 		    &error) == 0) {
3417 			/*
3418 			 * case preserving rename request, require exact
3419 			 * name matches
3420 			 */
3421 			zflg |= ZCIEXACT;
3422 			zflg &= ~ZCILOOK;
3423 		}
3424 	}
3425 
3426 	/*
3427 	 * If the source and destination directories are the same, we should
3428 	 * grab the z_name_lock of that directory only once.
3429 	 */
3430 	if (sdzp == tdzp) {
3431 		zflg |= ZHAVELOCK;
3432 		rw_enter(&sdzp->z_name_lock, RW_READER);
3433 	}
3434 
3435 	if (cmp < 0) {
3436 		serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
3437 		    ZEXISTS | zflg, NULL, NULL);
3438 		terr = zfs_dirent_lock(&tdl,
3439 		    tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
3440 	} else {
3441 		terr = zfs_dirent_lock(&tdl,
3442 		    tdzp, tnm, &tzp, zflg, NULL, NULL);
3443 		serr = zfs_dirent_lock(&sdl,
3444 		    sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
3445 		    NULL, NULL);
3446 	}
3447 
3448 	if (serr) {
3449 		/*
3450 		 * Source entry invalid or not there.
3451 		 */
3452 		if (!terr) {
3453 			zfs_dirent_unlock(tdl);
3454 			if (tzp)
3455 				VN_RELE(ZTOV(tzp));
3456 		}
3457 
3458 		if (sdzp == tdzp)
3459 			rw_exit(&sdzp->z_name_lock);
3460 
3461 		if (strcmp(snm, "..") == 0)
3462 			serr = EINVAL;
3463 		ZFS_EXIT(zfsvfs);
3464 		return (serr);
3465 	}
3466 	if (terr) {
3467 		zfs_dirent_unlock(sdl);
3468 		VN_RELE(ZTOV(szp));
3469 
3470 		if (sdzp == tdzp)
3471 			rw_exit(&sdzp->z_name_lock);
3472 
3473 		if (strcmp(tnm, "..") == 0)
3474 			terr = EINVAL;
3475 		ZFS_EXIT(zfsvfs);
3476 		return (terr);
3477 	}
3478 
3479 	/*
3480 	 * Must have write access at the source to remove the old entry
3481 	 * and write access at the target to create the new entry.
3482 	 * Note that if target and source are the same, this can be
3483 	 * done in a single check.
3484 	 */
3485 
3486 	if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr))
3487 		goto out;
3488 
3489 	if (ZTOV(szp)->v_type == VDIR) {
3490 		/*
3491 		 * Check to make sure rename is valid.
3492 		 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3493 		 */
3494 		if (error = zfs_rename_lock(szp, tdzp, sdzp, &zl))
3495 			goto out;
3496 	}
3497 
3498 	/*
3499 	 * Does target exist?
3500 	 */
3501 	if (tzp) {
3502 		/*
3503 		 * Source and target must be the same type.
3504 		 */
3505 		if (ZTOV(szp)->v_type == VDIR) {
3506 			if (ZTOV(tzp)->v_type != VDIR) {
3507 				error = ENOTDIR;
3508 				goto out;
3509 			}
3510 		} else {
3511 			if (ZTOV(tzp)->v_type == VDIR) {
3512 				error = EISDIR;
3513 				goto out;
3514 			}
3515 		}
3516 		/*
3517 		 * POSIX dictates that when the source and target
3518 		 * entries refer to the same file object, rename
3519 		 * must do nothing and exit without error.
3520 		 */
3521 		if (szp->z_id == tzp->z_id) {
3522 			error = 0;
3523 			goto out;
3524 		}
3525 	}
3526 
3527 	vnevent_rename_src(ZTOV(szp), sdvp, snm, ct);
3528 	if (tzp)
3529 		vnevent_rename_dest(ZTOV(tzp), tdvp, tnm, ct);
3530 
3531 	/*
3532 	 * notify the target directory if it is not the same
3533 	 * as source directory.
3534 	 */
3535 	if (tdvp != sdvp) {
3536 		vnevent_rename_dest_dir(tdvp, ct);
3537 	}
3538 
3539 	tx = dmu_tx_create(zfsvfs->z_os);
3540 	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3541 	dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3542 	dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3543 	dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3544 	if (sdzp != tdzp) {
3545 		dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3546 		zfs_sa_upgrade_txholds(tx, tdzp);
3547 	}
3548 	if (tzp) {
3549 		dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3550 		zfs_sa_upgrade_txholds(tx, tzp);
3551 	}
3552 
3553 	zfs_sa_upgrade_txholds(tx, szp);
3554 	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3555 	error = dmu_tx_assign(tx, TXG_NOWAIT);
3556 	if (error) {
3557 		if (zl != NULL)
3558 			zfs_rename_unlock(&zl);
3559 		zfs_dirent_unlock(sdl);
3560 		zfs_dirent_unlock(tdl);
3561 
3562 		if (sdzp == tdzp)
3563 			rw_exit(&sdzp->z_name_lock);
3564 
3565 		VN_RELE(ZTOV(szp));
3566 		if (tzp)
3567 			VN_RELE(ZTOV(tzp));
3568 		if (error == ERESTART) {
3569 			dmu_tx_wait(tx);
3570 			dmu_tx_abort(tx);
3571 			goto top;
3572 		}
3573 		dmu_tx_abort(tx);
3574 		ZFS_EXIT(zfsvfs);
3575 		return (error);
3576 	}
3577 
3578 	if (tzp)	/* Attempt to remove the existing target */
3579 		error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
3580 
3581 	if (error == 0) {
3582 		error = zfs_link_create(tdl, szp, tx, ZRENAMING);
3583 		if (error == 0) {
3584 			szp->z_pflags |= ZFS_AV_MODIFIED;
3585 
3586 			error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
3587 			    (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3588 			ASSERT3U(error, ==, 0);
3589 
3590 			error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
3591 			if (error == 0) {
3592 				zfs_log_rename(zilog, tx, TX_RENAME |
3593 				    (flags & FIGNORECASE ? TX_CI : 0), sdzp,
3594 				    sdl->dl_name, tdzp, tdl->dl_name, szp);
3595 
3596 				/*
3597 				 * Update path information for the target vnode
3598 				 */
3599 				vn_renamepath(tdvp, ZTOV(szp), tnm,
3600 				    strlen(tnm));
3601 			} else {
3602 				/*
3603 				 * At this point, we have successfully created
3604 				 * the target name, but have failed to remove
3605 				 * the source name.  Since the create was done
3606 				 * with the ZRENAMING flag, there are
3607 				 * complications; for one, the link count is
3608 				 * wrong.  The easiest way to deal with this
3609 				 * is to remove the newly created target, and
3610 				 * return the original error.  This must
3611 				 * succeed; fortunately, it is very unlikely to
3612 				 * fail, since we just created it.
3613 				 */
3614 				VERIFY3U(zfs_link_destroy(tdl, szp, tx,
3615 				    ZRENAMING, NULL), ==, 0);
3616 			}
3617 		}
3618 	}
3619 
3620 	dmu_tx_commit(tx);
3621 out:
3622 	if (zl != NULL)
3623 		zfs_rename_unlock(&zl);
3624 
3625 	zfs_dirent_unlock(sdl);
3626 	zfs_dirent_unlock(tdl);
3627 
3628 	if (sdzp == tdzp)
3629 		rw_exit(&sdzp->z_name_lock);
3630 
3631 
3632 	VN_RELE(ZTOV(szp));
3633 	if (tzp)
3634 		VN_RELE(ZTOV(tzp));
3635 
3636 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3637 		zil_commit(zilog, 0);
3638 
3639 	ZFS_EXIT(zfsvfs);
3640 	return (error);
3641 }
3642 
3643 /*
3644  * Insert the indicated symbolic reference entry into the directory.
3645  *
3646  *	IN:	dvp	- Directory to contain new symbolic link.
3647  *		link	- Name for new symlink entry.
3648  *		vap	- Attributes of new entry.
3649  *		target	- Target path of new symlink.
3650  *		cr	- credentials of caller.
3651  *		ct	- caller context
3652  *		flags	- case flags
3653  *
3654  *	RETURN:	0 if success
3655  *		error code if failure
3656  *
3657  * Timestamps:
3658  *	dvp - ctime|mtime updated
3659  */
3660 /*ARGSUSED*/
3661 static int
3662 zfs_symlink(vnode_t *dvp, char *name, vattr_t *vap, char *link, cred_t *cr,
3663     caller_context_t *ct, int flags)
3664 {
3665 	znode_t		*zp, *dzp = VTOZ(dvp);
3666 	zfs_dirlock_t	*dl;
3667 	dmu_tx_t	*tx;
3668 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
3669 	zilog_t		*zilog;
3670 	uint64_t	len = strlen(link);
3671 	int		error;
3672 	int		zflg = ZNEW;
3673 	zfs_acl_ids_t	acl_ids;
3674 	boolean_t	fuid_dirtied;
3675 	uint64_t	txtype = TX_SYMLINK;
3676 
3677 	ASSERT(vap->va_type == VLNK);
3678 
3679 	ZFS_ENTER(zfsvfs);
3680 	ZFS_VERIFY_ZP(dzp);
3681 	zilog = zfsvfs->z_log;
3682 
3683 	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
3684 	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3685 		ZFS_EXIT(zfsvfs);
3686 		return (EILSEQ);
3687 	}
3688 	if (flags & FIGNORECASE)
3689 		zflg |= ZCILOOK;
3690 
3691 	if (len > MAXPATHLEN) {
3692 		ZFS_EXIT(zfsvfs);
3693 		return (ENAMETOOLONG);
3694 	}
3695 
3696 	if ((error = zfs_acl_ids_create(dzp, 0,
3697 	    vap, cr, NULL, &acl_ids)) != 0) {
3698 		ZFS_EXIT(zfsvfs);
3699 		return (error);
3700 	}
3701 top:
3702 	/*
3703 	 * Attempt to lock directory; fail if entry already exists.
3704 	 */
3705 	error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
3706 	if (error) {
3707 		zfs_acl_ids_free(&acl_ids);
3708 		ZFS_EXIT(zfsvfs);
3709 		return (error);
3710 	}
3711 
3712 	if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
3713 		zfs_acl_ids_free(&acl_ids);
3714 		zfs_dirent_unlock(dl);
3715 		ZFS_EXIT(zfsvfs);
3716 		return (error);
3717 	}
3718 
3719 	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
3720 		zfs_acl_ids_free(&acl_ids);
3721 		zfs_dirent_unlock(dl);
3722 		ZFS_EXIT(zfsvfs);
3723 		return (EDQUOT);
3724 	}
3725 	tx = dmu_tx_create(zfsvfs->z_os);
3726 	fuid_dirtied = zfsvfs->z_fuid_dirty;
3727 	dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
3728 	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3729 	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
3730 	    ZFS_SA_BASE_ATTR_SIZE + len);
3731 	dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
3732 	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3733 		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
3734 		    acl_ids.z_aclp->z_acl_bytes);
3735 	}
3736 	if (fuid_dirtied)
3737 		zfs_fuid_txhold(zfsvfs, tx);
3738 	error = dmu_tx_assign(tx, TXG_NOWAIT);
3739 	if (error) {
3740 		zfs_dirent_unlock(dl);
3741 		if (error == ERESTART) {
3742 			dmu_tx_wait(tx);
3743 			dmu_tx_abort(tx);
3744 			goto top;
3745 		}
3746 		zfs_acl_ids_free(&acl_ids);
3747 		dmu_tx_abort(tx);
3748 		ZFS_EXIT(zfsvfs);
3749 		return (error);
3750 	}
3751 
3752 	/*
3753 	 * Create a new object for the symlink.
3754 	 * for version 4 ZPL datsets the symlink will be an SA attribute
3755 	 */
3756 	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
3757 
3758 	if (fuid_dirtied)
3759 		zfs_fuid_sync(zfsvfs, tx);
3760 
3761 	mutex_enter(&zp->z_lock);
3762 	if (zp->z_is_sa)
3763 		error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
3764 		    link, len, tx);
3765 	else
3766 		zfs_sa_symlink(zp, link, len, tx);
3767 	mutex_exit(&zp->z_lock);
3768 
3769 	zp->z_size = len;
3770 	(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
3771 	    &zp->z_size, sizeof (zp->z_size), tx);
3772 	/*
3773 	 * Insert the new object into the directory.
3774 	 */
3775 	(void) zfs_link_create(dl, zp, tx, ZNEW);
3776 
3777 	if (flags & FIGNORECASE)
3778 		txtype |= TX_CI;
3779 	zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
3780 
3781 	zfs_acl_ids_free(&acl_ids);
3782 
3783 	dmu_tx_commit(tx);
3784 
3785 	zfs_dirent_unlock(dl);
3786 
3787 	VN_RELE(ZTOV(zp));
3788 
3789 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3790 		zil_commit(zilog, 0);
3791 
3792 	ZFS_EXIT(zfsvfs);
3793 	return (error);
3794 }
3795 
3796 /*
3797  * Return, in the buffer contained in the provided uio structure,
3798  * the symbolic path referred to by vp.
3799  *
3800  *	IN:	vp	- vnode of symbolic link.
3801  *		uoip	- structure to contain the link path.
3802  *		cr	- credentials of caller.
3803  *		ct	- caller context
3804  *
3805  *	OUT:	uio	- structure to contain the link path.
3806  *
3807  *	RETURN:	0 if success
3808  *		error code if failure
3809  *
3810  * Timestamps:
3811  *	vp - atime updated
3812  */
3813 /* ARGSUSED */
3814 static int
3815 zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct)
3816 {
3817 	znode_t		*zp = VTOZ(vp);
3818 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
3819 	int		error;
3820 
3821 	ZFS_ENTER(zfsvfs);
3822 	ZFS_VERIFY_ZP(zp);
3823 
3824 	mutex_enter(&zp->z_lock);
3825 	if (zp->z_is_sa)
3826 		error = sa_lookup_uio(zp->z_sa_hdl,
3827 		    SA_ZPL_SYMLINK(zfsvfs), uio);
3828 	else
3829 		error = zfs_sa_readlink(zp, uio);
3830 	mutex_exit(&zp->z_lock);
3831 
3832 	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
3833 
3834 	ZFS_EXIT(zfsvfs);
3835 	return (error);
3836 }
3837 
3838 /*
3839  * Insert a new entry into directory tdvp referencing svp.
3840  *
3841  *	IN:	tdvp	- Directory to contain new entry.
3842  *		svp	- vnode of new entry.
3843  *		name	- name of new entry.
3844  *		cr	- credentials of caller.
3845  *		ct	- caller context
3846  *
3847  *	RETURN:	0 if success
3848  *		error code if failure
3849  *
3850  * Timestamps:
3851  *	tdvp - ctime|mtime updated
3852  *	 svp - ctime updated
3853  */
3854 /* ARGSUSED */
3855 static int
3856 zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr,
3857     caller_context_t *ct, int flags)
3858 {
3859 	znode_t		*dzp = VTOZ(tdvp);
3860 	znode_t		*tzp, *szp;
3861 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
3862 	zilog_t		*zilog;
3863 	zfs_dirlock_t	*dl;
3864 	dmu_tx_t	*tx;
3865 	vnode_t		*realvp;
3866 	int		error;
3867 	int		zf = ZNEW;
3868 	uint64_t	parent;
3869 	uid_t		owner;
3870 
3871 	ASSERT(tdvp->v_type == VDIR);
3872 
3873 	ZFS_ENTER(zfsvfs);
3874 	ZFS_VERIFY_ZP(dzp);
3875 	zilog = zfsvfs->z_log;
3876 
3877 	if (VOP_REALVP(svp, &realvp, ct) == 0)
3878 		svp = realvp;
3879 
3880 	/*
3881 	 * POSIX dictates that we return EPERM here.
3882 	 * Better choices include ENOTSUP or EISDIR.
3883 	 */
3884 	if (svp->v_type == VDIR) {
3885 		ZFS_EXIT(zfsvfs);
3886 		return (EPERM);
3887 	}
3888 
3889 	if (svp->v_vfsp != tdvp->v_vfsp || zfsctl_is_node(svp)) {
3890 		ZFS_EXIT(zfsvfs);
3891 		return (EXDEV);
3892 	}
3893 
3894 	szp = VTOZ(svp);
3895 	ZFS_VERIFY_ZP(szp);
3896 
3897 	/* Prevent links to .zfs/shares files */
3898 
3899 	if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
3900 	    &parent, sizeof (uint64_t))) != 0) {
3901 		ZFS_EXIT(zfsvfs);
3902 		return (error);
3903 	}
3904 	if (parent == zfsvfs->z_shares_dir) {
3905 		ZFS_EXIT(zfsvfs);
3906 		return (EPERM);
3907 	}
3908 
3909 	if (zfsvfs->z_utf8 && u8_validate(name,
3910 	    strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3911 		ZFS_EXIT(zfsvfs);
3912 		return (EILSEQ);
3913 	}
3914 	if (flags & FIGNORECASE)
3915 		zf |= ZCILOOK;
3916 
3917 	/*
3918 	 * We do not support links between attributes and non-attributes
3919 	 * because of the potential security risk of creating links
3920 	 * into "normal" file space in order to circumvent restrictions
3921 	 * imposed in attribute space.
3922 	 */
3923 	if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
3924 		ZFS_EXIT(zfsvfs);
3925 		return (EINVAL);
3926 	}
3927 
3928 
3929 	owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER);
3930 	if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
3931 		ZFS_EXIT(zfsvfs);
3932 		return (EPERM);
3933 	}
3934 
3935 	if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
3936 		ZFS_EXIT(zfsvfs);
3937 		return (error);
3938 	}
3939 
3940 top:
3941 	/*
3942 	 * Attempt to lock directory; fail if entry already exists.
3943 	 */
3944 	error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
3945 	if (error) {
3946 		ZFS_EXIT(zfsvfs);
3947 		return (error);
3948 	}
3949 
3950 	tx = dmu_tx_create(zfsvfs->z_os);
3951 	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3952 	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3953 	zfs_sa_upgrade_txholds(tx, szp);
3954 	zfs_sa_upgrade_txholds(tx, dzp);
3955 	error = dmu_tx_assign(tx, TXG_NOWAIT);
3956 	if (error) {
3957 		zfs_dirent_unlock(dl);
3958 		if (error == ERESTART) {
3959 			dmu_tx_wait(tx);
3960 			dmu_tx_abort(tx);
3961 			goto top;
3962 		}
3963 		dmu_tx_abort(tx);
3964 		ZFS_EXIT(zfsvfs);
3965 		return (error);
3966 	}
3967 
3968 	error = zfs_link_create(dl, szp, tx, 0);
3969 
3970 	if (error == 0) {
3971 		uint64_t txtype = TX_LINK;
3972 		if (flags & FIGNORECASE)
3973 			txtype |= TX_CI;
3974 		zfs_log_link(zilog, tx, txtype, dzp, szp, name);
3975 	}
3976 
3977 	dmu_tx_commit(tx);
3978 
3979 	zfs_dirent_unlock(dl);
3980 
3981 	if (error == 0) {
3982 		vnevent_link(svp, ct);
3983 	}
3984 
3985 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3986 		zil_commit(zilog, 0);
3987 
3988 	ZFS_EXIT(zfsvfs);
3989 	return (error);
3990 }
3991 
3992 /*
3993  * zfs_null_putapage() is used when the file system has been force
3994  * unmounted. It just drops the pages.
3995  */
3996 /* ARGSUSED */
3997 static int
3998 zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
3999 		size_t *lenp, int flags, cred_t *cr)
4000 {
4001 	pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR);
4002 	return (0);
4003 }
4004 
4005 /*
4006  * Push a page out to disk, klustering if possible.
4007  *
4008  *	IN:	vp	- file to push page to.
4009  *		pp	- page to push.
4010  *		flags	- additional flags.
4011  *		cr	- credentials of caller.
4012  *
4013  *	OUT:	offp	- start of range pushed.
4014  *		lenp	- len of range pushed.
4015  *
4016  *	RETURN:	0 if success
4017  *		error code if failure
4018  *
4019  * NOTE: callers must have locked the page to be pushed.  On
4020  * exit, the page (and all other pages in the kluster) must be
4021  * unlocked.
4022  */
4023 /* ARGSUSED */
4024 static int
4025 zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
4026 		size_t *lenp, int flags, cred_t *cr)
4027 {
4028 	znode_t		*zp = VTOZ(vp);
4029 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4030 	dmu_tx_t	*tx;
4031 	u_offset_t	off, koff;
4032 	size_t		len, klen;
4033 	int		err;
4034 
4035 	off = pp->p_offset;
4036 	len = PAGESIZE;
4037 	/*
4038 	 * If our blocksize is bigger than the page size, try to kluster
4039 	 * multiple pages so that we write a full block (thus avoiding
4040 	 * a read-modify-write).
4041 	 */
4042 	if (off < zp->z_size && zp->z_blksz > PAGESIZE) {
4043 		klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE);
4044 		koff = ISP2(klen) ? P2ALIGN(off, (u_offset_t)klen) : 0;
4045 		ASSERT(koff <= zp->z_size);
4046 		if (koff + klen > zp->z_size)
4047 			klen = P2ROUNDUP(zp->z_size - koff, (uint64_t)PAGESIZE);
4048 		pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags);
4049 	}
4050 	ASSERT3U(btop(len), ==, btopr(len));
4051 
4052 	/*
4053 	 * Can't push pages past end-of-file.
4054 	 */
4055 	if (off >= zp->z_size) {
4056 		/* ignore all pages */
4057 		err = 0;
4058 		goto out;
4059 	} else if (off + len > zp->z_size) {
4060 		int npages = btopr(zp->z_size - off);
4061 		page_t *trunc;
4062 
4063 		page_list_break(&pp, &trunc, npages);
4064 		/* ignore pages past end of file */
4065 		if (trunc)
4066 			pvn_write_done(trunc, flags);
4067 		len = zp->z_size - off;
4068 	}
4069 
4070 	if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
4071 	    zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
4072 		err = EDQUOT;
4073 		goto out;
4074 	}
4075 top:
4076 	tx = dmu_tx_create(zfsvfs->z_os);
4077 	dmu_tx_hold_write(tx, zp->z_id, off, len);
4078 
4079 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4080 	zfs_sa_upgrade_txholds(tx, zp);
4081 	err = dmu_tx_assign(tx, TXG_NOWAIT);
4082 	if (err != 0) {
4083 		if (err == ERESTART) {
4084 			dmu_tx_wait(tx);
4085 			dmu_tx_abort(tx);
4086 			goto top;
4087 		}
4088 		dmu_tx_abort(tx);
4089 		goto out;
4090 	}
4091 
4092 	if (zp->z_blksz <= PAGESIZE) {
4093 		caddr_t va = zfs_map_page(pp, S_READ);
4094 		ASSERT3U(len, <=, PAGESIZE);
4095 		dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx);
4096 		zfs_unmap_page(pp, va);
4097 	} else {
4098 		err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx);
4099 	}
4100 
4101 	if (err == 0) {
4102 		uint64_t mtime[2], ctime[2];
4103 		sa_bulk_attr_t bulk[3];
4104 		int count = 0;
4105 
4106 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
4107 		    &mtime, 16);
4108 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
4109 		    &ctime, 16);
4110 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
4111 		    &zp->z_pflags, 8);
4112 		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
4113 		    B_TRUE);
4114 		zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
4115 	}
4116 	dmu_tx_commit(tx);
4117 
4118 out:
4119 	pvn_write_done(pp, (err ? B_ERROR : 0) | flags);
4120 	if (offp)
4121 		*offp = off;
4122 	if (lenp)
4123 		*lenp = len;
4124 
4125 	return (err);
4126 }
4127 
4128 /*
4129  * Copy the portion of the file indicated from pages into the file.
4130  * The pages are stored in a page list attached to the files vnode.
4131  *
4132  *	IN:	vp	- vnode of file to push page data to.
4133  *		off	- position in file to put data.
4134  *		len	- amount of data to write.
4135  *		flags	- flags to control the operation.
4136  *		cr	- credentials of caller.
4137  *		ct	- caller context.
4138  *
4139  *	RETURN:	0 if success
4140  *		error code if failure
4141  *
4142  * Timestamps:
4143  *	vp - ctime|mtime updated
4144  */
4145 /*ARGSUSED*/
4146 static int
4147 zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
4148     caller_context_t *ct)
4149 {
4150 	znode_t		*zp = VTOZ(vp);
4151 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4152 	page_t		*pp;
4153 	size_t		io_len;
4154 	u_offset_t	io_off;
4155 	uint_t		blksz;
4156 	rl_t		*rl;
4157 	int		error = 0;
4158 
4159 	ZFS_ENTER(zfsvfs);
4160 	ZFS_VERIFY_ZP(zp);
4161 
4162 	/*
4163 	 * Align this request to the file block size in case we kluster.
4164 	 * XXX - this can result in pretty aggresive locking, which can
4165 	 * impact simultanious read/write access.  One option might be
4166 	 * to break up long requests (len == 0) into block-by-block
4167 	 * operations to get narrower locking.
4168 	 */
4169 	blksz = zp->z_blksz;
4170 	if (ISP2(blksz))
4171 		io_off = P2ALIGN_TYPED(off, blksz, u_offset_t);
4172 	else
4173 		io_off = 0;
4174 	if (len > 0 && ISP2(blksz))
4175 		io_len = P2ROUNDUP_TYPED(len + (off - io_off), blksz, size_t);
4176 	else
4177 		io_len = 0;
4178 
4179 	if (io_len == 0) {
4180 		/*
4181 		 * Search the entire vp list for pages >= io_off.
4182 		 */
4183 		rl = zfs_range_lock(zp, io_off, UINT64_MAX, RL_WRITER);
4184 		error = pvn_vplist_dirty(vp, io_off, zfs_putapage, flags, cr);
4185 		goto out;
4186 	}
4187 	rl = zfs_range_lock(zp, io_off, io_len, RL_WRITER);
4188 
4189 	if (off > zp->z_size) {
4190 		/* past end of file */
4191 		zfs_range_unlock(rl);
4192 		ZFS_EXIT(zfsvfs);
4193 		return (0);
4194 	}
4195 
4196 	len = MIN(io_len, P2ROUNDUP(zp->z_size, PAGESIZE) - io_off);
4197 
4198 	for (off = io_off; io_off < off + len; io_off += io_len) {
4199 		if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
4200 			pp = page_lookup(vp, io_off,
4201 			    (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED);
4202 		} else {
4203 			pp = page_lookup_nowait(vp, io_off,
4204 			    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
4205 		}
4206 
4207 		if (pp != NULL && pvn_getdirty(pp, flags)) {
4208 			int err;
4209 
4210 			/*
4211 			 * Found a dirty page to push
4212 			 */
4213 			err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr);
4214 			if (err)
4215 				error = err;
4216 		} else {
4217 			io_len = PAGESIZE;
4218 		}
4219 	}
4220 out:
4221 	zfs_range_unlock(rl);
4222 	if ((flags & B_ASYNC) == 0 || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4223 		zil_commit(zfsvfs->z_log, zp->z_id);
4224 	ZFS_EXIT(zfsvfs);
4225 	return (error);
4226 }
4227 
4228 /*ARGSUSED*/
4229 void
4230 zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
4231 {
4232 	znode_t	*zp = VTOZ(vp);
4233 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4234 	int error;
4235 
4236 	rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
4237 	if (zp->z_sa_hdl == NULL) {
4238 		/*
4239 		 * The fs has been unmounted, or we did a
4240 		 * suspend/resume and this file no longer exists.
4241 		 */
4242 		if (vn_has_cached_data(vp)) {
4243 			(void) pvn_vplist_dirty(vp, 0, zfs_null_putapage,
4244 			    B_INVAL, cr);
4245 		}
4246 
4247 		mutex_enter(&zp->z_lock);
4248 		mutex_enter(&vp->v_lock);
4249 		ASSERT(vp->v_count == 1);
4250 		vp->v_count = 0;
4251 		mutex_exit(&vp->v_lock);
4252 		mutex_exit(&zp->z_lock);
4253 		rw_exit(&zfsvfs->z_teardown_inactive_lock);
4254 		zfs_znode_free(zp);
4255 		return;
4256 	}
4257 
4258 	/*
4259 	 * Attempt to push any data in the page cache.  If this fails
4260 	 * we will get kicked out later in zfs_zinactive().
4261 	 */
4262 	if (vn_has_cached_data(vp)) {
4263 		(void) pvn_vplist_dirty(vp, 0, zfs_putapage, B_INVAL|B_ASYNC,
4264 		    cr);
4265 	}
4266 
4267 	if (zp->z_atime_dirty && zp->z_unlinked == 0) {
4268 		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
4269 
4270 		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4271 		zfs_sa_upgrade_txholds(tx, zp);
4272 		error = dmu_tx_assign(tx, TXG_WAIT);
4273 		if (error) {
4274 			dmu_tx_abort(tx);
4275 		} else {
4276 			mutex_enter(&zp->z_lock);
4277 			(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
4278 			    (void *)&zp->z_atime, sizeof (zp->z_atime), tx);
4279 			zp->z_atime_dirty = 0;
4280 			mutex_exit(&zp->z_lock);
4281 			dmu_tx_commit(tx);
4282 		}
4283 	}
4284 
4285 	zfs_zinactive(zp);
4286 	rw_exit(&zfsvfs->z_teardown_inactive_lock);
4287 }
4288 
4289 /*
4290  * Bounds-check the seek operation.
4291  *
4292  *	IN:	vp	- vnode seeking within
4293  *		ooff	- old file offset
4294  *		noffp	- pointer to new file offset
4295  *		ct	- caller context
4296  *
4297  *	RETURN:	0 if success
4298  *		EINVAL if new offset invalid
4299  */
4300 /* ARGSUSED */
4301 static int
4302 zfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp,
4303     caller_context_t *ct)
4304 {
4305 	if (vp->v_type == VDIR)
4306 		return (0);
4307 	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4308 }
4309 
4310 /*
4311  * Pre-filter the generic locking function to trap attempts to place
4312  * a mandatory lock on a memory mapped file.
4313  */
4314 static int
4315 zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset,
4316     flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct)
4317 {
4318 	znode_t *zp = VTOZ(vp);
4319 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4320 
4321 	ZFS_ENTER(zfsvfs);
4322 	ZFS_VERIFY_ZP(zp);
4323 
4324 	/*
4325 	 * We are following the UFS semantics with respect to mapcnt
4326 	 * here: If we see that the file is mapped already, then we will
4327 	 * return an error, but we don't worry about races between this
4328 	 * function and zfs_map().
4329 	 */
4330 	if (zp->z_mapcnt > 0 && MANDMODE(zp->z_mode)) {
4331 		ZFS_EXIT(zfsvfs);
4332 		return (EAGAIN);
4333 	}
4334 	ZFS_EXIT(zfsvfs);
4335 	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
4336 }
4337 
4338 /*
4339  * If we can't find a page in the cache, we will create a new page
4340  * and fill it with file data.  For efficiency, we may try to fill
4341  * multiple pages at once (klustering) to fill up the supplied page
4342  * list.  Note that the pages to be filled are held with an exclusive
4343  * lock to prevent access by other threads while they are being filled.
4344  */
4345 static int
4346 zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg,
4347     caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw)
4348 {
4349 	znode_t *zp = VTOZ(vp);
4350 	page_t *pp, *cur_pp;
4351 	objset_t *os = zp->z_zfsvfs->z_os;
4352 	u_offset_t io_off, total;
4353 	size_t io_len;
4354 	int err;
4355 
4356 	if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) {
4357 		/*
4358 		 * We only have a single page, don't bother klustering
4359 		 */
4360 		io_off = off;
4361 		io_len = PAGESIZE;
4362 		pp = page_create_va(vp, io_off, io_len,
4363 		    PG_EXCL | PG_WAIT, seg, addr);
4364 	} else {
4365 		/*
4366 		 * Try to find enough pages to fill the page list
4367 		 */
4368 		pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
4369 		    &io_len, off, plsz, 0);
4370 	}
4371 	if (pp == NULL) {
4372 		/*
4373 		 * The page already exists, nothing to do here.
4374 		 */
4375 		*pl = NULL;
4376 		return (0);
4377 	}
4378 
4379 	/*
4380 	 * Fill the pages in the kluster.
4381 	 */
4382 	cur_pp = pp;
4383 	for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
4384 		caddr_t va;
4385 
4386 		ASSERT3U(io_off, ==, cur_pp->p_offset);
4387 		va = zfs_map_page(cur_pp, S_WRITE);
4388 		err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
4389 		    DMU_READ_PREFETCH);
4390 		zfs_unmap_page(cur_pp, va);
4391 		if (err) {
4392 			/* On error, toss the entire kluster */
4393 			pvn_read_done(pp, B_ERROR);
4394 			/* convert checksum errors into IO errors */
4395 			if (err == ECKSUM)
4396 				err = EIO;
4397 			return (err);
4398 		}
4399 		cur_pp = cur_pp->p_next;
4400 	}
4401 
4402 	/*
4403 	 * Fill in the page list array from the kluster starting
4404 	 * from the desired offset `off'.
4405 	 * NOTE: the page list will always be null terminated.
4406 	 */
4407 	pvn_plist_init(pp, pl, plsz, off, io_len, rw);
4408 	ASSERT(pl == NULL || (*pl)->p_offset == off);
4409 
4410 	return (0);
4411 }
4412 
4413 /*
4414  * Return pointers to the pages for the file region [off, off + len]
4415  * in the pl array.  If plsz is greater than len, this function may
4416  * also return page pointers from after the specified region
4417  * (i.e. the region [off, off + plsz]).  These additional pages are
4418  * only returned if they are already in the cache, or were created as
4419  * part of a klustered read.
4420  *
4421  *	IN:	vp	- vnode of file to get data from.
4422  *		off	- position in file to get data from.
4423  *		len	- amount of data to retrieve.
4424  *		plsz	- length of provided page list.
4425  *		seg	- segment to obtain pages for.
4426  *		addr	- virtual address of fault.
4427  *		rw	- mode of created pages.
4428  *		cr	- credentials of caller.
4429  *		ct	- caller context.
4430  *
4431  *	OUT:	protp	- protection mode of created pages.
4432  *		pl	- list of pages created.
4433  *
4434  *	RETURN:	0 if success
4435  *		error code if failure
4436  *
4437  * Timestamps:
4438  *	vp - atime updated
4439  */
4440 /* ARGSUSED */
4441 static int
4442 zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
4443 	page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
4444 	enum seg_rw rw, cred_t *cr, caller_context_t *ct)
4445 {
4446 	znode_t		*zp = VTOZ(vp);
4447 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4448 	page_t		**pl0 = pl;
4449 	int		err = 0;
4450 
4451 	/* we do our own caching, faultahead is unnecessary */
4452 	if (pl == NULL)
4453 		return (0);
4454 	else if (len > plsz)
4455 		len = plsz;
4456 	else
4457 		len = P2ROUNDUP(len, PAGESIZE);
4458 	ASSERT(plsz >= len);
4459 
4460 	ZFS_ENTER(zfsvfs);
4461 	ZFS_VERIFY_ZP(zp);
4462 
4463 	if (protp)
4464 		*protp = PROT_ALL;
4465 
4466 	/*
4467 	 * Loop through the requested range [off, off + len) looking
4468 	 * for pages.  If we don't find a page, we will need to create
4469 	 * a new page and fill it with data from the file.
4470 	 */
4471 	while (len > 0) {
4472 		if (*pl = page_lookup(vp, off, SE_SHARED))
4473 			*(pl+1) = NULL;
4474 		else if (err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw))
4475 			goto out;
4476 		while (*pl) {
4477 			ASSERT3U((*pl)->p_offset, ==, off);
4478 			off += PAGESIZE;
4479 			addr += PAGESIZE;
4480 			if (len > 0) {
4481 				ASSERT3U(len, >=, PAGESIZE);
4482 				len -= PAGESIZE;
4483 			}
4484 			ASSERT3U(plsz, >=, PAGESIZE);
4485 			plsz -= PAGESIZE;
4486 			pl++;
4487 		}
4488 	}
4489 
4490 	/*
4491 	 * Fill out the page array with any pages already in the cache.
4492 	 */
4493 	while (plsz > 0 &&
4494 	    (*pl++ = page_lookup_nowait(vp, off, SE_SHARED))) {
4495 			off += PAGESIZE;
4496 			plsz -= PAGESIZE;
4497 	}
4498 out:
4499 	if (err) {
4500 		/*
4501 		 * Release any pages we have previously locked.
4502 		 */
4503 		while (pl > pl0)
4504 			page_unlock(*--pl);
4505 	} else {
4506 		ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
4507 	}
4508 
4509 	*pl = NULL;
4510 
4511 	ZFS_EXIT(zfsvfs);
4512 	return (err);
4513 }
4514 
4515 /*
4516  * Request a memory map for a section of a file.  This code interacts
4517  * with common code and the VM system as follows:
4518  *
4519  *	common code calls mmap(), which ends up in smmap_common()
4520  *
4521  *	this calls VOP_MAP(), which takes you into (say) zfs
4522  *
4523  *	zfs_map() calls as_map(), passing segvn_create() as the callback
4524  *
4525  *	segvn_create() creates the new segment and calls VOP_ADDMAP()
4526  *
4527  *	zfs_addmap() updates z_mapcnt
4528  */
4529 /*ARGSUSED*/
4530 static int
4531 zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
4532     size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4533     caller_context_t *ct)
4534 {
4535 	znode_t *zp = VTOZ(vp);
4536 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4537 	segvn_crargs_t	vn_a;
4538 	int		error;
4539 
4540 	ZFS_ENTER(zfsvfs);
4541 	ZFS_VERIFY_ZP(zp);
4542 
4543 	if ((prot & PROT_WRITE) && (zp->z_pflags &
4544 	    (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
4545 		ZFS_EXIT(zfsvfs);
4546 		return (EPERM);
4547 	}
4548 
4549 	if ((prot & (PROT_READ | PROT_EXEC)) &&
4550 	    (zp->z_pflags & ZFS_AV_QUARANTINED)) {
4551 		ZFS_EXIT(zfsvfs);
4552 		return (EACCES);
4553 	}
4554 
4555 	if (vp->v_flag & VNOMAP) {
4556 		ZFS_EXIT(zfsvfs);
4557 		return (ENOSYS);
4558 	}
4559 
4560 	if (off < 0 || len > MAXOFFSET_T - off) {
4561 		ZFS_EXIT(zfsvfs);
4562 		return (ENXIO);
4563 	}
4564 
4565 	if (vp->v_type != VREG) {
4566 		ZFS_EXIT(zfsvfs);
4567 		return (ENODEV);
4568 	}
4569 
4570 	/*
4571 	 * If file is locked, disallow mapping.
4572 	 */
4573 	if (MANDMODE(zp->z_mode) && vn_has_flocks(vp)) {
4574 		ZFS_EXIT(zfsvfs);
4575 		return (EAGAIN);
4576 	}
4577 
4578 	as_rangelock(as);
4579 	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
4580 	if (error != 0) {
4581 		as_rangeunlock(as);
4582 		ZFS_EXIT(zfsvfs);
4583 		return (error);
4584 	}
4585 
4586 	vn_a.vp = vp;
4587 	vn_a.offset = (u_offset_t)off;
4588 	vn_a.type = flags & MAP_TYPE;
4589 	vn_a.prot = prot;
4590 	vn_a.maxprot = maxprot;
4591 	vn_a.cred = cr;
4592 	vn_a.amp = NULL;
4593 	vn_a.flags = flags & ~MAP_TYPE;
4594 	vn_a.szc = 0;
4595 	vn_a.lgrp_mem_policy_flags = 0;
4596 
4597 	error = as_map(as, *addrp, len, segvn_create, &vn_a);
4598 
4599 	as_rangeunlock(as);
4600 	ZFS_EXIT(zfsvfs);
4601 	return (error);
4602 }
4603 
4604 /* ARGSUSED */
4605 static int
4606 zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4607     size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4608     caller_context_t *ct)
4609 {
4610 	uint64_t pages = btopr(len);
4611 
4612 	atomic_add_64(&VTOZ(vp)->z_mapcnt, pages);
4613 	return (0);
4614 }
4615 
4616 /*
4617  * The reason we push dirty pages as part of zfs_delmap() is so that we get a
4618  * more accurate mtime for the associated file.  Since we don't have a way of
4619  * detecting when the data was actually modified, we have to resort to
4620  * heuristics.  If an explicit msync() is done, then we mark the mtime when the
4621  * last page is pushed.  The problem occurs when the msync() call is omitted,
4622  * which by far the most common case:
4623  *
4624  * 	open()
4625  * 	mmap()
4626  * 	<modify memory>
4627  * 	munmap()
4628  * 	close()
4629  * 	<time lapse>
4630  * 	putpage() via fsflush
4631  *
4632  * If we wait until fsflush to come along, we can have a modification time that
4633  * is some arbitrary point in the future.  In order to prevent this in the
4634  * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is
4635  * torn down.
4636  */
4637 /* ARGSUSED */
4638 static int
4639 zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4640     size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
4641     caller_context_t *ct)
4642 {
4643 	uint64_t pages = btopr(len);
4644 
4645 	ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages);
4646 	atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages);
4647 
4648 	if ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
4649 	    vn_has_cached_data(vp))
4650 		(void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr, ct);
4651 
4652 	return (0);
4653 }
4654 
4655 /*
4656  * Free or allocate space in a file.  Currently, this function only
4657  * supports the `F_FREESP' command.  However, this command is somewhat
4658  * misnamed, as its functionality includes the ability to allocate as
4659  * well as free space.
4660  *
4661  *	IN:	vp	- vnode of file to free data in.
4662  *		cmd	- action to take (only F_FREESP supported).
4663  *		bfp	- section of file to free/alloc.
4664  *		flag	- current file open mode flags.
4665  *		offset	- current file offset.
4666  *		cr	- credentials of caller [UNUSED].
4667  *		ct	- caller context.
4668  *
4669  *	RETURN:	0 if success
4670  *		error code if failure
4671  *
4672  * Timestamps:
4673  *	vp - ctime|mtime updated
4674  */
4675 /* ARGSUSED */
4676 static int
4677 zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag,
4678     offset_t offset, cred_t *cr, caller_context_t *ct)
4679 {
4680 	znode_t		*zp = VTOZ(vp);
4681 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4682 	uint64_t	off, len;
4683 	int		error;
4684 
4685 	ZFS_ENTER(zfsvfs);
4686 	ZFS_VERIFY_ZP(zp);
4687 
4688 	if (cmd != F_FREESP) {
4689 		ZFS_EXIT(zfsvfs);
4690 		return (EINVAL);
4691 	}
4692 
4693 	if (error = convoff(vp, bfp, 0, offset)) {
4694 		ZFS_EXIT(zfsvfs);
4695 		return (error);
4696 	}
4697 
4698 	if (bfp->l_len < 0) {
4699 		ZFS_EXIT(zfsvfs);
4700 		return (EINVAL);
4701 	}
4702 
4703 	off = bfp->l_start;
4704 	len = bfp->l_len; /* 0 means from off to end of file */
4705 
4706 	error = zfs_freesp(zp, off, len, flag, TRUE);
4707 
4708 	ZFS_EXIT(zfsvfs);
4709 	return (error);
4710 }
4711 
4712 /*ARGSUSED*/
4713 static int
4714 zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
4715 {
4716 	znode_t		*zp = VTOZ(vp);
4717 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4718 	uint32_t	gen;
4719 	uint64_t	gen64;
4720 	uint64_t	object = zp->z_id;
4721 	zfid_short_t	*zfid;
4722 	int		size, i, error;
4723 
4724 	ZFS_ENTER(zfsvfs);
4725 	ZFS_VERIFY_ZP(zp);
4726 
4727 	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
4728 	    &gen64, sizeof (uint64_t))) != 0) {
4729 		ZFS_EXIT(zfsvfs);
4730 		return (error);
4731 	}
4732 
4733 	gen = (uint32_t)gen64;
4734 
4735 	size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
4736 	if (fidp->fid_len < size) {
4737 		fidp->fid_len = size;
4738 		ZFS_EXIT(zfsvfs);
4739 		return (ENOSPC);
4740 	}
4741 
4742 	zfid = (zfid_short_t *)fidp;
4743 
4744 	zfid->zf_len = size;
4745 
4746 	for (i = 0; i < sizeof (zfid->zf_object); i++)
4747 		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
4748 
4749 	/* Must have a non-zero generation number to distinguish from .zfs */
4750 	if (gen == 0)
4751 		gen = 1;
4752 	for (i = 0; i < sizeof (zfid->zf_gen); i++)
4753 		zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
4754 
4755 	if (size == LONG_FID_LEN) {
4756 		uint64_t	objsetid = dmu_objset_id(zfsvfs->z_os);
4757 		zfid_long_t	*zlfid;
4758 
4759 		zlfid = (zfid_long_t *)fidp;
4760 
4761 		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
4762 			zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
4763 
4764 		/* XXX - this should be the generation number for the objset */
4765 		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
4766 			zlfid->zf_setgen[i] = 0;
4767 	}
4768 
4769 	ZFS_EXIT(zfsvfs);
4770 	return (0);
4771 }
4772 
4773 static int
4774 zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
4775     caller_context_t *ct)
4776 {
4777 	znode_t		*zp, *xzp;
4778 	zfsvfs_t	*zfsvfs;
4779 	zfs_dirlock_t	*dl;
4780 	int		error;
4781 
4782 	switch (cmd) {
4783 	case _PC_LINK_MAX:
4784 		*valp = ULONG_MAX;
4785 		return (0);
4786 
4787 	case _PC_FILESIZEBITS:
4788 		*valp = 64;
4789 		return (0);
4790 
4791 	case _PC_XATTR_EXISTS:
4792 		zp = VTOZ(vp);
4793 		zfsvfs = zp->z_zfsvfs;
4794 		ZFS_ENTER(zfsvfs);
4795 		ZFS_VERIFY_ZP(zp);
4796 		*valp = 0;
4797 		error = zfs_dirent_lock(&dl, zp, "", &xzp,
4798 		    ZXATTR | ZEXISTS | ZSHARED, NULL, NULL);
4799 		if (error == 0) {
4800 			zfs_dirent_unlock(dl);
4801 			if (!zfs_dirempty(xzp))
4802 				*valp = 1;
4803 			VN_RELE(ZTOV(xzp));
4804 		} else if (error == ENOENT) {
4805 			/*
4806 			 * If there aren't extended attributes, it's the
4807 			 * same as having zero of them.
4808 			 */
4809 			error = 0;
4810 		}
4811 		ZFS_EXIT(zfsvfs);
4812 		return (error);
4813 
4814 	case _PC_SATTR_ENABLED:
4815 	case _PC_SATTR_EXISTS:
4816 		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
4817 		    (vp->v_type == VREG || vp->v_type == VDIR);
4818 		return (0);
4819 
4820 	case _PC_ACCESS_FILTERING:
4821 		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_ACCESS_FILTER) &&
4822 		    vp->v_type == VDIR;
4823 		return (0);
4824 
4825 	case _PC_ACL_ENABLED:
4826 		*valp = _ACL_ACE_ENABLED;
4827 		return (0);
4828 
4829 	case _PC_MIN_HOLE_SIZE:
4830 		*valp = (ulong_t)SPA_MINBLOCKSIZE;
4831 		return (0);
4832 
4833 	case _PC_TIMESTAMP_RESOLUTION:
4834 		/* nanosecond timestamp resolution */
4835 		*valp = 1L;
4836 		return (0);
4837 
4838 	default:
4839 		return (fs_pathconf(vp, cmd, valp, cr, ct));
4840 	}
4841 }
4842 
4843 /*ARGSUSED*/
4844 static int
4845 zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
4846     caller_context_t *ct)
4847 {
4848 	znode_t *zp = VTOZ(vp);
4849 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4850 	int error;
4851 	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4852 
4853 	ZFS_ENTER(zfsvfs);
4854 	ZFS_VERIFY_ZP(zp);
4855 	error = zfs_getacl(zp, vsecp, skipaclchk, cr);
4856 	ZFS_EXIT(zfsvfs);
4857 
4858 	return (error);
4859 }
4860 
4861 /*ARGSUSED*/
4862 static int
4863 zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
4864     caller_context_t *ct)
4865 {
4866 	znode_t *zp = VTOZ(vp);
4867 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4868 	int error;
4869 	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4870 	zilog_t	*zilog = zfsvfs->z_log;
4871 
4872 	ZFS_ENTER(zfsvfs);
4873 	ZFS_VERIFY_ZP(zp);
4874 
4875 	error = zfs_setacl(zp, vsecp, skipaclchk, cr);
4876 
4877 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4878 		zil_commit(zilog, 0);
4879 
4880 	ZFS_EXIT(zfsvfs);
4881 	return (error);
4882 }
4883 
4884 /*
4885  * Tunable, both must be a power of 2.
4886  *
4887  * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
4888  * zcr_blksz_max: if set to less than the file block size, allow loaning out of
4889  *                an arcbuf for a partial block read
4890  */
4891 int zcr_blksz_min = (1 << 10);	/* 1K */
4892 int zcr_blksz_max = (1 << 17);	/* 128K */
4893 
4894 /*ARGSUSED*/
4895 static int
4896 zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
4897     caller_context_t *ct)
4898 {
4899 	znode_t	*zp = VTOZ(vp);
4900 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4901 	int max_blksz = zfsvfs->z_max_blksz;
4902 	uio_t *uio = &xuio->xu_uio;
4903 	ssize_t size = uio->uio_resid;
4904 	offset_t offset = uio->uio_loffset;
4905 	int blksz;
4906 	int fullblk, i;
4907 	arc_buf_t *abuf;
4908 	ssize_t maxsize;
4909 	int preamble, postamble;
4910 
4911 	if (xuio->xu_type != UIOTYPE_ZEROCOPY)
4912 		return (EINVAL);
4913 
4914 	ZFS_ENTER(zfsvfs);
4915 	ZFS_VERIFY_ZP(zp);
4916 	switch (ioflag) {
4917 	case UIO_WRITE:
4918 		/*
4919 		 * Loan out an arc_buf for write if write size is bigger than
4920 		 * max_blksz, and the file's block size is also max_blksz.
4921 		 */
4922 		blksz = max_blksz;
4923 		if (size < blksz || zp->z_blksz != blksz) {
4924 			ZFS_EXIT(zfsvfs);
4925 			return (EINVAL);
4926 		}
4927 		/*
4928 		 * Caller requests buffers for write before knowing where the
4929 		 * write offset might be (e.g. NFS TCP write).
4930 		 */
4931 		if (offset == -1) {
4932 			preamble = 0;
4933 		} else {
4934 			preamble = P2PHASE(offset, blksz);
4935 			if (preamble) {
4936 				preamble = blksz - preamble;
4937 				size -= preamble;
4938 			}
4939 		}
4940 
4941 		postamble = P2PHASE(size, blksz);
4942 		size -= postamble;
4943 
4944 		fullblk = size / blksz;
4945 		(void) dmu_xuio_init(xuio,
4946 		    (preamble != 0) + fullblk + (postamble != 0));
4947 		DTRACE_PROBE3(zfs_reqzcbuf_align, int, preamble,
4948 		    int, postamble, int,
4949 		    (preamble != 0) + fullblk + (postamble != 0));
4950 
4951 		/*
4952 		 * Have to fix iov base/len for partial buffers.  They
4953 		 * currently represent full arc_buf's.
4954 		 */
4955 		if (preamble) {
4956 			/* data begins in the middle of the arc_buf */
4957 			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4958 			    blksz);
4959 			ASSERT(abuf);
4960 			(void) dmu_xuio_add(xuio, abuf,
4961 			    blksz - preamble, preamble);
4962 		}
4963 
4964 		for (i = 0; i < fullblk; i++) {
4965 			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4966 			    blksz);
4967 			ASSERT(abuf);
4968 			(void) dmu_xuio_add(xuio, abuf, 0, blksz);
4969 		}
4970 
4971 		if (postamble) {
4972 			/* data ends in the middle of the arc_buf */
4973 			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4974 			    blksz);
4975 			ASSERT(abuf);
4976 			(void) dmu_xuio_add(xuio, abuf, 0, postamble);
4977 		}
4978 		break;
4979 	case UIO_READ:
4980 		/*
4981 		 * Loan out an arc_buf for read if the read size is larger than
4982 		 * the current file block size.  Block alignment is not
4983 		 * considered.  Partial arc_buf will be loaned out for read.
4984 		 */
4985 		blksz = zp->z_blksz;
4986 		if (blksz < zcr_blksz_min)
4987 			blksz = zcr_blksz_min;
4988 		if (blksz > zcr_blksz_max)
4989 			blksz = zcr_blksz_max;
4990 		/* avoid potential complexity of dealing with it */
4991 		if (blksz > max_blksz) {
4992 			ZFS_EXIT(zfsvfs);
4993 			return (EINVAL);
4994 		}
4995 
4996 		maxsize = zp->z_size - uio->uio_loffset;
4997 		if (size > maxsize)
4998 			size = maxsize;
4999 
5000 		if (size < blksz || vn_has_cached_data(vp)) {
5001 			ZFS_EXIT(zfsvfs);
5002 			return (EINVAL);
5003 		}
5004 		break;
5005 	default:
5006 		ZFS_EXIT(zfsvfs);
5007 		return (EINVAL);
5008 	}
5009 
5010 	uio->uio_extflg = UIO_XUIO;
5011 	XUIO_XUZC_RW(xuio) = ioflag;
5012 	ZFS_EXIT(zfsvfs);
5013 	return (0);
5014 }
5015 
5016 /*ARGSUSED*/
5017 static int
5018 zfs_retzcbuf(vnode_t *vp, xuio_t *xuio, cred_t *cr, caller_context_t *ct)
5019 {
5020 	int i;
5021 	arc_buf_t *abuf;
5022 	int ioflag = XUIO_XUZC_RW(xuio);
5023 
5024 	ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
5025 
5026 	i = dmu_xuio_cnt(xuio);
5027 	while (i-- > 0) {
5028 		abuf = dmu_xuio_arcbuf(xuio, i);
5029 		/*
5030 		 * if abuf == NULL, it must be a write buffer
5031 		 * that has been returned in zfs_write().
5032 		 */
5033 		if (abuf)
5034 			dmu_return_arcbuf(abuf);
5035 		ASSERT(abuf || ioflag == UIO_WRITE);
5036 	}
5037 
5038 	dmu_xuio_fini(xuio);
5039 	return (0);
5040 }
5041 
5042 /*
5043  * Predeclare these here so that the compiler assumes that
5044  * this is an "old style" function declaration that does
5045  * not include arguments => we won't get type mismatch errors
5046  * in the initializations that follow.
5047  */
5048 static int zfs_inval();
5049 static int zfs_isdir();
5050 
5051 static int
5052 zfs_inval()
5053 {
5054 	return (EINVAL);
5055 }
5056 
5057 static int
5058 zfs_isdir()
5059 {
5060 	return (EISDIR);
5061 }
5062 /*
5063  * Directory vnode operations template
5064  */
5065 vnodeops_t *zfs_dvnodeops;
5066 const fs_operation_def_t zfs_dvnodeops_template[] = {
5067 	VOPNAME_OPEN,		{ .vop_open = zfs_open },
5068 	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
5069 	VOPNAME_READ,		{ .error = zfs_isdir },
5070 	VOPNAME_WRITE,		{ .error = zfs_isdir },
5071 	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
5072 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5073 	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
5074 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5075 	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
5076 	VOPNAME_CREATE,		{ .vop_create = zfs_create },
5077 	VOPNAME_REMOVE,		{ .vop_remove = zfs_remove },
5078 	VOPNAME_LINK,		{ .vop_link = zfs_link },
5079 	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
5080 	VOPNAME_MKDIR,		{ .vop_mkdir = zfs_mkdir },
5081 	VOPNAME_RMDIR,		{ .vop_rmdir = zfs_rmdir },
5082 	VOPNAME_READDIR,	{ .vop_readdir = zfs_readdir },
5083 	VOPNAME_SYMLINK,	{ .vop_symlink = zfs_symlink },
5084 	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
5085 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5086 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5087 	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
5088 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5089 	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
5090 	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
5091 	VOPNAME_VNEVENT, 	{ .vop_vnevent = fs_vnevent_support },
5092 	NULL,			NULL
5093 };
5094 
5095 /*
5096  * Regular file vnode operations template
5097  */
5098 vnodeops_t *zfs_fvnodeops;
5099 const fs_operation_def_t zfs_fvnodeops_template[] = {
5100 	VOPNAME_OPEN,		{ .vop_open = zfs_open },
5101 	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
5102 	VOPNAME_READ,		{ .vop_read = zfs_read },
5103 	VOPNAME_WRITE,		{ .vop_write = zfs_write },
5104 	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
5105 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5106 	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
5107 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5108 	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
5109 	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
5110 	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
5111 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5112 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5113 	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
5114 	VOPNAME_FRLOCK,		{ .vop_frlock = zfs_frlock },
5115 	VOPNAME_SPACE,		{ .vop_space = zfs_space },
5116 	VOPNAME_GETPAGE,	{ .vop_getpage = zfs_getpage },
5117 	VOPNAME_PUTPAGE,	{ .vop_putpage = zfs_putpage },
5118 	VOPNAME_MAP,		{ .vop_map = zfs_map },
5119 	VOPNAME_ADDMAP,		{ .vop_addmap = zfs_addmap },
5120 	VOPNAME_DELMAP,		{ .vop_delmap = zfs_delmap },
5121 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5122 	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
5123 	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
5124 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5125 	VOPNAME_REQZCBUF, 	{ .vop_reqzcbuf = zfs_reqzcbuf },
5126 	VOPNAME_RETZCBUF, 	{ .vop_retzcbuf = zfs_retzcbuf },
5127 	NULL,			NULL
5128 };
5129 
5130 /*
5131  * Symbolic link vnode operations template
5132  */
5133 vnodeops_t *zfs_symvnodeops;
5134 const fs_operation_def_t zfs_symvnodeops_template[] = {
5135 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5136 	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
5137 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5138 	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
5139 	VOPNAME_READLINK,	{ .vop_readlink = zfs_readlink },
5140 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5141 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5142 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5143 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5144 	NULL,			NULL
5145 };
5146 
5147 /*
5148  * special share hidden files vnode operations template
5149  */
5150 vnodeops_t *zfs_sharevnodeops;
5151 const fs_operation_def_t zfs_sharevnodeops_template[] = {
5152 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5153 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5154 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5155 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5156 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5157 	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
5158 	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
5159 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5160 	NULL,			NULL
5161 };
5162 
5163 /*
5164  * Extended attribute directory vnode operations template
5165  *	This template is identical to the directory vnodes
5166  *	operation template except for restricted operations:
5167  *		VOP_MKDIR()
5168  *		VOP_SYMLINK()
5169  * Note that there are other restrictions embedded in:
5170  *	zfs_create()	- restrict type to VREG
5171  *	zfs_link()	- no links into/out of attribute space
5172  *	zfs_rename()	- no moves into/out of attribute space
5173  */
5174 vnodeops_t *zfs_xdvnodeops;
5175 const fs_operation_def_t zfs_xdvnodeops_template[] = {
5176 	VOPNAME_OPEN,		{ .vop_open = zfs_open },
5177 	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
5178 	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
5179 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5180 	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
5181 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5182 	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
5183 	VOPNAME_CREATE,		{ .vop_create = zfs_create },
5184 	VOPNAME_REMOVE,		{ .vop_remove = zfs_remove },
5185 	VOPNAME_LINK,		{ .vop_link = zfs_link },
5186 	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
5187 	VOPNAME_MKDIR,		{ .error = zfs_inval },
5188 	VOPNAME_RMDIR,		{ .vop_rmdir = zfs_rmdir },
5189 	VOPNAME_READDIR,	{ .vop_readdir = zfs_readdir },
5190 	VOPNAME_SYMLINK,	{ .error = zfs_inval },
5191 	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
5192 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5193 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5194 	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
5195 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5196 	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
5197 	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
5198 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5199 	NULL,			NULL
5200 };
5201 
5202 /*
5203  * Error vnode operations template
5204  */
5205 vnodeops_t *zfs_evnodeops;
5206 const fs_operation_def_t zfs_evnodeops_template[] = {
5207 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5208 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5209 	NULL,			NULL
5210 };
5211