xref: /titanic_51/usr/src/uts/common/fs/zfs/zfs_vnops.c (revision 2bb8e5e2ef829a38e0408e7207b4aa15f5eb947b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
24  * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
25  * Copyright (c) 2014 Integros [integros.com]
26  */
27 
28 /* Portions Copyright 2007 Jeremy Teo */
29 /* Portions Copyright 2010 Robert Milkowski */
30 
31 #include <sys/types.h>
32 #include <sys/param.h>
33 #include <sys/time.h>
34 #include <sys/systm.h>
35 #include <sys/sysmacros.h>
36 #include <sys/resource.h>
37 #include <sys/vfs.h>
38 #include <sys/vfs_opreg.h>
39 #include <sys/vnode.h>
40 #include <sys/file.h>
41 #include <sys/stat.h>
42 #include <sys/kmem.h>
43 #include <sys/taskq.h>
44 #include <sys/uio.h>
45 #include <sys/vmsystm.h>
46 #include <sys/atomic.h>
47 #include <sys/vm.h>
48 #include <vm/seg_vn.h>
49 #include <vm/pvn.h>
50 #include <vm/as.h>
51 #include <vm/kpm.h>
52 #include <vm/seg_kpm.h>
53 #include <sys/mman.h>
54 #include <sys/pathname.h>
55 #include <sys/cmn_err.h>
56 #include <sys/errno.h>
57 #include <sys/unistd.h>
58 #include <sys/zfs_dir.h>
59 #include <sys/zfs_acl.h>
60 #include <sys/zfs_ioctl.h>
61 #include <sys/fs/zfs.h>
62 #include <sys/dmu.h>
63 #include <sys/dmu_objset.h>
64 #include <sys/spa.h>
65 #include <sys/txg.h>
66 #include <sys/dbuf.h>
67 #include <sys/zap.h>
68 #include <sys/sa.h>
69 #include <sys/dirent.h>
70 #include <sys/policy.h>
71 #include <sys/sunddi.h>
72 #include <sys/filio.h>
73 #include <sys/sid.h>
74 #include "fs/fs_subr.h"
75 #include <sys/zfs_ctldir.h>
76 #include <sys/zfs_fuid.h>
77 #include <sys/zfs_sa.h>
78 #include <sys/dnlc.h>
79 #include <sys/zfs_rlock.h>
80 #include <sys/extdirent.h>
81 #include <sys/kidmap.h>
82 #include <sys/cred.h>
83 #include <sys/attr.h>
84 #include <sys/zfs_events.h>
85 
86 /*
87  * Programming rules.
88  *
89  * Each vnode op performs some logical unit of work.  To do this, the ZPL must
90  * properly lock its in-core state, create a DMU transaction, do the work,
91  * record this work in the intent log (ZIL), commit the DMU transaction,
92  * and wait for the intent log to commit if it is a synchronous operation.
93  * Moreover, the vnode ops must work in both normal and log replay context.
94  * The ordering of events is important to avoid deadlocks and references
95  * to freed memory.  The example below illustrates the following Big Rules:
96  *
97  *  (1)	A check must be made in each zfs thread for a mounted file system.
98  *	This is done avoiding races using ZFS_ENTER(zfsvfs).
99  *	A ZFS_EXIT(zfsvfs) is needed before all returns.  Any znodes
100  *	must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
101  *	can return EIO from the calling function.
102  *
103  *  (2)	VN_RELE() should always be the last thing except for zil_commit()
104  *	(if necessary) and ZFS_EXIT(). This is for 3 reasons:
105  *	First, if it's the last reference, the vnode/znode
106  *	can be freed, so the zp may point to freed memory.  Second, the last
107  *	reference will call zfs_zinactive(), which may induce a lot of work --
108  *	pushing cached pages (which acquires range locks) and syncing out
109  *	cached atime changes.  Third, zfs_zinactive() may require a new tx,
110  *	which could deadlock the system if you were already holding one.
111  *	If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
112  *
113  *  (3)	All range locks must be grabbed before calling dmu_tx_assign(),
114  *	as they can span dmu_tx_assign() calls.
115  *
116  *  (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
117  *      dmu_tx_assign().  This is critical because we don't want to block
118  *      while holding locks.
119  *
120  *	If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT.  This
121  *	reduces lock contention and CPU usage when we must wait (note that if
122  *	throughput is constrained by the storage, nearly every transaction
123  *	must wait).
124  *
125  *      Note, in particular, that if a lock is sometimes acquired before
126  *      the tx assigns, and sometimes after (e.g. z_lock), then failing
127  *      to use a non-blocking assign can deadlock the system.  The scenario:
128  *
129  *	Thread A has grabbed a lock before calling dmu_tx_assign().
130  *	Thread B is in an already-assigned tx, and blocks for this lock.
131  *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
132  *	forever, because the previous txg can't quiesce until B's tx commits.
133  *
134  *	If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
135  *	then drop all locks, call dmu_tx_wait(), and try again.  On subsequent
136  *	calls to dmu_tx_assign(), pass TXG_WAITED rather than TXG_NOWAIT,
137  *	to indicate that this operation has already called dmu_tx_wait().
138  *	This will ensure that we don't retry forever, waiting a short bit
139  *	each time.
140  *
141  *  (5)	If the operation succeeded, generate the intent log entry for it
142  *	before dropping locks.  This ensures that the ordering of events
143  *	in the intent log matches the order in which they actually occurred.
144  *	During ZIL replay the zfs_log_* functions will update the sequence
145  *	number to indicate the zil transaction has replayed.
146  *
147  *  (6)	At the end of each vnode op, the DMU tx must always commit,
148  *	regardless of whether there were any errors.
149  *
150  *  (7)	After dropping all locks, invoke zil_commit(zilog, foid)
151  *	to ensure that synchronous semantics are provided when necessary.
152  *
153  * In general, this is how things should be ordered in each vnode op:
154  *
155  *	ZFS_ENTER(zfsvfs);		// exit if unmounted
156  * top:
157  *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may VN_HOLD())
158  *	rw_enter(...);			// grab any other locks you need
159  *	tx = dmu_tx_create(...);	// get DMU tx
160  *	dmu_tx_hold_*();		// hold each object you might modify
161  *	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
162  *	if (error) {
163  *		rw_exit(...);		// drop locks
164  *		zfs_dirent_unlock(dl);	// unlock directory entry
165  *		VN_RELE(...);		// release held vnodes
166  *		if (error == ERESTART) {
167  *			waited = B_TRUE;
168  *			dmu_tx_wait(tx);
169  *			dmu_tx_abort(tx);
170  *			goto top;
171  *		}
172  *		dmu_tx_abort(tx);	// abort DMU tx
173  *		ZFS_EXIT(zfsvfs);	// finished in zfs
174  *		return (error);		// really out of space
175  *	}
176  *	error = do_real_work();		// do whatever this VOP does
177  *	if (error == 0)
178  *		zfs_log_*(...);		// on success, make ZIL entry
179  *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
180  *	rw_exit(...);			// drop locks
181  *	zfs_dirent_unlock(dl);		// unlock directory entry
182  *	VN_RELE(...);			// release held vnodes
183  *	zil_commit(zilog, foid);	// synchronous when necessary
184  *	ZFS_EXIT(zfsvfs);		// finished in zfs
185  *	return (error);			// done, report error
186  */
187 
188 /* ARGSUSED */
189 static int
190 zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
191 {
192 	znode_t	*zp = VTOZ(*vpp);
193 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
194 
195 	ZFS_ENTER(zfsvfs);
196 	ZFS_VERIFY_ZP(zp);
197 
198 	if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
199 	    ((flag & FAPPEND) == 0)) {
200 		ZFS_EXIT(zfsvfs);
201 		return (SET_ERROR(EPERM));
202 	}
203 
204 	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
205 	    ZTOV(zp)->v_type == VREG &&
206 	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
207 		if (fs_vscan(*vpp, cr, 0) != 0) {
208 			ZFS_EXIT(zfsvfs);
209 			return (SET_ERROR(EACCES));
210 		}
211 	}
212 
213 	/* Keep a count of the synchronous opens in the znode */
214 	if (flag & (FSYNC | FDSYNC))
215 		atomic_inc_32(&zp->z_sync_cnt);
216 
217 	ZFS_EXIT(zfsvfs);
218 	return (0);
219 }
220 
221 /* ARGSUSED */
222 static int
223 zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
224     caller_context_t *ct)
225 {
226 	znode_t	*zp = VTOZ(vp);
227 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
228 
229 	/*
230 	 * Clean up any locks held by this process on the vp.
231 	 */
232 	cleanlocks(vp, ddi_get_pid(), 0);
233 	cleanshares(vp, ddi_get_pid());
234 
235 	ZFS_ENTER(zfsvfs);
236 	ZFS_VERIFY_ZP(zp);
237 
238 	/* Decrement the synchronous opens in the znode */
239 	if ((flag & (FSYNC | FDSYNC)) && (count == 1))
240 		atomic_dec_32(&zp->z_sync_cnt);
241 
242 	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
243 	    ZTOV(zp)->v_type == VREG &&
244 	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
245 		VERIFY(fs_vscan(vp, cr, 1) == 0);
246 
247 	if (ZTOV(zp)->v_type == VREG && (flag & FWRITE) &&
248 	    zp->z_vnode->v_wrcnt <= 1 && zp->z_new_content) {
249 		zp->z_new_content = 0;
250 		rw_enter(&rz_zev_rwlock, RW_READER);
251 		if (rz_zev_callbacks &&
252 		    rz_zev_callbacks->rz_zev_znode_close_after_update)
253 			rz_zev_callbacks->rz_zev_znode_close_after_update(zp);
254 		rw_exit(&rz_zev_rwlock);
255 	}
256 
257 	ZFS_EXIT(zfsvfs);
258 	return (0);
259 }
260 
261 /*
262  * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
263  * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter.
264  */
265 static int
266 zfs_holey(vnode_t *vp, int cmd, offset_t *off)
267 {
268 	znode_t	*zp = VTOZ(vp);
269 	uint64_t noff = (uint64_t)*off; /* new offset */
270 	uint64_t file_sz;
271 	int error;
272 	boolean_t hole;
273 
274 	file_sz = zp->z_size;
275 	if (noff >= file_sz)  {
276 		return (SET_ERROR(ENXIO));
277 	}
278 
279 	if (cmd == _FIO_SEEK_HOLE)
280 		hole = B_TRUE;
281 	else
282 		hole = B_FALSE;
283 
284 	error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);
285 
286 	if (error == ESRCH)
287 		return (SET_ERROR(ENXIO));
288 
289 	/*
290 	 * We could find a hole that begins after the logical end-of-file,
291 	 * because dmu_offset_next() only works on whole blocks.  If the
292 	 * EOF falls mid-block, then indicate that the "virtual hole"
293 	 * at the end of the file begins at the logical EOF, rather than
294 	 * at the end of the last block.
295 	 */
296 	if (noff > file_sz) {
297 		ASSERT(hole);
298 		noff = file_sz;
299 	}
300 
301 	if (noff < *off)
302 		return (error);
303 	*off = noff;
304 	return (error);
305 }
306 
307 /* ARGSUSED */
308 static int
309 zfs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred,
310     int *rvalp, caller_context_t *ct)
311 {
312 	offset_t off;
313 	offset_t ndata;
314 	dmu_object_info_t doi;
315 	int error;
316 	zfsvfs_t *zfsvfs;
317 	znode_t *zp;
318 
319 	switch (com) {
320 	case _FIOFFS:
321 	{
322 		return (zfs_sync(vp->v_vfsp, 0, cred));
323 
324 		/*
325 		 * The following two ioctls are used by bfu.  Faking out,
326 		 * necessary to avoid bfu errors.
327 		 */
328 	}
329 	case _FIOGDIO:
330 	case _FIOSDIO:
331 	{
332 		return (0);
333 	}
334 
335 	case _FIO_SEEK_DATA:
336 	case _FIO_SEEK_HOLE:
337 	{
338 		if (ddi_copyin((void *)data, &off, sizeof (off), flag))
339 			return (SET_ERROR(EFAULT));
340 
341 		zp = VTOZ(vp);
342 		zfsvfs = zp->z_zfsvfs;
343 		ZFS_ENTER(zfsvfs);
344 		ZFS_VERIFY_ZP(zp);
345 
346 		/* offset parameter is in/out */
347 		error = zfs_holey(vp, com, &off);
348 		ZFS_EXIT(zfsvfs);
349 		if (error)
350 			return (error);
351 		if (ddi_copyout(&off, (void *)data, sizeof (off), flag))
352 			return (SET_ERROR(EFAULT));
353 		return (0);
354 	}
355 	case _FIO_COUNT_FILLED:
356 	{
357 		/*
358 		 * _FIO_COUNT_FILLED adds a new ioctl command which
359 		 * exposes the number of filled blocks in a
360 		 * ZFS object.
361 		 */
362 		zp = VTOZ(vp);
363 		zfsvfs = zp->z_zfsvfs;
364 		ZFS_ENTER(zfsvfs);
365 		ZFS_VERIFY_ZP(zp);
366 
367 		/*
368 		 * Wait for all dirty blocks for this object
369 		 * to get synced out to disk, and the DMU info
370 		 * updated.
371 		 */
372 		error = dmu_object_wait_synced(zfsvfs->z_os, zp->z_id);
373 		if (error) {
374 			ZFS_EXIT(zfsvfs);
375 			return (error);
376 		}
377 
378 		/*
379 		 * Retrieve fill count from DMU object.
380 		 */
381 		error = dmu_object_info(zfsvfs->z_os, zp->z_id, &doi);
382 		if (error) {
383 			ZFS_EXIT(zfsvfs);
384 			return (error);
385 		}
386 
387 		ndata = doi.doi_fill_count;
388 
389 		ZFS_EXIT(zfsvfs);
390 		if (ddi_copyout(&ndata, (void *)data, sizeof (ndata), flag))
391 			return (SET_ERROR(EFAULT));
392 		return (0);
393 	}
394 	}
395 	return (SET_ERROR(ENOTTY));
396 }
397 
398 /*
399  * Utility functions to map and unmap a single physical page.  These
400  * are used to manage the mappable copies of ZFS file data, and therefore
401  * do not update ref/mod bits.
402  */
403 caddr_t
404 zfs_map_page(page_t *pp, enum seg_rw rw)
405 {
406 	if (kpm_enable)
407 		return (hat_kpm_mapin(pp, 0));
408 	ASSERT(rw == S_READ || rw == S_WRITE);
409 	return (ppmapin(pp, PROT_READ | ((rw == S_WRITE) ? PROT_WRITE : 0),
410 	    (caddr_t)-1));
411 }
412 
413 void
414 zfs_unmap_page(page_t *pp, caddr_t addr)
415 {
416 	if (kpm_enable) {
417 		hat_kpm_mapout(pp, 0, addr);
418 	} else {
419 		ppmapout(addr);
420 	}
421 }
422 
423 /*
424  * When a file is memory mapped, we must keep the IO data synchronized
425  * between the DMU cache and the memory mapped pages.  What this means:
426  *
427  * On Write:	If we find a memory mapped page, we write to *both*
428  *		the page and the dmu buffer.
429  */
430 static void
431 update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid)
432 {
433 	int64_t	off;
434 
435 	off = start & PAGEOFFSET;
436 	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
437 		page_t *pp;
438 		uint64_t nbytes = MIN(PAGESIZE - off, len);
439 
440 		if (pp = page_lookup(vp, start, SE_SHARED)) {
441 			caddr_t va;
442 
443 			va = zfs_map_page(pp, S_WRITE);
444 			(void) dmu_read(os, oid, start+off, nbytes, va+off,
445 			    DMU_READ_PREFETCH);
446 			zfs_unmap_page(pp, va);
447 			page_unlock(pp);
448 		}
449 		len -= nbytes;
450 		off = 0;
451 	}
452 }
453 
454 /*
455  * When a file is memory mapped, we must keep the IO data synchronized
456  * between the DMU cache and the memory mapped pages.  What this means:
457  *
458  * On Read:	We "read" preferentially from memory mapped pages,
459  *		else we default from the dmu buffer.
460  *
461  * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
462  *	 the file is memory mapped.
463  */
464 static int
465 mappedread(vnode_t *vp, int nbytes, uio_t *uio)
466 {
467 	znode_t *zp = VTOZ(vp);
468 	int64_t	start, off;
469 	int len = nbytes;
470 	int error = 0;
471 
472 	start = uio->uio_loffset;
473 	off = start & PAGEOFFSET;
474 	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
475 		page_t *pp;
476 		uint64_t bytes = MIN(PAGESIZE - off, len);
477 
478 		if (pp = page_lookup(vp, start, SE_SHARED)) {
479 			caddr_t va;
480 
481 			va = zfs_map_page(pp, S_READ);
482 			error = uiomove(va + off, bytes, UIO_READ, uio);
483 			zfs_unmap_page(pp, va);
484 			page_unlock(pp);
485 		} else {
486 			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
487 			    uio, bytes);
488 		}
489 		len -= bytes;
490 		off = 0;
491 		if (error)
492 			break;
493 	}
494 	return (error);
495 }
496 
497 offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
498 
499 /*
500  * Read bytes from specified file into supplied buffer.
501  *
502  *	IN:	vp	- vnode of file to be read from.
503  *		uio	- structure supplying read location, range info,
504  *			  and return buffer.
505  *		ioflag	- SYNC flags; used to provide FRSYNC semantics.
506  *		cr	- credentials of caller.
507  *		ct	- caller context
508  *
509  *	OUT:	uio	- updated offset and range, buffer filled.
510  *
511  *	RETURN:	0 on success, error code on failure.
512  *
513  * Side Effects:
514  *	vp - atime updated if byte count > 0
515  */
516 /* ARGSUSED */
517 static int
518 zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
519 {
520 	znode_t		*zp = VTOZ(vp);
521 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
522 	ssize_t		n, nbytes;
523 	int		error = 0;
524 	rl_t		*rl;
525 	xuio_t		*xuio = NULL;
526 
527 	ZFS_ENTER(zfsvfs);
528 	ZFS_VERIFY_ZP(zp);
529 
530 	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
531 		ZFS_EXIT(zfsvfs);
532 		return (SET_ERROR(EACCES));
533 	}
534 
535 	/*
536 	 * Validate file offset
537 	 */
538 	if (uio->uio_loffset < (offset_t)0) {
539 		ZFS_EXIT(zfsvfs);
540 		return (SET_ERROR(EINVAL));
541 	}
542 
543 	/*
544 	 * Fasttrack empty reads
545 	 */
546 	if (uio->uio_resid == 0) {
547 		ZFS_EXIT(zfsvfs);
548 		return (0);
549 	}
550 
551 	/*
552 	 * Check for mandatory locks
553 	 */
554 	if (MANDMODE(zp->z_mode)) {
555 		if (error = chklock(vp, FREAD,
556 		    uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {
557 			ZFS_EXIT(zfsvfs);
558 			return (error);
559 		}
560 	}
561 
562 	/*
563 	 * If we're in FRSYNC mode, sync out this znode before reading it.
564 	 */
565 	if (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
566 		zil_commit(zfsvfs->z_log, zp->z_id);
567 
568 	/*
569 	 * Lock the range against changes.
570 	 */
571 	rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);
572 
573 	/*
574 	 * If we are reading past end-of-file we can skip
575 	 * to the end; but we might still need to set atime.
576 	 */
577 	if (uio->uio_loffset >= zp->z_size) {
578 		error = 0;
579 		goto out;
580 	}
581 
582 	ASSERT(uio->uio_loffset < zp->z_size);
583 	n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
584 
585 	if ((uio->uio_extflg == UIO_XUIO) &&
586 	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
587 		int nblk;
588 		int blksz = zp->z_blksz;
589 		uint64_t offset = uio->uio_loffset;
590 
591 		xuio = (xuio_t *)uio;
592 		if ((ISP2(blksz))) {
593 			nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
594 			    blksz)) / blksz;
595 		} else {
596 			ASSERT(offset + n <= blksz);
597 			nblk = 1;
598 		}
599 		(void) dmu_xuio_init(xuio, nblk);
600 
601 		if (vn_has_cached_data(vp)) {
602 			/*
603 			 * For simplicity, we always allocate a full buffer
604 			 * even if we only expect to read a portion of a block.
605 			 */
606 			while (--nblk >= 0) {
607 				(void) dmu_xuio_add(xuio,
608 				    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
609 				    blksz), 0, blksz);
610 			}
611 		}
612 	}
613 
614 	while (n > 0) {
615 		nbytes = MIN(n, zfs_read_chunk_size -
616 		    P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
617 
618 		if (vn_has_cached_data(vp)) {
619 			error = mappedread(vp, nbytes, uio);
620 		} else {
621 			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
622 			    uio, nbytes);
623 		}
624 		if (error) {
625 			/* convert checksum errors into IO errors */
626 			if (error == ECKSUM)
627 				error = SET_ERROR(EIO);
628 			break;
629 		}
630 
631 		n -= nbytes;
632 	}
633 out:
634 	zfs_range_unlock(rl);
635 
636 	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
637 	ZFS_EXIT(zfsvfs);
638 	return (error);
639 }
640 
641 /*
642  * Write the bytes to a file.
643  *
644  *	IN:	vp	- vnode of file to be written to.
645  *		uio	- structure supplying write location, range info,
646  *			  and data buffer.
647  *		ioflag	- FAPPEND, FSYNC, and/or FDSYNC.  FAPPEND is
648  *			  set if in append mode.
649  *		cr	- credentials of caller.
650  *		ct	- caller context (NFS/CIFS fem monitor only)
651  *
652  *	OUT:	uio	- updated offset and range.
653  *
654  *	RETURN:	0 on success, error code on failure.
655  *
656  * Timestamps:
657  *	vp - ctime|mtime updated if byte count > 0
658  */
659 
660 /* ARGSUSED */
661 static int
662 zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
663 {
664 	znode_t		*zp = VTOZ(vp);
665 	rlim64_t	limit = uio->uio_llimit;
666 	ssize_t		start_resid = uio->uio_resid;
667 	ssize_t		tx_bytes;
668 	uint64_t	end_size;
669 	dmu_tx_t	*tx;
670 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
671 	zilog_t		*zilog;
672 	offset_t	woff;
673 	ssize_t		n, nbytes;
674 	rl_t		*rl;
675 	int		max_blksz = zfsvfs->z_max_blksz;
676 	int		error = 0;
677 	arc_buf_t	*abuf;
678 	iovec_t		*aiov = NULL;
679 	xuio_t		*xuio = NULL;
680 	int		i_iov = 0;
681 	int		iovcnt = uio->uio_iovcnt;
682 	iovec_t		*iovp = uio->uio_iov;
683 	int		write_eof;
684 	int		count = 0;
685 	sa_bulk_attr_t	bulk[4];
686 	uint64_t	mtime[2], ctime[2];
687 
688 	/*
689 	 * Fasttrack empty write
690 	 */
691 	n = start_resid;
692 	if (n == 0)
693 		return (0);
694 
695 	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
696 		limit = MAXOFFSET_T;
697 
698 	ZFS_ENTER(zfsvfs);
699 	ZFS_VERIFY_ZP(zp);
700 
701 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
702 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
703 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
704 	    &zp->z_size, 8);
705 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
706 	    &zp->z_pflags, 8);
707 
708 	/*
709 	 * In a case vp->v_vfsp != zp->z_zfsvfs->z_vfs (e.g. snapshots) our
710 	 * callers might not be able to detect properly that we are read-only,
711 	 * so check it explicitly here.
712 	 */
713 	if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
714 		ZFS_EXIT(zfsvfs);
715 		return (SET_ERROR(EROFS));
716 	}
717 
718 	/*
719 	 * If immutable or not appending then return EPERM
720 	 */
721 	if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
722 	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
723 	    (uio->uio_loffset < zp->z_size))) {
724 		ZFS_EXIT(zfsvfs);
725 		return (SET_ERROR(EPERM));
726 	}
727 
728 	zilog = zfsvfs->z_log;
729 
730 	/*
731 	 * Validate file offset
732 	 */
733 	woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
734 	if (woff < 0) {
735 		ZFS_EXIT(zfsvfs);
736 		return (SET_ERROR(EINVAL));
737 	}
738 
739 	/*
740 	 * Check for mandatory locks before calling zfs_range_lock()
741 	 * in order to prevent a deadlock with locks set via fcntl().
742 	 */
743 	if (MANDMODE((mode_t)zp->z_mode) &&
744 	    (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {
745 		ZFS_EXIT(zfsvfs);
746 		return (error);
747 	}
748 
749 	/*
750 	 * Pre-fault the pages to ensure slow (eg NFS) pages
751 	 * don't hold up txg.
752 	 * Skip this if uio contains loaned arc_buf.
753 	 */
754 	if ((uio->uio_extflg == UIO_XUIO) &&
755 	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
756 		xuio = (xuio_t *)uio;
757 	else
758 		uio_prefaultpages(MIN(n, max_blksz), uio);
759 
760 	/*
761 	 * If in append mode, set the io offset pointer to eof.
762 	 */
763 	if (ioflag & FAPPEND) {
764 		/*
765 		 * Obtain an appending range lock to guarantee file append
766 		 * semantics.  We reset the write offset once we have the lock.
767 		 */
768 		rl = zfs_range_lock(zp, 0, n, RL_APPEND);
769 		woff = rl->r_off;
770 		if (rl->r_len == UINT64_MAX) {
771 			/*
772 			 * We overlocked the file because this write will cause
773 			 * the file block size to increase.
774 			 * Note that zp_size cannot change with this lock held.
775 			 */
776 			woff = zp->z_size;
777 		}
778 		uio->uio_loffset = woff;
779 	} else {
780 		/*
781 		 * Note that if the file block size will change as a result of
782 		 * this write, then this range lock will lock the entire file
783 		 * so that we can re-write the block safely.
784 		 */
785 		rl = zfs_range_lock(zp, woff, n, RL_WRITER);
786 	}
787 
788 	if (woff >= limit) {
789 		zfs_range_unlock(rl);
790 		ZFS_EXIT(zfsvfs);
791 		return (SET_ERROR(EFBIG));
792 	}
793 
794 	if ((woff + n) > limit || woff > (limit - n))
795 		n = limit - woff;
796 
797 	/* Will this write extend the file length? */
798 	write_eof = (woff + n > zp->z_size);
799 
800 	end_size = MAX(zp->z_size, woff + n);
801 
802 	/*
803 	 * Write the file in reasonable size chunks.  Each chunk is written
804 	 * in a separate transaction; this keeps the intent log records small
805 	 * and allows us to do more fine-grained space accounting.
806 	 */
807 	while (n > 0) {
808 		abuf = NULL;
809 		woff = uio->uio_loffset;
810 		if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
811 		    zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
812 			if (abuf != NULL)
813 				dmu_return_arcbuf(abuf);
814 			error = SET_ERROR(EDQUOT);
815 			break;
816 		}
817 
818 		if (xuio && abuf == NULL) {
819 			ASSERT(i_iov < iovcnt);
820 			aiov = &iovp[i_iov];
821 			abuf = dmu_xuio_arcbuf(xuio, i_iov);
822 			dmu_xuio_clear(xuio, i_iov);
823 			DTRACE_PROBE3(zfs_cp_write, int, i_iov,
824 			    iovec_t *, aiov, arc_buf_t *, abuf);
825 			ASSERT((aiov->iov_base == abuf->b_data) ||
826 			    ((char *)aiov->iov_base - (char *)abuf->b_data +
827 			    aiov->iov_len == arc_buf_size(abuf)));
828 			i_iov++;
829 		} else if (abuf == NULL && n >= max_blksz &&
830 		    woff >= zp->z_size &&
831 		    P2PHASE(woff, max_blksz) == 0 &&
832 		    zp->z_blksz == max_blksz) {
833 			/*
834 			 * This write covers a full block.  "Borrow" a buffer
835 			 * from the dmu so that we can fill it before we enter
836 			 * a transaction.  This avoids the possibility of
837 			 * holding up the transaction if the data copy hangs
838 			 * up on a pagefault (e.g., from an NFS server mapping).
839 			 */
840 			size_t cbytes;
841 
842 			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
843 			    max_blksz);
844 			ASSERT(abuf != NULL);
845 			ASSERT(arc_buf_size(abuf) == max_blksz);
846 			if (error = uiocopy(abuf->b_data, max_blksz,
847 			    UIO_WRITE, uio, &cbytes)) {
848 				dmu_return_arcbuf(abuf);
849 				break;
850 			}
851 			ASSERT(cbytes == max_blksz);
852 		}
853 
854 		/*
855 		 * Start a transaction.
856 		 */
857 		tx = dmu_tx_create(zfsvfs->z_os);
858 		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
859 		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
860 		zfs_sa_upgrade_txholds(tx, zp);
861 		error = dmu_tx_assign(tx, TXG_WAIT);
862 		if (error) {
863 			dmu_tx_abort(tx);
864 			if (abuf != NULL)
865 				dmu_return_arcbuf(abuf);
866 			break;
867 		}
868 
869 		/*
870 		 * If zfs_range_lock() over-locked we grow the blocksize
871 		 * and then reduce the lock range.  This will only happen
872 		 * on the first iteration since zfs_range_reduce() will
873 		 * shrink down r_len to the appropriate size.
874 		 */
875 		if (rl->r_len == UINT64_MAX) {
876 			uint64_t new_blksz;
877 
878 			if (zp->z_blksz > max_blksz) {
879 				/*
880 				 * File's blocksize is already larger than the
881 				 * "recordsize" property.  Only let it grow to
882 				 * the next power of 2.
883 				 */
884 				ASSERT(!ISP2(zp->z_blksz));
885 				new_blksz = MIN(end_size,
886 				    1 << highbit64(zp->z_blksz));
887 			} else {
888 				new_blksz = MIN(end_size, max_blksz);
889 			}
890 			zfs_grow_blocksize(zp, new_blksz, tx);
891 			zfs_range_reduce(rl, woff, n);
892 		}
893 
894 		/*
895 		 * XXX - should we really limit each write to z_max_blksz?
896 		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
897 		 */
898 		nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
899 
900 		if (abuf == NULL) {
901 			tx_bytes = uio->uio_resid;
902 			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
903 			    uio, nbytes, tx);
904 			tx_bytes -= uio->uio_resid;
905 		} else {
906 			tx_bytes = nbytes;
907 			ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
908 			/*
909 			 * If this is not a full block write, but we are
910 			 * extending the file past EOF and this data starts
911 			 * block-aligned, use assign_arcbuf().  Otherwise,
912 			 * write via dmu_write().
913 			 */
914 			if (tx_bytes < max_blksz && (!write_eof ||
915 			    aiov->iov_base != abuf->b_data)) {
916 				ASSERT(xuio);
917 				dmu_write(zfsvfs->z_os, zp->z_id, woff,
918 				    aiov->iov_len, aiov->iov_base, tx);
919 				dmu_return_arcbuf(abuf);
920 				xuio_stat_wbuf_copied();
921 			} else {
922 				ASSERT(xuio || tx_bytes == max_blksz);
923 				dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
924 				    woff, abuf, tx);
925 			}
926 			ASSERT(tx_bytes <= uio->uio_resid);
927 			uioskip(uio, tx_bytes);
928 		}
929 		if (tx_bytes && vn_has_cached_data(vp)) {
930 			update_pages(vp, woff,
931 			    tx_bytes, zfsvfs->z_os, zp->z_id);
932 		}
933 
934 		/*
935 		 * If we made no progress, we're done.  If we made even
936 		 * partial progress, update the znode and ZIL accordingly.
937 		 */
938 		if (tx_bytes == 0) {
939 			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
940 			    (void *)&zp->z_size, sizeof (uint64_t), tx);
941 			dmu_tx_commit(tx);
942 			ASSERT(error != 0);
943 			break;
944 		}
945 
946 		/*
947 		 * Clear Set-UID/Set-GID bits on successful write if not
948 		 * privileged and at least one of the excute bits is set.
949 		 *
950 		 * It would be nice to to this after all writes have
951 		 * been done, but that would still expose the ISUID/ISGID
952 		 * to another app after the partial write is committed.
953 		 *
954 		 * Note: we don't call zfs_fuid_map_id() here because
955 		 * user 0 is not an ephemeral uid.
956 		 */
957 		mutex_enter(&zp->z_acl_lock);
958 		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
959 		    (S_IXUSR >> 6))) != 0 &&
960 		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
961 		    secpolicy_vnode_setid_retain(cr,
962 		    (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {
963 			uint64_t newmode;
964 			zp->z_mode &= ~(S_ISUID | S_ISGID);
965 			newmode = zp->z_mode;
966 			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
967 			    (void *)&newmode, sizeof (uint64_t), tx);
968 		}
969 		mutex_exit(&zp->z_acl_lock);
970 
971 		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
972 		    B_TRUE);
973 
974 		/*
975 		 * Update the file size (zp_size) if it has changed;
976 		 * account for possible concurrent updates.
977 		 */
978 		while ((end_size = zp->z_size) < uio->uio_loffset) {
979 			(void) atomic_cas_64(&zp->z_size, end_size,
980 			    uio->uio_loffset);
981 			ASSERT(error == 0);
982 		}
983 		/*
984 		 * If we are replaying and eof is non zero then force
985 		 * the file size to the specified eof. Note, there's no
986 		 * concurrency during replay.
987 		 */
988 		if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
989 			zp->z_size = zfsvfs->z_replay_eof;
990 
991 		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
992 
993 		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag);
994 		dmu_tx_commit(tx);
995 
996 		if (error != 0)
997 			break;
998 		ASSERT(tx_bytes == nbytes);
999 		n -= nbytes;
1000 
1001 		if (!xuio && n > 0)
1002 			uio_prefaultpages(MIN(n, max_blksz), uio);
1003 	}
1004 
1005 	zfs_range_unlock(rl);
1006 
1007 	/*
1008 	 * If we're in replay mode, or we made no progress, return error.
1009 	 * Otherwise, it's at least a partial write, so it's successful.
1010 	 */
1011 	if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
1012 		ZFS_EXIT(zfsvfs);
1013 		return (error);
1014 	}
1015 
1016 	if (ioflag & (FSYNC | FDSYNC) ||
1017 	    zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1018 		zil_commit(zilog, zp->z_id);
1019 
1020 	ZFS_EXIT(zfsvfs);
1021 	return (0);
1022 }
1023 
1024 void
1025 zfs_get_done(zgd_t *zgd, int error)
1026 {
1027 	znode_t *zp = zgd->zgd_private;
1028 	objset_t *os = zp->z_zfsvfs->z_os;
1029 
1030 	if (zgd->zgd_db)
1031 		dmu_buf_rele(zgd->zgd_db, zgd);
1032 
1033 	zfs_range_unlock(zgd->zgd_rl);
1034 
1035 	/*
1036 	 * Release the vnode asynchronously as we currently have the
1037 	 * txg stopped from syncing.
1038 	 */
1039 	VN_RELE_ASYNC(ZTOV(zp), dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
1040 
1041 	if (error == 0 && zgd->zgd_bp)
1042 		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
1043 
1044 	kmem_free(zgd, sizeof (zgd_t));
1045 }
1046 
1047 #ifdef DEBUG
1048 static int zil_fault_io = 0;
1049 #endif
1050 
1051 /*
1052  * Get data to generate a TX_WRITE intent log record.
1053  */
1054 int
1055 zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
1056 {
1057 	zfsvfs_t *zfsvfs = arg;
1058 	objset_t *os = zfsvfs->z_os;
1059 	znode_t *zp;
1060 	uint64_t object = lr->lr_foid;
1061 	uint64_t offset = lr->lr_offset;
1062 	uint64_t size = lr->lr_length;
1063 	blkptr_t *bp = &lr->lr_blkptr;
1064 	dmu_buf_t *db;
1065 	zgd_t *zgd;
1066 	int error = 0;
1067 
1068 	ASSERT(zio != NULL);
1069 	ASSERT(size != 0);
1070 
1071 	/*
1072 	 * Nothing to do if the file has been removed
1073 	 */
1074 	if (zfs_zget(zfsvfs, object, &zp) != 0)
1075 		return (SET_ERROR(ENOENT));
1076 	if (zp->z_unlinked) {
1077 		/*
1078 		 * Release the vnode asynchronously as we currently have the
1079 		 * txg stopped from syncing.
1080 		 */
1081 		VN_RELE_ASYNC(ZTOV(zp),
1082 		    dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
1083 		return (SET_ERROR(ENOENT));
1084 	}
1085 
1086 	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1087 	zgd->zgd_zilog = zfsvfs->z_log;
1088 	zgd->zgd_private = zp;
1089 
1090 	/*
1091 	 * Write records come in two flavors: immediate and indirect.
1092 	 * For small writes it's cheaper to store the data with the
1093 	 * log record (immediate); for large writes it's cheaper to
1094 	 * sync the data and get a pointer to it (indirect) so that
1095 	 * we don't have to write the data twice.
1096 	 */
1097 	if (buf != NULL) { /* immediate write */
1098 		zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
1099 		/* test for truncation needs to be done while range locked */
1100 		if (offset >= zp->z_size) {
1101 			error = SET_ERROR(ENOENT);
1102 		} else {
1103 			error = dmu_read(os, object, offset, size, buf,
1104 			    DMU_READ_NO_PREFETCH);
1105 		}
1106 		ASSERT(error == 0 || error == ENOENT);
1107 	} else { /* indirect write */
1108 		/*
1109 		 * Have to lock the whole block to ensure when it's
1110 		 * written out and it's checksum is being calculated
1111 		 * that no one can change the data. We need to re-check
1112 		 * blocksize after we get the lock in case it's changed!
1113 		 */
1114 		for (;;) {
1115 			uint64_t blkoff;
1116 			size = zp->z_blksz;
1117 			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
1118 			offset -= blkoff;
1119 			zgd->zgd_rl = zfs_range_lock(zp, offset, size,
1120 			    RL_READER);
1121 			if (zp->z_blksz == size)
1122 				break;
1123 			offset += blkoff;
1124 			zfs_range_unlock(zgd->zgd_rl);
1125 		}
1126 		/* test for truncation needs to be done while range locked */
1127 		if (lr->lr_offset >= zp->z_size)
1128 			error = SET_ERROR(ENOENT);
1129 #ifdef DEBUG
1130 		if (zil_fault_io) {
1131 			error = SET_ERROR(EIO);
1132 			zil_fault_io = 0;
1133 		}
1134 #endif
1135 		if (error == 0)
1136 			error = dmu_buf_hold(os, object, offset, zgd, &db,
1137 			    DMU_READ_NO_PREFETCH);
1138 
1139 		if (error == 0) {
1140 			blkptr_t *obp = dmu_buf_get_blkptr(db);
1141 			if (obp) {
1142 				ASSERT(BP_IS_HOLE(bp));
1143 				*bp = *obp;
1144 			}
1145 
1146 			zgd->zgd_db = db;
1147 			zgd->zgd_bp = bp;
1148 
1149 			ASSERT(db->db_offset == offset);
1150 			ASSERT(db->db_size == size);
1151 
1152 			error = dmu_sync(zio, lr->lr_common.lrc_txg,
1153 			    zfs_get_done, zgd);
1154 			ASSERT(error || lr->lr_length <= zp->z_blksz);
1155 
1156 			/*
1157 			 * On success, we need to wait for the write I/O
1158 			 * initiated by dmu_sync() to complete before we can
1159 			 * release this dbuf.  We will finish everything up
1160 			 * in the zfs_get_done() callback.
1161 			 */
1162 			if (error == 0)
1163 				return (0);
1164 
1165 			if (error == EALREADY) {
1166 				lr->lr_common.lrc_txtype = TX_WRITE2;
1167 				error = 0;
1168 			}
1169 		}
1170 	}
1171 
1172 	zfs_get_done(zgd, error);
1173 
1174 	return (error);
1175 }
1176 
1177 /*ARGSUSED*/
1178 static int
1179 zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr,
1180     caller_context_t *ct)
1181 {
1182 	znode_t *zp = VTOZ(vp);
1183 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1184 	int error;
1185 
1186 	ZFS_ENTER(zfsvfs);
1187 	ZFS_VERIFY_ZP(zp);
1188 
1189 	if (flag & V_ACE_MASK)
1190 		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
1191 	else
1192 		error = zfs_zaccess_rwx(zp, mode, flag, cr);
1193 
1194 	ZFS_EXIT(zfsvfs);
1195 	return (error);
1196 }
1197 
1198 /*
1199  * If vnode is for a device return a specfs vnode instead.
1200  */
1201 static int
1202 specvp_check(vnode_t **vpp, cred_t *cr)
1203 {
1204 	int error = 0;
1205 
1206 	if (IS_DEVVP(*vpp)) {
1207 		struct vnode *svp;
1208 
1209 		svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
1210 		VN_RELE(*vpp);
1211 		if (svp == NULL)
1212 			error = SET_ERROR(ENOSYS);
1213 		*vpp = svp;
1214 	}
1215 	return (error);
1216 }
1217 
1218 
1219 /*
1220  * Lookup an entry in a directory, or an extended attribute directory.
1221  * If it exists, return a held vnode reference for it.
1222  *
1223  *	IN:	dvp	- vnode of directory to search.
1224  *		nm	- name of entry to lookup.
1225  *		pnp	- full pathname to lookup [UNUSED].
1226  *		flags	- LOOKUP_XATTR set if looking for an attribute.
1227  *		rdir	- root directory vnode [UNUSED].
1228  *		cr	- credentials of caller.
1229  *		ct	- caller context
1230  *		direntflags - directory lookup flags
1231  *		realpnp - returned pathname.
1232  *
1233  *	OUT:	vpp	- vnode of located entry, NULL if not found.
1234  *
1235  *	RETURN:	0 on success, error code on failure.
1236  *
1237  * Timestamps:
1238  *	NA
1239  */
1240 /* ARGSUSED */
1241 static int
1242 zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
1243     int flags, vnode_t *rdir, cred_t *cr,  caller_context_t *ct,
1244     int *direntflags, pathname_t *realpnp)
1245 {
1246 	znode_t *zdp = VTOZ(dvp);
1247 	zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
1248 	int	error = 0;
1249 
1250 	/* fast path */
1251 	if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
1252 
1253 		if (dvp->v_type != VDIR) {
1254 			return (SET_ERROR(ENOTDIR));
1255 		} else if (zdp->z_sa_hdl == NULL) {
1256 			return (SET_ERROR(EIO));
1257 		}
1258 
1259 		if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
1260 			error = zfs_fastaccesschk_execute(zdp, cr);
1261 			if (!error) {
1262 				*vpp = dvp;
1263 				VN_HOLD(*vpp);
1264 				return (0);
1265 			}
1266 			return (error);
1267 		} else {
1268 			vnode_t *tvp = dnlc_lookup(dvp, nm);
1269 
1270 			if (tvp) {
1271 				error = zfs_fastaccesschk_execute(zdp, cr);
1272 				if (error) {
1273 					VN_RELE(tvp);
1274 					return (error);
1275 				}
1276 				if (tvp == DNLC_NO_VNODE) {
1277 					VN_RELE(tvp);
1278 					return (SET_ERROR(ENOENT));
1279 				} else {
1280 					*vpp = tvp;
1281 					return (specvp_check(vpp, cr));
1282 				}
1283 			}
1284 		}
1285 	}
1286 
1287 	DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp, char *, nm);
1288 
1289 	ZFS_ENTER(zfsvfs);
1290 	ZFS_VERIFY_ZP(zdp);
1291 
1292 	*vpp = NULL;
1293 
1294 	if (flags & LOOKUP_XATTR) {
1295 		/*
1296 		 * If the xattr property is off, refuse the lookup request.
1297 		 */
1298 		if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) {
1299 			ZFS_EXIT(zfsvfs);
1300 			return (SET_ERROR(EINVAL));
1301 		}
1302 
1303 		/*
1304 		 * We don't allow recursive attributes..
1305 		 * Maybe someday we will.
1306 		 */
1307 		if (zdp->z_pflags & ZFS_XATTR) {
1308 			ZFS_EXIT(zfsvfs);
1309 			return (SET_ERROR(EINVAL));
1310 		}
1311 
1312 		if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) {
1313 			ZFS_EXIT(zfsvfs);
1314 			return (error);
1315 		}
1316 
1317 		/*
1318 		 * Do we have permission to get into attribute directory?
1319 		 */
1320 
1321 		if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, 0,
1322 		    B_FALSE, cr)) {
1323 			VN_RELE(*vpp);
1324 			*vpp = NULL;
1325 		}
1326 
1327 		ZFS_EXIT(zfsvfs);
1328 		return (error);
1329 	}
1330 
1331 	if (dvp->v_type != VDIR) {
1332 		ZFS_EXIT(zfsvfs);
1333 		return (SET_ERROR(ENOTDIR));
1334 	}
1335 
1336 	/*
1337 	 * Check accessibility of directory.
1338 	 */
1339 
1340 	if (error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr)) {
1341 		ZFS_EXIT(zfsvfs);
1342 		return (error);
1343 	}
1344 
1345 	if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
1346 	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1347 		ZFS_EXIT(zfsvfs);
1348 		return (SET_ERROR(EILSEQ));
1349 	}
1350 
1351 	error = zfs_dirlook(zdp, nm, vpp, flags, direntflags, realpnp);
1352 	if (error == 0)
1353 		error = specvp_check(vpp, cr);
1354 
1355 	ZFS_EXIT(zfsvfs);
1356 	return (error);
1357 }
1358 
1359 /*
1360  * Attempt to create a new entry in a directory.  If the entry
1361  * already exists, truncate the file if permissible, else return
1362  * an error.  Return the vp of the created or trunc'd file.
1363  *
1364  *	IN:	dvp	- vnode of directory to put new file entry in.
1365  *		name	- name of new file entry.
1366  *		vap	- attributes of new file.
1367  *		excl	- flag indicating exclusive or non-exclusive mode.
1368  *		mode	- mode to open file with.
1369  *		cr	- credentials of caller.
1370  *		flag	- large file flag [UNUSED].
1371  *		ct	- caller context
1372  *		vsecp	- ACL to be set
1373  *
1374  *	OUT:	vpp	- vnode of created or trunc'd entry.
1375  *
1376  *	RETURN:	0 on success, error code on failure.
1377  *
1378  * Timestamps:
1379  *	dvp - ctime|mtime updated if new entry created
1380  *	 vp - ctime|mtime always, atime if new
1381  */
1382 
1383 /* ARGSUSED */
1384 static int
1385 zfs_create(vnode_t *dvp, char *name, vattr_t *vap, vcexcl_t excl,
1386     int mode, vnode_t **vpp, cred_t *cr, int flag, caller_context_t *ct,
1387     vsecattr_t *vsecp)
1388 {
1389 	znode_t		*zp, *dzp = VTOZ(dvp);
1390 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1391 	zilog_t		*zilog;
1392 	objset_t	*os;
1393 	zfs_dirlock_t	*dl;
1394 	dmu_tx_t	*tx;
1395 	int		error;
1396 	ksid_t		*ksid;
1397 	uid_t		uid;
1398 	gid_t		gid = crgetgid(cr);
1399 	zfs_acl_ids_t   acl_ids;
1400 	boolean_t	fuid_dirtied;
1401 	boolean_t	have_acl = B_FALSE;
1402 	boolean_t	waited = B_FALSE;
1403 
1404 	/*
1405 	 * If we have an ephemeral id, ACL, or XVATTR then
1406 	 * make sure file system is at proper version
1407 	 */
1408 
1409 	ksid = crgetsid(cr, KSID_OWNER);
1410 	if (ksid)
1411 		uid = ksid_getid(ksid);
1412 	else
1413 		uid = crgetuid(cr);
1414 
1415 	if (zfsvfs->z_use_fuids == B_FALSE &&
1416 	    (vsecp || (vap->va_mask & AT_XVATTR) ||
1417 	    IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1418 		return (SET_ERROR(EINVAL));
1419 
1420 	ZFS_ENTER(zfsvfs);
1421 	ZFS_VERIFY_ZP(dzp);
1422 	os = zfsvfs->z_os;
1423 	zilog = zfsvfs->z_log;
1424 
1425 	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
1426 	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1427 		ZFS_EXIT(zfsvfs);
1428 		return (SET_ERROR(EILSEQ));
1429 	}
1430 
1431 	if (vap->va_mask & AT_XVATTR) {
1432 		if ((error = secpolicy_xvattr((xvattr_t *)vap,
1433 		    crgetuid(cr), cr, vap->va_type)) != 0) {
1434 			ZFS_EXIT(zfsvfs);
1435 			return (error);
1436 		}
1437 	}
1438 top:
1439 	*vpp = NULL;
1440 
1441 	if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr))
1442 		vap->va_mode &= ~VSVTX;
1443 
1444 	if (*name == '\0') {
1445 		/*
1446 		 * Null component name refers to the directory itself.
1447 		 */
1448 		VN_HOLD(dvp);
1449 		zp = dzp;
1450 		dl = NULL;
1451 		error = 0;
1452 	} else {
1453 		/* possible VN_HOLD(zp) */
1454 		int zflg = 0;
1455 
1456 		if (flag & FIGNORECASE)
1457 			zflg |= ZCILOOK;
1458 
1459 		error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1460 		    NULL, NULL);
1461 		if (error) {
1462 			if (have_acl)
1463 				zfs_acl_ids_free(&acl_ids);
1464 			if (strcmp(name, "..") == 0)
1465 				error = SET_ERROR(EISDIR);
1466 			ZFS_EXIT(zfsvfs);
1467 			return (error);
1468 		}
1469 	}
1470 
1471 	if (zp == NULL) {
1472 		uint64_t txtype;
1473 
1474 		/*
1475 		 * Create a new file object and update the directory
1476 		 * to reference it.
1477 		 */
1478 		if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
1479 			if (have_acl)
1480 				zfs_acl_ids_free(&acl_ids);
1481 			goto out;
1482 		}
1483 
1484 		/*
1485 		 * We only support the creation of regular files in
1486 		 * extended attribute directories.
1487 		 */
1488 
1489 		if ((dzp->z_pflags & ZFS_XATTR) &&
1490 		    (vap->va_type != VREG)) {
1491 			if (have_acl)
1492 				zfs_acl_ids_free(&acl_ids);
1493 			error = SET_ERROR(EINVAL);
1494 			goto out;
1495 		}
1496 
1497 		if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
1498 		    cr, vsecp, &acl_ids)) != 0)
1499 			goto out;
1500 		have_acl = B_TRUE;
1501 
1502 		if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1503 			zfs_acl_ids_free(&acl_ids);
1504 			error = SET_ERROR(EDQUOT);
1505 			goto out;
1506 		}
1507 
1508 		tx = dmu_tx_create(os);
1509 
1510 		dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1511 		    ZFS_SA_BASE_ATTR_SIZE);
1512 
1513 		fuid_dirtied = zfsvfs->z_fuid_dirty;
1514 		if (fuid_dirtied)
1515 			zfs_fuid_txhold(zfsvfs, tx);
1516 		dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
1517 		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
1518 		if (!zfsvfs->z_use_sa &&
1519 		    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1520 			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1521 			    0, acl_ids.z_aclp->z_acl_bytes);
1522 		}
1523 		error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
1524 		if (error) {
1525 			zfs_dirent_unlock(dl);
1526 			if (error == ERESTART) {
1527 				waited = B_TRUE;
1528 				dmu_tx_wait(tx);
1529 				dmu_tx_abort(tx);
1530 				goto top;
1531 			}
1532 			zfs_acl_ids_free(&acl_ids);
1533 			dmu_tx_abort(tx);
1534 			ZFS_EXIT(zfsvfs);
1535 			return (error);
1536 		}
1537 		zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
1538 
1539 		if (fuid_dirtied)
1540 			zfs_fuid_sync(zfsvfs, tx);
1541 
1542 		(void) zfs_link_create(dl, zp, tx, ZNEW);
1543 		txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
1544 		if (flag & FIGNORECASE)
1545 			txtype |= TX_CI;
1546 		zfs_log_create(zilog, tx, txtype, dzp, zp, name,
1547 		    vsecp, acl_ids.z_fuidp, vap);
1548 		zfs_acl_ids_free(&acl_ids);
1549 		dmu_tx_commit(tx);
1550 	} else {
1551 		int aflags = (flag & FAPPEND) ? V_APPEND : 0;
1552 
1553 		if (have_acl)
1554 			zfs_acl_ids_free(&acl_ids);
1555 		have_acl = B_FALSE;
1556 
1557 		/*
1558 		 * A directory entry already exists for this name.
1559 		 */
1560 		/*
1561 		 * Can't truncate an existing file if in exclusive mode.
1562 		 */
1563 		if (excl == EXCL) {
1564 			error = SET_ERROR(EEXIST);
1565 			goto out;
1566 		}
1567 		/*
1568 		 * Can't open a directory for writing.
1569 		 */
1570 		if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) {
1571 			error = SET_ERROR(EISDIR);
1572 			goto out;
1573 		}
1574 		/*
1575 		 * Verify requested access to file.
1576 		 */
1577 		if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
1578 			goto out;
1579 		}
1580 
1581 		mutex_enter(&dzp->z_lock);
1582 		dzp->z_seq++;
1583 		mutex_exit(&dzp->z_lock);
1584 
1585 		/*
1586 		 * Truncate regular files if requested.
1587 		 */
1588 		if ((ZTOV(zp)->v_type == VREG) &&
1589 		    (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) {
1590 			/* we can't hold any locks when calling zfs_freesp() */
1591 			zfs_dirent_unlock(dl);
1592 			dl = NULL;
1593 			error = zfs_freesp(zp, 0, 0, mode, TRUE);
1594 			if (error == 0) {
1595 				vnevent_create(ZTOV(zp), ct);
1596 			}
1597 		}
1598 	}
1599 out:
1600 
1601 	if (dl)
1602 		zfs_dirent_unlock(dl);
1603 
1604 	if (error) {
1605 		if (zp)
1606 			VN_RELE(ZTOV(zp));
1607 	} else {
1608 		*vpp = ZTOV(zp);
1609 		error = specvp_check(vpp, cr);
1610 	}
1611 
1612 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1613 		zil_commit(zilog, 0);
1614 
1615 	ZFS_EXIT(zfsvfs);
1616 	return (error);
1617 }
1618 
1619 /*
1620  * Remove an entry from a directory.
1621  *
1622  *	IN:	dvp	- vnode of directory to remove entry from.
1623  *		name	- name of entry to remove.
1624  *		cr	- credentials of caller.
1625  *		ct	- caller context
1626  *		flags	- case flags
1627  *
1628  *	RETURN:	0 on success, error code on failure.
1629  *
1630  * Timestamps:
1631  *	dvp - ctime|mtime
1632  *	 vp - ctime (if nlink > 0)
1633  */
1634 
1635 uint64_t null_xattr = 0;
1636 
1637 /*ARGSUSED*/
1638 static int
1639 zfs_remove(vnode_t *dvp, char *name, cred_t *cr, caller_context_t *ct,
1640     int flags)
1641 {
1642 	znode_t		*zp, *dzp = VTOZ(dvp);
1643 	znode_t		*xzp;
1644 	vnode_t		*vp;
1645 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1646 	zilog_t		*zilog;
1647 	uint64_t	acl_obj, xattr_obj;
1648 	uint64_t	xattr_obj_unlinked = 0;
1649 	uint64_t	obj = 0;
1650 	zfs_dirlock_t	*dl;
1651 	dmu_tx_t	*tx;
1652 	boolean_t	may_delete_now, delete_now = FALSE;
1653 	boolean_t	unlinked, toobig = FALSE;
1654 	uint64_t	txtype;
1655 	pathname_t	*realnmp = NULL;
1656 	pathname_t	realnm;
1657 	int		error;
1658 	int		zflg = ZEXISTS;
1659 	boolean_t	waited = B_FALSE;
1660 
1661 	ZFS_ENTER(zfsvfs);
1662 	ZFS_VERIFY_ZP(dzp);
1663 	zilog = zfsvfs->z_log;
1664 
1665 	if (flags & FIGNORECASE) {
1666 		zflg |= ZCILOOK;
1667 		pn_alloc(&realnm);
1668 		realnmp = &realnm;
1669 	}
1670 
1671 top:
1672 	xattr_obj = 0;
1673 	xzp = NULL;
1674 	/*
1675 	 * Attempt to lock directory; fail if entry doesn't exist.
1676 	 */
1677 	if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1678 	    NULL, realnmp)) {
1679 		if (realnmp)
1680 			pn_free(realnmp);
1681 		ZFS_EXIT(zfsvfs);
1682 		return (error);
1683 	}
1684 
1685 	vp = ZTOV(zp);
1686 
1687 	if (error = zfs_zaccess_delete(dzp, zp, cr)) {
1688 		goto out;
1689 	}
1690 
1691 	/*
1692 	 * Need to use rmdir for removing directories.
1693 	 */
1694 	if (vp->v_type == VDIR) {
1695 		error = SET_ERROR(EPERM);
1696 		goto out;
1697 	}
1698 
1699 	vnevent_remove(vp, dvp, name, ct);
1700 
1701 	if (realnmp)
1702 		dnlc_remove(dvp, realnmp->pn_buf);
1703 	else
1704 		dnlc_remove(dvp, name);
1705 
1706 	mutex_enter(&vp->v_lock);
1707 	may_delete_now = vp->v_count == 1 && !vn_has_cached_data(vp);
1708 	mutex_exit(&vp->v_lock);
1709 
1710 	/*
1711 	 * We may delete the znode now, or we may put it in the unlinked set;
1712 	 * it depends on whether we're the last link, and on whether there are
1713 	 * other holds on the vnode.  So we dmu_tx_hold() the right things to
1714 	 * allow for either case.
1715 	 */
1716 	obj = zp->z_id;
1717 	tx = dmu_tx_create(zfsvfs->z_os);
1718 	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1719 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1720 	zfs_sa_upgrade_txholds(tx, zp);
1721 	zfs_sa_upgrade_txholds(tx, dzp);
1722 	if (may_delete_now) {
1723 		toobig =
1724 		    zp->z_size > zp->z_blksz * DMU_MAX_DELETEBLKCNT;
1725 		/* if the file is too big, only hold_free a token amount */
1726 		dmu_tx_hold_free(tx, zp->z_id, 0,
1727 		    (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
1728 	}
1729 
1730 	/* are there any extended attributes? */
1731 	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1732 	    &xattr_obj, sizeof (xattr_obj));
1733 	if (error == 0 && xattr_obj) {
1734 		error = zfs_zget(zfsvfs, xattr_obj, &xzp);
1735 		ASSERT0(error);
1736 		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
1737 		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
1738 	}
1739 
1740 	mutex_enter(&zp->z_lock);
1741 	if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
1742 		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
1743 	mutex_exit(&zp->z_lock);
1744 
1745 	/* charge as an update -- would be nice not to charge at all */
1746 	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1747 
1748 	/*
1749 	 * Mark this transaction as typically resulting in a net free of space
1750 	 */
1751 	dmu_tx_mark_netfree(tx);
1752 
1753 	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
1754 	if (error) {
1755 		zfs_dirent_unlock(dl);
1756 		VN_RELE(vp);
1757 		if (xzp)
1758 			VN_RELE(ZTOV(xzp));
1759 		if (error == ERESTART) {
1760 			waited = B_TRUE;
1761 			dmu_tx_wait(tx);
1762 			dmu_tx_abort(tx);
1763 			goto top;
1764 		}
1765 		if (realnmp)
1766 			pn_free(realnmp);
1767 		dmu_tx_abort(tx);
1768 		ZFS_EXIT(zfsvfs);
1769 		return (error);
1770 	}
1771 
1772 	/*
1773 	 * Remove the directory entry.
1774 	 */
1775 	error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
1776 
1777 	if (error) {
1778 		dmu_tx_commit(tx);
1779 		goto out;
1780 	}
1781 
1782 	if (unlinked) {
1783 		/*
1784 		 * Hold z_lock so that we can make sure that the ACL obj
1785 		 * hasn't changed.  Could have been deleted due to
1786 		 * zfs_sa_upgrade().
1787 		 */
1788 		mutex_enter(&zp->z_lock);
1789 		mutex_enter(&vp->v_lock);
1790 		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1791 		    &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
1792 		delete_now = may_delete_now && !toobig &&
1793 		    vp->v_count == 1 && !vn_has_cached_data(vp) &&
1794 		    xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) ==
1795 		    acl_obj;
1796 		mutex_exit(&vp->v_lock);
1797 	}
1798 
1799 	if (delete_now) {
1800 		if (xattr_obj_unlinked) {
1801 			ASSERT3U(xzp->z_links, ==, 2);
1802 			mutex_enter(&xzp->z_lock);
1803 			xzp->z_unlinked = 1;
1804 			xzp->z_links = 0;
1805 			error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
1806 			    &xzp->z_links, sizeof (xzp->z_links), tx);
1807 			ASSERT3U(error,  ==,  0);
1808 			mutex_exit(&xzp->z_lock);
1809 			zfs_unlinked_add(xzp, tx);
1810 
1811 			if (zp->z_is_sa)
1812 				error = sa_remove(zp->z_sa_hdl,
1813 				    SA_ZPL_XATTR(zfsvfs), tx);
1814 			else
1815 				error = sa_update(zp->z_sa_hdl,
1816 				    SA_ZPL_XATTR(zfsvfs), &null_xattr,
1817 				    sizeof (uint64_t), tx);
1818 			ASSERT0(error);
1819 		}
1820 		mutex_enter(&vp->v_lock);
1821 		vp->v_count--;
1822 		ASSERT0(vp->v_count);
1823 		mutex_exit(&vp->v_lock);
1824 		mutex_exit(&zp->z_lock);
1825 		zfs_znode_delete(zp, tx);
1826 	} else if (unlinked) {
1827 		mutex_exit(&zp->z_lock);
1828 		zfs_unlinked_add(zp, tx);
1829 	}
1830 
1831 	txtype = TX_REMOVE;
1832 	if (flags & FIGNORECASE)
1833 		txtype |= TX_CI;
1834 	zfs_log_remove(zilog, tx, txtype, dzp, name, obj);
1835 
1836 	dmu_tx_commit(tx);
1837 out:
1838 	if (realnmp)
1839 		pn_free(realnmp);
1840 
1841 	zfs_dirent_unlock(dl);
1842 
1843 	if (!delete_now)
1844 		VN_RELE(vp);
1845 	if (xzp)
1846 		VN_RELE(ZTOV(xzp));
1847 
1848 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1849 		zil_commit(zilog, 0);
1850 
1851 	ZFS_EXIT(zfsvfs);
1852 	return (error);
1853 }
1854 
1855 /*
1856  * Create a new directory and insert it into dvp using the name
1857  * provided.  Return a pointer to the inserted directory.
1858  *
1859  *	IN:	dvp	- vnode of directory to add subdir to.
1860  *		dirname	- name of new directory.
1861  *		vap	- attributes of new directory.
1862  *		cr	- credentials of caller.
1863  *		ct	- caller context
1864  *		flags	- case flags
1865  *		vsecp	- ACL to be set
1866  *
1867  *	OUT:	vpp	- vnode of created directory.
1868  *
1869  *	RETURN:	0 on success, error code on failure.
1870  *
1871  * Timestamps:
1872  *	dvp - ctime|mtime updated
1873  *	 vp - ctime|mtime|atime updated
1874  */
1875 /*ARGSUSED*/
1876 static int
1877 zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr,
1878     caller_context_t *ct, int flags, vsecattr_t *vsecp)
1879 {
1880 	znode_t		*zp, *dzp = VTOZ(dvp);
1881 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1882 	zilog_t		*zilog;
1883 	zfs_dirlock_t	*dl;
1884 	uint64_t	txtype;
1885 	dmu_tx_t	*tx;
1886 	int		error;
1887 	int		zf = ZNEW;
1888 	ksid_t		*ksid;
1889 	uid_t		uid;
1890 	gid_t		gid = crgetgid(cr);
1891 	zfs_acl_ids_t   acl_ids;
1892 	boolean_t	fuid_dirtied;
1893 	boolean_t	waited = B_FALSE;
1894 
1895 	ASSERT(vap->va_type == VDIR);
1896 
1897 	/*
1898 	 * If we have an ephemeral id, ACL, or XVATTR then
1899 	 * make sure file system is at proper version
1900 	 */
1901 
1902 	ksid = crgetsid(cr, KSID_OWNER);
1903 	if (ksid)
1904 		uid = ksid_getid(ksid);
1905 	else
1906 		uid = crgetuid(cr);
1907 	if (zfsvfs->z_use_fuids == B_FALSE &&
1908 	    (vsecp || (vap->va_mask & AT_XVATTR) ||
1909 	    IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1910 		return (SET_ERROR(EINVAL));
1911 
1912 	ZFS_ENTER(zfsvfs);
1913 	ZFS_VERIFY_ZP(dzp);
1914 	zilog = zfsvfs->z_log;
1915 
1916 	if (dzp->z_pflags & ZFS_XATTR) {
1917 		ZFS_EXIT(zfsvfs);
1918 		return (SET_ERROR(EINVAL));
1919 	}
1920 
1921 	if (zfsvfs->z_utf8 && u8_validate(dirname,
1922 	    strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1923 		ZFS_EXIT(zfsvfs);
1924 		return (SET_ERROR(EILSEQ));
1925 	}
1926 	if (flags & FIGNORECASE)
1927 		zf |= ZCILOOK;
1928 
1929 	if (vap->va_mask & AT_XVATTR) {
1930 		if ((error = secpolicy_xvattr((xvattr_t *)vap,
1931 		    crgetuid(cr), cr, vap->va_type)) != 0) {
1932 			ZFS_EXIT(zfsvfs);
1933 			return (error);
1934 		}
1935 	}
1936 
1937 	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
1938 	    vsecp, &acl_ids)) != 0) {
1939 		ZFS_EXIT(zfsvfs);
1940 		return (error);
1941 	}
1942 	/*
1943 	 * First make sure the new directory doesn't exist.
1944 	 *
1945 	 * Existence is checked first to make sure we don't return
1946 	 * EACCES instead of EEXIST which can cause some applications
1947 	 * to fail.
1948 	 */
1949 top:
1950 	*vpp = NULL;
1951 
1952 	if (error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
1953 	    NULL, NULL)) {
1954 		zfs_acl_ids_free(&acl_ids);
1955 		ZFS_EXIT(zfsvfs);
1956 		return (error);
1957 	}
1958 
1959 	if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr)) {
1960 		zfs_acl_ids_free(&acl_ids);
1961 		zfs_dirent_unlock(dl);
1962 		ZFS_EXIT(zfsvfs);
1963 		return (error);
1964 	}
1965 
1966 	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1967 		zfs_acl_ids_free(&acl_ids);
1968 		zfs_dirent_unlock(dl);
1969 		ZFS_EXIT(zfsvfs);
1970 		return (SET_ERROR(EDQUOT));
1971 	}
1972 
1973 	/*
1974 	 * Add a new entry to the directory.
1975 	 */
1976 	tx = dmu_tx_create(zfsvfs->z_os);
1977 	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
1978 	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
1979 	fuid_dirtied = zfsvfs->z_fuid_dirty;
1980 	if (fuid_dirtied)
1981 		zfs_fuid_txhold(zfsvfs, tx);
1982 	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1983 		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
1984 		    acl_ids.z_aclp->z_acl_bytes);
1985 	}
1986 
1987 	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1988 	    ZFS_SA_BASE_ATTR_SIZE);
1989 
1990 	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
1991 	if (error) {
1992 		zfs_dirent_unlock(dl);
1993 		if (error == ERESTART) {
1994 			waited = B_TRUE;
1995 			dmu_tx_wait(tx);
1996 			dmu_tx_abort(tx);
1997 			goto top;
1998 		}
1999 		zfs_acl_ids_free(&acl_ids);
2000 		dmu_tx_abort(tx);
2001 		ZFS_EXIT(zfsvfs);
2002 		return (error);
2003 	}
2004 
2005 	/*
2006 	 * Create new node.
2007 	 */
2008 	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
2009 
2010 	if (fuid_dirtied)
2011 		zfs_fuid_sync(zfsvfs, tx);
2012 
2013 	/*
2014 	 * Now put new name in parent dir.
2015 	 */
2016 	(void) zfs_link_create(dl, zp, tx, ZNEW);
2017 
2018 	*vpp = ZTOV(zp);
2019 
2020 	txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
2021 	if (flags & FIGNORECASE)
2022 		txtype |= TX_CI;
2023 	zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
2024 	    acl_ids.z_fuidp, vap);
2025 
2026 	zfs_acl_ids_free(&acl_ids);
2027 
2028 	dmu_tx_commit(tx);
2029 
2030 	zfs_dirent_unlock(dl);
2031 
2032 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2033 		zil_commit(zilog, 0);
2034 
2035 	ZFS_EXIT(zfsvfs);
2036 	return (0);
2037 }
2038 
2039 /*
2040  * Remove a directory subdir entry.  If the current working
2041  * directory is the same as the subdir to be removed, the
2042  * remove will fail.
2043  *
2044  *	IN:	dvp	- vnode of directory to remove from.
2045  *		name	- name of directory to be removed.
2046  *		cwd	- vnode of current working directory.
2047  *		cr	- credentials of caller.
2048  *		ct	- caller context
2049  *		flags	- case flags
2050  *
2051  *	RETURN:	0 on success, error code on failure.
2052  *
2053  * Timestamps:
2054  *	dvp - ctime|mtime updated
2055  */
2056 /*ARGSUSED*/
2057 static int
2058 zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
2059     caller_context_t *ct, int flags)
2060 {
2061 	znode_t		*dzp = VTOZ(dvp);
2062 	znode_t		*zp;
2063 	vnode_t		*vp;
2064 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
2065 	zilog_t		*zilog;
2066 	zfs_dirlock_t	*dl;
2067 	dmu_tx_t	*tx;
2068 	int		error;
2069 	int		zflg = ZEXISTS;
2070 	boolean_t	waited = B_FALSE;
2071 
2072 	ZFS_ENTER(zfsvfs);
2073 	ZFS_VERIFY_ZP(dzp);
2074 	zilog = zfsvfs->z_log;
2075 
2076 	if (flags & FIGNORECASE)
2077 		zflg |= ZCILOOK;
2078 top:
2079 	zp = NULL;
2080 
2081 	/*
2082 	 * Attempt to lock directory; fail if entry doesn't exist.
2083 	 */
2084 	if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
2085 	    NULL, NULL)) {
2086 		ZFS_EXIT(zfsvfs);
2087 		return (error);
2088 	}
2089 
2090 	vp = ZTOV(zp);
2091 
2092 	if (error = zfs_zaccess_delete(dzp, zp, cr)) {
2093 		goto out;
2094 	}
2095 
2096 	if (vp->v_type != VDIR) {
2097 		error = SET_ERROR(ENOTDIR);
2098 		goto out;
2099 	}
2100 
2101 	if (vp == cwd) {
2102 		error = SET_ERROR(EINVAL);
2103 		goto out;
2104 	}
2105 
2106 	vnevent_rmdir(vp, dvp, name, ct);
2107 
2108 	/*
2109 	 * Grab a lock on the directory to make sure that noone is
2110 	 * trying to add (or lookup) entries while we are removing it.
2111 	 */
2112 	rw_enter(&zp->z_name_lock, RW_WRITER);
2113 
2114 	/*
2115 	 * Grab a lock on the parent pointer to make sure we play well
2116 	 * with the treewalk and directory rename code.
2117 	 */
2118 	rw_enter(&zp->z_parent_lock, RW_WRITER);
2119 
2120 	tx = dmu_tx_create(zfsvfs->z_os);
2121 	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
2122 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2123 	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
2124 	zfs_sa_upgrade_txholds(tx, zp);
2125 	zfs_sa_upgrade_txholds(tx, dzp);
2126 	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
2127 	if (error) {
2128 		rw_exit(&zp->z_parent_lock);
2129 		rw_exit(&zp->z_name_lock);
2130 		zfs_dirent_unlock(dl);
2131 		VN_RELE(vp);
2132 		if (error == ERESTART) {
2133 			waited = B_TRUE;
2134 			dmu_tx_wait(tx);
2135 			dmu_tx_abort(tx);
2136 			goto top;
2137 		}
2138 		dmu_tx_abort(tx);
2139 		ZFS_EXIT(zfsvfs);
2140 		return (error);
2141 	}
2142 
2143 	error = zfs_link_destroy(dl, zp, tx, zflg, NULL);
2144 
2145 	if (error == 0) {
2146 		uint64_t txtype = TX_RMDIR;
2147 		if (flags & FIGNORECASE)
2148 			txtype |= TX_CI;
2149 		zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
2150 	}
2151 
2152 	dmu_tx_commit(tx);
2153 
2154 	rw_exit(&zp->z_parent_lock);
2155 	rw_exit(&zp->z_name_lock);
2156 out:
2157 	zfs_dirent_unlock(dl);
2158 
2159 	VN_RELE(vp);
2160 
2161 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2162 		zil_commit(zilog, 0);
2163 
2164 	ZFS_EXIT(zfsvfs);
2165 	return (error);
2166 }
2167 
2168 /*
2169  * Read as many directory entries as will fit into the provided
2170  * buffer from the given directory cursor position (specified in
2171  * the uio structure).
2172  *
2173  *	IN:	vp	- vnode of directory to read.
2174  *		uio	- structure supplying read location, range info,
2175  *			  and return buffer.
2176  *		cr	- credentials of caller.
2177  *		ct	- caller context
2178  *		flags	- case flags
2179  *
2180  *	OUT:	uio	- updated offset and range, buffer filled.
2181  *		eofp	- set to true if end-of-file detected.
2182  *
2183  *	RETURN:	0 on success, error code on failure.
2184  *
2185  * Timestamps:
2186  *	vp - atime updated
2187  *
2188  * Note that the low 4 bits of the cookie returned by zap is always zero.
2189  * This allows us to use the low range for "special" directory entries:
2190  * We use 0 for '.', and 1 for '..'.  If this is the root of the filesystem,
2191  * we use the offset 2 for the '.zfs' directory.
2192  */
2193 /* ARGSUSED */
2194 static int
2195 zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
2196     caller_context_t *ct, int flags)
2197 {
2198 	znode_t		*zp = VTOZ(vp);
2199 	iovec_t		*iovp;
2200 	edirent_t	*eodp;
2201 	dirent64_t	*odp;
2202 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
2203 	objset_t	*os;
2204 	caddr_t		outbuf;
2205 	size_t		bufsize;
2206 	zap_cursor_t	zc;
2207 	zap_attribute_t	zap;
2208 	uint_t		bytes_wanted;
2209 	uint64_t	offset; /* must be unsigned; checks for < 1 */
2210 	uint64_t	parent;
2211 	int		local_eof;
2212 	int		outcount;
2213 	int		error;
2214 	uint8_t		prefetch;
2215 	boolean_t	check_sysattrs;
2216 
2217 	ZFS_ENTER(zfsvfs);
2218 	ZFS_VERIFY_ZP(zp);
2219 
2220 	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
2221 	    &parent, sizeof (parent))) != 0) {
2222 		ZFS_EXIT(zfsvfs);
2223 		return (error);
2224 	}
2225 
2226 	/*
2227 	 * If we are not given an eof variable,
2228 	 * use a local one.
2229 	 */
2230 	if (eofp == NULL)
2231 		eofp = &local_eof;
2232 
2233 	/*
2234 	 * Check for valid iov_len.
2235 	 */
2236 	if (uio->uio_iov->iov_len <= 0) {
2237 		ZFS_EXIT(zfsvfs);
2238 		return (SET_ERROR(EINVAL));
2239 	}
2240 
2241 	/*
2242 	 * Quit if directory has been removed (posix)
2243 	 */
2244 	if ((*eofp = zp->z_unlinked) != 0) {
2245 		ZFS_EXIT(zfsvfs);
2246 		return (0);
2247 	}
2248 
2249 	error = 0;
2250 	os = zfsvfs->z_os;
2251 	offset = uio->uio_loffset;
2252 	prefetch = zp->z_zn_prefetch;
2253 
2254 	/*
2255 	 * Initialize the iterator cursor.
2256 	 */
2257 	if (offset <= 3) {
2258 		/*
2259 		 * Start iteration from the beginning of the directory.
2260 		 */
2261 		zap_cursor_init(&zc, os, zp->z_id);
2262 	} else {
2263 		/*
2264 		 * The offset is a serialized cursor.
2265 		 */
2266 		zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
2267 	}
2268 
2269 	/*
2270 	 * Get space to change directory entries into fs independent format.
2271 	 */
2272 	iovp = uio->uio_iov;
2273 	bytes_wanted = iovp->iov_len;
2274 	if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) {
2275 		bufsize = bytes_wanted;
2276 		outbuf = kmem_alloc(bufsize, KM_SLEEP);
2277 		odp = (struct dirent64 *)outbuf;
2278 	} else {
2279 		bufsize = bytes_wanted;
2280 		outbuf = NULL;
2281 		odp = (struct dirent64 *)iovp->iov_base;
2282 	}
2283 	eodp = (struct edirent *)odp;
2284 
2285 	/*
2286 	 * If this VFS supports the system attribute view interface; and
2287 	 * we're looking at an extended attribute directory; and we care
2288 	 * about normalization conflicts on this vfs; then we must check
2289 	 * for normalization conflicts with the sysattr name space.
2290 	 */
2291 	check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
2292 	    (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm &&
2293 	    (flags & V_RDDIR_ENTFLAGS);
2294 
2295 	/*
2296 	 * Transform to file-system independent format
2297 	 */
2298 	outcount = 0;
2299 	while (outcount < bytes_wanted) {
2300 		ino64_t objnum;
2301 		ushort_t reclen;
2302 		off64_t *next = NULL;
2303 
2304 		/*
2305 		 * Special case `.', `..', and `.zfs'.
2306 		 */
2307 		if (offset == 0) {
2308 			(void) strcpy(zap.za_name, ".");
2309 			zap.za_normalization_conflict = 0;
2310 			objnum = zp->z_id;
2311 		} else if (offset == 1) {
2312 			(void) strcpy(zap.za_name, "..");
2313 			zap.za_normalization_conflict = 0;
2314 			objnum = parent;
2315 		} else if (offset == 2 && zfs_show_ctldir(zp)) {
2316 			(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
2317 			zap.za_normalization_conflict = 0;
2318 			objnum = ZFSCTL_INO_ROOT;
2319 		} else {
2320 			/*
2321 			 * Grab next entry.
2322 			 */
2323 			if (error = zap_cursor_retrieve(&zc, &zap)) {
2324 				if ((*eofp = (error == ENOENT)) != 0)
2325 					break;
2326 				else
2327 					goto update;
2328 			}
2329 
2330 			if (zap.za_integer_length != 8 ||
2331 			    zap.za_num_integers != 1) {
2332 				cmn_err(CE_WARN, "zap_readdir: bad directory "
2333 				    "entry, obj = %lld, offset = %lld\n",
2334 				    (u_longlong_t)zp->z_id,
2335 				    (u_longlong_t)offset);
2336 				error = SET_ERROR(ENXIO);
2337 				goto update;
2338 			}
2339 
2340 			objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
2341 			/*
2342 			 * MacOS X can extract the object type here such as:
2343 			 * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer);
2344 			 */
2345 
2346 			if (check_sysattrs && !zap.za_normalization_conflict) {
2347 				zap.za_normalization_conflict =
2348 				    xattr_sysattr_casechk(zap.za_name);
2349 			}
2350 		}
2351 
2352 		if (flags & V_RDDIR_ACCFILTER) {
2353 			/*
2354 			 * If we have no access at all, don't include
2355 			 * this entry in the returned information
2356 			 */
2357 			znode_t	*ezp;
2358 			if (zfs_zget(zp->z_zfsvfs, objnum, &ezp) != 0)
2359 				goto skip_entry;
2360 			if (!zfs_has_access(ezp, cr)) {
2361 				VN_RELE(ZTOV(ezp));
2362 				goto skip_entry;
2363 			}
2364 			VN_RELE(ZTOV(ezp));
2365 		}
2366 
2367 		if (flags & V_RDDIR_ENTFLAGS)
2368 			reclen = EDIRENT_RECLEN(strlen(zap.za_name));
2369 		else
2370 			reclen = DIRENT64_RECLEN(strlen(zap.za_name));
2371 
2372 		/*
2373 		 * Will this entry fit in the buffer?
2374 		 */
2375 		if (outcount + reclen > bufsize) {
2376 			/*
2377 			 * Did we manage to fit anything in the buffer?
2378 			 */
2379 			if (!outcount) {
2380 				error = SET_ERROR(EINVAL);
2381 				goto update;
2382 			}
2383 			break;
2384 		}
2385 		if (flags & V_RDDIR_ENTFLAGS) {
2386 			/*
2387 			 * Add extended flag entry:
2388 			 */
2389 			eodp->ed_ino = objnum;
2390 			eodp->ed_reclen = reclen;
2391 			/* NOTE: ed_off is the offset for the *next* entry */
2392 			next = &(eodp->ed_off);
2393 			eodp->ed_eflags = zap.za_normalization_conflict ?
2394 			    ED_CASE_CONFLICT : 0;
2395 			(void) strncpy(eodp->ed_name, zap.za_name,
2396 			    EDIRENT_NAMELEN(reclen));
2397 			eodp = (edirent_t *)((intptr_t)eodp + reclen);
2398 		} else {
2399 			/*
2400 			 * Add normal entry:
2401 			 */
2402 			odp->d_ino = objnum;
2403 			odp->d_reclen = reclen;
2404 			/* NOTE: d_off is the offset for the *next* entry */
2405 			next = &(odp->d_off);
2406 			(void) strncpy(odp->d_name, zap.za_name,
2407 			    DIRENT64_NAMELEN(reclen));
2408 			odp = (dirent64_t *)((intptr_t)odp + reclen);
2409 		}
2410 		outcount += reclen;
2411 
2412 		ASSERT(outcount <= bufsize);
2413 
2414 		/* Prefetch znode */
2415 		if (prefetch)
2416 			dmu_prefetch(os, objnum, 0, 0, 0,
2417 			    ZIO_PRIORITY_SYNC_READ);
2418 
2419 	skip_entry:
2420 		/*
2421 		 * Move to the next entry, fill in the previous offset.
2422 		 */
2423 		if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
2424 			zap_cursor_advance(&zc);
2425 			offset = zap_cursor_serialize(&zc);
2426 		} else {
2427 			offset += 1;
2428 		}
2429 		if (next)
2430 			*next = offset;
2431 	}
2432 	zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
2433 
2434 	if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) {
2435 		iovp->iov_base += outcount;
2436 		iovp->iov_len -= outcount;
2437 		uio->uio_resid -= outcount;
2438 	} else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) {
2439 		/*
2440 		 * Reset the pointer.
2441 		 */
2442 		offset = uio->uio_loffset;
2443 	}
2444 
2445 update:
2446 	zap_cursor_fini(&zc);
2447 	if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
2448 		kmem_free(outbuf, bufsize);
2449 
2450 	if (error == ENOENT)
2451 		error = 0;
2452 
2453 	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
2454 
2455 	uio->uio_loffset = offset;
2456 	ZFS_EXIT(zfsvfs);
2457 	return (error);
2458 }
2459 
2460 ulong_t zfs_fsync_sync_cnt = 4;
2461 
2462 static int
2463 zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
2464 {
2465 	znode_t	*zp = VTOZ(vp);
2466 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2467 
2468 	/*
2469 	 * Regardless of whether this is required for standards conformance,
2470 	 * this is the logical behavior when fsync() is called on a file with
2471 	 * dirty pages.  We use B_ASYNC since the ZIL transactions are already
2472 	 * going to be pushed out as part of the zil_commit().
2473 	 */
2474 	if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) &&
2475 	    (vp->v_type == VREG) && !(IS_SWAPVP(vp)))
2476 		(void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0, B_ASYNC, cr, ct);
2477 
2478 	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
2479 
2480 	if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
2481 		ZFS_ENTER(zfsvfs);
2482 		ZFS_VERIFY_ZP(zp);
2483 		zil_commit(zfsvfs->z_log, zp->z_id);
2484 		ZFS_EXIT(zfsvfs);
2485 	}
2486 	return (0);
2487 }
2488 
2489 
2490 /*
2491  * Get the requested file attributes and place them in the provided
2492  * vattr structure.
2493  *
2494  *	IN:	vp	- vnode of file.
2495  *		vap	- va_mask identifies requested attributes.
2496  *			  If AT_XVATTR set, then optional attrs are requested
2497  *		flags	- ATTR_NOACLCHECK (CIFS server context)
2498  *		cr	- credentials of caller.
2499  *		ct	- caller context
2500  *
2501  *	OUT:	vap	- attribute values.
2502  *
2503  *	RETURN:	0 (always succeeds).
2504  */
2505 /* ARGSUSED */
2506 static int
2507 zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2508     caller_context_t *ct)
2509 {
2510 	znode_t *zp = VTOZ(vp);
2511 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2512 	int	error = 0;
2513 	uint64_t links;
2514 	uint64_t mtime[2], ctime[2];
2515 	xvattr_t *xvap = (xvattr_t *)vap;	/* vap may be an xvattr_t * */
2516 	xoptattr_t *xoap = NULL;
2517 	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2518 	sa_bulk_attr_t bulk[2];
2519 	int count = 0;
2520 
2521 	ZFS_ENTER(zfsvfs);
2522 	ZFS_VERIFY_ZP(zp);
2523 
2524 	zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
2525 
2526 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
2527 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
2528 
2529 	if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
2530 		ZFS_EXIT(zfsvfs);
2531 		return (error);
2532 	}
2533 
2534 	/*
2535 	 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
2536 	 * Also, if we are the owner don't bother, since owner should
2537 	 * always be allowed to read basic attributes of file.
2538 	 */
2539 	if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
2540 	    (vap->va_uid != crgetuid(cr))) {
2541 		if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
2542 		    skipaclchk, cr)) {
2543 			ZFS_EXIT(zfsvfs);
2544 			return (error);
2545 		}
2546 	}
2547 
2548 	/*
2549 	 * Return all attributes.  It's cheaper to provide the answer
2550 	 * than to determine whether we were asked the question.
2551 	 */
2552 
2553 	mutex_enter(&zp->z_lock);
2554 	vap->va_type = vp->v_type;
2555 	vap->va_mode = zp->z_mode & MODEMASK;
2556 	vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev;
2557 	vap->va_nodeid = zp->z_id;
2558 	if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp))
2559 		links = zp->z_links + 1;
2560 	else
2561 		links = zp->z_links;
2562 	vap->va_nlink = MIN(links, UINT32_MAX);	/* nlink_t limit! */
2563 	vap->va_size = zp->z_size;
2564 	vap->va_rdev = vp->v_rdev;
2565 	vap->va_seq = zp->z_seq;
2566 
2567 	/*
2568 	 * Add in any requested optional attributes and the create time.
2569 	 * Also set the corresponding bits in the returned attribute bitmap.
2570 	 */
2571 	if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
2572 		if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
2573 			xoap->xoa_archive =
2574 			    ((zp->z_pflags & ZFS_ARCHIVE) != 0);
2575 			XVA_SET_RTN(xvap, XAT_ARCHIVE);
2576 		}
2577 
2578 		if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
2579 			xoap->xoa_readonly =
2580 			    ((zp->z_pflags & ZFS_READONLY) != 0);
2581 			XVA_SET_RTN(xvap, XAT_READONLY);
2582 		}
2583 
2584 		if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
2585 			xoap->xoa_system =
2586 			    ((zp->z_pflags & ZFS_SYSTEM) != 0);
2587 			XVA_SET_RTN(xvap, XAT_SYSTEM);
2588 		}
2589 
2590 		if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
2591 			xoap->xoa_hidden =
2592 			    ((zp->z_pflags & ZFS_HIDDEN) != 0);
2593 			XVA_SET_RTN(xvap, XAT_HIDDEN);
2594 		}
2595 
2596 		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2597 			xoap->xoa_nounlink =
2598 			    ((zp->z_pflags & ZFS_NOUNLINK) != 0);
2599 			XVA_SET_RTN(xvap, XAT_NOUNLINK);
2600 		}
2601 
2602 		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2603 			xoap->xoa_immutable =
2604 			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
2605 			XVA_SET_RTN(xvap, XAT_IMMUTABLE);
2606 		}
2607 
2608 		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2609 			xoap->xoa_appendonly =
2610 			    ((zp->z_pflags & ZFS_APPENDONLY) != 0);
2611 			XVA_SET_RTN(xvap, XAT_APPENDONLY);
2612 		}
2613 
2614 		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2615 			xoap->xoa_nodump =
2616 			    ((zp->z_pflags & ZFS_NODUMP) != 0);
2617 			XVA_SET_RTN(xvap, XAT_NODUMP);
2618 		}
2619 
2620 		if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
2621 			xoap->xoa_opaque =
2622 			    ((zp->z_pflags & ZFS_OPAQUE) != 0);
2623 			XVA_SET_RTN(xvap, XAT_OPAQUE);
2624 		}
2625 
2626 		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2627 			xoap->xoa_av_quarantined =
2628 			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
2629 			XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
2630 		}
2631 
2632 		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2633 			xoap->xoa_av_modified =
2634 			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
2635 			XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
2636 		}
2637 
2638 		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
2639 		    vp->v_type == VREG) {
2640 			zfs_sa_get_scanstamp(zp, xvap);
2641 		}
2642 
2643 		if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
2644 			uint64_t times[2];
2645 
2646 			(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
2647 			    times, sizeof (times));
2648 			ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
2649 			XVA_SET_RTN(xvap, XAT_CREATETIME);
2650 		}
2651 
2652 		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2653 			xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
2654 			XVA_SET_RTN(xvap, XAT_REPARSE);
2655 		}
2656 		if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
2657 			xoap->xoa_generation = zp->z_gen;
2658 			XVA_SET_RTN(xvap, XAT_GEN);
2659 		}
2660 
2661 		if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
2662 			xoap->xoa_offline =
2663 			    ((zp->z_pflags & ZFS_OFFLINE) != 0);
2664 			XVA_SET_RTN(xvap, XAT_OFFLINE);
2665 		}
2666 
2667 		if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
2668 			xoap->xoa_sparse =
2669 			    ((zp->z_pflags & ZFS_SPARSE) != 0);
2670 			XVA_SET_RTN(xvap, XAT_SPARSE);
2671 		}
2672 	}
2673 
2674 	ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
2675 	ZFS_TIME_DECODE(&vap->va_mtime, mtime);
2676 	ZFS_TIME_DECODE(&vap->va_ctime, ctime);
2677 
2678 	mutex_exit(&zp->z_lock);
2679 
2680 	sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);
2681 
2682 	if (zp->z_blksz == 0) {
2683 		/*
2684 		 * Block size hasn't been set; suggest maximal I/O transfers.
2685 		 */
2686 		vap->va_blksize = zfsvfs->z_max_blksz;
2687 	}
2688 
2689 	ZFS_EXIT(zfsvfs);
2690 	return (0);
2691 }
2692 
2693 /*
2694  * Set the file attributes to the values contained in the
2695  * vattr structure.
2696  *
2697  *	IN:	vp	- vnode of file to be modified.
2698  *		vap	- new attribute values.
2699  *			  If AT_XVATTR set, then optional attrs are being set
2700  *		flags	- ATTR_UTIME set if non-default time values provided.
2701  *			- ATTR_NOACLCHECK (CIFS context only).
2702  *		cr	- credentials of caller.
2703  *		ct	- caller context
2704  *
2705  *	RETURN:	0 on success, error code on failure.
2706  *
2707  * Timestamps:
2708  *	vp - ctime updated, mtime updated if size changed.
2709  */
2710 /* ARGSUSED */
2711 static int
2712 zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2713     caller_context_t *ct)
2714 {
2715 	znode_t		*zp = VTOZ(vp);
2716 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
2717 	zilog_t		*zilog;
2718 	dmu_tx_t	*tx;
2719 	vattr_t		oldva;
2720 	xvattr_t	tmpxvattr;
2721 	uint_t		mask = vap->va_mask;
2722 	uint_t		saved_mask = 0;
2723 	int		trim_mask = 0;
2724 	uint64_t	new_mode;
2725 	uint64_t	new_uid, new_gid;
2726 	uint64_t	xattr_obj;
2727 	uint64_t	mtime[2], ctime[2];
2728 	znode_t		*attrzp;
2729 	int		need_policy = FALSE;
2730 	int		err, err2;
2731 	zfs_fuid_info_t *fuidp = NULL;
2732 	xvattr_t *xvap = (xvattr_t *)vap;	/* vap may be an xvattr_t * */
2733 	xoptattr_t	*xoap;
2734 	zfs_acl_t	*aclp;
2735 	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2736 	boolean_t	fuid_dirtied = B_FALSE;
2737 	sa_bulk_attr_t	bulk[7], xattr_bulk[7];
2738 	int		count = 0, xattr_count = 0;
2739 
2740 	if (mask == 0)
2741 		return (0);
2742 
2743 	if (mask & AT_NOSET)
2744 		return (SET_ERROR(EINVAL));
2745 
2746 	ZFS_ENTER(zfsvfs);
2747 	ZFS_VERIFY_ZP(zp);
2748 
2749 	zilog = zfsvfs->z_log;
2750 
2751 	/*
2752 	 * Make sure that if we have ephemeral uid/gid or xvattr specified
2753 	 * that file system is at proper version level
2754 	 */
2755 
2756 	if (zfsvfs->z_use_fuids == B_FALSE &&
2757 	    (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) ||
2758 	    ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) ||
2759 	    (mask & AT_XVATTR))) {
2760 		ZFS_EXIT(zfsvfs);
2761 		return (SET_ERROR(EINVAL));
2762 	}
2763 
2764 	if (mask & AT_SIZE && vp->v_type == VDIR) {
2765 		ZFS_EXIT(zfsvfs);
2766 		return (SET_ERROR(EISDIR));
2767 	}
2768 
2769 	if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) {
2770 		ZFS_EXIT(zfsvfs);
2771 		return (SET_ERROR(EINVAL));
2772 	}
2773 
2774 	/*
2775 	 * If this is an xvattr_t, then get a pointer to the structure of
2776 	 * optional attributes.  If this is NULL, then we have a vattr_t.
2777 	 */
2778 	xoap = xva_getxoptattr(xvap);
2779 
2780 	xva_init(&tmpxvattr);
2781 
2782 	/*
2783 	 * Immutable files can only alter immutable bit and atime
2784 	 */
2785 	if ((zp->z_pflags & ZFS_IMMUTABLE) &&
2786 	    ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) ||
2787 	    ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
2788 		ZFS_EXIT(zfsvfs);
2789 		return (SET_ERROR(EPERM));
2790 	}
2791 
2792 	if ((mask & AT_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
2793 		ZFS_EXIT(zfsvfs);
2794 		return (SET_ERROR(EPERM));
2795 	}
2796 
2797 	/*
2798 	 * Verify timestamps doesn't overflow 32 bits.
2799 	 * ZFS can handle large timestamps, but 32bit syscalls can't
2800 	 * handle times greater than 2039.  This check should be removed
2801 	 * once large timestamps are fully supported.
2802 	 */
2803 	if (mask & (AT_ATIME | AT_MTIME)) {
2804 		if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2805 		    ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2806 			ZFS_EXIT(zfsvfs);
2807 			return (SET_ERROR(EOVERFLOW));
2808 		}
2809 	}
2810 
2811 top:
2812 	attrzp = NULL;
2813 	aclp = NULL;
2814 
2815 	/* Can this be moved to before the top label? */
2816 	if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
2817 		ZFS_EXIT(zfsvfs);
2818 		return (SET_ERROR(EROFS));
2819 	}
2820 
2821 	/*
2822 	 * First validate permissions
2823 	 */
2824 
2825 	if (mask & AT_SIZE) {
2826 		err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
2827 		if (err) {
2828 			ZFS_EXIT(zfsvfs);
2829 			return (err);
2830 		}
2831 		/*
2832 		 * XXX - Note, we are not providing any open
2833 		 * mode flags here (like FNDELAY), so we may
2834 		 * block if there are locks present... this
2835 		 * should be addressed in openat().
2836 		 */
2837 		/* XXX - would it be OK to generate a log record here? */
2838 		err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
2839 		if (err) {
2840 			ZFS_EXIT(zfsvfs);
2841 			return (err);
2842 		}
2843 
2844 		if (vap->va_size == 0)
2845 			vnevent_truncate(ZTOV(zp), ct);
2846 	}
2847 
2848 	if (mask & (AT_ATIME|AT_MTIME) ||
2849 	    ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
2850 	    XVA_ISSET_REQ(xvap, XAT_READONLY) ||
2851 	    XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
2852 	    XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
2853 	    XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
2854 	    XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
2855 	    XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
2856 		need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
2857 		    skipaclchk, cr);
2858 	}
2859 
2860 	if (mask & (AT_UID|AT_GID)) {
2861 		int	idmask = (mask & (AT_UID|AT_GID));
2862 		int	take_owner;
2863 		int	take_group;
2864 
2865 		/*
2866 		 * NOTE: even if a new mode is being set,
2867 		 * we may clear S_ISUID/S_ISGID bits.
2868 		 */
2869 
2870 		if (!(mask & AT_MODE))
2871 			vap->va_mode = zp->z_mode;
2872 
2873 		/*
2874 		 * Take ownership or chgrp to group we are a member of
2875 		 */
2876 
2877 		take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
2878 		take_group = (mask & AT_GID) &&
2879 		    zfs_groupmember(zfsvfs, vap->va_gid, cr);
2880 
2881 		/*
2882 		 * If both AT_UID and AT_GID are set then take_owner and
2883 		 * take_group must both be set in order to allow taking
2884 		 * ownership.
2885 		 *
2886 		 * Otherwise, send the check through secpolicy_vnode_setattr()
2887 		 *
2888 		 */
2889 
2890 		if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
2891 		    ((idmask == AT_UID) && take_owner) ||
2892 		    ((idmask == AT_GID) && take_group)) {
2893 			if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
2894 			    skipaclchk, cr) == 0) {
2895 				/*
2896 				 * Remove setuid/setgid for non-privileged users
2897 				 */
2898 				secpolicy_setid_clear(vap, cr);
2899 				trim_mask = (mask & (AT_UID|AT_GID));
2900 			} else {
2901 				need_policy =  TRUE;
2902 			}
2903 		} else {
2904 			need_policy =  TRUE;
2905 		}
2906 	}
2907 
2908 	mutex_enter(&zp->z_lock);
2909 	oldva.va_mode = zp->z_mode;
2910 	zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
2911 	if (mask & AT_XVATTR) {
2912 		/*
2913 		 * Update xvattr mask to include only those attributes
2914 		 * that are actually changing.
2915 		 *
2916 		 * the bits will be restored prior to actually setting
2917 		 * the attributes so the caller thinks they were set.
2918 		 */
2919 		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2920 			if (xoap->xoa_appendonly !=
2921 			    ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
2922 				need_policy = TRUE;
2923 			} else {
2924 				XVA_CLR_REQ(xvap, XAT_APPENDONLY);
2925 				XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
2926 			}
2927 		}
2928 
2929 		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2930 			if (xoap->xoa_nounlink !=
2931 			    ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
2932 				need_policy = TRUE;
2933 			} else {
2934 				XVA_CLR_REQ(xvap, XAT_NOUNLINK);
2935 				XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
2936 			}
2937 		}
2938 
2939 		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2940 			if (xoap->xoa_immutable !=
2941 			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
2942 				need_policy = TRUE;
2943 			} else {
2944 				XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
2945 				XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
2946 			}
2947 		}
2948 
2949 		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2950 			if (xoap->xoa_nodump !=
2951 			    ((zp->z_pflags & ZFS_NODUMP) != 0)) {
2952 				need_policy = TRUE;
2953 			} else {
2954 				XVA_CLR_REQ(xvap, XAT_NODUMP);
2955 				XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
2956 			}
2957 		}
2958 
2959 		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2960 			if (xoap->xoa_av_modified !=
2961 			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
2962 				need_policy = TRUE;
2963 			} else {
2964 				XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
2965 				XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
2966 			}
2967 		}
2968 
2969 		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2970 			if ((vp->v_type != VREG &&
2971 			    xoap->xoa_av_quarantined) ||
2972 			    xoap->xoa_av_quarantined !=
2973 			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
2974 				need_policy = TRUE;
2975 			} else {
2976 				XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
2977 				XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
2978 			}
2979 		}
2980 
2981 		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2982 			mutex_exit(&zp->z_lock);
2983 			ZFS_EXIT(zfsvfs);
2984 			return (SET_ERROR(EPERM));
2985 		}
2986 
2987 		if (need_policy == FALSE &&
2988 		    (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
2989 		    XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
2990 			need_policy = TRUE;
2991 		}
2992 	}
2993 
2994 	mutex_exit(&zp->z_lock);
2995 
2996 	if (mask & AT_MODE) {
2997 		if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
2998 			err = secpolicy_setid_setsticky_clear(vp, vap,
2999 			    &oldva, cr);
3000 			if (err) {
3001 				ZFS_EXIT(zfsvfs);
3002 				return (err);
3003 			}
3004 			trim_mask |= AT_MODE;
3005 		} else {
3006 			need_policy = TRUE;
3007 		}
3008 	}
3009 
3010 	if (need_policy) {
3011 		/*
3012 		 * If trim_mask is set then take ownership
3013 		 * has been granted or write_acl is present and user
3014 		 * has the ability to modify mode.  In that case remove
3015 		 * UID|GID and or MODE from mask so that
3016 		 * secpolicy_vnode_setattr() doesn't revoke it.
3017 		 */
3018 
3019 		if (trim_mask) {
3020 			saved_mask = vap->va_mask;
3021 			vap->va_mask &= ~trim_mask;
3022 		}
3023 		err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
3024 		    (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
3025 		if (err) {
3026 			ZFS_EXIT(zfsvfs);
3027 			return (err);
3028 		}
3029 
3030 		if (trim_mask)
3031 			vap->va_mask |= saved_mask;
3032 	}
3033 
3034 	/*
3035 	 * secpolicy_vnode_setattr, or take ownership may have
3036 	 * changed va_mask
3037 	 */
3038 	mask = vap->va_mask;
3039 
3040 	if ((mask & (AT_UID | AT_GID))) {
3041 		err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
3042 		    &xattr_obj, sizeof (xattr_obj));
3043 
3044 		if (err == 0 && xattr_obj) {
3045 			err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp);
3046 			if (err)
3047 				goto out2;
3048 		}
3049 		if (mask & AT_UID) {
3050 			new_uid = zfs_fuid_create(zfsvfs,
3051 			    (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
3052 			if (new_uid != zp->z_uid &&
3053 			    zfs_fuid_overquota(zfsvfs, B_FALSE, new_uid)) {
3054 				if (attrzp)
3055 					VN_RELE(ZTOV(attrzp));
3056 				err = SET_ERROR(EDQUOT);
3057 				goto out2;
3058 			}
3059 		}
3060 
3061 		if (mask & AT_GID) {
3062 			new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
3063 			    cr, ZFS_GROUP, &fuidp);
3064 			if (new_gid != zp->z_gid &&
3065 			    zfs_fuid_overquota(zfsvfs, B_TRUE, new_gid)) {
3066 				if (attrzp)
3067 					VN_RELE(ZTOV(attrzp));
3068 				err = SET_ERROR(EDQUOT);
3069 				goto out2;
3070 			}
3071 		}
3072 	}
3073 	tx = dmu_tx_create(zfsvfs->z_os);
3074 
3075 	if (mask & AT_MODE) {
3076 		uint64_t pmode = zp->z_mode;
3077 		uint64_t acl_obj;
3078 		new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
3079 
3080 		if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
3081 		    !(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
3082 			err = SET_ERROR(EPERM);
3083 			goto out;
3084 		}
3085 
3086 		if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode))
3087 			goto out;
3088 
3089 		mutex_enter(&zp->z_lock);
3090 		if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
3091 			/*
3092 			 * Are we upgrading ACL from old V0 format
3093 			 * to V1 format?
3094 			 */
3095 			if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
3096 			    zfs_znode_acl_version(zp) ==
3097 			    ZFS_ACL_VERSION_INITIAL) {
3098 				dmu_tx_hold_free(tx, acl_obj, 0,
3099 				    DMU_OBJECT_END);
3100 				dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3101 				    0, aclp->z_acl_bytes);
3102 			} else {
3103 				dmu_tx_hold_write(tx, acl_obj, 0,
3104 				    aclp->z_acl_bytes);
3105 			}
3106 		} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3107 			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3108 			    0, aclp->z_acl_bytes);
3109 		}
3110 		mutex_exit(&zp->z_lock);
3111 		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3112 	} else {
3113 		if ((mask & AT_XVATTR) &&
3114 		    XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3115 			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3116 		else
3117 			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3118 	}
3119 
3120 	if (attrzp) {
3121 		dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
3122 	}
3123 
3124 	fuid_dirtied = zfsvfs->z_fuid_dirty;
3125 	if (fuid_dirtied)
3126 		zfs_fuid_txhold(zfsvfs, tx);
3127 
3128 	zfs_sa_upgrade_txholds(tx, zp);
3129 
3130 	err = dmu_tx_assign(tx, TXG_WAIT);
3131 	if (err)
3132 		goto out;
3133 
3134 	count = 0;
3135 	/*
3136 	 * Set each attribute requested.
3137 	 * We group settings according to the locks they need to acquire.
3138 	 *
3139 	 * Note: you cannot set ctime directly, although it will be
3140 	 * updated as a side-effect of calling this function.
3141 	 */
3142 
3143 
3144 	if (mask & (AT_UID|AT_GID|AT_MODE))
3145 		mutex_enter(&zp->z_acl_lock);
3146 	mutex_enter(&zp->z_lock);
3147 
3148 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
3149 	    &zp->z_pflags, sizeof (zp->z_pflags));
3150 
3151 	if (attrzp) {
3152 		if (mask & (AT_UID|AT_GID|AT_MODE))
3153 			mutex_enter(&attrzp->z_acl_lock);
3154 		mutex_enter(&attrzp->z_lock);
3155 		SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3156 		    SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
3157 		    sizeof (attrzp->z_pflags));
3158 	}
3159 
3160 	if (mask & (AT_UID|AT_GID)) {
3161 
3162 		if (mask & AT_UID) {
3163 			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
3164 			    &new_uid, sizeof (new_uid));
3165 			zp->z_uid = new_uid;
3166 			if (attrzp) {
3167 				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3168 				    SA_ZPL_UID(zfsvfs), NULL, &new_uid,
3169 				    sizeof (new_uid));
3170 				attrzp->z_uid = new_uid;
3171 			}
3172 		}
3173 
3174 		if (mask & AT_GID) {
3175 			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
3176 			    NULL, &new_gid, sizeof (new_gid));
3177 			zp->z_gid = new_gid;
3178 			if (attrzp) {
3179 				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3180 				    SA_ZPL_GID(zfsvfs), NULL, &new_gid,
3181 				    sizeof (new_gid));
3182 				attrzp->z_gid = new_gid;
3183 			}
3184 		}
3185 		if (!(mask & AT_MODE)) {
3186 			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
3187 			    NULL, &new_mode, sizeof (new_mode));
3188 			new_mode = zp->z_mode;
3189 		}
3190 		err = zfs_acl_chown_setattr(zp);
3191 		ASSERT(err == 0);
3192 		if (attrzp) {
3193 			err = zfs_acl_chown_setattr(attrzp);
3194 			ASSERT(err == 0);
3195 		}
3196 	}
3197 
3198 	if (mask & AT_MODE) {
3199 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
3200 		    &new_mode, sizeof (new_mode));
3201 		zp->z_mode = new_mode;
3202 		ASSERT3U((uintptr_t)aclp, !=, NULL);
3203 		err = zfs_aclset_common(zp, aclp, cr, tx);
3204 		ASSERT0(err);
3205 		if (zp->z_acl_cached)
3206 			zfs_acl_free(zp->z_acl_cached);
3207 		zp->z_acl_cached = aclp;
3208 		aclp = NULL;
3209 	}
3210 
3211 
3212 	if (mask & AT_ATIME) {
3213 		ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
3214 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
3215 		    &zp->z_atime, sizeof (zp->z_atime));
3216 	}
3217 
3218 	if (mask & AT_MTIME) {
3219 		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
3220 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
3221 		    mtime, sizeof (mtime));
3222 	}
3223 
3224 	/* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
3225 	if (mask & AT_SIZE && !(mask & AT_MTIME)) {
3226 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
3227 		    NULL, mtime, sizeof (mtime));
3228 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3229 		    &ctime, sizeof (ctime));
3230 		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
3231 		    B_TRUE);
3232 	} else if (mask != 0) {
3233 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3234 		    &ctime, sizeof (ctime));
3235 		zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
3236 		    B_TRUE);
3237 		if (attrzp) {
3238 			SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3239 			    SA_ZPL_CTIME(zfsvfs), NULL,
3240 			    &ctime, sizeof (ctime));
3241 			zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
3242 			    mtime, ctime, B_TRUE);
3243 		}
3244 	}
3245 	/*
3246 	 * Do this after setting timestamps to prevent timestamp
3247 	 * update from toggling bit
3248 	 */
3249 
3250 	if (xoap && (mask & AT_XVATTR)) {
3251 
3252 		/*
3253 		 * restore trimmed off masks
3254 		 * so that return masks can be set for caller.
3255 		 */
3256 
3257 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
3258 			XVA_SET_REQ(xvap, XAT_APPENDONLY);
3259 		}
3260 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
3261 			XVA_SET_REQ(xvap, XAT_NOUNLINK);
3262 		}
3263 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
3264 			XVA_SET_REQ(xvap, XAT_IMMUTABLE);
3265 		}
3266 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
3267 			XVA_SET_REQ(xvap, XAT_NODUMP);
3268 		}
3269 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
3270 			XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
3271 		}
3272 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
3273 			XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
3274 		}
3275 
3276 		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3277 			ASSERT(vp->v_type == VREG);
3278 
3279 		zfs_xvattr_set(zp, xvap, tx);
3280 	}
3281 
3282 	if (fuid_dirtied)
3283 		zfs_fuid_sync(zfsvfs, tx);
3284 
3285 	if (mask != 0)
3286 		zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
3287 
3288 	mutex_exit(&zp->z_lock);
3289 	if (mask & (AT_UID|AT_GID|AT_MODE))
3290 		mutex_exit(&zp->z_acl_lock);
3291 
3292 	if (attrzp) {
3293 		if (mask & (AT_UID|AT_GID|AT_MODE))
3294 			mutex_exit(&attrzp->z_acl_lock);
3295 		mutex_exit(&attrzp->z_lock);
3296 	}
3297 out:
3298 	if (err == 0 && attrzp) {
3299 		err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
3300 		    xattr_count, tx);
3301 		ASSERT(err2 == 0);
3302 	}
3303 
3304 	if (attrzp)
3305 		VN_RELE(ZTOV(attrzp));
3306 
3307 	if (aclp)
3308 		zfs_acl_free(aclp);
3309 
3310 	if (fuidp) {
3311 		zfs_fuid_info_free(fuidp);
3312 		fuidp = NULL;
3313 	}
3314 
3315 	if (err) {
3316 		dmu_tx_abort(tx);
3317 		if (err == ERESTART)
3318 			goto top;
3319 	} else {
3320 		err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
3321 		dmu_tx_commit(tx);
3322 	}
3323 
3324 out2:
3325 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3326 		zil_commit(zilog, 0);
3327 
3328 	ZFS_EXIT(zfsvfs);
3329 	return (err);
3330 }
3331 
3332 typedef struct zfs_zlock {
3333 	krwlock_t	*zl_rwlock;	/* lock we acquired */
3334 	znode_t		*zl_znode;	/* znode we held */
3335 	struct zfs_zlock *zl_next;	/* next in list */
3336 } zfs_zlock_t;
3337 
3338 /*
3339  * Drop locks and release vnodes that were held by zfs_rename_lock().
3340  */
3341 static void
3342 zfs_rename_unlock(zfs_zlock_t **zlpp)
3343 {
3344 	zfs_zlock_t *zl;
3345 
3346 	while ((zl = *zlpp) != NULL) {
3347 		if (zl->zl_znode != NULL)
3348 			VN_RELE(ZTOV(zl->zl_znode));
3349 		rw_exit(zl->zl_rwlock);
3350 		*zlpp = zl->zl_next;
3351 		kmem_free(zl, sizeof (*zl));
3352 	}
3353 }
3354 
3355 /*
3356  * Search back through the directory tree, using the ".." entries.
3357  * Lock each directory in the chain to prevent concurrent renames.
3358  * Fail any attempt to move a directory into one of its own descendants.
3359  * XXX - z_parent_lock can overlap with map or grow locks
3360  */
3361 static int
3362 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
3363 {
3364 	zfs_zlock_t	*zl;
3365 	znode_t		*zp = tdzp;
3366 	uint64_t	rootid = zp->z_zfsvfs->z_root;
3367 	uint64_t	oidp = zp->z_id;
3368 	krwlock_t	*rwlp = &szp->z_parent_lock;
3369 	krw_t		rw = RW_WRITER;
3370 
3371 	/*
3372 	 * First pass write-locks szp and compares to zp->z_id.
3373 	 * Later passes read-lock zp and compare to zp->z_parent.
3374 	 */
3375 	do {
3376 		if (!rw_tryenter(rwlp, rw)) {
3377 			/*
3378 			 * Another thread is renaming in this path.
3379 			 * Note that if we are a WRITER, we don't have any
3380 			 * parent_locks held yet.
3381 			 */
3382 			if (rw == RW_READER && zp->z_id > szp->z_id) {
3383 				/*
3384 				 * Drop our locks and restart
3385 				 */
3386 				zfs_rename_unlock(&zl);
3387 				*zlpp = NULL;
3388 				zp = tdzp;
3389 				oidp = zp->z_id;
3390 				rwlp = &szp->z_parent_lock;
3391 				rw = RW_WRITER;
3392 				continue;
3393 			} else {
3394 				/*
3395 				 * Wait for other thread to drop its locks
3396 				 */
3397 				rw_enter(rwlp, rw);
3398 			}
3399 		}
3400 
3401 		zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
3402 		zl->zl_rwlock = rwlp;
3403 		zl->zl_znode = NULL;
3404 		zl->zl_next = *zlpp;
3405 		*zlpp = zl;
3406 
3407 		if (oidp == szp->z_id)		/* We're a descendant of szp */
3408 			return (SET_ERROR(EINVAL));
3409 
3410 		if (oidp == rootid)		/* We've hit the top */
3411 			return (0);
3412 
3413 		if (rw == RW_READER) {		/* i.e. not the first pass */
3414 			int error = zfs_zget(zp->z_zfsvfs, oidp, &zp);
3415 			if (error)
3416 				return (error);
3417 			zl->zl_znode = zp;
3418 		}
3419 		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zp->z_zfsvfs),
3420 		    &oidp, sizeof (oidp));
3421 		rwlp = &zp->z_parent_lock;
3422 		rw = RW_READER;
3423 
3424 	} while (zp->z_id != sdzp->z_id);
3425 
3426 	return (0);
3427 }
3428 
3429 /*
3430  * Move an entry from the provided source directory to the target
3431  * directory.  Change the entry name as indicated.
3432  *
3433  *	IN:	sdvp	- Source directory containing the "old entry".
3434  *		snm	- Old entry name.
3435  *		tdvp	- Target directory to contain the "new entry".
3436  *		tnm	- New entry name.
3437  *		cr	- credentials of caller.
3438  *		ct	- caller context
3439  *		flags	- case flags
3440  *
3441  *	RETURN:	0 on success, error code on failure.
3442  *
3443  * Timestamps:
3444  *	sdvp,tdvp - ctime|mtime updated
3445  */
3446 /*ARGSUSED*/
3447 static int
3448 zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
3449     caller_context_t *ct, int flags)
3450 {
3451 	znode_t		*tdzp, *szp, *tzp;
3452 	znode_t		*sdzp = VTOZ(sdvp);
3453 	zfsvfs_t	*zfsvfs = sdzp->z_zfsvfs;
3454 	zilog_t		*zilog;
3455 	vnode_t		*realvp;
3456 	zfs_dirlock_t	*sdl, *tdl;
3457 	dmu_tx_t	*tx;
3458 	zfs_zlock_t	*zl;
3459 	int		cmp, serr, terr;
3460 	int		error = 0;
3461 	int		zflg = 0;
3462 	boolean_t	waited = B_FALSE;
3463 
3464 	ZFS_ENTER(zfsvfs);
3465 	ZFS_VERIFY_ZP(sdzp);
3466 	zilog = zfsvfs->z_log;
3467 
3468 	/*
3469 	 * Make sure we have the real vp for the target directory.
3470 	 */
3471 	if (VOP_REALVP(tdvp, &realvp, ct) == 0)
3472 		tdvp = realvp;
3473 
3474 	tdzp = VTOZ(tdvp);
3475 	ZFS_VERIFY_ZP(tdzp);
3476 
3477 	/*
3478 	 * We check z_zfsvfs rather than v_vfsp here, because snapshots and the
3479 	 * ctldir appear to have the same v_vfsp.
3480 	 */
3481 	if (tdzp->z_zfsvfs != zfsvfs || zfsctl_is_node(tdvp)) {
3482 		ZFS_EXIT(zfsvfs);
3483 		return (SET_ERROR(EXDEV));
3484 	}
3485 
3486 	if (zfsvfs->z_utf8 && u8_validate(tnm,
3487 	    strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3488 		ZFS_EXIT(zfsvfs);
3489 		return (SET_ERROR(EILSEQ));
3490 	}
3491 
3492 	if (flags & FIGNORECASE)
3493 		zflg |= ZCILOOK;
3494 
3495 top:
3496 	szp = NULL;
3497 	tzp = NULL;
3498 	zl = NULL;
3499 
3500 	/*
3501 	 * This is to prevent the creation of links into attribute space
3502 	 * by renaming a linked file into/outof an attribute directory.
3503 	 * See the comment in zfs_link() for why this is considered bad.
3504 	 */
3505 	if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3506 		ZFS_EXIT(zfsvfs);
3507 		return (SET_ERROR(EINVAL));
3508 	}
3509 
3510 	/*
3511 	 * Lock source and target directory entries.  To prevent deadlock,
3512 	 * a lock ordering must be defined.  We lock the directory with
3513 	 * the smallest object id first, or if it's a tie, the one with
3514 	 * the lexically first name.
3515 	 */
3516 	if (sdzp->z_id < tdzp->z_id) {
3517 		cmp = -1;
3518 	} else if (sdzp->z_id > tdzp->z_id) {
3519 		cmp = 1;
3520 	} else {
3521 		/*
3522 		 * First compare the two name arguments without
3523 		 * considering any case folding.
3524 		 */
3525 		int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
3526 
3527 		cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
3528 		ASSERT(error == 0 || !zfsvfs->z_utf8);
3529 		if (cmp == 0) {
3530 			/*
3531 			 * POSIX: "If the old argument and the new argument
3532 			 * both refer to links to the same existing file,
3533 			 * the rename() function shall return successfully
3534 			 * and perform no other action."
3535 			 */
3536 			ZFS_EXIT(zfsvfs);
3537 			return (0);
3538 		}
3539 		/*
3540 		 * If the file system is case-folding, then we may
3541 		 * have some more checking to do.  A case-folding file
3542 		 * system is either supporting mixed case sensitivity
3543 		 * access or is completely case-insensitive.  Note
3544 		 * that the file system is always case preserving.
3545 		 *
3546 		 * In mixed sensitivity mode case sensitive behavior
3547 		 * is the default.  FIGNORECASE must be used to
3548 		 * explicitly request case insensitive behavior.
3549 		 *
3550 		 * If the source and target names provided differ only
3551 		 * by case (e.g., a request to rename 'tim' to 'Tim'),
3552 		 * we will treat this as a special case in the
3553 		 * case-insensitive mode: as long as the source name
3554 		 * is an exact match, we will allow this to proceed as
3555 		 * a name-change request.
3556 		 */
3557 		if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
3558 		    (zfsvfs->z_case == ZFS_CASE_MIXED &&
3559 		    flags & FIGNORECASE)) &&
3560 		    u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
3561 		    &error) == 0) {
3562 			/*
3563 			 * case preserving rename request, require exact
3564 			 * name matches
3565 			 */
3566 			zflg |= ZCIEXACT;
3567 			zflg &= ~ZCILOOK;
3568 		}
3569 	}
3570 
3571 	/*
3572 	 * If the source and destination directories are the same, we should
3573 	 * grab the z_name_lock of that directory only once.
3574 	 */
3575 	if (sdzp == tdzp) {
3576 		zflg |= ZHAVELOCK;
3577 		rw_enter(&sdzp->z_name_lock, RW_READER);
3578 	}
3579 
3580 	if (cmp < 0) {
3581 		serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
3582 		    ZEXISTS | zflg, NULL, NULL);
3583 		terr = zfs_dirent_lock(&tdl,
3584 		    tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
3585 	} else {
3586 		terr = zfs_dirent_lock(&tdl,
3587 		    tdzp, tnm, &tzp, zflg, NULL, NULL);
3588 		serr = zfs_dirent_lock(&sdl,
3589 		    sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
3590 		    NULL, NULL);
3591 	}
3592 
3593 	if (serr) {
3594 		/*
3595 		 * Source entry invalid or not there.
3596 		 */
3597 		if (!terr) {
3598 			zfs_dirent_unlock(tdl);
3599 			if (tzp)
3600 				VN_RELE(ZTOV(tzp));
3601 		}
3602 
3603 		if (sdzp == tdzp)
3604 			rw_exit(&sdzp->z_name_lock);
3605 
3606 		if (strcmp(snm, "..") == 0)
3607 			serr = SET_ERROR(EINVAL);
3608 		ZFS_EXIT(zfsvfs);
3609 		return (serr);
3610 	}
3611 	if (terr) {
3612 		zfs_dirent_unlock(sdl);
3613 		VN_RELE(ZTOV(szp));
3614 
3615 		if (sdzp == tdzp)
3616 			rw_exit(&sdzp->z_name_lock);
3617 
3618 		if (strcmp(tnm, "..") == 0)
3619 			terr = SET_ERROR(EINVAL);
3620 		ZFS_EXIT(zfsvfs);
3621 		return (terr);
3622 	}
3623 
3624 	/*
3625 	 * Must have write access at the source to remove the old entry
3626 	 * and write access at the target to create the new entry.
3627 	 * Note that if target and source are the same, this can be
3628 	 * done in a single check.
3629 	 */
3630 
3631 	if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr))
3632 		goto out;
3633 
3634 	if (ZTOV(szp)->v_type == VDIR) {
3635 		/*
3636 		 * Check to make sure rename is valid.
3637 		 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3638 		 */
3639 		if (error = zfs_rename_lock(szp, tdzp, sdzp, &zl))
3640 			goto out;
3641 	}
3642 
3643 	/*
3644 	 * Does target exist?
3645 	 */
3646 	if (tzp) {
3647 		/*
3648 		 * Source and target must be the same type.
3649 		 */
3650 		if (ZTOV(szp)->v_type == VDIR) {
3651 			if (ZTOV(tzp)->v_type != VDIR) {
3652 				error = SET_ERROR(ENOTDIR);
3653 				goto out;
3654 			}
3655 		} else {
3656 			if (ZTOV(tzp)->v_type == VDIR) {
3657 				error = SET_ERROR(EISDIR);
3658 				goto out;
3659 			}
3660 		}
3661 		/*
3662 		 * POSIX dictates that when the source and target
3663 		 * entries refer to the same file object, rename
3664 		 * must do nothing and exit without error.
3665 		 */
3666 		if (szp->z_id == tzp->z_id) {
3667 			error = 0;
3668 			goto out;
3669 		}
3670 	}
3671 
3672 	vnevent_rename_src(ZTOV(szp), sdvp, snm, ct);
3673 	if (tzp)
3674 		vnevent_rename_dest(ZTOV(tzp), tdvp, tnm, ct);
3675 
3676 	/*
3677 	 * notify the target directory if it is not the same
3678 	 * as source directory.
3679 	 */
3680 	if (tdvp != sdvp) {
3681 		vnevent_rename_dest_dir(tdvp, ct);
3682 	}
3683 
3684 	tx = dmu_tx_create(zfsvfs->z_os);
3685 	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3686 	dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3687 	dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3688 	dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3689 	if (sdzp != tdzp) {
3690 		dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3691 		zfs_sa_upgrade_txholds(tx, tdzp);
3692 	}
3693 	if (tzp) {
3694 		dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3695 		zfs_sa_upgrade_txholds(tx, tzp);
3696 	}
3697 
3698 	zfs_sa_upgrade_txholds(tx, szp);
3699 	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3700 	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
3701 	if (error) {
3702 		if (zl != NULL)
3703 			zfs_rename_unlock(&zl);
3704 		zfs_dirent_unlock(sdl);
3705 		zfs_dirent_unlock(tdl);
3706 
3707 		if (sdzp == tdzp)
3708 			rw_exit(&sdzp->z_name_lock);
3709 
3710 		VN_RELE(ZTOV(szp));
3711 		if (tzp)
3712 			VN_RELE(ZTOV(tzp));
3713 		if (error == ERESTART) {
3714 			waited = B_TRUE;
3715 			dmu_tx_wait(tx);
3716 			dmu_tx_abort(tx);
3717 			goto top;
3718 		}
3719 		dmu_tx_abort(tx);
3720 		ZFS_EXIT(zfsvfs);
3721 		return (error);
3722 	}
3723 
3724 	if (tzp)	/* Attempt to remove the existing target */
3725 		error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
3726 
3727 	if (error == 0) {
3728 		error = zfs_link_create(tdl, szp, tx, ZRENAMING);
3729 		if (error == 0) {
3730 			szp->z_pflags |= ZFS_AV_MODIFIED;
3731 
3732 			error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
3733 			    (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3734 			ASSERT0(error);
3735 
3736 			error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
3737 			if (error == 0) {
3738 				zfs_log_rename(zilog, tx, TX_RENAME |
3739 				    (flags & FIGNORECASE ? TX_CI : 0), sdzp,
3740 				    sdl->dl_name, tdzp, tdl->dl_name, szp);
3741 
3742 				/*
3743 				 * Update path information for the target vnode
3744 				 */
3745 				vn_renamepath(tdvp, ZTOV(szp), tnm,
3746 				    strlen(tnm));
3747 			} else {
3748 				/*
3749 				 * At this point, we have successfully created
3750 				 * the target name, but have failed to remove
3751 				 * the source name.  Since the create was done
3752 				 * with the ZRENAMING flag, there are
3753 				 * complications; for one, the link count is
3754 				 * wrong.  The easiest way to deal with this
3755 				 * is to remove the newly created target, and
3756 				 * return the original error.  This must
3757 				 * succeed; fortunately, it is very unlikely to
3758 				 * fail, since we just created it.
3759 				 */
3760 				VERIFY3U(zfs_link_destroy(tdl, szp, tx,
3761 				    ZRENAMING, NULL), ==, 0);
3762 			}
3763 		}
3764 	}
3765 
3766 	dmu_tx_commit(tx);
3767 out:
3768 	if (zl != NULL)
3769 		zfs_rename_unlock(&zl);
3770 
3771 	zfs_dirent_unlock(sdl);
3772 	zfs_dirent_unlock(tdl);
3773 
3774 	if (sdzp == tdzp)
3775 		rw_exit(&sdzp->z_name_lock);
3776 
3777 
3778 	VN_RELE(ZTOV(szp));
3779 	if (tzp)
3780 		VN_RELE(ZTOV(tzp));
3781 
3782 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3783 		zil_commit(zilog, 0);
3784 
3785 	ZFS_EXIT(zfsvfs);
3786 	return (error);
3787 }
3788 
3789 /*
3790  * Insert the indicated symbolic reference entry into the directory.
3791  *
3792  *	IN:	dvp	- Directory to contain new symbolic link.
3793  *		link	- Name for new symlink entry.
3794  *		vap	- Attributes of new entry.
3795  *		cr	- credentials of caller.
3796  *		ct	- caller context
3797  *		flags	- case flags
3798  *
3799  *	RETURN:	0 on success, error code on failure.
3800  *
3801  * Timestamps:
3802  *	dvp - ctime|mtime updated
3803  */
3804 /*ARGSUSED*/
3805 static int
3806 zfs_symlink(vnode_t *dvp, char *name, vattr_t *vap, char *link, cred_t *cr,
3807     caller_context_t *ct, int flags)
3808 {
3809 	znode_t		*zp, *dzp = VTOZ(dvp);
3810 	zfs_dirlock_t	*dl;
3811 	dmu_tx_t	*tx;
3812 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
3813 	zilog_t		*zilog;
3814 	uint64_t	len = strlen(link);
3815 	int		error;
3816 	int		zflg = ZNEW;
3817 	zfs_acl_ids_t	acl_ids;
3818 	boolean_t	fuid_dirtied;
3819 	uint64_t	txtype = TX_SYMLINK;
3820 	boolean_t	waited = B_FALSE;
3821 
3822 	ASSERT(vap->va_type == VLNK);
3823 
3824 	ZFS_ENTER(zfsvfs);
3825 	ZFS_VERIFY_ZP(dzp);
3826 	zilog = zfsvfs->z_log;
3827 
3828 	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
3829 	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3830 		ZFS_EXIT(zfsvfs);
3831 		return (SET_ERROR(EILSEQ));
3832 	}
3833 	if (flags & FIGNORECASE)
3834 		zflg |= ZCILOOK;
3835 
3836 	if (len > MAXPATHLEN) {
3837 		ZFS_EXIT(zfsvfs);
3838 		return (SET_ERROR(ENAMETOOLONG));
3839 	}
3840 
3841 	if ((error = zfs_acl_ids_create(dzp, 0,
3842 	    vap, cr, NULL, &acl_ids)) != 0) {
3843 		ZFS_EXIT(zfsvfs);
3844 		return (error);
3845 	}
3846 top:
3847 	/*
3848 	 * Attempt to lock directory; fail if entry already exists.
3849 	 */
3850 	error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
3851 	if (error) {
3852 		zfs_acl_ids_free(&acl_ids);
3853 		ZFS_EXIT(zfsvfs);
3854 		return (error);
3855 	}
3856 
3857 	if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
3858 		zfs_acl_ids_free(&acl_ids);
3859 		zfs_dirent_unlock(dl);
3860 		ZFS_EXIT(zfsvfs);
3861 		return (error);
3862 	}
3863 
3864 	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
3865 		zfs_acl_ids_free(&acl_ids);
3866 		zfs_dirent_unlock(dl);
3867 		ZFS_EXIT(zfsvfs);
3868 		return (SET_ERROR(EDQUOT));
3869 	}
3870 	tx = dmu_tx_create(zfsvfs->z_os);
3871 	fuid_dirtied = zfsvfs->z_fuid_dirty;
3872 	dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
3873 	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3874 	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
3875 	    ZFS_SA_BASE_ATTR_SIZE + len);
3876 	dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
3877 	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3878 		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
3879 		    acl_ids.z_aclp->z_acl_bytes);
3880 	}
3881 	if (fuid_dirtied)
3882 		zfs_fuid_txhold(zfsvfs, tx);
3883 	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
3884 	if (error) {
3885 		zfs_dirent_unlock(dl);
3886 		if (error == ERESTART) {
3887 			waited = B_TRUE;
3888 			dmu_tx_wait(tx);
3889 			dmu_tx_abort(tx);
3890 			goto top;
3891 		}
3892 		zfs_acl_ids_free(&acl_ids);
3893 		dmu_tx_abort(tx);
3894 		ZFS_EXIT(zfsvfs);
3895 		return (error);
3896 	}
3897 
3898 	/*
3899 	 * Create a new object for the symlink.
3900 	 * for version 4 ZPL datsets the symlink will be an SA attribute
3901 	 */
3902 	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
3903 
3904 	if (fuid_dirtied)
3905 		zfs_fuid_sync(zfsvfs, tx);
3906 
3907 	mutex_enter(&zp->z_lock);
3908 	if (zp->z_is_sa)
3909 		error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
3910 		    link, len, tx);
3911 	else
3912 		zfs_sa_symlink(zp, link, len, tx);
3913 	mutex_exit(&zp->z_lock);
3914 
3915 	zp->z_size = len;
3916 	(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
3917 	    &zp->z_size, sizeof (zp->z_size), tx);
3918 	/*
3919 	 * Insert the new object into the directory.
3920 	 */
3921 	(void) zfs_link_create(dl, zp, tx, ZNEW);
3922 
3923 	if (flags & FIGNORECASE)
3924 		txtype |= TX_CI;
3925 	zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
3926 
3927 	zfs_acl_ids_free(&acl_ids);
3928 
3929 	dmu_tx_commit(tx);
3930 
3931 	zfs_dirent_unlock(dl);
3932 
3933 	VN_RELE(ZTOV(zp));
3934 
3935 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3936 		zil_commit(zilog, 0);
3937 
3938 	ZFS_EXIT(zfsvfs);
3939 	return (error);
3940 }
3941 
3942 /*
3943  * Return, in the buffer contained in the provided uio structure,
3944  * the symbolic path referred to by vp.
3945  *
3946  *	IN:	vp	- vnode of symbolic link.
3947  *		uio	- structure to contain the link path.
3948  *		cr	- credentials of caller.
3949  *		ct	- caller context
3950  *
3951  *	OUT:	uio	- structure containing the link path.
3952  *
3953  *	RETURN:	0 on success, error code on failure.
3954  *
3955  * Timestamps:
3956  *	vp - atime updated
3957  */
3958 /* ARGSUSED */
3959 static int
3960 zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct)
3961 {
3962 	znode_t		*zp = VTOZ(vp);
3963 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
3964 	int		error;
3965 
3966 	ZFS_ENTER(zfsvfs);
3967 	ZFS_VERIFY_ZP(zp);
3968 
3969 	mutex_enter(&zp->z_lock);
3970 	if (zp->z_is_sa)
3971 		error = sa_lookup_uio(zp->z_sa_hdl,
3972 		    SA_ZPL_SYMLINK(zfsvfs), uio);
3973 	else
3974 		error = zfs_sa_readlink(zp, uio);
3975 	mutex_exit(&zp->z_lock);
3976 
3977 	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
3978 
3979 	ZFS_EXIT(zfsvfs);
3980 	return (error);
3981 }
3982 
3983 /*
3984  * Insert a new entry into directory tdvp referencing svp.
3985  *
3986  *	IN:	tdvp	- Directory to contain new entry.
3987  *		svp	- vnode of new entry.
3988  *		name	- name of new entry.
3989  *		cr	- credentials of caller.
3990  *		ct	- caller context
3991  *
3992  *	RETURN:	0 on success, error code on failure.
3993  *
3994  * Timestamps:
3995  *	tdvp - ctime|mtime updated
3996  *	 svp - ctime updated
3997  */
3998 /* ARGSUSED */
3999 static int
4000 zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr,
4001     caller_context_t *ct, int flags)
4002 {
4003 	znode_t		*dzp = VTOZ(tdvp);
4004 	znode_t		*tzp, *szp;
4005 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
4006 	zilog_t		*zilog;
4007 	zfs_dirlock_t	*dl;
4008 	dmu_tx_t	*tx;
4009 	vnode_t		*realvp;
4010 	int		error;
4011 	int		zf = ZNEW;
4012 	uint64_t	parent;
4013 	uid_t		owner;
4014 	boolean_t	waited = B_FALSE;
4015 
4016 	ASSERT(tdvp->v_type == VDIR);
4017 
4018 	ZFS_ENTER(zfsvfs);
4019 	ZFS_VERIFY_ZP(dzp);
4020 	zilog = zfsvfs->z_log;
4021 
4022 	if (VOP_REALVP(svp, &realvp, ct) == 0)
4023 		svp = realvp;
4024 
4025 	/*
4026 	 * POSIX dictates that we return EPERM here.
4027 	 * Better choices include ENOTSUP or EISDIR.
4028 	 */
4029 	if (svp->v_type == VDIR) {
4030 		ZFS_EXIT(zfsvfs);
4031 		return (SET_ERROR(EPERM));
4032 	}
4033 
4034 	szp = VTOZ(svp);
4035 	ZFS_VERIFY_ZP(szp);
4036 
4037 	/*
4038 	 * We check z_zfsvfs rather than v_vfsp here, because snapshots and the
4039 	 * ctldir appear to have the same v_vfsp.
4040 	 */
4041 	if (szp->z_zfsvfs != zfsvfs || zfsctl_is_node(svp)) {
4042 		ZFS_EXIT(zfsvfs);
4043 		return (SET_ERROR(EXDEV));
4044 	}
4045 
4046 	/* Prevent links to .zfs/shares files */
4047 
4048 	if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
4049 	    &parent, sizeof (uint64_t))) != 0) {
4050 		ZFS_EXIT(zfsvfs);
4051 		return (error);
4052 	}
4053 	if (parent == zfsvfs->z_shares_dir) {
4054 		ZFS_EXIT(zfsvfs);
4055 		return (SET_ERROR(EPERM));
4056 	}
4057 
4058 	if (zfsvfs->z_utf8 && u8_validate(name,
4059 	    strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
4060 		ZFS_EXIT(zfsvfs);
4061 		return (SET_ERROR(EILSEQ));
4062 	}
4063 	if (flags & FIGNORECASE)
4064 		zf |= ZCILOOK;
4065 
4066 	/*
4067 	 * We do not support links between attributes and non-attributes
4068 	 * because of the potential security risk of creating links
4069 	 * into "normal" file space in order to circumvent restrictions
4070 	 * imposed in attribute space.
4071 	 */
4072 	if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
4073 		ZFS_EXIT(zfsvfs);
4074 		return (SET_ERROR(EINVAL));
4075 	}
4076 
4077 
4078 	owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER);
4079 	if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
4080 		ZFS_EXIT(zfsvfs);
4081 		return (SET_ERROR(EPERM));
4082 	}
4083 
4084 	if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
4085 		ZFS_EXIT(zfsvfs);
4086 		return (error);
4087 	}
4088 
4089 top:
4090 	/*
4091 	 * Attempt to lock directory; fail if entry already exists.
4092 	 */
4093 	error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
4094 	if (error) {
4095 		ZFS_EXIT(zfsvfs);
4096 		return (error);
4097 	}
4098 
4099 	tx = dmu_tx_create(zfsvfs->z_os);
4100 	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
4101 	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4102 	zfs_sa_upgrade_txholds(tx, szp);
4103 	zfs_sa_upgrade_txholds(tx, dzp);
4104 	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
4105 	if (error) {
4106 		zfs_dirent_unlock(dl);
4107 		if (error == ERESTART) {
4108 			waited = B_TRUE;
4109 			dmu_tx_wait(tx);
4110 			dmu_tx_abort(tx);
4111 			goto top;
4112 		}
4113 		dmu_tx_abort(tx);
4114 		ZFS_EXIT(zfsvfs);
4115 		return (error);
4116 	}
4117 
4118 	error = zfs_link_create(dl, szp, tx, 0);
4119 
4120 	if (error == 0) {
4121 		uint64_t txtype = TX_LINK;
4122 		if (flags & FIGNORECASE)
4123 			txtype |= TX_CI;
4124 		zfs_log_link(zilog, tx, txtype, dzp, szp, name);
4125 	}
4126 
4127 	dmu_tx_commit(tx);
4128 
4129 	zfs_dirent_unlock(dl);
4130 
4131 	if (error == 0) {
4132 		vnevent_link(svp, ct);
4133 	}
4134 
4135 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4136 		zil_commit(zilog, 0);
4137 
4138 	ZFS_EXIT(zfsvfs);
4139 	return (error);
4140 }
4141 
4142 /*
4143  * zfs_null_putapage() is used when the file system has been force
4144  * unmounted. It just drops the pages.
4145  */
4146 /* ARGSUSED */
4147 static int
4148 zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
4149     size_t *lenp, int flags, cred_t *cr)
4150 {
4151 	pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR);
4152 	return (0);
4153 }
4154 
4155 /*
4156  * Push a page out to disk, klustering if possible.
4157  *
4158  *	IN:	vp	- file to push page to.
4159  *		pp	- page to push.
4160  *		flags	- additional flags.
4161  *		cr	- credentials of caller.
4162  *
4163  *	OUT:	offp	- start of range pushed.
4164  *		lenp	- len of range pushed.
4165  *
4166  *	RETURN:	0 on success, error code on failure.
4167  *
4168  * NOTE: callers must have locked the page to be pushed.  On
4169  * exit, the page (and all other pages in the kluster) must be
4170  * unlocked.
4171  */
4172 /* ARGSUSED */
4173 static int
4174 zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
4175     size_t *lenp, int flags, cred_t *cr)
4176 {
4177 	znode_t		*zp = VTOZ(vp);
4178 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4179 	dmu_tx_t	*tx;
4180 	u_offset_t	off, koff;
4181 	size_t		len, klen;
4182 	int		err;
4183 
4184 	off = pp->p_offset;
4185 	len = PAGESIZE;
4186 	/*
4187 	 * If our blocksize is bigger than the page size, try to kluster
4188 	 * multiple pages so that we write a full block (thus avoiding
4189 	 * a read-modify-write).
4190 	 */
4191 	if (off < zp->z_size && zp->z_blksz > PAGESIZE) {
4192 		klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE);
4193 		koff = ISP2(klen) ? P2ALIGN(off, (u_offset_t)klen) : 0;
4194 		ASSERT(koff <= zp->z_size);
4195 		if (koff + klen > zp->z_size)
4196 			klen = P2ROUNDUP(zp->z_size - koff, (uint64_t)PAGESIZE);
4197 		pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags);
4198 	}
4199 	ASSERT3U(btop(len), ==, btopr(len));
4200 
4201 	/*
4202 	 * Can't push pages past end-of-file.
4203 	 */
4204 	if (off >= zp->z_size) {
4205 		/* ignore all pages */
4206 		err = 0;
4207 		goto out;
4208 	} else if (off + len > zp->z_size) {
4209 		int npages = btopr(zp->z_size - off);
4210 		page_t *trunc;
4211 
4212 		page_list_break(&pp, &trunc, npages);
4213 		/* ignore pages past end of file */
4214 		if (trunc)
4215 			pvn_write_done(trunc, flags);
4216 		len = zp->z_size - off;
4217 	}
4218 
4219 	if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
4220 	    zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
4221 		err = SET_ERROR(EDQUOT);
4222 		goto out;
4223 	}
4224 	tx = dmu_tx_create(zfsvfs->z_os);
4225 	dmu_tx_hold_write(tx, zp->z_id, off, len);
4226 
4227 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4228 	zfs_sa_upgrade_txholds(tx, zp);
4229 	err = dmu_tx_assign(tx, TXG_WAIT);
4230 	if (err != 0) {
4231 		dmu_tx_abort(tx);
4232 		goto out;
4233 	}
4234 
4235 	if (zp->z_blksz <= PAGESIZE) {
4236 		caddr_t va = zfs_map_page(pp, S_READ);
4237 		ASSERT3U(len, <=, PAGESIZE);
4238 		dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx);
4239 		zfs_unmap_page(pp, va);
4240 	} else {
4241 		err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx);
4242 	}
4243 
4244 	if (err == 0) {
4245 		uint64_t mtime[2], ctime[2];
4246 		sa_bulk_attr_t bulk[3];
4247 		int count = 0;
4248 
4249 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
4250 		    &mtime, 16);
4251 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
4252 		    &ctime, 16);
4253 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
4254 		    &zp->z_pflags, 8);
4255 		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
4256 		    B_TRUE);
4257 		zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
4258 	}
4259 	dmu_tx_commit(tx);
4260 
4261 out:
4262 	pvn_write_done(pp, (err ? B_ERROR : 0) | flags);
4263 	if (offp)
4264 		*offp = off;
4265 	if (lenp)
4266 		*lenp = len;
4267 
4268 	return (err);
4269 }
4270 
4271 /*
4272  * Copy the portion of the file indicated from pages into the file.
4273  * The pages are stored in a page list attached to the files vnode.
4274  *
4275  *	IN:	vp	- vnode of file to push page data to.
4276  *		off	- position in file to put data.
4277  *		len	- amount of data to write.
4278  *		flags	- flags to control the operation.
4279  *		cr	- credentials of caller.
4280  *		ct	- caller context.
4281  *
4282  *	RETURN:	0 on success, error code on failure.
4283  *
4284  * Timestamps:
4285  *	vp - ctime|mtime updated
4286  */
4287 /*ARGSUSED*/
4288 static int
4289 zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
4290     caller_context_t *ct)
4291 {
4292 	znode_t		*zp = VTOZ(vp);
4293 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4294 	page_t		*pp;
4295 	size_t		io_len;
4296 	u_offset_t	io_off;
4297 	uint_t		blksz;
4298 	rl_t		*rl;
4299 	int		error = 0;
4300 
4301 	ZFS_ENTER(zfsvfs);
4302 	ZFS_VERIFY_ZP(zp);
4303 
4304 	/*
4305 	 * There's nothing to do if no data is cached.
4306 	 */
4307 	if (!vn_has_cached_data(vp)) {
4308 		ZFS_EXIT(zfsvfs);
4309 		return (0);
4310 	}
4311 
4312 	/*
4313 	 * Align this request to the file block size in case we kluster.
4314 	 * XXX - this can result in pretty aggresive locking, which can
4315 	 * impact simultanious read/write access.  One option might be
4316 	 * to break up long requests (len == 0) into block-by-block
4317 	 * operations to get narrower locking.
4318 	 */
4319 	blksz = zp->z_blksz;
4320 	if (ISP2(blksz))
4321 		io_off = P2ALIGN_TYPED(off, blksz, u_offset_t);
4322 	else
4323 		io_off = 0;
4324 	if (len > 0 && ISP2(blksz))
4325 		io_len = P2ROUNDUP_TYPED(len + (off - io_off), blksz, size_t);
4326 	else
4327 		io_len = 0;
4328 
4329 	if (io_len == 0) {
4330 		/*
4331 		 * Search the entire vp list for pages >= io_off.
4332 		 */
4333 		rl = zfs_range_lock(zp, io_off, UINT64_MAX, RL_WRITER);
4334 		error = pvn_vplist_dirty(vp, io_off, zfs_putapage, flags, cr);
4335 		goto out;
4336 	}
4337 	rl = zfs_range_lock(zp, io_off, io_len, RL_WRITER);
4338 
4339 	if (off > zp->z_size) {
4340 		/* past end of file */
4341 		zfs_range_unlock(rl);
4342 		ZFS_EXIT(zfsvfs);
4343 		return (0);
4344 	}
4345 
4346 	len = MIN(io_len, P2ROUNDUP(zp->z_size, PAGESIZE) - io_off);
4347 
4348 	for (off = io_off; io_off < off + len; io_off += io_len) {
4349 		if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
4350 			pp = page_lookup(vp, io_off,
4351 			    (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED);
4352 		} else {
4353 			pp = page_lookup_nowait(vp, io_off,
4354 			    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
4355 		}
4356 
4357 		if (pp != NULL && pvn_getdirty(pp, flags)) {
4358 			int err;
4359 
4360 			/*
4361 			 * Found a dirty page to push
4362 			 */
4363 			err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr);
4364 			if (err)
4365 				error = err;
4366 		} else {
4367 			io_len = PAGESIZE;
4368 		}
4369 	}
4370 out:
4371 	zfs_range_unlock(rl);
4372 	if ((flags & B_ASYNC) == 0 || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4373 		zil_commit(zfsvfs->z_log, zp->z_id);
4374 	ZFS_EXIT(zfsvfs);
4375 	return (error);
4376 }
4377 
4378 /*ARGSUSED*/
4379 void
4380 zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
4381 {
4382 	znode_t	*zp = VTOZ(vp);
4383 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4384 	int error;
4385 
4386 	rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
4387 	if (zp->z_sa_hdl == NULL) {
4388 		/*
4389 		 * The fs has been unmounted, or we did a
4390 		 * suspend/resume and this file no longer exists.
4391 		 */
4392 		if (vn_has_cached_data(vp)) {
4393 			(void) pvn_vplist_dirty(vp, 0, zfs_null_putapage,
4394 			    B_INVAL, cr);
4395 		}
4396 
4397 		mutex_enter(&zp->z_lock);
4398 		mutex_enter(&vp->v_lock);
4399 		ASSERT(vp->v_count == 1);
4400 		vp->v_count = 0;
4401 		mutex_exit(&vp->v_lock);
4402 		mutex_exit(&zp->z_lock);
4403 		rw_exit(&zfsvfs->z_teardown_inactive_lock);
4404 		zfs_znode_free(zp);
4405 		return;
4406 	}
4407 
4408 	/*
4409 	 * Attempt to push any data in the page cache.  If this fails
4410 	 * we will get kicked out later in zfs_zinactive().
4411 	 */
4412 	if (vn_has_cached_data(vp)) {
4413 		(void) pvn_vplist_dirty(vp, 0, zfs_putapage, B_INVAL|B_ASYNC,
4414 		    cr);
4415 	}
4416 
4417 	if (zp->z_atime_dirty && zp->z_unlinked == 0) {
4418 		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
4419 
4420 		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4421 		zfs_sa_upgrade_txholds(tx, zp);
4422 		error = dmu_tx_assign(tx, TXG_WAIT);
4423 		if (error) {
4424 			dmu_tx_abort(tx);
4425 		} else {
4426 			mutex_enter(&zp->z_lock);
4427 			(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
4428 			    (void *)&zp->z_atime, sizeof (zp->z_atime), tx);
4429 			zp->z_atime_dirty = 0;
4430 			mutex_exit(&zp->z_lock);
4431 			dmu_tx_commit(tx);
4432 		}
4433 	}
4434 
4435 	zfs_zinactive(zp);
4436 	rw_exit(&zfsvfs->z_teardown_inactive_lock);
4437 }
4438 
4439 /*
4440  * Bounds-check the seek operation.
4441  *
4442  *	IN:	vp	- vnode seeking within
4443  *		ooff	- old file offset
4444  *		noffp	- pointer to new file offset
4445  *		ct	- caller context
4446  *
4447  *	RETURN:	0 on success, EINVAL if new offset invalid.
4448  */
4449 /* ARGSUSED */
4450 static int
4451 zfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp,
4452     caller_context_t *ct)
4453 {
4454 	if (vp->v_type == VDIR)
4455 		return (0);
4456 	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4457 }
4458 
4459 /*
4460  * Pre-filter the generic locking function to trap attempts to place
4461  * a mandatory lock on a memory mapped file.
4462  */
4463 static int
4464 zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset,
4465     flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct)
4466 {
4467 	znode_t *zp = VTOZ(vp);
4468 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4469 
4470 	ZFS_ENTER(zfsvfs);
4471 	ZFS_VERIFY_ZP(zp);
4472 
4473 	/*
4474 	 * We are following the UFS semantics with respect to mapcnt
4475 	 * here: If we see that the file is mapped already, then we will
4476 	 * return an error, but we don't worry about races between this
4477 	 * function and zfs_map().
4478 	 */
4479 	if (zp->z_mapcnt > 0 && MANDMODE(zp->z_mode)) {
4480 		ZFS_EXIT(zfsvfs);
4481 		return (SET_ERROR(EAGAIN));
4482 	}
4483 	ZFS_EXIT(zfsvfs);
4484 	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
4485 }
4486 
4487 /*
4488  * If we can't find a page in the cache, we will create a new page
4489  * and fill it with file data.  For efficiency, we may try to fill
4490  * multiple pages at once (klustering) to fill up the supplied page
4491  * list.  Note that the pages to be filled are held with an exclusive
4492  * lock to prevent access by other threads while they are being filled.
4493  */
4494 static int
4495 zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg,
4496     caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw)
4497 {
4498 	znode_t *zp = VTOZ(vp);
4499 	page_t *pp, *cur_pp;
4500 	objset_t *os = zp->z_zfsvfs->z_os;
4501 	u_offset_t io_off, total;
4502 	size_t io_len;
4503 	int err;
4504 
4505 	if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) {
4506 		/*
4507 		 * We only have a single page, don't bother klustering
4508 		 */
4509 		io_off = off;
4510 		io_len = PAGESIZE;
4511 		pp = page_create_va(vp, io_off, io_len,
4512 		    PG_EXCL | PG_WAIT, seg, addr);
4513 	} else {
4514 		/*
4515 		 * Try to find enough pages to fill the page list
4516 		 */
4517 		pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
4518 		    &io_len, off, plsz, 0);
4519 	}
4520 	if (pp == NULL) {
4521 		/*
4522 		 * The page already exists, nothing to do here.
4523 		 */
4524 		*pl = NULL;
4525 		return (0);
4526 	}
4527 
4528 	/*
4529 	 * Fill the pages in the kluster.
4530 	 */
4531 	cur_pp = pp;
4532 	for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
4533 		caddr_t va;
4534 
4535 		ASSERT3U(io_off, ==, cur_pp->p_offset);
4536 		va = zfs_map_page(cur_pp, S_WRITE);
4537 		err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
4538 		    DMU_READ_PREFETCH);
4539 		zfs_unmap_page(cur_pp, va);
4540 		if (err) {
4541 			/* On error, toss the entire kluster */
4542 			pvn_read_done(pp, B_ERROR);
4543 			/* convert checksum errors into IO errors */
4544 			if (err == ECKSUM)
4545 				err = SET_ERROR(EIO);
4546 			return (err);
4547 		}
4548 		cur_pp = cur_pp->p_next;
4549 	}
4550 
4551 	/*
4552 	 * Fill in the page list array from the kluster starting
4553 	 * from the desired offset `off'.
4554 	 * NOTE: the page list will always be null terminated.
4555 	 */
4556 	pvn_plist_init(pp, pl, plsz, off, io_len, rw);
4557 	ASSERT(pl == NULL || (*pl)->p_offset == off);
4558 
4559 	return (0);
4560 }
4561 
4562 /*
4563  * Return pointers to the pages for the file region [off, off + len]
4564  * in the pl array.  If plsz is greater than len, this function may
4565  * also return page pointers from after the specified region
4566  * (i.e. the region [off, off + plsz]).  These additional pages are
4567  * only returned if they are already in the cache, or were created as
4568  * part of a klustered read.
4569  *
4570  *	IN:	vp	- vnode of file to get data from.
4571  *		off	- position in file to get data from.
4572  *		len	- amount of data to retrieve.
4573  *		plsz	- length of provided page list.
4574  *		seg	- segment to obtain pages for.
4575  *		addr	- virtual address of fault.
4576  *		rw	- mode of created pages.
4577  *		cr	- credentials of caller.
4578  *		ct	- caller context.
4579  *
4580  *	OUT:	protp	- protection mode of created pages.
4581  *		pl	- list of pages created.
4582  *
4583  *	RETURN:	0 on success, error code on failure.
4584  *
4585  * Timestamps:
4586  *	vp - atime updated
4587  */
4588 /* ARGSUSED */
4589 static int
4590 zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
4591     page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
4592     enum seg_rw rw, cred_t *cr, caller_context_t *ct)
4593 {
4594 	znode_t		*zp = VTOZ(vp);
4595 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4596 	page_t		**pl0 = pl;
4597 	int		err = 0;
4598 
4599 	/* we do our own caching, faultahead is unnecessary */
4600 	if (pl == NULL)
4601 		return (0);
4602 	else if (len > plsz)
4603 		len = plsz;
4604 	else
4605 		len = P2ROUNDUP(len, PAGESIZE);
4606 	ASSERT(plsz >= len);
4607 
4608 	ZFS_ENTER(zfsvfs);
4609 	ZFS_VERIFY_ZP(zp);
4610 
4611 	if (protp)
4612 		*protp = PROT_ALL;
4613 
4614 	/*
4615 	 * Loop through the requested range [off, off + len) looking
4616 	 * for pages.  If we don't find a page, we will need to create
4617 	 * a new page and fill it with data from the file.
4618 	 */
4619 	while (len > 0) {
4620 		if (*pl = page_lookup(vp, off, SE_SHARED))
4621 			*(pl+1) = NULL;
4622 		else if (err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw))
4623 			goto out;
4624 		while (*pl) {
4625 			ASSERT3U((*pl)->p_offset, ==, off);
4626 			off += PAGESIZE;
4627 			addr += PAGESIZE;
4628 			if (len > 0) {
4629 				ASSERT3U(len, >=, PAGESIZE);
4630 				len -= PAGESIZE;
4631 			}
4632 			ASSERT3U(plsz, >=, PAGESIZE);
4633 			plsz -= PAGESIZE;
4634 			pl++;
4635 		}
4636 	}
4637 
4638 	/*
4639 	 * Fill out the page array with any pages already in the cache.
4640 	 */
4641 	while (plsz > 0 &&
4642 	    (*pl++ = page_lookup_nowait(vp, off, SE_SHARED))) {
4643 			off += PAGESIZE;
4644 			plsz -= PAGESIZE;
4645 	}
4646 out:
4647 	if (err) {
4648 		/*
4649 		 * Release any pages we have previously locked.
4650 		 */
4651 		while (pl > pl0)
4652 			page_unlock(*--pl);
4653 	} else {
4654 		ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
4655 	}
4656 
4657 	*pl = NULL;
4658 
4659 	ZFS_EXIT(zfsvfs);
4660 	return (err);
4661 }
4662 
4663 /*
4664  * Request a memory map for a section of a file.  This code interacts
4665  * with common code and the VM system as follows:
4666  *
4667  * - common code calls mmap(), which ends up in smmap_common()
4668  * - this calls VOP_MAP(), which takes you into (say) zfs
4669  * - zfs_map() calls as_map(), passing segvn_create() as the callback
4670  * - segvn_create() creates the new segment and calls VOP_ADDMAP()
4671  * - zfs_addmap() updates z_mapcnt
4672  */
4673 /*ARGSUSED*/
4674 static int
4675 zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
4676     size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4677     caller_context_t *ct)
4678 {
4679 	znode_t *zp = VTOZ(vp);
4680 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4681 	segvn_crargs_t	vn_a;
4682 	int		error;
4683 
4684 	ZFS_ENTER(zfsvfs);
4685 	ZFS_VERIFY_ZP(zp);
4686 
4687 	if ((prot & PROT_WRITE) && (zp->z_pflags &
4688 	    (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
4689 		ZFS_EXIT(zfsvfs);
4690 		return (SET_ERROR(EPERM));
4691 	}
4692 
4693 	if ((prot & (PROT_READ | PROT_EXEC)) &&
4694 	    (zp->z_pflags & ZFS_AV_QUARANTINED)) {
4695 		ZFS_EXIT(zfsvfs);
4696 		return (SET_ERROR(EACCES));
4697 	}
4698 
4699 	if (vp->v_flag & VNOMAP) {
4700 		ZFS_EXIT(zfsvfs);
4701 		return (SET_ERROR(ENOSYS));
4702 	}
4703 
4704 	if (off < 0 || len > MAXOFFSET_T - off) {
4705 		ZFS_EXIT(zfsvfs);
4706 		return (SET_ERROR(ENXIO));
4707 	}
4708 
4709 	if (vp->v_type != VREG) {
4710 		ZFS_EXIT(zfsvfs);
4711 		return (SET_ERROR(ENODEV));
4712 	}
4713 
4714 	/*
4715 	 * If file is locked, disallow mapping.
4716 	 */
4717 	if (MANDMODE(zp->z_mode) && vn_has_flocks(vp)) {
4718 		ZFS_EXIT(zfsvfs);
4719 		return (SET_ERROR(EAGAIN));
4720 	}
4721 
4722 	as_rangelock(as);
4723 	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
4724 	if (error != 0) {
4725 		as_rangeunlock(as);
4726 		ZFS_EXIT(zfsvfs);
4727 		return (error);
4728 	}
4729 
4730 	vn_a.vp = vp;
4731 	vn_a.offset = (u_offset_t)off;
4732 	vn_a.type = flags & MAP_TYPE;
4733 	vn_a.prot = prot;
4734 	vn_a.maxprot = maxprot;
4735 	vn_a.cred = cr;
4736 	vn_a.amp = NULL;
4737 	vn_a.flags = flags & ~MAP_TYPE;
4738 	vn_a.szc = 0;
4739 	vn_a.lgrp_mem_policy_flags = 0;
4740 
4741 	error = as_map(as, *addrp, len, segvn_create, &vn_a);
4742 
4743 	as_rangeunlock(as);
4744 	ZFS_EXIT(zfsvfs);
4745 	return (error);
4746 }
4747 
4748 /* ARGSUSED */
4749 static int
4750 zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4751     size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4752     caller_context_t *ct)
4753 {
4754 	uint64_t pages = btopr(len);
4755 
4756 	atomic_add_64(&VTOZ(vp)->z_mapcnt, pages);
4757 	return (0);
4758 }
4759 
4760 /*
4761  * The reason we push dirty pages as part of zfs_delmap() is so that we get a
4762  * more accurate mtime for the associated file.  Since we don't have a way of
4763  * detecting when the data was actually modified, we have to resort to
4764  * heuristics.  If an explicit msync() is done, then we mark the mtime when the
4765  * last page is pushed.  The problem occurs when the msync() call is omitted,
4766  * which by far the most common case:
4767  *
4768  *	open()
4769  *	mmap()
4770  *	<modify memory>
4771  *	munmap()
4772  *	close()
4773  *	<time lapse>
4774  *	putpage() via fsflush
4775  *
4776  * If we wait until fsflush to come along, we can have a modification time that
4777  * is some arbitrary point in the future.  In order to prevent this in the
4778  * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is
4779  * torn down.
4780  */
4781 /* ARGSUSED */
4782 static int
4783 zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4784     size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
4785     caller_context_t *ct)
4786 {
4787 	uint64_t pages = btopr(len);
4788 
4789 	ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages);
4790 	atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages);
4791 
4792 	if ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
4793 	    vn_has_cached_data(vp))
4794 		(void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr, ct);
4795 
4796 	return (0);
4797 }
4798 
4799 /*
4800  * Free or allocate space in a file.  Currently, this function only
4801  * supports the `F_FREESP' command.  However, this command is somewhat
4802  * misnamed, as its functionality includes the ability to allocate as
4803  * well as free space.
4804  *
4805  *	IN:	vp	- vnode of file to free data in.
4806  *		cmd	- action to take (only F_FREESP supported).
4807  *		bfp	- section of file to free/alloc.
4808  *		flag	- current file open mode flags.
4809  *		offset	- current file offset.
4810  *		cr	- credentials of caller [UNUSED].
4811  *		ct	- caller context.
4812  *
4813  *	RETURN:	0 on success, error code on failure.
4814  *
4815  * Timestamps:
4816  *	vp - ctime|mtime updated
4817  */
4818 /* ARGSUSED */
4819 static int
4820 zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag,
4821     offset_t offset, cred_t *cr, caller_context_t *ct)
4822 {
4823 	znode_t		*zp = VTOZ(vp);
4824 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4825 	uint64_t	off, len;
4826 	int		error;
4827 
4828 	ZFS_ENTER(zfsvfs);
4829 	ZFS_VERIFY_ZP(zp);
4830 
4831 	if (cmd != F_FREESP) {
4832 		ZFS_EXIT(zfsvfs);
4833 		return (SET_ERROR(EINVAL));
4834 	}
4835 
4836 	/*
4837 	 * In a case vp->v_vfsp != zp->z_zfsvfs->z_vfs (e.g. snapshots) our
4838 	 * callers might not be able to detect properly that we are read-only,
4839 	 * so check it explicitly here.
4840 	 */
4841 	if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
4842 		ZFS_EXIT(zfsvfs);
4843 		return (SET_ERROR(EROFS));
4844 	}
4845 
4846 	if (error = convoff(vp, bfp, 0, offset)) {
4847 		ZFS_EXIT(zfsvfs);
4848 		return (error);
4849 	}
4850 
4851 	if (bfp->l_len < 0) {
4852 		ZFS_EXIT(zfsvfs);
4853 		return (SET_ERROR(EINVAL));
4854 	}
4855 
4856 	off = bfp->l_start;
4857 	len = bfp->l_len; /* 0 means from off to end of file */
4858 
4859 	error = zfs_freesp(zp, off, len, flag, TRUE);
4860 
4861 	if (error == 0 && off == 0 && len == 0)
4862 		vnevent_truncate(ZTOV(zp), ct);
4863 
4864 	ZFS_EXIT(zfsvfs);
4865 	return (error);
4866 }
4867 
4868 /*ARGSUSED*/
4869 static int
4870 zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
4871 {
4872 	znode_t		*zp = VTOZ(vp);
4873 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4874 	uint32_t	gen;
4875 	uint64_t	gen64;
4876 	uint64_t	object = zp->z_id;
4877 	zfid_short_t	*zfid;
4878 	int		size, i, error;
4879 
4880 	ZFS_ENTER(zfsvfs);
4881 	ZFS_VERIFY_ZP(zp);
4882 
4883 	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
4884 	    &gen64, sizeof (uint64_t))) != 0) {
4885 		ZFS_EXIT(zfsvfs);
4886 		return (error);
4887 	}
4888 
4889 	gen = (uint32_t)gen64;
4890 
4891 	size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
4892 	if (fidp->fid_len < size) {
4893 		fidp->fid_len = size;
4894 		ZFS_EXIT(zfsvfs);
4895 		return (SET_ERROR(ENOSPC));
4896 	}
4897 
4898 	zfid = (zfid_short_t *)fidp;
4899 
4900 	zfid->zf_len = size;
4901 
4902 	for (i = 0; i < sizeof (zfid->zf_object); i++)
4903 		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
4904 
4905 	/* Must have a non-zero generation number to distinguish from .zfs */
4906 	if (gen == 0)
4907 		gen = 1;
4908 	for (i = 0; i < sizeof (zfid->zf_gen); i++)
4909 		zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
4910 
4911 	if (size == LONG_FID_LEN) {
4912 		uint64_t	objsetid = dmu_objset_id(zfsvfs->z_os);
4913 		zfid_long_t	*zlfid;
4914 
4915 		zlfid = (zfid_long_t *)fidp;
4916 
4917 		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
4918 			zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
4919 
4920 		/* XXX - this should be the generation number for the objset */
4921 		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
4922 			zlfid->zf_setgen[i] = 0;
4923 	}
4924 
4925 	ZFS_EXIT(zfsvfs);
4926 	return (0);
4927 }
4928 
4929 static int
4930 zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
4931     caller_context_t *ct)
4932 {
4933 	znode_t		*zp, *xzp;
4934 	zfsvfs_t	*zfsvfs;
4935 	zfs_dirlock_t	*dl;
4936 	int		error;
4937 
4938 	switch (cmd) {
4939 	case _PC_LINK_MAX:
4940 		*valp = ULONG_MAX;
4941 		return (0);
4942 
4943 	case _PC_FILESIZEBITS:
4944 		*valp = 64;
4945 		return (0);
4946 
4947 	case _PC_XATTR_EXISTS:
4948 		zp = VTOZ(vp);
4949 		zfsvfs = zp->z_zfsvfs;
4950 		ZFS_ENTER(zfsvfs);
4951 		ZFS_VERIFY_ZP(zp);
4952 		*valp = 0;
4953 		error = zfs_dirent_lock(&dl, zp, "", &xzp,
4954 		    ZXATTR | ZEXISTS | ZSHARED, NULL, NULL);
4955 		if (error == 0) {
4956 			zfs_dirent_unlock(dl);
4957 			if (!zfs_dirempty(xzp))
4958 				*valp = 1;
4959 			VN_RELE(ZTOV(xzp));
4960 		} else if (error == ENOENT) {
4961 			/*
4962 			 * If there aren't extended attributes, it's the
4963 			 * same as having zero of them.
4964 			 */
4965 			error = 0;
4966 		}
4967 		ZFS_EXIT(zfsvfs);
4968 		return (error);
4969 
4970 	case _PC_SATTR_ENABLED:
4971 	case _PC_SATTR_EXISTS:
4972 		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
4973 		    (vp->v_type == VREG || vp->v_type == VDIR);
4974 		return (0);
4975 
4976 	case _PC_ACCESS_FILTERING:
4977 		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_ACCESS_FILTER) &&
4978 		    vp->v_type == VDIR;
4979 		return (0);
4980 
4981 	case _PC_ACL_ENABLED:
4982 		*valp = _ACL_ACE_ENABLED;
4983 		return (0);
4984 
4985 	case _PC_MIN_HOLE_SIZE:
4986 		*valp = (ulong_t)SPA_MINBLOCKSIZE;
4987 		return (0);
4988 
4989 	case _PC_TIMESTAMP_RESOLUTION:
4990 		/* nanosecond timestamp resolution */
4991 		*valp = 1L;
4992 		return (0);
4993 
4994 	default:
4995 		return (fs_pathconf(vp, cmd, valp, cr, ct));
4996 	}
4997 }
4998 
4999 /*ARGSUSED*/
5000 static int
5001 zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
5002     caller_context_t *ct)
5003 {
5004 	znode_t *zp = VTOZ(vp);
5005 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5006 	int error;
5007 	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
5008 
5009 	ZFS_ENTER(zfsvfs);
5010 	ZFS_VERIFY_ZP(zp);
5011 	error = zfs_getacl(zp, vsecp, skipaclchk, cr);
5012 	ZFS_EXIT(zfsvfs);
5013 
5014 	return (error);
5015 }
5016 
5017 /*ARGSUSED*/
5018 static int
5019 zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
5020     caller_context_t *ct)
5021 {
5022 	znode_t *zp = VTOZ(vp);
5023 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5024 	int error;
5025 	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
5026 	zilog_t	*zilog = zfsvfs->z_log;
5027 
5028 	ZFS_ENTER(zfsvfs);
5029 	ZFS_VERIFY_ZP(zp);
5030 
5031 	error = zfs_setacl(zp, vsecp, skipaclchk, cr);
5032 
5033 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
5034 		zil_commit(zilog, 0);
5035 
5036 	ZFS_EXIT(zfsvfs);
5037 	return (error);
5038 }
5039 
5040 /*
5041  * The smallest read we may consider to loan out an arcbuf.
5042  * This must be a power of 2.
5043  */
5044 int zcr_blksz_min = (1 << 10);	/* 1K */
5045 /*
5046  * If set to less than the file block size, allow loaning out of an
5047  * arcbuf for a partial block read.  This must be a power of 2.
5048  */
5049 int zcr_blksz_max = (1 << 17);	/* 128K */
5050 
5051 /*ARGSUSED*/
5052 static int
5053 zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
5054     caller_context_t *ct)
5055 {
5056 	znode_t	*zp = VTOZ(vp);
5057 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5058 	int max_blksz = zfsvfs->z_max_blksz;
5059 	uio_t *uio = &xuio->xu_uio;
5060 	ssize_t size = uio->uio_resid;
5061 	offset_t offset = uio->uio_loffset;
5062 	int blksz;
5063 	int fullblk, i;
5064 	arc_buf_t *abuf;
5065 	ssize_t maxsize;
5066 	int preamble, postamble;
5067 
5068 	if (xuio->xu_type != UIOTYPE_ZEROCOPY)
5069 		return (SET_ERROR(EINVAL));
5070 
5071 	ZFS_ENTER(zfsvfs);
5072 	ZFS_VERIFY_ZP(zp);
5073 	switch (ioflag) {
5074 	case UIO_WRITE:
5075 		/*
5076 		 * Loan out an arc_buf for write if write size is bigger than
5077 		 * max_blksz, and the file's block size is also max_blksz.
5078 		 */
5079 		blksz = max_blksz;
5080 		if (size < blksz || zp->z_blksz != blksz) {
5081 			ZFS_EXIT(zfsvfs);
5082 			return (SET_ERROR(EINVAL));
5083 		}
5084 		/*
5085 		 * Caller requests buffers for write before knowing where the
5086 		 * write offset might be (e.g. NFS TCP write).
5087 		 */
5088 		if (offset == -1) {
5089 			preamble = 0;
5090 		} else {
5091 			preamble = P2PHASE(offset, blksz);
5092 			if (preamble) {
5093 				preamble = blksz - preamble;
5094 				size -= preamble;
5095 			}
5096 		}
5097 
5098 		postamble = P2PHASE(size, blksz);
5099 		size -= postamble;
5100 
5101 		fullblk = size / blksz;
5102 		(void) dmu_xuio_init(xuio,
5103 		    (preamble != 0) + fullblk + (postamble != 0));
5104 		DTRACE_PROBE3(zfs_reqzcbuf_align, int, preamble,
5105 		    int, postamble, int,
5106 		    (preamble != 0) + fullblk + (postamble != 0));
5107 
5108 		/*
5109 		 * Have to fix iov base/len for partial buffers.  They
5110 		 * currently represent full arc_buf's.
5111 		 */
5112 		if (preamble) {
5113 			/* data begins in the middle of the arc_buf */
5114 			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5115 			    blksz);
5116 			ASSERT(abuf);
5117 			(void) dmu_xuio_add(xuio, abuf,
5118 			    blksz - preamble, preamble);
5119 		}
5120 
5121 		for (i = 0; i < fullblk; i++) {
5122 			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5123 			    blksz);
5124 			ASSERT(abuf);
5125 			(void) dmu_xuio_add(xuio, abuf, 0, blksz);
5126 		}
5127 
5128 		if (postamble) {
5129 			/* data ends in the middle of the arc_buf */
5130 			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5131 			    blksz);
5132 			ASSERT(abuf);
5133 			(void) dmu_xuio_add(xuio, abuf, 0, postamble);
5134 		}
5135 		break;
5136 	case UIO_READ:
5137 		/*
5138 		 * Loan out an arc_buf for read if the read size is larger than
5139 		 * the current file block size.  Block alignment is not
5140 		 * considered.  Partial arc_buf will be loaned out for read.
5141 		 */
5142 		blksz = zp->z_blksz;
5143 		if (blksz < zcr_blksz_min)
5144 			blksz = zcr_blksz_min;
5145 		if (blksz > zcr_blksz_max)
5146 			blksz = zcr_blksz_max;
5147 		/* avoid potential complexity of dealing with it */
5148 		if (blksz > max_blksz) {
5149 			ZFS_EXIT(zfsvfs);
5150 			return (SET_ERROR(EINVAL));
5151 		}
5152 
5153 		maxsize = zp->z_size - uio->uio_loffset;
5154 		if (size > maxsize)
5155 			size = maxsize;
5156 
5157 		if (size < blksz || vn_has_cached_data(vp)) {
5158 			ZFS_EXIT(zfsvfs);
5159 			return (SET_ERROR(EINVAL));
5160 		}
5161 		break;
5162 	default:
5163 		ZFS_EXIT(zfsvfs);
5164 		return (SET_ERROR(EINVAL));
5165 	}
5166 
5167 	uio->uio_extflg = UIO_XUIO;
5168 	XUIO_XUZC_RW(xuio) = ioflag;
5169 	ZFS_EXIT(zfsvfs);
5170 	return (0);
5171 }
5172 
5173 /*ARGSUSED*/
5174 static int
5175 zfs_retzcbuf(vnode_t *vp, xuio_t *xuio, cred_t *cr, caller_context_t *ct)
5176 {
5177 	int i;
5178 	arc_buf_t *abuf;
5179 	int ioflag = XUIO_XUZC_RW(xuio);
5180 
5181 	ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
5182 
5183 	i = dmu_xuio_cnt(xuio);
5184 	while (i-- > 0) {
5185 		abuf = dmu_xuio_arcbuf(xuio, i);
5186 		/*
5187 		 * if abuf == NULL, it must be a write buffer
5188 		 * that has been returned in zfs_write().
5189 		 */
5190 		if (abuf)
5191 			dmu_return_arcbuf(abuf);
5192 		ASSERT(abuf || ioflag == UIO_WRITE);
5193 	}
5194 
5195 	dmu_xuio_fini(xuio);
5196 	return (0);
5197 }
5198 
5199 /*
5200  * Predeclare these here so that the compiler assumes that
5201  * this is an "old style" function declaration that does
5202  * not include arguments => we won't get type mismatch errors
5203  * in the initializations that follow.
5204  */
5205 static int zfs_inval();
5206 static int zfs_isdir();
5207 
5208 static int
5209 zfs_inval()
5210 {
5211 	return (SET_ERROR(EINVAL));
5212 }
5213 
5214 static int
5215 zfs_isdir()
5216 {
5217 	return (SET_ERROR(EISDIR));
5218 }
5219 /*
5220  * Directory vnode operations template
5221  */
5222 vnodeops_t *zfs_dvnodeops;
5223 const fs_operation_def_t zfs_dvnodeops_template[] = {
5224 	VOPNAME_OPEN,		{ .vop_open = zfs_open },
5225 	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
5226 	VOPNAME_READ,		{ .error = zfs_isdir },
5227 	VOPNAME_WRITE,		{ .error = zfs_isdir },
5228 	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
5229 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5230 	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
5231 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5232 	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
5233 	VOPNAME_CREATE,		{ .vop_create = zfs_create },
5234 	VOPNAME_REMOVE,		{ .vop_remove = zfs_remove },
5235 	VOPNAME_LINK,		{ .vop_link = zfs_link },
5236 	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
5237 	VOPNAME_MKDIR,		{ .vop_mkdir = zfs_mkdir },
5238 	VOPNAME_RMDIR,		{ .vop_rmdir = zfs_rmdir },
5239 	VOPNAME_READDIR,	{ .vop_readdir = zfs_readdir },
5240 	VOPNAME_SYMLINK,	{ .vop_symlink = zfs_symlink },
5241 	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
5242 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5243 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5244 	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
5245 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5246 	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
5247 	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
5248 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5249 	NULL,			NULL
5250 };
5251 
5252 /*
5253  * Regular file vnode operations template
5254  */
5255 vnodeops_t *zfs_fvnodeops;
5256 const fs_operation_def_t zfs_fvnodeops_template[] = {
5257 	VOPNAME_OPEN,		{ .vop_open = zfs_open },
5258 	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
5259 	VOPNAME_READ,		{ .vop_read = zfs_read },
5260 	VOPNAME_WRITE,		{ .vop_write = zfs_write },
5261 	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
5262 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5263 	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
5264 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5265 	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
5266 	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
5267 	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
5268 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5269 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5270 	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
5271 	VOPNAME_FRLOCK,		{ .vop_frlock = zfs_frlock },
5272 	VOPNAME_SPACE,		{ .vop_space = zfs_space },
5273 	VOPNAME_GETPAGE,	{ .vop_getpage = zfs_getpage },
5274 	VOPNAME_PUTPAGE,	{ .vop_putpage = zfs_putpage },
5275 	VOPNAME_MAP,		{ .vop_map = zfs_map },
5276 	VOPNAME_ADDMAP,		{ .vop_addmap = zfs_addmap },
5277 	VOPNAME_DELMAP,		{ .vop_delmap = zfs_delmap },
5278 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5279 	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
5280 	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
5281 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5282 	VOPNAME_REQZCBUF,	{ .vop_reqzcbuf = zfs_reqzcbuf },
5283 	VOPNAME_RETZCBUF,	{ .vop_retzcbuf = zfs_retzcbuf },
5284 	NULL,			NULL
5285 };
5286 
5287 /*
5288  * Symbolic link vnode operations template
5289  */
5290 vnodeops_t *zfs_symvnodeops;
5291 const fs_operation_def_t zfs_symvnodeops_template[] = {
5292 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5293 	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
5294 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5295 	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
5296 	VOPNAME_READLINK,	{ .vop_readlink = zfs_readlink },
5297 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5298 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5299 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5300 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5301 	NULL,			NULL
5302 };
5303 
5304 /*
5305  * special share hidden files vnode operations template
5306  */
5307 vnodeops_t *zfs_sharevnodeops;
5308 const fs_operation_def_t zfs_sharevnodeops_template[] = {
5309 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5310 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5311 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5312 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5313 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5314 	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
5315 	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
5316 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5317 	NULL,			NULL
5318 };
5319 
5320 /*
5321  * Extended attribute directory vnode operations template
5322  *
5323  * This template is identical to the directory vnodes
5324  * operation template except for restricted operations:
5325  *	VOP_MKDIR()
5326  *	VOP_SYMLINK()
5327  *
5328  * Note that there are other restrictions embedded in:
5329  *	zfs_create()	- restrict type to VREG
5330  *	zfs_link()	- no links into/out of attribute space
5331  *	zfs_rename()	- no moves into/out of attribute space
5332  */
5333 vnodeops_t *zfs_xdvnodeops;
5334 const fs_operation_def_t zfs_xdvnodeops_template[] = {
5335 	VOPNAME_OPEN,		{ .vop_open = zfs_open },
5336 	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
5337 	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
5338 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5339 	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
5340 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5341 	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
5342 	VOPNAME_CREATE,		{ .vop_create = zfs_create },
5343 	VOPNAME_REMOVE,		{ .vop_remove = zfs_remove },
5344 	VOPNAME_LINK,		{ .vop_link = zfs_link },
5345 	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
5346 	VOPNAME_MKDIR,		{ .error = zfs_inval },
5347 	VOPNAME_RMDIR,		{ .vop_rmdir = zfs_rmdir },
5348 	VOPNAME_READDIR,	{ .vop_readdir = zfs_readdir },
5349 	VOPNAME_SYMLINK,	{ .error = zfs_inval },
5350 	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
5351 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5352 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5353 	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
5354 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5355 	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
5356 	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
5357 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5358 	NULL,			NULL
5359 };
5360 
5361 /*
5362  * Error vnode operations template
5363  */
5364 vnodeops_t *zfs_evnodeops;
5365 const fs_operation_def_t zfs_evnodeops_template[] = {
5366 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5367 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5368 	NULL,			NULL
5369 };
5370