xref: /illumos-gate/usr/src/uts/common/fs/zfs/zfs_dir.c (revision 43d18f1c320355e93c47399bea0b2e022fe06364)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/param.h>
31 #include <sys/time.h>
32 #include <sys/systm.h>
33 #include <sys/sysmacros.h>
34 #include <sys/resource.h>
35 #include <sys/vfs.h>
36 #include <sys/vnode.h>
37 #include <sys/file.h>
38 #include <sys/mode.h>
39 #include <sys/kmem.h>
40 #include <sys/uio.h>
41 #include <sys/pathname.h>
42 #include <sys/cmn_err.h>
43 #include <sys/errno.h>
44 #include <sys/stat.h>
45 #include <sys/unistd.h>
46 #include <sys/random.h>
47 #include <sys/policy.h>
48 #include <sys/zfs_dir.h>
49 #include <sys/zfs_acl.h>
50 #include <sys/fs/zfs.h>
51 #include "fs/fs_subr.h"
52 #include <sys/zap.h>
53 #include <sys/dmu.h>
54 #include <sys/atomic.h>
55 #include <sys/zfs_ctldir.h>
56 
57 /*
58  * Lock a directory entry.  A dirlock on <dzp, name> protects that name
59  * in dzp's directory zap object.  As long as you hold a dirlock, you can
60  * assume two things: (1) dzp cannot be reaped, and (2) no other thread
61  * can change the zap entry for (i.e. link or unlink) this name.
62  *
63  * Input arguments:
64  *	dzp	- znode for directory
65  *	name	- name of entry to lock
66  *	flag	- ZNEW: if the entry already exists, fail with EEXIST.
67  *		  ZEXISTS: if the entry does not exist, fail with ENOENT.
68  *		  ZSHARED: allow concurrent access with other ZSHARED callers.
69  *		  ZXATTR: we want dzp's xattr directory
70  *
71  * Output arguments:
72  *	zpp	- pointer to the znode for the entry (NULL if there isn't one)
73  *	dlpp	- pointer to the dirlock for this entry (NULL on error)
74  *
75  * Return value: 0 on success or errno on failure.
76  *
77  * NOTE: Always checks for, and rejects, '.' and '..'.
78  */
79 int
80 zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
81 	int flag)
82 {
83 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
84 	zfs_dirlock_t	*dl;
85 	uint64_t	zoid;
86 	int		error;
87 
88 	*zpp = NULL;
89 	*dlpp = NULL;
90 
91 	/*
92 	 * Verify that we are not trying to lock '.', '..', or '.zfs'
93 	 */
94 	if (name[0] == '.' &&
95 	    (name[1] == '\0' || (name[1] == '.' && name[2] == '\0')) ||
96 	    zfs_has_ctldir(dzp) && strcmp(name, ZFS_CTLDIR_NAME) == 0)
97 		return (EEXIST);
98 
99 	/*
100 	 * Wait until there are no locks on this name.
101 	 */
102 	mutex_enter(&dzp->z_lock);
103 	for (;;) {
104 		if (dzp->z_reap) {
105 			mutex_exit(&dzp->z_lock);
106 			return (ENOENT);
107 		}
108 		for (dl = dzp->z_dirlocks; dl != NULL; dl = dl->dl_next)
109 			if (strcmp(name, dl->dl_name) == 0)
110 				break;
111 		if (dl == NULL)	{
112 			/*
113 			 * Allocate a new dirlock and add it to the list.
114 			 */
115 			dl = kmem_alloc(sizeof (zfs_dirlock_t), KM_SLEEP);
116 			cv_init(&dl->dl_cv, NULL, CV_DEFAULT, NULL);
117 			dl->dl_name = name;
118 			dl->dl_sharecnt = 0;
119 			dl->dl_namesize = 0;
120 			dl->dl_dzp = dzp;
121 			dl->dl_next = dzp->z_dirlocks;
122 			dzp->z_dirlocks = dl;
123 			break;
124 		}
125 		if ((flag & ZSHARED) && dl->dl_sharecnt != 0)
126 			break;
127 		cv_wait(&dl->dl_cv, &dzp->z_lock);
128 	}
129 
130 	if ((flag & ZSHARED) && ++dl->dl_sharecnt > 1 && dl->dl_namesize == 0) {
131 		/*
132 		 * We're the second shared reference to dl.  Make a copy of
133 		 * dl_name in case the first thread goes away before we do.
134 		 * Note that we initialize the new name before storing its
135 		 * pointer into dl_name, because the first thread may load
136 		 * dl->dl_name at any time.  He'll either see the old value,
137 		 * which is his, or the new shared copy; either is OK.
138 		 */
139 		dl->dl_namesize = strlen(dl->dl_name) + 1;
140 		name = kmem_alloc(dl->dl_namesize, KM_SLEEP);
141 		bcopy(dl->dl_name, name, dl->dl_namesize);
142 		dl->dl_name = name;
143 	}
144 
145 	mutex_exit(&dzp->z_lock);
146 
147 	/*
148 	 * We have a dirlock on the name.  (Note that it is the dirlock,
149 	 * not the dzp's z_lock, that protects the name in the zap object.)
150 	 * See if there's an object by this name; if so, put a hold on it.
151 	 */
152 	if (flag & ZXATTR) {
153 		zoid = dzp->z_phys->zp_xattr;
154 		error = (zoid == 0 ? ENOENT : 0);
155 	} else {
156 		error = zap_lookup(zfsvfs->z_os, dzp->z_id, name, 8, 1, &zoid);
157 	}
158 	if (error) {
159 		if (error != ENOENT || (flag & ZEXISTS)) {
160 			zfs_dirent_unlock(dl);
161 			return (error);
162 		}
163 	} else {
164 		if (flag & ZNEW) {
165 			zfs_dirent_unlock(dl);
166 			return (EEXIST);
167 		}
168 		error = zfs_zget(zfsvfs, zoid, zpp);
169 		if (error) {
170 			zfs_dirent_unlock(dl);
171 			return (error);
172 		}
173 	}
174 
175 	*dlpp = dl;
176 
177 	return (0);
178 }
179 
180 /*
181  * Unlock this directory entry and wake anyone who was waiting for it.
182  */
void
zfs_dirent_unlock(zfs_dirlock_t *dl)
{
	znode_t *dzp = dl->dl_dzp;
	zfs_dirlock_t **prev_dl, *cur_dl;

	mutex_enter(&dzp->z_lock);
	if (dl->dl_sharecnt > 1) {
		/* Other ZSHARED holders remain: just drop our reference. */
		dl->dl_sharecnt--;
		mutex_exit(&dzp->z_lock);
		return;
	}
	/* Last holder: unlink dl from dzp's dirlock list. */
	prev_dl = &dzp->z_dirlocks;
	while ((cur_dl = *prev_dl) != dl)
		prev_dl = &cur_dl->dl_next;
	*prev_dl = dl->dl_next;
	/*
	 * Wake threads blocked in zfs_dirent_lock(); they re-scan the
	 * list under z_lock, and dl is already unlinked, so none of
	 * them can find it again.
	 */
	cv_broadcast(&dl->dl_cv);
	mutex_exit(&dzp->z_lock);

	/* dl_namesize != 0 means dl_name is a shared copy we allocated. */
	if (dl->dl_namesize != 0)
		kmem_free(dl->dl_name, dl->dl_namesize);
	cv_destroy(&dl->dl_cv);
	kmem_free(dl, sizeof (*dl));
}
207 
208 /*
209  * Look up an entry in a directory.
210  *
211  * NOTE: '.' and '..' are handled as special cases because
212  *	no directory entries are actually stored for them.  If this is
213  *	the root of a filesystem, then '.zfs' is also treated as a
214  *	special pseudo-directory.
215  */
int
zfs_dirlook(znode_t *dzp, char *name, vnode_t **vpp)
{
	zfs_dirlock_t *dl;
	znode_t *zp;
	int error = 0;

	if (name[0] == 0 || (name[0] == '.' && name[1] == 0)) {
		/* "" or "." both refer to dzp itself. */
		*vpp = ZTOV(dzp);
		VN_HOLD(*vpp);
	} else if (name[0] == '.' && name[1] == '.' && name[2] == 0) {
		zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
		/*
		 * If we are a snapshot mounted under .zfs, return
		 * the vp for the snapshot directory.
		 */
		if (zfsvfs->z_parent != zfsvfs) {
			error = zfsctl_root_lookup(zfsvfs->z_parent->z_ctldir,
			    "snapshot", vpp, NULL, 0, NULL, kcred);
			return (error);
		}
		/* Read zp_parent under z_parent_lock so it stays stable. */
		rw_enter(&dzp->z_parent_lock, RW_READER);
		error = zfs_zget(zfsvfs, dzp->z_phys->zp_parent, &zp);
		if (error == 0)
			*vpp = ZTOV(zp);
		rw_exit(&dzp->z_parent_lock);
	} else if (zfs_has_ctldir(dzp) && strcmp(name, ZFS_CTLDIR_NAME) == 0) {
		/* '.zfs' in a filesystem root: the control directory. */
		*vpp = zfsctl_root(dzp);
	} else {
		/* Ordinary entry: look it up under a shared dirlock. */
		error = zfs_dirent_lock(&dl, dzp, name, &zp, ZEXISTS | ZSHARED);
		if (error == 0) {
			*vpp = ZTOV(zp);
			zfs_dirent_unlock(dl);
			dzp->z_zn_prefetch = B_TRUE; /* enable prefetching */
		}
	}

	return (error);
}
255 
/*
 * Render x as a lowercase hex string (no "0x" prefix, no leading
 * zeros; x == 0 yields "0") right-justified in namebuf[0..16].
 * Returns a pointer to the first digit within namebuf.
 */
static char *
zfs_dq_hexname(char namebuf[17], uint64_t x)
{
	static const char hexdigits[] = "0123456789abcdef";
	char *p = namebuf + 16;

	*p = '\0';
	do {
		p--;
		*p = hexdigits[x % 16];
		x /= 16;
	} while (x != 0);

	return (p);
}
270 
271 void
272 zfs_dq_add(znode_t *zp, dmu_tx_t *tx)
273 {
274 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
275 	char obj_name[17];
276 	int error;
277 
278 	ASSERT(zp->z_reap);
279 	ASSERT3U(zp->z_phys->zp_links, ==, 0);
280 
281 	error = zap_add(zfsvfs->z_os, zfsvfs->z_dqueue,
282 	    zfs_dq_hexname(obj_name, zp->z_id), 8, 1, &zp->z_id, tx);
283 	ASSERT3U(error, ==, 0);
284 }
285 
286 /*
287  * Delete the entire contents of a directory.  Return a count
288  * of the number of entries that could not be deleted.
289  *
290  * NOTE: this function assumes that the directory is inactive,
291  *	so there is no need to lock its entries before deletion.
292  *	Also, it assumes the directory contents is *only* regular
293  *	files.
294  */
static int
zfs_purgedir(znode_t *dzp)
{
	zap_cursor_t	zc;
	zap_attribute_t	zap;
	znode_t		*xzp;
	dmu_tx_t	*tx;
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	zfs_dirlock_t	dl;
	int skipped = 0;	/* entries we failed to delete */
	int error;

	ASSERT(dzp->z_active == 0);

	for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
	    (error = zap_cursor_retrieve(&zc, &zap)) == 0;
	    zap_cursor_advance(&zc)) {
		error = zfs_zget(zfsvfs, zap.za_first_integer, &xzp);
		ASSERT3U(error, ==, 0);

		/* Per the header comment: only plain files/symlinks here. */
		ASSERT((ZTOV(xzp)->v_type == VREG) ||
		    (ZTOV(xzp)->v_type == VLNK));

		/*
		 * Hold both znodes' bonus buffers, the directory zap
		 * (one entry removed) and the delete queue zap (one
		 * entry possibly added by zfs_link_destroy).
		 */
		tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_bonus(tx, dzp->z_id);
		dmu_tx_hold_zap(tx, dzp->z_id, -1);
		dmu_tx_hold_bonus(tx, xzp->z_id);
		dmu_tx_hold_zap(tx, zfsvfs->z_dqueue, 1);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			/* Couldn't assign a tx; count it and move on. */
			dmu_tx_abort(tx);
			VN_RELE(ZTOV(xzp));
			skipped += 1;
			continue;
		}
		/*
		 * Fabricate a stack dirlock.  The directory is inactive
		 * (see ASSERT above), so no real dirent locking is needed;
		 * zfs_link_destroy only reads dl_dzp and dl_name.
		 */
		bzero(&dl, sizeof (dl));
		dl.dl_dzp = dzp;
		dl.dl_name = zap.za_name;

		error = zfs_link_destroy(&dl, xzp, tx, 0, NULL);
		ASSERT3U(error, ==, 0);
		dmu_tx_commit(tx);

		VN_RELE(ZTOV(xzp));
	}
	zap_cursor_fini(&zc);
	/* Loop only ends when the cursor is exhausted. */
	ASSERT(error == ENOENT);
	return (skipped);
}
344 
345 /*
346  * Special function to requeue the znodes for deletion that were
347  * in progress when we either crashed or umounted the file system.
348  */
349 static void
350 zfs_drain_dq(zfsvfs_t *zfsvfs)
351 {
352 	zap_cursor_t	zc;
353 	zap_attribute_t zap;
354 	dmu_object_info_t doi;
355 	znode_t		*zp;
356 	int		error;
357 
358 	/*
359 	 * Interate over the contents of the delete queue.
360 	 */
361 	for (zap_cursor_init(&zc, zfsvfs->z_os, zfsvfs->z_dqueue);
362 	    zap_cursor_retrieve(&zc, &zap) == 0;
363 	    zap_cursor_advance(&zc)) {
364 
365 		/*
366 		 * Need some helpers?
367 		 */
368 		if (zfs_delete_thread_target(zfsvfs, -1) != 0)
369 			return;
370 
371 		/*
372 		 * See what kind of object we have in queue
373 		 */
374 
375 		error = dmu_object_info(zfsvfs->z_os,
376 		    zap.za_first_integer, &doi);
377 		if (error != 0)
378 			continue;
379 
380 		ASSERT((doi.doi_type == DMU_OT_PLAIN_FILE_CONTENTS) ||
381 		    (doi.doi_type == DMU_OT_DIRECTORY_CONTENTS));
382 		/*
383 		 * We need to re-mark these queue entries for reaping,
384 		 * so we pull them back into core and set zp->z_reap.
385 		 */
386 		error = zfs_zget(zfsvfs, zap.za_first_integer, &zp);
387 
388 		/*
389 		 * We may pick up znodes that are already marked for reaping.
390 		 * This could happen during the purge of an extended attribute
391 		 * directory.  All we need to do is skip over them, since they
392 		 * are already in the system to be processed by the taskq.
393 		 */
394 		if (error != 0) {
395 			continue;
396 		}
397 		zp->z_reap = 1;
398 		VN_RELE(ZTOV(zp));
399 		break;
400 	}
401 	zap_cursor_fini(&zc);
402 }
403 
void
zfs_delete_thread(void *arg)
{
	zfsvfs_t	*zfsvfs = arg;
	zfs_delete_t 	*zd = &zfsvfs->z_delete_head;
	znode_t		*zp;
	callb_cpr_t	cprinfo;

	/* Register with the CPR (suspend/resume) framework. */
	CALLB_CPR_INIT(&cprinfo, &zd->z_mutex, callb_generic_cpr, "zfs_delete");

	mutex_enter(&zd->z_mutex);

	/*
	 * The first thread to get here drains the persistent delete
	 * queue (entries left over from a crash or unmount); others
	 * skip straight to the work loop.  z_quiesce_cv waiters are
	 * told when the drain completes.
	 */
	if (!zd->z_drained && !zd->z_draining) {
		zd->z_draining = B_TRUE;
		mutex_exit(&zd->z_mutex);
		zfs_drain_dq(zfsvfs);
		mutex_enter(&zd->z_mutex);
		zd->z_draining = B_FALSE;
		zd->z_drained = B_TRUE;
		cv_broadcast(&zd->z_quiesce_cv);
	}

	/*
	 * Work loop: pull znodes off the in-core list and reap them.
	 * Exits when the thread target drops below the thread count.
	 */
	while (zd->z_thread_count <= zd->z_thread_target) {
		zp = list_head(&zd->z_znodes);
		if (zp == NULL) {
			ASSERT(zd->z_znode_count == 0);
			/* Nothing to do: sleep, CPR-safe. */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&zd->z_cv, &zd->z_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &zd->z_mutex);
			continue;
		}
		ASSERT(zd->z_znode_count != 0);
		list_remove(&zd->z_znodes, zp);
		if (--zd->z_znode_count == 0)
			cv_broadcast(&zd->z_quiesce_cv);
		/* Reap outside the mutex; zfs_rmnode may block on I/O. */
		mutex_exit(&zd->z_mutex);
		zfs_rmnode(zp);
		/* Re-evaluate how many helper threads we should have. */
		(void) zfs_delete_thread_target(zfsvfs, -1);
		mutex_enter(&zd->z_mutex);
	}

	ASSERT(zd->z_thread_count != 0);
	if (--zd->z_thread_count == 0)
		cv_broadcast(&zd->z_cv);

	CALLB_CPR_EXIT(&cprinfo);	/* NB: drops z_mutex */
	thread_exit();
}
452 
static int zfs_work_per_thread_shift = 11;	/* 2048 (2^11) per thread */

/*
 * Set the target number of delete threads to 'nthreads'.
 * If nthreads == -1, choose a number based on current workload.
 * If nthreads == 0, don't return until the threads have exited.
 */
int
zfs_delete_thread_target(zfsvfs_t *zfsvfs, int nthreads)
{
	zfs_delete_t *zd = &zfsvfs->z_delete_head;

	mutex_enter(&zd->z_mutex);

	if (nthreads == -1) {
		/* A target of 0 means we're shutting down: refuse. */
		if (zd->z_thread_target == 0) {
			mutex_exit(&zd->z_mutex);
			return (EBUSY);
		}
		/*
		 * Scale with queue length (one thread per 2^shift
		 * znodes), capped at 2*ncpus, floored at 1, plus one
		 * extra while the persistent queue is draining.
		 */
		nthreads = zd->z_znode_count >> zfs_work_per_thread_shift;
		nthreads = MIN(nthreads, ncpus << 1);
		nthreads = MAX(nthreads, 1);
		nthreads += !!zd->z_draining;
	}

	zd->z_thread_target = nthreads;

	/* Spawn threads until we reach the target. */
	while (zd->z_thread_count < zd->z_thread_target) {
		(void) thread_create(NULL, 0, zfs_delete_thread, zfsvfs,
		    0, &p0, TS_RUN, minclsyspri);
		zd->z_thread_count++;
	}

	/* For nthreads == 0, wait for excess threads to exit. */
	while (zd->z_thread_count > zd->z_thread_target && nthreads == 0) {
		cv_broadcast(&zd->z_cv);
		cv_wait(&zd->z_cv, &zd->z_mutex);
	}

	mutex_exit(&zd->z_mutex);

	return (0);
}
495 
496 /*
497  * Wait until everything that's been queued has been deleted.
498  */
499 void
500 zfs_delete_wait_empty(zfsvfs_t *zfsvfs)
501 {
502 	zfs_delete_t *zd = &zfsvfs->z_delete_head;
503 
504 	mutex_enter(&zd->z_mutex);
505 	ASSERT(zd->z_thread_target != 0);
506 	while (!zd->z_drained || zd->z_znode_count != 0) {
507 		ASSERT(zd->z_thread_target != 0);
508 		cv_wait(&zd->z_quiesce_cv, &zd->z_mutex);
509 	}
510 	mutex_exit(&zd->z_mutex);
511 }
512 
void
zfs_rmnode(znode_t *zp)
{
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	objset_t	*os = zfsvfs->z_os;
	znode_t		*xzp = NULL;
	char		obj_name[17];
	dmu_tx_t	*tx;
	uint64_t	acl_obj;
	int		error;

	/* Caller guarantees an inactive, unreferenced, unlinked znode. */
	ASSERT(zp->z_active == 0);
	ASSERT(ZTOV(zp)->v_count == 0);
	ASSERT(zp->z_phys->zp_links == 0);

	/*
	 * If this is an attribute directory, purge its contents.
	 */
	if (ZTOV(zp)->v_type == VDIR && (zp->z_phys->zp_flags & ZFS_XATTR))
		if (zfs_purgedir(zp) != 0) {
			zfs_delete_t *delq = &zfsvfs->z_delete_head;
			/*
			 * Add this back to the delete list to be retried later.
			 *
			 * XXX - this could just busy loop on us...
			 */
			mutex_enter(&delq->z_mutex);
			list_insert_tail(&delq->z_znodes, zp);
			delq->z_znode_count++;
			mutex_exit(&delq->z_mutex);
			return;
		}

	/*
	 * If the file has extended attributes, unlink the xattr dir.
	 */
	if (zp->z_phys->zp_xattr) {
		error = zfs_zget(zfsvfs, zp->z_phys->zp_xattr, &xzp);
		ASSERT(error == 0);
	}

	/* External ACL object, if any, is freed along with the znode. */
	acl_obj = zp->z_phys->zp_acl.z_acl_extern_obj;

	/*
	 * Set up the transaction: free zp's data, remove zp from the
	 * delete queue, and (if present) mark the xattr dir for deletion
	 * and free the external ACL object.
	 */
	tx = dmu_tx_create(os);
	dmu_tx_hold_free(tx, zp->z_id, 0, DMU_OBJECT_END);
	dmu_tx_hold_zap(tx, zfsvfs->z_dqueue, -1);
	if (xzp) {
		dmu_tx_hold_bonus(tx, xzp->z_id);
		dmu_tx_hold_zap(tx, zfsvfs->z_dqueue, 1);
	}
	if (acl_obj)
		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		zfs_delete_t *delq = &zfsvfs->z_delete_head;

		dmu_tx_abort(tx);
		/*
		 * Add this back to the delete list to be retried later.
		 *
		 * XXX - this could just busy loop on us...
		 */
		mutex_enter(&delq->z_mutex);
		list_insert_tail(&delq->z_znodes, zp);
		delq->z_znode_count++;
		mutex_exit(&delq->z_mutex);
		return;
	}

	if (xzp) {
		dmu_buf_will_dirty(xzp->z_dbuf, tx);
		mutex_enter(&xzp->z_lock);
		xzp->z_reap = 1;		/* mark xzp for deletion */
		xzp->z_phys->zp_links = 0;	/* no more links to it */
		mutex_exit(&xzp->z_lock);
		zfs_dq_add(xzp, tx);		/* add xzp to delete queue */
	}

	/*
	 * Remove this znode from delete queue
	 */
	error = zap_remove(os, zfsvfs->z_dqueue,
	    zfs_dq_hexname(obj_name, zp->z_id), tx);
	ASSERT3U(error, ==, 0);

	zfs_znode_delete(zp, tx);

	dmu_tx_commit(tx);

	/* The VN_RELE on xzp kicks off its own zfs_rmnode() later. */
	if (xzp)
		VN_RELE(ZTOV(xzp));
}
608 
609 /*
610  * Link zp into dl.  Can only fail if zp has been reaped.
611  */
int
zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
{
	znode_t *dzp = dl->dl_dzp;
	vnode_t *vp = ZTOV(zp);
	int zp_is_dir = (vp->v_type == VDIR);
	int error;

	dmu_buf_will_dirty(zp->z_dbuf, tx);
	mutex_enter(&zp->z_lock);

	/* A rename moves an existing link, so the link count is unchanged. */
	if (!(flag & ZRENAMING)) {
		if (zp->z_reap) {	/* no new links to reaped zp */
			ASSERT(!(flag & (ZNEW | ZEXISTS)));
			mutex_exit(&zp->z_lock);
			return (ENOENT);
		}
		zp->z_phys->zp_links++;
	}
	zp->z_phys->zp_parent = dzp->z_id;	/* dzp is now zp's parent */

	/* Newly created znodes get their times set at creation, not here. */
	if (!(flag & ZNEW))
		zfs_time_stamper_locked(zp, STATE_CHANGED, tx);
	mutex_exit(&zp->z_lock);

	/* Update the directory: zp's lock is dropped before taking dzp's. */
	dmu_buf_will_dirty(dzp->z_dbuf, tx);
	mutex_enter(&dzp->z_lock);
	dzp->z_phys->zp_size++;			/* one dirent added */
	dzp->z_phys->zp_links += zp_is_dir;	/* ".." link from zp */
	zfs_time_stamper_locked(dzp, CONTENT_MODIFIED, tx);
	mutex_exit(&dzp->z_lock);

	/* The dirlock (not z_lock) protects this name in the zap. */
	error = zap_add(zp->z_zfsvfs->z_os, dzp->z_id, dl->dl_name,
	    8, 1, &zp->z_id, tx);
	ASSERT(error == 0);

	return (0);
}
650 
651 /*
652  * Unlink zp from dl, and mark zp for reaping if this was the last link.
653  * Can fail if zp is a mount point (EBUSY) or a non-empty directory (EEXIST).
654  * If 'reaped_ptr' is NULL, we put reaped znodes on the delete queue.
655  * If it's non-NULL, we use it to indicate whether the znode needs reaping,
656  * and it's the caller's job to do it.
657  */
int
zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
	int *reaped_ptr)
{
	znode_t *dzp = dl->dl_dzp;
	vnode_t *vp = ZTOV(zp);
	int zp_is_dir = (vp->v_type == VDIR);
	int reaped = 0;
	int error;

	/* For a rename the link just moves; skip the removal checks. */
	if (!(flag & ZRENAMING)) {
		dmu_buf_will_dirty(zp->z_dbuf, tx);

		if (vn_vfswlock(vp))		/* prevent new mounts on zp */
			return (EBUSY);

		if (vn_ismntpt(vp)) {		/* don't remove mount point */
			vn_vfsunlock(vp);
			return (EBUSY);
		}

		mutex_enter(&zp->z_lock);
		if (zp_is_dir && !zfs_dirempty(zp)) {	/* dir not empty */
			mutex_exit(&zp->z_lock);
			vn_vfsunlock(vp);
			return (EEXIST);
		}
		/*
		 * A directory's "." self-link means its count bottoms out
		 * at 1 (== zp_is_dir), not 0; reaching it means this was
		 * the last link, so mark zp for reaping.
		 */
		ASSERT(zp->z_phys->zp_links > zp_is_dir);
		if (--zp->z_phys->zp_links == zp_is_dir) {
			zp->z_reap = 1;
			zp->z_phys->zp_links = 0;
			reaped = 1;
		} else {
			zfs_time_stamper_locked(zp, STATE_CHANGED, tx);
		}
		mutex_exit(&zp->z_lock);
		vn_vfsunlock(vp);
	}

	dmu_buf_will_dirty(dzp->z_dbuf, tx);
	mutex_enter(&dzp->z_lock);
	dzp->z_phys->zp_size--;			/* one dirent removed */
	dzp->z_phys->zp_links -= zp_is_dir;	/* ".." link from zp */
	zfs_time_stamper_locked(dzp, CONTENT_MODIFIED, tx);
	mutex_exit(&dzp->z_lock);

	error = zap_remove(zp->z_zfsvfs->z_os, dzp->z_id, dl->dl_name, tx);
	ASSERT(error == 0);

	/* Either report reaping to the caller or queue it ourselves. */
	if (reaped_ptr != NULL)
		*reaped_ptr = reaped;
	else if (reaped)
		zfs_dq_add(zp, tx);

	return (0);
}
714 
715 /*
716  * Indicate whether the directory is empty.  Works with or without z_lock
717  * held, but can only be consider a hint in the latter case.  Returns true
718  * if only "." and ".." remain and there's no work in progress.
719  */
720 boolean_t
721 zfs_dirempty(znode_t *dzp)
722 {
723 	return (dzp->z_phys->zp_size == 2 && dzp->z_dirlocks == 0);
724 }
725 
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	znode_t *xzp;
	dmu_tx_t *tx;
	uint64_t xoid;
	int error;

	*xvpp = NULL;

	/* Creating the xattr dir requires write_xattr on the base file. */
	if (error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, cr))
		return (error);

	/* Hold zp's bonus (zp_xattr update) and the new zap object. */
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_bonus(tx, zp->z_id);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, 0);
	/* z_assign may be TXG_NOWAIT; caller handles ERESTART (see users). */
	error = dmu_tx_assign(tx, zfsvfs->z_assign);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	zfs_mknode(zp, vap, &xoid, tx, cr, IS_XATTR, &xzp, 0);
	ASSERT(xzp->z_id == xoid);
	ASSERT(xzp->z_phys->zp_parent == zp->z_id);
	/* Link the new directory to its owner via zp_xattr. */
	dmu_buf_will_dirty(zp->z_dbuf, tx);
	zp->z_phys->zp_xattr = xoid;

	(void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp, xzp, "");
	dmu_tx_commit(tx);

	*xvpp = ZTOV(xzp);

	return (0);
}
761 
762 /*
763  * Return a znode for the extended attribute directory for zp.
764  * ** If the directory does not already exist, it is created **
765  *
766  *	IN:	zp	- znode to obtain attribute directory from
767  *		cr	- credentials of caller
768  *
769  *	OUT:	xzpp	- pointer to extended attribute znode
770  *
771  *	RETURN:	0 on success
772  *		error number on failure
773  */
int
zfs_get_xattrdir(znode_t *zp, vnode_t **xvpp, cred_t *cr)
{
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	znode_t		*xzp;
	zfs_dirlock_t	*dl;
	vattr_t		va;
	int		error;
top:
	/* ZXATTR: lock zp's xattr "entry" and fetch the dir if it exists. */
	error = zfs_dirent_lock(&dl, zp, "", &xzp, ZXATTR);
	if (error)
		return (error);

	if (xzp != NULL) {
		/* Already exists: return it. */
		*xvpp = ZTOV(xzp);
		zfs_dirent_unlock(dl);
		return (0);
	}

	ASSERT(zp->z_phys->zp_xattr == 0);

	/* Can't create the xattr dir on a read-only filesystem. */
	if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
		zfs_dirent_unlock(dl);
		return (EROFS);
	}

	/*
	 * The ability to 'create' files in an attribute
	 * directory comes from the write_xattr permission on the base file.
	 *
	 * The ability to 'search' an attribute directory requires
	 * read_xattr permission on the base file.
	 *
	 * Once in a directory the ability to read/write attributes
	 * is controlled by the permissions on the attribute file.
	 */
	va.va_mask = AT_TYPE | AT_MODE | AT_UID | AT_GID;
	va.va_type = VDIR;
	va.va_mode = S_IFDIR | 0755;
	va.va_uid = (uid_t)zp->z_phys->zp_uid;
	va.va_gid = (gid_t)zp->z_phys->zp_gid;

	error = zfs_make_xattrdir(zp, &va, xvpp, cr);
	zfs_dirent_unlock(dl);

	/*
	 * With TXG_NOWAIT the tx assign can return ERESTART; wait for
	 * the next open txg and retry from the top (dirlock was dropped).
	 */
	if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
		txg_wait_open(dmu_objset_pool(zfsvfs->z_os), 0);
		goto top;
	}

	return (error);
}
826 
827 /*
828  * Decide whether it is okay to remove within a sticky directory.
829  *
830  * In sticky directories, write access is not sufficient;
831  * you can remove entries from a directory only if:
832  *
833  *	you own the directory,
834  *	you own the entry,
835  *	the entry is a plain file and you have write access,
836  *	or you are privileged (checked in secpolicy...).
837  *
838  * The function returns 0 if remove access is granted.
839  */
840 int
841 zfs_sticky_remove_access(znode_t *zdp, znode_t *zp, cred_t *cr)
842 {
843 	uid_t  		uid;
844 
845 	if (zdp->z_zfsvfs->z_assign >= TXG_INITIAL)	/* ZIL replay */
846 		return (0);
847 
848 	if ((zdp->z_phys->zp_mode & S_ISVTX) == 0 ||
849 	    (uid = crgetuid(cr)) == zdp->z_phys->zp_uid ||
850 	    uid == zp->z_phys->zp_uid ||
851 	    (ZTOV(zp)->v_type == VREG &&
852 	    zfs_zaccess(zp, ACE_WRITE_DATA, cr) == 0))
853 		return (0);
854 	else
855 		return (secpolicy_vnode_remove(cr));
856 }
857