/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/avl.h>
#include <sys/zap.h>
#include <sys/refcount.h>
#include <sys/nvpair.h>
#ifdef _KERNEL
#include <sys/kidmap.h>
#include <sys/sid.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#endif
#include <sys/zfs_fuid.h>

/*
 * FUID Domain table(s).
 *
 * The FUID table is stored as a packed nvlist of an array of nvlists,
 * each of which contains an index, a domain string and an offset.
 *
 * During file system initialization the nvlist(s) are read and two AVL
 * trees are created.  One tree is keyed by the index number and the
 * other by the domain string.  Nodes are never removed from the trees,
 * but new entries may be added.  If a new entry is added then the
 * zfsvfs->z_fuid_dirty flag is set to true and the caller is then
 * responsible for calling zfs_fuid_sync() to sync the changes to disk.
 */
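
/*
 * A FUID combines an index into this domain table with a RID in that
 * domain (see the FUID_ENCODE/FUID_INDEX/FUID_RID macros in
 * sys/zfs_fuid.h); an index of 0 denotes a plain POSIX id.
 */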

#define	FUID_IDX	"fuid_idx"
#define	FUID_DOMAIN	"fuid_domain"
#define	FUID_OFFSET	"fuid_offset"
#define	FUID_NVP_ARRAY	"fuid_nvlist"

typedef struct fuid_domain {
	avl_node_t	f_domnode;
	avl_node_t	f_idxnode;
	ksiddomain_t	*f_ksid;
	uint64_t	f_idx;
} fuid_domain_t;

static char *nulldomain = "";

/*
 * Compare two indexes.
 */
static int
idx_compare(const void *arg1, const void *arg2)
{
	const fuid_domain_t *node1 = arg1;
	const fuid_domain_t *node2 = arg2;

	if (node1->f_idx < node2->f_idx)
		return (-1);
	else if (node1->f_idx > node2->f_idx)
		return (1);
	return (0);
}

/*
 * Compare two domain strings.
 */
static int
domain_compare(const void *arg1, const void *arg2)
{
	const fuid_domain_t *node1 = arg1;
	const fuid_domain_t *node2 = arg2;
	int val;

	val = strcmp(node1->f_ksid->kd_name, node2->f_ksid->kd_name);
	if (val == 0)
		return (0);
	return (val > 0 ? 1 : -1);
}

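/*
 * Create the AVL trees used to track the FUID domains, one keyed by
 * index and the other by domain string.
 */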
void
zfs_fuid_avl_tree_create(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
{
	avl_create(idx_tree, idx_compare,
	    sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_idxnode));
	avl_create(domain_tree, domain_compare,
	    sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_domnode));
}

/*
 * Load the initial FUID domain and index trees.  This function is
 * used by both the kernel and zdb.
 */
uint64_t
zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree,
    avl_tree_t *domain_tree)
{
	dmu_buf_t *db;
	uint64_t fuid_size;

	ASSERT(fuid_obj != 0);
	VERIFY(0 == dmu_bonus_hold(os, fuid_obj,
	    FTAG, &db));
	fuid_size = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	if (fuid_size) {
		nvlist_t **fuidnvp;
		nvlist_t *nvp = NULL;
		uint_t count;
		char *packed;
		int i;

		packed = kmem_alloc(fuid_size, KM_SLEEP);
		VERIFY(dmu_read(os, fuid_obj, 0,
		    fuid_size, packed, DMU_READ_PREFETCH) == 0);
		VERIFY(nvlist_unpack(packed, fuid_size,
		    &nvp, 0) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY,
		    &fuidnvp, &count) == 0);

		for (i = 0; i != count; i++) {
			fuid_domain_t *domnode;
			char *domain;
			uint64_t idx;

			VERIFY(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN,
			    &domain) == 0);
			VERIFY(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX,
			    &idx) == 0);

			domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);

			domnode->f_idx = idx;
			domnode->f_ksid = ksid_lookupdomain(domain);
			avl_add(idx_tree, domnode);
			avl_add(domain_tree, domnode);
		}
		nvlist_free(nvp);
		kmem_free(packed, fuid_size);
	}
	return (fuid_size);
}

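/*
 * Tear down the in-core FUID tables.  The ksid domain references are
 * released while walking the domain tree; the nodes themselves (which
 * are linked into both trees) are freed while walking the index tree.
 */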
void
zfs_fuid_table_destroy(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
{
	fuid_domain_t *domnode;
	void *cookie;

	cookie = NULL;
	while ((domnode = avl_destroy_nodes(domain_tree, &cookie)) != NULL)
		ksiddomain_rele(domnode->f_ksid);

	avl_destroy(domain_tree);
	cookie = NULL;
	while ((domnode = avl_destroy_nodes(idx_tree, &cookie)) != NULL)
		kmem_free(domnode, sizeof (fuid_domain_t));
	avl_destroy(idx_tree);
}

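/*
 * Look up the domain string for a given index.  Returns the empty
 * domain string if the index is not found.
 */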
char *
zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx)
{
	fuid_domain_t searchnode, *findnode;
	avl_index_t loc;

	searchnode.f_idx = idx;

	findnode = avl_find(idx_tree, &searchnode, &loc);

	return (findnode ? findnode->f_ksid->kd_name : nulldomain);
}

#ifdef _KERNEL
/*
 * Load the FUID table(s) into memory.
 */
static void
zfs_fuid_init(zfsvfs_t *zfsvfs)
{
	rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);

	if (zfsvfs->z_fuid_loaded) {
		rw_exit(&zfsvfs->z_fuid_lock);
		return;
	}

	zfs_fuid_avl_tree_create(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);

	(void) zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ,
	    ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj);
	if (zfsvfs->z_fuid_obj != 0) {
		zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os,
		    zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx,
		    &zfsvfs->z_fuid_domain);
	}

	zfsvfs->z_fuid_loaded = B_TRUE;
	rw_exit(&zfsvfs->z_fuid_lock);
}

/*
 * Sync out the AVL trees to persistent storage.
 */
void
zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
	nvlist_t *nvp;
	nvlist_t **fuids;
	size_t nvsize = 0;
	char *packed;
	dmu_buf_t *db;
	fuid_domain_t *domnode;
	int numnodes;
	int i;

	if (!zfsvfs->z_fuid_dirty) {
		return;
	}

	rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);

	/*
	 * First see if the table needs to be created.
	 */
	if (zfsvfs->z_fuid_obj == 0) {
		zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os,
		    DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE,
		    sizeof (uint64_t), tx);
		VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
		    ZFS_FUID_TABLES, sizeof (uint64_t), 1,
		    &zfsvfs->z_fuid_obj, tx) == 0);
	}

	VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	numnodes = avl_numnodes(&zfsvfs->z_fuid_idx);
	fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP);
	for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++,
	    domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) {
		VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
		    domnode->f_idx) == 0);
		VERIFY(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0) == 0);
		VERIFY(nvlist_add_string(fuids[i], FUID_DOMAIN,
		    domnode->f_ksid->kd_name) == 0);
	}
	VERIFY(nvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY,
	    fuids, numnodes) == 0);
	for (i = 0; i != numnodes; i++)
		nvlist_free(fuids[i]);
	kmem_free(fuids, numnodes * sizeof (void *));
	VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0);
	packed = kmem_alloc(nvsize, KM_SLEEP);
	VERIFY(nvlist_pack(nvp, &packed, &nvsize,
	    NV_ENCODE_XDR, KM_SLEEP) == 0);
	nvlist_free(nvp);
	zfsvfs->z_fuid_size = nvsize;
	dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
	    zfsvfs->z_fuid_size, packed, tx);
	kmem_free(packed, zfsvfs->z_fuid_size);
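	/*
	 * The object's bonus buffer records the size of the packed
	 * nvlist so that zfs_fuid_table_load() knows how much to read.
	 */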
	VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj,
	    FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	*(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
	dmu_buf_rele(db, FTAG);

	zfsvfs->z_fuid_dirty = B_FALSE;
	rw_exit(&zfsvfs->z_fuid_lock);
}

/*
 * Query the domain table for a given domain.
 *
 * If the domain isn't found and addok is set, it is added to the AVL
 * trees and the zfsvfs->z_fuid_dirty flag is set to TRUE.  It is then
 * up to the caller or another thread to detect the dirty table and
 * sync out the changes.
 */
int
zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain,
    char **retdomain, boolean_t addok)
{
	fuid_domain_t searchnode, *findnode;
	avl_index_t loc;
	krw_t rw = RW_READER;

	/*
	 * If this is the dummy "nobody" domain, return an index of 0
	 * to cause the created FUID to be a standard POSIX id for the
	 * user nobody.
	 */
	if (domain[0] == '\0') {
		if (retdomain)
			*retdomain = nulldomain;
		return (0);
	}

	searchnode.f_ksid = ksid_lookupdomain(domain);
	if (retdomain)
		*retdomain = searchnode.f_ksid->kd_name;
	if (!zfsvfs->z_fuid_loaded)
		zfs_fuid_init(zfsvfs);

retry:
	rw_enter(&zfsvfs->z_fuid_lock, rw);
	findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc);

	if (findnode) {
		rw_exit(&zfsvfs->z_fuid_lock);
		ksiddomain_rele(searchnode.f_ksid);
		return (findnode->f_idx);
	} else if (addok) {
		fuid_domain_t *domnode;
		uint64_t retidx;

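		/*
		 * We need the write lock to modify the trees.  If the
		 * read lock can't be upgraded in place, drop it and
		 * retry the lookup as a writer.
		 */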
		if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) {
			rw_exit(&zfsvfs->z_fuid_lock);
			rw = RW_WRITER;
			goto retry;
		}

		domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
		domnode->f_ksid = searchnode.f_ksid;

		retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1;

		avl_add(&zfsvfs->z_fuid_domain, domnode);
		avl_add(&zfsvfs->z_fuid_idx, domnode);
		zfsvfs->z_fuid_dirty = B_TRUE;
		rw_exit(&zfsvfs->z_fuid_lock);
		return (retidx);
	} else {
		rw_exit(&zfsvfs->z_fuid_lock);
		return (-1);
	}
}

/*
 * Query the domain table by index, returning the domain string.
 *
 * The returned pointer refers to the domain string held in the AVL
 * node, not a copy.
 */
const char *
zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx)
{
	char *domain;

	if (idx == 0 || !zfsvfs->z_use_fuids)
		return (NULL);

	if (!zfsvfs->z_fuid_loaded)
		zfs_fuid_init(zfsvfs);

	rw_enter(&zfsvfs->z_fuid_lock, RW_READER);

	if (zfsvfs->z_fuid_obj || zfsvfs->z_fuid_dirty)
		domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx);
	else
		domain = nulldomain;
	rw_exit(&zfsvfs->z_fuid_lock);

	ASSERT(domain);
	return (domain);
}

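/*
 * Map the znode's owner and group ids to POSIX ids, resolving any
 * ephemeral (FUID-based) ids through zfs_fuid_map_id().
 */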
void
zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp)
{
	uint64_t fuid, fgid;
	sa_bulk_attr_t bulk[2];
	int count = 0;

	if (IS_EPHEMERAL(zp->z_uid) || IS_EPHEMERAL(zp->z_gid)) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zp->z_zfsvfs),
		    NULL, &fuid, 8);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zp->z_zfsvfs),
		    NULL, &fgid, 8);
		VERIFY(0 == sa_bulk_lookup(zp->z_sa_hdl, bulk, count));
	}
	if (IS_EPHEMERAL(zp->z_uid))
		*uidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
	else
		*uidp = zp->z_uid;
	if (IS_EPHEMERAL(zp->z_gid))
		*gidp = zfs_fuid_map_id(zp->z_zfsvfs,
		    zp->z_gid, cr, ZFS_GROUP);
	else
		*gidp = zp->z_gid;
}

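/*
 * Map a single FUID to a POSIX id.  A FUID with index 0 already is a
 * POSIX id and is returned as-is; otherwise the domain is looked up
 * and the idmap service is asked for the matching uid or gid.
 */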
uid_t
zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
    cred_t *cr, zfs_fuid_type_t type)
{
	uint32_t index = FUID_INDEX(fuid);
	const char *domain;
	uid_t id;

	if (index == 0)
		return (fuid);

	domain = zfs_fuid_find_by_idx(zfsvfs, index);
	ASSERT(domain != NULL);

	if (type == ZFS_OWNER || type == ZFS_ACE_USER) {
		(void) kidmap_getuidbysid(crgetzone(cr), domain,
		    FUID_RID(fuid), &id);
	} else {
		(void) kidmap_getgidbysid(crgetzone(cr), domain,
		    FUID_RID(fuid), &id);
	}
	return (id);
}

/*
 * Add a FUID node to the list of FUIDs being created for this ACL.
 *
 * If the ACL has multiple domains, then keep only one copy of each
 * unique domain.
 */
void
zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid,
    uint64_t idx, uint64_t id, zfs_fuid_type_t type)
{
	zfs_fuid_t *fuid;
	zfs_fuid_domain_t *fuid_domain;
	zfs_fuid_info_t *fuidp;
	uint64_t fuididx;
	boolean_t found = B_FALSE;

	if (*fuidpp == NULL)
		*fuidpp = zfs_fuid_info_alloc();

	fuidp = *fuidpp;
	/*
	 * First find the FUID domain index in the linked list.
	 *
	 * If one isn't found then create an entry.
	 */

	for (fuididx = 1, fuid_domain = list_head(&fuidp->z_domains);
	    fuid_domain; fuid_domain = list_next(&fuidp->z_domains,
	    fuid_domain), fuididx++) {
		if (idx == fuid_domain->z_domidx) {
			found = B_TRUE;
			break;
		}
	}

	if (!found) {
		fuid_domain = kmem_alloc(sizeof (zfs_fuid_domain_t), KM_SLEEP);
		fuid_domain->z_domain = domain;
		fuid_domain->z_domidx = idx;
		list_insert_tail(&fuidp->z_domains, fuid_domain);
		fuidp->z_domain_str_sz += strlen(domain) + 1;
		fuidp->z_domain_cnt++;
	}

	if (type == ZFS_ACE_USER || type == ZFS_ACE_GROUP) {

		/*
		 * Now allocate a fuid entry and add it to the end of
		 * the list.
		 */

		fuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP);
		fuid->z_id = id;
		fuid->z_domidx = idx;
		fuid->z_logfuid = FUID_ENCODE(fuididx, rid);

		list_insert_tail(&fuidp->z_fuids, fuid);
		fuidp->z_fuid_cnt++;
	} else {
		if (type == ZFS_OWNER)
			fuidp->z_fuid_owner = FUID_ENCODE(fuididx, rid);
		else
			fuidp->z_fuid_group = FUID_ENCODE(fuididx, rid);
	}
}

/*
 * Create a file system FUID based on information in the user's cred.
 *
 * If the cred contains KSID_OWNER then it is used to determine the
 * uid; otherwise cred's uid is used.  By default cred's gid is used,
 * unless it's an ephemeral ID, in which case KSID_GROUP will be used
 * if it exists.
 */
uint64_t
zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
    cred_t *cr, zfs_fuid_info_t **fuidp)
{
	uint64_t	idx;
	ksid_t		*ksid;
	uint32_t	rid;
	char		*kdomain;
	const char	*domain;
	uid_t		id;

	VERIFY(type == ZFS_OWNER || type == ZFS_GROUP);

	ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP);

	if (!zfsvfs->z_use_fuids || (ksid == NULL)) {
		id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr);

		if (IS_EPHEMERAL(id))
			return ((type == ZFS_OWNER) ? UID_NOBODY : GID_NOBODY);

		return ((uint64_t)id);
	}

	/*
	 * ksid is present and FUIDs are supported.
	 */
	id = (type == ZFS_OWNER) ? ksid_getid(ksid) : crgetgid(cr);

	if (!IS_EPHEMERAL(id))
		return ((uint64_t)id);

	if (type == ZFS_GROUP)
		id = ksid_getid(ksid);

	rid = ksid_getrid(ksid);
	domain = ksid_getdomain(ksid);

	idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);

	zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type);

	return (FUID_ENCODE(idx, rid));
}

/*
 * Create a file system FUID for an ACL ace or for a chown/chgrp of
 * the file.
 *
 * This is similar to zfs_fuid_create_cred, except that we can't find
 * the domain + rid information in the cred.  Instead we have to query
 * Winchester (via the idmap service) for the domain and rid.
 *
 * During replay operations the domain + rid information is found in
 * the zfs_fuid_info_t that the replay code has attached to the zfsvfs
 * of the file system.
 */
uint64_t
zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
    zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp)
{
	const char *domain;
	char *kdomain;
	uint32_t fuid_idx = FUID_INDEX(id);
	uint32_t rid;
	idmap_stat status;
	uint64_t idx;
	zfs_fuid_t *zfuid = NULL;
	zfs_fuid_info_t *fuidp;

	/*
	 * If this is a POSIX ID, or the entry is already a FUID, then
	 * just return the id.
	 *
	 * We may also be handed an already FUID'ized id via chmod.
	 */

	if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0)
		return (id);

	if (zfsvfs->z_replay) {
		fuidp = zfsvfs->z_fuid_replay;

		/*
		 * If we are passed an ephemeral id, but no fuid_info
		 * was logged, then return NOBODY.  This is most likely
		 * a result of the idmap service not being available.
		 */
		if (fuidp == NULL)
			return (UID_NOBODY);

		switch (type) {
		case ZFS_ACE_USER:
		case ZFS_ACE_GROUP:
			zfuid = list_head(&fuidp->z_fuids);
			rid = FUID_RID(zfuid->z_logfuid);
			idx = FUID_INDEX(zfuid->z_logfuid);
			break;
		case ZFS_OWNER:
			rid = FUID_RID(fuidp->z_fuid_owner);
			idx = FUID_INDEX(fuidp->z_fuid_owner);
			break;
		case ZFS_GROUP:
			rid = FUID_RID(fuidp->z_fuid_group);
			idx = FUID_INDEX(fuidp->z_fuid_group);
			break;
		}
		domain = fuidp->z_domain_table[idx - 1];
	} else {
		if (type == ZFS_OWNER || type == ZFS_ACE_USER)
			status = kidmap_getsidbyuid(crgetzone(cr), id,
			    &domain, &rid);
		else
			status = kidmap_getsidbygid(crgetzone(cr), id,
			    &domain, &rid);

		if (status != 0) {
			/*
			 * When returning nobody we will need to make a
			 * dummy fuid table entry for logging purposes.
			 */
			rid = UID_NOBODY;
			domain = nulldomain;
		}
	}

	idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);

	if (!zfsvfs->z_replay)
		zfs_fuid_node_add(fuidpp, kdomain,
		    rid, idx, id, type);
	else if (zfuid != NULL) {
		list_remove(&fuidp->z_fuids, zfuid);
		kmem_free(zfuid, sizeof (zfs_fuid_t));
	}
	return (FUID_ENCODE(idx, rid));
}

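/*
 * Free the in-core FUID tables for this file system, if they were
 * ever loaded.
 */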
void
zfs_fuid_destroy(zfsvfs_t *zfsvfs)
{
	rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
	if (!zfsvfs->z_fuid_loaded) {
		rw_exit(&zfsvfs->z_fuid_lock);
		return;
	}
	zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
	rw_exit(&zfsvfs->z_fuid_lock);
}

/*
 * Allocate a zfs_fuid_info_t for tracking FUIDs created during
 * zfs_mknode, VOP_SETATTR() or VOP_SETSECATTR().
 */
zfs_fuid_info_t *
zfs_fuid_info_alloc(void)
{
	zfs_fuid_info_t *fuidp;

	fuidp = kmem_zalloc(sizeof (zfs_fuid_info_t), KM_SLEEP);
	list_create(&fuidp->z_domains, sizeof (zfs_fuid_domain_t),
	    offsetof(zfs_fuid_domain_t, z_next));
	list_create(&fuidp->z_fuids, sizeof (zfs_fuid_t),
	    offsetof(zfs_fuid_t, z_next));
	return (fuidp);
}

/*
 * Release all memory associated with a zfs_fuid_info_t.
 */
void
zfs_fuid_info_free(zfs_fuid_info_t *fuidp)
{
	zfs_fuid_t *zfuid;
	zfs_fuid_domain_t *zdomain;

	while ((zfuid = list_head(&fuidp->z_fuids)) != NULL) {
		list_remove(&fuidp->z_fuids, zfuid);
		kmem_free(zfuid, sizeof (zfs_fuid_t));
	}

	if (fuidp->z_domain_table != NULL)
		kmem_free(fuidp->z_domain_table,
		    (sizeof (char **)) * fuidp->z_domain_cnt);

	while ((zdomain = list_head(&fuidp->z_domains)) != NULL) {
		list_remove(&fuidp->z_domains, zdomain);
		kmem_free(zdomain, sizeof (zfs_fuid_domain_t));
	}

	kmem_free(fuidp, sizeof (zfs_fuid_info_t));
}

/*
 * Check to see if id is a group member.  If the cred has ksid info,
 * then the sidlist is checked first; if the id is still not found,
 * the POSIX groups are checked.
 *
 * Will use a straight FUID compare when possible.
 */
boolean_t
zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
{
	ksid_t		*ksid = crgetsid(cr, KSID_GROUP);
	ksidlist_t	*ksidlist = crgetsidlist(cr);
	uid_t		gid;

	if (ksid && ksidlist) {
		int		i;
		ksid_t		*ksid_groups;
		uint32_t	idx = FUID_INDEX(id);
		uint32_t	rid = FUID_RID(id);

		ksid_groups = ksidlist->ksl_sids;

		for (i = 0; i != ksidlist->ksl_nsid; i++) {
			if (idx == 0) {
				if (id != IDMAP_WK_CREATOR_GROUP_GID &&
				    id == ksid_groups[i].ks_id) {
					return (B_TRUE);
				}
			} else {
				const char *domain;

				domain = zfs_fuid_find_by_idx(zfsvfs, idx);
				ASSERT(domain != NULL);

				if (strcmp(domain,
				    IDMAP_WK_CREATOR_SID_AUTHORITY) == 0)
					return (B_FALSE);

				if ((strcmp(domain,
				    ksid_groups[i].ks_domain->kd_name) == 0) &&
				    rid == ksid_groups[i].ks_rid)
					return (B_TRUE);
			}
		}
	}

	/*
	 * Not found in ksidlist, check POSIX groups.
	 */
	gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP);
	return (groupmember(gid, cr));
}

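/*
 * Add the dmu_tx holds needed to create or update the on-disk FUID
 * table as part of the caller's transaction.
 */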
void
zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
	if (zfsvfs->z_fuid_obj == 0) {
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    FUID_SIZE_ESTIMATE(zfsvfs));
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
	} else {
		dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
		dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
		    FUID_SIZE_ESTIMATE(zfsvfs));
	}
}
#endif