xref: /titanic_51/usr/src/uts/common/fs/zfs/zfs_fuid.c (revision eb9dbf0cbc8141f9da4ed18e6b5515d9eaea0e00)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/avl.h>
#include <sys/zap.h>
#include <sys/refcount.h>
#include <sys/nvpair.h>
#ifdef _KERNEL
#include <sys/kidmap.h>
#include <sys/sid.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#endif
#include <sys/zfs_fuid.h>

/*
 * FUID Domain table(s).
 *
 * The FUID table is stored as a packed nvlist containing an array
 * of nvlists, each of which holds an index, a domain string and an offset.
 *
 * During file system initialization the nvlist(s) are read and
 * two AVL trees are created.  One tree is keyed by the index number
 * and the other by the domain string.  Nodes are never removed from
 * the trees, but new entries may be added.  When a new entry is added,
 * the zfsvfs->z_fuid_dirty flag is set to true and the caller is then
 * responsible for calling zfs_fuid_sync() to sync the changes to disk.
 */

#define	FUID_IDX	"fuid_idx"
#define	FUID_DOMAIN	"fuid_domain"
#define	FUID_OFFSET	"fuid_offset"
#define	FUID_NVP_ARRAY	"fuid_nvlist"

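/*
 * Illustrative sketch (not part of the build): the unpacked on-disk nvlist
 * produced by zfs_fuid_sync() below has roughly this shape for a table with
 * two domains.  The SIDs shown are made-up placeholders.
 *
 *	fuid_nvlist = [
 *		{ fuid_idx = 1, fuid_offset = 0,
 *		    fuid_domain = "S-1-5-21-111-222-333" },
 *		{ fuid_idx = 2, fuid_offset = 0,
 *		    fuid_domain = "S-1-5-21-444-555-666" }
 *	]
 *
 * zfs_fuid_table_load() walks this array and inserts one fuid_domain_t
 * per entry into both AVL trees.
 */
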
typedef struct fuid_domain {
	avl_node_t	f_domnode;
	avl_node_t	f_idxnode;
	ksiddomain_t	*f_ksid;
	uint64_t	f_idx;
} fuid_domain_t;

static char *nulldomain = "";

/*
 * Compare two indexes.
 */
static int
idx_compare(const void *arg1, const void *arg2)
{
	const fuid_domain_t *node1 = arg1;
	const fuid_domain_t *node2 = arg2;

	if (node1->f_idx < node2->f_idx)
		return (-1);
	else if (node1->f_idx > node2->f_idx)
		return (1);
	return (0);
}

/*
 * Compare two domain strings.
 */
static int
domain_compare(const void *arg1, const void *arg2)
{
	const fuid_domain_t *node1 = arg1;
	const fuid_domain_t *node2 = arg2;
	int val;

	val = strcmp(node1->f_ksid->kd_name, node2->f_ksid->kd_name);
	if (val == 0)
		return (0);
	return (val > 0 ? 1 : -1);
}

void
zfs_fuid_avl_tree_create(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
{
	avl_create(idx_tree, idx_compare,
	    sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_idxnode));
	avl_create(domain_tree, domain_compare,
	    sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_domnode));
}

/*
 * Load the initial FUID domain and index trees.  This function is used by
 * both the kernel and zdb.
 */
uint64_t
zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree,
    avl_tree_t *domain_tree)
{
	dmu_buf_t *db;
	uint64_t fuid_size;

	ASSERT(fuid_obj != 0);
	VERIFY(0 == dmu_bonus_hold(os, fuid_obj,
	    FTAG, &db));
	fuid_size = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	if (fuid_size) {
		nvlist_t **fuidnvp;
		nvlist_t *nvp = NULL;
		uint_t count;
		char *packed;
		int i;

		packed = kmem_alloc(fuid_size, KM_SLEEP);
		VERIFY(dmu_read(os, fuid_obj, 0,
		    fuid_size, packed, DMU_READ_PREFETCH) == 0);
		VERIFY(nvlist_unpack(packed, fuid_size,
		    &nvp, 0) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY,
		    &fuidnvp, &count) == 0);

		for (i = 0; i != count; i++) {
			fuid_domain_t *domnode;
			char *domain;
			uint64_t idx;

			VERIFY(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN,
			    &domain) == 0);
			VERIFY(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX,
			    &idx) == 0);

			domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);

			domnode->f_idx = idx;
			domnode->f_ksid = ksid_lookupdomain(domain);
			avl_add(idx_tree, domnode);
			avl_add(domain_tree, domnode);
		}
		nvlist_free(nvp);
		kmem_free(packed, fuid_size);
	}
	return (fuid_size);
}
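
/*
 * Example (illustrative sketch only, not part of the build): a zdb-style
 * consumer could load and walk the table like this, assuming "os" is an
 * already-held objset and "fuid_obj" came from the ZFS_FUID_TABLES entry
 * of the master node:
 *
 *	avl_tree_t idx_tree, domain_tree;
 *	fuid_domain_t *node;
 *
 *	zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
 *	(void) zfs_fuid_table_load(os, fuid_obj, &idx_tree, &domain_tree);
 *	for (node = avl_first(&idx_tree); node != NULL;
 *	    node = AVL_NEXT(&idx_tree, node))
 *		(void) printf("%llu %s\n", (u_longlong_t)node->f_idx,
 *		    node->f_ksid->kd_name);
 *	zfs_fuid_table_destroy(&idx_tree, &domain_tree);
 */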

void
zfs_fuid_table_destroy(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
{
	fuid_domain_t *domnode;
	void *cookie;

	cookie = NULL;
	while ((domnode = avl_destroy_nodes(domain_tree, &cookie)) != NULL)
		ksiddomain_rele(domnode->f_ksid);

	avl_destroy(domain_tree);
	cookie = NULL;
	while ((domnode = avl_destroy_nodes(idx_tree, &cookie)) != NULL)
		kmem_free(domnode, sizeof (fuid_domain_t));
	avl_destroy(idx_tree);
}

char *
zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx)
{
	fuid_domain_t searchnode, *findnode;
	avl_index_t loc;

	searchnode.f_idx = idx;

	findnode = avl_find(idx_tree, &searchnode, &loc);

	return (findnode ? findnode->f_ksid->kd_name : nulldomain);
}

#ifdef _KERNEL
/*
 * Load the fuid table(s) into memory.
 */
static void
zfs_fuid_init(zfsvfs_t *zfsvfs)
{
	rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);

	if (zfsvfs->z_fuid_loaded) {
		rw_exit(&zfsvfs->z_fuid_lock);
		return;
	}

	zfs_fuid_avl_tree_create(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);

	(void) zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ,
	    ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj);
	if (zfsvfs->z_fuid_obj != 0) {
		zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os,
		    zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx,
		    &zfsvfs->z_fuid_domain);
	}

	zfsvfs->z_fuid_loaded = B_TRUE;
	rw_exit(&zfsvfs->z_fuid_lock);
}

/*
 * Sync out the AVL trees to persistent storage.
 */
void
zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
	nvlist_t *nvp;
	nvlist_t **fuids;
	size_t nvsize = 0;
	char *packed;
	dmu_buf_t *db;
	fuid_domain_t *domnode;
	int numnodes;
	int i;

	if (!zfsvfs->z_fuid_dirty) {
		return;
	}

	rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);

	/*
	 * First, see if the table needs to be created.
	 */
	if (zfsvfs->z_fuid_obj == 0) {
		zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os,
		    DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE,
		    sizeof (uint64_t), tx);
		VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
		    ZFS_FUID_TABLES, sizeof (uint64_t), 1,
		    &zfsvfs->z_fuid_obj, tx) == 0);
	}

	VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	numnodes = avl_numnodes(&zfsvfs->z_fuid_idx);
	fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP);
	for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++,
	    domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) {
		VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
		    domnode->f_idx) == 0);
		VERIFY(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0) == 0);
		VERIFY(nvlist_add_string(fuids[i], FUID_DOMAIN,
		    domnode->f_ksid->kd_name) == 0);
	}
	VERIFY(nvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY,
	    fuids, numnodes) == 0);
	for (i = 0; i != numnodes; i++)
		nvlist_free(fuids[i]);
	kmem_free(fuids, numnodes * sizeof (void *));
	VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0);
	packed = kmem_alloc(nvsize, KM_SLEEP);
	VERIFY(nvlist_pack(nvp, &packed, &nvsize,
	    NV_ENCODE_XDR, KM_SLEEP) == 0);
	nvlist_free(nvp);
	zfsvfs->z_fuid_size = nvsize;
	dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
	    zfsvfs->z_fuid_size, packed, tx);
	kmem_free(packed, zfsvfs->z_fuid_size);
	VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj,
	    FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	*(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
	dmu_buf_rele(db, FTAG);

	zfsvfs->z_fuid_dirty = B_FALSE;
	rw_exit(&zfsvfs->z_fuid_lock);
}
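
/*
 * Illustrative caller pattern (a sketch only, not part of the build):
 * a caller that may have dirtied the FUID table reserves space with
 * zfs_fuid_txhold() before assigning the transaction and then syncs the
 * table inside that same transaction, e.g.:
 *
 *	boolean_t fuid_dirtied = zfsvfs->z_fuid_dirty;
 *
 *	tx = dmu_tx_create(zfsvfs->z_os);
 *	...
 *	if (fuid_dirtied)
 *		zfs_fuid_txhold(zfsvfs, tx);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	...
 *	if (fuid_dirtied)
 *		zfs_fuid_sync(zfsvfs, tx);
 *	...
 *	dmu_tx_commit(tx);
 */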

/*
 * Query the domain table for a given domain.
 *
 * If the domain isn't found and addok is set, it is added to the AVL trees
 * and the zfsvfs->z_fuid_dirty flag will be set to TRUE.  It will then be
 * necessary for the caller or another thread to detect the dirty table
 * and sync out the changes.
 */
int
zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain,
    char **retdomain, boolean_t addok)
{
	fuid_domain_t searchnode, *findnode;
	avl_index_t loc;
	krw_t rw = RW_READER;

	/*
	 * If this is the dummy "nobody" domain, return an index of 0
	 * to cause the created FUID to be a standard POSIX id
	 * for the user nobody.
	 */
	if (domain[0] == '\0') {
		if (retdomain)
			*retdomain = nulldomain;
		return (0);
	}

	searchnode.f_ksid = ksid_lookupdomain(domain);
	if (retdomain)
		*retdomain = searchnode.f_ksid->kd_name;
	if (!zfsvfs->z_fuid_loaded)
		zfs_fuid_init(zfsvfs);

retry:
	rw_enter(&zfsvfs->z_fuid_lock, rw);
	findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc);

	if (findnode) {
		rw_exit(&zfsvfs->z_fuid_lock);
		ksiddomain_rele(searchnode.f_ksid);
		return (findnode->f_idx);
	} else if (addok) {
		fuid_domain_t *domnode;
		uint64_t retidx;

		if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) {
			rw_exit(&zfsvfs->z_fuid_lock);
			rw = RW_WRITER;
			goto retry;
		}

		domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
		domnode->f_ksid = searchnode.f_ksid;

		retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1;

		avl_add(&zfsvfs->z_fuid_domain, domnode);
		avl_add(&zfsvfs->z_fuid_idx, domnode);
		zfsvfs->z_fuid_dirty = B_TRUE;
		rw_exit(&zfsvfs->z_fuid_lock);
		return (retidx);
	} else {
		rw_exit(&zfsvfs->z_fuid_lock);
		return (-1);
	}
}
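
/*
 * Illustrative usage (a sketch, not part of the build; the SID shown is a
 * made-up placeholder): a read-only lookup passes addok == B_FALSE and
 * treats -1 as "domain not in the table", while a writer passes B_TRUE and
 * must later sync the dirtied table:
 *
 *	int idx;
 *
 *	idx = zfs_fuid_find_by_domain(zfsvfs, "S-1-5-21-1-2-3", NULL,
 *	    B_FALSE);
 *	if (idx == -1)
 *		// not present; nothing was added or dirtied
 *
 *	idx = zfs_fuid_find_by_domain(zfsvfs, "S-1-5-21-1-2-3", NULL,
 *	    B_TRUE);
 *	// idx > 0; zfsvfs->z_fuid_dirty may now be B_TRUE
 */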

/*
 * Query the domain table by index, returning the domain string.
 *
 * The returned pointer refers to the domain string held in an AVL node.
 */
const char *
zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx)
{
	char *domain;

	if (idx == 0 || !zfsvfs->z_use_fuids)
		return (NULL);

	if (!zfsvfs->z_fuid_loaded)
		zfs_fuid_init(zfsvfs);

	rw_enter(&zfsvfs->z_fuid_lock, RW_READER);

	if (zfsvfs->z_fuid_obj)
		domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx);
	else
		domain = nulldomain;
	rw_exit(&zfsvfs->z_fuid_lock);

	ASSERT(domain);
	return (domain);
}

void
zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp)
{
	uint64_t fuid, fgid;
	sa_bulk_attr_t bulk[2];
	int count = 0;

	if (IS_EPHEMERAL(zp->z_uid) || IS_EPHEMERAL(zp->z_gid)) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zp->z_zfsvfs),
		    NULL, &fuid, 8);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zp->z_zfsvfs),
		    NULL, &fgid, 8);
		VERIFY(0 == sa_bulk_lookup(zp->z_sa_hdl, bulk, count));
	}
	if (IS_EPHEMERAL(zp->z_uid))
		*uidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
	else
		*uidp = zp->z_uid;
	if (IS_EPHEMERAL(zp->z_gid))
		*gidp = zfs_fuid_map_id(zp->z_zfsvfs,
		    zp->z_gid, cr, ZFS_GROUP);
	else
		*gidp = zp->z_gid;
}

uid_t
zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
    cred_t *cr, zfs_fuid_type_t type)
{
	uint32_t index = FUID_INDEX(fuid);
	const char *domain;
	uid_t id;

	if (index == 0)
		return (fuid);

	domain = zfs_fuid_find_by_idx(zfsvfs, index);
	ASSERT(domain != NULL);

	if (type == ZFS_OWNER || type == ZFS_ACE_USER) {
		(void) kidmap_getuidbysid(crgetzone(cr), domain,
		    FUID_RID(fuid), &id);
	} else {
		(void) kidmap_getgidbysid(crgetzone(cr), domain,
		    FUID_RID(fuid), &id);
	}
	return (id);
}
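
/*
 * Worked example of the FUID layout used above (illustrative only):
 * FUID_INDEX(), FUID_RID() and FUID_ENCODE() from sys/zfs_fuid.h treat a
 * FUID as a 64-bit value whose upper 32 bits are the domain table index
 * and whose lower 32 bits are the RID within that domain.  For instance,
 * for a hypothetical domain at table index 1 and RID 1234:
 *
 *	fuid = FUID_ENCODE(1, 1234);		0x00000001000004D2
 *	FUID_INDEX(fuid)			1
 *	FUID_RID(fuid)				1234
 *
 * An index of 0 means "no domain", so the value is an ordinary POSIX id,
 * which is why zfs_fuid_map_id() returns the fuid unchanged in that case.
 */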

/*
 * Add a FUID node to the list of FUIDs being created for this ACL.
 *
 * If the ACL has multiple domains, then keep only one copy of each unique
 * domain.
 */
void
zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid,
    uint64_t idx, uint64_t id, zfs_fuid_type_t type)
{
	zfs_fuid_t *fuid;
	zfs_fuid_domain_t *fuid_domain;
	zfs_fuid_info_t *fuidp;
	uint64_t fuididx;
	boolean_t found = B_FALSE;

	if (*fuidpp == NULL)
		*fuidpp = zfs_fuid_info_alloc();

	fuidp = *fuidpp;
	/*
	 * First, find the FUID domain index in the linked list.
	 *
	 * If one isn't found, create an entry.
	 */

	for (fuididx = 1, fuid_domain = list_head(&fuidp->z_domains);
	    fuid_domain; fuid_domain = list_next(&fuidp->z_domains,
	    fuid_domain), fuididx++) {
		if (idx == fuid_domain->z_domidx) {
			found = B_TRUE;
			break;
		}
	}

	if (!found) {
		fuid_domain = kmem_alloc(sizeof (zfs_fuid_domain_t), KM_SLEEP);
		fuid_domain->z_domain = domain;
		fuid_domain->z_domidx = idx;
		list_insert_tail(&fuidp->z_domains, fuid_domain);
		fuidp->z_domain_str_sz += strlen(domain) + 1;
		fuidp->z_domain_cnt++;
	}

	if (type == ZFS_ACE_USER || type == ZFS_ACE_GROUP) {

		/*
		 * Now allocate a fuid entry and add it to the end of the list.
		 */

		fuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP);
		fuid->z_id = id;
		fuid->z_domidx = idx;
		fuid->z_logfuid = FUID_ENCODE(fuididx, rid);

		list_insert_tail(&fuidp->z_fuids, fuid);
		fuidp->z_fuid_cnt++;
	} else {
		if (type == ZFS_OWNER)
			fuidp->z_fuid_owner = FUID_ENCODE(fuididx, rid);
		else
			fuidp->z_fuid_group = FUID_ENCODE(fuididx, rid);
	}
}
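
/*
 * Illustrative note (a sketch of the bookkeeping above, not authoritative):
 * two different indices are in play.  The "idx" argument is the position in
 * the on-disk FUID table (from zfs_fuid_find_by_domain()), while "fuididx"
 * is the 1-based position of the domain within this ACL's private z_domains
 * list.  The log FUID records the latter, so for a single hypothetical
 * domain at on-disk index 5 and an ACE with RID 1234:
 *
 *	fuid_domain->z_domidx = 5		on-disk table index
 *	fuid->z_logfuid = FUID_ENCODE(1, 1234)	1 = position in z_domains
 *
 * Replay code can then resolve the logged index through the domain strings
 * carried in the zfs_fuid_info_t (see zfs_fuid_create() below).
 */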

/*
 * Create a file system FUID based on information in the user's cred.
 */
uint64_t
zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
    cred_t *cr, zfs_fuid_info_t **fuidp)
{
	uint64_t	idx;
	ksid_t		*ksid;
	uint32_t	rid;
	char		*kdomain;
	const char	*domain;
	uid_t		id;

	VERIFY(type == ZFS_OWNER || type == ZFS_GROUP);

	ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP);
	if (ksid) {
		id = ksid_getid(ksid);
	} else {
		if (type == ZFS_OWNER)
			id = crgetuid(cr);
		else
			id = crgetgid(cr);

		if (IS_EPHEMERAL(id)) {
			return ((uint64_t)(type == ZFS_OWNER ?
			    UID_NOBODY : GID_NOBODY));
		}
	}

	if (!zfsvfs->z_use_fuids || (!IS_EPHEMERAL(id)))
		return ((uint64_t)id);

	rid = ksid_getrid(ksid);
	domain = ksid_getdomain(ksid);

	idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);

	zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type);

	return (FUID_ENCODE(idx, rid));
}

/*
 * Create a file system FUID for an ACL ACE
 * or a chown/chgrp of the file.
 * This is similar to zfs_fuid_create_cred, except that
 * we can't find the domain + rid information in the
 * cred.  Instead we have to query Winchester for the
 * domain and rid.
 *
 * During replay operations the domain+rid information is
 * found in the zfs_fuid_info_t that the replay code has
 * attached to the zfsvfs of the file system.
 */
uint64_t
zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
    zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp)
{
	const char *domain;
	char *kdomain;
	uint32_t fuid_idx = FUID_INDEX(id);
	uint32_t rid;
	idmap_stat status;
	uint64_t idx;
	zfs_fuid_t *zfuid = NULL;
	zfs_fuid_info_t *fuidp;

	/*
	 * If this is a POSIX ID, or the entry is already a FUID, then
	 * just return the id.
	 *
	 * We may also be handed an already FUID'ized id via
	 * chmod.
	 */

	if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0)
		return (id);

	if (zfsvfs->z_replay) {
		fuidp = zfsvfs->z_fuid_replay;

		/*
		 * If we are passed an ephemeral id, but no
		 * fuid_info was logged, then return NOBODY.
		 * This is most likely a result of the idmap
		 * service not being available.
		 */
		if (fuidp == NULL)
			return (UID_NOBODY);

		switch (type) {
		case ZFS_ACE_USER:
		case ZFS_ACE_GROUP:
			zfuid = list_head(&fuidp->z_fuids);
			rid = FUID_RID(zfuid->z_logfuid);
			idx = FUID_INDEX(zfuid->z_logfuid);
			break;
		case ZFS_OWNER:
			rid = FUID_RID(fuidp->z_fuid_owner);
			idx = FUID_INDEX(fuidp->z_fuid_owner);
			break;
		case ZFS_GROUP:
			rid = FUID_RID(fuidp->z_fuid_group);
			idx = FUID_INDEX(fuidp->z_fuid_group);
			break;
		};
		domain = fuidp->z_domain_table[idx - 1];
	} else {
		if (type == ZFS_OWNER || type == ZFS_ACE_USER)
			status = kidmap_getsidbyuid(crgetzone(cr), id,
			    &domain, &rid);
		else
			status = kidmap_getsidbygid(crgetzone(cr), id,
			    &domain, &rid);

		if (status != 0) {
			/*
			 * When returning nobody we will need to
			 * make a dummy fuid table entry for logging
			 * purposes.
			 */
			rid = UID_NOBODY;
			domain = nulldomain;
		}
	}

	idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);

	if (!zfsvfs->z_replay)
		zfs_fuid_node_add(fuidpp, kdomain,
		    rid, idx, id, type);
	else if (zfuid != NULL) {
		list_remove(&fuidp->z_fuids, zfuid);
		kmem_free(zfuid, sizeof (zfs_fuid_t));
	}
	return (FUID_ENCODE(idx, rid));
}

void
zfs_fuid_destroy(zfsvfs_t *zfsvfs)
{
	rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
	if (!zfsvfs->z_fuid_loaded) {
		rw_exit(&zfsvfs->z_fuid_lock);
		return;
	}
	zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
	rw_exit(&zfsvfs->z_fuid_lock);
}

/*
 * Allocate zfs_fuid_info for tracking FUIDs created during
 * zfs_mknode, VOP_SETATTR() or VOP_SETSECATTR()
 */
zfs_fuid_info_t *
zfs_fuid_info_alloc(void)
{
	zfs_fuid_info_t *fuidp;

	fuidp = kmem_zalloc(sizeof (zfs_fuid_info_t), KM_SLEEP);
	list_create(&fuidp->z_domains, sizeof (zfs_fuid_domain_t),
	    offsetof(zfs_fuid_domain_t, z_next));
	list_create(&fuidp->z_fuids, sizeof (zfs_fuid_t),
	    offsetof(zfs_fuid_t, z_next));
	return (fuidp);
}

/*
 * Release all memory associated with zfs_fuid_info_t
 */
void
zfs_fuid_info_free(zfs_fuid_info_t *fuidp)
{
	zfs_fuid_t *zfuid;
	zfs_fuid_domain_t *zdomain;

	while ((zfuid = list_head(&fuidp->z_fuids)) != NULL) {
		list_remove(&fuidp->z_fuids, zfuid);
		kmem_free(zfuid, sizeof (zfs_fuid_t));
	}

	if (fuidp->z_domain_table != NULL)
		kmem_free(fuidp->z_domain_table,
		    (sizeof (char **)) * fuidp->z_domain_cnt);

	while ((zdomain = list_head(&fuidp->z_domains)) != NULL) {
		list_remove(&fuidp->z_domains, zdomain);
		kmem_free(zdomain, sizeof (zfs_fuid_domain_t));
	}

	kmem_free(fuidp, sizeof (zfs_fuid_info_t));
}

/*
 * Check to see if id is a group member.  If the cred has ksid info,
 * the sidlist is checked first; if the id is still not found,
 * the POSIX groups are checked.
 *
 * Will use a straight FUID compare when possible.
 */
boolean_t
zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
{
	ksid_t		*ksid = crgetsid(cr, KSID_GROUP);
	ksidlist_t	*ksidlist = crgetsidlist(cr);
	uid_t		gid;

	if (ksid && ksidlist) {
		int		i;
		ksid_t		*ksid_groups;
		uint32_t	idx = FUID_INDEX(id);
		uint32_t	rid = FUID_RID(id);

		ksid_groups = ksidlist->ksl_sids;

		for (i = 0; i != ksidlist->ksl_nsid; i++) {
			if (idx == 0) {
				if (id != IDMAP_WK_CREATOR_GROUP_GID &&
				    id == ksid_groups[i].ks_id) {
					return (B_TRUE);
				}
			} else {
				const char *domain;

				domain = zfs_fuid_find_by_idx(zfsvfs, idx);
				ASSERT(domain != NULL);

				if (strcmp(domain,
				    IDMAP_WK_CREATOR_SID_AUTHORITY) == 0)
					return (B_FALSE);

				if ((strcmp(domain,
				    ksid_groups[i].ks_domain->kd_name) == 0) &&
				    rid == ksid_groups[i].ks_rid)
					return (B_TRUE);
			}
		}
	}

	/*
	 * Not found in ksidlist, check posix groups
	 */
	gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP);
	return (groupmember(gid, cr));
}

void
zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
	if (zfsvfs->z_fuid_obj == 0) {
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    FUID_SIZE_ESTIMATE(zfsvfs));
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
	} else {
		dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
		dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
		    FUID_SIZE_ESTIMATE(zfsvfs));
	}
}
#endif