/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/avl.h>
#include <sys/zap.h>
#include <sys/refcount.h>
#include <sys/nvpair.h>
#ifdef _KERNEL
#include <sys/kidmap.h>
#include <sys/sid.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#endif
#include <sys/zfs_fuid.h>

/*
 * FUID Domain table(s).
 *
 * The FUID table is stored as a packed nvlist of an array
 * of nvlists which contain an index, domain string and offset
 *
 * During file system initialization the nvlist(s) are read and
 * two AVL trees are created.  One tree is keyed by the index number
 * and the other by the domain string.  Nodes are never removed from
 * trees, but new entries may be added.  If a new entry is added then
 * the zfsvfs->z_fuid_dirty flag is set to true and the caller will then
 * be responsible for calling zfs_fuid_sync() to sync the changes to disk.
 *
 */

#define	FUID_IDX	"fuid_idx"
#define	FUID_DOMAIN	"fuid_domain"
#define	FUID_OFFSET	"fuid_offset"
#define	FUID_NVP_ARRAY	"fuid_nvlist"

typedef struct fuid_domain {
	avl_node_t	f_domnode;
	avl_node_t	f_idxnode;
	ksiddomain_t	*f_ksid;
	uint64_t	f_idx;
} fuid_domain_t;

static char *nulldomain = "";

/*
 * Compare two indexes.
 */
static int
idx_compare(const void *arg1, const void *arg2)
{
	const fuid_domain_t *node1 = arg1;
	const fuid_domain_t *node2 = arg2;

	if (node1->f_idx < node2->f_idx)
		return (-1);
	else if (node1->f_idx > node2->f_idx)
		return (1);
	return (0);
}

/*
 * Compare two domain strings.
 */
static int
domain_compare(const void *arg1, const void *arg2)
{
	const fuid_domain_t *node1 = arg1;
	const fuid_domain_t *node2 = arg2;
	int val;

	val = strcmp(node1->f_ksid->kd_name, node2->f_ksid->kd_name);
	if (val == 0)
		return (0);
	return (val > 0 ? 1 : -1);
}

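/*
 * Create the in-core AVL trees used to track FUID domains, one
 * indexed by domain index and one indexed by domain string.
 */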
void
zfs_fuid_avl_tree_create(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
{
	avl_create(idx_tree, idx_compare,
	    sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_idxnode));
	avl_create(domain_tree, domain_compare,
	    sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_domnode));
}

/*
 * Load the initial FUID domain and idx trees.  This function is used by
 * both the kernel and zdb.
 */
uint64_t
zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree,
    avl_tree_t *domain_tree)
{
	dmu_buf_t *db;
	uint64_t fuid_size;

	ASSERT(fuid_obj != 0);
	VERIFY(0 == dmu_bonus_hold(os, fuid_obj,
	    FTAG, &db));
	fuid_size = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	if (fuid_size) {
		nvlist_t **fuidnvp;
		nvlist_t *nvp = NULL;
		uint_t count;
		char *packed;
		int i;

		packed = kmem_alloc(fuid_size, KM_SLEEP);
		VERIFY(dmu_read(os, fuid_obj, 0,
		    fuid_size, packed, DMU_READ_PREFETCH) == 0);
		VERIFY(nvlist_unpack(packed, fuid_size,
		    &nvp, 0) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY,
		    &fuidnvp, &count) == 0);

		for (i = 0; i != count; i++) {
			fuid_domain_t *domnode;
			char *domain;
			uint64_t idx;

			VERIFY(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN,
			    &domain) == 0);
			VERIFY(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX,
			    &idx) == 0);

			domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);

			domnode->f_idx = idx;
			domnode->f_ksid = ksid_lookupdomain(domain);
			avl_add(idx_tree, domnode);
			avl_add(domain_tree, domnode);
		}
		nvlist_free(nvp);
		kmem_free(packed, fuid_size);
	}
	return (fuid_size);
}

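/*
 * Tear down both AVL trees.  The nodes are shared between the two
 * trees, so the domain references are released on the first pass and
 * the node memory is freed on the second.
 */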
void
zfs_fuid_table_destroy(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
{
	fuid_domain_t *domnode;
	void *cookie;

	cookie = NULL;
	while ((domnode = avl_destroy_nodes(domain_tree, &cookie)) != NULL)
		ksiddomain_rele(domnode->f_ksid);

	avl_destroy(domain_tree);
	cookie = NULL;
	while ((domnode = avl_destroy_nodes(idx_tree, &cookie)) != NULL)
		kmem_free(domnode, sizeof (fuid_domain_t));
	avl_destroy(idx_tree);
}

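/*
 * Look up the domain string for a given index.  Returns the empty
 * domain if the index is not found.
 */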
char *
zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx)
{
	fuid_domain_t searchnode, *findnode;
	avl_index_t loc;

	searchnode.f_idx = idx;

	findnode = avl_find(idx_tree, &searchnode, &loc);

	return (findnode ? findnode->f_ksid->kd_name : nulldomain);
}

#ifdef _KERNEL
/*
 * Load the fuid table(s) into memory.
 */
static void
zfs_fuid_init(zfsvfs_t *zfsvfs)
{
	rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);

	if (zfsvfs->z_fuid_loaded) {
		rw_exit(&zfsvfs->z_fuid_lock);
		return;
	}

	zfs_fuid_avl_tree_create(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);

	(void) zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ,
	    ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj);
	if (zfsvfs->z_fuid_obj != 0) {
		zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os,
		    zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx,
		    &zfsvfs->z_fuid_domain);
	}

	zfsvfs->z_fuid_loaded = B_TRUE;
	rw_exit(&zfsvfs->z_fuid_lock);
}

/*
 * Sync out the AVL trees to persistent storage.
 */
void
zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
	nvlist_t *nvp;
	nvlist_t **fuids;
	size_t nvsize = 0;
	char *packed;
	dmu_buf_t *db;
	fuid_domain_t *domnode;
	int numnodes;
	int i;

	if (!zfsvfs->z_fuid_dirty) {
		return;
	}

	rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);

	/*
	 * First see if the table needs to be created.
	 */
	if (zfsvfs->z_fuid_obj == 0) {
		zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os,
		    DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE,
		    sizeof (uint64_t), tx);
		VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
		    ZFS_FUID_TABLES, sizeof (uint64_t), 1,
		    &zfsvfs->z_fuid_obj, tx) == 0);
	}

	VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	numnodes = avl_numnodes(&zfsvfs->z_fuid_idx);
	fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP);
	for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++,
	    domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) {
		VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
		    domnode->f_idx) == 0);
		VERIFY(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0) == 0);
		VERIFY(nvlist_add_string(fuids[i], FUID_DOMAIN,
		    domnode->f_ksid->kd_name) == 0);
	}
	VERIFY(nvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY,
	    fuids, numnodes) == 0);
	for (i = 0; i != numnodes; i++)
		nvlist_free(fuids[i]);
	kmem_free(fuids, numnodes * sizeof (void *));
	VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0);
	packed = kmem_alloc(nvsize, KM_SLEEP);
	VERIFY(nvlist_pack(nvp, &packed, &nvsize,
	    NV_ENCODE_XDR, KM_SLEEP) == 0);
	nvlist_free(nvp);
	zfsvfs->z_fuid_size = nvsize;
	dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
	    zfsvfs->z_fuid_size, packed, tx);
	kmem_free(packed, zfsvfs->z_fuid_size);
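	/*
	 * Record the size of the packed nvlist in the FUID object's
	 * bonus buffer.
	 */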
	VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj,
	    FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	*(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
	dmu_buf_rele(db, FTAG);

	zfsvfs->z_fuid_dirty = B_FALSE;
	rw_exit(&zfsvfs->z_fuid_lock);
}

/*
 * Query domain table for a given domain.
 *
 * If domain isn't found and addok is set, it is added to AVL trees and
 * the zfsvfs->z_fuid_dirty flag will be set to TRUE.  It will then be
 * necessary for the caller or another thread to detect the dirty table
 * and sync out the changes.
 */
int
zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain,
    char **retdomain, boolean_t addok)
{
	fuid_domain_t searchnode, *findnode;
	avl_index_t loc;
	krw_t rw = RW_READER;

	/*
	 * If this is the dummy "nobody" domain, then return an index of 0
	 * to cause the created FUID to be a standard POSIX id
	 * for the user nobody.
	 */
	if (domain[0] == '\0') {
		if (retdomain)
			*retdomain = nulldomain;
		return (0);
	}

	searchnode.f_ksid = ksid_lookupdomain(domain);
	if (retdomain)
		*retdomain = searchnode.f_ksid->kd_name;
	if (!zfsvfs->z_fuid_loaded)
		zfs_fuid_init(zfsvfs);

retry:
	rw_enter(&zfsvfs->z_fuid_lock, rw);
	findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc);

	if (findnode) {
		rw_exit(&zfsvfs->z_fuid_lock);
		ksiddomain_rele(searchnode.f_ksid);
		return (findnode->f_idx);
	} else if (addok) {
		fuid_domain_t *domnode;
		uint64_t retidx;

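		/*
		 * Adding an entry requires the write lock; if the
		 * reader-to-writer upgrade fails, drop the lock and
		 * retry as a writer.
		 */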
		if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) {
			rw_exit(&zfsvfs->z_fuid_lock);
			rw = RW_WRITER;
			goto retry;
		}

		domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
		domnode->f_ksid = searchnode.f_ksid;

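		/*
		 * Domain indexes are 1-based; index 0 is reserved for
		 * plain POSIX ids.
		 */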
		retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1;

		avl_add(&zfsvfs->z_fuid_domain, domnode);
		avl_add(&zfsvfs->z_fuid_idx, domnode);
		zfsvfs->z_fuid_dirty = B_TRUE;
		rw_exit(&zfsvfs->z_fuid_lock);
		return (retidx);
	} else {
		rw_exit(&zfsvfs->z_fuid_lock);
		return (-1);
	}
}

/*
 * Query the domain table by index, returning the domain string.
 *
 * Returns a pointer to the domain string held in an AVL node.
 */
const char *
zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx)
{
	char *domain;

	if (idx == 0 || !zfsvfs->z_use_fuids)
		return (NULL);

	if (!zfsvfs->z_fuid_loaded)
		zfs_fuid_init(zfsvfs);

	rw_enter(&zfsvfs->z_fuid_lock, RW_READER);

	if (zfsvfs->z_fuid_obj || zfsvfs->z_fuid_dirty)
		domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx);
	else
		domain = nulldomain;
	rw_exit(&zfsvfs->z_fuid_lock);

	ASSERT(domain);
	return (domain);
}

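/*
 * Map a znode's owner and group FUIDs to POSIX ids for the caller.
 */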
void
zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp)
{
	*uidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
	*gidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_gid, cr, ZFS_GROUP);
}

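/*
 * Map a single FUID to a POSIX uid/gid.  An index of 0 means the FUID
 * is already a plain POSIX id; otherwise the idmap service is consulted
 * with the FUID's domain and RID.
 */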
uid_t
zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
    cred_t *cr, zfs_fuid_type_t type)
{
	uint32_t index = FUID_INDEX(fuid);
	const char *domain;
	uid_t id;

	if (index == 0)
		return (fuid);

	domain = zfs_fuid_find_by_idx(zfsvfs, index);
	ASSERT(domain != NULL);

	if (type == ZFS_OWNER || type == ZFS_ACE_USER) {
		(void) kidmap_getuidbysid(crgetzone(cr), domain,
		    FUID_RID(fuid), &id);
	} else {
		(void) kidmap_getgidbysid(crgetzone(cr), domain,
		    FUID_RID(fuid), &id);
	}
	return (id);
}

/*
 * Add a FUID node to the list of FUIDs being created for this
 * ACL.
 *
 * If ACL has multiple domains, then keep only one copy of each unique
 * domain.
 */
void
zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid,
    uint64_t idx, uint64_t id, zfs_fuid_type_t type)
{
	zfs_fuid_t *fuid;
	zfs_fuid_domain_t *fuid_domain;
	zfs_fuid_info_t *fuidp;
	uint64_t fuididx;
	boolean_t found = B_FALSE;

	if (*fuidpp == NULL)
		*fuidpp = zfs_fuid_info_alloc();

	fuidp = *fuidpp;
	/*
	 * First find the fuid domain index in the linked list.
	 *
	 * If one isn't found then create an entry.
	 */

	for (fuididx = 1, fuid_domain = list_head(&fuidp->z_domains);
	    fuid_domain; fuid_domain = list_next(&fuidp->z_domains,
	    fuid_domain), fuididx++) {
		if (idx == fuid_domain->z_domidx) {
			found = B_TRUE;
			break;
		}
	}

	if (!found) {
		fuid_domain = kmem_alloc(sizeof (zfs_fuid_domain_t), KM_SLEEP);
		fuid_domain->z_domain = domain;
		fuid_domain->z_domidx = idx;
		list_insert_tail(&fuidp->z_domains, fuid_domain);
		fuidp->z_domain_str_sz += strlen(domain) + 1;
		fuidp->z_domain_cnt++;
	}

	if (type == ZFS_ACE_USER || type == ZFS_ACE_GROUP) {

		/*
		 * Now allocate fuid entry and add it on the end of the list
		 */

		fuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP);
		fuid->z_id = id;
		fuid->z_domidx = idx;
		fuid->z_logfuid = FUID_ENCODE(fuididx, rid);

		list_insert_tail(&fuidp->z_fuids, fuid);
		fuidp->z_fuid_cnt++;
	} else {
		if (type == ZFS_OWNER)
			fuidp->z_fuid_owner = FUID_ENCODE(fuididx, rid);
		else
			fuidp->z_fuid_group = FUID_ENCODE(fuididx, rid);
	}
}

/*
 * Create a file system FUID, based on information in the user's cred
 *
 * If cred contains KSID_OWNER then it should be used to determine
 * the uid otherwise cred's uid will be used.  By default cred's gid
 * is used unless it's an ephemeral ID in which case KSID_GROUP will
 * be used if it exists.
 */
uint64_t
zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
    cred_t *cr, zfs_fuid_info_t **fuidp)
{
	uint64_t idx;
	ksid_t *ksid;
	uint32_t rid;
	char *kdomain;
	const char *domain;
	uid_t id;

	VERIFY(type == ZFS_OWNER || type == ZFS_GROUP);

	ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP);

	if (!zfsvfs->z_use_fuids || (ksid == NULL)) {
		id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr);

		if (IS_EPHEMERAL(id))
			return ((type == ZFS_OWNER) ? UID_NOBODY : GID_NOBODY);

		return ((uint64_t)id);
	}

	/*
	 * ksid is present and FUID is supported
	 */
	id = (type == ZFS_OWNER) ? ksid_getid(ksid) : crgetgid(cr);

	if (!IS_EPHEMERAL(id))
		return ((uint64_t)id);

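	/*
	 * The id is ephemeral; for the group case, fall back to the
	 * KSID_GROUP id from the cred.
	 */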
	if (type == ZFS_GROUP)
		id = ksid_getid(ksid);

	rid = ksid_getrid(ksid);
	domain = ksid_getdomain(ksid);

	idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);

	zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type);

	return (FUID_ENCODE(idx, rid));
}

/*
 * Create a file system FUID for an ACL ace
 * or a chown/chgrp of the file.
 * This is similar to zfs_fuid_create_cred, except that
 * we can't find the domain + rid information in the
 * cred.  Instead we have to query Winchester for the
 * domain and rid.
 *
 * During replay operations the domain+rid information is
 * found in the zfs_fuid_info_t that the replay code has
 * attached to the zfsvfs of the file system.
 */
uint64_t
zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
    zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp)
{
	const char *domain;
	char *kdomain;
	uint32_t fuid_idx = FUID_INDEX(id);
	uint32_t rid;
	idmap_stat status;
	uint64_t idx = 0;
	zfs_fuid_t *zfuid = NULL;
	zfs_fuid_info_t *fuidp = NULL;

	/*
	 * If this is a POSIX ID or the entry is already a FUID, then
	 * just return the id.
	 *
	 * We may also be handed an already FUID'ized id via
	 * chmod.
	 */

	if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0)
		return (id);

	if (zfsvfs->z_replay) {
		fuidp = zfsvfs->z_fuid_replay;

		/*
		 * If we are passed an ephemeral id, but no
		 * fuid_info was logged then return NOBODY.
		 * This is most likely a result of idmap service
		 * not being available.
		 */
		if (fuidp == NULL)
			return (UID_NOBODY);

		VERIFY3U(type, >=, ZFS_OWNER);
		VERIFY3U(type, <=, ZFS_ACE_GROUP);

		switch (type) {
		case ZFS_ACE_USER:
		case ZFS_ACE_GROUP:
			zfuid = list_head(&fuidp->z_fuids);
			rid = FUID_RID(zfuid->z_logfuid);
			idx = FUID_INDEX(zfuid->z_logfuid);
			break;
		case ZFS_OWNER:
			rid = FUID_RID(fuidp->z_fuid_owner);
			idx = FUID_INDEX(fuidp->z_fuid_owner);
			break;
		case ZFS_GROUP:
			rid = FUID_RID(fuidp->z_fuid_group);
			idx = FUID_INDEX(fuidp->z_fuid_group);
			break;
		}
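		/*
		 * The logged FUID index is 1-based into the replay
		 * domain table.
		 */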
		domain = fuidp->z_domain_table[idx - 1];
	} else {
		if (type == ZFS_OWNER || type == ZFS_ACE_USER)
			status = kidmap_getsidbyuid(crgetzone(cr), id,
			    &domain, &rid);
		else
			status = kidmap_getsidbygid(crgetzone(cr), id,
			    &domain, &rid);

		if (status != 0) {
			/*
			 * When returning nobody we will need to
			 * make a dummy fuid table entry for logging
			 * purposes.
			 */
			rid = UID_NOBODY;
			domain = nulldomain;
		}
	}

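	/*
	 * Find the on-disk index for this domain, adding a new entry
	 * if necessary.
	 */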
	idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);

	if (!zfsvfs->z_replay)
		zfs_fuid_node_add(fuidpp, kdomain,
		    rid, idx, id, type);
	else if (zfuid != NULL) {
		list_remove(&fuidp->z_fuids, zfuid);
		kmem_free(zfuid, sizeof (zfs_fuid_t));
	}
	return (FUID_ENCODE(idx, rid));
}

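/*
 * Release the in-core FUID tables, if they were ever loaded.
 */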
void
zfs_fuid_destroy(zfsvfs_t *zfsvfs)
{
	rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
	if (!zfsvfs->z_fuid_loaded) {
		rw_exit(&zfsvfs->z_fuid_lock);
		return;
	}
	zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
	rw_exit(&zfsvfs->z_fuid_lock);
}

/*
 * Allocate zfs_fuid_info for tracking FUIDs created during
 * zfs_mknode, VOP_SETATTR() or VOP_SETSECATTR()
 */
zfs_fuid_info_t *
zfs_fuid_info_alloc(void)
{
	zfs_fuid_info_t *fuidp;

	fuidp = kmem_zalloc(sizeof (zfs_fuid_info_t), KM_SLEEP);
	list_create(&fuidp->z_domains, sizeof (zfs_fuid_domain_t),
	    offsetof(zfs_fuid_domain_t, z_next));
	list_create(&fuidp->z_fuids, sizeof (zfs_fuid_t),
	    offsetof(zfs_fuid_t, z_next));
	return (fuidp);
}

/*
 * Release all memory associated with zfs_fuid_info_t
 */
void
zfs_fuid_info_free(zfs_fuid_info_t *fuidp)
{
	zfs_fuid_t *zfuid;
	zfs_fuid_domain_t *zdomain;

	while ((zfuid = list_head(&fuidp->z_fuids)) != NULL) {
		list_remove(&fuidp->z_fuids, zfuid);
		kmem_free(zfuid, sizeof (zfs_fuid_t));
	}

	if (fuidp->z_domain_table != NULL)
		kmem_free(fuidp->z_domain_table,
		    (sizeof (char **)) * fuidp->z_domain_cnt);

	while ((zdomain = list_head(&fuidp->z_domains)) != NULL) {
		list_remove(&fuidp->z_domains, zdomain);
		kmem_free(zdomain, sizeof (zfs_fuid_domain_t));
	}

	kmem_free(fuidp, sizeof (zfs_fuid_info_t));
}

/*
 * Check to see if id is a group member.  If the cred has ksid info,
 * then the sidlist is checked first; if the id is still not found,
 * then POSIX groups are checked.
 *
 * Will use a straight FUID compare when possible.
 */
boolean_t
zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
{
	ksid_t *ksid = crgetsid(cr, KSID_GROUP);
	ksidlist_t *ksidlist = crgetsidlist(cr);
	uid_t gid;

	if (ksid && ksidlist) {
		int i;
		ksid_t *ksid_groups;
		uint32_t idx = FUID_INDEX(id);
		uint32_t rid = FUID_RID(id);

		ksid_groups = ksidlist->ksl_sids;

		for (i = 0; i != ksidlist->ksl_nsid; i++) {
			if (idx == 0) {
				if (id != IDMAP_WK_CREATOR_GROUP_GID &&
				    id == ksid_groups[i].ks_id) {
					return (B_TRUE);
				}
			} else {
				const char *domain;

				domain = zfs_fuid_find_by_idx(zfsvfs, idx);
				ASSERT(domain != NULL);

				if (strcmp(domain,
				    IDMAP_WK_CREATOR_SID_AUTHORITY) == 0)
					return (B_FALSE);

				if ((strcmp(domain,
				    ksid_groups[i].ks_domain->kd_name) == 0) &&
				    rid == ksid_groups[i].ks_rid)
					return (B_TRUE);
			}
		}
	}

	/*
	 * Not found in ksidlist, check posix groups
	 */
	gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP);
	return (groupmember(gid, cr));
}

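/*
 * Reserve transaction space for a FUID table update: a new object and
 * a master node ZAP entry if the table doesn't exist yet, otherwise the
 * existing object's bonus buffer and data.
 */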
void
zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
	if (zfsvfs->z_fuid_obj == 0) {
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    FUID_SIZE_ESTIMATE(zfsvfs));
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
	} else {
		dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
		dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
		    FUID_SIZE_ESTIMATE(zfsvfs));
	}
}
#endif