/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2020 Tintri by DDN, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/avl.h>
#include <sys/zap.h>
#include <sys/refcount.h>
#include <sys/nvpair.h>
#ifdef _KERNEL
#include <sys/kidmap.h>
#include <sys/sid.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#endif
#include <sys/zfs_fuid.h>

/*
 * FUID Domain table(s).
 *
 * The FUID table is stored as a packed nvlist of an array of nvlists,
 * each of which contains an index, a domain string, and an offset.
 *
 * During file system initialization the nvlist(s) are read and
 * two AVL trees are created.  One tree is keyed by the index number
 * and the other by the domain string.  Nodes are never removed from
 * the trees, but new entries may be added.  If a new entry is added,
 * the zfsvfs->z_fuid_dirty flag is set to true and the caller is then
 * responsible for calling zfs_fuid_sync() to sync the changes to disk.
 */

#define	FUID_IDX	"fuid_idx"
#define	FUID_DOMAIN	"fuid_domain"
#define	FUID_OFFSET	"fuid_offset"
#define	FUID_NVP_ARRAY	"fuid_nvlist"
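/*
 * For illustration only (a sketch, not authoritative): as packed by
 * zfs_fuid_sync() below, the on-disk table is an XDR-encoded nvlist
 * whose FUID_NVP_ARRAY entry is an array of nvlists, e.g.
 *
 *	fuid_nvlist = [
 *		{ fuid_idx = 1, fuid_offset = 0, fuid_domain = <domain SID> },
 *		{ fuid_idx = 2, fuid_offset = 0, fuid_domain = <domain SID> },
 *		...
 *	]
 *
 * The <domain SID> strings above are placeholders.
 */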

typedef struct fuid_domain {
	avl_node_t	f_domnode;
	avl_node_t	f_idxnode;
	ksiddomain_t	*f_ksid;
	uint64_t	f_idx;
} fuid_domain_t;

static char *nulldomain = "";

/*
 * Compare two indexes.
 */
static int
idx_compare(const void *arg1, const void *arg2)
{
	const fuid_domain_t *node1 = (const fuid_domain_t *)arg1;
	const fuid_domain_t *node2 = (const fuid_domain_t *)arg2;

	return (TREE_CMP(node1->f_idx, node2->f_idx));
}

/*
 * Compare two domain strings.
 */
static int
domain_compare(const void *arg1, const void *arg2)
{
	const fuid_domain_t *node1 = (const fuid_domain_t *)arg1;
	const fuid_domain_t *node2 = (const fuid_domain_t *)arg2;
	int val;

	val = strcmp(node1->f_ksid->kd_name, node2->f_ksid->kd_name);

	return (TREE_ISIGN(val));
}

void
zfs_fuid_avl_tree_create(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
{
	avl_create(idx_tree, idx_compare,
	    sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_idxnode));
	avl_create(domain_tree, domain_compare,
	    sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_domnode));
}

/*
 * Load the initial FUID domain and index trees.  This function is used
 * by both the kernel and zdb.
 */
uint64_t
zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree,
    avl_tree_t *domain_tree)
{
	dmu_buf_t *db;
	uint64_t fuid_size;

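	/*
	 * The bonus buffer of the FUID object holds the size of the
	 * packed nvlist (see zfs_fuid_sync()).
	 */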
	ASSERT(fuid_obj != 0);
	VERIFY(0 == dmu_bonus_hold(os, fuid_obj,
	    FTAG, &db));
	fuid_size = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	if (fuid_size) {
		nvlist_t **fuidnvp;
		nvlist_t *nvp = NULL;
		uint_t count;
		char *packed;
		int i;

		packed = kmem_alloc(fuid_size, KM_SLEEP);
		VERIFY(dmu_read(os, fuid_obj, 0,
		    fuid_size, packed, DMU_READ_PREFETCH) == 0);
		VERIFY(nvlist_unpack(packed, fuid_size,
		    &nvp, 0) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY,
		    &fuidnvp, &count) == 0);

		for (i = 0; i != count; i++) {
			fuid_domain_t *domnode;
			char *domain;
			uint64_t idx;

			VERIFY(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN,
			    &domain) == 0);
			VERIFY(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX,
			    &idx) == 0);

			domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);

			domnode->f_idx = idx;
			domnode->f_ksid = ksid_lookupdomain(domain);
			avl_add(idx_tree, domnode);
			avl_add(domain_tree, domnode);
		}
		nvlist_free(nvp);
		kmem_free(packed, fuid_size);
	}
	return (fuid_size);
}

void
zfs_fuid_table_destroy(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
{
	fuid_domain_t *domnode;
	void *cookie;

	cookie = NULL;
	while ((domnode = avl_destroy_nodes(domain_tree, &cookie)) != NULL)
		ksiddomain_rele(domnode->f_ksid);

	avl_destroy(domain_tree);
	cookie = NULL;
	while ((domnode = avl_destroy_nodes(idx_tree, &cookie)) != NULL)
		kmem_free(domnode, sizeof (fuid_domain_t));
	avl_destroy(idx_tree);
}

char *
zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx)
{
	fuid_domain_t searchnode, *findnode;
	avl_index_t loc;

	searchnode.f_idx = idx;

	findnode = avl_find(idx_tree, &searchnode, &loc);

	return (findnode ? findnode->f_ksid->kd_name : nulldomain);
}

#ifdef _KERNEL
/*
 * Load the fuid table(s) into memory.
 */
static void
zfs_fuid_init(zfsvfs_t *zfsvfs)
{
	rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);

	if (zfsvfs->z_fuid_loaded) {
		rw_exit(&zfsvfs->z_fuid_lock);
		return;
	}

	zfs_fuid_avl_tree_create(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);

	(void) zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ,
	    ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj);
	if (zfsvfs->z_fuid_obj != 0) {
		zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os,
		    zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx,
		    &zfsvfs->z_fuid_domain);
	}

	zfsvfs->z_fuid_loaded = B_TRUE;
	rw_exit(&zfsvfs->z_fuid_lock);
}

/*
 * Sync out the AVL trees to persistent storage.
 */
void
zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
	nvlist_t *nvp;
	nvlist_t **fuids;
	size_t nvsize = 0;
	char *packed;
	dmu_buf_t *db;
	fuid_domain_t *domnode;
	int numnodes;
	int i;

	if (!zfsvfs->z_fuid_dirty) {
		return;
	}

	rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);

	/*
	 * First see if the table needs to be created.
	 */
	if (zfsvfs->z_fuid_obj == 0) {
		zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os,
		    DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE,
		    sizeof (uint64_t), tx);
		VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
		    ZFS_FUID_TABLES, sizeof (uint64_t), 1,
		    &zfsvfs->z_fuid_obj, tx) == 0);
	}

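	/*
	 * Rebuild the nvlist array from the in-memory domain AVL tree,
	 * pack it with XDR encoding, and write it to the FUID object.
	 */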
	VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	numnodes = avl_numnodes(&zfsvfs->z_fuid_idx);
	fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP);
	for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++,
	    domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) {
		VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
		    domnode->f_idx) == 0);
		VERIFY(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0) == 0);
		VERIFY(nvlist_add_string(fuids[i], FUID_DOMAIN,
		    domnode->f_ksid->kd_name) == 0);
	}
	VERIFY(nvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY,
	    fuids, numnodes) == 0);
	for (i = 0; i != numnodes; i++)
		nvlist_free(fuids[i]);
	kmem_free(fuids, numnodes * sizeof (void *));
	VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0);
	packed = kmem_alloc(nvsize, KM_SLEEP);
	VERIFY(nvlist_pack(nvp, &packed, &nvsize,
	    NV_ENCODE_XDR, KM_SLEEP) == 0);
	nvlist_free(nvp);
	zfsvfs->z_fuid_size = nvsize;
	dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
	    zfsvfs->z_fuid_size, packed, tx);
	kmem_free(packed, zfsvfs->z_fuid_size);
	VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj,
	    FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	*(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
	dmu_buf_rele(db, FTAG);

	zfsvfs->z_fuid_dirty = B_FALSE;
	rw_exit(&zfsvfs->z_fuid_lock);
}

/*
 * Query the domain table for a given domain.
 *
 * If the domain isn't found and addok is set, it is added to the AVL
 * trees and the zfsvfs->z_fuid_dirty flag is set to TRUE.  It is then
 * necessary for the caller or another thread to detect the dirty table
 * and sync out the changes.
 */
int
zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain,
    char **retdomain, boolean_t addok)
{
	fuid_domain_t searchnode, *findnode;
	avl_index_t loc;
	krw_t rw = RW_READER;

	/*
	 * If this is the dummy "nobody" domain, return an index of 0
	 * to cause the created FUID to be a standard POSIX id
	 * for the user nobody.
	 */
	if (domain[0] == '\0') {
		if (retdomain)
			*retdomain = nulldomain;
		return (0);
	}

	searchnode.f_ksid = ksid_lookupdomain(domain);
	if (retdomain)
		*retdomain = searchnode.f_ksid->kd_name;
	if (!zfsvfs->z_fuid_loaded)
		zfs_fuid_init(zfsvfs);

retry:
	rw_enter(&zfsvfs->z_fuid_lock, rw);
	findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc);

	if (findnode) {
		rw_exit(&zfsvfs->z_fuid_lock);
		ksiddomain_rele(searchnode.f_ksid);
		return (findnode->f_idx);
	} else if (addok) {
		fuid_domain_t *domnode;
		uint64_t retidx;

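		/*
		 * If the read lock cannot be upgraded atomically, drop
		 * it and retry the lookup as a writer, since another
		 * thread may have added this domain in the meantime.
		 */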
		if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) {
			rw_exit(&zfsvfs->z_fuid_lock);
			rw = RW_WRITER;
			goto retry;
		}

		domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
		domnode->f_ksid = searchnode.f_ksid;

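		/*
		 * Domain table indices are 1-based; index 0 is reserved
		 * for plain POSIX ids.
		 */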
		retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1;

		avl_add(&zfsvfs->z_fuid_domain, domnode);
		avl_add(&zfsvfs->z_fuid_idx, domnode);
		zfsvfs->z_fuid_dirty = B_TRUE;
		rw_exit(&zfsvfs->z_fuid_lock);
		return (retidx);
	} else {
		rw_exit(&zfsvfs->z_fuid_lock);
		return (-1);
	}
}

/*
 * Query the domain table by index, returning the domain string.
 *
 * The returned pointer refers to the domain string held in an AVL node.
 */
const char *
zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx)
{
	char *domain;

	if (idx == 0 || !zfsvfs->z_use_fuids)
		return (NULL);

	if (!zfsvfs->z_fuid_loaded)
		zfs_fuid_init(zfsvfs);

	rw_enter(&zfsvfs->z_fuid_lock, RW_READER);

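	/*
	 * Consult the table only if it exists on disk or has unsynced
	 * in-memory entries; otherwise no domains have ever been added
	 * and the null domain is returned.
	 */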
	if (zfsvfs->z_fuid_obj || zfsvfs->z_fuid_dirty)
		domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx);
	else
		domain = nulldomain;
	rw_exit(&zfsvfs->z_fuid_lock);

	ASSERT(domain);
	return (domain);
}

void
zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp)
{
	*uidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
	*gidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_gid, cr, ZFS_GROUP);
}

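/*
 * Map a FUID (or a plain POSIX id) to a POSIX uid/gid.  A FUID carries
 * the domain table index in its upper bits and the Windows RID in its
 * lower 32 bits; an index of 0 means the value is already a POSIX id
 * and is returned unchanged.
 */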
uid_t
zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
    cred_t *cr, zfs_fuid_type_t type)
{
	uint32_t index = FUID_INDEX(fuid);
	const char *domain;
	uid_t id;

	if (index == 0)
		return (fuid);

	domain = zfs_fuid_find_by_idx(zfsvfs, index);
	ASSERT(domain != NULL);

	if (type == ZFS_OWNER || type == ZFS_ACE_USER) {
		(void) kidmap_getuidbysid(crgetzone(cr), domain,
		    FUID_RID(fuid), &id);
	} else {
		(void) kidmap_getgidbysid(crgetzone(cr), domain,
		    FUID_RID(fuid), &id);
	}
	return (id);
}

/*
 * Add a FUID node to the list of FUIDs being created for this ACL.
 *
 * If the ACL has multiple domains, keep only one copy of each unique
 * domain.
 */
void
zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid,
    uint64_t idx, uint64_t id, zfs_fuid_type_t type)
{
	zfs_fuid_t *fuid;
	zfs_fuid_domain_t *fuid_domain;
	zfs_fuid_info_t *fuidp;
	uint64_t fuididx;
	boolean_t found = B_FALSE;

	if (*fuidpp == NULL)
		*fuidpp = zfs_fuid_info_alloc();

	fuidp = *fuidpp;
	/*
	 * First find the FUID domain index in the linked list.
	 *
	 * If one isn't found, create an entry.
	 */

	for (fuididx = 1, fuid_domain = list_head(&fuidp->z_domains);
	    fuid_domain; fuid_domain = list_next(&fuidp->z_domains,
	    fuid_domain), fuididx++) {
		if (idx == fuid_domain->z_domidx) {
			found = B_TRUE;
			break;
		}
	}

	if (!found) {
		fuid_domain = kmem_alloc(sizeof (zfs_fuid_domain_t), KM_SLEEP);
		fuid_domain->z_domain = domain;
		fuid_domain->z_domidx = idx;
		list_insert_tail(&fuidp->z_domains, fuid_domain);
		fuidp->z_domain_str_sz += strlen(domain) + 1;
		fuidp->z_domain_cnt++;
	}

	if (type == ZFS_ACE_USER || type == ZFS_ACE_GROUP) {

		/*
		 * Now allocate the fuid entry and add it to the end of
		 * the list.
		 */

		fuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP);
		fuid->z_id = id;
		fuid->z_domidx = idx;
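		/*
		 * The log FUID encodes the 1-based position of the domain
		 * in this ACL's domain list (fuididx), not the on-disk
		 * table index.
		 */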
		fuid->z_logfuid = FUID_ENCODE(fuididx, rid);

		list_insert_tail(&fuidp->z_fuids, fuid);
		fuidp->z_fuid_cnt++;
	} else {
		if (type == ZFS_OWNER)
			fuidp->z_fuid_owner = FUID_ENCODE(fuididx, rid);
		else
			fuidp->z_fuid_group = FUID_ENCODE(fuididx, rid);
	}
}

/*
 * Create a file system FUID, based on information in the user's cred.
 *
 * If the cred contains KSID_OWNER, it is used to determine the uid;
 * otherwise the cred's uid is used.  By default the cred's gid is used,
 * unless it is an ephemeral ID, in which case KSID_GROUP is used if it
 * exists.
 */
uint64_t
zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
    cred_t *cr, zfs_fuid_info_t **fuidp)
{
	uint64_t idx;
	ksid_t *ksid;
	uint32_t rid;
	char *kdomain;
	const char *domain;
	uid_t id;

	VERIFY(type == ZFS_OWNER || type == ZFS_GROUP);

	ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP);

	if (!zfsvfs->z_use_fuids || (ksid == NULL)) {
		id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr);

		if (IS_EPHEMERAL(id))
			return ((type == ZFS_OWNER) ? UID_NOBODY : GID_NOBODY);

		return ((uint64_t)id);
	}

	/*
	 * ksid is present and FUIDs are supported.
	 */
	id = (type == ZFS_OWNER) ? ksid_getid(ksid) : crgetgid(cr);

	if (!IS_EPHEMERAL(id))
		return ((uint64_t)id);

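	/*
	 * For ZFS_GROUP the cred's gid was ephemeral, so take the id
	 * from the group KSID instead.
	 */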
	if (type == ZFS_GROUP)
		id = ksid_getid(ksid);

	rid = ksid_getrid(ksid);
	domain = ksid_getdomain(ksid);

	idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);

	zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type);

	return (FUID_ENCODE(idx, rid));
}

/*
 * Create a file system FUID for an ACL ace
 * or a chown/chgrp of the file.
 * This is similar to zfs_fuid_create_cred, except that
 * we can't find the domain + rid information in the
 * cred.  Instead we have to query Winchester for the
 * domain and rid.
 *
 * During replay operations the domain+rid information is
 * found in the zfs_fuid_info_t that the replay code has
 * attached to the zfsvfs of the file system.
 */
uint64_t
zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
    zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp)
{
	const char *domain;
	char *kdomain;
	uint32_t fuid_idx = FUID_INDEX(id);
	uint32_t rid;
	idmap_stat status;
	uint64_t idx = 0;
	zfs_fuid_t *zfuid = NULL;
	zfs_fuid_info_t *fuidp = NULL;

	/*
	 * If this is a POSIX ID, or the entry is already a FUID, just
	 * return the id.
	 *
	 * We may also be handed an already FUID'ized id via chmod.
	 */

	if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0)
		return (id);

	if (zfsvfs->z_replay) {
		fuidp = zfsvfs->z_fuid_replay;

		/*
		 * If we are passed an ephemeral id but no fuid_info was
		 * logged, return NOBODY.  This is most likely the result
		 * of the idmap service not being available.
		 */
		if (fuidp == NULL)
			return (UID_NOBODY);

		VERIFY3U(type, >=, ZFS_OWNER);
		VERIFY3U(type, <=, ZFS_ACE_GROUP);

		switch (type) {
		case ZFS_ACE_USER:
		case ZFS_ACE_GROUP:
			zfuid = list_head(&fuidp->z_fuids);
			rid = FUID_RID(zfuid->z_logfuid);
			idx = FUID_INDEX(zfuid->z_logfuid);
			break;
		case ZFS_OWNER:
			rid = FUID_RID(fuidp->z_fuid_owner);
			idx = FUID_INDEX(fuidp->z_fuid_owner);
			break;
		case ZFS_GROUP:
			rid = FUID_RID(fuidp->z_fuid_group);
			idx = FUID_INDEX(fuidp->z_fuid_group);
			break;
		};
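		/*
		 * Domain table indices are 1-based, so slot 0 of the
		 * replay domain table holds index 1.
		 */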
		domain = fuidp->z_domain_table[idx - 1];
	} else {
		if (type == ZFS_OWNER || type == ZFS_ACE_USER)
			status = kidmap_getsidbyuid(crgetzone(cr), id,
			    &domain, &rid);
		else
			status = kidmap_getsidbygid(crgetzone(cr), id,
			    &domain, &rid);

		if (status != 0) {
			/*
			 * When returning nobody we will need to
			 * make a dummy fuid table entry for logging
			 * purposes.
			 */
			rid = UID_NOBODY;
			domain = nulldomain;
		}
	}

	idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);

	if (!zfsvfs->z_replay)
		zfs_fuid_node_add(fuidpp, kdomain,
		    rid, idx, id, type);
	else if (zfuid != NULL) {
		list_remove(&fuidp->z_fuids, zfuid);
		kmem_free(zfuid, sizeof (zfs_fuid_t));
	}
	return (FUID_ENCODE(idx, rid));
}

void
zfs_fuid_destroy(zfsvfs_t *zfsvfs)
{
	rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
	if (!zfsvfs->z_fuid_loaded) {
		rw_exit(&zfsvfs->z_fuid_lock);
		return;
	}
	zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
	rw_exit(&zfsvfs->z_fuid_lock);
}

/*
 * Allocate zfs_fuid_info for tracking FUIDs created during
 * zfs_mknode, VOP_SETATTR() or VOP_SETSECATTR()
 */
zfs_fuid_info_t *
zfs_fuid_info_alloc(void)
{
	zfs_fuid_info_t *fuidp;

	fuidp = kmem_zalloc(sizeof (zfs_fuid_info_t), KM_SLEEP);
	list_create(&fuidp->z_domains, sizeof (zfs_fuid_domain_t),
	    offsetof(zfs_fuid_domain_t, z_next));
	list_create(&fuidp->z_fuids, sizeof (zfs_fuid_t),
	    offsetof(zfs_fuid_t, z_next));
	return (fuidp);
}

/*
 * Release all memory associated with zfs_fuid_info_t
 */
void
zfs_fuid_info_free(zfs_fuid_info_t *fuidp)
{
	zfs_fuid_t *zfuid;
	zfs_fuid_domain_t *zdomain;

	while ((zfuid = list_head(&fuidp->z_fuids)) != NULL) {
		list_remove(&fuidp->z_fuids, zfuid);
		kmem_free(zfuid, sizeof (zfs_fuid_t));
	}

	if (fuidp->z_domain_table != NULL)
		kmem_free(fuidp->z_domain_table,
		    (sizeof (char **)) * fuidp->z_domain_cnt);

	while ((zdomain = list_head(&fuidp->z_domains)) != NULL) {
		list_remove(&fuidp->z_domains, zdomain);
		kmem_free(zdomain, sizeof (zfs_fuid_domain_t));
	}

	kmem_free(fuidp, sizeof (zfs_fuid_info_t));
}

/*
 * Check to see if the user ID is in the list of SIDs in the cred.
 */
boolean_t
zfs_user_in_cred(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
{
	ksid_t *ksid = crgetsid(cr, KSID_USER);
	ksidlist_t *ksidlist = crgetsidlist(cr);
	uid_t uid;

	/* Check for match with cred->cr_uid */
	uid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_ACE_USER);
	if (uid != IDMAP_WK_CREATOR_OWNER_UID &&
	    uid == crgetuid(cr))
		return (B_TRUE);

	/* Check for any match in the ksidlist */
	if (ksid && ksidlist) {
		uint32_t idx = FUID_INDEX(id);
		uint32_t rid = FUID_RID(id);
		const char *domain;

		if (idx == 0) {
			/*
			 * The ID passed in has idx zero, which means
			 * it's just a Unix UID.  That can never match
			 * anything in ksid_vec[] because those all
			 * have ksid->ks_id set to a Group ID.
			 */
			return (B_FALSE);
		}

		domain = zfs_fuid_find_by_idx(zfsvfs, idx);
		ASSERT(domain != NULL);

		if (strcmp(domain, IDMAP_WK_CREATOR_SID_AUTHORITY) == 0)
			return (B_FALSE);

		if (ksidlist_has_sid(ksidlist, domain, rid))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Check to see if the id is a group member.  If the cred has ksid
 * info, the sidlist is checked first; if no match is found there,
 * the POSIX groups are checked.
 *
 * Will use a straight FUID compare when possible.
 */
boolean_t
zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
{
	ksid_t *ksid = crgetsid(cr, KSID_GROUP);
	ksidlist_t *ksidlist = crgetsidlist(cr);
	uid_t gid;

	if (ksid && ksidlist && id != IDMAP_WK_CREATOR_GROUP_GID) {
		uint32_t idx = FUID_INDEX(id);
		uint32_t rid = FUID_RID(id);
		const char *domain = NULL;

		if (idx != 0) {
			domain = zfs_fuid_find_by_idx(zfsvfs, idx);
			ASSERT(domain != NULL);

			if (strcmp(domain,
			    IDMAP_WK_CREATOR_SID_AUTHORITY) == 0)
				return (B_FALSE);

			if (ksidlist_has_sid(ksidlist, domain, rid))
				return (B_TRUE);
		} else {
			if (ksidlist_has_pid(ksidlist, rid))
				return (B_TRUE);
		}
	}

	/*
	 * Not found in the ksidlist, so check the POSIX groups.
	 */
	gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP);
	return (groupmember(gid, cr));
}

void
zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
	if (zfsvfs->z_fuid_obj == 0) {
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    FUID_SIZE_ESTIMATE(zfsvfs));
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
	} else {
		dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
		dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
		    FUID_SIZE_ESTIMATE(zfsvfs));
	}
}
#endif