// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/avl.h>
#include <sys/zap.h>
#include <sys/nvpair.h>
#ifdef _KERNEL
#include <sys/sid.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#endif
#include <sys/zfs_fuid.h>

/*
 * FUID Domain table(s).
 *
 * The FUID table is stored as a packed nvlist of an array of nvlists,
 * each of which contains an index, a domain string, and an offset.
 *
 * During file system initialization the nvlist(s) are read and
 * two AVL trees are created.  One tree is keyed by the index number
 * and the other by the domain string.  Nodes are never removed from
 * the trees, but new entries may be added.  If a new entry is added,
 * the zfsvfs->z_fuid_dirty flag is set to true and the caller is then
 * responsible for calling zfs_fuid_sync() to sync the changes to disk.
 */

#define FUID_IDX        "fuid_idx"
#define FUID_DOMAIN     "fuid_domain"
#define FUID_OFFSET     "fuid_offset"
#define FUID_NVP_ARRAY  "fuid_nvlist"
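
/*
 * A minimal sketch (not compiled) of how one table entry and the packed
 * array are assembled with the keys above; the SID-style domain string
 * shown is a made-up example.  zfs_fuid_sync() below is the
 * authoritative implementation.
 *
 *	nvlist_t *entry, *table;
 *
 *	VERIFY0(nvlist_alloc(&entry, NV_UNIQUE_NAME, KM_SLEEP));
 *	VERIFY0(nvlist_add_uint64(entry, FUID_IDX, 1));
 *	VERIFY0(nvlist_add_uint64(entry, FUID_OFFSET, 0));
 *	VERIFY0(nvlist_add_string(entry, FUID_DOMAIN,
 *	    "S-1-5-21-1111111111-2222222222-3333333333"));
 *
 *	VERIFY0(nvlist_alloc(&table, NV_UNIQUE_NAME, KM_SLEEP));
 *	fnvlist_add_nvlist_array(table, FUID_NVP_ARRAY,
 *	    (const nvlist_t * const *)&entry, 1);
 *
 * The "table" nvlist is then XDR-packed and written to the FUID object,
 * and the packed size is recorded in that object's bonus buffer.
 */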

typedef struct fuid_domain {
        avl_node_t      f_domnode;
        avl_node_t      f_idxnode;
        ksiddomain_t    *f_ksid;
        uint64_t        f_idx;
} fuid_domain_t;

static const char *const nulldomain = "";

/*
 * Compare two indexes.
 */
static int
idx_compare(const void *arg1, const void *arg2)
{
        const fuid_domain_t *node1 = (const fuid_domain_t *)arg1;
        const fuid_domain_t *node2 = (const fuid_domain_t *)arg2;

        return (TREE_CMP(node1->f_idx, node2->f_idx));
}

/*
 * Compare two domain strings.
 */
static int
domain_compare(const void *arg1, const void *arg2)
{
        const fuid_domain_t *node1 = (const fuid_domain_t *)arg1;
        const fuid_domain_t *node2 = (const fuid_domain_t *)arg2;
        int val;

        val = strcmp(node1->f_ksid->kd_name, node2->f_ksid->kd_name);

        return (TREE_ISIGN(val));
}

void
zfs_fuid_avl_tree_create(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
{
        avl_create(idx_tree, idx_compare,
            sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_idxnode));
        avl_create(domain_tree, domain_compare,
            sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_domnode));
}

/*
 * Load the initial FUID domain and idx trees.  This function is used by
 * both the kernel and zdb.
 */
uint64_t
zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree,
    avl_tree_t *domain_tree)
{
        dmu_buf_t *db;
        uint64_t fuid_size;

        ASSERT(fuid_obj != 0);
        VERIFY0(dmu_bonus_hold(os, fuid_obj, FTAG, &db));
        fuid_size = *(uint64_t *)db->db_data;
        dmu_buf_rele(db, FTAG);

        if (fuid_size) {
                nvlist_t **fuidnvp;
                nvlist_t *nvp = NULL;
                uint_t count;
                char *packed;
                int i;

                packed = kmem_alloc(fuid_size, KM_SLEEP);
                VERIFY0(dmu_read(os, fuid_obj, 0,
                    fuid_size, packed, DMU_READ_PREFETCH));
                VERIFY0(nvlist_unpack(packed, fuid_size, &nvp, 0));
                VERIFY0(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY,
                    &fuidnvp, &count));

                for (i = 0; i != count; i++) {
                        fuid_domain_t *domnode;
                        const char *domain;
                        uint64_t idx;

                        VERIFY0(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN,
                            &domain));
                        VERIFY0(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX,
                            &idx));

                        domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);

                        domnode->f_idx = idx;
                        domnode->f_ksid = ksid_lookupdomain(domain);
                        avl_add(idx_tree, domnode);
                        avl_add(domain_tree, domnode);
                }
                nvlist_free(nvp);
                kmem_free(packed, fuid_size);
        }
        return (fuid_size);
}
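
/*
 * A hedged usage sketch (not compiled) of the loader above, in the style
 * of a userland consumer such as zdb; "os" and "fuid_obj" are assumed to
 * be an already-held objset and the FUID object number, and error
 * handling is omitted.  FUID indices start at 1, so index 0 never
 * appears in the tree.
 *
 *	avl_tree_t idx_tree, domain_tree;
 *	uint32_t i;
 *
 *	zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
 *	(void) zfs_fuid_table_load(os, fuid_obj, &idx_tree, &domain_tree);
 *	for (i = 1; i <= avl_numnodes(&idx_tree); i++)
 *		(void) printf("%u\t%s\n", i,
 *		    zfs_fuid_idx_domain(&idx_tree, i));
 *	zfs_fuid_table_destroy(&idx_tree, &domain_tree);
 */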

void
zfs_fuid_table_destroy(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
{
        fuid_domain_t *domnode;
        void *cookie;

        cookie = NULL;
        while ((domnode = avl_destroy_nodes(domain_tree, &cookie)))
                ksiddomain_rele(domnode->f_ksid);

        avl_destroy(domain_tree);
        cookie = NULL;
        while ((domnode = avl_destroy_nodes(idx_tree, &cookie)))
                kmem_free(domnode, sizeof (fuid_domain_t));
        avl_destroy(idx_tree);
}

const char *
zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx)
{
        fuid_domain_t searchnode, *findnode;
        avl_index_t loc;

        searchnode.f_idx = idx;

        findnode = avl_find(idx_tree, &searchnode, &loc);

        return (findnode ? findnode->f_ksid->kd_name : nulldomain);
}

#ifdef _KERNEL
/*
 * Load the fuid table(s) into memory.
 */
static void
zfs_fuid_init(zfsvfs_t *zfsvfs)
{
        rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);

        if (zfsvfs->z_fuid_loaded) {
                rw_exit(&zfsvfs->z_fuid_lock);
                return;
        }

        zfs_fuid_avl_tree_create(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);

        (void) zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ,
            ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj);
        if (zfsvfs->z_fuid_obj != 0) {
                zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os,
                    zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx,
                    &zfsvfs->z_fuid_domain);
        }

        zfsvfs->z_fuid_loaded = B_TRUE;
        rw_exit(&zfsvfs->z_fuid_lock);
}

/*
 * Sync out the AVL trees to persistent storage.
 */
void
zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
        nvlist_t *nvp;
        nvlist_t **fuids;
        size_t nvsize = 0;
        char *packed;
        dmu_buf_t *db;
        fuid_domain_t *domnode;
        int numnodes;
        int i;

        if (!zfsvfs->z_fuid_dirty) {
                return;
        }

        rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);

        /*
         * First see if the table needs to be created.
         */
        if (zfsvfs->z_fuid_obj == 0) {
                zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os,
                    DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE,
                    sizeof (uint64_t), tx);
                VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
                    ZFS_FUID_TABLES, sizeof (uint64_t), 1,
                    &zfsvfs->z_fuid_obj, tx) == 0);
        }

        VERIFY0(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP));

        numnodes = avl_numnodes(&zfsvfs->z_fuid_idx);
        fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP);
        for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++,
            domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) {
                VERIFY0(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP));
                VERIFY0(nvlist_add_uint64(fuids[i], FUID_IDX,
                    domnode->f_idx));
                VERIFY0(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0));
                VERIFY0(nvlist_add_string(fuids[i], FUID_DOMAIN,
                    domnode->f_ksid->kd_name));
        }
        fnvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY,
            (const nvlist_t * const *)fuids, numnodes);
        for (i = 0; i != numnodes; i++)
                nvlist_free(fuids[i]);
        kmem_free(fuids, numnodes * sizeof (void *));
        VERIFY0(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR));
        packed = kmem_alloc(nvsize, KM_SLEEP);
        VERIFY0(nvlist_pack(nvp, &packed, &nvsize, NV_ENCODE_XDR, KM_SLEEP));
        nvlist_free(nvp);
        zfsvfs->z_fuid_size = nvsize;
        dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
            zfsvfs->z_fuid_size, packed, tx);
        kmem_free(packed, zfsvfs->z_fuid_size);
        VERIFY0(dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj, FTAG, &db));
        dmu_buf_will_dirty(db, tx);
        *(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
        dmu_buf_rele(db, FTAG);

        zfsvfs->z_fuid_dirty = B_FALSE;
        rw_exit(&zfsvfs->z_fuid_lock);
}

/*
 * Query the domain table for a given domain.
 *
 * If the domain isn't found and addok is set, it is added to the AVL
 * trees and the zfsvfs->z_fuid_dirty flag is set to TRUE.  It is then
 * up to the caller (or another thread) to detect the dirty table and
 * sync out the changes.
 */
static int
zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain,
    const char **retdomain, boolean_t addok)
{
        fuid_domain_t searchnode, *findnode;
        avl_index_t loc;
        krw_t rw = RW_READER;

        /*
         * If this is the dummy "nobody" domain, return an index of 0
         * to cause the created FUID to be a standard POSIX id
         * for the user nobody.
         */
        if (domain[0] == '\0') {
                if (retdomain)
                        *retdomain = nulldomain;
                return (0);
        }

        searchnode.f_ksid = ksid_lookupdomain(domain);
        if (retdomain)
                *retdomain = searchnode.f_ksid->kd_name;
        if (!zfsvfs->z_fuid_loaded)
                zfs_fuid_init(zfsvfs);

retry:
        rw_enter(&zfsvfs->z_fuid_lock, rw);
        findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc);

        if (findnode) {
                rw_exit(&zfsvfs->z_fuid_lock);
                ksiddomain_rele(searchnode.f_ksid);
                return (findnode->f_idx);
        } else if (addok) {
                fuid_domain_t *domnode;
                uint64_t retidx;

                if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) {
                        rw_exit(&zfsvfs->z_fuid_lock);
                        rw = RW_WRITER;
                        goto retry;
                }

                domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
                domnode->f_ksid = searchnode.f_ksid;

                retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1;

                avl_add(&zfsvfs->z_fuid_domain, domnode);
                avl_add(&zfsvfs->z_fuid_idx, domnode);
                zfsvfs->z_fuid_dirty = B_TRUE;
                rw_exit(&zfsvfs->z_fuid_lock);
                return (retidx);
        } else {
                rw_exit(&zfsvfs->z_fuid_lock);
                return (-1);
        }
}
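
/*
 * The dirty-table contract above is normally completed by the caller in
 * the same transaction that consumes the new index.  A hedged sketch of
 * that pattern (tx creation and assignment details elided; see
 * zfs_fuid_txhold() below for the space reservation):
 *
 *	if (zfsvfs->z_fuid_dirty)
 *		zfs_fuid_txhold(zfsvfs, tx);	(reserve space for the table)
 *	... assign the tx ...
 *	if (zfsvfs->z_fuid_dirty)
 *		zfs_fuid_sync(zfsvfs, tx);	(write out the updated table)
 */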

/*
 * Query the domain table by index, returning the domain string.
 *
 * Returns a pointer to the domain string held in an AVL node.
 */
const char *
zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx)
{
        const char *domain;

        if (idx == 0 || !zfsvfs->z_use_fuids)
                return (NULL);

        if (!zfsvfs->z_fuid_loaded)
                zfs_fuid_init(zfsvfs);

        rw_enter(&zfsvfs->z_fuid_lock, RW_READER);

        if (zfsvfs->z_fuid_obj || zfsvfs->z_fuid_dirty)
                domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx);
        else
                domain = nulldomain;
        rw_exit(&zfsvfs->z_fuid_lock);

        ASSERT(domain);
        return (domain);
}

void
zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp)
{
        *uidp = zfs_fuid_map_id(ZTOZSB(zp), KUID_TO_SUID(ZTOUID(zp)),
            cr, ZFS_OWNER);
        *gidp = zfs_fuid_map_id(ZTOZSB(zp), KGID_TO_SGID(ZTOGID(zp)),
            cr, ZFS_GROUP);
}

#ifdef __FreeBSD__
uid_t
zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
    cred_t *cr, zfs_fuid_type_t type)
{
        uint32_t index = FUID_INDEX(fuid);

        if (index == 0)
                return (fuid);

        return (UID_NOBODY);
}
#elif defined(__linux__)
uid_t
zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
    cred_t *cr, zfs_fuid_type_t type)
{
        /*
         * The Linux port only supports POSIX IDs, use the passed id.
         */
        return (fuid);
}

#else
uid_t
zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
    cred_t *cr, zfs_fuid_type_t type)
{
        uint32_t index = FUID_INDEX(fuid);
        const char *domain;
        uid_t id;

        if (index == 0)
                return (fuid);

        domain = zfs_fuid_find_by_idx(zfsvfs, index);
        ASSERT(domain != NULL);

        if (type == ZFS_OWNER || type == ZFS_ACE_USER) {
                (void) kidmap_getuidbysid(crgetzone(cr), domain,
                    FUID_RID(fuid), &id);
        } else {
                (void) kidmap_getgidbysid(crgetzone(cr), domain,
                    FUID_RID(fuid), &id);
        }
        return (id);
}
#endif
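
/*
 * For reference, a FUID packs the domain-table index and the Windows RID
 * into a single 64-bit value: the index lives in the upper bits and the
 * RID in the lower 32 (see the FUID_* macros in sys/zfs_fuid.h).  A
 * hedged sketch of the invariant the code above relies on, with "idx"
 * and "rid" as placeholder values:
 *
 *	uint64_t fuid = FUID_ENCODE(idx, rid);
 *
 *	ASSERT3U(FUID_INDEX(fuid), ==, idx);
 *	ASSERT3U(FUID_RID(fuid), ==, rid);
 *
 * An index of 0 means the value is a plain POSIX id and no domain-table
 * lookup is needed, which is why zfs_fuid_map_id() returns early in that
 * case.
 */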

/*
 * Add a FUID node to the list of FUIDs being created for this ACL.
 *
 * If the ACL has multiple domains, then keep only one copy of each unique
 * domain.
 */
void
zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid,
    uint64_t idx, uint64_t id, zfs_fuid_type_t type)
{
        zfs_fuid_t *fuid;
        zfs_fuid_domain_t *fuid_domain;
        zfs_fuid_info_t *fuidp;
        uint64_t fuididx;
        boolean_t found = B_FALSE;

        if (*fuidpp == NULL)
                *fuidpp = zfs_fuid_info_alloc();

        fuidp = *fuidpp;
        /*
         * First find the FUID domain index in the linked list.
         *
         * If one isn't found then create an entry.
         */

        for (fuididx = 1, fuid_domain = list_head(&fuidp->z_domains);
            fuid_domain; fuid_domain = list_next(&fuidp->z_domains,
            fuid_domain), fuididx++) {
                if (idx == fuid_domain->z_domidx) {
                        found = B_TRUE;
                        break;
                }
        }

        if (!found) {
                fuid_domain = kmem_alloc(sizeof (zfs_fuid_domain_t), KM_SLEEP);
                fuid_domain->z_domain = domain;
                fuid_domain->z_domidx = idx;
                list_insert_tail(&fuidp->z_domains, fuid_domain);
                fuidp->z_domain_str_sz += strlen(domain) + 1;
                fuidp->z_domain_cnt++;
        }

        if (type == ZFS_ACE_USER || type == ZFS_ACE_GROUP) {

                /*
                 * Now allocate the fuid entry and add it on the end of
                 * the list.
                 */

                fuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP);
                fuid->z_id = id;
                fuid->z_domidx = idx;
                fuid->z_logfuid = FUID_ENCODE(fuididx, rid);

                list_insert_tail(&fuidp->z_fuids, fuid);
                fuidp->z_fuid_cnt++;
        } else {
                if (type == ZFS_OWNER)
                        fuidp->z_fuid_owner = FUID_ENCODE(fuididx, rid);
                else
                        fuidp->z_fuid_group = FUID_ENCODE(fuididx, rid);
        }
}

#ifdef HAVE_KSID
/*
 * Create a file system FUID based on information in the user's cred.
 *
 * If the cred contains KSID_OWNER then it should be used to determine
 * the uid; otherwise the cred's uid will be used.  By default the cred's
 * gid is used unless it's an ephemeral ID, in which case KSID_GROUP will
 * be used if it exists.
 */
uint64_t
zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
    cred_t *cr, zfs_fuid_info_t **fuidp)
{
        uint64_t idx;
        ksid_t *ksid;
        uint32_t rid;
        const char *kdomain, *domain;
        uid_t id;

        VERIFY(type == ZFS_OWNER || type == ZFS_GROUP);

        ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP);

        if (!zfsvfs->z_use_fuids || (ksid == NULL)) {
                id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr);

                if (IS_EPHEMERAL(id))
                        return ((type == ZFS_OWNER) ? UID_NOBODY : GID_NOBODY);

                return ((uint64_t)id);
        }

        /*
         * ksid is present and FUID is supported
         */
        id = (type == ZFS_OWNER) ? ksid_getid(ksid) : crgetgid(cr);

        if (!IS_EPHEMERAL(id))
                return ((uint64_t)id);

        if (type == ZFS_GROUP)
                id = ksid_getid(ksid);

        rid = ksid_getrid(ksid);
        domain = ksid_getdomain(ksid);

        idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);

        zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type);

        return (FUID_ENCODE(idx, rid));
}
#endif /* HAVE_KSID */

/*
 * Create a file system FUID for an ACL ace
 * or a chown/chgrp of the file.
 * This is similar to zfs_fuid_create_cred, except that
 * we can't find the domain + rid information in the
 * cred.  Instead we have to query Winchester for the
 * domain and rid.
 *
 * During replay operations the domain+rid information is
 * found in the zfs_fuid_info_t that the replay code has
 * attached to the zfsvfs of the file system.
 */
uint64_t
zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
    zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp)
{
#ifdef HAVE_KSID
        const char *domain, *kdomain;
        uint32_t fuid_idx = FUID_INDEX(id);
        uint32_t rid = 0;
        idmap_stat status;
        uint64_t idx = UID_NOBODY;
        zfs_fuid_t *zfuid = NULL;
        zfs_fuid_info_t *fuidp = NULL;

        /*
         * If this is a POSIX ID, or the entry is already a FUID, then
         * just return the id.
         *
         * We may also be handed an already FUID'ized id via
         * chmod.
         */

        if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0)
                return (id);

        if (zfsvfs->z_replay) {
                fuidp = zfsvfs->z_fuid_replay;

                /*
                 * If we are passed an ephemeral id, but no
                 * fuid_info was logged, then return NOBODY.
                 * This is most likely a result of the idmap service
                 * not being available.
                 */
                if (fuidp == NULL)
                        return (UID_NOBODY);

                VERIFY3U(type, >=, ZFS_OWNER);
                VERIFY3U(type, <=, ZFS_ACE_GROUP);

                switch (type) {
                case ZFS_ACE_USER:
                case ZFS_ACE_GROUP:
                        zfuid = list_head(&fuidp->z_fuids);
                        rid = FUID_RID(zfuid->z_logfuid);
                        idx = FUID_INDEX(zfuid->z_logfuid);
                        break;
                case ZFS_OWNER:
                        rid = FUID_RID(fuidp->z_fuid_owner);
                        idx = FUID_INDEX(fuidp->z_fuid_owner);
                        break;
                case ZFS_GROUP:
                        rid = FUID_RID(fuidp->z_fuid_group);
                        idx = FUID_INDEX(fuidp->z_fuid_group);
                        break;
                }
                domain = fuidp->z_domain_table[idx - 1];
        } else {
                if (type == ZFS_OWNER || type == ZFS_ACE_USER)
                        status = kidmap_getsidbyuid(crgetzone(cr), id,
                            &domain, &rid);
                else
                        status = kidmap_getsidbygid(crgetzone(cr), id,
                            &domain, &rid);

                if (status != 0) {
                        /*
                         * When returning nobody we will need to
                         * make a dummy fuid table entry for logging
                         * purposes.
                         */
                        rid = UID_NOBODY;
                        domain = nulldomain;
                }
        }

        idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);

        if (!zfsvfs->z_replay)
                zfs_fuid_node_add(fuidpp, kdomain,
                    rid, idx, id, type);
        else if (zfuid != NULL) {
                list_remove(&fuidp->z_fuids, zfuid);
                kmem_free(zfuid, sizeof (zfs_fuid_t));
        }
        return (FUID_ENCODE(idx, rid));
#else
        /*
         * Without KSID support only POSIX IDs are available, so use the
         * passed id.
         */
        return (id);
#endif
}

void
zfs_fuid_destroy(zfsvfs_t *zfsvfs)
{
        rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
        if (!zfsvfs->z_fuid_loaded) {
                rw_exit(&zfsvfs->z_fuid_lock);
                return;
        }
        zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
        rw_exit(&zfsvfs->z_fuid_lock);
}

/*
 * Allocate zfs_fuid_info for tracking FUIDs created during
 * zfs_mknode, VOP_SETATTR() or VOP_SETSECATTR()
 */
zfs_fuid_info_t *
zfs_fuid_info_alloc(void)
{
        zfs_fuid_info_t *fuidp;

        fuidp = kmem_zalloc(sizeof (zfs_fuid_info_t), KM_SLEEP);
        list_create(&fuidp->z_domains, sizeof (zfs_fuid_domain_t),
            offsetof(zfs_fuid_domain_t, z_next));
        list_create(&fuidp->z_fuids, sizeof (zfs_fuid_t),
            offsetof(zfs_fuid_t, z_next));
        return (fuidp);
}

/*
 * Release all memory associated with zfs_fuid_info_t
 */
void
zfs_fuid_info_free(zfs_fuid_info_t *fuidp)
{
        zfs_fuid_t *zfuid;
        zfs_fuid_domain_t *zdomain;

        while ((zfuid = list_remove_head(&fuidp->z_fuids)) != NULL)
                kmem_free(zfuid, sizeof (zfs_fuid_t));

        if (fuidp->z_domain_table != NULL)
                kmem_free(fuidp->z_domain_table,
                    (sizeof (char *)) * fuidp->z_domain_cnt);

        while ((zdomain = list_remove_head(&fuidp->z_domains)) != NULL)
                kmem_free(zdomain, sizeof (zfs_fuid_domain_t));

        kmem_free(fuidp, sizeof (zfs_fuid_info_t));
}
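
/*
 * A hedged sketch (not compiled) of the zfs_fuid_info_t lifecycle as the
 * functions above use it; "id", "cr" and the logging step are
 * placeholders.  zfs_fuid_create() only allocates the info structure
 * (via zfs_fuid_node_add()) when an ephemeral id actually needs a FUID.
 *
 *	zfs_fuid_info_t *fuidp = NULL;
 *	uint64_t fuid;
 *
 *	fuid = zfs_fuid_create(zfsvfs, id, cr, ZFS_OWNER, &fuidp);
 *	... log the domains/FUIDs tracked in fuidp, if any ...
 *	if (fuidp != NULL)
 *		zfs_fuid_info_free(fuidp);
 */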

/*
 * Check to see if id is a group member.  If the cred has KSID info,
 * then the SID list is checked first; if the id is still not found,
 * then the POSIX groups are checked.
 *
 * Will use a straight FUID compare when possible.
 */
boolean_t
zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
{
        uid_t gid;

#ifdef illumos
        ksid_t *ksid = crgetsid(cr, KSID_GROUP);
        ksidlist_t *ksidlist = crgetsidlist(cr);

        if (ksid && ksidlist) {
                int i;
                ksid_t *ksid_groups;
                uint32_t idx = FUID_INDEX(id);
                uint32_t rid = FUID_RID(id);

                ksid_groups = ksidlist->ksl_sids;

                for (i = 0; i != ksidlist->ksl_nsid; i++) {
                        if (idx == 0) {
                                if (id != IDMAP_WK_CREATOR_GROUP_GID &&
                                    id == ksid_groups[i].ks_id) {
                                        return (B_TRUE);
                                }
                        } else {
                                const char *domain;

                                domain = zfs_fuid_find_by_idx(zfsvfs, idx);
                                ASSERT(domain != NULL);

                                if (strcmp(domain,
                                    IDMAP_WK_CREATOR_SID_AUTHORITY) == 0)
                                        return (B_FALSE);

                                if ((strcmp(domain,
                                    ksid_groups[i].ks_domain->kd_name) == 0) &&
                                    rid == ksid_groups[i].ks_rid)
                                        return (B_TRUE);
                        }
                }
        }
#endif /* illumos */

        /*
         * Not found in ksidlist, check posix groups
         */
        gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP);
        return (groupmember(gid, cr));
}

void
zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
        if (zfsvfs->z_fuid_obj == 0) {
                dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
                dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
                    FUID_SIZE_ESTIMATE(zfsvfs));
                dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
        } else {
                dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
                dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
                    FUID_SIZE_ESTIMATE(zfsvfs));
        }
}

/*
 * buf must be big enough (e.g., 32 bytes).
 */
int
zfs_id_to_fuidstr(zfsvfs_t *zfsvfs, const char *domain, uid_t rid,
    char *buf, size_t len, boolean_t addok)
{
        uint64_t fuid;
        int domainid = 0;

        if (domain && domain[0]) {
                domainid = zfs_fuid_find_by_domain(zfsvfs, domain, NULL, addok);
                if (domainid == -1)
                        return (SET_ERROR(ENOENT));
        }
        fuid = FUID_ENCODE(domainid, rid);
        (void) snprintf(buf, len, "%llx", (longlong_t)fuid);
        return (0);
}
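
/*
 * A hypothetical caller, showing the buffer sizing noted above:
 *
 *	char fuidstr[32];
 *
 *	error = zfs_id_to_fuidstr(zfsvfs, domain, rid, fuidstr,
 *	    sizeof (fuidstr), B_FALSE);
 */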
#endif