/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2021 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/zfs_refcount.h>

#ifdef ZFS_DEBUG
/*
 * Reference count tracking is disabled by default. Its memory requirements
 * are reasonable; however, as implemented it consumes a significant amount of
 * CPU time. Until its performance is improved it should be manually enabled.
 */
int reference_tracking_enable = B_FALSE;
static uint_t reference_history = 3; /* tunable */

static kmem_cache_t *reference_cache;

void
zfs_refcount_init(void)
{
	reference_cache = kmem_cache_create("reference_cache",
	    sizeof (reference_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zfs_refcount_fini(void)
{
	kmem_cache_destroy(reference_cache);
}

static int
zfs_refcount_compare(const void *x1, const void *x2)
{
	const reference_t *r1 = (const reference_t *)x1;
	const reference_t *r2 = (const reference_t *)x2;

	int cmp1 = TREE_CMP(r1->ref_holder, r2->ref_holder);
	int cmp2 = TREE_CMP(r1->ref_number, r2->ref_number);
	int cmp = cmp1 ? cmp1 : cmp2;
	return ((cmp || r1->ref_search) ? cmp : TREE_PCMP(r1, r2));
}
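
/*
 * Usage sketch (illustrative only): callers pair the create/destroy and
 * add/remove entry points below, passing an opaque holder tag so that
 * tracked builds can attribute every hold. FTAG, the per-function tag
 * used throughout ZFS, stands in here for any such tag:
 *
 *	zfs_refcount_t rc;
 *
 *	zfs_refcount_create(&rc);
 *	(void) zfs_refcount_add(&rc, FTAG);
 *	...
 *	(void) zfs_refcount_remove(&rc, FTAG);
 *	zfs_refcount_destroy(&rc);
 *
 * Whether holds are tracked individually follows reference_tracking_enable
 * at creation time, unless zfs_refcount_create_tracked() or
 * zfs_refcount_create_untracked() is used instead. zfs_refcount_destroy()
 * asserts that the count has returned to zero, so every add must be
 * balanced by a remove with the same holder tag before teardown.
 */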

void
zfs_refcount_create(zfs_refcount_t *rc)
{
	mutex_init(&rc->rc_mtx, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&rc->rc_tree, zfs_refcount_compare, sizeof (reference_t),
	    offsetof(reference_t, ref_link.a));
	list_create(&rc->rc_removed, sizeof (reference_t),
	    offsetof(reference_t, ref_link.l));
	rc->rc_count = 0;
	rc->rc_removed_count = 0;
	rc->rc_tracked = reference_tracking_enable;
}

void
zfs_refcount_create_tracked(zfs_refcount_t *rc)
{
	zfs_refcount_create(rc);
	rc->rc_tracked = B_TRUE;
}

void
zfs_refcount_create_untracked(zfs_refcount_t *rc)
{
	zfs_refcount_create(rc);
	rc->rc_tracked = B_FALSE;
}

void
zfs_refcount_destroy_many(zfs_refcount_t *rc, uint64_t number)
{
	reference_t *ref;
	void *cookie = NULL;

	ASSERT3U(rc->rc_count, ==, number);
	while ((ref = avl_destroy_nodes(&rc->rc_tree, &cookie)) != NULL)
		kmem_cache_free(reference_cache, ref);
	avl_destroy(&rc->rc_tree);

	while ((ref = list_remove_head(&rc->rc_removed)))
		kmem_cache_free(reference_cache, ref);
	list_destroy(&rc->rc_removed);
	mutex_destroy(&rc->rc_mtx);
}

void
zfs_refcount_destroy(zfs_refcount_t *rc)
{
	zfs_refcount_destroy_many(rc, 0);
}

int
zfs_refcount_is_zero(zfs_refcount_t *rc)
{
	return (zfs_refcount_count(rc) == 0);
}

int64_t
zfs_refcount_count(zfs_refcount_t *rc)
{
	return (atomic_load_64(&rc->rc_count));
}

int64_t
zfs_refcount_add_many(zfs_refcount_t *rc, uint64_t number, const void *holder)
{
	reference_t *ref;
	int64_t count;

	if (likely(!rc->rc_tracked)) {
		count = atomic_add_64_nv(&(rc)->rc_count, number);
		ASSERT3U(count, >=, number);
		return (count);
	}

	ref = kmem_cache_alloc(reference_cache, KM_SLEEP);
	ref->ref_holder = holder;
	ref->ref_number = number;
	ref->ref_search = B_FALSE;
	mutex_enter(&rc->rc_mtx);
	avl_add(&rc->rc_tree, ref);
	rc->rc_count += number;
	count = rc->rc_count;
	mutex_exit(&rc->rc_mtx);

	return (count);
}

int64_t
zfs_refcount_add(zfs_refcount_t *rc, const void *holder)
{
	return (zfs_refcount_add_many(rc, 1, holder));
}

void
zfs_refcount_add_few(zfs_refcount_t *rc, uint64_t number, const void *holder)
{
	if (likely(!rc->rc_tracked))
		(void) zfs_refcount_add_many(rc, number, holder);
	else for (; number > 0; number--)
		(void) zfs_refcount_add(rc, holder);
}

int64_t
zfs_refcount_remove_many(zfs_refcount_t *rc, uint64_t number,
    const void *holder)
{
	reference_t *ref, s;
	int64_t count;

	if (likely(!rc->rc_tracked)) {
		count = atomic_add_64_nv(&(rc)->rc_count, -number);
		ASSERT3S(count, >=, 0);
		return (count);
	}

	s.ref_holder = holder;
	s.ref_number = number;
	s.ref_search = B_TRUE;
	mutex_enter(&rc->rc_mtx);
	ASSERT3U(rc->rc_count, >=, number);
	ref = avl_find(&rc->rc_tree, &s, NULL);
	if (unlikely(ref == NULL)) {
		panic("No such hold %p on refcount %llx", holder,
		    (u_longlong_t)(uintptr_t)rc);
		return (-1);
	}
	avl_remove(&rc->rc_tree, ref);
	if (reference_history > 0) {
		list_insert_head(&rc->rc_removed, ref);
		if (rc->rc_removed_count >= reference_history) {
			ref = list_remove_tail(&rc->rc_removed);
			kmem_cache_free(reference_cache, ref);
		} else {
			rc->rc_removed_count++;
		}
	} else {
		kmem_cache_free(reference_cache, ref);
	}
	rc->rc_count -= number;
	count = rc->rc_count;
	mutex_exit(&rc->rc_mtx);
	return (count);
}

int64_t
zfs_refcount_remove(zfs_refcount_t *rc, const void *holder)
{
	return (zfs_refcount_remove_many(rc, 1, holder));
}

void
zfs_refcount_remove_few(zfs_refcount_t *rc, uint64_t number, const void *holder)
{
	if (likely(!rc->rc_tracked))
		(void) zfs_refcount_remove_many(rc, number, holder);
	else for (; number > 0; number--)
		(void) zfs_refcount_remove(rc, holder);
}

void
zfs_refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src)
{
	avl_tree_t tree;
	list_t removed;
	reference_t *ref;
	void *cookie = NULL;
	uint64_t count;
	uint_t removed_count;

	avl_create(&tree, zfs_refcount_compare, sizeof (reference_t),
	    offsetof(reference_t, ref_link.a));
	list_create(&removed, sizeof (reference_t),
	    offsetof(reference_t, ref_link.l));

	mutex_enter(&src->rc_mtx);
	count = src->rc_count;
	removed_count = src->rc_removed_count;
	src->rc_count = 0;
	src->rc_removed_count = 0;
	avl_swap(&tree, &src->rc_tree);
	list_move_tail(&removed, &src->rc_removed);
	mutex_exit(&src->rc_mtx);

	mutex_enter(&dst->rc_mtx);
	dst->rc_count += count;
	dst->rc_removed_count += removed_count;
	if (avl_is_empty(&dst->rc_tree))
		avl_swap(&dst->rc_tree, &tree);
	else while ((ref = avl_destroy_nodes(&tree, &cookie)) != NULL)
		avl_add(&dst->rc_tree, ref);
	list_move_tail(&dst->rc_removed, &removed);
	mutex_exit(&dst->rc_mtx);

	avl_destroy(&tree);
	list_destroy(&removed);
}

void
zfs_refcount_transfer_ownership_many(zfs_refcount_t *rc, uint64_t number,
    const void *current_holder, const void *new_holder)
{
	reference_t *ref, s;

	if (likely(!rc->rc_tracked))
		return;

	s.ref_holder = current_holder;
	s.ref_number = number;
	s.ref_search = B_TRUE;
	mutex_enter(&rc->rc_mtx);
	ref = avl_find(&rc->rc_tree, &s, NULL);
	ASSERT(ref);
	ref->ref_holder = new_holder;
	avl_update(&rc->rc_tree, ref);
	mutex_exit(&rc->rc_mtx);
}

void
zfs_refcount_transfer_ownership(zfs_refcount_t *rc, const void *current_holder,
    const void *new_holder)
{
	return (zfs_refcount_transfer_ownership_many(rc, 1, current_holder,
	    new_holder));
}

/*
 * If tracking is enabled, return true if a reference exists that matches
 * the "holder" tag. If tracking is disabled, then return true if a reference
 * might be held.
 */
boolean_t
zfs_refcount_held(zfs_refcount_t *rc, const void *holder)
{
	reference_t *ref, s;
	avl_index_t idx;
	boolean_t res;

	if (likely(!rc->rc_tracked))
		return (zfs_refcount_count(rc) > 0);

	s.ref_holder = holder;
	s.ref_number = 0;
	s.ref_search = B_TRUE;
	mutex_enter(&rc->rc_mtx);
	ref = avl_find(&rc->rc_tree, &s, &idx);
	if (likely(ref == NULL))
		ref = avl_nearest(&rc->rc_tree, idx, AVL_AFTER);
	res = ref && ref->ref_holder == holder;
	mutex_exit(&rc->rc_mtx);
	return (res);
}

/*
 * If tracking is enabled, return true if a reference does not exist that
 * matches the "holder" tag. If tracking is disabled, always return true
 * since the reference might not be held.
 */
boolean_t
zfs_refcount_not_held(zfs_refcount_t *rc, const void *holder)
{
	reference_t *ref, s;
	avl_index_t idx;
	boolean_t res;

	if (likely(!rc->rc_tracked))
		return (B_TRUE);

	mutex_enter(&rc->rc_mtx);
	s.ref_holder = holder;
	s.ref_number = 0;
	s.ref_search = B_TRUE;
	ref = avl_find(&rc->rc_tree, &s, &idx);
	if (likely(ref == NULL))
		ref = avl_nearest(&rc->rc_tree, idx, AVL_AFTER);
	res = ref == NULL || ref->ref_holder != holder;
	mutex_exit(&rc->rc_mtx);
	return (res);
}
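
/*
 * Assertion sketch (illustrative only): because the two predicates above can
 * only answer conservatively when tracking is disabled ("might be held" /
 * "might not be held"), they are suited to debug assertions rather than to
 * control flow, e.g.:
 *
 *	ASSERT(zfs_refcount_held(&rc, FTAG));
 *	...
 *	ASSERT(zfs_refcount_not_held(&rc, FTAG));
 *
 * where FTAG is the same holder tag that was passed to zfs_refcount_add().
 */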

EXPORT_SYMBOL(zfs_refcount_create);
EXPORT_SYMBOL(zfs_refcount_destroy);
EXPORT_SYMBOL(zfs_refcount_is_zero);
EXPORT_SYMBOL(zfs_refcount_count);
EXPORT_SYMBOL(zfs_refcount_add);
EXPORT_SYMBOL(zfs_refcount_remove);
EXPORT_SYMBOL(zfs_refcount_held);

ZFS_MODULE_PARAM(zfs, , reference_tracking_enable, INT, ZMOD_RW,
	"Track reference holders to refcount_t objects");

ZFS_MODULE_PARAM(zfs, , reference_history, UINT, ZMOD_RW,
	"Maximum reference holders being tracked");
#endif /* ZFS_DEBUG */