// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2020, 2021, 2022 by Pawel Jakub Dawidek
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/brt.h>
#include <sys/brt_impl.h>
#include <sys/ddt.h>
#include <sys/bitmap.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_impl.h>
#include <sys/kstat.h>
#include <sys/wmsum.h>

/*
 * Block Cloning design.
 *
 * Block Cloning allows one to manually clone a file (or a subset of its
 * blocks) into another (or the same) file by just creating additional
 * references to the data blocks, without copying the data itself. Those
 * references are kept in the Block Reference Tables (BRTs).
 *
 * In many ways this is similar to the existing deduplication, but there are
 * some important differences:
 *
 * - Deduplication is automatic and Block Cloning is not - one has to use
 *   dedicated system call(s) to clone the given file/blocks.
 * - Deduplication keeps all data blocks in its table, even those referenced
 *   just once. Block Cloning creates an entry in its tables only when there
 *   are at least two references to the given data block. If the block was
 *   never explicitly cloned, or the second-to-last reference was dropped,
 *   there is neither space nor performance overhead.
 * - Deduplication needs data to work - one needs to pass real data to the
 *   write(2) syscall, so a hash can be calculated. Block Cloning doesn't
 *   require data, just block pointers to the data, so it is extremely fast,
 *   as we pay neither the cost of reading the data nor the cost of writing
 *   the data - we operate exclusively on metadata.
 * - If the D (dedup) bit is not set in the block pointer, it means that
 *   the block is not in the dedup table (DDT) and we won't consult the DDT
 *   when we need to free the block. The Block Reference Table (BRT) must be
 *   consulted on every free, because we cannot modify the source BP (eg. by
 *   setting something similar to the D bit), so we have no hint whether the
 *   block is in the BRT. There is an optimization in place that eliminates
 *   the majority of BRT lookups; it is described below in the
 *   "Minimizing free penalty" section.
 * - The BRT entry is much smaller than the DDT entry - for BRT we only store
 *   a 64-bit offset and a 64-bit reference counter.
 * - Dedup keys are cryptographic hashes, so two blocks that are close to each
 *   other on disk are most likely in totally different parts of the DDT.
 *   The BRT entry keys are offsets into a single top-level VDEV, so data
 *   blocks from one file should have BRT entries close to each other.
 * - Scrub will only do a single pass over a block that is referenced multiple
 *   times in the DDT. Unfortunately that is not currently possible with Block
 *   Cloning, so a block referenced multiple times will be scrubbed multiple
 *   times. The new, sorted scrub should be able to eliminate duplicated reads
 *   given enough memory.
 * - Deduplication requires a cryptographically strong hash as a checksum, or
 *   additional data verification. Block Cloning works with any checksum
 *   algorithm, or even with checksumming disabled.
 *
 * As mentioned above, the BRT entries are much smaller than the DDT entries.
 * To uniquely identify a block we just need its vdev id and offset. We also
 * need to maintain a reference counter. The vdev id will often repeat, as
 * there is a small number of top-level VDEVs and a large number of blocks
 * stored in each VDEV. We take advantage of that to reduce the BRT entry size
 * further by maintaining one BRT for each top-level VDEV, so the BRT entry
 * itself only needs to hold the offset and the counter.
 *
 * Minimizing free penalty.
 *
 * Block Cloning allows creating additional references to any existing block.
 * When we free a block there is no hint in the block pointer whether the
 * block was cloned or not, so on each free we have to check if there is a
 * corresponding entry in the BRT or not. If there is, we need to decrease
 * the reference counter. Doing a BRT lookup on every free can potentially be
 * expensive, requiring additional I/Os if the BRT doesn't fit into memory.
 * This is the main problem with deduplication, so we've learned our lesson
 * and try not to repeat the same mistake here. How do we do that? We divide
 * each top-level VDEV into 16MB regions. For each region we maintain a
 * counter that is a sum of all the BRT entries that have offsets within the
 * region. This creates an entry counters array of 16-bit numbers for each
 * top-level VDEV. The entry counters array is always kept in memory and
 * updated on disk in the same transaction group as the BRT updates, to keep
 * everything in sync. We can keep the array in memory because it is very
 * small: with 16MB regions and a 1TB VDEV the array requires only 128kB of
 * memory (we may decide to decrease the region size even further in the
 * future). Now, when we want to free a block, we first consult the array.
 * If the counter for the whole region is zero, there is no need to look for
 * the BRT entry, as there isn't one for sure. Only if the counter for the
 * region is greater than zero do we perform a BRT lookup, and if an entry is
 * found we decrease the reference counter in the BRT entry and in the entry
 * counters array (see the sketch at the end of this section).
 *
 * The entry counters array is small, but can potentially be larger for very
 * large VDEVs or smaller regions. In that case we don't want to rewrite the
 * entire array on every change, so we divide the array into 32kB blocks and
 * keep a bitmap of dirty blocks within a transaction group. When we sync the
 * transaction group we only need to update the parts of the entry counters
 * array that were modified. Note: keeping track of the dirty parts of the
 * entry counters array is implemented, but updating only parts of the array
 * on disk is not yet implemented - for now we update the entire array if
 * there was any change.
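 *
 * For illustration only (brt_vdev_lookup() and brt_maybe_exists() below hold
 * the real logic), the check performed on every free boils down to:
 *
 *	idx = DVA_GET_OFFSET(&bp->blk_dva[0]) / spa->spa_brt_rangesize;
 *	if (idx >= brtvd->bv_size || brt_vdev_entcount_get(brtvd, idx) == 0)
 *		the block is surely not in the BRT - free it immediately;
 *	else
 *		do the (possibly expensive) BRT ZAP lookup and decref;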
 *
 * The implementation tries to be economical: if the BRT is not used, or no
 * longer used, there will be no entries in the MOS and no additional memory
 * used (e.g. the entry counters array is only allocated if needed).
 *
 * Interaction between Deduplication and Block Cloning.
 *
 * If both functionalities are in use, we could end up with a block that is
 * referenced multiple times in both the DDT and the BRT. When we free one of
 * the references we cannot tell which table it belongs to, so we would have
 * to decide which table takes precedence: do we first clear DDT references
 * or BRT references? To avoid this dilemma the BRT cooperates with the DDT -
 * if a given block is being cloned using the BRT and the BP has the D (dedup)
 * bit set, the BRT will look up the DDT entry instead and increase the
 * counter there. No BRT entry will be created for a block which has the
 * D (dedup) bit set. The BRT may be more efficient for manual deduplication,
 * but if the block is already in the DDT, then creating an additional BRT
 * entry would be less efficient. This clever idea was proposed by Allan Jude.
 *
 * Block Cloning across datasets.
 *
 * Block Cloning is not limited to cloning blocks within the same dataset.
 * It is possible (and very useful) to clone blocks between different
 * datasets. One use case is recovering files from snapshots. By cloning the
 * files into the dataset we need no additional storage. Without Block Cloning
 * we would need additional space for those files.
 * Another interesting use case is moving files between datasets (copying the
 * file content to the new dataset and removing the source file). In that case
 * Block Cloning will only be used briefly, because the BRT entries will be
 * removed when the source is removed.
 * Block Cloning across encrypted datasets is supported as long as both
 * datasets share the same master key (e.g. snapshots and clones).
 *
 * Block Cloning flow through ZFS layers.
 *
 * Note: Block Cloning can be used both for cloning file system blocks and
 * ZVOL blocks. As of this writing no interface is implemented that allows
 * for block cloning within a ZVOL.
 * FreeBSD and Linux provide the copy_file_range(2) system call and we use it
 * for block cloning.
 *
 *	ssize_t
 *	copy_file_range(int infd, off_t *inoffp, int outfd, off_t *outoffp,
 *	    size_t len, unsigned int flags);
 *
 * Even though offsets and length represent bytes, they have to be
 * block-aligned or we will return an error so the upper layer can fall back
 * to the generic mechanism that will just copy the data.
 * Using copy_file_range(2) will call the OS-independent zfs_clone_range()
 * function. This function was implemented based on zfs_write(), but instead
 * of writing the given data we first read block pointers using the new
 * dmu_read_l0_bps() function from the source file. Once we have BPs from the
 * source file we call the dmu_brt_clone() function on the destination file.
 * This function allocates BPs for us. We iterate over all source BPs. If the
 * given BP is a hole or an embedded block, we just copy the BP as-is.
 * If it points to real data, we place this BP on a BRT pending list using the
 * brt_pending_add() function.
 *
 * We use this pending list to keep track of all BPs that got new references
 * within this transaction group.
 *
 * Some special cases to consider and how we address them:
 * - The block we want to clone may have been created within the same
 *   transaction group that we are trying to clone. Such a block has no BP
 *   allocated yet, so it cannot be immediately cloned. We return EAGAIN.
 * - The block we want to clone may have been modified within the same
 *   transaction group. We return EAGAIN.
 * - A block may be cloned multiple times during one transaction group
 *   (that's why the pending list is actually a tree and not an append-only
 *   list - this way we can figure out faster whether this block is cloned
 *   for the first time in this txg or a consecutive time).
 * - A block may be cloned and freed within the same transaction group
 *   (see dbuf_undirty()).
 * - A block may be cloned and within the same transaction group the clone
 *   can be cloned again (see dmu_read_l0_bps()).
 * - A file might have been deleted, but the caller still has a file
 *   descriptor open to this file and clones it.
 *
 * When we free a block we have an additional step in the ZIO pipeline where
 * we call the zio_brt_free() function. It calls brt_entry_decref(), which
 * loads the corresponding BRT entry (if one exists) and decreases the
 * reference counter. If this is not the last reference we stop the ZIO
 * pipeline here. If this is the last reference, or the block is not in the
 * BRT, we continue the pipeline and free the block as usual.
 *
 * At the beginning of spa_sync(), when there can be no more block cloning
 * but before issuing frees, we call brt_pending_apply(). This function
 * applies all the new clones to the BRT table - we load BRT entries and
 * update reference counters. To sync new BRT entries to disk we use the
 * brt_sync() function, which syncs all dirty per-top-level-vdev BRTs, the
 * entry counters arrays, etc.
 *
 * Block Cloning and ZIL.
 *
 * Every clone operation is divided into chunks (similar to write) and each
 * chunk is cloned in a separate transaction. The chunk size is determined by
 * how many BPs we can fit into a single ZIL entry.
 * Replaying a clone operation is different from a regular clone operation,
 * as when we log clone operations we cannot use the source object - it may
 * reside on a different dataset - so we log the BPs we want to clone.
 * The ZIL is replayed when we mount the given dataset, not when the pool is
 * imported. Taking this into account, it is possible that the pool is
 * imported without mounting datasets and the source dataset is destroyed
 * before the destination dataset is mounted and its ZIL replayed.
 * To address this situation we leverage the zil_claim() mechanism, where ZFS
 * will parse all the ZILs on pool import. When we come across TX_CLONE_RANGE
 * entries, we bump the reference counters for their BPs in the BRT. Then
 * on mount and ZIL replay we bump the reference counters once more, while
 * the first references are dropped during ZIL destroy by
 * zil_free_clone_range().
 * It is possible that after zil_claim() we never mount the destination, so
 * we never replay its ZIL and just destroy it. In this case the only taken
In this case the only taken 242 * references will be dropped by zil_free_clone_range(), since the cloning is 243 * not going to ever take place. 244 */ 245 246 static kmem_cache_t *brt_entry_cache; 247 248 /* 249 * Enable/disable prefetching of BRT entries that we are going to modify. 250 */ 251 static int brt_zap_prefetch = 1; 252 253 #ifdef ZFS_DEBUG 254 #define BRT_DEBUG(...) do { \ 255 if ((zfs_flags & ZFS_DEBUG_BRT) != 0) { \ 256 __dprintf(B_TRUE, __FILE__, __func__, __LINE__, __VA_ARGS__); \ 257 } \ 258 } while (0) 259 #else 260 #define BRT_DEBUG(...) do { } while (0) 261 #endif 262 263 static int brt_zap_default_bs = 12; 264 static int brt_zap_default_ibs = 12; 265 266 static kstat_t *brt_ksp; 267 268 typedef struct brt_stats { 269 kstat_named_t brt_addref_entry_not_on_disk; 270 kstat_named_t brt_addref_entry_on_disk; 271 kstat_named_t brt_decref_entry_in_memory; 272 kstat_named_t brt_decref_entry_loaded_from_disk; 273 kstat_named_t brt_decref_entry_not_in_memory; 274 kstat_named_t brt_decref_entry_read_lost_race; 275 kstat_named_t brt_decref_entry_still_referenced; 276 kstat_named_t brt_decref_free_data_later; 277 kstat_named_t brt_decref_free_data_now; 278 kstat_named_t brt_decref_no_entry; 279 } brt_stats_t; 280 281 static brt_stats_t brt_stats = { 282 { "addref_entry_not_on_disk", KSTAT_DATA_UINT64 }, 283 { "addref_entry_on_disk", KSTAT_DATA_UINT64 }, 284 { "decref_entry_in_memory", KSTAT_DATA_UINT64 }, 285 { "decref_entry_loaded_from_disk", KSTAT_DATA_UINT64 }, 286 { "decref_entry_not_in_memory", KSTAT_DATA_UINT64 }, 287 { "decref_entry_read_lost_race", KSTAT_DATA_UINT64 }, 288 { "decref_entry_still_referenced", KSTAT_DATA_UINT64 }, 289 { "decref_free_data_later", KSTAT_DATA_UINT64 }, 290 { "decref_free_data_now", KSTAT_DATA_UINT64 }, 291 { "decref_no_entry", KSTAT_DATA_UINT64 } 292 }; 293 294 struct { 295 wmsum_t brt_addref_entry_not_on_disk; 296 wmsum_t brt_addref_entry_on_disk; 297 wmsum_t brt_decref_entry_in_memory; 298 wmsum_t brt_decref_entry_loaded_from_disk; 299 wmsum_t brt_decref_entry_not_in_memory; 300 wmsum_t brt_decref_entry_read_lost_race; 301 wmsum_t brt_decref_entry_still_referenced; 302 wmsum_t brt_decref_free_data_later; 303 wmsum_t brt_decref_free_data_now; 304 wmsum_t brt_decref_no_entry; 305 } brt_sums; 306 307 #define BRTSTAT_BUMP(stat) wmsum_add(&brt_sums.stat, 1) 308 309 static int brt_entry_compare(const void *x1, const void *x2); 310 static void brt_vdevs_expand(spa_t *spa, uint64_t nvdevs); 311 312 static void 313 brt_rlock(spa_t *spa) 314 { 315 rw_enter(&spa->spa_brt_lock, RW_READER); 316 } 317 318 static void 319 brt_wlock(spa_t *spa) 320 { 321 rw_enter(&spa->spa_brt_lock, RW_WRITER); 322 } 323 324 static void 325 brt_unlock(spa_t *spa) 326 { 327 rw_exit(&spa->spa_brt_lock); 328 } 329 330 static uint16_t 331 brt_vdev_entcount_get(const brt_vdev_t *brtvd, uint64_t idx) 332 { 333 334 ASSERT3U(idx, <, brtvd->bv_size); 335 336 if (unlikely(brtvd->bv_need_byteswap)) { 337 return (BSWAP_16(brtvd->bv_entcount[idx])); 338 } else { 339 return (brtvd->bv_entcount[idx]); 340 } 341 } 342 343 static void 344 brt_vdev_entcount_set(brt_vdev_t *brtvd, uint64_t idx, uint16_t entcnt) 345 { 346 347 ASSERT3U(idx, <, brtvd->bv_size); 348 349 if (unlikely(brtvd->bv_need_byteswap)) { 350 brtvd->bv_entcount[idx] = BSWAP_16(entcnt); 351 } else { 352 brtvd->bv_entcount[idx] = entcnt; 353 } 354 } 355 356 static void 357 brt_vdev_entcount_inc(brt_vdev_t *brtvd, uint64_t idx) 358 { 359 uint16_t entcnt; 360 361 ASSERT3U(idx, <, brtvd->bv_size); 362 363 entcnt = 
brt_vdev_entcount_get(brtvd, idx); 364 ASSERT(entcnt < UINT16_MAX); 365 366 brt_vdev_entcount_set(brtvd, idx, entcnt + 1); 367 } 368 369 static void 370 brt_vdev_entcount_dec(brt_vdev_t *brtvd, uint64_t idx) 371 { 372 uint16_t entcnt; 373 374 ASSERT3U(idx, <, brtvd->bv_size); 375 376 entcnt = brt_vdev_entcount_get(brtvd, idx); 377 ASSERT(entcnt > 0); 378 379 brt_vdev_entcount_set(brtvd, idx, entcnt - 1); 380 } 381 382 #ifdef ZFS_DEBUG 383 static void 384 brt_vdev_dump(brt_vdev_t *brtvd) 385 { 386 uint64_t idx; 387 388 uint64_t nblocks = BRT_RANGESIZE_TO_NBLOCKS(brtvd->bv_size); 389 zfs_dbgmsg(" BRT vdevid=%llu meta_dirty=%d entcount_dirty=%d " 390 "size=%llu totalcount=%llu nblocks=%llu bitmapsize=%zu", 391 (u_longlong_t)brtvd->bv_vdevid, 392 brtvd->bv_meta_dirty, brtvd->bv_entcount_dirty, 393 (u_longlong_t)brtvd->bv_size, 394 (u_longlong_t)brtvd->bv_totalcount, 395 (u_longlong_t)nblocks, 396 (size_t)BT_SIZEOFMAP(nblocks)); 397 if (brtvd->bv_totalcount > 0) { 398 zfs_dbgmsg(" entcounts:"); 399 for (idx = 0; idx < brtvd->bv_size; idx++) { 400 uint16_t entcnt = brt_vdev_entcount_get(brtvd, idx); 401 if (entcnt > 0) { 402 zfs_dbgmsg(" [%04llu] %hu", 403 (u_longlong_t)idx, entcnt); 404 } 405 } 406 } 407 if (brtvd->bv_entcount_dirty) { 408 char *bitmap; 409 410 bitmap = kmem_alloc(nblocks + 1, KM_SLEEP); 411 for (idx = 0; idx < nblocks; idx++) { 412 bitmap[idx] = 413 BT_TEST(brtvd->bv_bitmap, idx) ? 'x' : '.'; 414 } 415 bitmap[idx] = '\0'; 416 zfs_dbgmsg(" dirty: %s", bitmap); 417 kmem_free(bitmap, nblocks + 1); 418 } 419 } 420 #endif 421 422 static brt_vdev_t * 423 brt_vdev(spa_t *spa, uint64_t vdevid, boolean_t alloc) 424 { 425 brt_vdev_t *brtvd = NULL; 426 427 brt_rlock(spa); 428 if (vdevid < spa->spa_brt_nvdevs) { 429 brtvd = spa->spa_brt_vdevs[vdevid]; 430 } else if (alloc) { 431 /* New VDEV was added. */ 432 brt_unlock(spa); 433 brt_wlock(spa); 434 if (vdevid >= spa->spa_brt_nvdevs) 435 brt_vdevs_expand(spa, vdevid + 1); 436 brtvd = spa->spa_brt_vdevs[vdevid]; 437 } 438 brt_unlock(spa); 439 return (brtvd); 440 } 441 442 static void 443 brt_vdev_create(spa_t *spa, brt_vdev_t *brtvd, dmu_tx_t *tx) 444 { 445 char name[64]; 446 447 ASSERT(brtvd->bv_initiated); 448 ASSERT0(brtvd->bv_mos_brtvdev); 449 ASSERT0(brtvd->bv_mos_entries); 450 451 uint64_t mos_entries = zap_create_flags(spa->spa_meta_objset, 0, 452 ZAP_FLAG_HASH64 | ZAP_FLAG_UINT64_KEY, DMU_OTN_ZAP_METADATA, 453 brt_zap_default_bs, brt_zap_default_ibs, DMU_OT_NONE, 0, tx); 454 VERIFY(mos_entries != 0); 455 VERIFY0(dnode_hold(spa->spa_meta_objset, mos_entries, brtvd, 456 &brtvd->bv_mos_entries_dnode)); 457 rw_enter(&brtvd->bv_mos_entries_lock, RW_WRITER); 458 brtvd->bv_mos_entries = mos_entries; 459 rw_exit(&brtvd->bv_mos_entries_lock); 460 BRT_DEBUG("MOS entries created, object=%llu", 461 (u_longlong_t)brtvd->bv_mos_entries); 462 463 /* 464 * We allocate DMU buffer to store the bv_entcount[] array. 465 * We will keep array size (bv_size) and cummulative count for all 466 * bv_entcount[]s (bv_totalcount) in the bonus buffer. 
467 */ 468 brtvd->bv_mos_brtvdev = dmu_object_alloc(spa->spa_meta_objset, 469 DMU_OTN_UINT64_METADATA, BRT_BLOCKSIZE, 470 DMU_OTN_UINT64_METADATA, sizeof (brt_vdev_phys_t), tx); 471 VERIFY(brtvd->bv_mos_brtvdev != 0); 472 BRT_DEBUG("MOS BRT VDEV created, object=%llu", 473 (u_longlong_t)brtvd->bv_mos_brtvdev); 474 475 snprintf(name, sizeof (name), "%s%llu", BRT_OBJECT_VDEV_PREFIX, 476 (u_longlong_t)brtvd->bv_vdevid); 477 VERIFY0(zap_add(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, name, 478 sizeof (uint64_t), 1, &brtvd->bv_mos_brtvdev, tx)); 479 BRT_DEBUG("Pool directory object created, object=%s", name); 480 481 spa_feature_incr(spa, SPA_FEATURE_BLOCK_CLONING, tx); 482 } 483 484 static void 485 brt_vdev_realloc(spa_t *spa, brt_vdev_t *brtvd) 486 { 487 vdev_t *vd; 488 uint16_t *entcount; 489 ulong_t *bitmap; 490 uint64_t nblocks, onblocks, size; 491 492 ASSERT(RW_WRITE_HELD(&brtvd->bv_lock)); 493 494 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 495 vd = vdev_lookup_top(spa, brtvd->bv_vdevid); 496 size = (vdev_get_min_asize(vd) - 1) / spa->spa_brt_rangesize + 1; 497 spa_config_exit(spa, SCL_VDEV, FTAG); 498 499 entcount = vmem_zalloc(sizeof (entcount[0]) * size, KM_SLEEP); 500 nblocks = BRT_RANGESIZE_TO_NBLOCKS(size); 501 bitmap = kmem_zalloc(BT_SIZEOFMAP(nblocks), KM_SLEEP); 502 503 if (!brtvd->bv_initiated) { 504 ASSERT0(brtvd->bv_size); 505 ASSERT0P(brtvd->bv_entcount); 506 ASSERT0P(brtvd->bv_bitmap); 507 } else { 508 ASSERT(brtvd->bv_size > 0); 509 ASSERT(brtvd->bv_entcount != NULL); 510 ASSERT(brtvd->bv_bitmap != NULL); 511 /* 512 * TODO: Allow vdev shrinking. We only need to implement 513 * shrinking the on-disk BRT VDEV object. 514 * dmu_free_range(spa->spa_meta_objset, brtvd->bv_mos_brtvdev, 515 * offset, size, tx); 516 */ 517 ASSERT3U(brtvd->bv_size, <=, size); 518 519 memcpy(entcount, brtvd->bv_entcount, 520 sizeof (entcount[0]) * MIN(size, brtvd->bv_size)); 521 vmem_free(brtvd->bv_entcount, 522 sizeof (entcount[0]) * brtvd->bv_size); 523 onblocks = BRT_RANGESIZE_TO_NBLOCKS(brtvd->bv_size); 524 memcpy(bitmap, brtvd->bv_bitmap, MIN(BT_SIZEOFMAP(nblocks), 525 BT_SIZEOFMAP(onblocks))); 526 kmem_free(brtvd->bv_bitmap, BT_SIZEOFMAP(onblocks)); 527 } 528 529 brtvd->bv_size = size; 530 brtvd->bv_entcount = entcount; 531 brtvd->bv_bitmap = bitmap; 532 if (!brtvd->bv_initiated) { 533 brtvd->bv_need_byteswap = FALSE; 534 brtvd->bv_initiated = TRUE; 535 BRT_DEBUG("BRT VDEV %llu initiated.", 536 (u_longlong_t)brtvd->bv_vdevid); 537 } 538 } 539 540 static int 541 brt_vdev_load(spa_t *spa, brt_vdev_t *brtvd) 542 { 543 dmu_buf_t *db; 544 brt_vdev_phys_t *bvphys; 545 int error; 546 547 ASSERT(!brtvd->bv_initiated); 548 ASSERT(brtvd->bv_mos_brtvdev != 0); 549 550 error = dmu_bonus_hold(spa->spa_meta_objset, brtvd->bv_mos_brtvdev, 551 FTAG, &db); 552 if (error != 0) 553 return (error); 554 555 bvphys = db->db_data; 556 if (spa->spa_brt_rangesize == 0) { 557 spa->spa_brt_rangesize = bvphys->bvp_rangesize; 558 } else { 559 ASSERT3U(spa->spa_brt_rangesize, ==, bvphys->bvp_rangesize); 560 } 561 562 brt_vdev_realloc(spa, brtvd); 563 564 /* TODO: We don't support VDEV shrinking. */ 565 ASSERT3U(bvphys->bvp_size, <=, brtvd->bv_size); 566 567 /* 568 * If VDEV grew, we will leave new bv_entcount[] entries zeroed out. 
569 */ 570 error = dmu_read(spa->spa_meta_objset, brtvd->bv_mos_brtvdev, 0, 571 MIN(brtvd->bv_size, bvphys->bvp_size) * sizeof (uint16_t), 572 brtvd->bv_entcount, DMU_READ_NO_PREFETCH); 573 if (error != 0) 574 return (error); 575 576 ASSERT(bvphys->bvp_mos_entries != 0); 577 VERIFY0(dnode_hold(spa->spa_meta_objset, bvphys->bvp_mos_entries, brtvd, 578 &brtvd->bv_mos_entries_dnode)); 579 rw_enter(&brtvd->bv_mos_entries_lock, RW_WRITER); 580 brtvd->bv_mos_entries = bvphys->bvp_mos_entries; 581 rw_exit(&brtvd->bv_mos_entries_lock); 582 brtvd->bv_need_byteswap = 583 (bvphys->bvp_byteorder != BRT_NATIVE_BYTEORDER); 584 brtvd->bv_totalcount = bvphys->bvp_totalcount; 585 brtvd->bv_usedspace = bvphys->bvp_usedspace; 586 brtvd->bv_savedspace = bvphys->bvp_savedspace; 587 588 dmu_buf_rele(db, FTAG); 589 590 BRT_DEBUG("BRT VDEV %llu loaded: mos_brtvdev=%llu, mos_entries=%llu", 591 (u_longlong_t)brtvd->bv_vdevid, 592 (u_longlong_t)brtvd->bv_mos_brtvdev, 593 (u_longlong_t)brtvd->bv_mos_entries); 594 return (0); 595 } 596 597 static void 598 brt_vdev_dealloc(brt_vdev_t *brtvd) 599 { 600 ASSERT(RW_WRITE_HELD(&brtvd->bv_lock)); 601 ASSERT(brtvd->bv_initiated); 602 ASSERT0(avl_numnodes(&brtvd->bv_tree)); 603 604 vmem_free(brtvd->bv_entcount, sizeof (uint16_t) * brtvd->bv_size); 605 brtvd->bv_entcount = NULL; 606 uint64_t nblocks = BRT_RANGESIZE_TO_NBLOCKS(brtvd->bv_size); 607 kmem_free(brtvd->bv_bitmap, BT_SIZEOFMAP(nblocks)); 608 brtvd->bv_bitmap = NULL; 609 610 brtvd->bv_size = 0; 611 612 brtvd->bv_initiated = FALSE; 613 BRT_DEBUG("BRT VDEV %llu deallocated.", (u_longlong_t)brtvd->bv_vdevid); 614 } 615 616 static void 617 brt_vdev_destroy(spa_t *spa, brt_vdev_t *brtvd, dmu_tx_t *tx) 618 { 619 char name[64]; 620 uint64_t count; 621 622 ASSERT(brtvd->bv_initiated); 623 ASSERT(brtvd->bv_mos_brtvdev != 0); 624 ASSERT(brtvd->bv_mos_entries != 0); 625 ASSERT0(brtvd->bv_totalcount); 626 ASSERT0(brtvd->bv_usedspace); 627 ASSERT0(brtvd->bv_savedspace); 628 629 uint64_t mos_entries = brtvd->bv_mos_entries; 630 rw_enter(&brtvd->bv_mos_entries_lock, RW_WRITER); 631 brtvd->bv_mos_entries = 0; 632 rw_exit(&brtvd->bv_mos_entries_lock); 633 dnode_rele(brtvd->bv_mos_entries_dnode, brtvd); 634 brtvd->bv_mos_entries_dnode = NULL; 635 ASSERT0(zap_count(spa->spa_meta_objset, mos_entries, &count)); 636 ASSERT0(count); 637 VERIFY0(zap_destroy(spa->spa_meta_objset, mos_entries, tx)); 638 BRT_DEBUG("MOS entries destroyed, object=%llu", 639 (u_longlong_t)mos_entries); 640 641 VERIFY0(dmu_object_free(spa->spa_meta_objset, brtvd->bv_mos_brtvdev, 642 tx)); 643 BRT_DEBUG("MOS BRT VDEV destroyed, object=%llu", 644 (u_longlong_t)brtvd->bv_mos_brtvdev); 645 brtvd->bv_mos_brtvdev = 0; 646 brtvd->bv_entcount_dirty = FALSE; 647 648 snprintf(name, sizeof (name), "%s%llu", BRT_OBJECT_VDEV_PREFIX, 649 (u_longlong_t)brtvd->bv_vdevid); 650 VERIFY0(zap_remove(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 651 name, tx)); 652 BRT_DEBUG("Pool directory object removed, object=%s", name); 653 654 brtvd->bv_meta_dirty = FALSE; 655 656 rw_enter(&brtvd->bv_lock, RW_WRITER); 657 brt_vdev_dealloc(brtvd); 658 rw_exit(&brtvd->bv_lock); 659 660 spa_feature_decr(spa, SPA_FEATURE_BLOCK_CLONING, tx); 661 } 662 663 static void 664 brt_vdevs_expand(spa_t *spa, uint64_t nvdevs) 665 { 666 brt_vdev_t **vdevs; 667 668 ASSERT(RW_WRITE_HELD(&spa->spa_brt_lock)); 669 ASSERT3U(nvdevs, >=, spa->spa_brt_nvdevs); 670 671 if (nvdevs == spa->spa_brt_nvdevs) 672 return; 673 674 vdevs = kmem_zalloc(sizeof (*spa->spa_brt_vdevs) * nvdevs, KM_SLEEP); 675 if (spa->spa_brt_nvdevs 
> 0) { 676 ASSERT(spa->spa_brt_vdevs != NULL); 677 678 memcpy(vdevs, spa->spa_brt_vdevs, 679 sizeof (*spa->spa_brt_vdevs) * spa->spa_brt_nvdevs); 680 kmem_free(spa->spa_brt_vdevs, 681 sizeof (*spa->spa_brt_vdevs) * spa->spa_brt_nvdevs); 682 } 683 spa->spa_brt_vdevs = vdevs; 684 685 for (uint64_t vdevid = spa->spa_brt_nvdevs; vdevid < nvdevs; vdevid++) { 686 brt_vdev_t *brtvd = kmem_zalloc(sizeof (*brtvd), KM_SLEEP); 687 rw_init(&brtvd->bv_lock, NULL, RW_DEFAULT, NULL); 688 brtvd->bv_vdevid = vdevid; 689 brtvd->bv_initiated = FALSE; 690 rw_init(&brtvd->bv_mos_entries_lock, NULL, RW_DEFAULT, NULL); 691 avl_create(&brtvd->bv_tree, brt_entry_compare, 692 sizeof (brt_entry_t), offsetof(brt_entry_t, bre_node)); 693 for (int i = 0; i < TXG_SIZE; i++) { 694 avl_create(&brtvd->bv_pending_tree[i], 695 brt_entry_compare, sizeof (brt_entry_t), 696 offsetof(brt_entry_t, bre_node)); 697 } 698 mutex_init(&brtvd->bv_pending_lock, NULL, MUTEX_DEFAULT, NULL); 699 spa->spa_brt_vdevs[vdevid] = brtvd; 700 } 701 702 BRT_DEBUG("BRT VDEVs expanded from %llu to %llu.", 703 (u_longlong_t)spa->spa_brt_nvdevs, (u_longlong_t)nvdevs); 704 spa->spa_brt_nvdevs = nvdevs; 705 } 706 707 static boolean_t 708 brt_vdev_lookup(spa_t *spa, brt_vdev_t *brtvd, uint64_t offset) 709 { 710 uint64_t idx = offset / spa->spa_brt_rangesize; 711 if (idx < brtvd->bv_size) { 712 /* VDEV wasn't expanded. */ 713 return (brt_vdev_entcount_get(brtvd, idx) > 0); 714 } 715 return (FALSE); 716 } 717 718 static void 719 brt_vdev_addref(spa_t *spa, brt_vdev_t *brtvd, const brt_entry_t *bre, 720 uint64_t dsize, uint64_t count) 721 { 722 uint64_t idx; 723 724 ASSERT(brtvd->bv_initiated); 725 726 brtvd->bv_savedspace += dsize * count; 727 brtvd->bv_meta_dirty = TRUE; 728 729 if (bre->bre_count > 0) 730 return; 731 732 brtvd->bv_usedspace += dsize; 733 734 idx = BRE_OFFSET(bre) / spa->spa_brt_rangesize; 735 if (idx >= brtvd->bv_size) { 736 /* VDEV has been expanded. */ 737 rw_enter(&brtvd->bv_lock, RW_WRITER); 738 brt_vdev_realloc(spa, brtvd); 739 rw_exit(&brtvd->bv_lock); 740 } 741 742 ASSERT3U(idx, <, brtvd->bv_size); 743 744 brtvd->bv_totalcount++; 745 brt_vdev_entcount_inc(brtvd, idx); 746 brtvd->bv_entcount_dirty = TRUE; 747 idx = idx / BRT_BLOCKSIZE / 8; 748 BT_SET(brtvd->bv_bitmap, idx); 749 } 750 751 static void 752 brt_vdev_decref(spa_t *spa, brt_vdev_t *brtvd, const brt_entry_t *bre, 753 uint64_t dsize) 754 { 755 uint64_t idx; 756 757 ASSERT(RW_WRITE_HELD(&brtvd->bv_lock)); 758 ASSERT(brtvd->bv_initiated); 759 760 brtvd->bv_savedspace -= dsize; 761 brtvd->bv_meta_dirty = TRUE; 762 763 if (bre->bre_count > 0) 764 return; 765 766 brtvd->bv_usedspace -= dsize; 767 768 idx = BRE_OFFSET(bre) / spa->spa_brt_rangesize; 769 ASSERT3U(idx, <, brtvd->bv_size); 770 771 ASSERT(brtvd->bv_totalcount > 0); 772 brtvd->bv_totalcount--; 773 brt_vdev_entcount_dec(brtvd, idx); 774 brtvd->bv_entcount_dirty = TRUE; 775 idx = idx / BRT_BLOCKSIZE / 8; 776 BT_SET(brtvd->bv_bitmap, idx); 777 } 778 779 static void 780 brt_vdev_sync(spa_t *spa, brt_vdev_t *brtvd, dmu_tx_t *tx) 781 { 782 dmu_buf_t *db; 783 brt_vdev_phys_t *bvphys; 784 785 ASSERT(brtvd->bv_meta_dirty); 786 ASSERT(brtvd->bv_mos_brtvdev != 0); 787 ASSERT(dmu_tx_is_syncing(tx)); 788 789 VERIFY0(dmu_bonus_hold(spa->spa_meta_objset, brtvd->bv_mos_brtvdev, 790 FTAG, &db)); 791 792 if (brtvd->bv_entcount_dirty) { 793 /* 794 * TODO: Walk brtvd->bv_bitmap and write only the dirty blocks. 
795 */ 796 dmu_write(spa->spa_meta_objset, brtvd->bv_mos_brtvdev, 0, 797 brtvd->bv_size * sizeof (brtvd->bv_entcount[0]), 798 brtvd->bv_entcount, tx); 799 uint64_t nblocks = BRT_RANGESIZE_TO_NBLOCKS(brtvd->bv_size); 800 memset(brtvd->bv_bitmap, 0, BT_SIZEOFMAP(nblocks)); 801 brtvd->bv_entcount_dirty = FALSE; 802 } 803 804 dmu_buf_will_dirty(db, tx); 805 bvphys = db->db_data; 806 bvphys->bvp_mos_entries = brtvd->bv_mos_entries; 807 bvphys->bvp_size = brtvd->bv_size; 808 if (brtvd->bv_need_byteswap) { 809 bvphys->bvp_byteorder = BRT_NON_NATIVE_BYTEORDER; 810 } else { 811 bvphys->bvp_byteorder = BRT_NATIVE_BYTEORDER; 812 } 813 bvphys->bvp_totalcount = brtvd->bv_totalcount; 814 bvphys->bvp_rangesize = spa->spa_brt_rangesize; 815 bvphys->bvp_usedspace = brtvd->bv_usedspace; 816 bvphys->bvp_savedspace = brtvd->bv_savedspace; 817 dmu_buf_rele(db, FTAG); 818 819 brtvd->bv_meta_dirty = FALSE; 820 } 821 822 static void 823 brt_vdevs_free(spa_t *spa) 824 { 825 if (spa->spa_brt_vdevs == 0) 826 return; 827 for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) { 828 brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid]; 829 rw_enter(&brtvd->bv_lock, RW_WRITER); 830 if (brtvd->bv_initiated) 831 brt_vdev_dealloc(brtvd); 832 rw_exit(&brtvd->bv_lock); 833 rw_destroy(&brtvd->bv_lock); 834 if (brtvd->bv_mos_entries != 0) 835 dnode_rele(brtvd->bv_mos_entries_dnode, brtvd); 836 rw_destroy(&brtvd->bv_mos_entries_lock); 837 avl_destroy(&brtvd->bv_tree); 838 for (int i = 0; i < TXG_SIZE; i++) 839 avl_destroy(&brtvd->bv_pending_tree[i]); 840 mutex_destroy(&brtvd->bv_pending_lock); 841 kmem_free(brtvd, sizeof (*brtvd)); 842 } 843 kmem_free(spa->spa_brt_vdevs, sizeof (*spa->spa_brt_vdevs) * 844 spa->spa_brt_nvdevs); 845 } 846 847 static void 848 brt_entry_fill(const blkptr_t *bp, brt_entry_t *bre, uint64_t *vdevidp) 849 { 850 851 bre->bre_bp = *bp; 852 bre->bre_count = 0; 853 bre->bre_pcount = 0; 854 855 *vdevidp = DVA_GET_VDEV(&bp->blk_dva[0]); 856 } 857 858 static int 859 brt_entry_lookup(brt_vdev_t *brtvd, brt_entry_t *bre) 860 { 861 uint64_t off = BRE_OFFSET(bre); 862 863 if (brtvd->bv_mos_entries == 0) 864 return (SET_ERROR(ENOENT)); 865 866 return (zap_lookup_uint64_by_dnode(brtvd->bv_mos_entries_dnode, 867 &off, BRT_KEY_WORDS, 1, sizeof (bre->bre_count), &bre->bre_count)); 868 } 869 870 /* 871 * Return TRUE if we _can_ have BRT entry for this bp. It might be false 872 * positive, but gives us quick answer if we should look into BRT, which 873 * may require reads and thus will be more expensive. 874 */ 875 boolean_t 876 brt_maybe_exists(spa_t *spa, const blkptr_t *bp) 877 { 878 879 if (spa->spa_brt_nvdevs == 0) 880 return (B_FALSE); 881 882 uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[0]); 883 brt_vdev_t *brtvd = brt_vdev(spa, vdevid, B_FALSE); 884 if (brtvd == NULL || !brtvd->bv_initiated) 885 return (FALSE); 886 887 /* 888 * We don't need locks here, since bv_entcount pointer must be 889 * stable at this point, and we don't care about false positive 890 * races here, while false negative should be impossible, since 891 * all brt_vdev_addref() have already completed by this point. 
892 */ 893 uint64_t off = DVA_GET_OFFSET(&bp->blk_dva[0]); 894 return (brt_vdev_lookup(spa, brtvd, off)); 895 } 896 897 uint64_t 898 brt_get_dspace(spa_t *spa) 899 { 900 if (spa->spa_brt_nvdevs == 0) 901 return (0); 902 903 brt_rlock(spa); 904 uint64_t s = 0; 905 for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) 906 s += spa->spa_brt_vdevs[vdevid]->bv_savedspace; 907 brt_unlock(spa); 908 return (s); 909 } 910 911 uint64_t 912 brt_get_used(spa_t *spa) 913 { 914 if (spa->spa_brt_nvdevs == 0) 915 return (0); 916 917 brt_rlock(spa); 918 uint64_t s = 0; 919 for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) 920 s += spa->spa_brt_vdevs[vdevid]->bv_usedspace; 921 brt_unlock(spa); 922 return (s); 923 } 924 925 uint64_t 926 brt_get_saved(spa_t *spa) 927 { 928 return (brt_get_dspace(spa)); 929 } 930 931 uint64_t 932 brt_get_ratio(spa_t *spa) 933 { 934 uint64_t used = brt_get_used(spa); 935 if (used == 0) 936 return (100); 937 return ((used + brt_get_saved(spa)) * 100 / used); 938 } 939 940 static int 941 brt_kstats_update(kstat_t *ksp, int rw) 942 { 943 brt_stats_t *bs = ksp->ks_data; 944 945 if (rw == KSTAT_WRITE) 946 return (EACCES); 947 948 bs->brt_addref_entry_not_on_disk.value.ui64 = 949 wmsum_value(&brt_sums.brt_addref_entry_not_on_disk); 950 bs->brt_addref_entry_on_disk.value.ui64 = 951 wmsum_value(&brt_sums.brt_addref_entry_on_disk); 952 bs->brt_decref_entry_in_memory.value.ui64 = 953 wmsum_value(&brt_sums.brt_decref_entry_in_memory); 954 bs->brt_decref_entry_loaded_from_disk.value.ui64 = 955 wmsum_value(&brt_sums.brt_decref_entry_loaded_from_disk); 956 bs->brt_decref_entry_not_in_memory.value.ui64 = 957 wmsum_value(&brt_sums.brt_decref_entry_not_in_memory); 958 bs->brt_decref_entry_read_lost_race.value.ui64 = 959 wmsum_value(&brt_sums.brt_decref_entry_read_lost_race); 960 bs->brt_decref_entry_still_referenced.value.ui64 = 961 wmsum_value(&brt_sums.brt_decref_entry_still_referenced); 962 bs->brt_decref_free_data_later.value.ui64 = 963 wmsum_value(&brt_sums.brt_decref_free_data_later); 964 bs->brt_decref_free_data_now.value.ui64 = 965 wmsum_value(&brt_sums.brt_decref_free_data_now); 966 bs->brt_decref_no_entry.value.ui64 = 967 wmsum_value(&brt_sums.brt_decref_no_entry); 968 969 return (0); 970 } 971 972 static void 973 brt_stat_init(void) 974 { 975 976 wmsum_init(&brt_sums.brt_addref_entry_not_on_disk, 0); 977 wmsum_init(&brt_sums.brt_addref_entry_on_disk, 0); 978 wmsum_init(&brt_sums.brt_decref_entry_in_memory, 0); 979 wmsum_init(&brt_sums.brt_decref_entry_loaded_from_disk, 0); 980 wmsum_init(&brt_sums.brt_decref_entry_not_in_memory, 0); 981 wmsum_init(&brt_sums.brt_decref_entry_read_lost_race, 0); 982 wmsum_init(&brt_sums.brt_decref_entry_still_referenced, 0); 983 wmsum_init(&brt_sums.brt_decref_free_data_later, 0); 984 wmsum_init(&brt_sums.brt_decref_free_data_now, 0); 985 wmsum_init(&brt_sums.brt_decref_no_entry, 0); 986 987 brt_ksp = kstat_create("zfs", 0, "brtstats", "misc", KSTAT_TYPE_NAMED, 988 sizeof (brt_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 989 if (brt_ksp != NULL) { 990 brt_ksp->ks_data = &brt_stats; 991 brt_ksp->ks_update = brt_kstats_update; 992 kstat_install(brt_ksp); 993 } 994 } 995 996 static void 997 brt_stat_fini(void) 998 { 999 if (brt_ksp != NULL) { 1000 kstat_delete(brt_ksp); 1001 brt_ksp = NULL; 1002 } 1003 1004 wmsum_fini(&brt_sums.brt_addref_entry_not_on_disk); 1005 wmsum_fini(&brt_sums.brt_addref_entry_on_disk); 1006 wmsum_fini(&brt_sums.brt_decref_entry_in_memory); 1007 
wmsum_fini(&brt_sums.brt_decref_entry_loaded_from_disk); 1008 wmsum_fini(&brt_sums.brt_decref_entry_not_in_memory); 1009 wmsum_fini(&brt_sums.brt_decref_entry_read_lost_race); 1010 wmsum_fini(&brt_sums.brt_decref_entry_still_referenced); 1011 wmsum_fini(&brt_sums.brt_decref_free_data_later); 1012 wmsum_fini(&brt_sums.brt_decref_free_data_now); 1013 wmsum_fini(&brt_sums.brt_decref_no_entry); 1014 } 1015 1016 void 1017 brt_init(void) 1018 { 1019 brt_entry_cache = kmem_cache_create("brt_entry_cache", 1020 sizeof (brt_entry_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 1021 1022 brt_stat_init(); 1023 } 1024 1025 void 1026 brt_fini(void) 1027 { 1028 brt_stat_fini(); 1029 1030 kmem_cache_destroy(brt_entry_cache); 1031 } 1032 1033 /* Return TRUE if block should be freed immediately. */ 1034 boolean_t 1035 brt_entry_decref(spa_t *spa, const blkptr_t *bp) 1036 { 1037 brt_entry_t *bre, *racebre; 1038 brt_entry_t bre_search; 1039 avl_index_t where; 1040 uint64_t vdevid; 1041 int error; 1042 1043 brt_entry_fill(bp, &bre_search, &vdevid); 1044 1045 brt_vdev_t *brtvd = brt_vdev(spa, vdevid, B_FALSE); 1046 ASSERT(brtvd != NULL); 1047 1048 rw_enter(&brtvd->bv_lock, RW_WRITER); 1049 ASSERT(brtvd->bv_initiated); 1050 bre = avl_find(&brtvd->bv_tree, &bre_search, NULL); 1051 if (bre != NULL) { 1052 BRTSTAT_BUMP(brt_decref_entry_in_memory); 1053 goto out; 1054 } else { 1055 BRTSTAT_BUMP(brt_decref_entry_not_in_memory); 1056 } 1057 rw_exit(&brtvd->bv_lock); 1058 1059 error = brt_entry_lookup(brtvd, &bre_search); 1060 /* bre_search now contains correct bre_count */ 1061 if (error == ENOENT) { 1062 BRTSTAT_BUMP(brt_decref_no_entry); 1063 return (B_TRUE); 1064 } 1065 ASSERT0(error); 1066 1067 rw_enter(&brtvd->bv_lock, RW_WRITER); 1068 racebre = avl_find(&brtvd->bv_tree, &bre_search, &where); 1069 if (racebre != NULL) { 1070 /* The entry was added when the lock was dropped. 
*/ 1071 BRTSTAT_BUMP(brt_decref_entry_read_lost_race); 1072 bre = racebre; 1073 goto out; 1074 } 1075 1076 BRTSTAT_BUMP(brt_decref_entry_loaded_from_disk); 1077 bre = kmem_cache_alloc(brt_entry_cache, KM_SLEEP); 1078 bre->bre_bp = bre_search.bre_bp; 1079 bre->bre_count = bre_search.bre_count; 1080 bre->bre_pcount = 0; 1081 avl_insert(&brtvd->bv_tree, bre, where); 1082 1083 out: 1084 if (bre->bre_count == 0) { 1085 rw_exit(&brtvd->bv_lock); 1086 BRTSTAT_BUMP(brt_decref_free_data_now); 1087 return (B_TRUE); 1088 } 1089 1090 bre->bre_pcount--; 1091 ASSERT(bre->bre_count > 0); 1092 bre->bre_count--; 1093 if (bre->bre_count == 0) 1094 BRTSTAT_BUMP(brt_decref_free_data_later); 1095 else 1096 BRTSTAT_BUMP(brt_decref_entry_still_referenced); 1097 brt_vdev_decref(spa, brtvd, bre, bp_get_dsize_sync(spa, bp)); 1098 1099 rw_exit(&brtvd->bv_lock); 1100 1101 return (B_FALSE); 1102 } 1103 1104 uint64_t 1105 brt_entry_get_refcount(spa_t *spa, const blkptr_t *bp) 1106 { 1107 brt_entry_t bre_search, *bre; 1108 uint64_t vdevid, refcnt; 1109 int error; 1110 1111 brt_entry_fill(bp, &bre_search, &vdevid); 1112 1113 brt_vdev_t *brtvd = brt_vdev(spa, vdevid, B_FALSE); 1114 ASSERT(brtvd != NULL); 1115 1116 rw_enter(&brtvd->bv_lock, RW_READER); 1117 ASSERT(brtvd->bv_initiated); 1118 bre = avl_find(&brtvd->bv_tree, &bre_search, NULL); 1119 if (bre == NULL) { 1120 rw_exit(&brtvd->bv_lock); 1121 error = brt_entry_lookup(brtvd, &bre_search); 1122 if (error == ENOENT) { 1123 refcnt = 0; 1124 } else { 1125 ASSERT0(error); 1126 refcnt = bre_search.bre_count; 1127 } 1128 } else { 1129 refcnt = bre->bre_count; 1130 rw_exit(&brtvd->bv_lock); 1131 } 1132 1133 return (refcnt); 1134 } 1135 1136 static void 1137 brt_prefetch(brt_vdev_t *brtvd, const blkptr_t *bp) 1138 { 1139 if (!brt_zap_prefetch || brtvd->bv_mos_entries == 0) 1140 return; 1141 1142 uint64_t off = DVA_GET_OFFSET(&bp->blk_dva[0]); 1143 rw_enter(&brtvd->bv_mos_entries_lock, RW_READER); 1144 if (brtvd->bv_mos_entries != 0) { 1145 (void) zap_prefetch_uint64_by_dnode(brtvd->bv_mos_entries_dnode, 1146 &off, BRT_KEY_WORDS); 1147 } 1148 rw_exit(&brtvd->bv_mos_entries_lock); 1149 } 1150 1151 static int 1152 brt_entry_compare(const void *x1, const void *x2) 1153 { 1154 const brt_entry_t *bre1 = x1, *bre2 = x2; 1155 const blkptr_t *bp1 = &bre1->bre_bp, *bp2 = &bre2->bre_bp; 1156 1157 return (TREE_CMP(DVA_GET_OFFSET(&bp1->blk_dva[0]), 1158 DVA_GET_OFFSET(&bp2->blk_dva[0]))); 1159 } 1160 1161 void 1162 brt_pending_add(spa_t *spa, const blkptr_t *bp, dmu_tx_t *tx) 1163 { 1164 brt_entry_t *bre, *newbre; 1165 avl_index_t where; 1166 uint64_t txg; 1167 1168 txg = dmu_tx_get_txg(tx); 1169 ASSERT3U(txg, !=, 0); 1170 1171 uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[0]); 1172 brt_vdev_t *brtvd = brt_vdev(spa, vdevid, B_TRUE); 1173 avl_tree_t *pending_tree = &brtvd->bv_pending_tree[txg & TXG_MASK]; 1174 1175 newbre = kmem_cache_alloc(brt_entry_cache, KM_SLEEP); 1176 newbre->bre_bp = *bp; 1177 newbre->bre_count = 0; 1178 newbre->bre_pcount = 1; 1179 1180 mutex_enter(&brtvd->bv_pending_lock); 1181 bre = avl_find(pending_tree, newbre, &where); 1182 if (bre == NULL) { 1183 avl_insert(pending_tree, newbre, where); 1184 newbre = NULL; 1185 } else { 1186 bre->bre_pcount++; 1187 } 1188 mutex_exit(&brtvd->bv_pending_lock); 1189 1190 if (newbre != NULL) { 1191 ASSERT(bre != NULL); 1192 ASSERT(bre != newbre); 1193 kmem_cache_free(brt_entry_cache, newbre); 1194 } else { 1195 ASSERT0P(bre); 1196 1197 /* Prefetch BRT entry for the syncing context. 
*/ 1198 brt_prefetch(brtvd, bp); 1199 } 1200 } 1201 1202 void 1203 brt_pending_remove(spa_t *spa, const blkptr_t *bp, dmu_tx_t *tx) 1204 { 1205 brt_entry_t *bre, bre_search; 1206 uint64_t txg; 1207 1208 txg = dmu_tx_get_txg(tx); 1209 ASSERT3U(txg, !=, 0); 1210 1211 uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[0]); 1212 brt_vdev_t *brtvd = brt_vdev(spa, vdevid, B_FALSE); 1213 ASSERT(brtvd != NULL); 1214 avl_tree_t *pending_tree = &brtvd->bv_pending_tree[txg & TXG_MASK]; 1215 1216 bre_search.bre_bp = *bp; 1217 1218 mutex_enter(&brtvd->bv_pending_lock); 1219 bre = avl_find(pending_tree, &bre_search, NULL); 1220 ASSERT(bre != NULL); 1221 ASSERT(bre->bre_pcount > 0); 1222 bre->bre_pcount--; 1223 if (bre->bre_pcount == 0) 1224 avl_remove(pending_tree, bre); 1225 else 1226 bre = NULL; 1227 mutex_exit(&brtvd->bv_pending_lock); 1228 1229 if (bre) 1230 kmem_cache_free(brt_entry_cache, bre); 1231 } 1232 1233 static void 1234 brt_pending_apply_vdev(spa_t *spa, brt_vdev_t *brtvd, uint64_t txg) 1235 { 1236 brt_entry_t *bre, *nbre; 1237 1238 /* 1239 * We are in syncing context, so no other bv_pending_tree accesses 1240 * are possible for the TXG. So we don't need bv_pending_lock. 1241 */ 1242 ASSERT(avl_is_empty(&brtvd->bv_tree)); 1243 avl_swap(&brtvd->bv_tree, &brtvd->bv_pending_tree[txg & TXG_MASK]); 1244 1245 for (bre = avl_first(&brtvd->bv_tree); bre; bre = nbre) { 1246 nbre = AVL_NEXT(&brtvd->bv_tree, bre); 1247 1248 /* 1249 * If the block has DEDUP bit set, it means that it 1250 * already exists in the DEDUP table, so we can just 1251 * use that instead of creating new entry in the BRT. 1252 */ 1253 if (BP_GET_DEDUP(&bre->bre_bp)) { 1254 while (bre->bre_pcount > 0) { 1255 if (!ddt_addref(spa, &bre->bre_bp)) 1256 break; 1257 bre->bre_pcount--; 1258 } 1259 if (bre->bre_pcount == 0) { 1260 avl_remove(&brtvd->bv_tree, bre); 1261 kmem_cache_free(brt_entry_cache, bre); 1262 continue; 1263 } 1264 } 1265 1266 /* 1267 * Unless we know that the block is definitely not in ZAP, 1268 * try to get its reference count from there. 1269 */ 1270 uint64_t off = BRE_OFFSET(bre); 1271 if (brtvd->bv_mos_entries != 0 && 1272 brt_vdev_lookup(spa, brtvd, off)) { 1273 int error = zap_lookup_uint64_by_dnode( 1274 brtvd->bv_mos_entries_dnode, &off, 1275 BRT_KEY_WORDS, 1, sizeof (bre->bre_count), 1276 &bre->bre_count); 1277 if (error == 0) { 1278 BRTSTAT_BUMP(brt_addref_entry_on_disk); 1279 } else { 1280 ASSERT3U(error, ==, ENOENT); 1281 BRTSTAT_BUMP(brt_addref_entry_not_on_disk); 1282 } 1283 } 1284 } 1285 1286 /* 1287 * If all the cloned blocks we had were handled by DDT, we don't need 1288 * to initiate the vdev. 1289 */ 1290 if (avl_is_empty(&brtvd->bv_tree)) 1291 return; 1292 1293 if (!brtvd->bv_initiated) { 1294 rw_enter(&brtvd->bv_lock, RW_WRITER); 1295 brt_vdev_realloc(spa, brtvd); 1296 rw_exit(&brtvd->bv_lock); 1297 } 1298 1299 /* 1300 * Convert pending references into proper ones. This has to be a 1301 * separate loop, since entcount modifications would cause false 1302 * positives for brt_vdev_lookup() on following iterations. 
1303 */ 1304 for (bre = avl_first(&brtvd->bv_tree); bre; 1305 bre = AVL_NEXT(&brtvd->bv_tree, bre)) { 1306 brt_vdev_addref(spa, brtvd, bre, 1307 bp_get_dsize(spa, &bre->bre_bp), bre->bre_pcount); 1308 bre->bre_count += bre->bre_pcount; 1309 } 1310 } 1311 1312 void 1313 brt_pending_apply(spa_t *spa, uint64_t txg) 1314 { 1315 1316 brt_rlock(spa); 1317 for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) { 1318 brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid]; 1319 brt_unlock(spa); 1320 1321 brt_pending_apply_vdev(spa, brtvd, txg); 1322 1323 brt_rlock(spa); 1324 } 1325 brt_unlock(spa); 1326 } 1327 1328 static void 1329 brt_sync_entry(dnode_t *dn, brt_entry_t *bre, dmu_tx_t *tx) 1330 { 1331 uint64_t off = BRE_OFFSET(bre); 1332 1333 if (bre->bre_pcount == 0) { 1334 /* The net change is zero, nothing to do in ZAP. */ 1335 } else if (bre->bre_count == 0) { 1336 int error = zap_remove_uint64_by_dnode(dn, &off, 1337 BRT_KEY_WORDS, tx); 1338 VERIFY(error == 0 || error == ENOENT); 1339 } else { 1340 VERIFY0(zap_update_uint64_by_dnode(dn, &off, 1341 BRT_KEY_WORDS, 1, sizeof (bre->bre_count), 1342 &bre->bre_count, tx)); 1343 } 1344 } 1345 1346 static void 1347 brt_sync_table(spa_t *spa, dmu_tx_t *tx) 1348 { 1349 brt_entry_t *bre; 1350 1351 brt_rlock(spa); 1352 for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) { 1353 brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid]; 1354 brt_unlock(spa); 1355 1356 if (!brtvd->bv_meta_dirty) { 1357 ASSERT(!brtvd->bv_entcount_dirty); 1358 ASSERT0(avl_numnodes(&brtvd->bv_tree)); 1359 brt_rlock(spa); 1360 continue; 1361 } 1362 1363 ASSERT(!brtvd->bv_entcount_dirty || 1364 avl_numnodes(&brtvd->bv_tree) != 0); 1365 1366 if (brtvd->bv_mos_brtvdev == 0) 1367 brt_vdev_create(spa, brtvd, tx); 1368 1369 void *c = NULL; 1370 while ((bre = avl_destroy_nodes(&brtvd->bv_tree, &c)) != NULL) { 1371 brt_sync_entry(brtvd->bv_mos_entries_dnode, bre, tx); 1372 kmem_cache_free(brt_entry_cache, bre); 1373 } 1374 1375 #ifdef ZFS_DEBUG 1376 if (zfs_flags & ZFS_DEBUG_BRT) 1377 brt_vdev_dump(brtvd); 1378 #endif 1379 if (brtvd->bv_totalcount == 0) 1380 brt_vdev_destroy(spa, brtvd, tx); 1381 else 1382 brt_vdev_sync(spa, brtvd, tx); 1383 brt_rlock(spa); 1384 } 1385 brt_unlock(spa); 1386 } 1387 1388 void 1389 brt_sync(spa_t *spa, uint64_t txg) 1390 { 1391 dmu_tx_t *tx; 1392 uint64_t vdevid; 1393 1394 ASSERT3U(spa_syncing_txg(spa), ==, txg); 1395 1396 brt_rlock(spa); 1397 for (vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) { 1398 if (spa->spa_brt_vdevs[vdevid]->bv_meta_dirty) 1399 break; 1400 } 1401 if (vdevid >= spa->spa_brt_nvdevs) { 1402 brt_unlock(spa); 1403 return; 1404 } 1405 brt_unlock(spa); 1406 1407 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 1408 brt_sync_table(spa, tx); 1409 dmu_tx_commit(tx); 1410 } 1411 1412 static void 1413 brt_alloc(spa_t *spa) 1414 { 1415 rw_init(&spa->spa_brt_lock, NULL, RW_DEFAULT, NULL); 1416 spa->spa_brt_vdevs = NULL; 1417 spa->spa_brt_nvdevs = 0; 1418 spa->spa_brt_rangesize = 0; 1419 } 1420 1421 void 1422 brt_create(spa_t *spa) 1423 { 1424 brt_alloc(spa); 1425 spa->spa_brt_rangesize = BRT_RANGESIZE; 1426 } 1427 1428 int 1429 brt_load(spa_t *spa) 1430 { 1431 int error = 0; 1432 1433 brt_alloc(spa); 1434 brt_wlock(spa); 1435 for (uint64_t vdevid = 0; vdevid < spa->spa_root_vdev->vdev_children; 1436 vdevid++) { 1437 char name[64]; 1438 uint64_t mos_brtvdev; 1439 1440 /* Look if this vdev had active block cloning. 
*/ 1441 snprintf(name, sizeof (name), "%s%llu", BRT_OBJECT_VDEV_PREFIX, 1442 (u_longlong_t)vdevid); 1443 error = zap_lookup(spa->spa_meta_objset, 1444 DMU_POOL_DIRECTORY_OBJECT, name, sizeof (uint64_t), 1, 1445 &mos_brtvdev); 1446 if (error == ENOENT) { 1447 error = 0; 1448 continue; 1449 } 1450 if (error != 0) 1451 break; 1452 1453 /* If it did, then allocate them all and load this one. */ 1454 brt_vdevs_expand(spa, spa->spa_root_vdev->vdev_children); 1455 brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid]; 1456 rw_enter(&brtvd->bv_lock, RW_WRITER); 1457 brtvd->bv_mos_brtvdev = mos_brtvdev; 1458 error = brt_vdev_load(spa, brtvd); 1459 rw_exit(&brtvd->bv_lock); 1460 if (error != 0) 1461 break; 1462 } 1463 1464 if (spa->spa_brt_rangesize == 0) 1465 spa->spa_brt_rangesize = BRT_RANGESIZE; 1466 brt_unlock(spa); 1467 return (error); 1468 } 1469 1470 void 1471 brt_unload(spa_t *spa) 1472 { 1473 if (spa->spa_brt_rangesize == 0) 1474 return; 1475 brt_vdevs_free(spa); 1476 rw_destroy(&spa->spa_brt_lock); 1477 spa->spa_brt_rangesize = 0; 1478 } 1479 1480 ZFS_MODULE_PARAM(zfs_brt, , brt_zap_prefetch, INT, ZMOD_RW, 1481 "Enable prefetching of BRT ZAP entries"); 1482 ZFS_MODULE_PARAM(zfs_brt, , brt_zap_default_bs, UINT, ZMOD_RW, 1483 "BRT ZAP leaf blockshift"); 1484 ZFS_MODULE_PARAM(zfs_brt, , brt_zap_default_ibs, UINT, ZMOD_RW, 1485 "BRT ZAP indirect blockshift"); 1486
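
#if 0
/*
 * Illustrative sketch only (never compiled): how a consumer such as the
 * ZIO free path described in the design comment above is expected to use
 * the BRT API from this file.  The wrapper function name is hypothetical.
 */
static boolean_t
example_brt_should_free_now(spa_t *spa, const blkptr_t *bp)
{
	/*
	 * Cheap, in-memory check against the per-vdev entry counters
	 * array.  A false positive only costs us a BRT lookup below.
	 */
	if (!brt_maybe_exists(spa, bp))
		return (B_TRUE);

	/*
	 * The block may have been cloned: load the BRT entry (if any) and
	 * drop one reference.  Returns B_TRUE if the block should be freed
	 * immediately.
	 */
	return (brt_entry_decref(spa, bp));
}
#endif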