/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/zfeature.h>

/*
 * Note on space map block size:
 *
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer I/O operations, but they also cause the
 * DMU to keep more data in-core, and also to waste more I/O bandwidth
 * when only a few blocks have changed since the last transaction group.
 */

/*
 * Enabled whenever we want to stress test the use of double-word
 * space map entries.
 */
boolean_t zfs_force_some_double_word_sm_entries = B_FALSE;

/*
 * Override the default indirect block size of 128K, instead using 16K for
 * spacemaps (2^14 bytes). This dramatically reduces write inflation since
 * appending to a spacemap typically has to write one data block (4KB) and one
 * or two indirect blocks (16K-32K, rather than 128K).
 */
int space_map_ibs = 14;
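
/*
 * Illustrative arithmetic (editor's sketch; the numbers follow from the
 * comment above): with the default 128K indirect blocks, appending a
 * single 4K data block to a large spacemap can dirty roughly
 * 4K + 128K-256K per txg. With 16K indirect blocks the same append
 * dirties roughly 4K + 16K-32K, i.e. an order of magnitude less write
 * inflation for small incremental updates.
 */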

boolean_t
sm_entry_is_debug(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM_DEBUG_PREFIX);
}

boolean_t
sm_entry_is_single_word(uint64_t e)
{
	uint8_t prefix = SM_PREFIX_DECODE(e);
	return (prefix != SM_DEBUG_PREFIX && prefix != SM2_PREFIX);
}

boolean_t
sm_entry_is_double_word(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM2_PREFIX);
}

/*
 * Iterate through the space map, invoking the callback on each (non-debug)
 * space map entry. Stop after reading 'end' bytes of the space map.
 */
int
space_map_iterate(space_map_t *sm, uint64_t end, sm_cb_t callback, void *arg)
{
	uint64_t blksz = sm->sm_blksz;

	ASSERT3U(blksz, !=, 0);
	ASSERT3U(end, <=, space_map_length(sm));
	ASSERT0(P2PHASE(end, sizeof (uint64_t)));

	dmu_prefetch(sm->sm_os, space_map_object(sm), 0, 0, end,
	    ZIO_PRIORITY_SYNC_READ);

	int error = 0;
	uint64_t txg = 0, sync_pass = 0;
	for (uint64_t block_base = 0; block_base < end && error == 0;
	    block_base += blksz) {
		dmu_buf_t *db;
		error = dmu_buf_hold(sm->sm_os, space_map_object(sm),
		    block_base, FTAG, &db, DMU_READ_PREFETCH);
		if (error != 0)
			return (error);

		uint64_t *block_start = db->db_data;
		uint64_t block_length = MIN(end - block_base, blksz);
		uint64_t *block_end = block_start +
		    (block_length / sizeof (uint64_t));

		VERIFY0(P2PHASE(block_length, sizeof (uint64_t)));
		VERIFY3U(block_length, !=, 0);
		ASSERT3U(blksz, ==, db->db_size);

		for (uint64_t *block_cursor = block_start;
		    block_cursor < block_end && error == 0; block_cursor++) {
			uint64_t e = *block_cursor;

			if (sm_entry_is_debug(e)) {
				/*
				 * Debug entries are only needed to record the
				 * current TXG and sync pass if available.
				 *
				 * Note though that sometimes there can be
				 * debug entries that are used as padding
				 * at the end of space map blocks in order
				 * not to split a double-word entry across
				 * two blocks. These entries have their TXG
				 * field set to 0 and we skip them without
				 * recording the TXG.
				 * [see comment in space_map_write_seg()]
				 */
				uint64_t e_txg = SM_DEBUG_TXG_DECODE(e);
				if (e_txg != 0) {
					txg = e_txg;
					sync_pass = SM_DEBUG_SYNCPASS_DECODE(e);
				} else {
					ASSERT0(SM_DEBUG_SYNCPASS_DECODE(e));
				}
				continue;
			}

			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				/* it is a two-word entry */
				ASSERT(sm_entry_is_double_word(e));
				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move on to the second word */
				block_cursor++;
				e = *block_cursor;
				VERIFY3P(block_cursor, <=, block_end);

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset = (raw_offset << sm->sm_shift) +
			    sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			ASSERT3U(entry_offset, >=, sm->sm_start);
			ASSERT3U(entry_offset, <, sm->sm_start + sm->sm_size);
			ASSERT3U(entry_run, <=, sm->sm_size);
			ASSERT3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
				.sme_type = type,
				.sme_vdev = vdev_id,
				.sme_offset = entry_offset,
				.sme_run = entry_run,
				.sme_txg = txg,
				.sme_sync_pass = sync_pass
			};
			error = callback(&sme, arg);
		}
		dmu_buf_rele(db, FTAG);
	}
	return (error);
}
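
/*
 * Illustrative usage (editor's sketch; the helper name is hypothetical):
 * a minimal sm_cb_t callback that tallies allocated space, driven by
 * space_map_iterate() over the whole map.
 *
 *	static int
 *	sm_alloc_tally_cb(space_map_entry_t *sme, void *arg)
 *	{
 *		uint64_t *alloc = arg;
 *
 *		if (sme->sme_type == SM_ALLOC)
 *			*alloc += sme->sme_run;
 *		else
 *			*alloc -= sme->sme_run;
 *		return (0);
 *	}
 *
 *	uint64_t alloc = 0;
 *	int err = space_map_iterate(sm, space_map_length(sm),
 *	    sm_alloc_tally_cb, &alloc);
 */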

/*
 * Reads the entries from the last block of the space map into
 * buf in reverse order. Populates nwords with the number of words
 * in the last block.
 *
 * Refer to the block comment within space_map_incremental_destroy()
 * to understand why this function is needed.
 */
static int
space_map_reversed_last_block_entries(space_map_t *sm, uint64_t *buf,
    uint64_t bufsz, uint64_t *nwords)
{
	int error = 0;
	dmu_buf_t *db;

	/*
	 * Find the offset of the last word in the space map and use
	 * that to read the last block of the space map with
	 * dmu_buf_hold().
	 */
	uint64_t last_word_offset =
	    sm->sm_phys->smp_length - sizeof (uint64_t);
	error = dmu_buf_hold(sm->sm_os, space_map_object(sm), last_word_offset,
	    FTAG, &db, DMU_READ_NO_PREFETCH);
	if (error != 0)
		return (error);

	ASSERT3U(sm->sm_object, ==, db->db_object);
	ASSERT3U(sm->sm_blksz, ==, db->db_size);
	ASSERT3U(bufsz, >=, db->db_size);
	ASSERT(nwords != NULL);

	uint64_t *words = db->db_data;
	*nwords =
	    (sm->sm_phys->smp_length - db->db_offset) / sizeof (uint64_t);

	ASSERT3U(*nwords, <=, bufsz / sizeof (uint64_t));

	uint64_t n = *nwords;
	uint64_t j = n - 1;
	for (uint64_t i = 0; i < n; i++) {
		uint64_t entry = words[i];
		if (sm_entry_is_double_word(entry)) {
			/*
			 * Since we are populating the buffer backwards
			 * we have to be extra careful and add the two
			 * words of the double-word entry in the right
			 * order.
			 */
			ASSERT3U(j, >, 0);
			buf[j - 1] = entry;

			i++;
			ASSERT3U(i, <, n);
			entry = words[i];
			buf[j] = entry;
			j -= 2;
		} else {
			ASSERT(sm_entry_is_debug(entry) ||
			    sm_entry_is_single_word(entry));
			buf[j] = entry;
			j--;
		}
	}

	/*
	 * Assert that we wrote backwards all the
	 * way to the beginning of the buffer.
	 */
	ASSERT3S(j, ==, -1);

	dmu_buf_rele(db, FTAG);
	return (error);
}
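
/*
 * Worked example (editor's illustration): suppose the last block holds
 * the words [ A, B0, B1, C ], where A and C are one-word entries and
 * (B0, B1) is a double-word entry. The reversed buffer produced above
 * is [ C, B0, B1, A ]: entry order is reversed, but the two words of
 * the double-word entry keep their original relative order, so B0
 * (the prefix word) is still read before B1.
 */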

/*
 * Note: This function performs destructive actions - specifically
 * it deletes entries from the end of the space map. Thus, callers
 * should ensure that they are holding the appropriate locks for
 * the space map that they provide.
 */
int
space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
    dmu_tx_t *tx)
{
	uint64_t bufsz = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	uint64_t *buf = zio_buf_alloc(bufsz);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * Ideally we would want to iterate from the beginning of the
	 * space map to the end in incremental steps. The issue with this
	 * approach is that we don't have any field on-disk that points
	 * us where to start between each step. We could try zeroing out
	 * entries that we've destroyed, but this doesn't work either as
	 * an entry that is 0 is a valid one (ALLOC for range [0x0:0x200]).
	 *
	 * As a result, we destroy the space map's entries incrementally
	 * starting from the end after applying the callback to each of
	 * them.
	 *
	 * The problem with this approach is that we cannot literally
	 * iterate through the words in the space map backwards as we
	 * can't distinguish two-word space map entries from their second
	 * word. Thus we do the following:
	 *
	 * 1] We get all the entries from the last block of the space map
	 *    and put them into a buffer in reverse order. This way the
	 *    last entry comes first in the buffer, the second to last is
	 *    second, etc.
	 * 2] We iterate through the entries in the buffer and we apply
	 *    the callback to each one. As we move from entry to entry we
	 *    decrease the size of the space map, effectively deleting
	 *    each entry.
	 * 3] If there are no more entries in the space map or the callback
	 *    returns a value other than 0, we stop iterating over the
	 *    space map. If there are entries remaining and the callback
	 *    returned 0, we go back to step [1].
	 */
	int error = 0;
	while (space_map_length(sm) > 0 && error == 0) {
		uint64_t nwords = 0;
		error = space_map_reversed_last_block_entries(sm, buf, bufsz,
		    &nwords);
		if (error != 0)
			break;

		ASSERT3U(nwords, <=, bufsz / sizeof (uint64_t));

		for (uint64_t i = 0; i < nwords; i++) {
			uint64_t e = buf[i];

			if (sm_entry_is_debug(e)) {
				sm->sm_phys->smp_length -= sizeof (uint64_t);
				continue;
			}

			int words = 1;
			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				ASSERT(sm_entry_is_double_word(e));
				words = 2;

				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move to the second word */
				i++;
				e = buf[i];

				ASSERT3U(i, <=, nwords);

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset =
			    (raw_offset << sm->sm_shift) + sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			VERIFY3U(entry_offset, >=, sm->sm_start);
			VERIFY3U(entry_offset, <, sm->sm_start + sm->sm_size);
			VERIFY3U(entry_run, <=, sm->sm_size);
			VERIFY3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
				.sme_type = type,
				.sme_vdev = vdev_id,
				.sme_offset = entry_offset,
				.sme_run = entry_run
			};
			error = callback(&sme, arg);
			if (error != 0)
				break;

			if (type == SM_ALLOC)
				sm->sm_phys->smp_alloc -= entry_run;
			else
				sm->sm_phys->smp_alloc += entry_run;
			sm->sm_phys->smp_length -= words * sizeof (uint64_t);
		}
	}

	if (space_map_length(sm) == 0) {
		ASSERT0(error);
		ASSERT0(space_map_allocated(sm));
	}

	zio_buf_free(buf, bufsz);
	return (error);
}

typedef struct space_map_load_arg {
	space_map_t *smla_sm;
	zfs_range_tree_t *smla_rt;
	maptype_t smla_type;
} space_map_load_arg_t;

static int
space_map_load_callback(space_map_entry_t *sme, void *arg)
{
	space_map_load_arg_t *smla = arg;
	if (sme->sme_type == smla->smla_type) {
		VERIFY3U(zfs_range_tree_space(smla->smla_rt) + sme->sme_run,
		    <=, smla->smla_sm->sm_size);
		zfs_range_tree_add(smla->smla_rt, sme->sme_offset,
		    sme->sme_run);
	} else {
		zfs_range_tree_remove(smla->smla_rt, sme->sme_offset,
		    sme->sme_run);
	}

	return (0);
}
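
/*
 * Worked example (editor's illustration): when loading with maptype
 * SM_FREE, space_map_load_length() below pre-fills the range tree with
 * the map's entire [sm_start, sm_start + sm_size) range and then
 * replays the log: ALLOC entries punch holes out of the tree and FREE
 * entries add space back. For a map covering [0x0, 0x1000) whose log
 * is a single "ALLOC [0x100, 0x200)" entry, loading SM_FREE yields the
 * segments [0x0, 0x100) and [0x200, 0x1000).
 */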

/*
 * Load the spacemap into the rangetree, like space_map_load(), but only
 * read the first 'length' bytes of the spacemap.
 */
int
space_map_load_length(space_map_t *sm, zfs_range_tree_t *rt, maptype_t maptype,
    uint64_t length)
{
	space_map_load_arg_t smla;

	VERIFY0(zfs_range_tree_space(rt));

	if (maptype == SM_FREE)
		zfs_range_tree_add(rt, sm->sm_start, sm->sm_size);

	smla.smla_rt = rt;
	smla.smla_sm = sm;
	smla.smla_type = maptype;
	int err = space_map_iterate(sm, length,
	    space_map_load_callback, &smla);

	if (err != 0)
		zfs_range_tree_vacate(rt, NULL, NULL);

	return (err);
}

/*
 * Load the on-disk space map into the specified range tree. Segments of
 * maptype are added to the range tree, other segment types are removed.
 */
int
space_map_load(space_map_t *sm, zfs_range_tree_t *rt, maptype_t maptype)
{
	return (space_map_load_length(sm, rt, maptype, space_map_length(sm)));
}

void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	memset(sm->sm_phys->smp_histogram, 0,
	    sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, zfs_range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}
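
/*
 * Worked example for the bucket mapping below (editor's illustration):
 * with sm_shift = 9, range tree bucket i = 9 (512-byte segments) maps
 * to space map bucket idx = 0, i = 10 to idx = 1, and so on up to
 * i = 40 mapping to the last bucket, idx = 31. Segments in buckets
 * i > 40 are folded into idx = 31, scaled by 2^(i - 40): a count of 5
 * in bucket i = 44 contributes 5 * 2^4 = 80 to bucket 31.
 */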

void
space_map_histogram_add(space_map_t *sm, zfs_range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));
	/*
	 * Transfer the content of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets
	 * ranging from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
	 * however, can represent ranges from 2^0 to 2^63. Since the space
	 * map only cares about allocatable blocks (minimum of sm_shift) we
	 * can safely ignore all ranges in the range tree smaller than
	 * 2^sm_shift.
	 */
	for (int i = sm->sm_shift; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. Assume
		 * the range tree had a count of 5 in the 2^44 (16TB)
		 * bucket; the calculation below would normalize this to
		 * 5 * 2^4 = 80.
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}

static void
space_map_write_intro_debug(space_map_t *sm, maptype_t maptype, dmu_tx_t *tx)
{
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	uint64_t dentry = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(tx->tx_pool->dp_spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	dmu_write(sm->sm_os, space_map_object(sm), sm->sm_phys->smp_length,
	    sizeof (dentry), &dentry, tx);

	sm->sm_phys->smp_length += sizeof (dentry);
}
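
/*
 * Illustrative sketch (editor's note): the intro debug entry written
 * above packs the action, sync pass, and txg into one word, which
 * space_map_iterate() later recovers with the matching decoders, e.g.:
 *
 *	ASSERT3U(SM_PREFIX_DECODE(dentry), ==, SM_DEBUG_PREFIX);
 *	uint64_t txg = SM_DEBUG_TXG_DECODE(dentry);
 *	uint64_t pass = SM_DEBUG_SYNCPASS_DECODE(dentry);
 */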

/*
 * Writes one or more entries given a segment.
 *
 * Note: The function may release the dbuf from the pointer initially
 * passed to it, and return a different dbuf. Also, the space map's
 * dbuf must be dirty for the changes in sm_phys to take effect.
 */
static void
space_map_write_seg(space_map_t *sm, uint64_t rstart, uint64_t rend,
    maptype_t maptype, uint64_t vdev_id, uint8_t words, dmu_buf_t **dbp,
    const void *tag, dmu_tx_t *tx)
{
	ASSERT3U(words, !=, 0);
	ASSERT3U(words, <=, 2);

	/* ensure the vdev_id can be represented by the space map */
	ASSERT3U(vdev_id, <=, SM_NO_VDEVID);

	/*
	 * If this is a single word entry, ensure that no vdev was
	 * specified.
	 */
	IMPLY(words == 1, vdev_id == SM_NO_VDEVID);

	dmu_buf_t *db = *dbp;
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	uint64_t *block_base = db->db_data;
	uint64_t *block_end = block_base + (sm->sm_blksz / sizeof (uint64_t));
	uint64_t *block_cursor = block_base +
	    (sm->sm_phys->smp_length - db->db_offset) / sizeof (uint64_t);

	ASSERT3P(block_cursor, <=, block_end);

	uint64_t size = (rend - rstart) >> sm->sm_shift;
	uint64_t start = (rstart - sm->sm_start) >> sm->sm_shift;
	uint64_t run_max = (words == 2) ? SM2_RUN_MAX : SM_RUN_MAX;

	ASSERT3U(rstart, >=, sm->sm_start);
	ASSERT3U(rstart, <, sm->sm_start + sm->sm_size);
	ASSERT3U(rend - rstart, <=, sm->sm_size);
	ASSERT3U(rend, <=, sm->sm_start + sm->sm_size);

	while (size != 0) {
		ASSERT3P(block_cursor, <=, block_end);

		/*
		 * If we are at the end of this block, flush it and start
		 * writing again from the beginning.
		 */
		if (block_cursor == block_end) {
			dmu_buf_rele(db, tag);

			uint64_t next_word_offset = sm->sm_phys->smp_length;
			VERIFY0(dmu_buf_hold(sm->sm_os,
			    space_map_object(sm), next_word_offset,
			    tag, &db, DMU_READ_PREFETCH));
			dmu_buf_will_dirty(db, tx);

			/* update caller's dbuf */
			*dbp = db;

			ASSERT3U(db->db_size, ==, sm->sm_blksz);

			block_base = db->db_data;
			block_cursor = block_base;
			block_end = block_base +
			    (db->db_size / sizeof (uint64_t));
		}

		/*
		 * If we are writing a two-word entry and we only have one
		 * word left on this block, just pad it with an empty debug
		 * entry and write the two-word entry in the next block.
		 */
		uint64_t *next_entry = block_cursor + 1;
		if (next_entry == block_end && words > 1) {
			ASSERT3U(words, ==, 2);
			*block_cursor = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
			    SM_DEBUG_ACTION_ENCODE(0) |
			    SM_DEBUG_SYNCPASS_ENCODE(0) |
			    SM_DEBUG_TXG_ENCODE(0);
			block_cursor++;
			sm->sm_phys->smp_length += sizeof (uint64_t);
			ASSERT3P(block_cursor, ==, block_end);
			continue;
		}

		uint64_t run_len = MIN(size, run_max);
		switch (words) {
		case 1:
			*block_cursor = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);
			block_cursor++;
			break;
		case 2:
			/* write the first word of the entry */
			*block_cursor = SM_PREFIX_ENCODE(SM2_PREFIX) |
			    SM2_RUN_ENCODE(run_len) |
			    SM2_VDEV_ENCODE(vdev_id);
			block_cursor++;

			/* move on to the second word of the entry */
			ASSERT3P(block_cursor, <, block_end);
			*block_cursor = SM2_TYPE_ENCODE(maptype) |
			    SM2_OFFSET_ENCODE(start);
			block_cursor++;
			break;
		default:
			panic("%d-word space map entries are not supported",
			    words);
			break;
		}
		sm->sm_phys->smp_length += words * sizeof (uint64_t);

		start += run_len;
		size -= run_len;
	}
	ASSERT0(size);
}
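
/*
 * Worked example (editor's illustration; run limits follow from
 * SM_RUN_BITS/SM2_RUN_BITS in sys/space_map.h): with sm_shift = 9 a
 * single-word entry can express a run of at most SM_RUN_MAX << 9
 * (2^15 * 512 = 16M). A 40M segment written with words == 1 is thus
 * emitted by the loop above as three entries with runs of 16M, 16M,
 * and 8M, all sharing the same maptype and covering consecutive
 * offsets.
 */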

/*
 * Note: The space map's dbuf must be dirty for the changes in sm_phys to
 * take effect.
 */
static void
space_map_write_impl(space_map_t *sm, zfs_range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dmu_buf_t *db;

	space_map_write_intro_debug(sm, maptype, tx);

#ifdef ZFS_DEBUG
	/*
	 * We do this right after we write the intro debug entry
	 * because the estimate does not take it into account.
	 */
	uint64_t initial_objsize = sm->sm_phys->smp_length;
	uint64_t estimated_growth =
	    space_map_estimate_optimal_size(sm, rt, SM_NO_VDEVID);
	uint64_t estimated_final_objsize = initial_objsize + estimated_growth;
#endif

	/*
	 * Find the offset right after the last word in the space map
	 * and use that to get a hold of the last block, so we can
	 * start appending to it.
	 */
	uint64_t next_word_offset = sm->sm_phys->smp_length;
	VERIFY0(dmu_buf_hold(sm->sm_os, space_map_object(sm),
	    next_word_offset, FTAG, &db, DMU_READ_PREFETCH));
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	dmu_buf_will_dirty(db, tx);

	zfs_btree_t *t = &rt->rt_root;
	zfs_btree_index_t where;
	for (zfs_range_seg_t *rs = zfs_btree_first(t, &where); rs != NULL;
	    rs = zfs_btree_next(t, &where, &where)) {
		uint64_t offset = (zfs_rs_get_start(rs, rt) - sm->sm_start) >>
		    sm->sm_shift;
		uint64_t length = (zfs_rs_get_end(rs, rt) -
		    zfs_rs_get_start(rs, rt)) >> sm->sm_shift;
		uint8_t words = 1;

		/*
		 * We only write two-word entries when both of the following
		 * are true:
		 *
		 * [1] The feature is enabled.
		 * [2] The offset or run is too big for a single-word entry,
		 *     or the vdev_id is set (meaning not equal to
		 *     SM_NO_VDEVID).
		 *
		 * Note that for purposes of testing we've added the case that
		 * we write two-word entries occasionally when the feature is
		 * enabled and zfs_force_some_double_word_sm_entries has been
		 * set.
		 */
		if (spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    (offset >= (1ULL << SM_OFFSET_BITS) ||
		    length > SM_RUN_MAX ||
		    vdev_id != SM_NO_VDEVID ||
		    (zfs_force_some_double_word_sm_entries &&
		    random_in_range(100) == 0)))
			words = 2;

		space_map_write_seg(sm, zfs_rs_get_start(rs, rt),
		    zfs_rs_get_end(rs, rt), maptype, vdev_id, words, &db,
		    FTAG, tx);
	}

	dmu_buf_rele(db, FTAG);

#ifdef ZFS_DEBUG
	/*
	 * We expect our estimation to be based on the worst case
	 * scenario [see comment in space_map_estimate_optimal_size()].
	 * Therefore we expect the actual objsize to be equal to or less
	 * than whatever we estimated it to be.
	 */
	ASSERT3U(estimated_final_objsize, >=, sm->sm_phys->smp_length);
#endif
}
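
/*
 * Illustrative usage of space_map_write() [defined below] (editor's
 * sketch, loosely modeled on how sync contexts drive this file; the
 * range tree names are hypothetical): writing out a txg's allocations
 * and frees:
 *
 *	space_map_write(sm, allocs_rt, SM_ALLOC, SM_NO_VDEVID, tx);
 *	space_map_write(sm, frees_rt, SM_FREE, SM_NO_VDEVID, tx);
 *
 * Log space maps pass a real vdev id instead of SM_NO_VDEVID, which
 * forces two-word entries [see space_map_write_impl() above].
 */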

/*
 * Note: This function manipulates the state of the given space map but
 * does not hold any locks implicitly. Thus the caller is responsible
 * for synchronizing writes to the space map.
 */
void
space_map_write(space_map_t *sm, zfs_range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	ASSERT(dsl_pool_sync_context(dmu_objset_pool(sm->sm_os)));
	VERIFY3U(space_map_object(sm), !=, 0);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number but is maintained for backwards
	 * compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (zfs_range_tree_is_empty(rt)) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += zfs_range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= zfs_range_tree_space(rt);

	uint64_t nodes = zfs_btree_numnodes(&rt->rt_root);
	uint64_t rt_space = zfs_range_tree_space(rt);

	space_map_write_impl(sm, rt, maptype, vdev_id, tx);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, zfs_btree_numnodes(&rt->rt_root));
	VERIFY3U(zfs_range_tree_space(rt), ==, rt_space);
}

static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_alloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_os = os;
	sm->sm_object = object;
	sm->sm_blksz = 0;
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}
	*smp = sm;

	return (0);
}

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}
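
/*
 * Illustrative lifecycle (editor's sketch; 'smobj', 'asize', and
 * 'ashift' are hypothetical locals): a space map is opened against an
 * existing object, used, and closed:
 *
 *	space_map_t *sm = NULL;
 *	int err = space_map_open(&sm, os, smobj, 0, asize, ashift);
 *	if (err == 0) {
 *		err = space_map_load(sm, rt, SM_FREE);
 *		space_map_close(sm);
 *	}
 *
 * Note that space_map_close() only drops the bonus buffer hold and
 * frees the in-core structure; it does not touch the on-disk object.
 */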

void
space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != blocksize ||
	    doi.doi_metadata_block_size != 1 << space_map_ibs) {
		zfs_dbgmsg("txg %llu, spa %s, sm %px, reallocating "
		    "object[%llu]: old bonus %llu, old blocksz %u",
		    (u_longlong_t)dmu_tx_get_txg(tx), spa_name(spa), sm,
		    (u_longlong_t)sm->sm_object,
		    (u_longlong_t)doi.doi_bonus_size,
		    doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, blocksize, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset. Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		memset(sm->sm_phys->smp_histogram, 0,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_length = 0;
	sm->sm_phys->smp_alloc = 0;
}

uint64_t
space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc_ibs(os, DMU_OT_SPACE_MAP, blocksize,
	    space_map_ibs, DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}

void
space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		VERIFY0(dmu_object_info(os, smobj, &doi));
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY0(dmu_object_free(os, smobj, tx));
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	if (sm == NULL)
		return;

	space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
	sm->sm_object = 0;
}
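
/*
 * Editor's note on the pairing above: space_map_alloc() bumps the
 * SPA_FEATURE_SPACEMAP_HISTOGRAM refcount when it allocates an object
 * with the larger bonus buffer, and space_map_free_obj() decrements it
 * only for objects whose bonus size shows they were created that way,
 * so the feature refcount tracks exactly the set of histogram-capable
 * space map objects in the pool.
 */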

/*
 * Given a range tree, it makes a worst-case estimate of how much
 * space the tree's segments would take if they were written to
 * the given space map.
 */
uint64_t
space_map_estimate_optimal_size(space_map_t *sm, zfs_range_tree_t *rt,
    uint64_t vdev_id)
{
	spa_t *spa = dmu_objset_spa(sm->sm_os);
	uint64_t shift = sm->sm_shift;
	uint64_t *histogram = rt->rt_histogram;
	uint64_t entries_for_seg = 0;

	/*
	 * In order to get a quick estimate of the optimal size that this
	 * range tree would have on-disk as a space map, we iterate through
	 * its histogram buckets instead of iterating through its nodes.
	 *
	 * Note that this is a highest-bound/worst-case estimate for the
	 * following reasons:
	 *
	 * 1] We assume that we always add a debug padding for each block
	 *    we write and we also assume that we start at the last word
	 *    of a block attempting to write a two-word entry.
	 * 2] Rounding up errors due to the way segments are distributed
	 *    in the buckets of the range tree's histogram.
	 * 3] The activation of zfs_force_some_double_word_sm_entries
	 *    (tunable) when testing.
	 *
	 * = Math and Rounding Errors =
	 *
	 * The rt_histogram[i] bucket of a range tree represents the number
	 * of entries in [2^i, (2^(i+1))-1] of that range_tree. Given
	 * that, we want to divide the buckets into groups: buckets that
	 * can be represented using a single-word entry, ones that can
	 * be represented with a double-word entry, and ones that can
	 * only be represented with multiple two-word entries.
	 *
	 * [Note that if the new encoding feature is not enabled there
	 * are only two groups: single-word entry buckets and multiple
	 * single-word entry buckets. The information below assumes
	 * two-word entries are enabled, but it can easily be applied
	 * when the feature is not enabled.]
	 *
	 * To find the highest bucket that can be represented with a
	 * single-word entry we look at the maximum run that such entry
	 * can have, which is 2^(SM_RUN_BITS + sm_shift) [remember that
	 * the run of a space map entry is shifted by sm_shift, thus we
	 * add it to the exponent]. This way, excluding the value of the
	 * maximum run that can be represented by a single-word entry,
	 * all runs that are smaller exist in buckets 0 to
	 * SM_RUN_BITS + shift - 1.
	 *
	 * To find the highest bucket that can be represented with a
	 * double-word entry, we follow the same approach. Finally, any
	 * buckets higher than that are represented with multiple two-word
	 * entries. To be more specific, if the highest bucket whose
	 * segments can be represented with a single two-word entry is X,
	 * then bucket X+1 will need 2 two-word entries for each of its
	 * segments, X+2 will need 4, X+3 will need 8, ...etc.
	 *
	 * With all of the above we make our estimation based on bucket
	 * groups. There is a rounding error though. As we mentioned in
	 * the example with the one-word entry, the maximum run that can
	 * be represented in a one-word entry, 2^(SM_RUN_BITS + shift), is
	 * not part of bucket SM_RUN_BITS + shift - 1. Thus, segments of
	 * that length fall into the next bucket (and bucket group) where
	 * we start counting two-word entries and this is one more reason
	 * why the estimated size may end up being bigger than the actual
	 * size written.
	 */
	uint64_t size = 0;
	uint64_t idx = 0;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) ||
	    (vdev_id == SM_NO_VDEVID && sm->sm_size < SM_OFFSET_MAX)) {

		/*
		 * If we are trying to force some double word entries just
		 * assume the worst-case of every single word entry being
		 * written as a double word entry.
		 */
		uint64_t entry_size =
		    (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    zfs_force_some_double_word_sm_entries) ?
		    (2 * sizeof (uint64_t)) : sizeof (uint64_t);

		uint64_t single_entry_max_bucket = SM_RUN_BITS + shift - 1;
		for (; idx <= single_entry_max_bucket; idx++)
			size += histogram[idx] * entry_size;

		if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2)) {
			for (; idx < ZFS_RANGE_TREE_HISTOGRAM_SIZE; idx++) {
				ASSERT3U(idx, >=, single_entry_max_bucket);
				entries_for_seg =
				    1ULL << (idx - single_entry_max_bucket);
				size += histogram[idx] *
				    entries_for_seg * entry_size;
			}
			return (size);
		}
	}

	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2));

	uint64_t double_entry_max_bucket = SM2_RUN_BITS + shift - 1;
	for (; idx <= double_entry_max_bucket; idx++)
		size += histogram[idx] * 2 * sizeof (uint64_t);

	for (; idx < ZFS_RANGE_TREE_HISTOGRAM_SIZE; idx++) {
		ASSERT3U(idx, >=, double_entry_max_bucket);
		entries_for_seg = 1ULL << (idx - double_entry_max_bucket);
		size += histogram[idx] *
		    entries_for_seg * 2 * sizeof (uint64_t);
	}

	/*
	 * Assume the worst case where we start with the padding at the end
	 * of the current block and we add an extra padding entry at the end
	 * of all subsequent blocks.
	 */
	size += ((size / sm->sm_blksz) + 1) * sizeof (uint64_t);

	return (size);
}
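
/*
 * Worked example (editor's illustration; bit widths per
 * sys/space_map.h): with shift = 9, buckets 0 through
 * SM_RUN_BITS + 9 - 1 = 23 cost one word per segment (two if the
 * double-word path is taken), buckets up to SM2_RUN_BITS + 9 - 1 = 44
 * cost one two-word entry per segment, and bucket 45 costs two
 * two-word entries per segment, bucket 46 four, doubling from there.
 * The final adjustment above then charges one padding word per block
 * in the worst case.
 */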

uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

int64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_phys->smp_alloc : 0);
}

uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_phys->smp_length : 0);
}

uint64_t
space_map_nblocks(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	return (DIV_ROUND_UP(space_map_length(sm), sm->sm_blksz));
}