/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/zfeature.h>

/*
 * Note on space map block size:
 *
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer I/O operations, but they also cause the
 * DMU to keep more data in-core, and also to waste more I/O bandwidth
 * when only a few blocks have changed since the last transaction group.
 */

/*
 * Enabled whenever we want to stress test the use of double-word
 * space map entries.
 */
boolean_t zfs_force_some_double_word_sm_entries = B_FALSE;

/*
 * Override the default indirect block size of 128K, instead using 16K for
 * spacemaps (2^14 bytes). This dramatically reduces write inflation since
 * appending to a spacemap typically has to write one data block (4KB) and one
 * or two indirect blocks (16K-32K, rather than 128K).
 */
int space_map_ibs = 14;

boolean_t
sm_entry_is_debug(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM_DEBUG_PREFIX);
}

boolean_t
sm_entry_is_single_word(uint64_t e)
{
	uint8_t prefix = SM_PREFIX_DECODE(e);
	return (prefix != SM_DEBUG_PREFIX && prefix != SM2_PREFIX);
}

boolean_t
sm_entry_is_double_word(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM2_PREFIX);
}
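
/*
 * Quick reference on how the three entry kinds above are told apart
 * (field widths per the definitions in sys/space_map.h):
 *
 *   - single-word entries have their most-significant bit set to 0 and
 *     pack offset, type, and run into one 64-bit word;
 *   - debug entries start with the two-bit prefix 10 (SM_DEBUG_PREFIX);
 *   - double-word entries start with the two-bit prefix 11 (SM2_PREFIX).
 *
 * Worked example, assuming sm_shift = 9: the all-zero word e = 0 has
 * prefix 0, so it is a single-word entry with offset field 0, type
 * SM_ALLOC, and run field 0. Since SM_RUN_ENCODE() stores (run - 1),
 * a run field of 0 decodes to one sm_shift unit, i.e. an ALLOC entry
 * for the 512-byte range [sm_start, sm_start + 0x200). This is why a
 * zero word is a valid entry and cannot serve as a terminator [see
 * the comment in space_map_incremental_destroy()].
 */
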
/*
 * Iterate through the space map, invoking the callback on each (non-debug)
 * space map entry. Stop after reading 'end' bytes of the space map.
 */
int
space_map_iterate(space_map_t *sm, uint64_t end, sm_cb_t callback, void *arg)
{
	uint64_t blksz = sm->sm_blksz;

	ASSERT3U(blksz, !=, 0);
	ASSERT3U(end, <=, space_map_length(sm));
	ASSERT0(P2PHASE(end, sizeof (uint64_t)));

	dmu_prefetch(sm->sm_os, space_map_object(sm), 0, 0, end,
	    ZIO_PRIORITY_SYNC_READ);

	int error = 0;
	uint64_t txg = 0, sync_pass = 0;
	for (uint64_t block_base = 0; block_base < end && error == 0;
	    block_base += blksz) {
		dmu_buf_t *db;
		error = dmu_buf_hold(sm->sm_os, space_map_object(sm),
		    block_base, FTAG, &db, DMU_READ_PREFETCH);
		if (error != 0)
			return (error);

		uint64_t *block_start = db->db_data;
		uint64_t block_length = MIN(end - block_base, blksz);
		uint64_t *block_end = block_start +
		    (block_length / sizeof (uint64_t));

		VERIFY0(P2PHASE(block_length, sizeof (uint64_t)));
		VERIFY3U(block_length, !=, 0);
		ASSERT3U(blksz, ==, db->db_size);

		for (uint64_t *block_cursor = block_start;
		    block_cursor < block_end && error == 0; block_cursor++) {
			uint64_t e = *block_cursor;

			if (sm_entry_is_debug(e)) {
				/*
				 * Debug entries are only needed to record
				 * the current TXG and sync pass if
				 * available.
				 *
				 * Note though that sometimes there can be
				 * debug entries that are used as padding
				 * at the end of space map blocks in order
				 * to not split a double-word entry in the
				 * middle between two blocks. These entries
				 * have their TXG field set to 0 and we
				 * skip them without recording the TXG.
				 * [see comment in space_map_write_seg()]
				 */
				uint64_t e_txg = SM_DEBUG_TXG_DECODE(e);
				if (e_txg != 0) {
					txg = e_txg;
					sync_pass = SM_DEBUG_SYNCPASS_DECODE(e);
				} else {
					ASSERT0(SM_DEBUG_SYNCPASS_DECODE(e));
				}
				continue;
			}

			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				/* it is a two-word entry */
				ASSERT(sm_entry_is_double_word(e));
				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move on to the second word */
				block_cursor++;
				e = *block_cursor;
				VERIFY3P(block_cursor, <=, block_end);

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset = (raw_offset << sm->sm_shift) +
			    sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			ASSERT3U(entry_offset, >=, sm->sm_start);
			ASSERT3U(entry_offset, <, sm->sm_start + sm->sm_size);
			ASSERT3U(entry_run, <=, sm->sm_size);
			ASSERT3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run,
			    .sme_txg = txg,
			    .sme_sync_pass = sync_pass
			};
			error = callback(&sme, arg);
		}
		dmu_buf_rele(db, FTAG);
	}
	return (error);
}
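
/*
 * Illustrative sketch (not part of the original code): a minimal
 * sm_cb_t callback that tallies the bytes a space map reports as
 * allocated. The name count_alloc_cb and the uint64_t accumulator
 * are hypothetical; see space_map_load_callback() later in this
 * file for the real consumer used by space_map_load().
 *
 *	static int
 *	count_alloc_cb(space_map_entry_t *sme, void *arg)
 *	{
 *		uint64_t *bytes = arg;
 *
 *		if (sme->sme_type == SM_ALLOC)
 *			*bytes += sme->sme_run;
 *		return (0);
 *	}
 *
 * A caller would pass it as space_map_iterate(sm,
 * space_map_length(sm), count_alloc_cb, &bytes); returning nonzero
 * from the callback aborts the iteration.
 */
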
/*
 * Reads the entries from the last block of the space map into
 * buf in reverse order. Populates nwords with the number of words
 * in the last block.
 *
 * Refer to the block comment within space_map_incremental_destroy()
 * to understand why this function is needed.
 */
static int
space_map_reversed_last_block_entries(space_map_t *sm, uint64_t *buf,
    uint64_t bufsz, uint64_t *nwords)
{
	int error = 0;
	dmu_buf_t *db;

	/*
	 * Find the offset of the last word in the space map and use
	 * that to read the last block of the space map with
	 * dmu_buf_hold().
	 */
	uint64_t last_word_offset =
	    sm->sm_phys->smp_length - sizeof (uint64_t);
	error = dmu_buf_hold(sm->sm_os, space_map_object(sm), last_word_offset,
	    FTAG, &db, DMU_READ_NO_PREFETCH);
	if (error != 0)
		return (error);

	ASSERT3U(sm->sm_object, ==, db->db_object);
	ASSERT3U(sm->sm_blksz, ==, db->db_size);
	ASSERT3U(bufsz, >=, db->db_size);
	ASSERT(nwords != NULL);

	uint64_t *words = db->db_data;
	*nwords =
	    (sm->sm_phys->smp_length - db->db_offset) / sizeof (uint64_t);

	ASSERT3U(*nwords, <=, bufsz / sizeof (uint64_t));

	uint64_t n = *nwords;
	uint64_t j = n - 1;
	for (uint64_t i = 0; i < n; i++) {
		uint64_t entry = words[i];
		if (sm_entry_is_double_word(entry)) {
			/*
			 * Since we are populating the buffer backwards
			 * we have to be extra careful and add the two
			 * words of the double-word entry in the right
			 * order.
			 */
			ASSERT3U(j, >, 0);
			buf[j - 1] = entry;

			i++;
			ASSERT3U(i, <, n);
			entry = words[i];
			buf[j] = entry;
			j -= 2;
		} else {
			ASSERT(sm_entry_is_debug(entry) ||
			    sm_entry_is_single_word(entry));
			buf[j] = entry;
			j--;
		}
	}

	/*
	 * Assert that we wrote backwards all the
	 * way to the beginning of the buffer.
	 */
	ASSERT3S(j, ==, -1);

	dmu_buf_rele(db, FTAG);
	return (error);
}
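
/*
 * Worked example of the reversal above: suppose the last block holds
 * four words [S, D1, D2, T], where S and T are single-word entries
 * and (D1, D2) is one double-word entry. Walking i forward while
 * writing at j backward yields buf = [T, D1, D2, S]: the entries now
 * appear last-to-first, but the two words of the double-word entry
 * keep their original order, so the prefix word D1 still precedes D2.
 */
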
/*
 * Note: This function performs destructive actions - specifically
 * it deletes entries from the end of the space map. Thus, callers
 * should ensure that they are holding the appropriate locks for
 * the space map that they provide.
 */
int
space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
    dmu_tx_t *tx)
{
	uint64_t bufsz = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	uint64_t *buf = zio_buf_alloc(bufsz);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * Ideally we would want to iterate from the beginning of the
	 * space map to the end in incremental steps. The issue with this
	 * approach is that we don't have any field on-disk that points
	 * us where to start between each step. We could try zeroing out
	 * entries that we've destroyed, but this doesn't work either as
	 * an entry that is 0 is a valid one (ALLOC for range [0x0:0x200]).
	 *
	 * As a result, we destroy its entries incrementally starting from
	 * the end after applying the callback to each of them.
	 *
	 * The problem with this approach is that we cannot literally
	 * iterate through the words in the space map backwards as we
	 * can't distinguish two-word space map entries from their second
	 * word. Thus we do the following:
	 *
	 * 1] We get all the entries from the last block of the space map
	 *    and put them into a buffer in reverse order. This way the
	 *    last entry comes first in the buffer, the second to last is
	 *    second, etc.
	 * 2] We iterate through the entries in the buffer and we apply
	 *    the callback to each one. As we move from entry to entry we
	 *    decrease the size of the space map, effectively deleting
	 *    each entry.
	 * 3] If there are no more entries in the space map or the callback
	 *    returns a value other than 0, we stop iterating over the
	 *    space map. If there are entries remaining and the callback
	 *    returned 0, we go back to step [1].
	 */
	int error = 0;
	while (space_map_length(sm) > 0 && error == 0) {
		uint64_t nwords = 0;
		error = space_map_reversed_last_block_entries(sm, buf, bufsz,
		    &nwords);
		if (error != 0)
			break;

		ASSERT3U(nwords, <=, bufsz / sizeof (uint64_t));

		for (uint64_t i = 0; i < nwords; i++) {
			uint64_t e = buf[i];

			if (sm_entry_is_debug(e)) {
				sm->sm_phys->smp_length -= sizeof (uint64_t);
				continue;
			}

			int words = 1;
			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				ASSERT(sm_entry_is_double_word(e));
				words = 2;

				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move to the second word */
				i++;
				e = buf[i];

				ASSERT3U(i, <=, nwords);

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset =
			    (raw_offset << sm->sm_shift) + sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			VERIFY3U(entry_offset, >=, sm->sm_start);
			VERIFY3U(entry_offset, <, sm->sm_start + sm->sm_size);
			VERIFY3U(entry_run, <=, sm->sm_size);
			VERIFY3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run
			};
			error = callback(&sme, arg);
			if (error != 0)
				break;

			if (type == SM_ALLOC)
				sm->sm_phys->smp_alloc -= entry_run;
			else
				sm->sm_phys->smp_alloc += entry_run;
			sm->sm_phys->smp_length -= words * sizeof (uint64_t);
		}
	}

	if (space_map_length(sm) == 0) {
		ASSERT0(error);
		ASSERT0(space_map_allocated(sm));
	}

	zio_buf_free(buf, bufsz);
	return (error);
}

typedef struct space_map_load_arg {
	space_map_t *smla_sm;
	range_tree_t *smla_rt;
	maptype_t smla_type;
} space_map_load_arg_t;

static int
space_map_load_callback(space_map_entry_t *sme, void *arg)
{
	space_map_load_arg_t *smla = arg;
	if (sme->sme_type == smla->smla_type) {
		VERIFY3U(range_tree_space(smla->smla_rt) + sme->sme_run, <=,
		    smla->smla_sm->sm_size);
		range_tree_add(smla->smla_rt, sme->sme_offset, sme->sme_run);
	} else {
		range_tree_remove(smla->smla_rt, sme->sme_offset,
		    sme->sme_run);
	}

	return (0);
}
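
/*
 * Example of the load semantics implemented by the callback above,
 * assuming a space map covering [0x0, 0x10000) that contains the
 * entries ALLOC [0x1000, 0x3000) and then FREE [0x1000, 0x2000):
 *
 *   - loading with maptype SM_FREE starts from a range tree holding
 *     the entire [0x0, 0x10000) range [see space_map_load_length()
 *     below], removes the ALLOC segment, and re-adds the FREE one,
 *     ending with everything except [0x2000, 0x3000);
 *   - loading with maptype SM_ALLOC starts from an empty tree, adds
 *     the ALLOC segment, and removes the FREE one, ending with
 *     exactly [0x2000, 0x3000).
 */
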
/*
 * Load the space map into the range tree, like space_map_load(), but
 * only read the first 'length' bytes of the space map.
 */
int
space_map_load_length(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t length)
{
	space_map_load_arg_t smla;

	VERIFY0(range_tree_space(rt));

	if (maptype == SM_FREE)
		range_tree_add(rt, sm->sm_start, sm->sm_size);

	smla.smla_rt = rt;
	smla.smla_sm = sm;
	smla.smla_type = maptype;
	int err = space_map_iterate(sm, length,
	    space_map_load_callback, &smla);

	if (err != 0)
		range_tree_vacate(rt, NULL, NULL);

	return (err);
}

/*
 * Load the on-disk space map into the specified range tree. Segments of
 * maptype are added to the range tree, other segment types are removed.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	return (space_map_load_length(sm, rt, maptype, space_map_length(sm)));
}

void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}
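
/*
 * Example of the invariant above, assuming sm_shift = 9: all segments
 * tracked by the space map are multiples of 512 bytes, so range tree
 * buckets 2^0 through 2^8 must be empty. Consequently, in
 * space_map_histogram_add() below, rt_histogram[9] (512-byte
 * segments) feeds smp_histogram[0], rt_histogram[10] feeds
 * smp_histogram[1], and so on.
 */
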
void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));
	/*
	 * Transfer the content of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets
	 * ranging from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
	 * however, can represent ranges from 2^0 to 2^63. Since the space
	 * map only cares about allocatable blocks (minimum of sm_shift) we
	 * can safely ignore all ranges in the range tree smaller than
	 * sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. Assume
		 * the range tree had a count of 5 in the 2^44 (16TB) bucket,
		 * the calculation below would normalize this to
		 * 5 * 2^4 = 80.
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}

static void
space_map_write_intro_debug(space_map_t *sm, maptype_t maptype, dmu_tx_t *tx)
{
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	uint64_t dentry = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(tx->tx_pool->dp_spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	dmu_write(sm->sm_os, space_map_object(sm), sm->sm_phys->smp_length,
	    sizeof (dentry), &dentry, tx);

	sm->sm_phys->smp_length += sizeof (dentry);
}
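
/*
 * For example, writing the intro debug entry for an SM_ALLOC pass in
 * txg 100 during sync pass 1 produces one word carrying the
 * SM_DEBUG_PREFIX bits, action SM_ALLOC, syncpass 1, and the lower
 * bits of txg 100 in the TXG field (the field is narrower than 64
 * bits, so only the low-order txg bits are recorded).
 * space_map_iterate() later recognizes the prefix and restores the
 * txg/sync pass instead of treating the word as a segment.
 */
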
/*
 * Writes one or more entries given a segment.
 *
 * Note: The function may release the dbuf from the pointer initially
 * passed to it, and return a different dbuf. Also, the space map's
 * dbuf must be dirty for the changes in sm_phys to take effect.
 */
static void
space_map_write_seg(space_map_t *sm, uint64_t rstart, uint64_t rend,
    maptype_t maptype, uint64_t vdev_id, uint8_t words, dmu_buf_t **dbp,
    void *tag, dmu_tx_t *tx)
{
	ASSERT3U(words, !=, 0);
	ASSERT3U(words, <=, 2);

	/* ensure the vdev_id can be represented by the space map */
	ASSERT3U(vdev_id, <=, SM_NO_VDEVID);

	/*
	 * if this is a single word entry, ensure that no vdev was
	 * specified.
	 */
	IMPLY(words == 1, vdev_id == SM_NO_VDEVID);

	dmu_buf_t *db = *dbp;
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	uint64_t *block_base = db->db_data;
	uint64_t *block_end = block_base + (sm->sm_blksz / sizeof (uint64_t));
	uint64_t *block_cursor = block_base +
	    (sm->sm_phys->smp_length - db->db_offset) / sizeof (uint64_t);

	ASSERT3P(block_cursor, <=, block_end);

	uint64_t size = (rend - rstart) >> sm->sm_shift;
	uint64_t start = (rstart - sm->sm_start) >> sm->sm_shift;
	uint64_t run_max = (words == 2) ? SM2_RUN_MAX : SM_RUN_MAX;

	ASSERT3U(rstart, >=, sm->sm_start);
	ASSERT3U(rstart, <, sm->sm_start + sm->sm_size);
	ASSERT3U(rend - rstart, <=, sm->sm_size);
	ASSERT3U(rend, <=, sm->sm_start + sm->sm_size);

	while (size != 0) {
		ASSERT3P(block_cursor, <=, block_end);

		/*
		 * If we are at the end of this block, flush it and start
		 * writing again from the beginning.
		 */
		if (block_cursor == block_end) {
			dmu_buf_rele(db, tag);

			uint64_t next_word_offset = sm->sm_phys->smp_length;
			VERIFY0(dmu_buf_hold(sm->sm_os,
			    space_map_object(sm), next_word_offset,
			    tag, &db, DMU_READ_PREFETCH));
			dmu_buf_will_dirty(db, tx);

			/* update caller's dbuf */
			*dbp = db;

			ASSERT3U(db->db_size, ==, sm->sm_blksz);

			block_base = db->db_data;
			block_cursor = block_base;
			block_end = block_base +
			    (db->db_size / sizeof (uint64_t));
		}

		/*
		 * If we are writing a two-word entry and we only have one
		 * word left on this block, just pad it with an empty debug
		 * entry and write the two-word entry in the next block.
		 */
		uint64_t *next_entry = block_cursor + 1;
		if (next_entry == block_end && words > 1) {
			ASSERT3U(words, ==, 2);
			*block_cursor = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
			    SM_DEBUG_ACTION_ENCODE(0) |
			    SM_DEBUG_SYNCPASS_ENCODE(0) |
			    SM_DEBUG_TXG_ENCODE(0);
			block_cursor++;
			sm->sm_phys->smp_length += sizeof (uint64_t);
			ASSERT3P(block_cursor, ==, block_end);
			continue;
		}

		uint64_t run_len = MIN(size, run_max);
		switch (words) {
		case 1:
			*block_cursor = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);
			block_cursor++;
			break;
		case 2:
			/* write the first word of the entry */
			*block_cursor = SM_PREFIX_ENCODE(SM2_PREFIX) |
			    SM2_RUN_ENCODE(run_len) |
			    SM2_VDEV_ENCODE(vdev_id);
			block_cursor++;

			/* move on to the second word of the entry */
			ASSERT3P(block_cursor, <, block_end);
			*block_cursor = SM2_TYPE_ENCODE(maptype) |
			    SM2_OFFSET_ENCODE(start);
			block_cursor++;
			break;
		default:
			panic("%d-word space map entries are not supported",
			    words);
			break;
		}
		sm->sm_phys->smp_length += words * sizeof (uint64_t);

		start += run_len;
		size -= run_len;
	}
	ASSERT0(size);
}
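
/*
 * Two worked examples for the loop above, assuming a 4K block
 * (sm_blksz = 4096, i.e. 512 words):
 *
 *   - Padding: if the cursor sits on word 511 and a two-word entry
 *     must be written, word 511 is filled with an all-zero-field
 *     debug entry (TXG 0, which readers skip) and both words of the
 *     entry land at the start of the next block, so the entry is
 *     never split across blocks.
 *
 *   - Run splitting: a segment longer than run_max is emitted as
 *     multiple entries. E.g. with words == 1, where the run field
 *     holds at most SM_RUN_MAX units, a segment of 2 * SM_RUN_MAX
 *     units becomes two single-word entries with consecutive offsets.
 */
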
/*
 * Note: The space map's dbuf must be dirty for the changes in sm_phys to
 * take effect.
 */
static void
space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dmu_buf_t *db;

	space_map_write_intro_debug(sm, maptype, tx);

#ifdef ZFS_DEBUG
	/*
	 * We do this right after we write the intro debug entry
	 * because the estimate does not take it into account.
	 */
	uint64_t initial_objsize = sm->sm_phys->smp_length;
	uint64_t estimated_growth =
	    space_map_estimate_optimal_size(sm, rt, SM_NO_VDEVID);
	uint64_t estimated_final_objsize = initial_objsize + estimated_growth;
#endif

	/*
	 * Find the offset right after the last word in the space map
	 * and use that to get a hold of the last block, so we can
	 * start appending to it.
	 */
	uint64_t next_word_offset = sm->sm_phys->smp_length;
	VERIFY0(dmu_buf_hold(sm->sm_os, space_map_object(sm),
	    next_word_offset, FTAG, &db, DMU_READ_PREFETCH));
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	dmu_buf_will_dirty(db, tx);

	zfs_btree_t *t = &rt->rt_root;
	zfs_btree_index_t where;
	for (range_seg_t *rs = zfs_btree_first(t, &where); rs != NULL;
	    rs = zfs_btree_next(t, &where, &where)) {
		uint64_t offset = (rs_get_start(rs, rt) - sm->sm_start) >>
		    sm->sm_shift;
		uint64_t length = (rs_get_end(rs, rt) -
		    rs_get_start(rs, rt)) >> sm->sm_shift;
		uint8_t words = 1;

		/*
		 * We only write two-word entries when both of the following
		 * are true:
		 *
		 * [1] The feature is enabled.
		 * [2] The offset or run is too big for a single-word entry,
		 *     or the vdev_id is set (meaning not equal to
		 *     SM_NO_VDEVID).
		 *
		 * Note that for purposes of testing we've added the case that
		 * we write two-word entries occasionally when the feature is
		 * enabled and zfs_force_some_double_word_sm_entries has been
		 * set.
		 */
		if (spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    (offset >= (1ULL << SM_OFFSET_BITS) ||
		    length > SM_RUN_MAX ||
		    vdev_id != SM_NO_VDEVID ||
		    (zfs_force_some_double_word_sm_entries &&
		    random_in_range(100) == 0)))
			words = 2;

		space_map_write_seg(sm, rs_get_start(rs, rt), rs_get_end(rs,
		    rt), maptype, vdev_id, words, &db, FTAG, tx);
	}

	dmu_buf_rele(db, FTAG);

#ifdef ZFS_DEBUG
	/*
	 * We expect our estimation to be based on the worst case
	 * scenario [see comment in space_map_estimate_optimal_size()].
	 * Therefore we expect the actual objsize to be less than or
	 * equal to whatever we estimated it to be.
	 */
	ASSERT3U(estimated_final_objsize, >=, sm->sm_phys->smp_length);
#endif
}
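
/*
 * A concrete feel for the words == 2 decision above, assuming
 * sm_shift = 9 and the one-word field widths from sys/space_map.h
 * (47 offset bits, 15 run bits): a single-word entry can address
 * offsets up to 2^47 units (2^56 bytes) and runs up to SM_RUN_MAX
 * units (16MB), so most entries stay at one word. A segment with a
 * much larger run (say, 1GB), or any entry that must carry an
 * explicit vdev_id, is promoted to a two-word entry, whose 36-bit
 * run field (up to 32TB here) avoids splitting in practice.
 */
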
/*
 * Note: This function manipulates the state of the given space map but
 * does not hold any locks implicitly. Thus the caller is responsible
 * for synchronizing writes to the space map.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	ASSERT(dsl_pool_sync_context(dmu_objset_pool(sm->sm_os)));
	VERIFY3U(space_map_object(sm), !=, 0);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number, but it is maintained for
	 * backwards compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_is_empty(rt)) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	uint64_t nodes = zfs_btree_numnodes(&rt->rt_root);
	uint64_t rt_space = range_tree_space(rt);

	space_map_write_impl(sm, rt, maptype, vdev_id, tx);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, zfs_btree_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
}

static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_alloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_os = os;
	sm->sm_object = object;
	sm->sm_blksz = 0;
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}
	*smp = sm;

	return (0);
}

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}
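
/*
 * Typical lifecycle sketch (illustrative only; error handling and
 * locking elided, and the local variable names are hypothetical):
 *
 *	space_map_t *sm = NULL;
 *
 *	VERIFY0(space_map_open(&sm, os, smobj, start, size, shift));
 *	VERIFY0(space_map_load(sm, rt, SM_ALLOC));
 *	...
 *	space_map_close(sm);
 *
 * Note that space_map_open() expects *smp to be NULL on entry, and
 * space_map_close() both releases the bonus dbuf and frees the
 * space_map_t itself.
 */
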
void
space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != blocksize ||
	    doi.doi_metadata_block_size != 1 << space_map_ibs) {
		zfs_dbgmsg("txg %llu, spa %s, sm %px, reallocating "
		    "object[%llu]: old bonus %llu, old blocksz %u",
		    (u_longlong_t)dmu_tx_get_txg(tx), spa_name(spa), sm,
		    (u_longlong_t)sm->sm_object,
		    (u_longlong_t)doi.doi_bonus_size,
		    doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, blocksize, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL,
		    tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset. Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_length = 0;
	sm->sm_phys->smp_alloc = 0;
}

uint64_t
space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc_ibs(os, DMU_OT_SPACE_MAP, blocksize,
	    space_map_ibs, DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}

void
space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		VERIFY0(dmu_object_info(os, smobj, &doi));
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY0(dmu_object_free(os, smobj, tx));
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	if (sm == NULL)
		return;

	space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
	sm->sm_object = 0;
}

/*
 * Given a range tree, it makes a worst-case estimate of how much
 * space the tree's segments would take if they were written to
 * the given space map.
 */
uint64_t
space_map_estimate_optimal_size(space_map_t *sm, range_tree_t *rt,
    uint64_t vdev_id)
{
	spa_t *spa = dmu_objset_spa(sm->sm_os);
	uint64_t shift = sm->sm_shift;
	uint64_t *histogram = rt->rt_histogram;
	uint64_t entries_for_seg = 0;

	/*
	 * In order to get a quick estimate of the optimal size that this
	 * range tree would have on-disk as a space map, we iterate through
	 * its histogram buckets instead of iterating through its nodes.
	 *
	 * Note that this is a highest-bound/worst-case estimate for the
	 * following reasons:
	 *
	 * 1] We assume that we always add a debug padding for each block
	 *    we write and we also assume that we start at the last word
	 *    of a block attempting to write a two-word entry.
	 * 2] Rounding up errors due to the way segments are distributed
	 *    in the buckets of the range tree's histogram.
	 * 3] The activation of zfs_force_some_double_word_sm_entries
	 *    (tunable) when testing.
	 *
	 * = Math and Rounding Errors =
	 *
	 * The rt_histogram[i] bucket of a range tree represents the number
	 * of entries in [2^i, (2^(i+1))-1] of that range_tree. Given
	 * that, we want to divide the buckets into groups: buckets that
	 * can be represented using a single-word entry, ones that can
	 * be represented with a double-word entry, and ones that can
	 * only be represented with multiple two-word entries.
	 *
	 * [Note that if the new encoding feature is not enabled there
	 * are only two groups: single-word entry buckets and multiple
	 * single-word entry buckets.
	 * The information below assumes two-word entries are enabled,
	 * but it can easily be applied when the feature is not enabled.]
	 *
	 * To find the highest bucket that can be represented with a
	 * single-word entry we look at the maximum run that such entry
	 * can have, which is 2^(SM_RUN_BITS + sm_shift) [remember that
	 * the run of a space map entry is shifted by sm_shift, thus we
	 * add it to the exponent]. This way, excluding the value of the
	 * maximum run that can be represented by a single-word entry,
	 * all runs that are smaller exist in buckets 0 to
	 * SM_RUN_BITS + shift - 1.
	 *
	 * To find the highest bucket that can be represented with a
	 * double-word entry, we follow the same approach. Finally, any
	 * bucket higher than that is represented with multiple two-word
	 * entries. To be more specific, if the highest bucket whose
	 * segments can be represented with a single two-word entry is X,
	 * then bucket X+1 will need 2 two-word entries for each of its
	 * segments, X+2 will need 4, X+3 will need 8, ...etc.
	 *
	 * With all of the above we make our estimation based on bucket
	 * groups. There is a rounding error though. As we mentioned in
	 * the example with the one-word entry, the maximum run that can
	 * be represented in a one-word entry 2^(SM_RUN_BITS + shift) is
	 * not part of bucket SM_RUN_BITS + shift - 1. Thus, segments of
	 * that length fall into the next bucket (and bucket group) where
	 * we start counting two-word entries and this is one more reason
	 * why the estimated size may end up being bigger than the actual
	 * size written.
	 */
	uint64_t size = 0;
	uint64_t idx = 0;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) ||
	    (vdev_id == SM_NO_VDEVID && sm->sm_size < SM_OFFSET_MAX)) {

		/*
		 * If we are trying to force some double word entries just
		 * assume the worst-case of every single word entry being
		 * written as a double word entry.
		 */
		uint64_t entry_size =
		    (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    zfs_force_some_double_word_sm_entries) ?
		    (2 * sizeof (uint64_t)) : sizeof (uint64_t);

		uint64_t single_entry_max_bucket = SM_RUN_BITS + shift - 1;
		for (; idx <= single_entry_max_bucket; idx++)
			size += histogram[idx] * entry_size;

		if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2)) {
			for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
				ASSERT3U(idx, >=, single_entry_max_bucket);
				entries_for_seg =
				    1ULL << (idx - single_entry_max_bucket);
				size += histogram[idx] *
				    entries_for_seg * entry_size;
			}
			return (size);
		}
	}

	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2));

	uint64_t double_entry_max_bucket = SM2_RUN_BITS + shift - 1;
	for (; idx <= double_entry_max_bucket; idx++)
		size += histogram[idx] * 2 * sizeof (uint64_t);

	for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
		ASSERT3U(idx, >=, double_entry_max_bucket);
		entries_for_seg = 1ULL << (idx - double_entry_max_bucket);
		size += histogram[idx] *
		    entries_for_seg * 2 * sizeof (uint64_t);
	}

	/*
	 * Assume the worst case where we start with the padding at the end
	 * of the current block and we add an extra padding entry at the end
	 * of all subsequent blocks.
	 */
	size += ((size / sm->sm_blksz) + 1) * sizeof (uint64_t);

	return (size);
}
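
/*
 * Numeric example of the estimate above, assuming sm_shift = 9,
 * SM_RUN_BITS = 15, and SM2_RUN_BITS = 36: single-word entries cover
 * histogram buckets 0 through 23 (15 + 9 - 1) at 8 bytes each, and
 * two-word entries cover buckets up to 44 (36 + 9 - 1) at 16 bytes
 * each. A segment in bucket 46 would then be charged
 * 2^(46 - 44) = 4 two-word entries, i.e. 64 bytes. Finally, one
 * 8-byte padding word is charged per block the estimate spans, plus
 * one extra.
 */
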
uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

int64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_phys->smp_alloc : 0);
}

uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_phys->smp_length : 0);
}

uint64_t
space_map_nblocks(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	return (DIV_ROUND_UP(space_map_length(sm), sm->sm_blksz));
}