/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

/*
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer i/o operations, but they also cause the
 * DMU to keep more data in-core and to waste more i/o bandwidth
 * when only a few blocks have changed since the last transaction group.
 */
int space_map_blksz = (1 << 12);

/*
 * Load the space map from disk into the specified range tree. Segments of
 * maptype are added to the range tree; segments of other types are removed.
 *
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
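 *
 * Each non-debug entry decodes to a (type, offset, run) triple, with the
 * offset and run expressed in units of 1 << sm_shift and the offset taken
 * relative to sm_start. As an illustration (values chosen arbitrarily):
 * with sm_shift = 9 and sm_start = 0, an entry whose decoded offset field
 * is 0x100 and whose decoded run length is 8 describes the 8 << 9 = 4096
 * byte segment starting at 0x100 << 9 = 0x20000; it is added to the range
 * tree if its type matches maptype and removed from it otherwise.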
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end, space;
	int error = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	end = space_map_length(sm);
	space = space_map_allocated(sm);

	VERIFY0(range_tree_space(rt));

	if (maptype == SM_FREE) {
		range_tree_add(rt, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	entry_map = zio_buf_alloc(bufsize);

	mutex_exit(sm->sm_lock);
	if (end > bufsize) {
		dmu_prefetch(sm->sm_os, space_map_object(sm), 0, bufsize,
		    end - bufsize, ZIO_PRIORITY_SYNC_READ);
	}
	mutex_enter(sm->sm_lock);

	for (offset = 0; offset < end; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);
		ASSERT3U(sm->sm_blksz, !=, 0);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    space_map_object(sm), offset, size);

		mutex_exit(sm->sm_lock);
		error = dmu_read(sm->sm_os, space_map_object(sm), offset, size,
		    entry_map, DMU_READ_PREFETCH);
		mutex_enter(sm->sm_lock);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end; entry++) {
			uint64_t e = *entry;
			uint64_t offset, size;

			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
				continue;

			offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
			    sm->sm_start;
			size = SM_RUN_DECODE(e) << sm->sm_shift;

			VERIFY0(P2PHASE(offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(size, 1ULL << sm->sm_shift));
			VERIFY3U(offset, >=, sm->sm_start);
			VERIFY3U(offset + size, <=, sm->sm_start + sm->sm_size);
			if (SM_TYPE_DECODE(e) == maptype) {
				VERIFY3U(range_tree_space(rt) + size, <=,
				    sm->sm_size);
				range_tree_add(rt, offset, size);
			} else {
				range_tree_remove(rt, offset, size);
			}
		}
	}

	if (error == 0)
		VERIFY3U(range_tree_space(rt), ==, space);
	else
		range_tree_vacate(rt, NULL, NULL);

	zio_buf_free(entry_map, bufsize);
	return (error);
}

void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(MUTEX_HELD(rt->rt_lock));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));
	/*
	 * Transfer the content of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets ranging
	 * from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
	 * however, can represent ranges from 2^0 to 2^63. Since the space
	 * map only cares about allocatable blocks (minimum of sm_shift) we
	 * can safely ignore all ranges in the range tree smaller than sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. If the
		 * range tree had a count of 5 in the 2^44 (16TB) bucket,
		 * the calculation below would normalize this to 5 * 2^4
		 * (a factor of 16).
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}

uint64_t
space_map_entries(space_map_t *sm, range_tree_t *rt)
{
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, entries;

	/*
	 * All space maps have a debug entry, so account for it here.
	 */
	entries = 1;

	/*
	 * Traverse the range tree and calculate the number of space map
	 * entries that would be required to write out the range tree.
	 */
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		entries += howmany(size, SM_RUN_MAX);
	}
	return (entries);
}

/*
 * Note: space_map_write() will drop sm_lock across dmu_write() calls.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, total, rt_space, nodes;
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t expected_entries, actual_entries = 1;

	ASSERT(MUTEX_HELD(rt->rt_lock));
	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number but is maintained for backwards
	 * compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_space(rt) == 0) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	expected_entries = space_map_entries(sm, rt);

	entry_map = zio_buf_alloc(sm->sm_blksz);
	entry_map_end = entry_map + (sm->sm_blksz / sizeof (uint64_t));
	entry = entry_map;

	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	total = 0;
	nodes = avl_numnodes(&rt->rt_root);
	rt_space = range_tree_space(rt);
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t start;

		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;

		total += size << sm->sm_shift;

		while (size != 0) {
			uint64_t run_len;

			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				mutex_exit(rt->rt_lock);
				dmu_write(os, space_map_object(sm),
				    sm->sm_phys->smp_objsize, sm->sm_blksz,
				    entry_map, tx);
				mutex_enter(rt->rt_lock);
				sm->sm_phys->smp_objsize += sm->sm_blksz;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
			actual_entries++;
		}
	}

	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		mutex_exit(rt->rt_lock);
		dmu_write(os, space_map_object(sm), sm->sm_phys->smp_objsize,
		    size, entry_map, tx);
		mutex_enter(rt->rt_lock);
		sm->sm_phys->smp_objsize += size;
	}
	ASSERT3U(expected_entries, ==, actual_entries);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
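	 * (rt_lock is dropped across each dmu_write() call above, so the
	 * range tree could in principle have been modified while the buffer
	 * was being written; these checks would catch that.)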
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
	VERIFY3U(range_tree_space(rt), ==, total);

	zio_buf_free(entry_map, sm->sm_blksz);
}

static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift, kmutex_t *lp)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_lock = lp;
	sm->sm_os = os;
	sm->sm_object = object;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}

	*smp = sm;

	return (0);
}

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}

void
space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != space_map_blksz) {
		zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
		    "object[%llu]: old bonus %u, old blocksz %u",
		    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
		    doi.doi_bonus_size, doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset. Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_objsize = 0;
	sm->sm_phys->smp_alloc = 0;
}

/*
 * Update the in-core space_map allocation and length values.
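 * This copies the on-disk (phys) values smp_alloc and smp_objsize into the
 * in-core fields returned by space_map_allocated() and space_map_length().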
 */
void
space_map_update(space_map_t *sm)
{
	if (sm == NULL)
		return;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	sm->sm_alloc = sm->sm_phys->smp_alloc;
	sm->sm_length = sm->sm_phys->smp_objsize;
}

uint64_t
space_map_alloc(objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc(os,
	    DMU_OT_SPACE_MAP, space_map_blksz,
	    DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	spa_t *spa;

	if (sm == NULL)
		return;

	spa = dmu_objset_spa(sm->sm_os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		dmu_object_info_from_db(sm->sm_dbuf, &doi);
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			VERIFY(spa_feature_is_active(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM));
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY3U(dmu_object_free(sm->sm_os, space_map_object(sm), tx), ==, 0);
	sm->sm_object = 0;
}

uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

/*
 * Returns the already synced, on-disk allocated space.
 */
uint64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_alloc : 0);
}

/*
 * Returns the already synced, on-disk length.
 */
uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_length : 0);
}

/*
 * Returns the allocated space that is currently syncing.
 */
int64_t
space_map_alloc_delta(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	ASSERT(sm->sm_dbuf != NULL);
	return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}