/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/refcount.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>

/*
 * Maximum number of metaslabs per group that can be initialized
 * simultaneously.
 */
int max_initialize_ms = 3;

/*
 * Value that is written to disk during initialization.
 */
uint64_t zfs_initialize_value = 0xdeadbeefdeadbeefULL;

/* maximum number of I/Os outstanding per leaf vdev */
int zfs_initialize_limit = 1;

/* size of initializing writes; default 1MiB, see zfs_remove_max_segment */
uint64_t zfs_initialize_chunk_size = 1024 * 1024;

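/*
 * Illustrative note (not from the original source): with the defaults above,
 * each leaf vdev keeps at most one 1 MiB initializing write in flight at a
 * time, and at most three metaslabs per metaslab group are withheld from
 * allocation while they are being initialized. Raising zfs_initialize_limit
 * or zfs_initialize_chunk_size would trade faster initialization for more
 * I/O pressure on the pool.
 */
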
static boolean_t
vdev_initialize_should_stop(vdev_t *vd)
{
	return (vd->vdev_initialize_exit_wanted || !vdev_writeable(vd) ||
	    vd->vdev_detached || vd->vdev_top->vdev_removing);
}

static void
vdev_initialize_zap_update_sync(void *arg, dmu_tx_t *tx)
{
	/*
	 * We pass in the guid instead of the vdev_t since the vdev may
	 * have been freed prior to the sync task being processed. This
	 * happens when a vdev is detached as we call spa_config_vdev_exit(),
	 * stop the initializing thread, schedule the sync task, and free
	 * the vdev. Later when the scheduled sync task is invoked, it would
	 * find that the vdev has been freed.
	 */
	uint64_t guid = *(uint64_t *)arg;
	uint64_t txg = dmu_tx_get_txg(tx);
	kmem_free(arg, sizeof (uint64_t));

	vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
	if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
		return;

	uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];
	vd->vdev_initialize_offset[txg & TXG_MASK] = 0;

	VERIFY(vd->vdev_leaf_zap != 0);

	objset_t *mos = vd->vdev_spa->spa_meta_objset;

	if (last_offset > 0) {
		vd->vdev_initialize_last_offset = last_offset;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
		    sizeof (last_offset), 1, &last_offset, tx));
	}
	if (vd->vdev_initialize_action_time > 0) {
		uint64_t val = (uint64_t)vd->vdev_initialize_action_time;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, sizeof (val),
		    1, &val, tx));
	}

	uint64_t initialize_state = vd->vdev_initialize_state;
	VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_STATE, sizeof (initialize_state), 1,
	    &initialize_state, tx));
}

static void
vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	spa_t *spa = vd->vdev_spa;

	if (new_state == vd->vdev_initialize_state)
		return;

	/*
	 * Copy the vd's guid; it will be freed by the sync task.
	 */
	uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
	*guid = vd->vdev_guid;

	/*
	 * If we're suspending, then preserve the original start time.
	 */
	if (vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED) {
		vd->vdev_initialize_action_time = gethrestime_sec();
	}
	vd->vdev_initialize_state = new_state;

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	dsl_sync_task_nowait(spa_get_dsl(spa), vdev_initialize_zap_update_sync,
	    guid, 2, ZFS_SPACE_CHECK_RESERVED, tx);

	switch (new_state) {
	case VDEV_INITIALIZE_ACTIVE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s activated", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_SUSPENDED:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s suspended", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_CANCELED:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s canceled", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_COMPLETE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s complete", vd->vdev_path);
		break;
	default:
		panic("invalid state %llu", (unsigned long long)new_state);
	}

	dmu_tx_commit(tx);
}

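/*
 * Illustrative note (not from the original source): the transitions driven by
 * vdev_initialize_change_state() roughly follow
 *
 *	NONE -> ACTIVE -> {SUSPENDED <-> ACTIVE}* -> COMPLETE | CANCELED
 *
 * and each transition is made durable by the vdev_initialize_zap_update_sync
 * sync task, which records the state (and, when set, the last offset and
 * action time) in the leaf vdev's ZAP.
 */
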
static void
vdev_initialize_cb(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	mutex_enter(&vd->vdev_initialize_io_lock);
	if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
		/*
		 * The I/O failed because the vdev was unavailable; roll the
		 * last offset back. (This works because spa_sync waits on
		 * spa_txg_zio before it runs sync tasks.)
		 */
		uint64_t *off =
		    &vd->vdev_initialize_offset[zio->io_txg & TXG_MASK];
		*off = MIN(*off, zio->io_offset);
	} else {
		/*
		 * Since initializing is best-effort, we ignore I/O errors and
		 * rely on vdev_probe to determine if the errors are more
		 * critical.
		 */
		if (zio->io_error != 0)
			vd->vdev_stat.vs_initialize_errors++;

		vd->vdev_initialize_bytes_done += zio->io_orig_size;
	}
	ASSERT3U(vd->vdev_initialize_inflight, >, 0);
	vd->vdev_initialize_inflight--;
	cv_broadcast(&vd->vdev_initialize_io_cv);
	mutex_exit(&vd->vdev_initialize_io_lock);

	spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/* Takes care of physical writing and limiting # of concurrent ZIOs. */
static int
vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
{
	spa_t *spa = vd->vdev_spa;

	/* Limit inflight initializing I/Os */
	mutex_enter(&vd->vdev_initialize_io_lock);
	while (vd->vdev_initialize_inflight >= zfs_initialize_limit) {
		cv_wait(&vd->vdev_initialize_io_cv,
		    &vd->vdev_initialize_io_lock);
	}
	vd->vdev_initialize_inflight++;
	mutex_exit(&vd->vdev_initialize_io_lock);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);

	spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
	mutex_enter(&vd->vdev_initialize_lock);

	if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) {
		uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
		*guid = vd->vdev_guid;

		/* This is the first write of this txg. */
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_initialize_zap_update_sync, guid, 2,
		    ZFS_SPACE_CHECK_RESERVED, tx);
	}

	/*
	 * We know the vdev struct will still be around since all
	 * consumers of vdev_free must stop the initialization first.
	 */
	if (vdev_initialize_should_stop(vd)) {
		mutex_enter(&vd->vdev_initialize_io_lock);
		ASSERT3U(vd->vdev_initialize_inflight, >, 0);
		vd->vdev_initialize_inflight--;
		mutex_exit(&vd->vdev_initialize_io_lock);
		spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
		mutex_exit(&vd->vdev_initialize_lock);
		dmu_tx_commit(tx);
		return (SET_ERROR(EINTR));
	}
	mutex_exit(&vd->vdev_initialize_lock);

	vd->vdev_initialize_offset[txg & TXG_MASK] = start + size;
	zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start,
	    size, data, ZIO_CHECKSUM_OFF, vdev_initialize_cb, NULL,
	    ZIO_PRIORITY_INITIALIZING, ZIO_FLAG_CANFAIL, B_FALSE));
	/* vdev_initialize_cb releases SCL_STATE_ALL */

	dmu_tx_commit(tx);

	return (0);
}

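/*
 * Illustrative note (not from the original source): for each txg,
 * vdev_initialize_write() advances vdev_initialize_offset[txg & TXG_MASK] to
 * the end of the range it is about to write and schedules one sync task for
 * that txg. If the write later fails with ENXIO, vdev_initialize_cb() rolls
 * the per-txg offset back to the failed zio's offset, so the last offset
 * persisted by the sync task never claims more than was actually written.
 */
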
/*
 * Translate a logical range to the physical range for the specified vdev_t.
 * This function is initially called with a leaf vdev and will walk each
 * parent vdev until it reaches a top-level vdev. Once the top-level is
 * reached the physical range is initialized and the recursive function
 * begins to unwind. As it unwinds it calls the parent's vdev specific
 * translation function to do the real conversion.
 */
void
vdev_xlate(vdev_t *vd, const range_seg_t *logical_rs, range_seg_t *physical_rs)
{
	/*
	 * Walk up the vdev tree
	 */
	if (vd != vd->vdev_top) {
		vdev_xlate(vd->vdev_parent, logical_rs, physical_rs);
	} else {
		/*
		 * We've reached the top-level vdev, initialize the
		 * physical range to the logical range and start to
		 * unwind.
		 */
		physical_rs->rs_start = logical_rs->rs_start;
		physical_rs->rs_end = logical_rs->rs_end;
		return;
	}

	vdev_t *pvd = vd->vdev_parent;
	ASSERT3P(pvd, !=, NULL);
	ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);

	/*
	 * As this recursive function unwinds, translate the logical
	 * range into its physical components by calling the
	 * vdev specific translate function.
	 */
	range_seg_t intermediate = { 0 };
	pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate);

	physical_rs->rs_start = intermediate.rs_start;
	physical_rs->rs_end = intermediate.rs_end;
}

/*
 * Callback to fill each ABD chunk with zfs_initialize_value. len must be
 * divisible by sizeof (uint64_t), and buf must be 8-byte aligned. The ABD
 * allocation will guarantee these for us.
 */
/* ARGSUSED */
static int
vdev_initialize_block_fill(void *buf, size_t len, void *unused)
{
	ASSERT0(len % sizeof (uint64_t));
	for (uint64_t i = 0; i < len; i += sizeof (uint64_t)) {
		*(uint64_t *)((char *)(buf) + i) = zfs_initialize_value;
	}
	return (0);
}

static abd_t *
vdev_initialize_block_alloc(void)
{
	/* Allocate ABD for filler data */
	abd_t *data = abd_alloc_for_io(zfs_initialize_chunk_size, B_FALSE);

	ASSERT0(zfs_initialize_chunk_size % sizeof (uint64_t));
	(void) abd_iterate_func(data, 0, zfs_initialize_chunk_size,
	    vdev_initialize_block_fill, NULL);

	return (data);
}

static void
vdev_initialize_block_free(abd_t *data)
{
	abd_free(data);
}

static int
vdev_initialize_ranges(vdev_t *vd, abd_t *data)
{
	avl_tree_t *rt = &vd->vdev_initialize_tree->rt_root;

	for (range_seg_t *rs = avl_first(rt); rs != NULL;
	    rs = AVL_NEXT(rt, rs)) {
		uint64_t size = rs->rs_end - rs->rs_start;

		/* Split range into legally-sized physical chunks */
		uint64_t writes_required =
		    ((size - 1) / zfs_initialize_chunk_size) + 1;

		for (uint64_t w = 0; w < writes_required; w++) {
			int error;

			error = vdev_initialize_write(vd,
			    VDEV_LABEL_START_SIZE + rs->rs_start +
			    (w * zfs_initialize_chunk_size),
			    MIN(size - (w * zfs_initialize_chunk_size),
			    zfs_initialize_chunk_size), data);
			if (error != 0)
				return (error);
		}
	}
	return (0);
}

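/*
 * Illustrative example (not from the original source): with the default
 * 1 MiB chunk size, a 2.5 MiB segment is split into
 * ((2.5 MiB - 1) / 1 MiB) + 1 = 3 writes of 1 MiB, 1 MiB and 0.5 MiB,
 * each offset by VDEV_LABEL_START_SIZE so the writes land past the label
 * area at the start of the device.
 */
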
static void
vdev_initialize_ms_load(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	metaslab_load_wait(msp);
	if (!msp->ms_loaded)
		VERIFY0(metaslab_load(msp));
}

static void
vdev_initialize_mg_wait(metaslab_group_t *mg)
{
	ASSERT(MUTEX_HELD(&mg->mg_ms_initialize_lock));
	while (mg->mg_initialize_updating) {
		cv_wait(&mg->mg_ms_initialize_cv, &mg->mg_ms_initialize_lock);
	}
}

static void
vdev_initialize_mg_mark(metaslab_group_t *mg)
{
	ASSERT(MUTEX_HELD(&mg->mg_ms_initialize_lock));
	ASSERT(mg->mg_initialize_updating);

	while (mg->mg_ms_initializing >= max_initialize_ms) {
		cv_wait(&mg->mg_ms_initialize_cv, &mg->mg_ms_initialize_lock);
	}
	mg->mg_ms_initializing++;
	ASSERT3U(mg->mg_ms_initializing, <=, max_initialize_ms);
}

/*
 * Mark the metaslab as being initialized to prevent any allocations
 * on this metaslab. We must also track how many metaslabs are currently
 * being initialized within a metaslab group and limit them to prevent
 * allocation failures from occurring because all metaslabs are being
 * initialized.
 */
static void
vdev_initialize_ms_mark(metaslab_t *msp)
{
	ASSERT(!MUTEX_HELD(&msp->ms_lock));
	metaslab_group_t *mg = msp->ms_group;

	mutex_enter(&mg->mg_ms_initialize_lock);

	/*
	 * To keep an accurate count of how many threads are initializing
	 * a specific metaslab group, we only allow one thread to mark
	 * the metaslab group at a time. This ensures that the value of
	 * ms_initializing will be accurate when we decide to mark a metaslab
	 * group as being initialized. To do this we force all other threads
	 * to wait until the metaslab group's mg_initialize_updating flag is
	 * no longer set.
	 */
	vdev_initialize_mg_wait(mg);
	mg->mg_initialize_updating = B_TRUE;
	if (msp->ms_initializing == 0) {
		vdev_initialize_mg_mark(mg);
	}
	mutex_enter(&msp->ms_lock);
	msp->ms_initializing++;
	mutex_exit(&msp->ms_lock);

	mg->mg_initialize_updating = B_FALSE;
	cv_broadcast(&mg->mg_ms_initialize_cv);
	mutex_exit(&mg->mg_ms_initialize_lock);
}

static void
vdev_initialize_ms_unmark(metaslab_t *msp)
{
	ASSERT(!MUTEX_HELD(&msp->ms_lock));
	metaslab_group_t *mg = msp->ms_group;
	mutex_enter(&mg->mg_ms_initialize_lock);
	mutex_enter(&msp->ms_lock);
	if (--msp->ms_initializing == 0) {
		mg->mg_ms_initializing--;
		cv_broadcast(&mg->mg_ms_initialize_cv);
	}
	mutex_exit(&msp->ms_lock);
	mutex_exit(&mg->mg_ms_initialize_lock);
}

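/*
 * Illustrative note (not from the original source): with the default
 * max_initialize_ms of 3, a fourth initializing thread that needs to mark a
 * metaslab in the same metaslab group blocks in vdev_initialize_mg_mark()
 * until one of the other threads calls vdev_initialize_ms_unmark() and drops
 * mg_ms_initializing below the limit.
 */
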
static void
vdev_initialize_calculate_progress(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
	    spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
	ASSERT(vd->vdev_leaf_zap != 0);

	vd->vdev_initialize_bytes_est = 0;
	vd->vdev_initialize_bytes_done = 0;

	for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_top->vdev_ms[i];
		mutex_enter(&msp->ms_lock);

		uint64_t ms_free = msp->ms_size -
		    space_map_allocated(msp->ms_sm);

		if (vd->vdev_top->vdev_ops == &vdev_raidz_ops)
			ms_free /= vd->vdev_top->vdev_children;

		/*
		 * Convert the metaslab range to a physical range
		 * on our vdev. We use this to determine if we are
		 * in the middle of this metaslab range.
		 */
		range_seg_t logical_rs, physical_rs;
		logical_rs.rs_start = msp->ms_start;
		logical_rs.rs_end = msp->ms_start + msp->ms_size;
		vdev_xlate(vd, &logical_rs, &physical_rs);

		if (vd->vdev_initialize_last_offset <= physical_rs.rs_start) {
			vd->vdev_initialize_bytes_est += ms_free;
			mutex_exit(&msp->ms_lock);
			continue;
		} else if (vd->vdev_initialize_last_offset >
		    physical_rs.rs_end) {
			vd->vdev_initialize_bytes_done += ms_free;
			vd->vdev_initialize_bytes_est += ms_free;
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/*
		 * If we get here, we're in the middle of initializing this
		 * metaslab. Load it and walk the free tree for more accurate
		 * progress estimation.
		 */
		vdev_initialize_ms_load(msp);

		for (range_seg_t *rs = avl_first(&msp->ms_allocatable->rt_root);
		    rs; rs = AVL_NEXT(&msp->ms_allocatable->rt_root, rs)) {
			logical_rs.rs_start = rs->rs_start;
			logical_rs.rs_end = rs->rs_end;
			vdev_xlate(vd, &logical_rs, &physical_rs);

			uint64_t size = physical_rs.rs_end -
			    physical_rs.rs_start;
			vd->vdev_initialize_bytes_est += size;
			if (vd->vdev_initialize_last_offset >
			    physical_rs.rs_end) {
				vd->vdev_initialize_bytes_done += size;
			} else if (vd->vdev_initialize_last_offset >
			    physical_rs.rs_start &&
			    vd->vdev_initialize_last_offset <
			    physical_rs.rs_end) {
				vd->vdev_initialize_bytes_done +=
				    vd->vdev_initialize_last_offset -
				    physical_rs.rs_start;
			}
		}
		mutex_exit(&msp->ms_lock);
	}
}

static void
vdev_initialize_load(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
	    spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
	ASSERT(vd->vdev_leaf_zap != 0);

	if (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE ||
	    vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED) {
		int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
		    sizeof (vd->vdev_initialize_last_offset), 1,
		    &vd->vdev_initialize_last_offset);
		ASSERT(err == 0 || err == ENOENT);
	}

	vdev_initialize_calculate_progress(vd);
}

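/*
 * Illustrative example (not from the original source): suppose
 * vdev_initialize_last_offset falls inside a metaslab's physical range and
 * that metaslab has two free segments, one entirely below the offset and one
 * straddling it. The first segment counts fully toward both bytes_est and
 * bytes_done; the straddling segment counts fully toward bytes_est but only
 * its portion below the offset toward bytes_done. Metaslabs wholly behind the
 * offset contribute their free space to both totals without being loaded.
 */
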
/*
 * Convert the logical range into a physical range and add it to our
 * avl tree.
 */
void
vdev_initialize_range_add(void *arg, uint64_t start, uint64_t size)
{
	vdev_t *vd = arg;
	range_seg_t logical_rs, physical_rs;
	logical_rs.rs_start = start;
	logical_rs.rs_end = start + size;

	ASSERT(vd->vdev_ops->vdev_op_leaf);
	vdev_xlate(vd, &logical_rs, &physical_rs);

	IMPLY(vd->vdev_top == vd,
	    logical_rs.rs_start == physical_rs.rs_start);
	IMPLY(vd->vdev_top == vd,
	    logical_rs.rs_end == physical_rs.rs_end);

	/* Only add segments that we have not visited yet */
	if (physical_rs.rs_end <= vd->vdev_initialize_last_offset)
		return;

	/* Pick up where we left off mid-range. */
	if (vd->vdev_initialize_last_offset > physical_rs.rs_start) {
		zfs_dbgmsg("range write: vd %s changed (%llu, %llu) to "
		    "(%llu, %llu)", vd->vdev_path,
		    (u_longlong_t)physical_rs.rs_start,
		    (u_longlong_t)physical_rs.rs_end,
		    (u_longlong_t)vd->vdev_initialize_last_offset,
		    (u_longlong_t)physical_rs.rs_end);
		ASSERT3U(physical_rs.rs_end, >,
		    vd->vdev_initialize_last_offset);
		physical_rs.rs_start = vd->vdev_initialize_last_offset;
	}
	ASSERT3U(physical_rs.rs_end, >=, physical_rs.rs_start);

	/*
	 * With raidz, it's possible that the logical range does not live on
	 * this leaf vdev. We only add the physical range to this vdev's tree
	 * if it has a length greater than 0.
	 */
	if (physical_rs.rs_end > physical_rs.rs_start) {
		range_tree_add(vd->vdev_initialize_tree, physical_rs.rs_start,
		    physical_rs.rs_end - physical_rs.rs_start);
	} else {
		ASSERT3U(physical_rs.rs_end, ==, physical_rs.rs_start);
	}
}

static void
vdev_initialize_thread(void *arg)
{
	vdev_t *vd = arg;
	spa_t *spa = vd->vdev_spa;
	int error = 0;
	uint64_t ms_count = 0;

	ASSERT(vdev_is_concrete(vd));
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	vd->vdev_initialize_last_offset = 0;
	vdev_initialize_load(vd);

	abd_t *deadbeef = vdev_initialize_block_alloc();

	vd->vdev_initialize_tree = range_tree_create(NULL, NULL);

	for (uint64_t i = 0; !vd->vdev_detached &&
	    i < vd->vdev_top->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_top->vdev_ms[i];

		/*
		 * If we've expanded the top-level vdev or it's our
		 * first pass, calculate our progress.
		 */
		if (vd->vdev_top->vdev_ms_count != ms_count) {
			vdev_initialize_calculate_progress(vd);
			ms_count = vd->vdev_top->vdev_ms_count;
		}

		vdev_initialize_ms_mark(msp);
		mutex_enter(&msp->ms_lock);
		vdev_initialize_ms_load(msp);

		range_tree_walk(msp->ms_allocatable, vdev_initialize_range_add,
		    vd);
		mutex_exit(&msp->ms_lock);

		spa_config_exit(spa, SCL_CONFIG, FTAG);
		error = vdev_initialize_ranges(vd, deadbeef);
		vdev_initialize_ms_unmark(msp);
		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

		range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL);
		if (error != 0)
			break;
	}

	spa_config_exit(spa, SCL_CONFIG, FTAG);
	mutex_enter(&vd->vdev_initialize_io_lock);
	while (vd->vdev_initialize_inflight > 0) {
		cv_wait(&vd->vdev_initialize_io_cv,
		    &vd->vdev_initialize_io_lock);
	}
	mutex_exit(&vd->vdev_initialize_io_lock);

	range_tree_destroy(vd->vdev_initialize_tree);
	vdev_initialize_block_free(deadbeef);
	vd->vdev_initialize_tree = NULL;

	mutex_enter(&vd->vdev_initialize_lock);
	if (!vd->vdev_initialize_exit_wanted && vdev_writeable(vd)) {
		vdev_initialize_change_state(vd, VDEV_INITIALIZE_COMPLETE);
	}
	ASSERT(vd->vdev_initialize_thread != NULL ||
	    vd->vdev_initialize_inflight == 0);

	/*
	 * Drop the vdev_initialize_lock while we sync out the
	 * txg since it's possible that a device might be trying to
	 * come online and must check to see if it needs to restart an
	 * initialization. That thread will be holding the spa_config_lock
	 * which would prevent the txg_wait_synced from completing.
	 */
	mutex_exit(&vd->vdev_initialize_lock);
	txg_wait_synced(spa_get_dsl(spa), 0);
	mutex_enter(&vd->vdev_initialize_lock);

	vd->vdev_initialize_thread = NULL;
	cv_broadcast(&vd->vdev_initialize_cv);
	mutex_exit(&vd->vdev_initialize_lock);
}

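/*
 * Illustrative note (not from the original source): for each metaslab the
 * thread above, in order, marks the metaslab to hold off allocations, loads
 * it, translates its allocatable segments into physical ranges on this leaf,
 * drops SCL_CONFIG while issuing the pattern writes, unmarks the metaslab,
 * and vacates the range tree before moving on to the next metaslab.
 */
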
/*
 * Initiates initialization of a device. Caller must hold
 * vdev_initialize_lock. Device must be a leaf and not already be
 * initializing.
 */
void
vdev_initialize(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));
	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	ASSERT(!vd->vdev_detached);
	ASSERT(!vd->vdev_initialize_exit_wanted);
	ASSERT(!vd->vdev_top->vdev_removing);

	vdev_initialize_change_state(vd, VDEV_INITIALIZE_ACTIVE);
	vd->vdev_initialize_thread = thread_create(NULL, 0,
	    vdev_initialize_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}

/*
 * Stop initializing a device, with the resultant initializing state being
 * tgt_state. Blocks until the initializing thread has exited.
 * Caller must hold vdev_initialize_lock and must not be writing to the spa
 * config, as the initializing thread may try to enter the config as a reader
 * before exiting.
 */
void
vdev_initialize_stop(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
	spa_t *spa = vd->vdev_spa;
	ASSERT(!spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_WRITER));

	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));

	/*
	 * Allow cancel requests to proceed even if the initialize thread
	 * has stopped.
	 */
	if (vd->vdev_initialize_thread == NULL &&
	    tgt_state != VDEV_INITIALIZE_CANCELED) {
		return;
	}

	vdev_initialize_change_state(vd, tgt_state);
	vd->vdev_initialize_exit_wanted = B_TRUE;
	while (vd->vdev_initialize_thread != NULL)
		cv_wait(&vd->vdev_initialize_cv, &vd->vdev_initialize_lock);

	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	vd->vdev_initialize_exit_wanted = B_FALSE;
}

static void
vdev_initialize_stop_all_impl(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
	if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
		mutex_enter(&vd->vdev_initialize_lock);
		vdev_initialize_stop(vd, tgt_state);
		mutex_exit(&vd->vdev_initialize_lock);
		return;
	}

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_initialize_stop_all_impl(vd->vdev_child[i], tgt_state);
	}
}

/*
 * Convenience function to stop initialization of a vdev tree and set all
 * initialize thread pointers to NULL.
 */
void
vdev_initialize_stop_all(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
	vdev_initialize_stop_all_impl(vd, tgt_state);

	if (vd->vdev_spa->spa_sync_on) {
		/* Make sure that our state has been synced to disk */
		txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
	}
}

void
vdev_initialize_restart(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

	if (vd->vdev_leaf_zap != 0) {
		mutex_enter(&vd->vdev_initialize_lock);
		uint64_t initialize_state = VDEV_INITIALIZE_NONE;
		int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_STATE,
		    sizeof (initialize_state), 1, &initialize_state);
		ASSERT(err == 0 || err == ENOENT);
		vd->vdev_initialize_state = initialize_state;

		uint64_t timestamp = 0;
		err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME,
		    sizeof (timestamp), 1, &timestamp);
		ASSERT(err == 0 || err == ENOENT);
		vd->vdev_initialize_action_time = (time_t)timestamp;

		if (vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
		    vd->vdev_offline) {
			/* load progress for reporting, but don't resume */
			vdev_initialize_load(vd);
		} else if (vd->vdev_initialize_state ==
		    VDEV_INITIALIZE_ACTIVE && vdev_writeable(vd)) {
			vdev_initialize(vd);
		}

		mutex_exit(&vd->vdev_initialize_lock);
	}

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_initialize_restart(vd->vdev_child[i]);
	}
}
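
/*
 * Illustrative usage sketch (not part of the original file; the function name
 * below is hypothetical). A consumer would start or suspend initialization of
 * a leaf vdev through the entry points above roughly as follows:
 *
 *	static void
 *	example_initialize_leaf(vdev_t *vd, boolean_t start)
 *	{
 *		mutex_enter(&vd->vdev_initialize_lock);
 *		if (start) {
 *			if (vd->vdev_initialize_thread == NULL)
 *				vdev_initialize(vd);
 *		} else {
 *			vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED);
 *		}
 *		mutex_exit(&vd->vdev_initialize_lock);
 *	}
 *
 * vdev_initialize_restart() is instead called with spa_namespace_lock held
 * (as its ASSERT shows), e.g. when a pool is loaded, to resume any
 * initialization that was still ACTIVE on disk.
 */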