1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or https://opensource.org/licenses/CDDL-1.0. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2013, 2014, Delphix. All rights reserved. 24 * Copyright (c) 2019 Datto Inc. 25 * Copyright (c) 2021, 2022, George Amanakis. All rights reserved. 26 */ 27 28 /* 29 * Routines to manage the on-disk persistent error log. 30 * 31 * Each pool stores a log of all logical data errors seen during normal 32 * operation. This is actually the union of two distinct logs: the last log, 33 * and the current log. All errors seen are logged to the current log. When a 34 * scrub completes, the current log becomes the last log, the last log is thrown 35 * out, and the current log is reinitialized. This way, if an error is somehow 36 * corrected, a new scrub will show that it no longer exists, and will be 37 * deleted from the log when the scrub completes. 38 * 39 * The log is stored using a ZAP object whose key is a string form of the 40 * zbookmark_phys tuple (objset, object, level, blkid), and whose contents is an 41 * optional 'objset:object' human-readable string describing the data. When an 42 * error is first logged, this string will be empty, indicating that no name is 43 * known. This prevents us from having to issue a potentially large amount of 44 * I/O to discover the object name during an error path. Instead, we do the 45 * calculation when the data is requested, storing the result so future queries 46 * will be faster. 47 * 48 * If the head_errlog feature is enabled, a different on-disk format is used. 49 * The error log of each head dataset is stored separately in the zap object 50 * and keyed by the head id. This enables listing every dataset affected in 51 * userland. In order to be able to track whether an error block has been 52 * modified or added to snapshots since it was marked as an error, a new tuple 53 * is introduced: zbookmark_err_phys_t. It allows the storage of the birth 54 * transaction group of an error block on-disk. The birth transaction group is 55 * used by check_filesystem() to assess whether this block was freed, 56 * re-written or added to a snapshot since its marking as an error. 57 * 58 * This log is then shipped into an nvlist where the key is the dataset name and 59 * the value is the object name. Userland is then responsible for uniquifying 60 * this list and displaying it to the user. 61 */ 62 63 #include <sys/dmu_tx.h> 64 #include <sys/spa.h> 65 #include <sys/spa_impl.h> 66 #include <sys/zap.h> 67 #include <sys/zio.h> 68 #include <sys/dsl_dir.h> 69 #include <sys/dmu_objset.h> 70 #include <sys/dbuf.h> 71 #include <sys/zfs_znode.h> 72 73 #define NAME_MAX_LEN 64 74 75 typedef struct clones { 76 uint64_t clone_ds; 77 list_node_t node; 78 } clones_t; 79 80 /* 81 * spa_upgrade_errlog_limit : A zfs module parameter that controls the number 82 * of on-disk error log entries that will be converted to the new 83 * format when enabling head_errlog. Defaults to 0 which converts 84 * all log entries. 85 */ 86 static uint_t spa_upgrade_errlog_limit = 0; 87 88 /* 89 * Convert a bookmark to a string. 90 */ 91 static void 92 bookmark_to_name(zbookmark_phys_t *zb, char *buf, size_t len) 93 { 94 (void) snprintf(buf, len, "%llx:%llx:%llx:%llx", 95 (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object, 96 (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid); 97 } 98 99 /* 100 * Convert an err_phys to a string. 101 */ 102 static void 103 errphys_to_name(zbookmark_err_phys_t *zep, char *buf, size_t len) 104 { 105 (void) snprintf(buf, len, "%llx:%llx:%llx:%llx", 106 (u_longlong_t)zep->zb_object, (u_longlong_t)zep->zb_level, 107 (u_longlong_t)zep->zb_blkid, (u_longlong_t)zep->zb_birth); 108 } 109 110 /* 111 * Convert a string to a err_phys. 112 */ 113 void 114 name_to_errphys(char *buf, zbookmark_err_phys_t *zep) 115 { 116 zep->zb_object = zfs_strtonum(buf, &buf); 117 ASSERT(*buf == ':'); 118 zep->zb_level = (int)zfs_strtonum(buf + 1, &buf); 119 ASSERT(*buf == ':'); 120 zep->zb_blkid = zfs_strtonum(buf + 1, &buf); 121 ASSERT(*buf == ':'); 122 zep->zb_birth = zfs_strtonum(buf + 1, &buf); 123 ASSERT(*buf == '\0'); 124 } 125 126 /* 127 * Convert a string to a bookmark. 128 */ 129 static void 130 name_to_bookmark(char *buf, zbookmark_phys_t *zb) 131 { 132 zb->zb_objset = zfs_strtonum(buf, &buf); 133 ASSERT(*buf == ':'); 134 zb->zb_object = zfs_strtonum(buf + 1, &buf); 135 ASSERT(*buf == ':'); 136 zb->zb_level = (int)zfs_strtonum(buf + 1, &buf); 137 ASSERT(*buf == ':'); 138 zb->zb_blkid = zfs_strtonum(buf + 1, &buf); 139 ASSERT(*buf == '\0'); 140 } 141 142 void 143 zep_to_zb(uint64_t dataset, zbookmark_err_phys_t *zep, zbookmark_phys_t *zb) 144 { 145 zb->zb_objset = dataset; 146 zb->zb_object = zep->zb_object; 147 zb->zb_level = zep->zb_level; 148 zb->zb_blkid = zep->zb_blkid; 149 } 150 151 static void 152 name_to_object(char *buf, uint64_t *obj) 153 { 154 *obj = zfs_strtonum(buf, &buf); 155 ASSERT(*buf == '\0'); 156 } 157 158 /* 159 * Retrieve the head filesystem. 160 */ 161 static int get_head_ds(spa_t *spa, uint64_t dsobj, uint64_t *head_ds) 162 { 163 dsl_dataset_t *ds; 164 int error = dsl_dataset_hold_obj_flags(spa->spa_dsl_pool, 165 dsobj, DS_HOLD_FLAG_DECRYPT, FTAG, &ds); 166 167 if (error != 0) 168 return (error); 169 170 ASSERT(head_ds); 171 *head_ds = dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj; 172 dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG); 173 174 return (error); 175 } 176 177 /* 178 * Log an uncorrectable error to the persistent error log. We add it to the 179 * spa's list of pending errors. The changes are actually synced out to disk 180 * during spa_errlog_sync(). 181 */ 182 void 183 spa_log_error(spa_t *spa, const zbookmark_phys_t *zb, const uint64_t birth) 184 { 185 spa_error_entry_t search; 186 spa_error_entry_t *new; 187 avl_tree_t *tree; 188 avl_index_t where; 189 190 /* 191 * If we are trying to import a pool, ignore any errors, as we won't be 192 * writing to the pool any time soon. 193 */ 194 if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT) 195 return; 196 197 mutex_enter(&spa->spa_errlist_lock); 198 199 /* 200 * If we have had a request to rotate the log, log it to the next list 201 * instead of the current one. 202 */ 203 if (spa->spa_scrub_active || spa->spa_scrub_finished) 204 tree = &spa->spa_errlist_scrub; 205 else 206 tree = &spa->spa_errlist_last; 207 208 search.se_bookmark = *zb; 209 if (avl_find(tree, &search, &where) != NULL) { 210 mutex_exit(&spa->spa_errlist_lock); 211 return; 212 } 213 214 new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP); 215 new->se_bookmark = *zb; 216 217 /* 218 * If the head_errlog feature is enabled, store the birth txg now. In 219 * case the file is deleted before spa_errlog_sync() runs, we will not 220 * be able to retrieve the birth txg. 221 */ 222 if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) { 223 new->se_zep.zb_object = zb->zb_object; 224 new->se_zep.zb_level = zb->zb_level; 225 new->se_zep.zb_blkid = zb->zb_blkid; 226 new->se_zep.zb_birth = birth; 227 } 228 229 avl_insert(tree, new, where); 230 mutex_exit(&spa->spa_errlist_lock); 231 } 232 233 int 234 find_birth_txg(dsl_dataset_t *ds, zbookmark_err_phys_t *zep, 235 uint64_t *birth_txg) 236 { 237 objset_t *os; 238 int error = dmu_objset_from_ds(ds, &os); 239 if (error != 0) 240 return (error); 241 242 dnode_t *dn; 243 blkptr_t bp; 244 245 error = dnode_hold(os, zep->zb_object, FTAG, &dn); 246 if (error != 0) 247 return (error); 248 249 rw_enter(&dn->dn_struct_rwlock, RW_READER); 250 error = dbuf_dnode_findbp(dn, zep->zb_level, zep->zb_blkid, &bp, NULL, 251 NULL); 252 if (error == 0 && BP_IS_HOLE(&bp)) 253 error = SET_ERROR(ENOENT); 254 255 *birth_txg = BP_GET_LOGICAL_BIRTH(&bp); 256 rw_exit(&dn->dn_struct_rwlock); 257 dnode_rele(dn, FTAG); 258 return (error); 259 } 260 261 /* 262 * This function finds the oldest affected filesystem containing an error 263 * block. 264 */ 265 int 266 find_top_affected_fs(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep, 267 uint64_t *top_affected_fs) 268 { 269 uint64_t oldest_dsobj; 270 int error = dsl_dataset_oldest_snapshot(spa, head_ds, zep->zb_birth, 271 &oldest_dsobj); 272 if (error != 0) 273 return (error); 274 275 dsl_dataset_t *ds; 276 error = dsl_dataset_hold_obj_flags(spa->spa_dsl_pool, oldest_dsobj, 277 DS_HOLD_FLAG_DECRYPT, FTAG, &ds); 278 if (error != 0) 279 return (error); 280 281 *top_affected_fs = 282 dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj; 283 dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG); 284 return (0); 285 } 286 287 288 #ifdef _KERNEL 289 /* 290 * Copy the bookmark to the end of the user-space buffer which starts at 291 * uaddr and has *count unused entries, and decrement *count by 1. 292 */ 293 static int 294 copyout_entry(const zbookmark_phys_t *zb, void *uaddr, uint64_t *count) 295 { 296 if (*count == 0) 297 return (SET_ERROR(ENOMEM)); 298 299 *count -= 1; 300 if (copyout(zb, (char *)uaddr + (*count) * sizeof (zbookmark_phys_t), 301 sizeof (zbookmark_phys_t)) != 0) 302 return (SET_ERROR(EFAULT)); 303 return (0); 304 } 305 306 /* 307 * Each time the error block is referenced by a snapshot or clone, add a 308 * zbookmark_phys_t entry to the userspace array at uaddr. The array is 309 * filled from the back and the in-out parameter *count is modified to be the 310 * number of unused entries at the beginning of the array. The function 311 * scrub_filesystem() is modelled after this one. 312 */ 313 static int 314 check_filesystem(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep, 315 void *uaddr, uint64_t *count, list_t *clones_list) 316 { 317 dsl_dataset_t *ds; 318 dsl_pool_t *dp = spa->spa_dsl_pool; 319 320 int error = dsl_dataset_hold_obj_flags(dp, head_ds, 321 DS_HOLD_FLAG_DECRYPT, FTAG, &ds); 322 if (error != 0) 323 return (error); 324 325 uint64_t latest_txg; 326 uint64_t txg_to_consider = spa->spa_syncing_txg; 327 boolean_t check_snapshot = B_TRUE; 328 error = find_birth_txg(ds, zep, &latest_txg); 329 330 /* 331 * If find_birth_txg() errors out otherwise, let txg_to_consider be 332 * equal to the spa's syncing txg: if check_filesystem() errors out 333 * then affected snapshots or clones will not be checked. 334 */ 335 if (error == 0 && zep->zb_birth == latest_txg) { 336 /* Block neither free nor rewritten. */ 337 zbookmark_phys_t zb; 338 zep_to_zb(head_ds, zep, &zb); 339 error = copyout_entry(&zb, uaddr, count); 340 if (error != 0) { 341 dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG); 342 return (error); 343 } 344 check_snapshot = B_FALSE; 345 } else if (error == 0) { 346 txg_to_consider = latest_txg; 347 } 348 349 /* 350 * Retrieve the number of snapshots if the dataset is not a snapshot. 351 */ 352 uint64_t snap_count = 0; 353 if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) { 354 355 error = zap_count(spa->spa_meta_objset, 356 dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count); 357 358 if (error != 0) { 359 dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG); 360 return (error); 361 } 362 } 363 364 if (snap_count == 0) { 365 /* Filesystem without snapshots. */ 366 dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG); 367 return (0); 368 } 369 370 uint64_t *snap_obj_array = kmem_zalloc(snap_count * sizeof (uint64_t), 371 KM_SLEEP); 372 373 int aff_snap_count = 0; 374 uint64_t snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; 375 uint64_t snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg; 376 uint64_t zap_clone = dsl_dir_phys(ds->ds_dir)->dd_clones; 377 378 dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG); 379 380 /* Check only snapshots created from this file system. */ 381 while (snap_obj != 0 && zep->zb_birth < snap_obj_txg && 382 snap_obj_txg <= txg_to_consider) { 383 384 error = dsl_dataset_hold_obj_flags(dp, snap_obj, 385 DS_HOLD_FLAG_DECRYPT, FTAG, &ds); 386 if (error != 0) 387 goto out; 388 389 if (dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj != head_ds) { 390 snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; 391 snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg; 392 dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG); 393 continue; 394 } 395 396 boolean_t affected = B_TRUE; 397 if (check_snapshot) { 398 uint64_t blk_txg; 399 error = find_birth_txg(ds, zep, &blk_txg); 400 affected = (error == 0 && zep->zb_birth == blk_txg); 401 } 402 403 /* Report errors in snapshots. */ 404 if (affected) { 405 snap_obj_array[aff_snap_count] = snap_obj; 406 aff_snap_count++; 407 408 zbookmark_phys_t zb; 409 zep_to_zb(snap_obj, zep, &zb); 410 error = copyout_entry(&zb, uaddr, count); 411 if (error != 0) { 412 dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, 413 FTAG); 414 goto out; 415 } 416 } 417 snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; 418 snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg; 419 dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG); 420 } 421 422 if (zap_clone == 0 || aff_snap_count == 0) { 423 error = 0; 424 goto out; 425 } 426 427 /* Check clones. */ 428 zap_cursor_t *zc; 429 zap_attribute_t *za; 430 431 zc = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP); 432 za = zap_attribute_alloc(); 433 434 for (zap_cursor_init(zc, spa->spa_meta_objset, zap_clone); 435 zap_cursor_retrieve(zc, za) == 0; 436 zap_cursor_advance(zc)) { 437 438 dsl_dataset_t *clone; 439 error = dsl_dataset_hold_obj_flags(dp, za->za_first_integer, 440 DS_HOLD_FLAG_DECRYPT, FTAG, &clone); 441 442 if (error != 0) 443 break; 444 445 /* 446 * Only clones whose origins were affected could also 447 * have affected snapshots. 448 */ 449 boolean_t found = B_FALSE; 450 for (int i = 0; i < snap_count; i++) { 451 if (dsl_dir_phys(clone->ds_dir)->dd_origin_obj 452 == snap_obj_array[i]) 453 found = B_TRUE; 454 } 455 dsl_dataset_rele_flags(clone, DS_HOLD_FLAG_DECRYPT, FTAG); 456 457 if (!found) 458 continue; 459 460 clones_t *ct = kmem_zalloc(sizeof (*ct), KM_SLEEP); 461 ct->clone_ds = za->za_first_integer; 462 list_insert_tail(clones_list, ct); 463 } 464 465 zap_cursor_fini(zc); 466 zap_attribute_free(za); 467 kmem_free(zc, sizeof (*zc)); 468 469 out: 470 kmem_free(snap_obj_array, sizeof (*snap_obj_array)); 471 return (error); 472 } 473 474 static int 475 process_error_block(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep, 476 void *uaddr, uint64_t *count) 477 { 478 /* 479 * If zb_birth == 0 or head_ds == 0 it means we failed to retrieve the 480 * birth txg or the head filesystem of the block pointer. This may 481 * happen e.g. when an encrypted filesystem is not mounted or when 482 * the key is not loaded. In this case do not proceed to 483 * check_filesystem(), instead do the accounting here. 484 */ 485 if (zep->zb_birth == 0 || head_ds == 0) { 486 zbookmark_phys_t zb; 487 zep_to_zb(head_ds, zep, &zb); 488 int error = copyout_entry(&zb, uaddr, count); 489 if (error != 0) { 490 return (error); 491 } 492 return (0); 493 } 494 495 uint64_t top_affected_fs; 496 uint64_t init_count = *count; 497 int error = find_top_affected_fs(spa, head_ds, zep, &top_affected_fs); 498 if (error == 0) { 499 clones_t *ct; 500 list_t clones_list; 501 502 list_create(&clones_list, sizeof (clones_t), 503 offsetof(clones_t, node)); 504 505 error = check_filesystem(spa, top_affected_fs, zep, 506 uaddr, count, &clones_list); 507 508 while ((ct = list_remove_head(&clones_list)) != NULL) { 509 error = check_filesystem(spa, ct->clone_ds, zep, 510 uaddr, count, &clones_list); 511 kmem_free(ct, sizeof (*ct)); 512 513 if (error) { 514 while (!list_is_empty(&clones_list)) { 515 ct = list_remove_head(&clones_list); 516 kmem_free(ct, sizeof (*ct)); 517 } 518 break; 519 } 520 } 521 522 list_destroy(&clones_list); 523 } 524 if (error == 0 && init_count == *count) { 525 /* 526 * If we reach this point, no errors have been detected 527 * in the checked filesystems/snapshots. Before returning mark 528 * the error block to be removed from the error lists and logs. 529 */ 530 zbookmark_phys_t zb; 531 zep_to_zb(head_ds, zep, &zb); 532 spa_remove_error(spa, &zb, zep->zb_birth); 533 } 534 535 return (error); 536 } 537 #endif 538 539 /* Return the number of errors in the error log */ 540 uint64_t 541 spa_get_last_errlog_size(spa_t *spa) 542 { 543 uint64_t total = 0, count; 544 mutex_enter(&spa->spa_errlog_lock); 545 546 if (spa->spa_errlog_last != 0 && 547 zap_count(spa->spa_meta_objset, spa->spa_errlog_last, 548 &count) == 0) 549 total += count; 550 mutex_exit(&spa->spa_errlog_lock); 551 return (total); 552 } 553 554 /* 555 * If a healed bookmark matches an entry in the error log we stash it in a tree 556 * so that we can later remove the related log entries in sync context. 557 */ 558 static void 559 spa_add_healed_error(spa_t *spa, uint64_t obj, zbookmark_phys_t *healed_zb, 560 const uint64_t birth) 561 { 562 char name[NAME_MAX_LEN]; 563 564 if (obj == 0) 565 return; 566 567 boolean_t held_list = B_FALSE; 568 boolean_t held_log = B_FALSE; 569 570 if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) { 571 bookmark_to_name(healed_zb, name, sizeof (name)); 572 573 if (zap_contains(spa->spa_meta_objset, healed_zb->zb_objset, 574 name) == 0) { 575 if (!MUTEX_HELD(&spa->spa_errlog_lock)) { 576 mutex_enter(&spa->spa_errlog_lock); 577 held_log = B_TRUE; 578 } 579 580 /* 581 * Found an error matching healed zb, add zb to our 582 * tree of healed errors 583 */ 584 avl_tree_t *tree = &spa->spa_errlist_healed; 585 spa_error_entry_t search; 586 spa_error_entry_t *new; 587 avl_index_t where; 588 search.se_bookmark = *healed_zb; 589 if (!MUTEX_HELD(&spa->spa_errlist_lock)) { 590 mutex_enter(&spa->spa_errlist_lock); 591 held_list = B_TRUE; 592 } 593 if (avl_find(tree, &search, &where) != NULL) { 594 if (held_list) 595 mutex_exit(&spa->spa_errlist_lock); 596 if (held_log) 597 mutex_exit(&spa->spa_errlog_lock); 598 return; 599 } 600 new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP); 601 new->se_bookmark = *healed_zb; 602 avl_insert(tree, new, where); 603 if (held_list) 604 mutex_exit(&spa->spa_errlist_lock); 605 if (held_log) 606 mutex_exit(&spa->spa_errlog_lock); 607 } 608 return; 609 } 610 611 zbookmark_err_phys_t healed_zep; 612 healed_zep.zb_object = healed_zb->zb_object; 613 healed_zep.zb_level = healed_zb->zb_level; 614 healed_zep.zb_blkid = healed_zb->zb_blkid; 615 healed_zep.zb_birth = birth; 616 617 errphys_to_name(&healed_zep, name, sizeof (name)); 618 619 zap_cursor_t zc; 620 zap_attribute_t *za = zap_attribute_alloc(); 621 for (zap_cursor_init(&zc, spa->spa_meta_objset, spa->spa_errlog_last); 622 zap_cursor_retrieve(&zc, za) == 0; zap_cursor_advance(&zc)) { 623 if (zap_contains(spa->spa_meta_objset, za->za_first_integer, 624 name) == 0) { 625 if (!MUTEX_HELD(&spa->spa_errlog_lock)) { 626 mutex_enter(&spa->spa_errlog_lock); 627 held_log = B_TRUE; 628 } 629 630 avl_tree_t *tree = &spa->spa_errlist_healed; 631 spa_error_entry_t search; 632 spa_error_entry_t *new; 633 avl_index_t where; 634 search.se_bookmark = *healed_zb; 635 636 if (!MUTEX_HELD(&spa->spa_errlist_lock)) { 637 mutex_enter(&spa->spa_errlist_lock); 638 held_list = B_TRUE; 639 } 640 641 if (avl_find(tree, &search, &where) != NULL) { 642 if (held_list) 643 mutex_exit(&spa->spa_errlist_lock); 644 if (held_log) 645 mutex_exit(&spa->spa_errlog_lock); 646 continue; 647 } 648 new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP); 649 new->se_bookmark = *healed_zb; 650 new->se_zep = healed_zep; 651 avl_insert(tree, new, where); 652 653 if (held_list) 654 mutex_exit(&spa->spa_errlist_lock); 655 if (held_log) 656 mutex_exit(&spa->spa_errlog_lock); 657 } 658 } 659 zap_cursor_fini(&zc); 660 zap_attribute_free(za); 661 } 662 663 /* 664 * If this error exists in the given tree remove it. 665 */ 666 static void 667 remove_error_from_list(spa_t *spa, avl_tree_t *t, const zbookmark_phys_t *zb) 668 { 669 spa_error_entry_t search, *found; 670 avl_index_t where; 671 672 mutex_enter(&spa->spa_errlist_lock); 673 search.se_bookmark = *zb; 674 if ((found = avl_find(t, &search, &where)) != NULL) { 675 avl_remove(t, found); 676 kmem_free(found, sizeof (spa_error_entry_t)); 677 } 678 mutex_exit(&spa->spa_errlist_lock); 679 } 680 681 682 /* 683 * Removes all of the recv healed errors from both on-disk error logs 684 */ 685 static void 686 spa_remove_healed_errors(spa_t *spa, avl_tree_t *s, avl_tree_t *l, dmu_tx_t *tx) 687 { 688 char name[NAME_MAX_LEN]; 689 spa_error_entry_t *se; 690 void *cookie = NULL; 691 692 ASSERT(MUTEX_HELD(&spa->spa_errlog_lock)); 693 694 while ((se = avl_destroy_nodes(&spa->spa_errlist_healed, 695 &cookie)) != NULL) { 696 remove_error_from_list(spa, s, &se->se_bookmark); 697 remove_error_from_list(spa, l, &se->se_bookmark); 698 699 if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) { 700 bookmark_to_name(&se->se_bookmark, name, sizeof (name)); 701 (void) zap_remove(spa->spa_meta_objset, 702 spa->spa_errlog_last, name, tx); 703 (void) zap_remove(spa->spa_meta_objset, 704 spa->spa_errlog_scrub, name, tx); 705 } else { 706 errphys_to_name(&se->se_zep, name, sizeof (name)); 707 zap_cursor_t zc; 708 zap_attribute_t *za = zap_attribute_alloc(); 709 for (zap_cursor_init(&zc, spa->spa_meta_objset, 710 spa->spa_errlog_last); 711 zap_cursor_retrieve(&zc, za) == 0; 712 zap_cursor_advance(&zc)) { 713 zap_remove(spa->spa_meta_objset, 714 za->za_first_integer, name, tx); 715 } 716 zap_cursor_fini(&zc); 717 718 for (zap_cursor_init(&zc, spa->spa_meta_objset, 719 spa->spa_errlog_scrub); 720 zap_cursor_retrieve(&zc, za) == 0; 721 zap_cursor_advance(&zc)) { 722 zap_remove(spa->spa_meta_objset, 723 za->za_first_integer, name, tx); 724 } 725 zap_cursor_fini(&zc); 726 zap_attribute_free(za); 727 } 728 kmem_free(se, sizeof (spa_error_entry_t)); 729 } 730 } 731 732 /* 733 * Stash away healed bookmarks to remove them from the on-disk error logs 734 * later in spa_remove_healed_errors(). 735 */ 736 void 737 spa_remove_error(spa_t *spa, zbookmark_phys_t *zb, uint64_t birth) 738 { 739 spa_add_healed_error(spa, spa->spa_errlog_last, zb, birth); 740 spa_add_healed_error(spa, spa->spa_errlog_scrub, zb, birth); 741 } 742 743 static uint64_t 744 approx_errlog_size_impl(spa_t *spa, uint64_t spa_err_obj) 745 { 746 if (spa_err_obj == 0) 747 return (0); 748 uint64_t total = 0; 749 750 zap_cursor_t zc; 751 zap_attribute_t *za = zap_attribute_alloc(); 752 for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj); 753 zap_cursor_retrieve(&zc, za) == 0; zap_cursor_advance(&zc)) { 754 uint64_t count; 755 if (zap_count(spa->spa_meta_objset, za->za_first_integer, 756 &count) == 0) 757 total += count; 758 } 759 zap_cursor_fini(&zc); 760 zap_attribute_free(za); 761 return (total); 762 } 763 764 /* 765 * Return the approximate number of errors currently in the error log. This 766 * will be nonzero if there are some errors, but otherwise it may be more 767 * or less than the number of entries returned by spa_get_errlog(). 768 */ 769 uint64_t 770 spa_approx_errlog_size(spa_t *spa) 771 { 772 uint64_t total = 0; 773 774 if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) { 775 mutex_enter(&spa->spa_errlog_lock); 776 uint64_t count; 777 if (spa->spa_errlog_scrub != 0 && 778 zap_count(spa->spa_meta_objset, spa->spa_errlog_scrub, 779 &count) == 0) 780 total += count; 781 782 if (spa->spa_errlog_last != 0 && !spa->spa_scrub_finished && 783 zap_count(spa->spa_meta_objset, spa->spa_errlog_last, 784 &count) == 0) 785 total += count; 786 mutex_exit(&spa->spa_errlog_lock); 787 788 } else { 789 mutex_enter(&spa->spa_errlog_lock); 790 total += approx_errlog_size_impl(spa, spa->spa_errlog_last); 791 total += approx_errlog_size_impl(spa, spa->spa_errlog_scrub); 792 mutex_exit(&spa->spa_errlog_lock); 793 } 794 mutex_enter(&spa->spa_errlist_lock); 795 total += avl_numnodes(&spa->spa_errlist_last); 796 total += avl_numnodes(&spa->spa_errlist_scrub); 797 mutex_exit(&spa->spa_errlist_lock); 798 return (total); 799 } 800 801 /* 802 * This function sweeps through an on-disk error log and stores all bookmarks 803 * as error bookmarks in a new ZAP object. At the end we discard the old one, 804 * and spa_update_errlog() will set the spa's on-disk error log to new ZAP 805 * object. 806 */ 807 static void 808 sync_upgrade_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t *newobj, 809 dmu_tx_t *tx) 810 { 811 zap_cursor_t zc; 812 zap_attribute_t *za; 813 zbookmark_phys_t zb; 814 uint64_t count; 815 816 *newobj = zap_create(spa->spa_meta_objset, DMU_OT_ERROR_LOG, 817 DMU_OT_NONE, 0, tx); 818 819 /* 820 * If we cannnot perform the upgrade we should clear the old on-disk 821 * error logs. 822 */ 823 if (zap_count(spa->spa_meta_objset, spa_err_obj, &count) != 0) { 824 VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx)); 825 return; 826 } 827 828 za = zap_attribute_alloc(); 829 for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj); 830 zap_cursor_retrieve(&zc, za) == 0; 831 zap_cursor_advance(&zc)) { 832 if (spa_upgrade_errlog_limit != 0 && 833 zc.zc_cd == spa_upgrade_errlog_limit) 834 break; 835 836 name_to_bookmark(za->za_name, &zb); 837 838 zbookmark_err_phys_t zep; 839 zep.zb_object = zb.zb_object; 840 zep.zb_level = zb.zb_level; 841 zep.zb_blkid = zb.zb_blkid; 842 zep.zb_birth = 0; 843 844 /* 845 * In case of an error we should simply continue instead of 846 * returning prematurely. See the next comment. 847 */ 848 uint64_t head_ds; 849 dsl_pool_t *dp = spa->spa_dsl_pool; 850 dsl_dataset_t *ds; 851 objset_t *os; 852 853 int error = dsl_dataset_hold_obj_flags(dp, zb.zb_objset, 854 DS_HOLD_FLAG_DECRYPT, FTAG, &ds); 855 if (error != 0) 856 continue; 857 858 head_ds = dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj; 859 860 /* 861 * The objset and the dnode are required for getting the block 862 * pointer, which is used to determine if BP_IS_HOLE(). If 863 * getting the objset or the dnode fails, do not create a 864 * zap entry (presuming we know the dataset) as this may create 865 * spurious errors that we cannot ever resolve. If an error is 866 * truly persistent, it should re-appear after a scan. 867 */ 868 if (dmu_objset_from_ds(ds, &os) != 0) { 869 dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG); 870 continue; 871 } 872 873 dnode_t *dn; 874 blkptr_t bp; 875 876 if (dnode_hold(os, zep.zb_object, FTAG, &dn) != 0) { 877 dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG); 878 continue; 879 } 880 881 rw_enter(&dn->dn_struct_rwlock, RW_READER); 882 error = dbuf_dnode_findbp(dn, zep.zb_level, zep.zb_blkid, &bp, 883 NULL, NULL); 884 if (error == EACCES) 885 error = 0; 886 else if (!error) 887 zep.zb_birth = BP_GET_LOGICAL_BIRTH(&bp); 888 889 rw_exit(&dn->dn_struct_rwlock); 890 dnode_rele(dn, FTAG); 891 dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG); 892 893 if (error != 0 || BP_IS_HOLE(&bp)) 894 continue; 895 896 uint64_t err_obj; 897 error = zap_lookup_int_key(spa->spa_meta_objset, *newobj, 898 head_ds, &err_obj); 899 900 if (error == ENOENT) { 901 err_obj = zap_create(spa->spa_meta_objset, 902 DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx); 903 904 (void) zap_update_int_key(spa->spa_meta_objset, 905 *newobj, head_ds, err_obj, tx); 906 } 907 908 char buf[64]; 909 errphys_to_name(&zep, buf, sizeof (buf)); 910 911 const char *name = ""; 912 (void) zap_update(spa->spa_meta_objset, err_obj, 913 buf, 1, strlen(name) + 1, name, tx); 914 } 915 zap_cursor_fini(&zc); 916 zap_attribute_free(za); 917 918 VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx)); 919 } 920 921 void 922 spa_upgrade_errlog(spa_t *spa, dmu_tx_t *tx) 923 { 924 uint64_t newobj = 0; 925 926 mutex_enter(&spa->spa_errlog_lock); 927 if (spa->spa_errlog_last != 0) { 928 sync_upgrade_errlog(spa, spa->spa_errlog_last, &newobj, tx); 929 spa->spa_errlog_last = newobj; 930 931 (void) zap_update(spa->spa_meta_objset, 932 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST, 933 sizeof (uint64_t), 1, &spa->spa_errlog_last, tx); 934 } 935 936 if (spa->spa_errlog_scrub != 0) { 937 sync_upgrade_errlog(spa, spa->spa_errlog_scrub, &newobj, tx); 938 spa->spa_errlog_scrub = newobj; 939 940 (void) zap_update(spa->spa_meta_objset, 941 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB, 942 sizeof (uint64_t), 1, &spa->spa_errlog_scrub, tx); 943 } 944 945 mutex_exit(&spa->spa_errlog_lock); 946 } 947 948 #ifdef _KERNEL 949 /* 950 * If an error block is shared by two datasets it will be counted twice. 951 */ 952 static int 953 process_error_log(spa_t *spa, uint64_t obj, void *uaddr, uint64_t *count) 954 { 955 if (obj == 0) 956 return (0); 957 958 zap_cursor_t *zc; 959 zap_attribute_t *za; 960 961 zc = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP); 962 za = zap_attribute_alloc(); 963 964 if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) { 965 for (zap_cursor_init(zc, spa->spa_meta_objset, obj); 966 zap_cursor_retrieve(zc, za) == 0; 967 zap_cursor_advance(zc)) { 968 if (*count == 0) { 969 zap_cursor_fini(zc); 970 kmem_free(zc, sizeof (*zc)); 971 zap_attribute_free(za); 972 return (SET_ERROR(ENOMEM)); 973 } 974 975 zbookmark_phys_t zb; 976 name_to_bookmark(za->za_name, &zb); 977 978 int error = copyout_entry(&zb, uaddr, count); 979 if (error != 0) { 980 zap_cursor_fini(zc); 981 kmem_free(zc, sizeof (*zc)); 982 zap_attribute_free(za); 983 return (error); 984 } 985 } 986 zap_cursor_fini(zc); 987 kmem_free(zc, sizeof (*zc)); 988 zap_attribute_free(za); 989 return (0); 990 } 991 992 for (zap_cursor_init(zc, spa->spa_meta_objset, obj); 993 zap_cursor_retrieve(zc, za) == 0; 994 zap_cursor_advance(zc)) { 995 996 zap_cursor_t *head_ds_cursor; 997 zap_attribute_t *head_ds_attr; 998 999 head_ds_cursor = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP); 1000 head_ds_attr = zap_attribute_alloc(); 1001 1002 uint64_t head_ds_err_obj = za->za_first_integer; 1003 uint64_t head_ds; 1004 name_to_object(za->za_name, &head_ds); 1005 for (zap_cursor_init(head_ds_cursor, spa->spa_meta_objset, 1006 head_ds_err_obj); zap_cursor_retrieve(head_ds_cursor, 1007 head_ds_attr) == 0; zap_cursor_advance(head_ds_cursor)) { 1008 1009 zbookmark_err_phys_t head_ds_block; 1010 name_to_errphys(head_ds_attr->za_name, &head_ds_block); 1011 int error = process_error_block(spa, head_ds, 1012 &head_ds_block, uaddr, count); 1013 1014 if (error != 0) { 1015 zap_cursor_fini(head_ds_cursor); 1016 kmem_free(head_ds_cursor, 1017 sizeof (*head_ds_cursor)); 1018 zap_attribute_free(head_ds_attr); 1019 1020 zap_cursor_fini(zc); 1021 zap_attribute_free(za); 1022 kmem_free(zc, sizeof (*zc)); 1023 return (error); 1024 } 1025 } 1026 zap_cursor_fini(head_ds_cursor); 1027 kmem_free(head_ds_cursor, sizeof (*head_ds_cursor)); 1028 zap_attribute_free(head_ds_attr); 1029 } 1030 zap_cursor_fini(zc); 1031 zap_attribute_free(za); 1032 kmem_free(zc, sizeof (*zc)); 1033 return (0); 1034 } 1035 1036 static int 1037 process_error_list(spa_t *spa, avl_tree_t *list, void *uaddr, uint64_t *count) 1038 { 1039 spa_error_entry_t *se; 1040 1041 if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) { 1042 for (se = avl_first(list); se != NULL; 1043 se = AVL_NEXT(list, se)) { 1044 int error = 1045 copyout_entry(&se->se_bookmark, uaddr, count); 1046 if (error != 0) { 1047 return (error); 1048 } 1049 } 1050 return (0); 1051 } 1052 1053 for (se = avl_first(list); se != NULL; se = AVL_NEXT(list, se)) { 1054 uint64_t head_ds = 0; 1055 int error = get_head_ds(spa, se->se_bookmark.zb_objset, 1056 &head_ds); 1057 1058 /* 1059 * If get_head_ds() errors out, set the head filesystem 1060 * to the filesystem stored in the bookmark of the 1061 * error block. 1062 */ 1063 if (error != 0) 1064 head_ds = se->se_bookmark.zb_objset; 1065 1066 error = process_error_block(spa, head_ds, 1067 &se->se_zep, uaddr, count); 1068 if (error != 0) 1069 return (error); 1070 } 1071 return (0); 1072 } 1073 #endif 1074 1075 /* 1076 * Copy all known errors to userland as an array of bookmarks. This is 1077 * actually a union of the on-disk last log and current log, as well as any 1078 * pending error requests. 1079 * 1080 * Because the act of reading the on-disk log could cause errors to be 1081 * generated, we have two separate locks: one for the error log and one for the 1082 * in-core error lists. We only need the error list lock to log and error, so 1083 * we grab the error log lock while we read the on-disk logs, and only pick up 1084 * the error list lock when we are finished. 1085 */ 1086 int 1087 spa_get_errlog(spa_t *spa, void *uaddr, uint64_t *count) 1088 { 1089 int ret = 0; 1090 1091 #ifdef _KERNEL 1092 /* 1093 * The pool config lock is needed to hold a dataset_t via (among other 1094 * places) process_error_list() -> process_error_block()-> 1095 * find_top_affected_fs(), and lock ordering requires that we get it 1096 * before the spa_errlog_lock. 1097 */ 1098 dsl_pool_config_enter(spa->spa_dsl_pool, FTAG); 1099 mutex_enter(&spa->spa_errlog_lock); 1100 1101 ret = process_error_log(spa, spa->spa_errlog_scrub, uaddr, count); 1102 1103 if (!ret && !spa->spa_scrub_finished) 1104 ret = process_error_log(spa, spa->spa_errlog_last, uaddr, 1105 count); 1106 1107 mutex_enter(&spa->spa_errlist_lock); 1108 if (!ret) 1109 ret = process_error_list(spa, &spa->spa_errlist_scrub, uaddr, 1110 count); 1111 if (!ret) 1112 ret = process_error_list(spa, &spa->spa_errlist_last, uaddr, 1113 count); 1114 mutex_exit(&spa->spa_errlist_lock); 1115 1116 mutex_exit(&spa->spa_errlog_lock); 1117 dsl_pool_config_exit(spa->spa_dsl_pool, FTAG); 1118 #else 1119 (void) spa, (void) uaddr, (void) count; 1120 #endif 1121 1122 return (ret); 1123 } 1124 1125 /* 1126 * Called when a scrub completes. This simply set a bit which tells which AVL 1127 * tree to add new errors. spa_errlog_sync() is responsible for actually 1128 * syncing the changes to the underlying objects. 1129 */ 1130 void 1131 spa_errlog_rotate(spa_t *spa) 1132 { 1133 mutex_enter(&spa->spa_errlist_lock); 1134 spa->spa_scrub_finished = B_TRUE; 1135 mutex_exit(&spa->spa_errlist_lock); 1136 } 1137 1138 /* 1139 * Discard any pending errors from the spa_t. Called when unloading a faulted 1140 * pool, as the errors encountered during the open cannot be synced to disk. 1141 */ 1142 void 1143 spa_errlog_drain(spa_t *spa) 1144 { 1145 spa_error_entry_t *se; 1146 void *cookie; 1147 1148 mutex_enter(&spa->spa_errlist_lock); 1149 1150 cookie = NULL; 1151 while ((se = avl_destroy_nodes(&spa->spa_errlist_last, 1152 &cookie)) != NULL) 1153 kmem_free(se, sizeof (spa_error_entry_t)); 1154 cookie = NULL; 1155 while ((se = avl_destroy_nodes(&spa->spa_errlist_scrub, 1156 &cookie)) != NULL) 1157 kmem_free(se, sizeof (spa_error_entry_t)); 1158 1159 mutex_exit(&spa->spa_errlist_lock); 1160 } 1161 1162 /* 1163 * Process a list of errors into the current on-disk log. 1164 */ 1165 void 1166 sync_error_list(spa_t *spa, avl_tree_t *t, uint64_t *obj, dmu_tx_t *tx) 1167 { 1168 spa_error_entry_t *se; 1169 char buf[NAME_MAX_LEN]; 1170 void *cookie; 1171 1172 if (avl_numnodes(t) == 0) 1173 return; 1174 1175 /* create log if necessary */ 1176 if (*obj == 0) 1177 *obj = zap_create(spa->spa_meta_objset, DMU_OT_ERROR_LOG, 1178 DMU_OT_NONE, 0, tx); 1179 1180 /* add errors to the current log */ 1181 if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) { 1182 for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) { 1183 bookmark_to_name(&se->se_bookmark, buf, sizeof (buf)); 1184 1185 const char *name = se->se_name ? se->se_name : ""; 1186 (void) zap_update(spa->spa_meta_objset, *obj, buf, 1, 1187 strlen(name) + 1, name, tx); 1188 } 1189 } else { 1190 for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) { 1191 zbookmark_err_phys_t zep; 1192 zep.zb_object = se->se_zep.zb_object; 1193 zep.zb_level = se->se_zep.zb_level; 1194 zep.zb_blkid = se->se_zep.zb_blkid; 1195 zep.zb_birth = se->se_zep.zb_birth; 1196 1197 uint64_t head_ds = 0; 1198 int error = get_head_ds(spa, se->se_bookmark.zb_objset, 1199 &head_ds); 1200 1201 /* 1202 * If get_head_ds() errors out, set the head filesystem 1203 * to the filesystem stored in the bookmark of the 1204 * error block. 1205 */ 1206 if (error != 0) 1207 head_ds = se->se_bookmark.zb_objset; 1208 1209 uint64_t err_obj; 1210 error = zap_lookup_int_key(spa->spa_meta_objset, 1211 *obj, head_ds, &err_obj); 1212 1213 if (error == ENOENT) { 1214 err_obj = zap_create(spa->spa_meta_objset, 1215 DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx); 1216 1217 (void) zap_update_int_key(spa->spa_meta_objset, 1218 *obj, head_ds, err_obj, tx); 1219 } 1220 errphys_to_name(&zep, buf, sizeof (buf)); 1221 1222 const char *name = se->se_name ? se->se_name : ""; 1223 (void) zap_update(spa->spa_meta_objset, 1224 err_obj, buf, 1, strlen(name) + 1, name, tx); 1225 } 1226 } 1227 /* purge the error list */ 1228 cookie = NULL; 1229 while ((se = avl_destroy_nodes(t, &cookie)) != NULL) 1230 kmem_free(se, sizeof (spa_error_entry_t)); 1231 } 1232 1233 static void 1234 delete_errlog(spa_t *spa, uint64_t spa_err_obj, dmu_tx_t *tx) 1235 { 1236 if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) { 1237 zap_cursor_t zc; 1238 zap_attribute_t *za = zap_attribute_alloc(); 1239 for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj); 1240 zap_cursor_retrieve(&zc, za) == 0; 1241 zap_cursor_advance(&zc)) { 1242 VERIFY0(dmu_object_free(spa->spa_meta_objset, 1243 za->za_first_integer, tx)); 1244 } 1245 zap_cursor_fini(&zc); 1246 zap_attribute_free(za); 1247 } 1248 VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx)); 1249 } 1250 1251 /* 1252 * Sync the error log out to disk. This is a little tricky because the act of 1253 * writing the error log requires the spa_errlist_lock. So, we need to lock the 1254 * error lists, take a copy of the lists, and then reinitialize them. Then, we 1255 * drop the error list lock and take the error log lock, at which point we 1256 * do the errlog processing. Then, if we encounter an I/O error during this 1257 * process, we can successfully add the error to the list. Note that this will 1258 * result in the perpetual recycling of errors, but it is an unlikely situation 1259 * and not a performance critical operation. 1260 */ 1261 void 1262 spa_errlog_sync(spa_t *spa, uint64_t txg) 1263 { 1264 dmu_tx_t *tx; 1265 avl_tree_t scrub, last; 1266 int scrub_finished; 1267 1268 mutex_enter(&spa->spa_errlist_lock); 1269 1270 /* 1271 * Bail out early under normal circumstances. 1272 */ 1273 if (avl_numnodes(&spa->spa_errlist_scrub) == 0 && 1274 avl_numnodes(&spa->spa_errlist_last) == 0 && 1275 avl_numnodes(&spa->spa_errlist_healed) == 0 && 1276 !spa->spa_scrub_finished) { 1277 mutex_exit(&spa->spa_errlist_lock); 1278 return; 1279 } 1280 1281 spa_get_errlists(spa, &last, &scrub); 1282 scrub_finished = spa->spa_scrub_finished; 1283 spa->spa_scrub_finished = B_FALSE; 1284 1285 mutex_exit(&spa->spa_errlist_lock); 1286 1287 /* 1288 * The pool config lock is needed to hold a dataset_t via 1289 * sync_error_list() -> get_head_ds(), and lock ordering 1290 * requires that we get it before the spa_errlog_lock. 1291 */ 1292 dsl_pool_config_enter(spa->spa_dsl_pool, FTAG); 1293 mutex_enter(&spa->spa_errlog_lock); 1294 1295 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 1296 1297 /* 1298 * Remove healed errors from errors. 1299 */ 1300 spa_remove_healed_errors(spa, &last, &scrub, tx); 1301 1302 /* 1303 * Sync out the current list of errors. 1304 */ 1305 sync_error_list(spa, &last, &spa->spa_errlog_last, tx); 1306 1307 /* 1308 * Rotate the log if necessary. 1309 */ 1310 if (scrub_finished) { 1311 if (spa->spa_errlog_last != 0) 1312 delete_errlog(spa, spa->spa_errlog_last, tx); 1313 spa->spa_errlog_last = spa->spa_errlog_scrub; 1314 spa->spa_errlog_scrub = 0; 1315 1316 sync_error_list(spa, &scrub, &spa->spa_errlog_last, tx); 1317 } 1318 1319 /* 1320 * Sync out any pending scrub errors. 1321 */ 1322 sync_error_list(spa, &scrub, &spa->spa_errlog_scrub, tx); 1323 1324 /* 1325 * Update the MOS to reflect the new values. 1326 */ 1327 (void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 1328 DMU_POOL_ERRLOG_LAST, sizeof (uint64_t), 1, 1329 &spa->spa_errlog_last, tx); 1330 (void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 1331 DMU_POOL_ERRLOG_SCRUB, sizeof (uint64_t), 1, 1332 &spa->spa_errlog_scrub, tx); 1333 1334 dmu_tx_commit(tx); 1335 1336 mutex_exit(&spa->spa_errlog_lock); 1337 dsl_pool_config_exit(spa->spa_dsl_pool, FTAG); 1338 } 1339 1340 static void 1341 delete_dataset_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t ds, 1342 dmu_tx_t *tx) 1343 { 1344 if (spa_err_obj == 0) 1345 return; 1346 1347 zap_cursor_t zc; 1348 zap_attribute_t *za = zap_attribute_alloc(); 1349 for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj); 1350 zap_cursor_retrieve(&zc, za) == 0; zap_cursor_advance(&zc)) { 1351 uint64_t head_ds; 1352 name_to_object(za->za_name, &head_ds); 1353 if (head_ds == ds) { 1354 (void) zap_remove(spa->spa_meta_objset, spa_err_obj, 1355 za->za_name, tx); 1356 VERIFY0(dmu_object_free(spa->spa_meta_objset, 1357 za->za_first_integer, tx)); 1358 break; 1359 } 1360 } 1361 zap_cursor_fini(&zc); 1362 zap_attribute_free(za); 1363 } 1364 1365 void 1366 spa_delete_dataset_errlog(spa_t *spa, uint64_t ds, dmu_tx_t *tx) 1367 { 1368 mutex_enter(&spa->spa_errlog_lock); 1369 delete_dataset_errlog(spa, spa->spa_errlog_scrub, ds, tx); 1370 delete_dataset_errlog(spa, spa->spa_errlog_last, ds, tx); 1371 mutex_exit(&spa->spa_errlog_lock); 1372 } 1373 1374 static int 1375 find_txg_ancestor_snapshot(spa_t *spa, uint64_t new_head, uint64_t old_head, 1376 uint64_t *txg) 1377 { 1378 dsl_dataset_t *ds; 1379 dsl_pool_t *dp = spa->spa_dsl_pool; 1380 1381 int error = dsl_dataset_hold_obj_flags(dp, old_head, 1382 DS_HOLD_FLAG_DECRYPT, FTAG, &ds); 1383 if (error != 0) 1384 return (error); 1385 1386 uint64_t prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; 1387 uint64_t prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg; 1388 1389 while (prev_obj != 0) { 1390 dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG); 1391 if ((error = dsl_dataset_hold_obj_flags(dp, prev_obj, 1392 DS_HOLD_FLAG_DECRYPT, FTAG, &ds)) == 0 && 1393 dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj == new_head) 1394 break; 1395 1396 if (error != 0) 1397 return (error); 1398 1399 prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg; 1400 prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; 1401 } 1402 dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG); 1403 ASSERT(prev_obj != 0); 1404 *txg = prev_obj_txg; 1405 return (0); 1406 } 1407 1408 static void 1409 swap_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t new_head, uint64_t 1410 old_head, dmu_tx_t *tx) 1411 { 1412 if (spa_err_obj == 0) 1413 return; 1414 1415 uint64_t old_head_errlog; 1416 int error = zap_lookup_int_key(spa->spa_meta_objset, spa_err_obj, 1417 old_head, &old_head_errlog); 1418 1419 /* If no error log, then there is nothing to do. */ 1420 if (error != 0) 1421 return; 1422 1423 uint64_t txg; 1424 error = find_txg_ancestor_snapshot(spa, new_head, old_head, &txg); 1425 if (error != 0) 1426 return; 1427 1428 /* 1429 * Create an error log if the file system being promoted does not 1430 * already have one. 1431 */ 1432 uint64_t new_head_errlog; 1433 error = zap_lookup_int_key(spa->spa_meta_objset, spa_err_obj, new_head, 1434 &new_head_errlog); 1435 1436 if (error != 0) { 1437 new_head_errlog = zap_create(spa->spa_meta_objset, 1438 DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx); 1439 1440 (void) zap_update_int_key(spa->spa_meta_objset, spa_err_obj, 1441 new_head, new_head_errlog, tx); 1442 } 1443 1444 zap_cursor_t zc; 1445 zap_attribute_t *za = zap_attribute_alloc(); 1446 zbookmark_err_phys_t err_block; 1447 for (zap_cursor_init(&zc, spa->spa_meta_objset, old_head_errlog); 1448 zap_cursor_retrieve(&zc, za) == 0; zap_cursor_advance(&zc)) { 1449 1450 const char *name = ""; 1451 name_to_errphys(za->za_name, &err_block); 1452 if (err_block.zb_birth < txg) { 1453 (void) zap_update(spa->spa_meta_objset, new_head_errlog, 1454 za->za_name, 1, strlen(name) + 1, name, tx); 1455 1456 (void) zap_remove(spa->spa_meta_objset, old_head_errlog, 1457 za->za_name, tx); 1458 } 1459 } 1460 zap_cursor_fini(&zc); 1461 zap_attribute_free(za); 1462 } 1463 1464 void 1465 spa_swap_errlog(spa_t *spa, uint64_t new_head_ds, uint64_t old_head_ds, 1466 dmu_tx_t *tx) 1467 { 1468 mutex_enter(&spa->spa_errlog_lock); 1469 swap_errlog(spa, spa->spa_errlog_scrub, new_head_ds, old_head_ds, tx); 1470 swap_errlog(spa, spa->spa_errlog_last, new_head_ds, old_head_ds, tx); 1471 mutex_exit(&spa->spa_errlog_lock); 1472 } 1473 1474 #if defined(_KERNEL) 1475 /* error handling */ 1476 EXPORT_SYMBOL(spa_log_error); 1477 EXPORT_SYMBOL(spa_approx_errlog_size); 1478 EXPORT_SYMBOL(spa_get_last_errlog_size); 1479 EXPORT_SYMBOL(spa_get_errlog); 1480 EXPORT_SYMBOL(spa_errlog_rotate); 1481 EXPORT_SYMBOL(spa_errlog_drain); 1482 EXPORT_SYMBOL(spa_errlog_sync); 1483 EXPORT_SYMBOL(spa_get_errlists); 1484 EXPORT_SYMBOL(spa_delete_dataset_errlog); 1485 EXPORT_SYMBOL(spa_swap_errlog); 1486 EXPORT_SYMBOL(sync_error_list); 1487 EXPORT_SYMBOL(spa_upgrade_errlog); 1488 EXPORT_SYMBOL(find_top_affected_fs); 1489 EXPORT_SYMBOL(find_birth_txg); 1490 EXPORT_SYMBOL(zep_to_zb); 1491 EXPORT_SYMBOL(name_to_errphys); 1492 #endif 1493 1494 /* BEGIN CSTYLED */ 1495 ZFS_MODULE_PARAM(zfs_spa, spa_, upgrade_errlog_limit, UINT, ZMOD_RW, 1496 "Limit the number of errors which will be upgraded to the new " 1497 "on-disk error log when enabling head_errlog"); 1498 /* END CSTYLED */ 1499