1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or https://opensource.org/licenses/CDDL-1.0. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2013, 2014, Delphix. All rights reserved. 24 * Copyright (c) 2019 Datto Inc. 25 * Copyright (c) 2021, 2022, George Amanakis. All rights reserved. 26 */ 27 28 /* 29 * Routines to manage the on-disk persistent error log. 30 * 31 * Each pool stores a log of all logical data errors seen during normal 32 * operation. This is actually the union of two distinct logs: the last log, 33 * and the current log. All errors seen are logged to the current log. When a 34 * scrub completes, the current log becomes the last log, the last log is thrown 35 * out, and the current log is reinitialized. This way, if an error is somehow 36 * corrected, a new scrub will show that it no longer exists, and will be 37 * deleted from the log when the scrub completes. 38 * 39 * The log is stored using a ZAP object whose key is a string form of the 40 * zbookmark_phys tuple (objset, object, level, blkid), and whose contents is an 41 * optional 'objset:object' human-readable string describing the data. 
When an 42 * error is first logged, this string will be empty, indicating that no name is 43 * known. This prevents us from having to issue a potentially large amount of 44 * I/O to discover the object name during an error path. Instead, we do the 45 * calculation when the data is requested, storing the result so future queries 46 * will be faster. 47 * 48 * If the head_errlog feature is enabled, a different on-disk format is used. 49 * The error log of each head dataset is stored separately in the zap object 50 * and keyed by the head id. This enables listing every dataset affected in 51 * userland. In order to be able to track whether an error block has been 52 * modified or added to snapshots since it was marked as an error, a new tuple 53 * is introduced: zbookmark_err_phys_t. It allows the storage of the birth 54 * transaction group of an error block on-disk. The birth transaction group is 55 * used by check_filesystem() to assess whether this block was freed, 56 * re-written or added to a snapshot since its marking as an error. 57 * 58 * This log is then shipped into an nvlist where the key is the dataset name and 59 * the value is the object name. Userland is then responsible for uniquifying 60 * this list and displaying it to the user. 61 */ 62 63 #include <sys/dmu_tx.h> 64 #include <sys/spa.h> 65 #include <sys/spa_impl.h> 66 #include <sys/zap.h> 67 #include <sys/zio.h> 68 #include <sys/dsl_dir.h> 69 #include <sys/dmu_objset.h> 70 #include <sys/dbuf.h> 71 #include <sys/zfs_znode.h> 72 73 #define NAME_MAX_LEN 64 74 75 /* 76 * spa_upgrade_errlog_limit : A zfs module parameter that controls the number 77 * of on-disk error log entries that will be converted to the new 78 * format when enabling head_errlog. Defaults to 0 which converts 79 * all log entries. 80 */ 81 static uint_t spa_upgrade_errlog_limit = 0; 82 83 /* 84 * Convert a bookmark to a string. 
85 */ 86 static void 87 bookmark_to_name(zbookmark_phys_t *zb, char *buf, size_t len) 88 { 89 (void) snprintf(buf, len, "%llx:%llx:%llx:%llx", 90 (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object, 91 (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid); 92 } 93 94 /* 95 * Convert an err_phys to a string. 96 */ 97 static void 98 errphys_to_name(zbookmark_err_phys_t *zep, char *buf, size_t len) 99 { 100 (void) snprintf(buf, len, "%llx:%llx:%llx:%llx", 101 (u_longlong_t)zep->zb_object, (u_longlong_t)zep->zb_level, 102 (u_longlong_t)zep->zb_blkid, (u_longlong_t)zep->zb_birth); 103 } 104 105 /* 106 * Convert a string to a err_phys. 107 */ 108 static void 109 name_to_errphys(char *buf, zbookmark_err_phys_t *zep) 110 { 111 zep->zb_object = zfs_strtonum(buf, &buf); 112 ASSERT(*buf == ':'); 113 zep->zb_level = (int)zfs_strtonum(buf + 1, &buf); 114 ASSERT(*buf == ':'); 115 zep->zb_blkid = zfs_strtonum(buf + 1, &buf); 116 ASSERT(*buf == ':'); 117 zep->zb_birth = zfs_strtonum(buf + 1, &buf); 118 ASSERT(*buf == '\0'); 119 } 120 121 /* 122 * Convert a string to a bookmark. 
123 */ 124 static void 125 name_to_bookmark(char *buf, zbookmark_phys_t *zb) 126 { 127 zb->zb_objset = zfs_strtonum(buf, &buf); 128 ASSERT(*buf == ':'); 129 zb->zb_object = zfs_strtonum(buf + 1, &buf); 130 ASSERT(*buf == ':'); 131 zb->zb_level = (int)zfs_strtonum(buf + 1, &buf); 132 ASSERT(*buf == ':'); 133 zb->zb_blkid = zfs_strtonum(buf + 1, &buf); 134 ASSERT(*buf == '\0'); 135 } 136 137 #ifdef _KERNEL 138 static int check_clones(spa_t *spa, uint64_t zap_clone, uint64_t snap_count, 139 uint64_t *snap_obj_array, zbookmark_err_phys_t *zep, void* uaddr, 140 uint64_t *count); 141 142 static void 143 zep_to_zb(uint64_t dataset, zbookmark_err_phys_t *zep, zbookmark_phys_t *zb) 144 { 145 zb->zb_objset = dataset; 146 zb->zb_object = zep->zb_object; 147 zb->zb_level = zep->zb_level; 148 zb->zb_blkid = zep->zb_blkid; 149 } 150 #endif 151 152 static void 153 name_to_object(char *buf, uint64_t *obj) 154 { 155 *obj = zfs_strtonum(buf, &buf); 156 ASSERT(*buf == '\0'); 157 } 158 159 /* 160 * Retrieve the head filesystem. 161 */ 162 static int get_head_ds(spa_t *spa, uint64_t dsobj, uint64_t *head_ds) 163 { 164 dsl_dataset_t *ds; 165 int error = dsl_dataset_hold_obj(spa->spa_dsl_pool, 166 dsobj, FTAG, &ds); 167 168 if (error != 0) 169 return (error); 170 171 ASSERT(head_ds); 172 *head_ds = dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj; 173 dsl_dataset_rele(ds, FTAG); 174 175 return (error); 176 } 177 178 /* 179 * Log an uncorrectable error to the persistent error log. We add it to the 180 * spa's list of pending errors. The changes are actually synced out to disk 181 * during spa_errlog_sync(). 182 */ 183 void 184 spa_log_error(spa_t *spa, const zbookmark_phys_t *zb, const uint64_t *birth) 185 { 186 spa_error_entry_t search; 187 spa_error_entry_t *new; 188 avl_tree_t *tree; 189 avl_index_t where; 190 191 /* 192 * If we are trying to import a pool, ignore any errors, as we won't be 193 * writing to the pool any time soon. 
194 */ 195 if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT) 196 return; 197 198 mutex_enter(&spa->spa_errlist_lock); 199 200 /* 201 * If we have had a request to rotate the log, log it to the next list 202 * instead of the current one. 203 */ 204 if (spa->spa_scrub_active || spa->spa_scrub_finished) 205 tree = &spa->spa_errlist_scrub; 206 else 207 tree = &spa->spa_errlist_last; 208 209 search.se_bookmark = *zb; 210 if (avl_find(tree, &search, &where) != NULL) { 211 mutex_exit(&spa->spa_errlist_lock); 212 return; 213 } 214 215 new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP); 216 new->se_bookmark = *zb; 217 218 /* 219 * If the head_errlog feature is enabled, store the birth txg now. In 220 * case the file is deleted before spa_errlog_sync() runs, we will not 221 * be able to retrieve the birth txg. 222 */ 223 if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) { 224 new->se_zep.zb_object = zb->zb_object; 225 new->se_zep.zb_level = zb->zb_level; 226 new->se_zep.zb_blkid = zb->zb_blkid; 227 228 /* 229 * birth may end up being NULL, e.g. in zio_done(). We 230 * will handle this in process_error_block(). 
231 */ 232 if (birth != NULL) 233 new->se_zep.zb_birth = *birth; 234 } 235 236 avl_insert(tree, new, where); 237 mutex_exit(&spa->spa_errlist_lock); 238 } 239 240 #ifdef _KERNEL 241 static int 242 find_birth_txg(dsl_dataset_t *ds, zbookmark_err_phys_t *zep, 243 uint64_t *birth_txg) 244 { 245 objset_t *os; 246 int error = dmu_objset_from_ds(ds, &os); 247 if (error != 0) 248 return (error); 249 250 dnode_t *dn; 251 blkptr_t bp; 252 253 error = dnode_hold(os, zep->zb_object, FTAG, &dn); 254 if (error != 0) 255 return (error); 256 257 rw_enter(&dn->dn_struct_rwlock, RW_READER); 258 error = dbuf_dnode_findbp(dn, zep->zb_level, zep->zb_blkid, &bp, NULL, 259 NULL); 260 if (error == 0 && BP_IS_HOLE(&bp)) 261 error = SET_ERROR(ENOENT); 262 263 *birth_txg = bp.blk_birth; 264 rw_exit(&dn->dn_struct_rwlock); 265 dnode_rele(dn, FTAG); 266 return (error); 267 } 268 269 /* 270 * Copy the bookmark to the end of the user-space buffer which starts at 271 * uaddr and has *count unused entries, and decrement *count by 1. 272 */ 273 static int 274 copyout_entry(const zbookmark_phys_t *zb, void *uaddr, uint64_t *count) 275 { 276 if (*count == 0) 277 return (SET_ERROR(ENOMEM)); 278 279 *count -= 1; 280 if (copyout(zb, (char *)uaddr + (*count) * sizeof (zbookmark_phys_t), 281 sizeof (zbookmark_phys_t)) != 0) 282 return (SET_ERROR(EFAULT)); 283 return (0); 284 } 285 286 /* 287 * Each time the error block is referenced by a snapshot or clone, add a 288 * zbookmark_phys_t entry to the userspace array at uaddr. The array is 289 * filled from the back and the in-out parameter *count is modified to be the 290 * number of unused entries at the beginning of the array. 
291 */ 292 static int 293 check_filesystem(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep, 294 void *uaddr, uint64_t *count) 295 { 296 dsl_dataset_t *ds; 297 dsl_pool_t *dp = spa->spa_dsl_pool; 298 299 int error = dsl_dataset_hold_obj(dp, head_ds, FTAG, &ds); 300 if (error != 0) 301 return (error); 302 303 uint64_t latest_txg; 304 uint64_t txg_to_consider = spa->spa_syncing_txg; 305 boolean_t check_snapshot = B_TRUE; 306 error = find_birth_txg(ds, zep, &latest_txg); 307 308 /* 309 * If the filesystem is encrypted and the key is not loaded 310 * or the encrypted filesystem is not mounted the error will be EACCES. 311 * In that case report an error in the head filesystem and return. 312 */ 313 if (error == EACCES) { 314 dsl_dataset_rele(ds, FTAG); 315 zbookmark_phys_t zb; 316 zep_to_zb(head_ds, zep, &zb); 317 error = copyout_entry(&zb, uaddr, count); 318 if (error != 0) { 319 dsl_dataset_rele(ds, FTAG); 320 return (error); 321 } 322 return (0); 323 } 324 325 /* 326 * If find_birth_txg() errors out otherwise, let txg_to_consider be 327 * equal to the spa's syncing txg: if check_filesystem() errors out 328 * then affected snapshots or clones will not be checked. 329 */ 330 if (error == 0 && zep->zb_birth == latest_txg) { 331 /* Block neither free nor rewritten. */ 332 zbookmark_phys_t zb; 333 zep_to_zb(head_ds, zep, &zb); 334 error = copyout_entry(&zb, uaddr, count); 335 if (error != 0) { 336 dsl_dataset_rele(ds, FTAG); 337 return (error); 338 } 339 check_snapshot = B_FALSE; 340 } else if (error == 0) { 341 txg_to_consider = latest_txg; 342 } 343 344 /* 345 * Retrieve the number of snapshots if the dataset is not a snapshot. 
346 */ 347 uint64_t snap_count = 0; 348 if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) { 349 350 error = zap_count(spa->spa_meta_objset, 351 dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count); 352 353 if (error != 0) { 354 dsl_dataset_rele(ds, FTAG); 355 return (error); 356 } 357 } 358 359 if (snap_count == 0) { 360 /* Filesystem without snapshots. */ 361 dsl_dataset_rele(ds, FTAG); 362 return (0); 363 } 364 365 uint64_t *snap_obj_array = kmem_zalloc(snap_count * sizeof (uint64_t), 366 KM_SLEEP); 367 368 int aff_snap_count = 0; 369 uint64_t snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; 370 uint64_t snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg; 371 uint64_t zap_clone = dsl_dir_phys(ds->ds_dir)->dd_clones; 372 373 dsl_dataset_rele(ds, FTAG); 374 375 /* Check only snapshots created from this file system. */ 376 while (snap_obj != 0 && zep->zb_birth < snap_obj_txg && 377 snap_obj_txg <= txg_to_consider) { 378 379 error = dsl_dataset_hold_obj(dp, snap_obj, FTAG, &ds); 380 if (error != 0) 381 goto out; 382 383 if (dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj != head_ds) { 384 snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; 385 snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg; 386 dsl_dataset_rele(ds, FTAG); 387 continue; 388 } 389 390 boolean_t affected = B_TRUE; 391 if (check_snapshot) { 392 uint64_t blk_txg; 393 error = find_birth_txg(ds, zep, &blk_txg); 394 affected = (error == 0 && zep->zb_birth == blk_txg); 395 } 396 397 /* Report errors in snapshots. 
*/ 398 if (affected) { 399 snap_obj_array[aff_snap_count] = snap_obj; 400 aff_snap_count++; 401 402 zbookmark_phys_t zb; 403 zep_to_zb(snap_obj, zep, &zb); 404 error = copyout_entry(&zb, uaddr, count); 405 if (error != 0) { 406 dsl_dataset_rele(ds, FTAG); 407 goto out; 408 } 409 } 410 snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; 411 snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg; 412 dsl_dataset_rele(ds, FTAG); 413 } 414 415 if (zap_clone != 0 && aff_snap_count > 0) { 416 error = check_clones(spa, zap_clone, snap_count, snap_obj_array, 417 zep, uaddr, count); 418 } 419 420 out: 421 kmem_free(snap_obj_array, sizeof (*snap_obj_array)); 422 return (error); 423 } 424 425 /* 426 * Clone checking. 427 */ 428 static int check_clones(spa_t *spa, uint64_t zap_clone, uint64_t snap_count, 429 uint64_t *snap_obj_array, zbookmark_err_phys_t *zep, void* uaddr, 430 uint64_t *count) 431 { 432 int error = 0; 433 zap_cursor_t *zc; 434 zap_attribute_t *za; 435 436 zc = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP); 437 za = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP); 438 439 for (zap_cursor_init(zc, spa->spa_meta_objset, zap_clone); 440 zap_cursor_retrieve(zc, za) == 0; 441 zap_cursor_advance(zc)) { 442 443 dsl_pool_t *dp = spa->spa_dsl_pool; 444 dsl_dataset_t *clone; 445 error = dsl_dataset_hold_obj(dp, za->za_first_integer, 446 FTAG, &clone); 447 448 if (error != 0) 449 break; 450 451 /* 452 * Only clones whose origins were affected could also 453 * have affected snapshots. 
454 */ 455 boolean_t found = B_FALSE; 456 for (int i = 0; i < snap_count; i++) { 457 if (dsl_dir_phys(clone->ds_dir)->dd_origin_obj 458 == snap_obj_array[i]) 459 found = B_TRUE; 460 } 461 dsl_dataset_rele(clone, FTAG); 462 463 if (!found) 464 continue; 465 466 error = check_filesystem(spa, za->za_first_integer, zep, 467 uaddr, count); 468 469 if (error != 0) 470 break; 471 } 472 473 zap_cursor_fini(zc); 474 kmem_free(za, sizeof (*za)); 475 kmem_free(zc, sizeof (*zc)); 476 477 return (error); 478 } 479 480 static int 481 find_top_affected_fs(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep, 482 uint64_t *top_affected_fs) 483 { 484 uint64_t oldest_dsobj; 485 int error = dsl_dataset_oldest_snapshot(spa, head_ds, zep->zb_birth, 486 &oldest_dsobj); 487 if (error != 0) 488 return (error); 489 490 dsl_dataset_t *ds; 491 error = dsl_dataset_hold_obj(spa->spa_dsl_pool, oldest_dsobj, 492 FTAG, &ds); 493 if (error != 0) 494 return (error); 495 496 *top_affected_fs = 497 dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj; 498 dsl_dataset_rele(ds, FTAG); 499 return (0); 500 } 501 502 static int 503 process_error_block(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep, 504 void *uaddr, uint64_t *count) 505 { 506 /* 507 * If zb_birth == 0 or head_ds == 0 it means we failed to retrieve the 508 * birth txg or the head filesystem of the block pointer. This may 509 * happen e.g. when an encrypted filesystem is not mounted or when 510 * the key is not loaded. In this case do not proceed to 511 * check_filesystem(), instead do the accounting here. 
512 */ 513 if (zep->zb_birth == 0 || head_ds == 0) { 514 zbookmark_phys_t zb; 515 zep_to_zb(head_ds, zep, &zb); 516 int error = copyout_entry(&zb, uaddr, count); 517 if (error != 0) { 518 return (error); 519 } 520 return (0); 521 } 522 523 uint64_t top_affected_fs; 524 int error = find_top_affected_fs(spa, head_ds, zep, &top_affected_fs); 525 if (error == 0) { 526 error = check_filesystem(spa, top_affected_fs, zep, 527 uaddr, count); 528 } 529 530 return (error); 531 } 532 #endif 533 534 /* 535 * If a healed bookmark matches an entry in the error log we stash it in a tree 536 * so that we can later remove the related log entries in sync context. 537 */ 538 static void 539 spa_add_healed_error(spa_t *spa, uint64_t obj, zbookmark_phys_t *healed_zb) 540 { 541 char name[NAME_MAX_LEN]; 542 543 if (obj == 0) 544 return; 545 546 bookmark_to_name(healed_zb, name, sizeof (name)); 547 mutex_enter(&spa->spa_errlog_lock); 548 if (zap_contains(spa->spa_meta_objset, obj, name) == 0) { 549 /* 550 * Found an error matching healed zb, add zb to our 551 * tree of healed errors 552 */ 553 avl_tree_t *tree = &spa->spa_errlist_healed; 554 spa_error_entry_t search; 555 spa_error_entry_t *new; 556 avl_index_t where; 557 search.se_bookmark = *healed_zb; 558 mutex_enter(&spa->spa_errlist_lock); 559 if (avl_find(tree, &search, &where) != NULL) { 560 mutex_exit(&spa->spa_errlist_lock); 561 mutex_exit(&spa->spa_errlog_lock); 562 return; 563 } 564 new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP); 565 new->se_bookmark = *healed_zb; 566 avl_insert(tree, new, where); 567 mutex_exit(&spa->spa_errlist_lock); 568 } 569 mutex_exit(&spa->spa_errlog_lock); 570 } 571 572 /* 573 * If this error exists in the given tree remove it. 
574 */ 575 static void 576 remove_error_from_list(spa_t *spa, avl_tree_t *t, const zbookmark_phys_t *zb) 577 { 578 spa_error_entry_t search, *found; 579 avl_index_t where; 580 581 mutex_enter(&spa->spa_errlist_lock); 582 search.se_bookmark = *zb; 583 if ((found = avl_find(t, &search, &where)) != NULL) { 584 avl_remove(t, found); 585 kmem_free(found, sizeof (spa_error_entry_t)); 586 } 587 mutex_exit(&spa->spa_errlist_lock); 588 } 589 590 591 /* 592 * Removes all of the recv healed errors from both on-disk error logs 593 */ 594 static void 595 spa_remove_healed_errors(spa_t *spa, avl_tree_t *s, avl_tree_t *l, dmu_tx_t *tx) 596 { 597 char name[NAME_MAX_LEN]; 598 spa_error_entry_t *se; 599 void *cookie = NULL; 600 601 ASSERT(MUTEX_HELD(&spa->spa_errlog_lock)); 602 603 while ((se = avl_destroy_nodes(&spa->spa_errlist_healed, 604 &cookie)) != NULL) { 605 remove_error_from_list(spa, s, &se->se_bookmark); 606 remove_error_from_list(spa, l, &se->se_bookmark); 607 bookmark_to_name(&se->se_bookmark, name, sizeof (name)); 608 kmem_free(se, sizeof (spa_error_entry_t)); 609 (void) zap_remove(spa->spa_meta_objset, 610 spa->spa_errlog_last, name, tx); 611 (void) zap_remove(spa->spa_meta_objset, 612 spa->spa_errlog_scrub, name, tx); 613 } 614 } 615 616 /* 617 * Stash away healed bookmarks to remove them from the on-disk error logs 618 * later in spa_remove_healed_errors(). 
619 */ 620 void 621 spa_remove_error(spa_t *spa, zbookmark_phys_t *zb) 622 { 623 char name[NAME_MAX_LEN]; 624 625 bookmark_to_name(zb, name, sizeof (name)); 626 627 spa_add_healed_error(spa, spa->spa_errlog_last, zb); 628 spa_add_healed_error(spa, spa->spa_errlog_scrub, zb); 629 } 630 631 static uint64_t 632 approx_errlog_size_impl(spa_t *spa, uint64_t spa_err_obj) 633 { 634 if (spa_err_obj == 0) 635 return (0); 636 uint64_t total = 0; 637 638 zap_cursor_t zc; 639 zap_attribute_t za; 640 for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj); 641 zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) { 642 uint64_t count; 643 if (zap_count(spa->spa_meta_objset, za.za_first_integer, 644 &count) == 0) 645 total += count; 646 } 647 zap_cursor_fini(&zc); 648 return (total); 649 } 650 651 /* 652 * Return the approximate number of errors currently in the error log. This 653 * will be nonzero if there are some errors, but otherwise it may be more 654 * or less than the number of entries returned by spa_get_errlog(). 
655 */ 656 uint64_t 657 spa_approx_errlog_size(spa_t *spa) 658 { 659 uint64_t total = 0; 660 661 if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) { 662 mutex_enter(&spa->spa_errlog_lock); 663 uint64_t count; 664 if (spa->spa_errlog_scrub != 0 && 665 zap_count(spa->spa_meta_objset, spa->spa_errlog_scrub, 666 &count) == 0) 667 total += count; 668 669 if (spa->spa_errlog_last != 0 && !spa->spa_scrub_finished && 670 zap_count(spa->spa_meta_objset, spa->spa_errlog_last, 671 &count) == 0) 672 total += count; 673 mutex_exit(&spa->spa_errlog_lock); 674 675 } else { 676 mutex_enter(&spa->spa_errlog_lock); 677 total += approx_errlog_size_impl(spa, spa->spa_errlog_last); 678 total += approx_errlog_size_impl(spa, spa->spa_errlog_scrub); 679 mutex_exit(&spa->spa_errlog_lock); 680 } 681 mutex_enter(&spa->spa_errlist_lock); 682 total += avl_numnodes(&spa->spa_errlist_last); 683 total += avl_numnodes(&spa->spa_errlist_scrub); 684 mutex_exit(&spa->spa_errlist_lock); 685 return (total); 686 } 687 688 /* 689 * This function sweeps through an on-disk error log and stores all bookmarks 690 * as error bookmarks in a new ZAP object. At the end we discard the old one, 691 * and spa_update_errlog() will set the spa's on-disk error log to new ZAP 692 * object. 693 */ 694 static void 695 sync_upgrade_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t *newobj, 696 dmu_tx_t *tx) 697 { 698 zap_cursor_t zc; 699 zap_attribute_t za; 700 zbookmark_phys_t zb; 701 uint64_t count; 702 703 *newobj = zap_create(spa->spa_meta_objset, DMU_OT_ERROR_LOG, 704 DMU_OT_NONE, 0, tx); 705 706 /* 707 * If we cannnot perform the upgrade we should clear the old on-disk 708 * error logs. 
709 */ 710 if (zap_count(spa->spa_meta_objset, spa_err_obj, &count) != 0) { 711 VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx)); 712 return; 713 } 714 715 for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj); 716 zap_cursor_retrieve(&zc, &za) == 0; 717 zap_cursor_advance(&zc)) { 718 if (spa_upgrade_errlog_limit != 0 && 719 zc.zc_cd == spa_upgrade_errlog_limit) 720 break; 721 722 name_to_bookmark(za.za_name, &zb); 723 724 zbookmark_err_phys_t zep; 725 zep.zb_object = zb.zb_object; 726 zep.zb_level = zb.zb_level; 727 zep.zb_blkid = zb.zb_blkid; 728 zep.zb_birth = 0; 729 730 /* 731 * In case of an error we should simply continue instead of 732 * returning prematurely. See the next comment. 733 */ 734 uint64_t head_ds; 735 dsl_pool_t *dp = spa->spa_dsl_pool; 736 dsl_dataset_t *ds; 737 objset_t *os; 738 739 int error = dsl_dataset_hold_obj(dp, zb.zb_objset, FTAG, &ds); 740 if (error != 0) 741 continue; 742 743 head_ds = dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj; 744 745 /* 746 * The objset and the dnode are required for getting the block 747 * pointer, which is used to determine if BP_IS_HOLE(). If 748 * getting the objset or the dnode fails, do not create a 749 * zap entry (presuming we know the dataset) as this may create 750 * spurious errors that we cannot ever resolve. If an error is 751 * truly persistent, it should re-appear after a scan. 
752 */ 753 if (dmu_objset_from_ds(ds, &os) != 0) { 754 dsl_dataset_rele(ds, FTAG); 755 continue; 756 } 757 758 dnode_t *dn; 759 blkptr_t bp; 760 761 if (dnode_hold(os, zep.zb_object, FTAG, &dn) != 0) { 762 dsl_dataset_rele(ds, FTAG); 763 continue; 764 } 765 766 rw_enter(&dn->dn_struct_rwlock, RW_READER); 767 error = dbuf_dnode_findbp(dn, zep.zb_level, zep.zb_blkid, &bp, 768 NULL, NULL); 769 if (error == EACCES) 770 error = 0; 771 else if (!error) 772 zep.zb_birth = bp.blk_birth; 773 774 rw_exit(&dn->dn_struct_rwlock); 775 dnode_rele(dn, FTAG); 776 dsl_dataset_rele(ds, FTAG); 777 778 if (error != 0 || BP_IS_HOLE(&bp)) 779 continue; 780 781 uint64_t err_obj; 782 error = zap_lookup_int_key(spa->spa_meta_objset, *newobj, 783 head_ds, &err_obj); 784 785 if (error == ENOENT) { 786 err_obj = zap_create(spa->spa_meta_objset, 787 DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx); 788 789 (void) zap_update_int_key(spa->spa_meta_objset, 790 *newobj, head_ds, err_obj, tx); 791 } 792 793 char buf[64]; 794 errphys_to_name(&zep, buf, sizeof (buf)); 795 796 const char *name = ""; 797 (void) zap_update(spa->spa_meta_objset, err_obj, 798 buf, 1, strlen(name) + 1, name, tx); 799 } 800 zap_cursor_fini(&zc); 801 802 VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx)); 803 } 804 805 void 806 spa_upgrade_errlog(spa_t *spa, dmu_tx_t *tx) 807 { 808 uint64_t newobj = 0; 809 810 mutex_enter(&spa->spa_errlog_lock); 811 if (spa->spa_errlog_last != 0) { 812 sync_upgrade_errlog(spa, spa->spa_errlog_last, &newobj, tx); 813 spa->spa_errlog_last = newobj; 814 } 815 816 if (spa->spa_errlog_scrub != 0) { 817 sync_upgrade_errlog(spa, spa->spa_errlog_scrub, &newobj, tx); 818 spa->spa_errlog_scrub = newobj; 819 } 820 mutex_exit(&spa->spa_errlog_lock); 821 } 822 823 #ifdef _KERNEL 824 /* 825 * If an error block is shared by two datasets it will be counted twice. 
826 */ 827 static int 828 process_error_log(spa_t *spa, uint64_t obj, void *uaddr, uint64_t *count) 829 { 830 zap_cursor_t zc; 831 zap_attribute_t za; 832 833 if (obj == 0) 834 return (0); 835 836 if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) { 837 for (zap_cursor_init(&zc, spa->spa_meta_objset, obj); 838 zap_cursor_retrieve(&zc, &za) == 0; 839 zap_cursor_advance(&zc)) { 840 if (*count == 0) { 841 zap_cursor_fini(&zc); 842 return (SET_ERROR(ENOMEM)); 843 } 844 845 zbookmark_phys_t zb; 846 name_to_bookmark(za.za_name, &zb); 847 848 int error = copyout_entry(&zb, uaddr, count); 849 if (error != 0) { 850 zap_cursor_fini(&zc); 851 return (error); 852 } 853 } 854 zap_cursor_fini(&zc); 855 return (0); 856 } 857 858 for (zap_cursor_init(&zc, spa->spa_meta_objset, obj); 859 zap_cursor_retrieve(&zc, &za) == 0; 860 zap_cursor_advance(&zc)) { 861 862 zap_cursor_t head_ds_cursor; 863 zap_attribute_t head_ds_attr; 864 865 uint64_t head_ds_err_obj = za.za_first_integer; 866 uint64_t head_ds; 867 name_to_object(za.za_name, &head_ds); 868 for (zap_cursor_init(&head_ds_cursor, spa->spa_meta_objset, 869 head_ds_err_obj); zap_cursor_retrieve(&head_ds_cursor, 870 &head_ds_attr) == 0; zap_cursor_advance(&head_ds_cursor)) { 871 872 zbookmark_err_phys_t head_ds_block; 873 name_to_errphys(head_ds_attr.za_name, &head_ds_block); 874 int error = process_error_block(spa, head_ds, 875 &head_ds_block, uaddr, count); 876 877 if (error != 0) { 878 zap_cursor_fini(&head_ds_cursor); 879 zap_cursor_fini(&zc); 880 return (error); 881 } 882 } 883 zap_cursor_fini(&head_ds_cursor); 884 } 885 zap_cursor_fini(&zc); 886 return (0); 887 } 888 889 static int 890 process_error_list(spa_t *spa, avl_tree_t *list, void *uaddr, uint64_t *count) 891 { 892 spa_error_entry_t *se; 893 894 if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) { 895 for (se = avl_first(list); se != NULL; 896 se = AVL_NEXT(list, se)) { 897 int error = 898 copyout_entry(&se->se_bookmark, uaddr, count); 899 if (error != 
0) { 900 return (error); 901 } 902 } 903 return (0); 904 } 905 906 for (se = avl_first(list); se != NULL; se = AVL_NEXT(list, se)) { 907 uint64_t head_ds = 0; 908 int error = get_head_ds(spa, se->se_bookmark.zb_objset, 909 &head_ds); 910 911 /* 912 * If get_head_ds() errors out, set the head filesystem 913 * to the filesystem stored in the bookmark of the 914 * error block. 915 */ 916 if (error != 0) 917 head_ds = se->se_bookmark.zb_objset; 918 919 error = process_error_block(spa, head_ds, 920 &se->se_zep, uaddr, count); 921 if (error != 0) 922 return (error); 923 } 924 return (0); 925 } 926 #endif 927 928 /* 929 * Copy all known errors to userland as an array of bookmarks. This is 930 * actually a union of the on-disk last log and current log, as well as any 931 * pending error requests. 932 * 933 * Because the act of reading the on-disk log could cause errors to be 934 * generated, we have two separate locks: one for the error log and one for the 935 * in-core error lists. We only need the error list lock to log and error, so 936 * we grab the error log lock while we read the on-disk logs, and only pick up 937 * the error list lock when we are finished. 938 */ 939 int 940 spa_get_errlog(spa_t *spa, void *uaddr, uint64_t *count) 941 { 942 int ret = 0; 943 944 #ifdef _KERNEL 945 /* 946 * The pool config lock is needed to hold a dataset_t via (among other 947 * places) process_error_list() -> process_error_block()-> 948 * find_top_affected_fs(), and lock ordering requires that we get it 949 * before the spa_errlog_lock. 
950 */ 951 dsl_pool_config_enter(spa->spa_dsl_pool, FTAG); 952 mutex_enter(&spa->spa_errlog_lock); 953 954 ret = process_error_log(spa, spa->spa_errlog_scrub, uaddr, count); 955 956 if (!ret && !spa->spa_scrub_finished) 957 ret = process_error_log(spa, spa->spa_errlog_last, uaddr, 958 count); 959 960 mutex_enter(&spa->spa_errlist_lock); 961 if (!ret) 962 ret = process_error_list(spa, &spa->spa_errlist_scrub, uaddr, 963 count); 964 if (!ret) 965 ret = process_error_list(spa, &spa->spa_errlist_last, uaddr, 966 count); 967 mutex_exit(&spa->spa_errlist_lock); 968 969 mutex_exit(&spa->spa_errlog_lock); 970 dsl_pool_config_exit(spa->spa_dsl_pool, FTAG); 971 #else 972 (void) spa, (void) uaddr, (void) count; 973 #endif 974 975 return (ret); 976 } 977 978 /* 979 * Called when a scrub completes. This simply set a bit which tells which AVL 980 * tree to add new errors. spa_errlog_sync() is responsible for actually 981 * syncing the changes to the underlying objects. 982 */ 983 void 984 spa_errlog_rotate(spa_t *spa) 985 { 986 mutex_enter(&spa->spa_errlist_lock); 987 spa->spa_scrub_finished = B_TRUE; 988 mutex_exit(&spa->spa_errlist_lock); 989 } 990 991 /* 992 * Discard any pending errors from the spa_t. Called when unloading a faulted 993 * pool, as the errors encountered during the open cannot be synced to disk. 994 */ 995 void 996 spa_errlog_drain(spa_t *spa) 997 { 998 spa_error_entry_t *se; 999 void *cookie; 1000 1001 mutex_enter(&spa->spa_errlist_lock); 1002 1003 cookie = NULL; 1004 while ((se = avl_destroy_nodes(&spa->spa_errlist_last, 1005 &cookie)) != NULL) 1006 kmem_free(se, sizeof (spa_error_entry_t)); 1007 cookie = NULL; 1008 while ((se = avl_destroy_nodes(&spa->spa_errlist_scrub, 1009 &cookie)) != NULL) 1010 kmem_free(se, sizeof (spa_error_entry_t)); 1011 1012 mutex_exit(&spa->spa_errlist_lock); 1013 } 1014 1015 /* 1016 * Process a list of errors into the current on-disk log. 
 *
 * 't' is an in-core AVL tree of spa_error_entry_t; '*obj' is the object
 * number of the on-disk ZAP log, created here on first use.  All ZAP
 * writes go through the caller-supplied tx.  On return the tree has been
 * emptied and its entries freed.
 */
void
sync_error_list(spa_t *spa, avl_tree_t *t, uint64_t *obj, dmu_tx_t *tx)
{
	spa_error_entry_t *se;
	char buf[NAME_MAX_LEN];
	void *cookie;

	if (avl_numnodes(t) == 0)
		return;

	/* create log if necessary */
	if (*obj == 0)
		*obj = zap_create(spa->spa_meta_objset, DMU_OT_ERROR_LOG,
		    DMU_OT_NONE, 0, tx);

	/* add errors to the current log */
	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		/*
		 * Legacy format: one flat ZAP keyed by the string form of
		 * the zbookmark_phys tuple; the value is the (possibly
		 * empty) human-readable name.
		 */
		for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
			bookmark_to_name(&se->se_bookmark, buf, sizeof (buf));

			const char *name = se->se_name ? se->se_name : "";
			(void) zap_update(spa->spa_meta_objset, *obj, buf, 1,
			    strlen(name) + 1, name, tx);
		}
	} else {
		/*
		 * head_errlog format: the top-level ZAP maps each head
		 * dataset id to a per-dataset sub-ZAP, which is keyed by
		 * the zbookmark_err_phys_t (includes the block's birth txg).
		 */
		for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
			zbookmark_err_phys_t zep;
			zep.zb_object = se->se_zep.zb_object;
			zep.zb_level = se->se_zep.zb_level;
			zep.zb_blkid = se->se_zep.zb_blkid;
			zep.zb_birth = se->se_zep.zb_birth;

			uint64_t head_ds = 0;
			int error = get_head_ds(spa, se->se_bookmark.zb_objset,
			    &head_ds);

			/*
			 * If get_head_ds() errors out, set the head filesystem
			 * to the filesystem stored in the bookmark of the
			 * error block.
			 */
			if (error != 0)
				head_ds = se->se_bookmark.zb_objset;

			/* Find (or create) this head dataset's sub-ZAP. */
			uint64_t err_obj;
			error = zap_lookup_int_key(spa->spa_meta_objset,
			    *obj, head_ds, &err_obj);

			if (error == ENOENT) {
				err_obj = zap_create(spa->spa_meta_objset,
				    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

				(void) zap_update_int_key(spa->spa_meta_objset,
				    *obj, head_ds, err_obj, tx);
			}
			errphys_to_name(&zep, buf, sizeof (buf));

			const char *name = se->se_name ? se->se_name : "";
			(void) zap_update(spa->spa_meta_objset,
			    err_obj, buf, 1, strlen(name) + 1, name, tx);
		}
	}
	/* purge the error list */
	cookie = NULL;
	while ((se = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));
}

/*
 * Free an on-disk error log object.  With head_errlog enabled the top-level
 * ZAP maps head dataset ids to per-dataset sub-ZAP objects (za_first_integer),
 * so those sub-objects must be freed before the top-level object itself.
 */
static void
delete_errlog(spa_t *spa, uint64_t spa_err_obj, dmu_tx_t *tx)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		zap_cursor_t zc;
		zap_attribute_t za;
		for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    zap_cursor_advance(&zc)) {
			VERIFY0(dmu_object_free(spa->spa_meta_objset,
			    za.za_first_integer, tx));
		}
		zap_cursor_fini(&zc);
	}
	VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
}

/*
 * Sync the error log out to disk. This is a little tricky because the act of
 * writing the error log requires the spa_errlist_lock. So, we need to lock the
 * error lists, take a copy of the lists, and then reinitialize them. Then, we
 * drop the error list lock and take the error log lock, at which point we
 * do the errlog processing. Then, if we encounter an I/O error during this
 * process, we can successfully add the error to the list. Note that this will
 * result in the perpetual recycling of errors, but it is an unlikely situation
 * and not a performance critical operation.
 */
void
spa_errlog_sync(spa_t *spa, uint64_t txg)
{
	dmu_tx_t *tx;
	avl_tree_t scrub, last;
	int scrub_finished;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * Bail out early under normal circumstances.
	 */
	if (avl_numnodes(&spa->spa_errlist_scrub) == 0 &&
	    avl_numnodes(&spa->spa_errlist_last) == 0 &&
	    avl_numnodes(&spa->spa_errlist_healed) == 0 &&
	    !spa->spa_scrub_finished) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	/* Steal the lists and clear spa_scrub_finished under the list lock. */
	spa_get_errlists(spa, &last, &scrub);
	scrub_finished = spa->spa_scrub_finished;
	spa->spa_scrub_finished = B_FALSE;

	mutex_exit(&spa->spa_errlist_lock);

	/*
	 * The pool config lock is needed to hold a dataset_t via
	 * sync_error_list() -> get_head_ds(), and lock ordering
	 * requires that we get it before the spa_errlog_lock.
	 */
	dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
	mutex_enter(&spa->spa_errlog_lock);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	/*
	 * Remove healed errors from errors.
	 */
	spa_remove_healed_errors(spa, &last, &scrub, tx);

	/*
	 * Sync out the current list of errors.
	 */
	sync_error_list(spa, &last, &spa->spa_errlog_last, tx);

	/*
	 * Rotate the log if necessary: the completed scrub's log becomes the
	 * last log, and the old last log is deleted.
	 */
	if (scrub_finished) {
		if (spa->spa_errlog_last != 0)
			delete_errlog(spa, spa->spa_errlog_last, tx);
		spa->spa_errlog_last = spa->spa_errlog_scrub;
		spa->spa_errlog_scrub = 0;

		sync_error_list(spa, &scrub, &spa->spa_errlog_last, tx);
	}

	/*
	 * Sync out any pending scrub errors.
	 */
	sync_error_list(spa, &scrub, &spa->spa_errlog_scrub, tx);

	/*
	 * Update the MOS to reflect the new values.
	 */
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_LAST, sizeof (uint64_t), 1,
	    &spa->spa_errlog_last, tx);
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_SCRUB, sizeof (uint64_t), 1,
	    &spa->spa_errlog_scrub, tx);

	dmu_tx_commit(tx);

	mutex_exit(&spa->spa_errlog_lock);
	dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
}

/*
 * Remove head dataset 'ds' from the head_errlog top-level ZAP 'spa_err_obj',
 * freeing its per-dataset sub-ZAP (za_first_integer).  No-op if the log does
 * not exist or the dataset has no entry.
 */
static void
delete_dataset_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t ds,
    dmu_tx_t *tx)
{
	if (spa_err_obj == 0)
		return;

	zap_cursor_t zc;
	zap_attribute_t za;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {
		uint64_t head_ds;
		name_to_object(za.za_name, &head_ds);
		if (head_ds == ds) {
			(void) zap_remove(spa->spa_meta_objset, spa_err_obj,
			    za.za_name, tx);
			VERIFY0(dmu_object_free(spa->spa_meta_objset,
			    za.za_first_integer, tx));
			break;
		}
	}
	zap_cursor_fini(&zc);
}

/*
 * Delete a head dataset's errors from both the scrub and last on-disk logs.
 */
void
spa_delete_dataset_errlog(spa_t *spa, uint64_t ds, dmu_tx_t *tx)
{
	mutex_enter(&spa->spa_errlog_lock);
	delete_dataset_errlog(spa, spa->spa_errlog_scrub, ds, tx);
	delete_dataset_errlog(spa, spa->spa_errlog_last, ds, tx);
	mutex_exit(&spa->spa_errlog_lock);
}

/*
 * Walk old_head's snapshot chain backwards to find the newest snapshot that
 * belongs to new_head's dsl_dir, and return that snapshot's txg in *txg —
 * i.e. the point at which the two filesystems diverged.  Returns 0 on
 * success or the error from dsl_dataset_hold_obj().
 */
static int
find_txg_ancestor_snapshot(spa_t *spa, uint64_t new_head, uint64_t old_head,
    uint64_t *txg)
{
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa->spa_dsl_pool;

	int error = dsl_dataset_hold_obj(dp, old_head, FTAG, &ds);
	if (error != 0)
		return (error);

	uint64_t prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	uint64_t prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;

	/* Step to each previous snapshot until one owned by new_head. */
	while (prev_obj != 0) {
		dsl_dataset_rele(ds, FTAG);
		if ((error = dsl_dataset_hold_obj(dp, prev_obj,
		    FTAG,
		    &ds)) == 0 &&
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj == new_head)
			break;

		if (error != 0)
			return (error);

		prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
		prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	}
	dsl_dataset_rele(ds, FTAG);
	/*
	 * NOTE(review): if the chain is exhausted without finding an ancestor
	 * owned by new_head, prev_obj is 0 here and the ASSERT fires on debug
	 * builds; callers presumably only pass promote pairs that share a
	 * snapshot — confirm against the promote code path.
	 */
	ASSERT(prev_obj != 0);
	*txg = prev_obj_txg;
	return (0);
}

/*
 * During clone promotion, move the entries for error blocks born before the
 * divergence txg (i.e. blocks shared with the promoted clone) from the old
 * head's per-dataset errlog into the new head's, within the head_errlog
 * top-level ZAP 'spa_err_obj'.
 */
static void
swap_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t new_head, uint64_t
    old_head, dmu_tx_t *tx)
{
	if (spa_err_obj == 0)
		return;

	uint64_t old_head_errlog;
	int error = zap_lookup_int_key(spa->spa_meta_objset, spa_err_obj,
	    old_head, &old_head_errlog);

	/* If no error log, then there is nothing to do. */
	if (error != 0)
		return;

	uint64_t txg;
	error = find_txg_ancestor_snapshot(spa, new_head, old_head, &txg);
	if (error != 0)
		return;

	/*
	 * Create an error log if the file system being promoted does not
	 * already have one.
	 */
	uint64_t new_head_errlog;
	error = zap_lookup_int_key(spa->spa_meta_objset, spa_err_obj, new_head,
	    &new_head_errlog);

	if (error != 0) {
		new_head_errlog = zap_create(spa->spa_meta_objset,
		    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

		(void) zap_update_int_key(spa->spa_meta_objset, spa_err_obj,
		    new_head, new_head_errlog, tx);
	}

	zap_cursor_t zc;
	zap_attribute_t za;
	zbookmark_err_phys_t err_block;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, old_head_errlog);
	    zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {

		/*
		 * The entry is written with an empty name; per the format
		 * described at the top of this file, the human-readable name
		 * is recomputed lazily when the log is queried.
		 */
		const char *name = "";
		name_to_errphys(za.za_name, &err_block);
		if (err_block.zb_birth < txg) {
			(void) zap_update(spa->spa_meta_objset, new_head_errlog,
			    za.za_name, 1, strlen(name) + 1, name, tx);

			(void) zap_remove(spa->spa_meta_objset, old_head_errlog,
			    za.za_name, tx);
		}
	}
	zap_cursor_fini(&zc);
}

/*
 * Move shared error log entries between heads in both the scrub and last
 * logs when a clone is promoted.
 */
void
spa_swap_errlog(spa_t *spa, uint64_t new_head_ds, uint64_t old_head_ds,
    dmu_tx_t *tx)
{
	mutex_enter(&spa->spa_errlog_lock);
	swap_errlog(spa, spa->spa_errlog_scrub, new_head_ds, old_head_ds, tx);
	swap_errlog(spa, spa->spa_errlog_last, new_head_ds, old_head_ds, tx);
	mutex_exit(&spa->spa_errlog_lock);
}

#if defined(_KERNEL)
/* error handling */
EXPORT_SYMBOL(spa_log_error);
EXPORT_SYMBOL(spa_approx_errlog_size);
EXPORT_SYMBOL(spa_get_errlog);
EXPORT_SYMBOL(spa_errlog_rotate);
EXPORT_SYMBOL(spa_errlog_drain);
EXPORT_SYMBOL(spa_errlog_sync);
EXPORT_SYMBOL(spa_get_errlists);
EXPORT_SYMBOL(spa_delete_dataset_errlog);
EXPORT_SYMBOL(spa_swap_errlog);
EXPORT_SYMBOL(sync_error_list);
EXPORT_SYMBOL(spa_upgrade_errlog);
#endif

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_spa, spa_, upgrade_errlog_limit, UINT, ZMOD_RW,
	"Limit the number of errors which will be upgraded to the new "
	"on-disk error log when enabling head_errlog");
/* END CSTYLED */