/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2014, Delphix. All rights reserved.
 * Copyright (c) 2021, George Amanakis. All rights reserved.
 * Copyright (c) 2019 Datto Inc.
 */

/*
 * Routines to manage the on-disk persistent error log.
 *
 * Each pool stores a log of all logical data errors seen during normal
 * operation.  This is actually the union of two distinct logs: the last log,
 * and the current log.  All errors seen are logged to the current log.  When a
 * scrub completes, the current log becomes the last log, the previous last log
 * is discarded, and the current log is reinitialized.  This way, if an error
 * is somehow corrected, a subsequent scrub will show that it no longer exists,
 * and it will be deleted from the log when the scrub completes.
 *
 * The log is stored using a ZAP object whose key is a string form of the
 * zbookmark_phys tuple (objset, object, level, blkid), and whose value is an
 * optional 'objset:object' human-readable string describing the data.  When an
 * error is first logged, this string will be empty, indicating that no name is
 * known.  This prevents us from having to issue a potentially large amount of
 * I/O to discover the object name during an error path.  Instead, we do the
 * calculation when the data is requested, storing the result so future queries
 * will be faster.
 *
 * If the head_errlog feature is enabled, a different on-disk format is used.
 * The error log of each head dataset is stored separately in the zap object
 * and keyed by the head id.  This enables listing every dataset affected in
 * userland.  To track whether an error block has been modified or added to
 * snapshots since it was marked as an error, a new tuple is introduced:
 * zbookmark_err_phys_t.  It allows the storage of the birth transaction group
 * of an error block on-disk.  The birth transaction group is used by
 * check_filesystem() to assess whether this block was freed, re-written or
 * added to a snapshot since it was marked as an error.
 *
 * This log is then shipped into an nvlist where the key is the dataset name
 * and the value is the object name.  Userland is then responsible for
 * uniquifying this list and displaying it to the user.
 */
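
/*
 * An illustrative sketch of the two key layouts (the numbers here are
 * hypothetical): without head_errlog, a single entry may be keyed
 * "21:5:0:3" (objset:object:level:blkid, all in hex, as produced by
 * bookmark_to_name() below).  With head_errlog, the top-level ZAP is
 * keyed by head dataset id, and each per-dataset ZAP entry is keyed
 * "5:0:3:1a2b" (object:level:blkid:birth, see errphys_to_name()).
 */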

#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_objset.h>
#include <sys/dbuf.h>

#define	NAME_MAX_LEN 64

/*
 * spa_upgrade_errlog_limit: A zfs module parameter that controls the number
 *		of on-disk error log entries that will be converted to the new
 *		format when enabling head_errlog. Defaults to 0 which converts
 *		all log entries.
 */
static uint32_t spa_upgrade_errlog_limit = 0;

/*
 * Convert a bookmark to a string.
 */
static void
bookmark_to_name(zbookmark_phys_t *zb, char *buf, size_t len)
{
	(void) snprintf(buf, len, "%llx:%llx:%llx:%llx",
	    (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
	    (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid);
}

/*
 * Convert an err_phys to a string.
 */
static void
errphys_to_name(zbookmark_err_phys_t *zep, char *buf, size_t len)
{
	(void) snprintf(buf, len, "%llx:%llx:%llx:%llx",
	    (u_longlong_t)zep->zb_object, (u_longlong_t)zep->zb_level,
	    (u_longlong_t)zep->zb_blkid, (u_longlong_t)zep->zb_birth);
}

/*
 * Convert a string to an err_phys.
 */
static void
name_to_errphys(char *buf, zbookmark_err_phys_t *zep)
{
	zep->zb_object = zfs_strtonum(buf, &buf);
	ASSERT(*buf == ':');
	zep->zb_level = (int)zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zep->zb_blkid = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zep->zb_birth = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == '\0');
}

/*
 * Convert a string to a bookmark.
 */
static void
name_to_bookmark(char *buf, zbookmark_phys_t *zb)
{
	zb->zb_objset = zfs_strtonum(buf, &buf);
	ASSERT(*buf == ':');
	zb->zb_object = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_level = (int)zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_blkid = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == '\0');
}

#ifdef _KERNEL
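/*
 * Reconstruct a full bookmark from an error-phys tuple and the id of the
 * dataset (objset) it was found in.
 */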
static void
zep_to_zb(uint64_t dataset, zbookmark_err_phys_t *zep, zbookmark_phys_t *zb)
{
	zb->zb_objset = dataset;
	zb->zb_object = zep->zb_object;
	zb->zb_level = zep->zb_level;
	zb->zb_blkid = zep->zb_blkid;
}
#endif

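/*
 * Parse a string holding a single object number (a head dataset id).
 */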
static void
name_to_object(char *buf, uint64_t *obj)
{
	*obj = zfs_strtonum(buf, &buf);
	ASSERT(*buf == '\0');
}

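/*
 * Retrieve the head dataset id of the dataset that ds_obj belongs to, and
 * fill in zep->zb_birth with the birth txg of the block pointer currently
 * found on disk for the given error tuple.
 */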
static int
get_head_and_birth_txg(spa_t *spa, zbookmark_err_phys_t *zep, uint64_t ds_obj,
    uint64_t *head_dataset_id)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	dsl_dataset_t *ds;
	objset_t *os;

	dsl_pool_config_enter(dp, FTAG);
	int error = dsl_dataset_hold_obj(dp, ds_obj, FTAG, &ds);
	if (error != 0) {
		dsl_pool_config_exit(dp, FTAG);
		return (error);
	}
	ASSERT(head_dataset_id);
	*head_dataset_id = dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;

	error = dmu_objset_from_ds(ds, &os);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		dsl_pool_config_exit(dp, FTAG);
		return (error);
	}

	dnode_t *dn;
	blkptr_t bp;

	error = dnode_hold(os, zep->zb_object, FTAG, &dn);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		dsl_pool_config_exit(dp, FTAG);
		return (error);
	}

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	error = dbuf_dnode_findbp(dn, zep->zb_level, zep->zb_blkid, &bp, NULL,
	    NULL);

	if (error == 0 && BP_IS_HOLE(&bp))
		error = SET_ERROR(ENOENT);

	zep->zb_birth = bp.blk_birth;
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	dsl_dataset_rele(ds, FTAG);
	dsl_pool_config_exit(dp, FTAG);
	return (error);
}

/*
 * Log an uncorrectable error to the persistent error log.  We add it to the
 * spa's list of pending errors.  The changes are actually synced out to disk
 * during spa_errlog_sync().
 */
void
spa_log_error(spa_t *spa, const zbookmark_phys_t *zb)
{
	spa_error_entry_t search;
	spa_error_entry_t *new;
	avl_tree_t *tree;
	avl_index_t where;

	/*
	 * If we are trying to import a pool, ignore any errors, as we won't be
	 * writing to the pool any time soon.
	 */
	if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
		return;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * If we have had a request to rotate the log, log it to the next list
	 * instead of the current one.
	 */
	if (spa->spa_scrub_active || spa->spa_scrub_finished)
		tree = &spa->spa_errlist_scrub;
	else
		tree = &spa->spa_errlist_last;

	search.se_bookmark = *zb;
	if (avl_find(tree, &search, &where) != NULL) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
	new->se_bookmark = *zb;
	avl_insert(tree, new, where);

	mutex_exit(&spa->spa_errlist_lock);
}

#ifdef _KERNEL
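/*
 * Fill in *birth_txg with the birth txg of the block named by zep in
 * dataset ds, based on the block pointer found on disk.  Returns ENOENT
 * if the block is a hole.
 */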
static int
find_birth_txg(dsl_dataset_t *ds, zbookmark_err_phys_t *zep,
    uint64_t *birth_txg)
{
	objset_t *os;
	int error = dmu_objset_from_ds(ds, &os);
	if (error != 0)
		return (error);

	dnode_t *dn;
	blkptr_t bp;

	error = dnode_hold(os, zep->zb_object, FTAG, &dn);
	if (error != 0)
		return (error);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	error = dbuf_dnode_findbp(dn, zep->zb_level, zep->zb_blkid, &bp, NULL,
	    NULL);

	if (error == 0 && BP_IS_HOLE(&bp))
		error = SET_ERROR(ENOENT);

	*birth_txg = bp.blk_birth;
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (error);
}

/*
 * This function serves a double role. If only_count is true, it returns
 * (in *count) how many times an error block belonging to this filesystem is
 * referenced by snapshots or clones. If only_count is false, each time the
 * error block is referenced by a snapshot or clone, it fills the userspace
 * array at uaddr with the bookmarks of the error blocks. The array is filled
 * from the back and *count is modified to be the number of unused entries at
 * the beginning of the array.
 */
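/*
 * Illustrative sketch (hypothetical numbers): if *count enters as 4 and
 * two bookmarks are copied out, they land in slots [3] and [2] of the
 * uaddr array, and *count leaves as 2, the number of unused slots at the
 * front.
 */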
static int
check_filesystem(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    uint64_t *count, void *uaddr, boolean_t only_count)
{
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa->spa_dsl_pool;

	int error = dsl_dataset_hold_obj(dp, head_ds, FTAG, &ds);
	if (error != 0)
		return (error);

	uint64_t latest_txg;
	uint64_t txg_to_consider = spa->spa_syncing_txg;
	boolean_t check_snapshot = B_TRUE;
	error = find_birth_txg(ds, zep, &latest_txg);
	if (error == 0) {
		if (zep->zb_birth == latest_txg) {
			/* Block neither free nor rewritten. */
			if (!only_count) {
				zbookmark_phys_t zb;
				zep_to_zb(head_ds, zep, &zb);
				if (copyout(&zb, (char *)uaddr + (*count - 1)
				    * sizeof (zbookmark_phys_t),
				    sizeof (zbookmark_phys_t)) != 0) {
					dsl_dataset_rele(ds, FTAG);
					return (SET_ERROR(EFAULT));
				}
				(*count)--;
			} else {
				(*count)++;
			}
			check_snapshot = B_FALSE;
		} else {
			ASSERT3U(zep->zb_birth, <, latest_txg);
			txg_to_consider = latest_txg;
		}
	}

	/* How many snapshots does this filesystem have? */
	uint64_t snap_count;
	error = zap_count(spa->spa_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	if (snap_count == 0) {
		/* The file system has no snapshots. */
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	uint64_t *snap_obj_array = kmem_alloc(snap_count * sizeof (uint64_t),
	    KM_SLEEP);

	int aff_snap_count = 0;
	uint64_t snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	uint64_t snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;

	/* Check only snapshots created from this file system. */
	while (snap_obj != 0 && zep->zb_birth < snap_obj_txg &&
	    snap_obj_txg <= txg_to_consider) {

		dsl_dataset_rele(ds, FTAG);
		error = dsl_dataset_hold_obj(dp, snap_obj, FTAG, &ds);
		if (error != 0)
			goto out;

		if (dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj != head_ds)
			break;

		boolean_t affected = B_TRUE;
		if (check_snapshot) {
			uint64_t blk_txg;
			error = find_birth_txg(ds, zep, &blk_txg);
			affected = (error == 0 && zep->zb_birth == blk_txg);
		}

		if (affected) {
			snap_obj_array[aff_snap_count] = snap_obj;
			aff_snap_count++;

			if (!only_count) {
				zbookmark_phys_t zb;
				zep_to_zb(snap_obj, zep, &zb);
				if (copyout(&zb, (char *)uaddr + (*count - 1) *
				    sizeof (zbookmark_phys_t),
				    sizeof (zbookmark_phys_t)) != 0) {
					dsl_dataset_rele(ds, FTAG);
					error = SET_ERROR(EFAULT);
					goto out;
				}
				(*count)--;
			} else {
				(*count)++;
			}

			/*
			 * Only clones whose origins were affected could also
			 * have affected snapshots.
			 */
			zap_cursor_t zc;
			zap_attribute_t za;
			for (zap_cursor_init(&zc, spa->spa_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    zap_cursor_advance(&zc)) {
				error = check_filesystem(spa,
				    za.za_first_integer, zep,
				    count, uaddr, only_count);

				if (error != 0) {
					zap_cursor_fini(&zc);
					goto out;
				}
			}
			zap_cursor_fini(&zc);
		}
		snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
		snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	}
	dsl_dataset_rele(ds, FTAG);

out:
	/* Free the full array; it was allocated with snap_count entries. */
	kmem_free(snap_obj_array, snap_count * sizeof (uint64_t));
	return (error);
}

static int
find_top_affected_fs(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    uint64_t *top_affected_fs)
{
	uint64_t oldest_dsobj;
	int error = dsl_dataset_oldest_snapshot(spa, head_ds, zep->zb_birth,
	    &oldest_dsobj);
	if (error != 0)
		return (error);

	dsl_dataset_t *ds;
	error = dsl_dataset_hold_obj(spa->spa_dsl_pool, oldest_dsobj,
	    FTAG, &ds);
	if (error != 0)
		return (error);

	*top_affected_fs =
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static int
process_error_block(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    uint64_t *count, void *uaddr, boolean_t only_count)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	dsl_pool_config_enter(dp, FTAG);
	uint64_t top_affected_fs;

	int error = find_top_affected_fs(spa, head_ds, zep, &top_affected_fs);
	if (error == 0)
		error = check_filesystem(spa, top_affected_fs, zep, count,
		    uaddr, only_count);

	dsl_pool_config_exit(dp, FTAG);
	return (error);
}

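/*
 * Return the number of error blocks recorded in a head_errlog-format log
 * object, counting one entry per dataset (snapshot or clone) that still
 * references each block.
 */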
static uint64_t
get_errlog_size(spa_t *spa, uint64_t spa_err_obj)
{
	if (spa_err_obj == 0)
		return (0);
	uint64_t total = 0;

	zap_cursor_t zc;
	zap_attribute_t za;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {

		zap_cursor_t head_ds_cursor;
		zap_attribute_t head_ds_attr;
		zbookmark_err_phys_t head_ds_block;

		uint64_t head_ds;
		name_to_object(za.za_name, &head_ds);

		for (zap_cursor_init(&head_ds_cursor, spa->spa_meta_objset,
		    za.za_first_integer); zap_cursor_retrieve(&head_ds_cursor,
		    &head_ds_attr) == 0; zap_cursor_advance(&head_ds_cursor)) {

			name_to_errphys(head_ds_attr.za_name, &head_ds_block);
			(void) process_error_block(spa, head_ds, &head_ds_block,
			    &total, NULL, B_TRUE);
		}
		zap_cursor_fini(&head_ds_cursor);
	}
	zap_cursor_fini(&zc);
	return (total);
}

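/*
 * As above, but for the in-core error list: count how many datasets still
 * reference each error block sitting in the given AVL tree.
 */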
static uint64_t
get_errlist_size(spa_t *spa, avl_tree_t *tree)
{
	if (avl_numnodes(tree) == 0)
		return (0);
	uint64_t total = 0;

	spa_error_entry_t *se;
	for (se = avl_first(tree); se != NULL; se = AVL_NEXT(tree, se)) {
		zbookmark_err_phys_t zep;
		zep.zb_object = se->se_bookmark.zb_object;
		zep.zb_level = se->se_bookmark.zb_level;
		zep.zb_blkid = se->se_bookmark.zb_blkid;

		/*
		 * If we cannot find out the head dataset and birth txg of
		 * the present error block, we opt not to error out. In the
		 * next pool sync this information will be retrieved by
		 * sync_error_list() and written to the on-disk error log.
		 */
		uint64_t head_ds_obj;
		if (get_head_and_birth_txg(spa, &zep,
		    se->se_bookmark.zb_objset, &head_ds_obj) == 0)
			(void) process_error_block(spa, head_ds_obj, &zep,
			    &total, NULL, B_TRUE);
	}
	return (total);
}
#endif

/*
 * If a healed bookmark matches an entry in the error log we stash it in a tree
 * so that we can later remove the related log entries in sync context.
 */
static void
spa_add_healed_error(spa_t *spa, uint64_t obj, zbookmark_phys_t *healed_zb)
{
	char name[NAME_MAX_LEN];

	if (obj == 0)
		return;

	bookmark_to_name(healed_zb, name, sizeof (name));
	mutex_enter(&spa->spa_errlog_lock);
	if (zap_contains(spa->spa_meta_objset, obj, name) == 0) {
		/*
		 * Found an error matching healed zb, so add zb to our
		 * tree of healed errors.
		 */
		avl_tree_t *tree = &spa->spa_errlist_healed;
		spa_error_entry_t search;
		spa_error_entry_t *new;
		avl_index_t where;
		search.se_bookmark = *healed_zb;
		mutex_enter(&spa->spa_errlist_lock);
		if (avl_find(tree, &search, &where) != NULL) {
			mutex_exit(&spa->spa_errlist_lock);
			mutex_exit(&spa->spa_errlog_lock);
			return;
		}
		new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
		new->se_bookmark = *healed_zb;
		avl_insert(tree, new, where);
		mutex_exit(&spa->spa_errlist_lock);
	}
	mutex_exit(&spa->spa_errlog_lock);
}

/*
 * If this error exists in the given tree remove it.
 */
static void
remove_error_from_list(spa_t *spa, avl_tree_t *t, const zbookmark_phys_t *zb)
{
	spa_error_entry_t search, *found;
	avl_index_t where;

	mutex_enter(&spa->spa_errlist_lock);
	search.se_bookmark = *zb;
	if ((found = avl_find(t, &search, &where)) != NULL) {
		avl_remove(t, found);
		kmem_free(found, sizeof (spa_error_entry_t));
	}
	mutex_exit(&spa->spa_errlist_lock);
}

/*
 * Removes all of the recv healed errors from both on-disk error logs.
 */
static void
spa_remove_healed_errors(spa_t *spa, avl_tree_t *s, avl_tree_t *l, dmu_tx_t *tx)
{
	char name[NAME_MAX_LEN];
	spa_error_entry_t *se;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(&spa->spa_errlog_lock));

	while ((se = avl_destroy_nodes(&spa->spa_errlist_healed,
	    &cookie)) != NULL) {
		remove_error_from_list(spa, s, &se->se_bookmark);
		remove_error_from_list(spa, l, &se->se_bookmark);
		bookmark_to_name(&se->se_bookmark, name, sizeof (name));
		kmem_free(se, sizeof (spa_error_entry_t));
		(void) zap_remove(spa->spa_meta_objset,
		    spa->spa_errlog_last, name, tx);
		(void) zap_remove(spa->spa_meta_objset,
		    spa->spa_errlog_scrub, name, tx);
	}
}

/*
 * Stash away healed bookmarks to remove them from the on-disk error logs
 * later in spa_remove_healed_errors().
 */
void
spa_remove_error(spa_t *spa, zbookmark_phys_t *zb)
{
	spa_add_healed_error(spa, spa->spa_errlog_last, zb);
	spa_add_healed_error(spa, spa->spa_errlog_scrub, zb);
}

/*
 * Return the number of errors currently in the error log.  This is actually
 * the sum of both the last log and the current log, since we don't know the
 * union of these logs until we reach userland.  With the head_errlog feature
 * enabled, an error block that is referenced by more than one dataset is
 * counted once for each dataset that references it.
 */
uint64_t
spa_get_errlog_size(spa_t *spa)
{
	uint64_t total = 0;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		mutex_enter(&spa->spa_errlog_lock);
		uint64_t count;
		if (spa->spa_errlog_scrub != 0 &&
		    zap_count(spa->spa_meta_objset, spa->spa_errlog_scrub,
		    &count) == 0)
			total += count;

		if (spa->spa_errlog_last != 0 && !spa->spa_scrub_finished &&
		    zap_count(spa->spa_meta_objset, spa->spa_errlog_last,
		    &count) == 0)
			total += count;
		mutex_exit(&spa->spa_errlog_lock);

		mutex_enter(&spa->spa_errlist_lock);
		total += avl_numnodes(&spa->spa_errlist_last);
		total += avl_numnodes(&spa->spa_errlist_scrub);
		mutex_exit(&spa->spa_errlist_lock);
	} else {
#ifdef _KERNEL
		mutex_enter(&spa->spa_errlog_lock);
		total += get_errlog_size(spa, spa->spa_errlog_last);
		total += get_errlog_size(spa, spa->spa_errlog_scrub);
		mutex_exit(&spa->spa_errlog_lock);

		mutex_enter(&spa->spa_errlist_lock);
		total += get_errlist_size(spa, &spa->spa_errlist_last);
		total += get_errlist_size(spa, &spa->spa_errlist_scrub);
		mutex_exit(&spa->spa_errlist_lock);
#endif
	}
	return (total);
}

/*
 * This function sweeps through an on-disk error log and stores all bookmarks
 * as error bookmarks in a new ZAP object.  At the end we discard the old one,
 * and spa_upgrade_errlog() will set the spa's on-disk error log to the new
 * ZAP object.
 */
static void
sync_upgrade_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t *newobj,
    dmu_tx_t *tx)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	zbookmark_phys_t zb;
	uint64_t count;

	*newobj = zap_create(spa->spa_meta_objset, DMU_OT_ERROR_LOG,
	    DMU_OT_NONE, 0, tx);

	/*
	 * If we cannot perform the upgrade we should clear the old on-disk
	 * error logs.
	 */
	if (zap_count(spa->spa_meta_objset, spa_err_obj, &count) != 0) {
		VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
		return;
	}

	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		if (spa_upgrade_errlog_limit != 0 &&
		    zc.zc_cd == spa_upgrade_errlog_limit)
			break;

		name_to_bookmark(za.za_name, &zb);

		zbookmark_err_phys_t zep;
		zep.zb_object = zb.zb_object;
		zep.zb_level = zb.zb_level;
		zep.zb_blkid = zb.zb_blkid;

		/*
		 * We cannot use get_head_and_birth_txg() because it will
		 * acquire the pool config lock, which we already have. In case
		 * of an error we simply continue.
		 */
		uint64_t head_dataset_obj;
		dsl_pool_t *dp = spa->spa_dsl_pool;
		dsl_dataset_t *ds;
		objset_t *os;

		int error = dsl_dataset_hold_obj(dp, zb.zb_objset, FTAG, &ds);
		if (error != 0)
			continue;

		head_dataset_obj =
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;

		/*
		 * The objset and the dnode are required for getting the block
		 * pointer, which is used to determine whether the block is a
		 * hole (BP_IS_HOLE()).  If getting the objset or the dnode
		 * fails, do not create a zap entry (presuming we know the
		 * dataset) as this may create spurious errors that we cannot
		 * ever resolve.  If an error is truly persistent, it should
		 * re-appear after a scan.
		 */
		if (dmu_objset_from_ds(ds, &os) != 0) {
			dsl_dataset_rele(ds, FTAG);
			continue;
		}

		dnode_t *dn;
		blkptr_t bp;

		if (dnode_hold(os, zep.zb_object, FTAG, &dn) != 0) {
			dsl_dataset_rele(ds, FTAG);
			continue;
		}

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		error = dbuf_dnode_findbp(dn, zep.zb_level, zep.zb_blkid, &bp,
		    NULL, NULL);

		zep.zb_birth = bp.blk_birth;
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
		dsl_dataset_rele(ds, FTAG);

		if (error != 0 || BP_IS_HOLE(&bp))
			continue;

		uint64_t err_obj;
		error = zap_lookup_int_key(spa->spa_meta_objset, *newobj,
		    head_dataset_obj, &err_obj);

		if (error == ENOENT) {
			err_obj = zap_create(spa->spa_meta_objset,
			    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

			(void) zap_update_int_key(spa->spa_meta_objset,
			    *newobj, head_dataset_obj, err_obj, tx);
		}

		char buf[NAME_MAX_LEN];
		errphys_to_name(&zep, buf, sizeof (buf));

		const char *name = "";
		(void) zap_update(spa->spa_meta_objset, err_obj,
		    buf, 1, strlen(name) + 1, name, tx);
	}
	zap_cursor_fini(&zc);

	VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
}

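/*
 * Convert both on-disk error logs (last and scrub) to the head_errlog
 * format, replacing the old ZAP objects with the newly built ones.
 */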
void
spa_upgrade_errlog(spa_t *spa, dmu_tx_t *tx)
{
	uint64_t newobj = 0;

	mutex_enter(&spa->spa_errlog_lock);
	if (spa->spa_errlog_last != 0) {
		sync_upgrade_errlog(spa, spa->spa_errlog_last, &newobj, tx);
		spa->spa_errlog_last = newobj;
	}

	if (spa->spa_errlog_scrub != 0) {
		sync_upgrade_errlog(spa, spa->spa_errlog_scrub, &newobj, tx);
		spa->spa_errlog_scrub = newobj;
	}
	mutex_exit(&spa->spa_errlog_lock);
}

#ifdef _KERNEL
/*
 * Copy an on-disk error log out to userland.  If an error block is shared
 * by two datasets it will be counted twice; for details see the comment
 * above spa_get_errlog_size().
 */
static int
process_error_log(spa_t *spa, uint64_t obj, void *uaddr, uint64_t *count)
{
	zap_cursor_t zc;
	zap_attribute_t za;

	if (obj == 0)
		return (0);

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (zap_cursor_init(&zc, spa->spa_meta_objset, obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    zap_cursor_advance(&zc)) {
			if (*count == 0) {
				zap_cursor_fini(&zc);
				return (SET_ERROR(ENOMEM));
			}

			zbookmark_phys_t zb;
			name_to_bookmark(za.za_name, &zb);

			if (copyout(&zb, (char *)uaddr +
			    (*count - 1) * sizeof (zbookmark_phys_t),
			    sizeof (zbookmark_phys_t)) != 0) {
				zap_cursor_fini(&zc);
				return (SET_ERROR(EFAULT));
			}
			*count -= 1;
		}
		zap_cursor_fini(&zc);
		return (0);
	}

	for (zap_cursor_init(&zc, spa->spa_meta_objset, obj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {

		zap_cursor_t head_ds_cursor;
		zap_attribute_t head_ds_attr;

		uint64_t head_ds_err_obj = za.za_first_integer;
		uint64_t head_ds;
		name_to_object(za.za_name, &head_ds);
		for (zap_cursor_init(&head_ds_cursor, spa->spa_meta_objset,
		    head_ds_err_obj); zap_cursor_retrieve(&head_ds_cursor,
		    &head_ds_attr) == 0; zap_cursor_advance(&head_ds_cursor)) {

			zbookmark_err_phys_t head_ds_block;
			name_to_errphys(head_ds_attr.za_name, &head_ds_block);
			int error = process_error_block(spa, head_ds,
			    &head_ds_block, count, uaddr, B_FALSE);

			if (error != 0) {
				zap_cursor_fini(&head_ds_cursor);
				zap_cursor_fini(&zc);
				return (error);
			}
		}
		zap_cursor_fini(&head_ds_cursor);
	}
	zap_cursor_fini(&zc);
	return (0);
}

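/*
 * Copy the in-core error list out to userland.  With the head_errlog
 * feature enabled, each entry is expanded to one bookmark per dataset
 * that still references the error block.
 */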
static int
process_error_list(spa_t *spa, avl_tree_t *list, void *uaddr, uint64_t *count)
{
	spa_error_entry_t *se;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (se = avl_first(list); se != NULL;
		    se = AVL_NEXT(list, se)) {

			if (*count == 0)
				return (SET_ERROR(ENOMEM));

			if (copyout(&se->se_bookmark, (char *)uaddr +
			    (*count - 1) * sizeof (zbookmark_phys_t),
			    sizeof (zbookmark_phys_t)) != 0)
				return (SET_ERROR(EFAULT));

			*count -= 1;
		}
		return (0);
	}

	for (se = avl_first(list); se != NULL; se = AVL_NEXT(list, se)) {
		zbookmark_err_phys_t zep;
		zep.zb_object = se->se_bookmark.zb_object;
		zep.zb_level = se->se_bookmark.zb_level;
		zep.zb_blkid = se->se_bookmark.zb_blkid;

		uint64_t head_ds_obj;
		int error = get_head_and_birth_txg(spa, &zep,
		    se->se_bookmark.zb_objset, &head_ds_obj);
		if (error != 0)
			return (error);

		error = process_error_block(spa, head_ds_obj, &zep, count,
		    uaddr, B_FALSE);
		if (error != 0)
			return (error);
	}
	return (0);
}
#endif

/*
 * Copy all known errors to userland as an array of bookmarks.  This is
 * actually a union of the on-disk last log and current log, as well as any
 * pending error requests.
 *
 * Because the act of reading the on-disk log could cause errors to be
 * generated, we have two separate locks: one for the error log and one for the
 * in-core error lists.  We only need the error list lock to log an error, so
 * we grab the error log lock while we read the on-disk logs, and only pick up
 * the error list lock when we are finished.
 */
int
spa_get_errlog(spa_t *spa, void *uaddr, uint64_t *count)
{
	int ret = 0;

#ifdef _KERNEL
	mutex_enter(&spa->spa_errlog_lock);

	ret = process_error_log(spa, spa->spa_errlog_scrub, uaddr, count);

	if (!ret && !spa->spa_scrub_finished)
		ret = process_error_log(spa, spa->spa_errlog_last, uaddr,
		    count);

	mutex_enter(&spa->spa_errlist_lock);
	if (!ret)
		ret = process_error_list(spa, &spa->spa_errlist_scrub, uaddr,
		    count);
	if (!ret)
		ret = process_error_list(spa, &spa->spa_errlist_last, uaddr,
		    count);
	mutex_exit(&spa->spa_errlist_lock);

	mutex_exit(&spa->spa_errlog_lock);
#else
	(void) spa, (void) uaddr, (void) count;
#endif

	return (ret);
}

/*
 * Called when a scrub completes.  This simply sets a bit that tells
 * spa_log_error() which AVL tree to add new errors to.  spa_errlog_sync() is
 * responsible for actually syncing the changes to the underlying objects.
 */
void
spa_errlog_rotate(spa_t *spa)
{
	mutex_enter(&spa->spa_errlist_lock);
	spa->spa_scrub_finished = B_TRUE;
	mutex_exit(&spa->spa_errlist_lock);
}

/*
 * Discard any pending errors from the spa_t.  Called when unloading a faulted
 * pool, as the errors encountered during the open cannot be synced to disk.
 */
void
spa_errlog_drain(spa_t *spa)
{
	spa_error_entry_t *se;
	void *cookie;

	mutex_enter(&spa->spa_errlist_lock);

	cookie = NULL;
	while ((se = avl_destroy_nodes(&spa->spa_errlist_last,
	    &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));
	cookie = NULL;
	while ((se = avl_destroy_nodes(&spa->spa_errlist_scrub,
	    &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));

	mutex_exit(&spa->spa_errlist_lock);
}

/*
 * Process a list of errors into the current on-disk log.
 */
void
sync_error_list(spa_t *spa, avl_tree_t *t, uint64_t *obj, dmu_tx_t *tx)
{
	spa_error_entry_t *se;
	char buf[NAME_MAX_LEN];
	void *cookie;

	if (avl_numnodes(t) == 0)
		return;

	/* create log if necessary */
	if (*obj == 0)
		*obj = zap_create(spa->spa_meta_objset, DMU_OT_ERROR_LOG,
		    DMU_OT_NONE, 0, tx);

	/* add errors to the current log */
	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
			bookmark_to_name(&se->se_bookmark, buf, sizeof (buf));

			const char *name = se->se_name ? se->se_name : "";
			(void) zap_update(spa->spa_meta_objset, *obj, buf, 1,
			    strlen(name) + 1, name, tx);
		}
	} else {
		for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
			zbookmark_err_phys_t zep;
			zep.zb_object = se->se_bookmark.zb_object;
			zep.zb_level = se->se_bookmark.zb_level;
			zep.zb_blkid = se->se_bookmark.zb_blkid;

			/*
			 * If we cannot find out the head dataset and birth txg
			 * of the present error block, we simply continue.
			 * Reinserting that error block to the error lists,
			 * even if we are not syncing the final txg, results
			 * in duplicate posting of errors.
			 */
			uint64_t head_dataset_obj;
			int error = get_head_and_birth_txg(spa, &zep,
			    se->se_bookmark.zb_objset, &head_dataset_obj);
			if (error != 0)
				continue;

			uint64_t err_obj;
			error = zap_lookup_int_key(spa->spa_meta_objset,
			    *obj, head_dataset_obj, &err_obj);

			if (error == ENOENT) {
				err_obj = zap_create(spa->spa_meta_objset,
				    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

				(void) zap_update_int_key(spa->spa_meta_objset,
				    *obj, head_dataset_obj, err_obj, tx);
			}
			errphys_to_name(&zep, buf, sizeof (buf));

			const char *name = se->se_name ? se->se_name : "";
			(void) zap_update(spa->spa_meta_objset,
			    err_obj, buf, 1, strlen(name) + 1, name, tx);
		}
	}
	/* purge the error list */
	cookie = NULL;
	while ((se = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));
}

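/*
 * Free an on-disk error log object.  With head_errlog enabled the log is a
 * ZAP of per-dataset ZAPs, so free the nested objects first.
 */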
static void
delete_errlog(spa_t *spa, uint64_t spa_err_obj, dmu_tx_t *tx)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		zap_cursor_t zc;
		zap_attribute_t za;
		for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    zap_cursor_advance(&zc)) {
			VERIFY0(dmu_object_free(spa->spa_meta_objset,
			    za.za_first_integer, tx));
		}
		zap_cursor_fini(&zc);
	}
	VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
}

/*
 * Sync the error log out to disk.  This is a little tricky because the act of
 * writing the error log requires the spa_errlist_lock.  So, we need to lock
 * the error lists, take a copy of the lists, and then reinitialize them.
 * Then, we drop the error list lock and take the error log lock, at which
 * point we do the errlog processing.  Then, if we encounter an I/O error
 * during this process, we can successfully add the error to the list.  Note
 * that this will result in the perpetual recycling of errors, but it is an
 * unlikely situation and not a performance-critical operation.
 */
void
spa_errlog_sync(spa_t *spa, uint64_t txg)
{
	dmu_tx_t *tx;
	avl_tree_t scrub, last;
	int scrub_finished;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * Bail out early under normal circumstances.
	 */
	if (avl_numnodes(&spa->spa_errlist_scrub) == 0 &&
	    avl_numnodes(&spa->spa_errlist_last) == 0 &&
	    avl_numnodes(&spa->spa_errlist_healed) == 0 &&
	    !spa->spa_scrub_finished) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	spa_get_errlists(spa, &last, &scrub);
	scrub_finished = spa->spa_scrub_finished;
	spa->spa_scrub_finished = B_FALSE;

	mutex_exit(&spa->spa_errlist_lock);
	mutex_enter(&spa->spa_errlog_lock);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	/*
	 * Remove healed errors from the in-core lists and the on-disk logs.
	 */
	spa_remove_healed_errors(spa, &last, &scrub, tx);

	/*
	 * Sync out the current list of errors.
	 */
	sync_error_list(spa, &last, &spa->spa_errlog_last, tx);

	/*
	 * Rotate the log if necessary.
	 */
	if (scrub_finished) {
		if (spa->spa_errlog_last != 0)
			delete_errlog(spa, spa->spa_errlog_last, tx);
		spa->spa_errlog_last = spa->spa_errlog_scrub;
		spa->spa_errlog_scrub = 0;

		sync_error_list(spa, &scrub, &spa->spa_errlog_last, tx);
	}

	/*
	 * Sync out any pending scrub errors.
	 */
	sync_error_list(spa, &scrub, &spa->spa_errlog_scrub, tx);

	/*
	 * Update the MOS to reflect the new values.
	 */
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_LAST, sizeof (uint64_t), 1,
	    &spa->spa_errlog_last, tx);
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_SCRUB, sizeof (uint64_t), 1,
	    &spa->spa_errlog_scrub, tx);

	dmu_tx_commit(tx);

	mutex_exit(&spa->spa_errlog_lock);
}

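/*
 * Remove the per-dataset error log of the given head dataset from a
 * head_errlog-format log object.
 */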
static void
delete_dataset_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t ds,
    dmu_tx_t *tx)
{
	if (spa_err_obj == 0)
		return;

	zap_cursor_t zc;
	zap_attribute_t za;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {
		uint64_t head_ds;
		name_to_object(za.za_name, &head_ds);
		if (head_ds == ds) {
			(void) zap_remove(spa->spa_meta_objset, spa_err_obj,
			    za.za_name, tx);
			VERIFY0(dmu_object_free(spa->spa_meta_objset,
			    za.za_first_integer, tx));
			break;
		}
	}
	zap_cursor_fini(&zc);
}

void
spa_delete_dataset_errlog(spa_t *spa, uint64_t ds, dmu_tx_t *tx)
{
	mutex_enter(&spa->spa_errlog_lock);
	delete_dataset_errlog(spa, spa->spa_errlog_scrub, ds, tx);
	delete_dataset_errlog(spa, spa->spa_errlog_last, ds, tx);
	mutex_exit(&spa->spa_errlog_lock);
}

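/*
 * Walk back through old_head's snapshot chain to find the most recent
 * snapshot that belongs to new_head (the file system being promoted), and
 * return its txg, i.e. the point where the two datasets diverged.
 */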
static int
find_txg_ancestor_snapshot(spa_t *spa, uint64_t new_head, uint64_t old_head,
    uint64_t *txg)
{
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa->spa_dsl_pool;

	int error = dsl_dataset_hold_obj(dp, old_head, FTAG, &ds);
	if (error != 0)
		return (error);

	uint64_t prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	uint64_t prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;

	while (prev_obj != 0) {
		dsl_dataset_rele(ds, FTAG);
		if ((error = dsl_dataset_hold_obj(dp, prev_obj,
		    FTAG, &ds)) == 0 &&
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj == new_head)
			break;

		if (error != 0)
			return (error);

		prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
		prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	}
	dsl_dataset_rele(ds, FTAG);
	ASSERT(prev_obj != 0);
	*txg = prev_obj_txg;
	return (0);
}

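/*
 * During a clone promotion, move the log entries of error blocks born
 * before the divergence point from the old head dataset's per-dataset log
 * to the promoted (new head) dataset's log.
 */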
static void
swap_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t new_head, uint64_t
    old_head, dmu_tx_t *tx)
{
	if (spa_err_obj == 0)
		return;

	uint64_t old_head_errlog;
	int error = zap_lookup_int_key(spa->spa_meta_objset, spa_err_obj,
	    old_head, &old_head_errlog);

	/* If no error log, then there is nothing to do. */
	if (error != 0)
		return;

	uint64_t txg;
	error = find_txg_ancestor_snapshot(spa, new_head, old_head, &txg);
	if (error != 0)
		return;

	/*
	 * Create an error log if the file system being promoted does not
	 * already have one.
	 */
	uint64_t new_head_errlog;
	error = zap_lookup_int_key(spa->spa_meta_objset, spa_err_obj, new_head,
	    &new_head_errlog);

	if (error != 0) {
		new_head_errlog = zap_create(spa->spa_meta_objset,
		    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

		(void) zap_update_int_key(spa->spa_meta_objset, spa_err_obj,
		    new_head, new_head_errlog, tx);
	}

	zap_cursor_t zc;
	zap_attribute_t za;
	zbookmark_err_phys_t err_block;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, old_head_errlog);
	    zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {

		const char *name = "";
		name_to_errphys(za.za_name, &err_block);
		if (err_block.zb_birth < txg) {
			(void) zap_update(spa->spa_meta_objset, new_head_errlog,
			    za.za_name, 1, strlen(name) + 1, name, tx);

			(void) zap_remove(spa->spa_meta_objset, old_head_errlog,
			    za.za_name, tx);
		}
	}
	zap_cursor_fini(&zc);
}

void
spa_swap_errlog(spa_t *spa, uint64_t new_head_ds, uint64_t old_head_ds,
    dmu_tx_t *tx)
{
	mutex_enter(&spa->spa_errlog_lock);
	swap_errlog(spa, spa->spa_errlog_scrub, new_head_ds, old_head_ds, tx);
	swap_errlog(spa, spa->spa_errlog_last, new_head_ds, old_head_ds, tx);
	mutex_exit(&spa->spa_errlog_lock);
}

#if defined(_KERNEL)
/* error handling */
EXPORT_SYMBOL(spa_log_error);
EXPORT_SYMBOL(spa_get_errlog_size);
EXPORT_SYMBOL(spa_get_errlog);
EXPORT_SYMBOL(spa_errlog_rotate);
EXPORT_SYMBOL(spa_errlog_drain);
EXPORT_SYMBOL(spa_errlog_sync);
EXPORT_SYMBOL(spa_get_errlists);
EXPORT_SYMBOL(spa_delete_dataset_errlog);
EXPORT_SYMBOL(spa_swap_errlog);
EXPORT_SYMBOL(sync_error_list);
EXPORT_SYMBOL(spa_upgrade_errlog);
#endif

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_spa, spa_, upgrade_errlog_limit, UINT, ZMOD_RW,
	"Limit the number of errors which will be upgraded to the new "
	"on-disk error log when enabling head_errlog");
/* END CSTYLED */