xref: /freebsd/sys/contrib/openzfs/module/zfs/spa_errlog.c (revision d5b0e70f7e04d971691517ce1304d86a1e367e2e)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2014, Delphix. All rights reserved.
 * Copyright (c) 2019 Datto Inc.
 * Copyright (c) 2021, 2022, George Amanakis. All rights reserved.
 */

/*
 * Routines to manage the on-disk persistent error log.
 *
 * Each pool stores a log of all logical data errors seen during normal
 * operation.  This is actually the union of two distinct logs: the last log,
 * and the current log.  All errors seen are logged to the current log.  When a
 * scrub completes, the current log becomes the last log, the last log is thrown
 * out, and the current log is reinitialized.  This way, if an error is somehow
 * corrected, a new scrub will show that it no longer exists, and it will be
 * deleted from the log when the scrub completes.
 *
 * The log is stored using a ZAP object whose key is a string form of the
 * zbookmark_phys tuple (objset, object, level, blkid), and whose contents is an
 * optional 'objset:object' human-readable string describing the data.  When an
 * error is first logged, this string will be empty, indicating that no name is
 * known.  This prevents us from having to issue a potentially large amount of
 * I/O to discover the object name during an error path.  Instead, we do the
 * calculation when the data is requested, storing the result so future queries
 * will be faster.
 *
 * If the head_errlog feature is enabled, a different on-disk format is used.
 * The error log of each head dataset is stored separately in the zap object
 * and keyed by the head id. This enables listing every dataset affected in
 * userland. In order to be able to track whether an error block has been
 * modified or added to snapshots since it was marked as an error, a new tuple
 * is introduced: zbookmark_err_phys_t. It allows the storage of the birth
 * transaction group of an error block on-disk. The birth transaction group is
 * used by check_filesystem() to assess whether this block was freed,
 * re-written or added to a snapshot since it was marked as an error.
 *
 * This log is then shipped into an nvlist where the key is the dataset name and
 * the value is the object name.  Userland is then responsible for uniquifying
 * this list and displaying it to the user.
 */
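
/*
 * For illustration (hypothetical values, derived from bookmark_to_name() and
 * errphys_to_name() below): in the legacy format an error at objset 0x36,
 * object 0x82, level 0, blkid 0x1d is stored under the ZAP key "36:82:0:1d".
 * With head_errlog enabled, the outer ZAP is instead keyed by the head
 * dataset object number (in hex), and each per-dataset ZAP uses keys of the
 * form object:level:blkid:birth, e.g. "82:0:1d:2f40" for the same block with
 * birth txg 0x2f40.
 */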

#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_objset.h>
#include <sys/dbuf.h>
#include <sys/zfs_znode.h>

#define	NAME_MAX_LEN 64

/*
 * spa_upgrade_errlog_limit : A zfs module parameter that controls the number
 *		of on-disk error log entries that will be converted to the new
 *		format when enabling head_errlog. Defaults to 0 which converts
 *		all log entries.
 */
static uint_t spa_upgrade_errlog_limit = 0;
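
/*
 * For example (assuming a Linux build of the module), the limit can be set at
 * load time with "modprobe zfs spa_upgrade_errlog_limit=1000" or adjusted at
 * runtime through /sys/module/zfs/parameters/spa_upgrade_errlog_limit.
 */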

/*
 * Convert a bookmark to a string.
 */
static void
bookmark_to_name(zbookmark_phys_t *zb, char *buf, size_t len)
{
	(void) snprintf(buf, len, "%llx:%llx:%llx:%llx",
	    (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
	    (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid);
}

/*
 * Convert an err_phys to a string.
 */
static void
errphys_to_name(zbookmark_err_phys_t *zep, char *buf, size_t len)
{
	(void) snprintf(buf, len, "%llx:%llx:%llx:%llx",
	    (u_longlong_t)zep->zb_object, (u_longlong_t)zep->zb_level,
	    (u_longlong_t)zep->zb_blkid, (u_longlong_t)zep->zb_birth);
}

/*
 * Convert a string to an err_phys.
 */
static void
name_to_errphys(char *buf, zbookmark_err_phys_t *zep)
{
	zep->zb_object = zfs_strtonum(buf, &buf);
	ASSERT(*buf == ':');
	zep->zb_level = (int)zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zep->zb_blkid = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zep->zb_birth = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == '\0');
}

/*
 * Convert a string to a bookmark.
 */
static void
name_to_bookmark(char *buf, zbookmark_phys_t *zb)
{
	zb->zb_objset = zfs_strtonum(buf, &buf);
	ASSERT(*buf == ':');
	zb->zb_object = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_level = (int)zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_blkid = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == '\0');
}
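
/*
 * A round-trip sketch (hypothetical values): a zbookmark_phys_t of
 * { .zb_objset = 0x21, .zb_object = 0x5, .zb_level = 0, .zb_blkid = 0x80 }
 * is rendered by bookmark_to_name() as "21:5:0:80", and name_to_bookmark()
 * parses that string back into the same tuple.
 */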

#ifdef _KERNEL
static void
zep_to_zb(uint64_t dataset, zbookmark_err_phys_t *zep, zbookmark_phys_t *zb)
{
	zb->zb_objset = dataset;
	zb->zb_object = zep->zb_object;
	zb->zb_level = zep->zb_level;
	zb->zb_blkid = zep->zb_blkid;
}
#endif

static void
name_to_object(char *buf, uint64_t *obj)
{
	*obj = zfs_strtonum(buf, &buf);
	ASSERT(*buf == '\0');
}

static int
get_head_and_birth_txg(spa_t *spa, zbookmark_err_phys_t *zep, uint64_t ds_obj,
    uint64_t *head_dataset_id)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	dsl_dataset_t *ds;
	objset_t *os;

	dsl_pool_config_enter(dp, FTAG);
	int error = dsl_dataset_hold_obj(dp, ds_obj, FTAG, &ds);
	if (error != 0) {
		dsl_pool_config_exit(dp, FTAG);
		return (error);
	}
	ASSERT(head_dataset_id);
	*head_dataset_id = dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;

	error = dmu_objset_from_ds(ds, &os);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		dsl_pool_config_exit(dp, FTAG);
		return (error);
	}

	/*
	 * If the key is not loaded dbuf_dnode_findbp() will error out with
	 * EACCES. However, in that case dnode_hold() will eventually call
	 * dbuf_read()->zio_wait(), which may call spa_log_error(). This will
	 * lead to a deadlock due to us holding the mutex spa_errlist_lock.
	 * Avoid this by checking here whether the keys are loaded and, if
	 * they are not, returning early. If the keys are not loaded the
	 * head_errlog feature is meaningless anyway, as we cannot figure out
	 * the birth txg of the block pointer.
	 */
	if (dsl_dataset_get_keystatus(ds->ds_dir) ==
	    ZFS_KEYSTATUS_UNAVAILABLE) {
		zep->zb_birth = 0;
		dsl_dataset_rele(ds, FTAG);
		dsl_pool_config_exit(dp, FTAG);
		return (0);
	}

	dnode_t *dn;
	blkptr_t bp;

	error = dnode_hold(os, zep->zb_object, FTAG, &dn);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		dsl_pool_config_exit(dp, FTAG);
		return (error);
	}

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	error = dbuf_dnode_findbp(dn, zep->zb_level, zep->zb_blkid, &bp, NULL,
	    NULL);
	if (error == 0 && BP_IS_HOLE(&bp))
		error = SET_ERROR(ENOENT);

	/*
	 * If the key is loaded but the encrypted filesystem is unmounted when
	 * a scrub is run, then dbuf_dnode_findbp() will still error out with
	 * EACCES (possibly due to the key mapping being removed upon
	 * unmounting). In that case the head_errlog feature is also
	 * meaningless as we cannot figure out the birth txg of the block
	 * pointer.
	 */
	if (error == EACCES)
		error = 0;
	else if (!error)
		zep->zb_birth = bp.blk_birth;

	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	dsl_dataset_rele(ds, FTAG);
	dsl_pool_config_exit(dp, FTAG);
	return (error);
}

/*
 * Log an uncorrectable error to the persistent error log.  We add it to the
 * spa's list of pending errors.  The changes are actually synced out to disk
 * during spa_errlog_sync().
 */
void
spa_log_error(spa_t *spa, const zbookmark_phys_t *zb)
{
	spa_error_entry_t search;
	spa_error_entry_t *new;
	avl_tree_t *tree;
	avl_index_t where;

	/*
	 * If we are trying to import a pool, ignore any errors, as we won't be
	 * writing to the pool any time soon.
	 */
	if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
		return;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * If we have had a request to rotate the log, log it to the next list
	 * instead of the current one.
	 */
	if (spa->spa_scrub_active || spa->spa_scrub_finished)
		tree = &spa->spa_errlist_scrub;
	else
		tree = &spa->spa_errlist_last;

	search.se_bookmark = *zb;
	if (avl_find(tree, &search, &where) != NULL) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
	new->se_bookmark = *zb;
	avl_insert(tree, new, where);

	mutex_exit(&spa->spa_errlist_lock);
}

#ifdef _KERNEL
static int
find_birth_txg(dsl_dataset_t *ds, zbookmark_err_phys_t *zep,
    uint64_t *birth_txg)
{
	objset_t *os;
	int error = dmu_objset_from_ds(ds, &os);
	if (error != 0)
		return (error);

	dnode_t *dn;
	blkptr_t bp;

	error = dnode_hold(os, zep->zb_object, FTAG, &dn);
	if (error != 0)
		return (error);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	error = dbuf_dnode_findbp(dn, zep->zb_level, zep->zb_blkid, &bp, NULL,
	    NULL);
	if (error == 0 && BP_IS_HOLE(&bp))
		error = SET_ERROR(ENOENT);

	*birth_txg = bp.blk_birth;
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (error);
}

/*
 * This function serves a double role. If only_count is true, it returns
 * (in *count) how many times an error block belonging to this filesystem is
 * referenced by snapshots or clones. If only_count is false, each time the
 * error block is referenced by a snapshot or clone, it fills the userspace
 * array at uaddr with the bookmarks of the error blocks. The array is filled
 * from the back and *count is modified to be the number of unused entries at
 * the beginning of the array.
 */
static int
check_filesystem(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    uint64_t *count, void *uaddr, boolean_t only_count)
{
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa->spa_dsl_pool;

	int error = dsl_dataset_hold_obj(dp, head_ds, FTAG, &ds);
	if (error != 0)
		return (error);

	uint64_t latest_txg;
	uint64_t txg_to_consider = spa->spa_syncing_txg;
	boolean_t check_snapshot = B_TRUE;
	error = find_birth_txg(ds, zep, &latest_txg);

	/*
	 * If we cannot figure out the current birth txg of the block pointer,
	 * error out. If the filesystem is encrypted and the key is not loaded
	 * or the encrypted filesystem is not mounted, the error will be
	 * EACCES. In that case do not return an error.
	 */
	if (error == EACCES) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}
	if (error) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}
	if (zep->zb_birth == latest_txg) {
		/* Block was neither freed nor rewritten. */
		if (!only_count) {
			zbookmark_phys_t zb;
			zep_to_zb(head_ds, zep, &zb);
			if (copyout(&zb, (char *)uaddr + (*count - 1)
			    * sizeof (zbookmark_phys_t),
			    sizeof (zbookmark_phys_t)) != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EFAULT));
			}
			(*count)--;
		} else {
			(*count)++;
		}
		check_snapshot = B_FALSE;
	} else {
		ASSERT3U(zep->zb_birth, <, latest_txg);
		txg_to_consider = latest_txg;
	}

	/* How many snapshots this filesystem has. */
	uint64_t snap_count;
	error = zap_count(spa->spa_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	if (snap_count == 0) {
		/* File system has no snapshots. */
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	uint64_t *snap_obj_array = kmem_alloc(snap_count * sizeof (uint64_t),
	    KM_SLEEP);

	int aff_snap_count = 0;
	uint64_t snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	uint64_t snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;

	/* Check only snapshots created from this file system. */
	while (snap_obj != 0 && zep->zb_birth < snap_obj_txg &&
	    snap_obj_txg <= txg_to_consider) {

		dsl_dataset_rele(ds, FTAG);
		error = dsl_dataset_hold_obj(dp, snap_obj, FTAG, &ds);
		if (error != 0)
			goto out;

		if (dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj != head_ds)
			break;

		boolean_t affected = B_TRUE;
		if (check_snapshot) {
			uint64_t blk_txg;
			error = find_birth_txg(ds, zep, &blk_txg);
			affected = (error == 0 && zep->zb_birth == blk_txg);
		}

		if (affected) {
			snap_obj_array[aff_snap_count] = snap_obj;
			aff_snap_count++;

			if (!only_count) {
				zbookmark_phys_t zb;
				zep_to_zb(snap_obj, zep, &zb);
				if (copyout(&zb, (char *)uaddr + (*count - 1) *
				    sizeof (zbookmark_phys_t),
				    sizeof (zbookmark_phys_t)) != 0) {
					dsl_dataset_rele(ds, FTAG);
					error = SET_ERROR(EFAULT);
					goto out;
				}
				(*count)--;
			} else {
				(*count)++;
			}

			/*
			 * Only clones whose origins were affected could also
			 * have affected snapshots.
			 */
			zap_cursor_t zc;
			zap_attribute_t za;
			for (zap_cursor_init(&zc, spa->spa_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    zap_cursor_advance(&zc)) {
				error = check_filesystem(spa,
				    za.za_first_integer, zep,
				    count, uaddr, only_count);

				if (error != 0) {
					zap_cursor_fini(&zc);
					goto out;
				}
			}
			zap_cursor_fini(&zc);
		}
		snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
		snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	}
	dsl_dataset_rele(ds, FTAG);

out:
	kmem_free(snap_obj_array, snap_count * sizeof (uint64_t));
	return (error);
}

static int
find_top_affected_fs(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    uint64_t *top_affected_fs)
{
	uint64_t oldest_dsobj;
	int error = dsl_dataset_oldest_snapshot(spa, head_ds, zep->zb_birth,
	    &oldest_dsobj);
	if (error != 0)
		return (error);

	dsl_dataset_t *ds;
	error = dsl_dataset_hold_obj(spa->spa_dsl_pool, oldest_dsobj,
	    FTAG, &ds);
	if (error != 0)
		return (error);

	*top_affected_fs =
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static int
process_error_block(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    uint64_t *count, void *uaddr, boolean_t only_count)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	uint64_t top_affected_fs;

	/*
	 * If the zb_birth is 0 it means we failed to retrieve the birth txg
	 * of the block pointer. This happens when an encrypted filesystem is
	 * not mounted or when the key is not loaded. Do not proceed to
	 * check_filesystem(), instead do the accounting here.
	 */
	if (zep->zb_birth == 0) {
		if (!only_count) {
			zbookmark_phys_t zb;
			zep_to_zb(head_ds, zep, &zb);
			if (copyout(&zb, (char *)uaddr + (*count - 1)
			    * sizeof (zbookmark_phys_t),
			    sizeof (zbookmark_phys_t)) != 0) {
				return (SET_ERROR(EFAULT));
			}
			(*count)--;
		} else {
			(*count)++;
		}
		return (0);
	}

	dsl_pool_config_enter(dp, FTAG);
	int error = find_top_affected_fs(spa, head_ds, zep, &top_affected_fs);
	if (error == 0)
		error = check_filesystem(spa, top_affected_fs, zep, count,
		    uaddr, only_count);

	dsl_pool_config_exit(dp, FTAG);
	return (error);
}
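
/*
 * A worked example of the counting contract above (hypothetical numbers): a
 * caller with room for 8 bookmarks at uaddr passes *count == 8; if three
 * error bookmarks are reported, they are copied out at indices 7, 6 and 5,
 * and *count comes back as 5, the number of unused entries at the front of
 * the buffer.
 */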

static uint64_t
get_errlog_size(spa_t *spa, uint64_t spa_err_obj)
{
	if (spa_err_obj == 0)
		return (0);
	uint64_t total = 0;

	zap_cursor_t zc;
	zap_attribute_t za;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {

		zap_cursor_t head_ds_cursor;
		zap_attribute_t head_ds_attr;
		zbookmark_err_phys_t head_ds_block;

		uint64_t head_ds;
		name_to_object(za.za_name, &head_ds);

		for (zap_cursor_init(&head_ds_cursor, spa->spa_meta_objset,
		    za.za_first_integer); zap_cursor_retrieve(&head_ds_cursor,
		    &head_ds_attr) == 0; zap_cursor_advance(&head_ds_cursor)) {

			name_to_errphys(head_ds_attr.za_name, &head_ds_block);
			(void) process_error_block(spa, head_ds, &head_ds_block,
			    &total, NULL, B_TRUE);
		}
		zap_cursor_fini(&head_ds_cursor);
	}
	zap_cursor_fini(&zc);
	return (total);
}

static uint64_t
get_errlist_size(spa_t *spa, avl_tree_t *tree)
{
	if (avl_numnodes(tree) == 0)
		return (0);
	uint64_t total = 0;

	spa_error_entry_t *se;
	for (se = avl_first(tree); se != NULL; se = AVL_NEXT(tree, se)) {
		zbookmark_err_phys_t zep;
		zep.zb_object = se->se_bookmark.zb_object;
		zep.zb_level = se->se_bookmark.zb_level;
		zep.zb_blkid = se->se_bookmark.zb_blkid;
		zep.zb_birth = 0;

		/*
		 * If we cannot find out the head dataset and birth txg of
		 * the present error block, we opt not to error out. In the
		 * next pool sync this information will be retrieved by
		 * sync_error_list() and written to the on-disk error log.
		 */
		uint64_t head_ds_obj;
		int error = get_head_and_birth_txg(spa, &zep,
		    se->se_bookmark.zb_objset, &head_ds_obj);

		if (!error)
			(void) process_error_block(spa, head_ds_obj, &zep,
			    &total, NULL, B_TRUE);
	}
	return (total);
}
#endif

/*
 * If a healed bookmark matches an entry in the error log we stash it in a tree
 * so that we can later remove the related log entries in sync context.
 */
static void
spa_add_healed_error(spa_t *spa, uint64_t obj, zbookmark_phys_t *healed_zb)
{
	char name[NAME_MAX_LEN];

	if (obj == 0)
		return;

	bookmark_to_name(healed_zb, name, sizeof (name));
	mutex_enter(&spa->spa_errlog_lock);
	if (zap_contains(spa->spa_meta_objset, obj, name) == 0) {
		/*
		 * Found an error matching healed zb, add zb to our
		 * tree of healed errors
		 */
		avl_tree_t *tree = &spa->spa_errlist_healed;
		spa_error_entry_t search;
		spa_error_entry_t *new;
		avl_index_t where;
		search.se_bookmark = *healed_zb;
		mutex_enter(&spa->spa_errlist_lock);
		if (avl_find(tree, &search, &where) != NULL) {
			mutex_exit(&spa->spa_errlist_lock);
			mutex_exit(&spa->spa_errlog_lock);
			return;
		}
		new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
		new->se_bookmark = *healed_zb;
		avl_insert(tree, new, where);
		mutex_exit(&spa->spa_errlist_lock);
	}
	mutex_exit(&spa->spa_errlog_lock);
}

/*
 * If this error exists in the given tree remove it.
 */
static void
remove_error_from_list(spa_t *spa, avl_tree_t *t, const zbookmark_phys_t *zb)
{
	spa_error_entry_t search, *found;
	avl_index_t where;

	mutex_enter(&spa->spa_errlist_lock);
	search.se_bookmark = *zb;
	if ((found = avl_find(t, &search, &where)) != NULL) {
		avl_remove(t, found);
		kmem_free(found, sizeof (spa_error_entry_t));
	}
	mutex_exit(&spa->spa_errlist_lock);
}


/*
 * Removes all of the recv-healed errors from both on-disk error logs.
 */
static void
spa_remove_healed_errors(spa_t *spa, avl_tree_t *s, avl_tree_t *l, dmu_tx_t *tx)
{
	char name[NAME_MAX_LEN];
	spa_error_entry_t *se;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(&spa->spa_errlog_lock));

	while ((se = avl_destroy_nodes(&spa->spa_errlist_healed,
	    &cookie)) != NULL) {
		remove_error_from_list(spa, s, &se->se_bookmark);
		remove_error_from_list(spa, l, &se->se_bookmark);
		bookmark_to_name(&se->se_bookmark, name, sizeof (name));
		kmem_free(se, sizeof (spa_error_entry_t));
		(void) zap_remove(spa->spa_meta_objset,
		    spa->spa_errlog_last, name, tx);
		(void) zap_remove(spa->spa_meta_objset,
		    spa->spa_errlog_scrub, name, tx);
	}
}

/*
 * Stash away healed bookmarks to remove them from the on-disk error logs
 * later in spa_remove_healed_errors().
 */
void
spa_remove_error(spa_t *spa, zbookmark_phys_t *zb)
{
	char name[NAME_MAX_LEN];

	bookmark_to_name(zb, name, sizeof (name));

	spa_add_healed_error(spa, spa->spa_errlog_last, zb);
	spa_add_healed_error(spa, spa->spa_errlog_scrub, zb);
}

/*
 * Return the number of errors currently in the error log.  This is actually the
 * sum of both the last log and the current log, since we don't know the union
 * of these logs until we reach userland.
 */
uint64_t
spa_get_errlog_size(spa_t *spa)
{
	uint64_t total = 0;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		mutex_enter(&spa->spa_errlog_lock);
		uint64_t count;
		if (spa->spa_errlog_scrub != 0 &&
		    zap_count(spa->spa_meta_objset, spa->spa_errlog_scrub,
		    &count) == 0)
			total += count;

		if (spa->spa_errlog_last != 0 && !spa->spa_scrub_finished &&
		    zap_count(spa->spa_meta_objset, spa->spa_errlog_last,
		    &count) == 0)
			total += count;
		mutex_exit(&spa->spa_errlog_lock);

		mutex_enter(&spa->spa_errlist_lock);
		total += avl_numnodes(&spa->spa_errlist_last);
		total += avl_numnodes(&spa->spa_errlist_scrub);
		mutex_exit(&spa->spa_errlist_lock);
	} else {
#ifdef _KERNEL
		mutex_enter(&spa->spa_errlog_lock);
		total += get_errlog_size(spa, spa->spa_errlog_last);
		total += get_errlog_size(spa, spa->spa_errlog_scrub);
		mutex_exit(&spa->spa_errlog_lock);

		mutex_enter(&spa->spa_errlist_lock);
		total += get_errlist_size(spa, &spa->spa_errlist_last);
		total += get_errlist_size(spa, &spa->spa_errlist_scrub);
		mutex_exit(&spa->spa_errlist_lock);
#endif
	}
	return (total);
}

/*
 * This function sweeps through an on-disk error log and stores all bookmarks
 * as error bookmarks in a new ZAP object. At the end we discard the old one,
 * and spa_upgrade_errlog() will set the spa's on-disk error log to the new
 * ZAP object.
 */
static void
sync_upgrade_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t *newobj,
    dmu_tx_t *tx)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	zbookmark_phys_t zb;
	uint64_t count;

	*newobj = zap_create(spa->spa_meta_objset, DMU_OT_ERROR_LOG,
	    DMU_OT_NONE, 0, tx);

	/*
	 * If we cannot perform the upgrade we should clear the old on-disk
	 * error logs.
	 */
	if (zap_count(spa->spa_meta_objset, spa_err_obj, &count) != 0) {
		VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
		return;
	}

	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		if (spa_upgrade_errlog_limit != 0 &&
		    zc.zc_cd == spa_upgrade_errlog_limit)
			break;

		name_to_bookmark(za.za_name, &zb);

		zbookmark_err_phys_t zep;
		zep.zb_object = zb.zb_object;
		zep.zb_level = zb.zb_level;
		zep.zb_blkid = zb.zb_blkid;
		zep.zb_birth = 0;

		/*
		 * We cannot use get_head_and_birth_txg() because it will
		 * acquire the pool config lock, which we already have. In case
		 * of an error we simply continue.
		 */
		uint64_t head_dataset_obj;
		dsl_pool_t *dp = spa->spa_dsl_pool;
		dsl_dataset_t *ds;
		objset_t *os;

		int error = dsl_dataset_hold_obj(dp, zb.zb_objset, FTAG, &ds);
		if (error != 0)
			continue;

		head_dataset_obj =
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;

		/*
		 * The objset and the dnode are required for getting the block
		 * pointer, which is used to determine if BP_IS_HOLE(). If
		 * getting the objset or the dnode fails, do not create a
		 * zap entry (presuming we know the dataset) as this may create
		 * spurious errors that we cannot ever resolve. If an error is
		 * truly persistent, it should re-appear after a scan.
		 */
		if (dmu_objset_from_ds(ds, &os) != 0) {
			dsl_dataset_rele(ds, FTAG);
			continue;
		}

		dnode_t *dn;
		blkptr_t bp;

		if (dnode_hold(os, zep.zb_object, FTAG, &dn) != 0) {
			dsl_dataset_rele(ds, FTAG);
			continue;
		}

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		error = dbuf_dnode_findbp(dn, zep.zb_level, zep.zb_blkid, &bp,
		    NULL, NULL);
		if (error == EACCES)
			error = 0;
		else if (!error)
			zep.zb_birth = bp.blk_birth;

		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
		dsl_dataset_rele(ds, FTAG);

		if (error != 0 || BP_IS_HOLE(&bp))
			continue;

		uint64_t err_obj;
		error = zap_lookup_int_key(spa->spa_meta_objset, *newobj,
		    head_dataset_obj, &err_obj);

		if (error == ENOENT) {
			err_obj = zap_create(spa->spa_meta_objset,
			    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

			(void) zap_update_int_key(spa->spa_meta_objset,
			    *newobj, head_dataset_obj, err_obj, tx);
		}

		char buf[NAME_MAX_LEN];
		errphys_to_name(&zep, buf, sizeof (buf));

		const char *name = "";
		(void) zap_update(spa->spa_meta_objset, err_obj,
		    buf, 1, strlen(name) + 1, name, tx);
	}
	zap_cursor_fini(&zc);

	VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
}

void
spa_upgrade_errlog(spa_t *spa, dmu_tx_t *tx)
{
	uint64_t newobj = 0;

	mutex_enter(&spa->spa_errlog_lock);
	if (spa->spa_errlog_last != 0) {
		sync_upgrade_errlog(spa, spa->spa_errlog_last, &newobj, tx);
		spa->spa_errlog_last = newobj;
	}

	if (spa->spa_errlog_scrub != 0) {
		sync_upgrade_errlog(spa, spa->spa_errlog_scrub, &newobj, tx);
		spa->spa_errlog_scrub = newobj;
	}
	mutex_exit(&spa->spa_errlog_lock);
}

#ifdef _KERNEL
/*
 * If an error block is shared by two datasets it will be counted twice. For
 * a detailed explanation, see spa_get_errlog_size() above.
 */
static int
process_error_log(spa_t *spa, uint64_t obj, void *uaddr, uint64_t *count)
{
	zap_cursor_t zc;
	zap_attribute_t za;

	if (obj == 0)
		return (0);

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (zap_cursor_init(&zc, spa->spa_meta_objset, obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    zap_cursor_advance(&zc)) {
			if (*count == 0) {
				zap_cursor_fini(&zc);
				return (SET_ERROR(ENOMEM));
			}

			zbookmark_phys_t zb;
			name_to_bookmark(za.za_name, &zb);

			if (copyout(&zb, (char *)uaddr +
			    (*count - 1) * sizeof (zbookmark_phys_t),
			    sizeof (zbookmark_phys_t)) != 0) {
				zap_cursor_fini(&zc);
				return (SET_ERROR(EFAULT));
			}
			*count -= 1;
		}
		zap_cursor_fini(&zc);
		return (0);
	}

	for (zap_cursor_init(&zc, spa->spa_meta_objset, obj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {

		zap_cursor_t head_ds_cursor;
		zap_attribute_t head_ds_attr;

		uint64_t head_ds_err_obj = za.za_first_integer;
		uint64_t head_ds;
		name_to_object(za.za_name, &head_ds);
		for (zap_cursor_init(&head_ds_cursor, spa->spa_meta_objset,
		    head_ds_err_obj); zap_cursor_retrieve(&head_ds_cursor,
		    &head_ds_attr) == 0; zap_cursor_advance(&head_ds_cursor)) {

			zbookmark_err_phys_t head_ds_block;
			name_to_errphys(head_ds_attr.za_name, &head_ds_block);
			int error = process_error_block(spa, head_ds,
			    &head_ds_block, count, uaddr, B_FALSE);

			if (error != 0) {
				zap_cursor_fini(&head_ds_cursor);
				zap_cursor_fini(&zc);
				return (error);
			}
		}
		zap_cursor_fini(&head_ds_cursor);
	}
	zap_cursor_fini(&zc);
	return (0);
}

static int
process_error_list(spa_t *spa, avl_tree_t *list, void *uaddr, uint64_t *count)
{
	spa_error_entry_t *se;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (se = avl_first(list); se != NULL;
		    se = AVL_NEXT(list, se)) {

			if (*count == 0)
				return (SET_ERROR(ENOMEM));

			if (copyout(&se->se_bookmark, (char *)uaddr +
			    (*count - 1) * sizeof (zbookmark_phys_t),
			    sizeof (zbookmark_phys_t)) != 0)
				return (SET_ERROR(EFAULT));

			*count -= 1;
		}
		return (0);
	}

	for (se = avl_first(list); se != NULL; se = AVL_NEXT(list, se)) {
		zbookmark_err_phys_t zep;
		zep.zb_object = se->se_bookmark.zb_object;
		zep.zb_level = se->se_bookmark.zb_level;
		zep.zb_blkid = se->se_bookmark.zb_blkid;
		zep.zb_birth = 0;

		uint64_t head_ds_obj;
		int error = get_head_and_birth_txg(spa, &zep,
		    se->se_bookmark.zb_objset, &head_ds_obj);

		if (!error)
			error = process_error_block(spa, head_ds_obj, &zep,
			    count, uaddr, B_FALSE);
		if (error)
			return (error);
	}
	return (0);
}
#endif

/*
 * Copy all known errors to userland as an array of bookmarks.  This is
 * actually a union of the on-disk last log and current log, as well as any
 * pending error requests.
 *
 * Because the act of reading the on-disk log could cause errors to be
 * generated, we have two separate locks: one for the error log and one for the
 * in-core error lists.  We only need the error list lock to log an error, so
 * we grab the error log lock while we read the on-disk logs, and only pick up
 * the error list lock when we are finished.
 */
int
spa_get_errlog(spa_t *spa, void *uaddr, uint64_t *count)
{
	int ret = 0;

#ifdef _KERNEL
	mutex_enter(&spa->spa_errlog_lock);

	ret = process_error_log(spa, spa->spa_errlog_scrub, uaddr, count);

	if (!ret && !spa->spa_scrub_finished)
		ret = process_error_log(spa, spa->spa_errlog_last, uaddr,
		    count);

	mutex_enter(&spa->spa_errlist_lock);
	if (!ret)
		ret = process_error_list(spa, &spa->spa_errlist_scrub, uaddr,
		    count);
	if (!ret)
		ret = process_error_list(spa, &spa->spa_errlist_last, uaddr,
		    count);
	mutex_exit(&spa->spa_errlist_lock);

	mutex_exit(&spa->spa_errlog_lock);
#else
	(void) spa, (void) uaddr, (void) count;
#endif

	return (ret);
}

/*
 * Called when a scrub completes.  This simply sets a bit which tells which AVL
 * tree new errors should be added to.  spa_errlog_sync() is responsible for
 * actually syncing the changes to the underlying objects.
 */
void
spa_errlog_rotate(spa_t *spa)
{
	mutex_enter(&spa->spa_errlist_lock);
	spa->spa_scrub_finished = B_TRUE;
	mutex_exit(&spa->spa_errlist_lock);
}

/*
 * Discard any pending errors from the spa_t.  Called when unloading a faulted
 * pool, as the errors encountered during the open cannot be synced to disk.
 */
void
spa_errlog_drain(spa_t *spa)
{
	spa_error_entry_t *se;
	void *cookie;

	mutex_enter(&spa->spa_errlist_lock);

	cookie = NULL;
	while ((se = avl_destroy_nodes(&spa->spa_errlist_last,
	    &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));
	cookie = NULL;
	while ((se = avl_destroy_nodes(&spa->spa_errlist_scrub,
	    &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));

	mutex_exit(&spa->spa_errlist_lock);
}

/*
 * Process a list of errors into the current on-disk log.
 */
void
sync_error_list(spa_t *spa, avl_tree_t *t, uint64_t *obj, dmu_tx_t *tx)
{
	spa_error_entry_t *se;
	char buf[NAME_MAX_LEN];
	void *cookie;

	if (avl_numnodes(t) == 0)
		return;

	/* create log if necessary */
	if (*obj == 0)
		*obj = zap_create(spa->spa_meta_objset, DMU_OT_ERROR_LOG,
		    DMU_OT_NONE, 0, tx);

	/* add errors to the current log */
	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
			bookmark_to_name(&se->se_bookmark, buf, sizeof (buf));

			const char *name = se->se_name ? se->se_name : "";
			(void) zap_update(spa->spa_meta_objset, *obj, buf, 1,
			    strlen(name) + 1, name, tx);
		}
	} else {
		for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
			zbookmark_err_phys_t zep;
			zep.zb_object = se->se_bookmark.zb_object;
			zep.zb_level = se->se_bookmark.zb_level;
			zep.zb_blkid = se->se_bookmark.zb_blkid;
			zep.zb_birth = 0;

			/*
			 * If we cannot find out the head dataset and birth txg
			 * of the present error block, we simply continue.
			 * Reinserting that error block to the error lists,
			 * even if we are not syncing the final txg, results
			 * in duplicate posting of errors.
			 */
			uint64_t head_dataset_obj;
			int error = get_head_and_birth_txg(spa, &zep,
			    se->se_bookmark.zb_objset, &head_dataset_obj);
			if (error)
				continue;

			uint64_t err_obj;
			error = zap_lookup_int_key(spa->spa_meta_objset,
			    *obj, head_dataset_obj, &err_obj);

			if (error == ENOENT) {
				err_obj = zap_create(spa->spa_meta_objset,
				    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

				(void) zap_update_int_key(spa->spa_meta_objset,
				    *obj, head_dataset_obj, err_obj, tx);
			}
			errphys_to_name(&zep, buf, sizeof (buf));

			const char *name = se->se_name ? se->se_name : "";
			(void) zap_update(spa->spa_meta_objset,
			    err_obj, buf, 1, strlen(name) + 1, name, tx);
		}
	}
	/* purge the error list */
	cookie = NULL;
	while ((se = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));
}

static void
delete_errlog(spa_t *spa, uint64_t spa_err_obj, dmu_tx_t *tx)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		zap_cursor_t zc;
		zap_attribute_t za;
		for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    zap_cursor_advance(&zc)) {
			VERIFY0(dmu_object_free(spa->spa_meta_objset,
			    za.za_first_integer, tx));
		}
		zap_cursor_fini(&zc);
	}
	VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
}

/*
 * Sync the error log out to disk.  This is a little tricky because the act of
 * writing the error log requires the spa_errlist_lock.  So, we need to lock the
 * error lists, take a copy of the lists, and then reinitialize them.  Then, we
 * drop the error list lock and take the error log lock, at which point we
 * do the errlog processing.  Then, if we encounter an I/O error during this
 * process, we can successfully add the error to the list.  Note that this will
 * result in the perpetual recycling of errors, but it is an unlikely situation
 * and not a performance critical operation.
 */
void
spa_errlog_sync(spa_t *spa, uint64_t txg)
{
	dmu_tx_t *tx;
	avl_tree_t scrub, last;
	int scrub_finished;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * Bail out early under normal circumstances.
	 */
	if (avl_numnodes(&spa->spa_errlist_scrub) == 0 &&
	    avl_numnodes(&spa->spa_errlist_last) == 0 &&
	    avl_numnodes(&spa->spa_errlist_healed) == 0 &&
	    !spa->spa_scrub_finished) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	spa_get_errlists(spa, &last, &scrub);
	scrub_finished = spa->spa_scrub_finished;
	spa->spa_scrub_finished = B_FALSE;

	mutex_exit(&spa->spa_errlist_lock);
	mutex_enter(&spa->spa_errlog_lock);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	/*
	 * Remove healed errors from the error lists.
	 */
	spa_remove_healed_errors(spa, &last, &scrub, tx);

	/*
	 * Sync out the current list of errors.
	 */
	sync_error_list(spa, &last, &spa->spa_errlog_last, tx);

	/*
	 * Rotate the log if necessary.
	 */
	if (scrub_finished) {
		if (spa->spa_errlog_last != 0)
			delete_errlog(spa, spa->spa_errlog_last, tx);
		spa->spa_errlog_last = spa->spa_errlog_scrub;
		spa->spa_errlog_scrub = 0;

		sync_error_list(spa, &scrub, &spa->spa_errlog_last, tx);
	}

	/*
	 * Sync out any pending scrub errors.
	 */
	sync_error_list(spa, &scrub, &spa->spa_errlog_scrub, tx);

	/*
	 * Update the MOS to reflect the new values.
	 */
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_LAST, sizeof (uint64_t), 1,
	    &spa->spa_errlog_last, tx);
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_SCRUB, sizeof (uint64_t), 1,
	    &spa->spa_errlog_scrub, tx);

	dmu_tx_commit(tx);

	mutex_exit(&spa->spa_errlog_lock);
}

static void
delete_dataset_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t ds,
    dmu_tx_t *tx)
{
	if (spa_err_obj == 0)
		return;

	zap_cursor_t zc;
	zap_attribute_t za;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {
		uint64_t head_ds;
		name_to_object(za.za_name, &head_ds);
		if (head_ds == ds) {
			(void) zap_remove(spa->spa_meta_objset, spa_err_obj,
			    za.za_name, tx);
			VERIFY0(dmu_object_free(spa->spa_meta_objset,
			    za.za_first_integer, tx));
			break;
		}
	}
	zap_cursor_fini(&zc);
}

void
spa_delete_dataset_errlog(spa_t *spa, uint64_t ds, dmu_tx_t *tx)
{
	mutex_enter(&spa->spa_errlog_lock);
	delete_dataset_errlog(spa, spa->spa_errlog_scrub, ds, tx);
	delete_dataset_errlog(spa, spa->spa_errlog_last, ds, tx);
	mutex_exit(&spa->spa_errlog_lock);
}

static int
find_txg_ancestor_snapshot(spa_t *spa, uint64_t new_head, uint64_t old_head,
    uint64_t *txg)
{
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa->spa_dsl_pool;

	int error = dsl_dataset_hold_obj(dp, old_head, FTAG, &ds);
	if (error != 0)
		return (error);

	uint64_t prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	uint64_t prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;

	while (prev_obj != 0) {
		dsl_dataset_rele(ds, FTAG);
		if ((error = dsl_dataset_hold_obj(dp, prev_obj,
		    FTAG, &ds)) == 0 &&
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj == new_head)
			break;

		if (error != 0)
			return (error);

		prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
		prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	}
	dsl_dataset_rele(ds, FTAG);
	ASSERT(prev_obj != 0);
	*txg = prev_obj_txg;
	return (0);
}

static void
swap_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t new_head, uint64_t
    old_head, dmu_tx_t *tx)
{
	if (spa_err_obj == 0)
		return;

	uint64_t old_head_errlog;
	int error = zap_lookup_int_key(spa->spa_meta_objset, spa_err_obj,
	    old_head, &old_head_errlog);

	/* If no error log, then there is nothing to do. */
	if (error != 0)
		return;

	uint64_t txg;
	error = find_txg_ancestor_snapshot(spa, new_head, old_head, &txg);
	if (error != 0)
		return;

	/*
	 * Create an error log if the file system being promoted does not
	 * already have one.
	 */
	uint64_t new_head_errlog;
	error = zap_lookup_int_key(spa->spa_meta_objset, spa_err_obj, new_head,
	    &new_head_errlog);

	if (error != 0) {
		new_head_errlog = zap_create(spa->spa_meta_objset,
		    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

		(void) zap_update_int_key(spa->spa_meta_objset, spa_err_obj,
		    new_head, new_head_errlog, tx);
	}

	zap_cursor_t zc;
	zap_attribute_t za;
	zbookmark_err_phys_t err_block;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, old_head_errlog);
	    zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {

		const char *name = "";
		name_to_errphys(za.za_name, &err_block);
		if (err_block.zb_birth < txg) {
			(void) zap_update(spa->spa_meta_objset, new_head_errlog,
			    za.za_name, 1, strlen(name) + 1, name, tx);

			(void) zap_remove(spa->spa_meta_objset, old_head_errlog,
			    za.za_name, tx);
		}
	}
	zap_cursor_fini(&zc);
}

void
spa_swap_errlog(spa_t *spa, uint64_t new_head_ds, uint64_t old_head_ds,
    dmu_tx_t *tx)
{
	mutex_enter(&spa->spa_errlog_lock);
	swap_errlog(spa, spa->spa_errlog_scrub, new_head_ds, old_head_ds, tx);
	swap_errlog(spa, spa->spa_errlog_last, new_head_ds, old_head_ds, tx);
	mutex_exit(&spa->spa_errlog_lock);
}

#if defined(_KERNEL)
/* error handling */
EXPORT_SYMBOL(spa_log_error);
EXPORT_SYMBOL(spa_get_errlog_size);
EXPORT_SYMBOL(spa_get_errlog);
EXPORT_SYMBOL(spa_errlog_rotate);
EXPORT_SYMBOL(spa_errlog_drain);
EXPORT_SYMBOL(spa_errlog_sync);
EXPORT_SYMBOL(spa_get_errlists);
EXPORT_SYMBOL(spa_delete_dataset_errlog);
EXPORT_SYMBOL(spa_swap_errlog);
EXPORT_SYMBOL(sync_error_list);
EXPORT_SYMBOL(spa_upgrade_errlog);
#endif

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_spa, spa_, upgrade_errlog_limit, UINT, ZMOD_RW,
	"Limit the number of errors which will be upgraded to the new "
	"on-disk error log when enabling head_errlog");
/* END CSTYLED */