// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2014, Delphix. All rights reserved.
 * Copyright (c) 2019 Datto Inc.
 * Copyright (c) 2021, 2022, George Amanakis. All rights reserved.
 */

/*
 * Routines to manage the on-disk persistent error log.
 *
 * Each pool stores a log of all logical data errors seen during normal
 * operation.  This is actually the union of two distinct logs: the last log,
 * and the current log.  All errors seen are logged to the current log.  When a
 * scrub completes, the current log becomes the last log, the last log is thrown
 * out, and the current log is reinitialized.  This way, if an error is somehow
 * corrected, a new scrub will show that it no longer exists, and it will be
 * deleted from the log when the scrub completes.
 *
 * The log is stored using a ZAP object whose key is a string form of the
 * zbookmark_phys tuple (objset, object, level, blkid), and whose contents are
 * an optional 'objset:object' human-readable string describing the data.  When
 * an error is first logged, this string will be empty, indicating that no name
 * is known.  This prevents us from having to issue a potentially large amount
 * of I/O to discover the object name during an error path.  Instead, we do the
 * calculation when the data is requested, storing the result so future queries
 * will be faster.
 *
 * If the head_errlog feature is enabled, a different on-disk format is used.
 * The error log of each head dataset is stored separately in the zap object
 * and keyed by the head id. This enables listing every dataset affected in
 * userland. In order to be able to track whether an error block has been
 * modified or added to snapshots since it was marked as an error, a new tuple
 * is introduced: zbookmark_err_phys_t. It allows the storage of the birth
 * transaction group of an error block on-disk. The birth transaction group is
 * used by check_filesystem() to assess whether this block was freed,
 * re-written or added to a snapshot since it was marked as an error.
 *
 * This log is then shipped into an nvlist where the key is the dataset name
 * and the value is the object name.  Userland is then responsible for
 * uniquifying this list and displaying it to the user.
 */

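/*
 * Illustrative sketch of the two layouts described above; the keys shown are
 * hypothetical examples (both formats store hex strings, see
 * bookmark_to_name() and errphys_to_name() below):
 *
 *   Legacy format, one flat ZAP per log:
 *	"36:1:0:4" -> ""	(objset 0x36, object 0x1, level 0, blkid 0x4)
 *
 *   head_errlog format, a ZAP of per-dataset ZAPs keyed by head dataset id:
 *	"36" -> { "1:0:4:bd" -> "" }	(object, level, blkid, birth txg)
 */
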
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_objset.h>
#include <sys/dbuf.h>
#include <sys/zfs_znode.h>

#define	NAME_MAX_LEN 64

typedef struct clones {
	uint64_t clone_ds;
	list_node_t node;
} clones_t;

/*
 * spa_upgrade_errlog_limit: A zfs module parameter that controls the number
 *		of on-disk error log entries that will be converted to the new
 *		format when enabling head_errlog. Defaults to 0, which converts
 *		all log entries.
 */
static uint_t spa_upgrade_errlog_limit = 0;

/*
 * Convert a bookmark to a string.
 */
static void
bookmark_to_name(zbookmark_phys_t *zb, char *buf, size_t len)
{
	(void) snprintf(buf, len, "%llx:%llx:%llx:%llx",
	    (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
	    (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid);
}

/*
 * Convert an err_phys to a string.
 */
static void
errphys_to_name(zbookmark_err_phys_t *zep, char *buf, size_t len)
{
	(void) snprintf(buf, len, "%llx:%llx:%llx:%llx",
	    (u_longlong_t)zep->zb_object, (u_longlong_t)zep->zb_level,
	    (u_longlong_t)zep->zb_blkid, (u_longlong_t)zep->zb_birth);
}

/*
 * Convert a string to an err_phys.
 */
void
name_to_errphys(char *buf, zbookmark_err_phys_t *zep)
{
	zep->zb_object = zfs_strtonum(buf, &buf);
	ASSERT(*buf == ':');
	zep->zb_level = (int)zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zep->zb_blkid = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zep->zb_birth = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == '\0');
}

/*
 * Convert a string to a bookmark.
 */
static void
name_to_bookmark(char *buf, zbookmark_phys_t *zb)
{
	zb->zb_objset = zfs_strtonum(buf, &buf);
	ASSERT(*buf == ':');
	zb->zb_object = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_level = (int)zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_blkid = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == '\0');
}

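/*
 * Expand an on-disk error tuple into a full bookmark for the given dataset.
 * zbookmark_phys_t has no birth field, so zep->zb_birth is not carried over.
 */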
void
zep_to_zb(uint64_t dataset, zbookmark_err_phys_t *zep, zbookmark_phys_t *zb)
{
	zb->zb_objset = dataset;
	zb->zb_object = zep->zb_object;
	zb->zb_level = zep->zb_level;
	zb->zb_blkid = zep->zb_blkid;
}

static void
name_to_object(char *buf, uint64_t *obj)
{
	*obj = zfs_strtonum(buf, &buf);
	ASSERT(*buf == '\0');
}

/*
 * Retrieve the head filesystem.
 */
static int get_head_ds(spa_t *spa, uint64_t dsobj, uint64_t *head_ds)
{
	dsl_dataset_t *ds;
	int error = dsl_dataset_hold_obj_flags(spa->spa_dsl_pool,
	    dsobj, DS_HOLD_FLAG_DECRYPT, FTAG, &ds);

	if (error != 0)
		return (error);

	ASSERT(head_ds);
	*head_ds = dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;
	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);

	return (error);
}

/*
 * Log an uncorrectable error to the persistent error log.  We add it to the
 * spa's list of pending errors.  The changes are actually synced out to disk
 * during spa_errlog_sync().
 */
void
spa_log_error(spa_t *spa, const zbookmark_phys_t *zb, const uint64_t birth)
{
	spa_error_entry_t search;
	spa_error_entry_t *new;
	avl_tree_t *tree;
	avl_index_t where;

	/*
	 * If we are trying to import a pool, ignore any errors, as we won't be
	 * writing to the pool any time soon.
	 */
	if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
		return;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * If we have had a request to rotate the log, log it to the next list
	 * instead of the current one.
	 */
	if (spa->spa_scrub_active || spa->spa_scrub_finished)
		tree = &spa->spa_errlist_scrub;
	else
		tree = &spa->spa_errlist_last;

	search.se_bookmark = *zb;
	if (avl_find(tree, &search, &where) != NULL) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
	new->se_bookmark = *zb;

	/*
	 * If the head_errlog feature is enabled, store the birth txg now. In
	 * case the file is deleted before spa_errlog_sync() runs, we will not
	 * be able to retrieve the birth txg.
	 */
	if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		new->se_zep.zb_object = zb->zb_object;
		new->se_zep.zb_level = zb->zb_level;
		new->se_zep.zb_blkid = zb->zb_blkid;
		new->se_zep.zb_birth = birth;
	}

	avl_insert(tree, new, where);
	mutex_exit(&spa->spa_errlist_lock);
}

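/*
 * A minimal usage sketch (hypothetical caller; the real call sites are in
 * the ZIO error paths):
 *
 *	spa_log_error(spa, &zio->io_bookmark,
 *	    BP_GET_LOGICAL_BIRTH(zio->io_bp));
 *
 * The entry only lands in an in-core AVL tree here; spa_errlog_sync() later
 * persists it to the on-disk ZAP in syncing context.
 */
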
int
find_birth_txg(dsl_dataset_t *ds, zbookmark_err_phys_t *zep,
    uint64_t *birth_txg)
{
	objset_t *os;
	int error = dmu_objset_from_ds(ds, &os);
	if (error != 0)
		return (error);

	dnode_t *dn;
	blkptr_t bp;

	error = dnode_hold(os, zep->zb_object, FTAG, &dn);
	if (error != 0)
		return (error);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	error = dbuf_dnode_findbp(dn, zep->zb_level, zep->zb_blkid, &bp, NULL,
	    NULL);
	if (error == 0 && BP_IS_HOLE(&bp))
		error = SET_ERROR(ENOENT);

	*birth_txg = BP_GET_LOGICAL_BIRTH(&bp);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (error);
}

/*
 * This function finds the oldest affected filesystem containing an error
 * block.
 */
int
find_top_affected_fs(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    uint64_t *top_affected_fs)
{
	uint64_t oldest_dsobj;
	int error = dsl_dataset_oldest_snapshot(spa, head_ds, zep->zb_birth,
	    &oldest_dsobj);
	if (error != 0)
		return (error);

	dsl_dataset_t *ds;
	error = dsl_dataset_hold_obj_flags(spa->spa_dsl_pool, oldest_dsobj,
	    DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
	if (error != 0)
		return (error);

	*top_affected_fs =
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;
	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
	return (0);
}


#ifdef _KERNEL
/*
 * Copy the bookmark to the end of the user-space buffer which starts at
 * uaddr and has *count unused entries, and decrement *count by 1.
 */
static int
copyout_entry(const zbookmark_phys_t *zb, void *uaddr, uint64_t *count)
{
	if (*count == 0)
		return (SET_ERROR(ENOMEM));

	*count -= 1;
	if (copyout(zb, (char *)uaddr + (*count) * sizeof (zbookmark_phys_t),
	    sizeof (zbookmark_phys_t)) != 0)
		return (SET_ERROR(EFAULT));
	return (0);
}

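/*
 * Worked example: with *count == 3 the first entry copied out lands in slot
 * 2 (the end of the buffer), the next in slot 1, and so on.  If only two
 * entries are ever copied, *count is left at 1 and the single unused slot
 * sits at the front of the array.
 */
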
/*
 * Each time the error block is referenced by a snapshot or clone, add a
 * zbookmark_phys_t entry to the userspace array at uaddr. The array is
 * filled from the back and the in-out parameter *count is modified to be the
 * number of unused entries at the beginning of the array. The function
 * scrub_filesystem() is modelled after this one.
 */
static int
check_filesystem(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    void *uaddr, uint64_t *count, list_t *clones_list)
{
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa->spa_dsl_pool;

	int error = dsl_dataset_hold_obj_flags(dp, head_ds,
	    DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
	if (error != 0)
		return (error);

	uint64_t latest_txg;
	uint64_t txg_to_consider = spa->spa_syncing_txg;
	boolean_t check_snapshot = B_TRUE;
	error = find_birth_txg(ds, zep, &latest_txg);

	/*
	 * If find_birth_txg() errors out, leave txg_to_consider equal to the
	 * spa's syncing txg so that every snapshot is examined below.  If
	 * check_filesystem() itself errors out, affected snapshots or clones
	 * will not be checked.
	 */
	if (error == 0 && zep->zb_birth == latest_txg) {
		/* Block neither free nor rewritten. */
		zbookmark_phys_t zb;
		zep_to_zb(head_ds, zep, &zb);
		error = copyout_entry(&zb, uaddr, count);
		if (error != 0) {
			dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
			return (error);
		}
		check_snapshot = B_FALSE;
	} else if (error == 0) {
		txg_to_consider = latest_txg;
	}

	/*
	 * Retrieve the number of snapshots if the dataset is not a snapshot.
	 */
	uint64_t snap_count = 0;
	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {

		error = zap_count(spa->spa_meta_objset,
		    dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);

		if (error != 0) {
			dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
			return (error);
		}
	}

	if (snap_count == 0) {
		/* Filesystem without snapshots. */
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
		return (0);
	}

	uint64_t *snap_obj_array = kmem_zalloc(snap_count * sizeof (uint64_t),
	    KM_SLEEP);

	int aff_snap_count = 0;
	uint64_t snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	uint64_t snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
	uint64_t zap_clone = dsl_dir_phys(ds->ds_dir)->dd_clones;

	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);

	/* Check only snapshots created from this file system. */
	while (snap_obj != 0 && zep->zb_birth < snap_obj_txg &&
	    snap_obj_txg <= txg_to_consider) {

		error = dsl_dataset_hold_obj_flags(dp, snap_obj,
		    DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
		if (error != 0)
			goto out;

		if (dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj != head_ds) {
			snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
			snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
			dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
			continue;
		}

		boolean_t affected = B_TRUE;
		if (check_snapshot) {
			uint64_t blk_txg;
			error = find_birth_txg(ds, zep, &blk_txg);
			affected = (error == 0 && zep->zb_birth == blk_txg);
		}

		/* Report errors in snapshots. */
		if (affected) {
			snap_obj_array[aff_snap_count] = snap_obj;
			aff_snap_count++;

			zbookmark_phys_t zb;
			zep_to_zb(snap_obj, zep, &zb);
			error = copyout_entry(&zb, uaddr, count);
			if (error != 0) {
				dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT,
				    FTAG);
				goto out;
			}
		}
		snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
		snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
	}

	if (zap_clone == 0 || aff_snap_count == 0) {
		error = 0;
		goto out;
	}

	/* Check clones. */
	zap_cursor_t *zc;
	zap_attribute_t *za;

	zc = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = zap_attribute_alloc();

	for (zap_cursor_init(zc, spa->spa_meta_objset, zap_clone);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {

		dsl_dataset_t *clone;
		error = dsl_dataset_hold_obj_flags(dp, za->za_first_integer,
		    DS_HOLD_FLAG_DECRYPT, FTAG, &clone);

		if (error != 0)
			break;

		/*
		 * Only clones whose origins were affected could also
		 * have affected snapshots.
		 */
		boolean_t found = B_FALSE;
		for (int i = 0; i < snap_count; i++) {
			if (dsl_dir_phys(clone->ds_dir)->dd_origin_obj
			    == snap_obj_array[i])
				found = B_TRUE;
		}
		dsl_dataset_rele_flags(clone, DS_HOLD_FLAG_DECRYPT, FTAG);

		if (!found)
			continue;

		clones_t *ct = kmem_zalloc(sizeof (*ct), KM_SLEEP);
		ct->clone_ds = za->za_first_integer;
		list_insert_tail(clones_list, ct);
	}

	zap_cursor_fini(zc);
	zap_attribute_free(za);
	kmem_free(zc, sizeof (*zc));

out:
	kmem_free(snap_obj_array, snap_count * sizeof (uint64_t));
	return (error);
}

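/*
 * Illustrative walk (hypothetical pool): suppose the error block was born in
 * txg 100 and the head filesystem has snapshots taken at txgs 90, 120 and
 * 150.  Only the snapshots at txgs 120 and 150 can reference the block
 * (zep->zb_birth < snap_obj_txg), so the loop above walks them newest to
 * oldest via ds_prev_snap_obj, reports the ones in which find_birth_txg()
 * still sees birth txg 100, and queues any clones originating from those
 * snapshots so that process_error_block() can apply the same check to them.
 */
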
static int
process_error_block(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    void *uaddr, uint64_t *count)
{
	/*
	 * If zb_birth == 0 or head_ds == 0 it means we failed to retrieve the
	 * birth txg or the head filesystem of the block pointer. This may
	 * happen e.g. when an encrypted filesystem is not mounted or when
	 * the key is not loaded. In this case do not proceed to
	 * check_filesystem(), instead do the accounting here.
	 */
	if (zep->zb_birth == 0 || head_ds == 0) {
		zbookmark_phys_t zb;
		zep_to_zb(head_ds, zep, &zb);
		int error = copyout_entry(&zb, uaddr, count);
		if (error != 0) {
			return (error);
		}
		return (0);
	}

	uint64_t top_affected_fs;
	uint64_t init_count = *count;
	int error = find_top_affected_fs(spa, head_ds, zep, &top_affected_fs);
	if (error == 0) {
		clones_t *ct;
		list_t clones_list;

		list_create(&clones_list, sizeof (clones_t),
		    offsetof(clones_t, node));

		error = check_filesystem(spa, top_affected_fs, zep,
		    uaddr, count, &clones_list);

		while ((ct = list_remove_head(&clones_list)) != NULL) {
			error = check_filesystem(spa, ct->clone_ds, zep,
			    uaddr, count, &clones_list);
			kmem_free(ct, sizeof (*ct));

			if (error) {
				while (!list_is_empty(&clones_list)) {
					ct = list_remove_head(&clones_list);
					kmem_free(ct, sizeof (*ct));
				}
				break;
			}
		}

		list_destroy(&clones_list);
	}
	if (error == 0 && init_count == *count) {
		/*
		 * If we reach this point, no errors have been detected
		 * in the checked filesystems/snapshots. Before returning mark
		 * the error block to be removed from the error lists and logs.
		 */
		zbookmark_phys_t zb;
		zep_to_zb(head_ds, zep, &zb);
		spa_remove_error(spa, &zb, zep->zb_birth);
	}

	return (error);
}
#endif

/* Return the number of errors in the last on-disk error log. */
uint64_t
spa_get_last_errlog_size(spa_t *spa)
{
	uint64_t total = 0, count;
	mutex_enter(&spa->spa_errlog_lock);

	if (spa->spa_errlog_last != 0 &&
	    zap_count(spa->spa_meta_objset, spa->spa_errlog_last,
	    &count) == 0)
		total += count;
	mutex_exit(&spa->spa_errlog_lock);
	return (total);
}

/*
 * If a healed bookmark matches an entry in the error log we stash it in a tree
 * so that we can later remove the related log entries in sync context.
 */
static void
spa_add_healed_error(spa_t *spa, uint64_t obj, zbookmark_phys_t *healed_zb,
    const uint64_t birth)
{
	char name[NAME_MAX_LEN];

	if (obj == 0)
		return;

	boolean_t held_list = B_FALSE;
	boolean_t held_log = B_FALSE;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		bookmark_to_name(healed_zb, name, sizeof (name));

		if (zap_contains(spa->spa_meta_objset, healed_zb->zb_objset,
		    name) == 0) {
			if (!MUTEX_HELD(&spa->spa_errlog_lock)) {
				mutex_enter(&spa->spa_errlog_lock);
				held_log = B_TRUE;
			}

			/*
			 * Found an error matching healed zb, add zb to our
			 * tree of healed errors
			 */
			avl_tree_t *tree = &spa->spa_errlist_healed;
			spa_error_entry_t search;
			spa_error_entry_t *new;
			avl_index_t where;
			search.se_bookmark = *healed_zb;
			if (!MUTEX_HELD(&spa->spa_errlist_lock)) {
				mutex_enter(&spa->spa_errlist_lock);
				held_list = B_TRUE;
			}
			if (avl_find(tree, &search, &where) != NULL) {
				if (held_list)
					mutex_exit(&spa->spa_errlist_lock);
				if (held_log)
					mutex_exit(&spa->spa_errlog_lock);
				return;
			}
			new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
			new->se_bookmark = *healed_zb;
			avl_insert(tree, new, where);
			if (held_list)
				mutex_exit(&spa->spa_errlist_lock);
			if (held_log)
				mutex_exit(&spa->spa_errlog_lock);
		}
		return;
	}

	zbookmark_err_phys_t healed_zep;
	healed_zep.zb_object = healed_zb->zb_object;
	healed_zep.zb_level = healed_zb->zb_level;
	healed_zep.zb_blkid = healed_zb->zb_blkid;
	healed_zep.zb_birth = birth;

	errphys_to_name(&healed_zep, name, sizeof (name));

	zap_cursor_t zc;
	zap_attribute_t *za = zap_attribute_alloc();
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa->spa_errlog_last);
	    zap_cursor_retrieve(&zc, za) == 0; zap_cursor_advance(&zc)) {
		if (zap_contains(spa->spa_meta_objset, za->za_first_integer,
		    name) == 0) {
			if (!MUTEX_HELD(&spa->spa_errlog_lock)) {
				mutex_enter(&spa->spa_errlog_lock);
				held_log = B_TRUE;
			}

			avl_tree_t *tree = &spa->spa_errlist_healed;
			spa_error_entry_t search;
			spa_error_entry_t *new;
			avl_index_t where;
			search.se_bookmark = *healed_zb;

			if (!MUTEX_HELD(&spa->spa_errlist_lock)) {
				mutex_enter(&spa->spa_errlist_lock);
				held_list = B_TRUE;
			}

			if (avl_find(tree, &search, &where) != NULL) {
				if (held_list)
					mutex_exit(&spa->spa_errlist_lock);
				if (held_log)
					mutex_exit(&spa->spa_errlog_lock);
				continue;
			}
			new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
			new->se_bookmark = *healed_zb;
			new->se_zep = healed_zep;
			avl_insert(tree, new, where);

			if (held_list)
				mutex_exit(&spa->spa_errlist_lock);
			if (held_log)
				mutex_exit(&spa->spa_errlog_lock);
		}
	}
	zap_cursor_fini(&zc);
	zap_attribute_free(za);
}

/*
 * If this error exists in the given tree remove it.
 */
static void
remove_error_from_list(spa_t *spa, avl_tree_t *t, const zbookmark_phys_t *zb)
{
	spa_error_entry_t search, *found;
	avl_index_t where;

	mutex_enter(&spa->spa_errlist_lock);
	search.se_bookmark = *zb;
	if ((found = avl_find(t, &search, &where)) != NULL) {
		avl_remove(t, found);
		kmem_free(found, sizeof (spa_error_entry_t));
	}
	mutex_exit(&spa->spa_errlist_lock);
}


/*
 * Remove all of the errors healed by a corrective receive from both on-disk
 * error logs.
 */
static void
spa_remove_healed_errors(spa_t *spa, avl_tree_t *s, avl_tree_t *l, dmu_tx_t *tx)
{
	char name[NAME_MAX_LEN];
	spa_error_entry_t *se;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(&spa->spa_errlog_lock));

	while ((se = avl_destroy_nodes(&spa->spa_errlist_healed,
	    &cookie)) != NULL) {
		remove_error_from_list(spa, s, &se->se_bookmark);
		remove_error_from_list(spa, l, &se->se_bookmark);

		if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
			bookmark_to_name(&se->se_bookmark, name, sizeof (name));
			(void) zap_remove(spa->spa_meta_objset,
			    spa->spa_errlog_last, name, tx);
			(void) zap_remove(spa->spa_meta_objset,
			    spa->spa_errlog_scrub, name, tx);
		} else {
			errphys_to_name(&se->se_zep, name, sizeof (name));
			zap_cursor_t zc;
			zap_attribute_t *za = zap_attribute_alloc();
			for (zap_cursor_init(&zc, spa->spa_meta_objset,
			    spa->spa_errlog_last);
			    zap_cursor_retrieve(&zc, za) == 0;
			    zap_cursor_advance(&zc)) {
				zap_remove(spa->spa_meta_objset,
				    za->za_first_integer, name, tx);
			}
			zap_cursor_fini(&zc);

			for (zap_cursor_init(&zc, spa->spa_meta_objset,
			    spa->spa_errlog_scrub);
			    zap_cursor_retrieve(&zc, za) == 0;
			    zap_cursor_advance(&zc)) {
				zap_remove(spa->spa_meta_objset,
				    za->za_first_integer, name, tx);
			}
			zap_cursor_fini(&zc);
			zap_attribute_free(za);
		}
		kmem_free(se, sizeof (spa_error_entry_t));
	}
}

/*
 * Stash away healed bookmarks to remove them from the on-disk error logs
 * later in spa_remove_healed_errors().
 */
void
spa_remove_error(spa_t *spa, zbookmark_phys_t *zb, uint64_t birth)
{
	spa_add_healed_error(spa, spa->spa_errlog_last, zb, birth);
	spa_add_healed_error(spa, spa->spa_errlog_scrub, zb, birth);
}

static uint64_t
approx_errlog_size_impl(spa_t *spa, uint64_t spa_err_obj)
{
	if (spa_err_obj == 0)
		return (0);
	uint64_t total = 0;

	zap_cursor_t zc;
	zap_attribute_t *za = zap_attribute_alloc();
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, za) == 0; zap_cursor_advance(&zc)) {
		uint64_t count;
		if (zap_count(spa->spa_meta_objset, za->za_first_integer,
		    &count) == 0)
			total += count;
	}
	zap_cursor_fini(&zc);
	zap_attribute_free(za);
	return (total);
}

/*
 * Return the approximate number of errors currently in the error log.  This
 * will be nonzero whenever there are errors, but it may be more or less than
 * the exact number of entries returned by spa_get_errlog().
 */
uint64_t
spa_approx_errlog_size(spa_t *spa)
{
	uint64_t total = 0;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		mutex_enter(&spa->spa_errlog_lock);
		uint64_t count;
		if (spa->spa_errlog_scrub != 0 &&
		    zap_count(spa->spa_meta_objset, spa->spa_errlog_scrub,
		    &count) == 0)
			total += count;

		if (spa->spa_errlog_last != 0 && !spa->spa_scrub_finished &&
		    zap_count(spa->spa_meta_objset, spa->spa_errlog_last,
		    &count) == 0)
			total += count;
		mutex_exit(&spa->spa_errlog_lock);

	} else {
		mutex_enter(&spa->spa_errlog_lock);
		total += approx_errlog_size_impl(spa, spa->spa_errlog_last);
		total += approx_errlog_size_impl(spa, spa->spa_errlog_scrub);
		mutex_exit(&spa->spa_errlog_lock);
	}
	mutex_enter(&spa->spa_errlist_lock);
	total += avl_numnodes(&spa->spa_errlist_last);
	total += avl_numnodes(&spa->spa_errlist_scrub);
	mutex_exit(&spa->spa_errlist_lock);
	return (total);
}

/*
 * This function sweeps through an on-disk error log and stores all bookmarks
 * as error bookmarks in a new ZAP object. At the end we discard the old one,
 * and spa_upgrade_errlog() will set the spa's on-disk error log to the new
 * ZAP object.
 */
static void
sync_upgrade_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t *newobj,
    dmu_tx_t *tx)
{
	zap_cursor_t zc;
	zap_attribute_t *za;
	zbookmark_phys_t zb;
	uint64_t count;

	*newobj = zap_create(spa->spa_meta_objset, DMU_OT_ERROR_LOG,
	    DMU_OT_NONE, 0, tx);

	/*
	 * If we cannot perform the upgrade we should clear the old on-disk
	 * error logs.
	 */
	if (zap_count(spa->spa_meta_objset, spa_err_obj, &count) != 0) {
		VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
		return;
	}

	za = zap_attribute_alloc();
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, za) == 0;
	    zap_cursor_advance(&zc)) {
		if (spa_upgrade_errlog_limit != 0 &&
		    zc.zc_cd == spa_upgrade_errlog_limit)
			break;

		name_to_bookmark(za->za_name, &zb);

		zbookmark_err_phys_t zep;
		zep.zb_object = zb.zb_object;
		zep.zb_level = zb.zb_level;
		zep.zb_blkid = zb.zb_blkid;
		zep.zb_birth = 0;

		/*
		 * In case of an error we should simply continue instead of
		 * returning prematurely. See the next comment.
		 */
		uint64_t head_ds;
		dsl_pool_t *dp = spa->spa_dsl_pool;
		dsl_dataset_t *ds;
		objset_t *os;

		int error = dsl_dataset_hold_obj_flags(dp, zb.zb_objset,
		    DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
		if (error != 0)
			continue;

		head_ds = dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;

		/*
		 * The objset and the dnode are required for getting the block
		 * pointer, which is used to determine if BP_IS_HOLE(). If
		 * getting the objset or the dnode fails, do not create a
		 * zap entry (presuming we know the dataset) as this may create
		 * spurious errors that we cannot ever resolve. If an error is
		 * truly persistent, it should re-appear after a scan.
		 */
		if (dmu_objset_from_ds(ds, &os) != 0) {
			dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
			continue;
		}

		dnode_t *dn;
		blkptr_t bp;

		if (dnode_hold(os, zep.zb_object, FTAG, &dn) != 0) {
			dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
			continue;
		}

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		error = dbuf_dnode_findbp(dn, zep.zb_level, zep.zb_blkid, &bp,
		    NULL, NULL);
		if (error == EACCES)
			error = 0;
		else if (!error)
			zep.zb_birth = BP_GET_LOGICAL_BIRTH(&bp);

		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);

		if (error != 0 || BP_IS_HOLE(&bp))
			continue;

		uint64_t err_obj;
		error = zap_lookup_int_key(spa->spa_meta_objset, *newobj,
		    head_ds, &err_obj);

		if (error == ENOENT) {
			err_obj = zap_create(spa->spa_meta_objset,
			    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

			(void) zap_update_int_key(spa->spa_meta_objset,
			    *newobj, head_ds, err_obj, tx);
		}

		char buf[64];
		errphys_to_name(&zep, buf, sizeof (buf));

		const char *name = "";
		(void) zap_update(spa->spa_meta_objset, err_obj,
		    buf, 1, strlen(name) + 1, name, tx);
	}
	zap_cursor_fini(&zc);
	zap_attribute_free(za);

	VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
}

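/*
 * Sketch of the transformation performed above (hypothetical values): a
 * legacy entry keyed "36:1:0:4" (objset:object:level:blkid) in the old flat
 * log becomes an entry "1:0:4:bd" (object:level:blkid:birth) in a
 * per-dataset ZAP that is itself keyed in the new log by the head dataset
 * id of objset 0x36.
 */
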
void
spa_upgrade_errlog(spa_t *spa, dmu_tx_t *tx)
{
	uint64_t newobj = 0;

	mutex_enter(&spa->spa_errlog_lock);
	if (spa->spa_errlog_last != 0) {
		sync_upgrade_errlog(spa, spa->spa_errlog_last, &newobj, tx);
		spa->spa_errlog_last = newobj;

		(void) zap_update(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
		    sizeof (uint64_t), 1, &spa->spa_errlog_last, tx);
	}

	if (spa->spa_errlog_scrub != 0) {
		sync_upgrade_errlog(spa, spa->spa_errlog_scrub, &newobj, tx);
		spa->spa_errlog_scrub = newobj;

		(void) zap_update(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
		    sizeof (uint64_t), 1, &spa->spa_errlog_scrub, tx);
	}

	mutex_exit(&spa->spa_errlog_lock);
}

#ifdef _KERNEL
/*
 * If an error block is shared by two datasets it will be counted twice.
 */
static int
process_error_log(spa_t *spa, uint64_t obj, void *uaddr, uint64_t *count)
{
	if (obj == 0)
		return (0);

	zap_cursor_t *zc;
	zap_attribute_t *za;

	zc = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = zap_attribute_alloc();

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (zap_cursor_init(zc, spa->spa_meta_objset, obj);
		    zap_cursor_retrieve(zc, za) == 0;
		    zap_cursor_advance(zc)) {
			if (*count == 0) {
				zap_cursor_fini(zc);
				kmem_free(zc, sizeof (*zc));
				zap_attribute_free(za);
				return (SET_ERROR(ENOMEM));
			}

			zbookmark_phys_t zb;
			name_to_bookmark(za->za_name, &zb);

			int error = copyout_entry(&zb, uaddr, count);
			if (error != 0) {
				zap_cursor_fini(zc);
				kmem_free(zc, sizeof (*zc));
				zap_attribute_free(za);
				return (error);
			}
		}
		zap_cursor_fini(zc);
		kmem_free(zc, sizeof (*zc));
		zap_attribute_free(za);
		return (0);
	}

	for (zap_cursor_init(zc, spa->spa_meta_objset, obj);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {

		zap_cursor_t *head_ds_cursor;
		zap_attribute_t *head_ds_attr;

		head_ds_cursor = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
		head_ds_attr = zap_attribute_alloc();

		uint64_t head_ds_err_obj = za->za_first_integer;
		uint64_t head_ds;
		name_to_object(za->za_name, &head_ds);
		for (zap_cursor_init(head_ds_cursor, spa->spa_meta_objset,
		    head_ds_err_obj); zap_cursor_retrieve(head_ds_cursor,
		    head_ds_attr) == 0; zap_cursor_advance(head_ds_cursor)) {

			zbookmark_err_phys_t head_ds_block;
			name_to_errphys(head_ds_attr->za_name, &head_ds_block);
			int error = process_error_block(spa, head_ds,
			    &head_ds_block, uaddr, count);

			if (error != 0) {
				zap_cursor_fini(head_ds_cursor);
				kmem_free(head_ds_cursor,
				    sizeof (*head_ds_cursor));
				zap_attribute_free(head_ds_attr);

				zap_cursor_fini(zc);
				zap_attribute_free(za);
				kmem_free(zc, sizeof (*zc));
				return (error);
			}
		}
		zap_cursor_fini(head_ds_cursor);
		kmem_free(head_ds_cursor, sizeof (*head_ds_cursor));
		zap_attribute_free(head_ds_attr);
	}
	zap_cursor_fini(zc);
	zap_attribute_free(za);
	kmem_free(zc, sizeof (*zc));
	return (0);
}

static int
process_error_list(spa_t *spa, avl_tree_t *list, void *uaddr, uint64_t *count)
{
	spa_error_entry_t *se;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (se = avl_first(list); se != NULL;
		    se = AVL_NEXT(list, se)) {
			int error =
			    copyout_entry(&se->se_bookmark, uaddr, count);
			if (error != 0) {
				return (error);
			}
		}
		return (0);
	}

	for (se = avl_first(list); se != NULL; se = AVL_NEXT(list, se)) {
		uint64_t head_ds = 0;
		int error = get_head_ds(spa, se->se_bookmark.zb_objset,
		    &head_ds);

		/*
		 * If get_head_ds() errors out, set the head filesystem
		 * to the filesystem stored in the bookmark of the
		 * error block.
		 */
		if (error != 0)
			head_ds = se->se_bookmark.zb_objset;

		error = process_error_block(spa, head_ds,
		    &se->se_zep, uaddr, count);
		if (error != 0)
			return (error);
	}
	return (0);
}
#endif

/*
 * Copy all known errors to userland as an array of bookmarks.  This is
 * actually a union of the on-disk last log and current log, as well as any
 * pending error requests.
 *
 * Because the act of reading the on-disk log could cause errors to be
 * generated, we have two separate locks: one for the error log and one for the
 * in-core error lists.  We only need the error list lock to log an error, so
 * we grab the error log lock while we read the on-disk logs, and only pick up
 * the error list lock when we are finished.
 */
int
spa_get_errlog(spa_t *spa, void *uaddr, uint64_t *count)
{
	int ret = 0;

#ifdef _KERNEL
	/*
	 * The pool config lock is needed to hold a dataset_t via (among other
	 * places) process_error_list() -> process_error_block() ->
	 * find_top_affected_fs(), and lock ordering requires that we get it
	 * before the spa_errlog_lock.
	 */
	dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
	mutex_enter(&spa->spa_errlog_lock);

	ret = process_error_log(spa, spa->spa_errlog_scrub, uaddr, count);

	if (!ret && !spa->spa_scrub_finished)
		ret = process_error_log(spa, spa->spa_errlog_last, uaddr,
		    count);

	mutex_enter(&spa->spa_errlist_lock);
	if (!ret)
		ret = process_error_list(spa, &spa->spa_errlist_scrub, uaddr,
		    count);
	if (!ret)
		ret = process_error_list(spa, &spa->spa_errlist_last, uaddr,
		    count);
	mutex_exit(&spa->spa_errlist_lock);

	mutex_exit(&spa->spa_errlog_lock);
	dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
#else
	(void) spa, (void) uaddr, (void) count;
#endif

	return (ret);
}

/*
 * Called when a scrub completes.  This simply sets a bit which tells which AVL
 * tree to add new errors to.  spa_errlog_sync() is responsible for actually
 * syncing the changes to the underlying objects.
 */
void
spa_errlog_rotate(spa_t *spa)
{
	mutex_enter(&spa->spa_errlist_lock);
	spa->spa_scrub_finished = B_TRUE;
	mutex_exit(&spa->spa_errlist_lock);
}

/*
 * Discard any pending errors from the spa_t.  Called when unloading a faulted
 * pool, as the errors encountered during the open cannot be synced to disk.
 */
void
spa_errlog_drain(spa_t *spa)
{
	spa_error_entry_t *se;
	void *cookie;

	mutex_enter(&spa->spa_errlist_lock);

	cookie = NULL;
	while ((se = avl_destroy_nodes(&spa->spa_errlist_last,
	    &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));
	cookie = NULL;
	while ((se = avl_destroy_nodes(&spa->spa_errlist_scrub,
	    &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));

	mutex_exit(&spa->spa_errlist_lock);
}

/*
 * Process a list of errors into the current on-disk log.
 */
void
sync_error_list(spa_t *spa, avl_tree_t *t, uint64_t *obj, dmu_tx_t *tx)
{
	spa_error_entry_t *se;
	char buf[NAME_MAX_LEN];
	void *cookie;

	if (avl_numnodes(t) == 0)
		return;

	/* create log if necessary */
	if (*obj == 0)
		*obj = zap_create(spa->spa_meta_objset, DMU_OT_ERROR_LOG,
		    DMU_OT_NONE, 0, tx);

	/* add errors to the current log */
	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
			bookmark_to_name(&se->se_bookmark, buf, sizeof (buf));

			const char *name = se->se_name ? se->se_name : "";
			(void) zap_update(spa->spa_meta_objset, *obj, buf, 1,
			    strlen(name) + 1, name, tx);
		}
	} else {
		for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
			zbookmark_err_phys_t zep;
			zep.zb_object = se->se_zep.zb_object;
			zep.zb_level = se->se_zep.zb_level;
			zep.zb_blkid = se->se_zep.zb_blkid;
			zep.zb_birth = se->se_zep.zb_birth;

			uint64_t head_ds = 0;
			int error = get_head_ds(spa, se->se_bookmark.zb_objset,
			    &head_ds);

			/*
			 * If get_head_ds() errors out, set the head filesystem
			 * to the filesystem stored in the bookmark of the
			 * error block.
			 */
			if (error != 0)
				head_ds = se->se_bookmark.zb_objset;

			uint64_t err_obj;
			error = zap_lookup_int_key(spa->spa_meta_objset,
			    *obj, head_ds, &err_obj);

			if (error == ENOENT) {
				err_obj = zap_create(spa->spa_meta_objset,
				    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

				(void) zap_update_int_key(spa->spa_meta_objset,
				    *obj, head_ds, err_obj, tx);
			}
			errphys_to_name(&zep, buf, sizeof (buf));

			const char *name = se->se_name ? se->se_name : "";
			(void) zap_update(spa->spa_meta_objset,
			    err_obj, buf, 1, strlen(name) + 1, name, tx);
		}
	}
	/* purge the error list */
	cookie = NULL;
	while ((se = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));
}

static void
delete_errlog(spa_t *spa, uint64_t spa_err_obj, dmu_tx_t *tx)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		zap_cursor_t zc;
		zap_attribute_t *za = zap_attribute_alloc();
		for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
		    zap_cursor_retrieve(&zc, za) == 0;
		    zap_cursor_advance(&zc)) {
			VERIFY0(dmu_object_free(spa->spa_meta_objset,
			    za->za_first_integer, tx));
		}
		zap_cursor_fini(&zc);
		zap_attribute_free(za);
	}
	VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
}

/*
 * Sync the error log out to disk.  This is a little tricky because the act of
 * writing the error log requires the spa_errlist_lock.  So, we need to lock the
 * error lists, take a copy of the lists, and then reinitialize them.  Then, we
 * drop the error list lock and take the error log lock, at which point we
 * do the errlog processing.  Then, if we encounter an I/O error during this
 * process, we can successfully add the error to the list.  Note that this will
 * result in the perpetual recycling of errors, but it is an unlikely situation
 * and not a performance critical operation.
 */
void
spa_errlog_sync(spa_t *spa, uint64_t txg)
{
	dmu_tx_t *tx;
	avl_tree_t scrub, last;
	int scrub_finished;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * Bail out early under normal circumstances.
	 */
	if (avl_numnodes(&spa->spa_errlist_scrub) == 0 &&
	    avl_numnodes(&spa->spa_errlist_last) == 0 &&
	    avl_numnodes(&spa->spa_errlist_healed) == 0 &&
	    !spa->spa_scrub_finished) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	spa_get_errlists(spa, &last, &scrub);
	scrub_finished = spa->spa_scrub_finished;
	spa->spa_scrub_finished = B_FALSE;

	mutex_exit(&spa->spa_errlist_lock);

	/*
	 * The pool config lock is needed to hold a dataset_t via
	 * sync_error_list() -> get_head_ds(), and lock ordering
	 * requires that we get it before the spa_errlog_lock.
	 */
	dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
	mutex_enter(&spa->spa_errlog_lock);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	/*
	 * Remove healed errors from the in-core lists and the on-disk logs.
	 */
	spa_remove_healed_errors(spa, &last, &scrub, tx);

	/*
	 * Sync out the current list of errors.
	 */
	sync_error_list(spa, &last, &spa->spa_errlog_last, tx);

	/*
	 * Rotate the log if necessary.
	 */
	if (scrub_finished) {
		if (spa->spa_errlog_last != 0)
			delete_errlog(spa, spa->spa_errlog_last, tx);
		spa->spa_errlog_last = spa->spa_errlog_scrub;
		spa->spa_errlog_scrub = 0;

		sync_error_list(spa, &scrub, &spa->spa_errlog_last, tx);
	}

	/*
	 * Sync out any pending scrub errors.
	 */
	sync_error_list(spa, &scrub, &spa->spa_errlog_scrub, tx);

	/*
	 * Update the MOS to reflect the new values.
	 */
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_LAST, sizeof (uint64_t), 1,
	    &spa->spa_errlog_last, tx);
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_SCRUB, sizeof (uint64_t), 1,
	    &spa->spa_errlog_scrub, tx);

	dmu_tx_commit(tx);

	mutex_exit(&spa->spa_errlog_lock);
	dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
}

static void
delete_dataset_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t ds,
    dmu_tx_t *tx)
{
	if (spa_err_obj == 0)
		return;

	zap_cursor_t zc;
	zap_attribute_t *za = zap_attribute_alloc();
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, za) == 0; zap_cursor_advance(&zc)) {
		uint64_t head_ds;
		name_to_object(za->za_name, &head_ds);
		if (head_ds == ds) {
			(void) zap_remove(spa->spa_meta_objset, spa_err_obj,
			    za->za_name, tx);
			VERIFY0(dmu_object_free(spa->spa_meta_objset,
			    za->za_first_integer, tx));
			break;
		}
	}
	zap_cursor_fini(&zc);
	zap_attribute_free(za);
}

void
spa_delete_dataset_errlog(spa_t *spa, uint64_t ds, dmu_tx_t *tx)
{
	mutex_enter(&spa->spa_errlog_lock);
	delete_dataset_errlog(spa, spa->spa_errlog_scrub, ds, tx);
	delete_dataset_errlog(spa, spa->spa_errlog_last, ds, tx);
	mutex_exit(&spa->spa_errlog_lock);
}

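/*
 * Find the first ancestor snapshot of old_head that now belongs to new_head
 * (i.e. the point at which the promoted clone diverged) and return its txg.
 */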
static int
find_txg_ancestor_snapshot(spa_t *spa, uint64_t new_head, uint64_t old_head,
    uint64_t *txg)
{
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa->spa_dsl_pool;

	int error = dsl_dataset_hold_obj_flags(dp, old_head,
	    DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
	if (error != 0)
		return (error);

	uint64_t prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	uint64_t prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;

	while (prev_obj != 0) {
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
		if ((error = dsl_dataset_hold_obj_flags(dp, prev_obj,
		    DS_HOLD_FLAG_DECRYPT, FTAG, &ds)) == 0 &&
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj == new_head)
			break;

		if (error != 0)
			return (error);

		prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
		prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	}
	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
	ASSERT(prev_obj != 0);
	*txg = prev_obj_txg;
	return (0);
}

static void
swap_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t new_head, uint64_t
    old_head, dmu_tx_t *tx)
{
	if (spa_err_obj == 0)
		return;

	uint64_t old_head_errlog;
	int error = zap_lookup_int_key(spa->spa_meta_objset, spa_err_obj,
	    old_head, &old_head_errlog);

	/* If no error log, then there is nothing to do. */
	if (error != 0)
		return;

	uint64_t txg;
	error = find_txg_ancestor_snapshot(spa, new_head, old_head, &txg);
	if (error != 0)
		return;

	/*
	 * Create an error log if the file system being promoted does not
	 * already have one.
	 */
	uint64_t new_head_errlog;
	error = zap_lookup_int_key(spa->spa_meta_objset, spa_err_obj, new_head,
	    &new_head_errlog);

	if (error != 0) {
		new_head_errlog = zap_create(spa->spa_meta_objset,
		    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

		(void) zap_update_int_key(spa->spa_meta_objset, spa_err_obj,
		    new_head, new_head_errlog, tx);
	}

	zap_cursor_t zc;
	zap_attribute_t *za = zap_attribute_alloc();
	zbookmark_err_phys_t err_block;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, old_head_errlog);
	    zap_cursor_retrieve(&zc, za) == 0; zap_cursor_advance(&zc)) {

		const char *name = "";
		name_to_errphys(za->za_name, &err_block);
		if (err_block.zb_birth < txg) {
			(void) zap_update(spa->spa_meta_objset, new_head_errlog,
			    za->za_name, 1, strlen(name) + 1, name, tx);

			(void) zap_remove(spa->spa_meta_objset, old_head_errlog,
			    za->za_name, tx);
		}
	}
	zap_cursor_fini(&zc);
	zap_attribute_free(za);
}

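/*
 * Promotion example (hypothetical txgs): if the clone being promoted
 * diverged from old_head at the snapshot born in txg 0xbd, every entry in
 * old_head's log whose birth txg is below 0xbd predates the divergence and
 * is therefore owned by the promoted head; the loop above moves exactly
 * those entries into new_head_errlog and leaves the younger ones behind.
 */
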
void
spa_swap_errlog(spa_t *spa, uint64_t new_head_ds, uint64_t old_head_ds,
    dmu_tx_t *tx)
{
	mutex_enter(&spa->spa_errlog_lock);
	swap_errlog(spa, spa->spa_errlog_scrub, new_head_ds, old_head_ds, tx);
	swap_errlog(spa, spa->spa_errlog_last, new_head_ds, old_head_ds, tx);
	mutex_exit(&spa->spa_errlog_lock);
}

#if defined(_KERNEL)
/* error handling */
EXPORT_SYMBOL(spa_log_error);
EXPORT_SYMBOL(spa_approx_errlog_size);
EXPORT_SYMBOL(spa_get_last_errlog_size);
EXPORT_SYMBOL(spa_get_errlog);
EXPORT_SYMBOL(spa_errlog_rotate);
EXPORT_SYMBOL(spa_errlog_drain);
EXPORT_SYMBOL(spa_errlog_sync);
EXPORT_SYMBOL(spa_get_errlists);
EXPORT_SYMBOL(spa_delete_dataset_errlog);
EXPORT_SYMBOL(spa_swap_errlog);
EXPORT_SYMBOL(sync_error_list);
EXPORT_SYMBOL(spa_upgrade_errlog);
EXPORT_SYMBOL(find_top_affected_fs);
EXPORT_SYMBOL(find_birth_txg);
EXPORT_SYMBOL(zep_to_zb);
EXPORT_SYMBOL(name_to_errphys);
#endif

ZFS_MODULE_PARAM(zfs_spa, spa_, upgrade_errlog_limit, UINT, ZMOD_RW,
	"Limit the number of errors which will be upgraded to the new "
	"on-disk error log when enabling head_errlog");
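
/*
 * Note (assumed Linux behavior): the tunable above is expected to appear as
 * /sys/module/zfs/parameters/spa_upgrade_errlog_limit, so e.g.
 *
 *	echo 1000 > /sys/module/zfs/parameters/spa_upgrade_errlog_limit
 *
 * would cap the number of entries converted per log the next time
 * head_errlog is enabled.
 */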
1498