1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
25 * Copyright 2017 Nexenta Systems, Inc.
26 * Copyright (c) 2014 Integros [integros.com]
27 * Copyright 2016 Toomas Soome <tsoome@me.com>
28 * Copyright 2019 Joyent, Inc.
29 * Copyright (c) 2017, Intel Corporation.
30 * Copyright (c) 2019, Datto Inc. All rights reserved.
31 */
32
33 #include <sys/zfs_context.h>
34 #include <sys/fm/fs/zfs.h>
35 #include <sys/spa.h>
36 #include <sys/spa_impl.h>
37 #include <sys/bpobj.h>
38 #include <sys/dmu.h>
39 #include <sys/dmu_tx.h>
40 #include <sys/dsl_dir.h>
41 #include <sys/vdev_impl.h>
42 #include <sys/uberblock_impl.h>
43 #include <sys/metaslab.h>
44 #include <sys/metaslab_impl.h>
45 #include <sys/space_map.h>
46 #include <sys/space_reftree.h>
47 #include <sys/zio.h>
48 #include <sys/zap.h>
49 #include <sys/fs/zfs.h>
50 #include <sys/arc.h>
51 #include <sys/zil.h>
52 #include <sys/dsl_scan.h>
53 #include <sys/abd.h>
54 #include <sys/vdev_initialize.h>
55 #include <sys/vdev_trim.h>
56
57 /*
58 * Virtual device management.
59 */
60
61 static vdev_ops_t *vdev_ops_table[] = {
62 &vdev_root_ops,
63 &vdev_raidz_ops,
64 &vdev_mirror_ops,
65 &vdev_replacing_ops,
66 &vdev_spare_ops,
67 &vdev_disk_ops,
68 &vdev_file_ops,
69 &vdev_missing_ops,
70 &vdev_hole_ops,
71 &vdev_indirect_ops,
72 NULL
73 };
74
75 /* maximum scrub/resilver I/O queue per leaf vdev */
76 int zfs_scrub_limit = 10;
77
78 /* default target for number of metaslabs per top-level vdev */
79 int zfs_vdev_default_ms_count = 200;
80
81 /* minimum number of metaslabs per top-level vdev */
82 int zfs_vdev_min_ms_count = 16;
83
84 /* practical upper limit of total metaslabs per top-level vdev */
85 int zfs_vdev_ms_count_limit = 1ULL << 17;
86
87 /* lower limit for metaslab size (512M) */
88 int zfs_vdev_default_ms_shift = 29;
89
90 /* upper limit for metaslab size (16G) */
91 int zfs_vdev_max_ms_shift = 34;
92
93 boolean_t vdev_validate_skip = B_FALSE;
94
95 /*
96 * Since the DTL space map of a vdev is not expected to have a lot of
97 * entries, we default its block size to 4K.
98 */
99 int zfs_vdev_dtl_sm_blksz = (1 << 12);
100
101 /*
102 * Ignore errors during scrub/resilver. This allows working around a
103 * resilver upon import when there are pool errors.
104 */
105 int zfs_scan_ignore_errors = 0;
106
107 /*
108 * vdev-wide space maps that have lots of entries written to them at
109 * the end of each transaction can benefit from a higher I/O bandwidth
110 * (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
111 */
112 int zfs_vdev_standard_sm_blksz = (1 << 17);
113
114 int zfs_ashift_min;
115
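/*
 * Format a debug message and log it via zfs_dbgmsg(), tagged with the
 * vdev's type and path (or, if no path is set, its id and guid).
 */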
116 /*PRINTFLIKE2*/
117 void
118 vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
119 {
120 va_list adx;
121 char buf[256];
122
123 va_start(adx, fmt);
124 (void) vsnprintf(buf, sizeof (buf), fmt, adx);
125 va_end(adx);
126
127 if (vd->vdev_path != NULL) {
128 zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
129 vd->vdev_path, buf);
130 } else {
131 zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
132 vd->vdev_ops->vdev_op_type,
133 (u_longlong_t)vd->vdev_id,
134 (u_longlong_t)vd->vdev_guid, buf);
135 }
136 }
137
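/*
 * Recursively log the vdev tree rooted at 'vd', one line per vdev,
 * indented by depth, including each vdev's state, guid and path.
 */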
138 void
139 vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
140 {
141 char state[20];
142
143 if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
144 zfs_dbgmsg("%*svdev %u: %s", indent, "", vd->vdev_id,
145 vd->vdev_ops->vdev_op_type);
146 return;
147 }
148
149 switch (vd->vdev_state) {
150 case VDEV_STATE_UNKNOWN:
151 (void) snprintf(state, sizeof (state), "unknown");
152 break;
153 case VDEV_STATE_CLOSED:
154 (void) snprintf(state, sizeof (state), "closed");
155 break;
156 case VDEV_STATE_OFFLINE:
157 (void) snprintf(state, sizeof (state), "offline");
158 break;
159 case VDEV_STATE_REMOVED:
160 (void) snprintf(state, sizeof (state), "removed");
161 break;
162 case VDEV_STATE_CANT_OPEN:
163 (void) snprintf(state, sizeof (state), "can't open");
164 break;
165 case VDEV_STATE_FAULTED:
166 (void) snprintf(state, sizeof (state), "faulted");
167 break;
168 case VDEV_STATE_DEGRADED:
169 (void) snprintf(state, sizeof (state), "degraded");
170 break;
171 case VDEV_STATE_HEALTHY:
172 (void) snprintf(state, sizeof (state), "healthy");
173 break;
174 default:
175 (void) snprintf(state, sizeof (state), "<state %u>",
176 (uint_t)vd->vdev_state);
177 }
178
179 zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
180 "", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
181 vd->vdev_islog ? " (log)" : "",
182 (u_longlong_t)vd->vdev_guid,
183 vd->vdev_path ? vd->vdev_path : "N/A", state);
184
185 for (uint64_t i = 0; i < vd->vdev_children; i++)
186 vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
187 }
188
189 /*
190 * Given a vdev type, return the appropriate ops vector.
191 */
192 static vdev_ops_t *
193 vdev_getops(const char *type)
194 {
195 vdev_ops_t *ops, **opspp;
196
197 for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
198 if (strcmp(ops->vdev_op_type, type) == 0)
199 break;
200
201 return (ops);
202 }
203
204 /*
205 * Derive the enumerated allocation bias from string input.
206 * String origin is either the per-vdev zap or zpool(8).
207 */
208 static vdev_alloc_bias_t
209 vdev_derive_alloc_bias(const char *bias)
210 {
211 vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
212
213 if (strcmp(bias, VDEV_ALLOC_BIAS_LOG) == 0)
214 alloc_bias = VDEV_BIAS_LOG;
215 else if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
216 alloc_bias = VDEV_BIAS_SPECIAL;
217 else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
218 alloc_bias = VDEV_BIAS_DEDUP;
219
220 return (alloc_bias);
221 }
222
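/*
 * Default offset translation: a logical range maps 1:1 onto the
 * physical range.
 */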
223 /* ARGSUSED */
224 void
225 vdev_default_xlate(vdev_t *vd, const range_seg64_t *in, range_seg64_t *res)
226 {
227 res->rs_start = in->rs_start;
228 res->rs_end = in->rs_end;
229 }
230
231 /*
232 * Default asize function: return the MAX of psize with the asize of
233 * all children. This is what's used by anything other than RAID-Z.
234 */
235 uint64_t
236 vdev_default_asize(vdev_t *vd, uint64_t psize)
237 {
238 uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
239 uint64_t csize;
240
241 for (int c = 0; c < vd->vdev_children; c++) {
242 csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
243 asize = MAX(asize, csize);
244 }
245
246 return (asize);
247 }
248
249 /*
250 * Get the minimum allocatable size. We define the allocatable size as
251 * the vdev's asize rounded to the nearest metaslab. This allows us to
252 * replace or attach devices which don't have the same physical size but
253 * can still satisfy the same number of allocations.
254 */
255 uint64_t
256 vdev_get_min_asize(vdev_t *vd)
257 {
258 vdev_t *pvd = vd->vdev_parent;
259
260 /*
261 * If our parent is NULL (inactive spare or cache) or is the root,
262 * just return our own asize.
263 */
264 if (pvd == NULL)
265 return (vd->vdev_asize);
266
267 /*
268 * The top-level vdev just returns the allocatable size rounded
269 * to the nearest metaslab.
270 */
271 if (vd == vd->vdev_top)
272 return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));
273
274 /*
275 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
276 * so each child must provide at least 1/Nth of its asize.
277 */
278 if (pvd->vdev_ops == &vdev_raidz_ops)
279 return ((pvd->vdev_min_asize + pvd->vdev_children - 1) /
280 pvd->vdev_children);
281
282 return (pvd->vdev_min_asize);
283 }
284
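/*
 * Recompute vdev_min_asize for 'vd' and all of its descendants.
 */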
285 void
286 vdev_set_min_asize(vdev_t *vd)
287 {
288 vd->vdev_min_asize = vdev_get_min_asize(vd);
289
290 for (int c = 0; c < vd->vdev_children; c++)
291 vdev_set_min_asize(vd->vdev_child[c]);
292 }
293
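/*
 * Look up a top-level vdev by its index under the root vdev; returns
 * NULL if the index is out of range.
 */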
294 vdev_t *
295 vdev_lookup_top(spa_t *spa, uint64_t vdev)
296 {
297 vdev_t *rvd = spa->spa_root_vdev;
298
299 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
300
301 if (vdev < rvd->vdev_children) {
302 ASSERT(rvd->vdev_child[vdev] != NULL);
303 return (rvd->vdev_child[vdev]);
304 }
305
306 return (NULL);
307 }
308
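/*
 * Recursively search the subtree rooted at 'vd' for a vdev with the
 * given guid; returns NULL if none is found.
 */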
309 vdev_t *
310 vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
311 {
312 vdev_t *mvd;
313
314 if (vd->vdev_guid == guid)
315 return (vd);
316
317 for (int c = 0; c < vd->vdev_children; c++)
318 if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
319 NULL)
320 return (mvd);
321
322 return (NULL);
323 }
324
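/*
 * Count the leaf vdevs in the subtree rooted at 'vd';
 * vdev_count_leaves() below does the same for the entire pool.
 */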
325 static int
326 vdev_count_leaves_impl(vdev_t *vd)
327 {
328 int n = 0;
329
330 if (vd->vdev_ops->vdev_op_leaf)
331 return (1);
332
333 for (int c = 0; c < vd->vdev_children; c++)
334 n += vdev_count_leaves_impl(vd->vdev_child[c]);
335
336 return (n);
337 }
338
339 int
340 vdev_count_leaves(spa_t *spa)
341 {
342 int rc;
343
344 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
345 rc = vdev_count_leaves_impl(spa->spa_root_vdev);
346 spa_config_exit(spa, SCL_VDEV, FTAG);
347
348 return (rc);
349 }
350
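/*
 * Link 'cvd' into 'pvd's child array at slot cvd->vdev_id, growing the
 * array as needed, update the guid sums of all ancestors, and add leaf
 * vdevs to the spa's leaf list.
 */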
351 void
352 vdev_add_child(vdev_t *pvd, vdev_t *cvd)
353 {
354 size_t oldsize, newsize;
355 uint64_t id = cvd->vdev_id;
356 vdev_t **newchild;
357 spa_t *spa = cvd->vdev_spa;
358
359 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
360 ASSERT(cvd->vdev_parent == NULL);
361
362 cvd->vdev_parent = pvd;
363
364 if (pvd == NULL)
365 return;
366
367 ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);
368
369 oldsize = pvd->vdev_children * sizeof (vdev_t *);
370 pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
371 newsize = pvd->vdev_children * sizeof (vdev_t *);
372
373 newchild = kmem_zalloc(newsize, KM_SLEEP);
374 if (pvd->vdev_child != NULL) {
375 bcopy(pvd->vdev_child, newchild, oldsize);
376 kmem_free(pvd->vdev_child, oldsize);
377 }
378
379 pvd->vdev_child = newchild;
380 pvd->vdev_child[id] = cvd;
381
382 cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
383 ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
384
385 /*
386 * Walk up all ancestors to update guid sum.
387 */
388 for (; pvd != NULL; pvd = pvd->vdev_parent)
389 pvd->vdev_guid_sum += cvd->vdev_guid_sum;
390
391 if (cvd->vdev_ops->vdev_op_leaf) {
392 list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
393 cvd->vdev_spa->spa_leaf_list_gen++;
394 }
395 }
396
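/*
 * Unlink 'cvd' from 'pvd's child array, freeing the array if it becomes
 * empty, remove leaf vdevs from the spa's leaf list, and update the
 * guid sums of all ancestors.
 */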
397 void
398 vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
399 {
400 int c;
401 uint_t id = cvd->vdev_id;
402
403 ASSERT(cvd->vdev_parent == pvd);
404
405 if (pvd == NULL)
406 return;
407
408 ASSERT(id < pvd->vdev_children);
409 ASSERT(pvd->vdev_child[id] == cvd);
410
411 pvd->vdev_child[id] = NULL;
412 cvd->vdev_parent = NULL;
413
414 for (c = 0; c < pvd->vdev_children; c++)
415 if (pvd->vdev_child[c])
416 break;
417
418 if (c == pvd->vdev_children) {
419 kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
420 pvd->vdev_child = NULL;
421 pvd->vdev_children = 0;
422 }
423
424 if (cvd->vdev_ops->vdev_op_leaf) {
425 spa_t *spa = cvd->vdev_spa;
426 list_remove(&spa->spa_leaf_list, cvd);
427 spa->spa_leaf_list_gen++;
428 }
429
430 /*
431 * Walk up all ancestors to update guid sum.
432 */
433 for (; pvd != NULL; pvd = pvd->vdev_parent)
434 pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
435 }
436
437 /*
438 * Remove any holes in the child array.
439 */
440 void
441 vdev_compact_children(vdev_t *pvd)
442 {
443 vdev_t **newchild, *cvd;
444 int oldc = pvd->vdev_children;
445 int newc;
446
447 ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
448
449 for (int c = newc = 0; c < oldc; c++)
450 if (pvd->vdev_child[c])
451 newc++;
452
453 /*
454 * If there are no remaining children (possible if this is an indirect
455 * vdev) just free the old child array and return to avoid a pointless
456 * zero-byte alloc.
457 */
458 if (newc == 0) {
459 kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
460 pvd->vdev_child = NULL;
461 pvd->vdev_children = newc;
462 return;
463 }
464
465 newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);
466
467 for (int c = newc = 0; c < oldc; c++) {
468 if ((cvd = pvd->vdev_child[c]) != NULL) {
469 newchild[newc] = cvd;
470 cvd->vdev_id = newc++;
471 }
472 }
473
474 kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
475 pvd->vdev_child = newchild;
476 pvd->vdev_children = newc;
477 }
478
479 /*
480 * Allocate and minimally initialize a vdev_t.
481 */
482 vdev_t *
483 vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
484 {
485 vdev_t *vd;
486 vdev_indirect_config_t *vic;
487
488 vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
489 vic = &vd->vdev_indirect_config;
490
491 if (spa->spa_root_vdev == NULL) {
492 ASSERT(ops == &vdev_root_ops);
493 spa->spa_root_vdev = vd;
494 spa->spa_load_guid = spa_generate_guid(NULL);
495 }
496
497 if (guid == 0 && ops != &vdev_hole_ops) {
498 if (spa->spa_root_vdev == vd) {
499 /*
500 * The root vdev's guid will also be the pool guid,
501 * which must be unique among all pools.
502 */
503 guid = spa_generate_guid(NULL);
504 } else {
505 /*
506 * Any other vdev's guid must be unique within the pool.
507 */
508 guid = spa_generate_guid(spa);
509 }
510 ASSERT(!spa_guid_exists(spa_guid(spa), guid));
511 }
512
513 vd->vdev_spa = spa;
514 vd->vdev_id = id;
515 vd->vdev_guid = guid;
516 vd->vdev_guid_sum = guid;
517 vd->vdev_ops = ops;
518 vd->vdev_state = VDEV_STATE_CLOSED;
519 vd->vdev_ishole = (ops == &vdev_hole_ops);
520 vic->vic_prev_indirect_vdev = UINT64_MAX;
521
522 rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
523 mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
524 vd->vdev_obsolete_segments = range_tree_create(NULL, RANGE_SEG64, NULL,
525 0, 0);
526
527 list_link_init(&vd->vdev_initialize_node);
528 list_link_init(&vd->vdev_leaf_node);
529 list_link_init(&vd->vdev_trim_node);
530 mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
531 mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
532 mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
533 mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);
534 mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
535 mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
536 cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
537 cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);
538 mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL);
539 mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL);
540 mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL);
541 cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL);
542 cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL);
543 cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL);
544
545 for (int t = 0; t < DTL_TYPES; t++) {
546 vd->vdev_dtl[t] = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
547 0);
548 }
549 txg_list_create(&vd->vdev_ms_list, spa,
550 offsetof(struct metaslab, ms_txg_node));
551 txg_list_create(&vd->vdev_dtl_list, spa,
552 offsetof(struct vdev, vdev_dtl_node));
553 vd->vdev_stat.vs_timestamp = gethrtime();
554 vdev_queue_init(vd);
555 vdev_cache_init(vd);
556
557 return (vd);
558 }
559
560 /*
561 * Allocate a new vdev. The 'alloctype' is used to control whether we are
562 * creating a new vdev or loading an existing one - the behavior is slightly
563 * different for each case.
564 */
565 int
566 vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
567 int alloctype)
568 {
569 vdev_ops_t *ops;
570 char *type;
571 uint64_t guid = 0, islog, nparity;
572 vdev_t *vd;
573 vdev_indirect_config_t *vic;
574 vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
575 boolean_t top_level = (parent && !parent->vdev_parent);
576
577 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
578
579 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
580 return (SET_ERROR(EINVAL));
581
582 if ((ops = vdev_getops(type)) == NULL)
583 return (SET_ERROR(EINVAL));
584
585 /*
586 * If this is a load, get the vdev guid from the nvlist.
587 * Otherwise, vdev_alloc_common() will generate one for us.
588 */
589 if (alloctype == VDEV_ALLOC_LOAD) {
590 uint64_t label_id;
591
592 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
593 label_id != id)
594 return (SET_ERROR(EINVAL));
595
596 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
597 return (SET_ERROR(EINVAL));
598 } else if (alloctype == VDEV_ALLOC_SPARE) {
599 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
600 return (SET_ERROR(EINVAL));
601 } else if (alloctype == VDEV_ALLOC_L2CACHE) {
602 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
603 return (SET_ERROR(EINVAL));
604 } else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
605 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
606 return (SET_ERROR(EINVAL));
607 }
608
609 /*
610 * The first allocated vdev must be of type 'root'.
611 */
612 if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
613 return (SET_ERROR(EINVAL));
614
615 /*
616 * Determine whether we're a log vdev.
617 */
618 islog = 0;
619 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
620 if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
621 return (SET_ERROR(ENOTSUP));
622
623 if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
624 return (SET_ERROR(ENOTSUP));
625
626 /*
627 * Set the nparity property for RAID-Z vdevs.
628 */
629 nparity = -1ULL;
630 if (ops == &vdev_raidz_ops) {
631 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
632 &nparity) == 0) {
633 if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
634 return (SET_ERROR(EINVAL));
635 /*
636 * Previous versions could only support 1 or 2 parity
637 * devices.
638 */
639 if (nparity > 1 &&
640 spa_version(spa) < SPA_VERSION_RAIDZ2)
641 return (SET_ERROR(ENOTSUP));
642 if (nparity > 2 &&
643 spa_version(spa) < SPA_VERSION_RAIDZ3)
644 return (SET_ERROR(ENOTSUP));
645 } else {
646 /*
647 * We require the parity to be specified for SPAs that
648 * support multiple parity levels.
649 */
650 if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
651 return (SET_ERROR(EINVAL));
652 /*
653 * Otherwise, we default to 1 parity device for RAID-Z.
654 */
655 nparity = 1;
656 }
657 } else {
658 nparity = 0;
659 }
660 ASSERT(nparity != -1ULL);
661
662 /*
663 * If creating a top-level vdev, check for allocation classes input
664 */
665 if (top_level && alloctype == VDEV_ALLOC_ADD) {
666 char *bias;
667
668 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
669 &bias) == 0) {
670 alloc_bias = vdev_derive_alloc_bias(bias);
671
672 /* spa_vdev_add() expects feature to be enabled */
673 if (alloc_bias != VDEV_BIAS_LOG &&
674 spa->spa_load_state != SPA_LOAD_CREATE &&
675 !spa_feature_is_enabled(spa,
676 SPA_FEATURE_ALLOCATION_CLASSES)) {
677 return (SET_ERROR(ENOTSUP));
678 }
679 }
680 }
681
682 vd = vdev_alloc_common(spa, id, guid, ops);
683 vic = &vd->vdev_indirect_config;
684
685 vd->vdev_islog = islog;
686 vd->vdev_nparity = nparity;
687 if (top_level && alloc_bias != VDEV_BIAS_NONE)
688 vd->vdev_alloc_bias = alloc_bias;
689
690 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
691 vd->vdev_path = spa_strdup(vd->vdev_path);
692 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
693 vd->vdev_devid = spa_strdup(vd->vdev_devid);
694 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
695 &vd->vdev_physpath) == 0)
696 vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
697 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
698 vd->vdev_fru = spa_strdup(vd->vdev_fru);
699
700 /*
701 * Set the whole_disk property. If it's not specified, leave the value
702 * as -1.
703 */
704 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
705 &vd->vdev_wholedisk) != 0)
706 vd->vdev_wholedisk = -1ULL;
707
708 ASSERT0(vic->vic_mapping_object);
709 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
710 &vic->vic_mapping_object);
711 ASSERT0(vic->vic_births_object);
712 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
713 &vic->vic_births_object);
714 ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
715 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
716 &vic->vic_prev_indirect_vdev);
717
718 /*
719 * Look for the 'not present' flag. This will only be set if the device
720 * was not present at the time of import.
721 */
722 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
723 &vd->vdev_not_present);
724
725 /*
726 * Get the alignment requirement.
727 */
728 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);
729
730 /*
731 * Retrieve the vdev creation time.
732 */
733 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
734 &vd->vdev_crtxg);
735
736 /*
737 * If we're a top-level vdev, try to load the allocation parameters.
738 */
739 if (top_level &&
740 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
741 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
742 &vd->vdev_ms_array);
743 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
744 &vd->vdev_ms_shift);
745 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
746 &vd->vdev_asize);
747 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
748 &vd->vdev_removing);
749 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
750 &vd->vdev_top_zap);
751 } else {
752 ASSERT0(vd->vdev_top_zap);
753 }
754
755 if (top_level && alloctype != VDEV_ALLOC_ATTACH) {
756 ASSERT(alloctype == VDEV_ALLOC_LOAD ||
757 alloctype == VDEV_ALLOC_ADD ||
758 alloctype == VDEV_ALLOC_SPLIT ||
759 alloctype == VDEV_ALLOC_ROOTPOOL);
760 /* Note: metaslab_group_create() is now deferred */
761 }
762
763 if (vd->vdev_ops->vdev_op_leaf &&
764 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
765 (void) nvlist_lookup_uint64(nv,
766 ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
767 } else {
768 ASSERT0(vd->vdev_leaf_zap);
769 }
770
771 /*
772 * If we're a leaf vdev, try to load the DTL object and other state.
773 */
774
775 if (vd->vdev_ops->vdev_op_leaf &&
776 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
777 alloctype == VDEV_ALLOC_ROOTPOOL)) {
778 if (alloctype == VDEV_ALLOC_LOAD) {
779 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
780 &vd->vdev_dtl_object);
781 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
782 &vd->vdev_unspare);
783 }
784
785 if (alloctype == VDEV_ALLOC_ROOTPOOL) {
786 uint64_t spare = 0;
787
788 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
789 &spare) == 0 && spare)
790 spa_spare_add(vd);
791 }
792
793 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
794 &vd->vdev_offline);
795
796 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
797 &vd->vdev_resilver_txg);
798
799 if (nvlist_exists(nv, ZPOOL_CONFIG_RESILVER_DEFER))
800 vdev_defer_resilver(vd);
801
802 /*
803 * When importing a pool, we want to ignore the persistent fault
804 * state, as the diagnosis made on another system may not be
805 * valid in the current context. Local vdevs will
806 * remain in the faulted state.
807 */
808 if (spa_load_state(spa) == SPA_LOAD_OPEN) {
809 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
810 &vd->vdev_faulted);
811 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
812 &vd->vdev_degraded);
813 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
814 &vd->vdev_removed);
815
816 if (vd->vdev_faulted || vd->vdev_degraded) {
817 char *aux;
818
819 vd->vdev_label_aux =
820 VDEV_AUX_ERR_EXCEEDED;
821 if (nvlist_lookup_string(nv,
822 ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
823 strcmp(aux, "external") == 0)
824 vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
825 }
826 }
827 }
828
829 /*
830 * Add ourselves to the parent's list of children.
831 */
832 vdev_add_child(parent, vd);
833
834 *vdp = vd;
835
836 return (0);
837 }
838
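/*
 * Close and free a vdev and all of its children, detaching it from its
 * parent and releasing all associated state.
 */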
839 void
840 vdev_free(vdev_t *vd)
841 {
842 spa_t *spa = vd->vdev_spa;
843
844 ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
845 ASSERT3P(vd->vdev_trim_thread, ==, NULL);
846 ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
847
848 /*
849 * Scan queues are normally destroyed at the end of a scan. If the
850 * queue exists here, that implies the vdev is being removed while
851 * the scan is still running.
852 */
853 if (vd->vdev_scan_io_queue != NULL) {
854 mutex_enter(&vd->vdev_scan_io_queue_lock);
855 dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
856 vd->vdev_scan_io_queue = NULL;
857 mutex_exit(&vd->vdev_scan_io_queue_lock);
858 }
859
860 /*
861 * vdev_free() implies closing the vdev first. This is simpler than
862 * trying to ensure complicated semantics for all callers.
863 */
864 vdev_close(vd);
865
866 ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
867 ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
868
869 /*
870 * Free all children.
871 */
872 for (int c = 0; c < vd->vdev_children; c++)
873 vdev_free(vd->vdev_child[c]);
874
875 ASSERT(vd->vdev_child == NULL);
876 ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
877
878 /*
879 * Discard allocation state.
880 */
881 if (vd->vdev_mg != NULL) {
882 vdev_metaslab_fini(vd);
883 metaslab_group_destroy(vd->vdev_mg);
884 vd->vdev_mg = NULL;
885 }
886
887 ASSERT0(vd->vdev_stat.vs_space);
888 ASSERT0(vd->vdev_stat.vs_dspace);
889 ASSERT0(vd->vdev_stat.vs_alloc);
890
891 /*
892 * Remove this vdev from its parent's child list.
893 */
894 vdev_remove_child(vd->vdev_parent, vd);
895
896 ASSERT(vd->vdev_parent == NULL);
897 ASSERT(!list_link_active(&vd->vdev_leaf_node));
898
899 /*
900 * Clean up vdev structure.
901 */
902 vdev_queue_fini(vd);
903 vdev_cache_fini(vd);
904
905 if (vd->vdev_path)
906 spa_strfree(vd->vdev_path);
907 if (vd->vdev_devid)
908 spa_strfree(vd->vdev_devid);
909 if (vd->vdev_physpath)
910 spa_strfree(vd->vdev_physpath);
911 if (vd->vdev_fru)
912 spa_strfree(vd->vdev_fru);
913
914 if (vd->vdev_isspare)
915 spa_spare_remove(vd);
916 if (vd->vdev_isl2cache)
917 spa_l2cache_remove(vd);
918
919 txg_list_destroy(&vd->vdev_ms_list);
920 txg_list_destroy(&vd->vdev_dtl_list);
921
922 mutex_enter(&vd->vdev_dtl_lock);
923 space_map_close(vd->vdev_dtl_sm);
924 for (int t = 0; t < DTL_TYPES; t++) {
925 range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
926 range_tree_destroy(vd->vdev_dtl[t]);
927 }
928 mutex_exit(&vd->vdev_dtl_lock);
929
930 EQUIV(vd->vdev_indirect_births != NULL,
931 vd->vdev_indirect_mapping != NULL);
932 if (vd->vdev_indirect_births != NULL) {
933 vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
934 vdev_indirect_births_close(vd->vdev_indirect_births);
935 }
936
937 if (vd->vdev_obsolete_sm != NULL) {
938 ASSERT(vd->vdev_removing ||
939 vd->vdev_ops == &vdev_indirect_ops);
940 space_map_close(vd->vdev_obsolete_sm);
941 vd->vdev_obsolete_sm = NULL;
942 }
943 range_tree_destroy(vd->vdev_obsolete_segments);
944 rw_destroy(&vd->vdev_indirect_rwlock);
945 mutex_destroy(&vd->vdev_obsolete_lock);
946
947 mutex_destroy(&vd->vdev_dtl_lock);
948 mutex_destroy(&vd->vdev_stat_lock);
949 mutex_destroy(&vd->vdev_probe_lock);
950 mutex_destroy(&vd->vdev_scan_io_queue_lock);
951 mutex_destroy(&vd->vdev_initialize_lock);
952 mutex_destroy(&vd->vdev_initialize_io_lock);
953 cv_destroy(&vd->vdev_initialize_io_cv);
954 cv_destroy(&vd->vdev_initialize_cv);
955 mutex_destroy(&vd->vdev_trim_lock);
956 mutex_destroy(&vd->vdev_autotrim_lock);
957 mutex_destroy(&vd->vdev_trim_io_lock);
958 cv_destroy(&vd->vdev_trim_cv);
959 cv_destroy(&vd->vdev_autotrim_cv);
960 cv_destroy(&vd->vdev_trim_io_cv);
961
962 if (vd == spa->spa_root_vdev)
963 spa->spa_root_vdev = NULL;
964
965 kmem_free(vd, sizeof (vdev_t));
966 }
967
968 /*
969 * Transfer top-level vdev state from svd to tvd.
970 */
971 static void
972 vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
973 {
974 spa_t *spa = svd->vdev_spa;
975 metaslab_t *msp;
976 vdev_t *vd;
977 int t;
978
979 ASSERT(tvd == tvd->vdev_top);
980
981 tvd->vdev_ms_array = svd->vdev_ms_array;
982 tvd->vdev_ms_shift = svd->vdev_ms_shift;
983 tvd->vdev_ms_count = svd->vdev_ms_count;
984 tvd->vdev_top_zap = svd->vdev_top_zap;
985
986 svd->vdev_ms_array = 0;
987 svd->vdev_ms_shift = 0;
988 svd->vdev_ms_count = 0;
989 svd->vdev_top_zap = 0;
990
991 if (tvd->vdev_mg)
992 ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
993 tvd->vdev_mg = svd->vdev_mg;
994 tvd->vdev_ms = svd->vdev_ms;
995
996 svd->vdev_mg = NULL;
997 svd->vdev_ms = NULL;
998
999 if (tvd->vdev_mg != NULL)
1000 tvd->vdev_mg->mg_vd = tvd;
1001
1002 tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
1003 svd->vdev_checkpoint_sm = NULL;
1004
1005 tvd->vdev_alloc_bias = svd->vdev_alloc_bias;
1006 svd->vdev_alloc_bias = VDEV_BIAS_NONE;
1007
1008 tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
1009 tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
1010 tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
1011
1012 svd->vdev_stat.vs_alloc = 0;
1013 svd->vdev_stat.vs_space = 0;
1014 svd->vdev_stat.vs_dspace = 0;
1015
1016 /*
1017 * State which may be set on a top-level vdev that's in the
1018 * process of being removed.
1019 */
1020 ASSERT0(tvd->vdev_indirect_config.vic_births_object);
1021 ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
1022 ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
1023 ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
1024 ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
1025 ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
1026 ASSERT0(tvd->vdev_removing);
1027 tvd->vdev_removing = svd->vdev_removing;
1028 tvd->vdev_indirect_config = svd->vdev_indirect_config;
1029 tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
1030 tvd->vdev_indirect_births = svd->vdev_indirect_births;
1031 range_tree_swap(&svd->vdev_obsolete_segments,
1032 &tvd->vdev_obsolete_segments);
1033 tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
1034 svd->vdev_indirect_config.vic_mapping_object = 0;
1035 svd->vdev_indirect_config.vic_births_object = 0;
1036 svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
1037 svd->vdev_indirect_mapping = NULL;
1038 svd->vdev_indirect_births = NULL;
1039 svd->vdev_obsolete_sm = NULL;
1040 svd->vdev_removing = 0;
1041
1042 for (t = 0; t < TXG_SIZE; t++) {
1043 while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
1044 (void) txg_list_add(&tvd->vdev_ms_list, msp, t);
1045 while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
1046 (void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
1047 if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
1048 (void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
1049 }
1050
1051 if (list_link_active(&svd->vdev_config_dirty_node)) {
1052 vdev_config_clean(svd);
1053 vdev_config_dirty(tvd);
1054 }
1055
1056 if (list_link_active(&svd->vdev_state_dirty_node)) {
1057 vdev_state_clean(svd);
1058 vdev_state_dirty(tvd);
1059 }
1060
1061 tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
1062 svd->vdev_deflate_ratio = 0;
1063
1064 tvd->vdev_islog = svd->vdev_islog;
1065 svd->vdev_islog = 0;
1066
1067 dsl_scan_io_queue_vdev_xfer(svd, tvd);
1068 }
1069
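/*
 * Set vdev_top to 'tvd' for 'vd' and all of its descendants.
 */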
1070 static void
1071 vdev_top_update(vdev_t *tvd, vdev_t *vd)
1072 {
1073 if (vd == NULL)
1074 return;
1075
1076 vd->vdev_top = tvd;
1077
1078 for (int c = 0; c < vd->vdev_children; c++)
1079 vdev_top_update(tvd, vd->vdev_child[c]);
1080 }
1081
1082 /*
1083 * Add a mirror/replacing vdev above an existing vdev.
1084 */
1085 vdev_t *
1086 vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
1087 {
1088 spa_t *spa = cvd->vdev_spa;
1089 vdev_t *pvd = cvd->vdev_parent;
1090 vdev_t *mvd;
1091
1092 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1093
1094 mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
1095
1096 mvd->vdev_asize = cvd->vdev_asize;
1097 mvd->vdev_min_asize = cvd->vdev_min_asize;
1098 mvd->vdev_max_asize = cvd->vdev_max_asize;
1099 mvd->vdev_psize = cvd->vdev_psize;
1100 mvd->vdev_ashift = cvd->vdev_ashift;
1101 mvd->vdev_state = cvd->vdev_state;
1102 mvd->vdev_crtxg = cvd->vdev_crtxg;
1103
1104 vdev_remove_child(pvd, cvd);
1105 vdev_add_child(pvd, mvd);
1106 cvd->vdev_id = mvd->vdev_children;
1107 vdev_add_child(mvd, cvd);
1108 vdev_top_update(cvd->vdev_top, cvd->vdev_top);
1109
1110 if (mvd == mvd->vdev_top)
1111 vdev_top_transfer(cvd, mvd);
1112
1113 return (mvd);
1114 }
1115
1116 /*
1117 * Remove a 1-way mirror/replacing vdev from the tree.
1118 */
1119 void
1120 vdev_remove_parent(vdev_t *cvd)
1121 {
1122 vdev_t *mvd = cvd->vdev_parent;
1123 vdev_t *pvd = mvd->vdev_parent;
1124
1125 ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1126
1127 ASSERT(mvd->vdev_children == 1);
1128 ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
1129 mvd->vdev_ops == &vdev_replacing_ops ||
1130 mvd->vdev_ops == &vdev_spare_ops);
1131 cvd->vdev_ashift = mvd->vdev_ashift;
1132
1133 vdev_remove_child(mvd, cvd);
1134 vdev_remove_child(pvd, mvd);
1135
1136 /*
1137 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
1138 * Otherwise, we could have detached an offline device, and when we
1139 * go to import the pool we'll think we have two top-level vdevs,
1140 * instead of a different version of the same top-level vdev.
1141 */
1142 if (mvd->vdev_top == mvd) {
1143 uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
1144 cvd->vdev_orig_guid = cvd->vdev_guid;
1145 cvd->vdev_guid += guid_delta;
1146 cvd->vdev_guid_sum += guid_delta;
1147 }
1148 cvd->vdev_id = mvd->vdev_id;
1149 vdev_add_child(pvd, cvd);
1150 vdev_top_update(cvd->vdev_top, cvd->vdev_top);
1151
1152 if (cvd == cvd->vdev_top)
1153 vdev_top_transfer(mvd, cvd);
1154
1155 ASSERT(mvd->vdev_children == 0);
1156 vdev_free(mvd);
1157 }
1158
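/*
 * Create the metaslab group for a top-level vdev once its allocation
 * bias is known, and fold its ashift into the spa-wide min/max ashift
 * for the normal class.
 */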
1159 static void
1160 vdev_metaslab_group_create(vdev_t *vd)
1161 {
1162 spa_t *spa = vd->vdev_spa;
1163
1164 /*
1165 * metaslab_group_create was delayed until allocation bias was available
1166 */
1167 if (vd->vdev_mg == NULL) {
1168 metaslab_class_t *mc;
1169
1170 if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE)
1171 vd->vdev_alloc_bias = VDEV_BIAS_LOG;
1172
1173 ASSERT3U(vd->vdev_islog, ==,
1174 (vd->vdev_alloc_bias == VDEV_BIAS_LOG));
1175
1176 switch (vd->vdev_alloc_bias) {
1177 case VDEV_BIAS_LOG:
1178 mc = spa_log_class(spa);
1179 break;
1180 case VDEV_BIAS_SPECIAL:
1181 mc = spa_special_class(spa);
1182 break;
1183 case VDEV_BIAS_DEDUP:
1184 mc = spa_dedup_class(spa);
1185 break;
1186 default:
1187 mc = spa_normal_class(spa);
1188 }
1189
1190 vd->vdev_mg = metaslab_group_create(mc, vd,
1191 spa->spa_alloc_count);
1192
1193 /*
1194 * The spa ashift values currently only reflect the
1195 * general vdev classes. Class destination is late
1196 * binding, so ashift checking had to wait until now.
1197 */
1198 if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
1199 mc == spa_normal_class(spa) && vd->vdev_aux == NULL) {
1200 if (vd->vdev_ashift > spa->spa_max_ashift)
1201 spa->spa_max_ashift = vd->vdev_ashift;
1202 if (vd->vdev_ashift < spa->spa_min_ashift)
1203 spa->spa_min_ashift = vd->vdev_ashift;
1204 }
1205 }
1206 }
1207
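/*
 * Allocate (or expand) the vdev's metaslab array and initialize any new
 * metaslabs. When loading (txg == 0), the metaslab object numbers are
 * read from the metaslab array in the MOS.
 */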
1208 int
1209 vdev_metaslab_init(vdev_t *vd, uint64_t txg)
1210 {
1211 spa_t *spa = vd->vdev_spa;
1212 objset_t *mos = spa->spa_meta_objset;
1213 uint64_t m;
1214 uint64_t oldc = vd->vdev_ms_count;
1215 uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
1216 metaslab_t **mspp;
1217 int error;
1218 boolean_t expanding = (oldc != 0);
1219
1220 ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1221
1222 /*
1223 * This vdev is not being allocated from yet or is a hole.
1224 */
1225 if (vd->vdev_ms_shift == 0)
1226 return (0);
1227
1228 ASSERT(!vd->vdev_ishole);
1229
1230 ASSERT(oldc <= newc);
1231
1232 mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
1233
1234 if (expanding) {
1235 bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
1236 kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
1237 }
1238
1239 vd->vdev_ms = mspp;
1240 vd->vdev_ms_count = newc;
1241 for (m = oldc; m < newc; m++) {
1242 uint64_t object = 0;
1243
1244 /*
1245 * vdev_ms_array may be 0 if we are creating the "fake"
1246 * metaslabs for an indirect vdev for zdb's leak detection.
1247 * See zdb_leak_init().
1248 */
1249 if (txg == 0 && vd->vdev_ms_array != 0) {
1250 error = dmu_read(mos, vd->vdev_ms_array,
1251 m * sizeof (uint64_t), sizeof (uint64_t), &object,
1252 DMU_READ_PREFETCH);
1253 if (error != 0) {
1254 vdev_dbgmsg(vd, "unable to read the metaslab "
1255 "array [error=%d]", error);
1256 return (error);
1257 }
1258 }
1259
1260 #ifndef _KERNEL
1261 /*
1262 * To accommodate zdb_leak_init() fake indirect
1263 * metaslabs, we allocate a metaslab group for
1264 * indirect vdevs which normally don't have one.
1265 */
1266 if (vd->vdev_mg == NULL) {
1267 ASSERT0(vdev_is_concrete(vd));
1268 vdev_metaslab_group_create(vd);
1269 }
1270 #endif
1271 error = metaslab_init(vd->vdev_mg, m, object, txg,
1272 &(vd->vdev_ms[m]));
1273 if (error != 0) {
1274 vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
1275 error);
1276 return (error);
1277 }
1278 }
1279
1280 if (txg == 0)
1281 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);
1282
1283 /*
1284 * If the vdev is being removed we don't activate
1285 * the metaslabs since we want to ensure that no new
1286 * allocations are performed on this device.
1287 */
1288 if (!expanding && !vd->vdev_removing) {
1289 metaslab_group_activate(vd->vdev_mg);
1290 }
1291
1292 if (txg == 0)
1293 spa_config_exit(spa, SCL_ALLOC, FTAG);
1294
1295 /*
1296 * Regardless of whether this vdev was just added or is being
1297 * expanded, the metaslab count has changed. Recalculate the
1298 * block limit.
1299 */
1300 spa_log_sm_set_blocklimit(spa);
1301
1302 return (0);
1303 }
1304
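/*
 * Release the vdev's metaslabs and checkpoint space map. May be called
 * more than once, e.g. when destroying a pool.
 */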
1305 void
1306 vdev_metaslab_fini(vdev_t *vd)
1307 {
1308 if (vd->vdev_checkpoint_sm != NULL) {
1309 ASSERT(spa_feature_is_active(vd->vdev_spa,
1310 SPA_FEATURE_POOL_CHECKPOINT));
1311 space_map_close(vd->vdev_checkpoint_sm);
1312 /*
1313 * Even though we close the space map, we need to set its
1314 * pointer to NULL. The reason is that vdev_metaslab_fini()
1315 * may be called multiple times for certain operations
1316 * (i.e. when destroying a pool) so we need to ensure that
1317 * this clause never executes twice. This logic is similar
1318 * to the one used for the vdev_ms clause below.
1319 */
1320 vd->vdev_checkpoint_sm = NULL;
1321 }
1322
1323 if (vd->vdev_ms != NULL) {
1324 metaslab_group_t *mg = vd->vdev_mg;
1325 metaslab_group_passivate(mg);
1326
1327 uint64_t count = vd->vdev_ms_count;
1328 for (uint64_t m = 0; m < count; m++) {
1329 metaslab_t *msp = vd->vdev_ms[m];
1330 if (msp != NULL)
1331 metaslab_fini(msp);
1332 }
1333 kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
1334 vd->vdev_ms = NULL;
1335
1336 vd->vdev_ms_count = 0;
1337
1338 for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
1339 ASSERT0(mg->mg_histogram[i]);
1340 }
1341 ASSERT0(vd->vdev_ms_count);
1342 }
1343
1344 typedef struct vdev_probe_stats {
1345 boolean_t vps_readable;
1346 boolean_t vps_writeable;
1347 int vps_flags;
1348 } vdev_probe_stats_t;
1349
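/*
 * Completion callback for the I/Os issued by vdev_probe(). A successful
 * read is followed by a write of the same data; once the parent null
 * zio completes, cant_read/cant_write are updated and the result is
 * propagated to the zios waiting on the probe.
 */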
1350 static void
1351 vdev_probe_done(zio_t *zio)
1352 {
1353 spa_t *spa = zio->io_spa;
1354 vdev_t *vd = zio->io_vd;
1355 vdev_probe_stats_t *vps = zio->io_private;
1356
1357 ASSERT(vd->vdev_probe_zio != NULL);
1358
1359 if (zio->io_type == ZIO_TYPE_READ) {
1360 if (zio->io_error == 0)
1361 vps->vps_readable = 1;
1362 if (zio->io_error == 0 && spa_writeable(spa)) {
1363 zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
1364 zio->io_offset, zio->io_size, zio->io_abd,
1365 ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
1366 ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
1367 } else {
1368 abd_free(zio->io_abd);
1369 }
1370 } else if (zio->io_type == ZIO_TYPE_WRITE) {
1371 if (zio->io_error == 0)
1372 vps->vps_writeable = 1;
1373 abd_free(zio->io_abd);
1374 } else if (zio->io_type == ZIO_TYPE_NULL) {
1375 zio_t *pio;
1376
1377 vd->vdev_cant_read |= !vps->vps_readable;
1378 vd->vdev_cant_write |= !vps->vps_writeable;
1379
1380 if (vdev_readable(vd) &&
1381 (vdev_writeable(vd) || !spa_writeable(spa))) {
1382 zio->io_error = 0;
1383 } else {
1384 ASSERT(zio->io_error != 0);
1385 vdev_dbgmsg(vd, "failed probe");
1386 (void) zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
1387 spa, vd, NULL, NULL, 0, 0);
1388 zio->io_error = SET_ERROR(ENXIO);
1389 }
1390
1391 mutex_enter(&vd->vdev_probe_lock);
1392 ASSERT(vd->vdev_probe_zio == zio);
1393 vd->vdev_probe_zio = NULL;
1394 mutex_exit(&vd->vdev_probe_lock);
1395
1396 zio_link_t *zl = NULL;
1397 while ((pio = zio_walk_parents(zio, &zl)) != NULL)
1398 if (!vdev_accessible(vd, pio))
1399 pio->io_error = SET_ERROR(ENXIO);
1400
1401 kmem_free(vps, sizeof (*vps));
1402 }
1403 }
1404
1405 /*
1406 * Determine whether this device is accessible.
1407 *
1408 * Read and write to several known locations: the pad regions of each
1409 * vdev label but the first, which we leave alone in case it contains
1410 * a VTOC.
1411 */
1412 zio_t *
1413 vdev_probe(vdev_t *vd, zio_t *zio)
1414 {
1415 spa_t *spa = vd->vdev_spa;
1416 vdev_probe_stats_t *vps = NULL;
1417 zio_t *pio;
1418
1419 ASSERT(vd->vdev_ops->vdev_op_leaf);
1420
1421 /*
1422 * Don't probe the probe.
1423 */
1424 if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
1425 return (NULL);
1426
1427 /*
1428 * To prevent 'probe storms' when a device fails, we create
1429 * just one probe i/o at a time. All zios that want to probe
1430 * this vdev will become parents of the probe io.
1431 */
1432 mutex_enter(&vd->vdev_probe_lock);
1433
1434 if ((pio = vd->vdev_probe_zio) == NULL) {
1435 vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
1436
1437 vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
1438 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
1439 ZIO_FLAG_TRYHARD;
1440
1441 if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
1442 /*
1443 * vdev_cant_read and vdev_cant_write can only
1444 * transition from TRUE to FALSE when we have the
1445 * SCL_ZIO lock as writer; otherwise they can only
1446 * transition from FALSE to TRUE. This ensures that
1447 * any zio looking at these values can assume that
1448 * failures persist for the life of the I/O. That's
1449 * important because when a device has intermittent
1450 * connectivity problems, we want to ensure that
1451 * they're ascribed to the device (ENXIO) and not
1452 * the zio (EIO).
1453 *
1454 * Since we hold SCL_ZIO as writer here, clear both
1455 * values so the probe can reevaluate from first
1456 * principles.
1457 */
1458 vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
1459 vd->vdev_cant_read = B_FALSE;
1460 vd->vdev_cant_write = B_FALSE;
1461 }
1462
1463 vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
1464 vdev_probe_done, vps,
1465 vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
1466
1467 /*
1468 * We can't change the vdev state in this context, so we
1469 * kick off an async task to do it on our behalf.
1470 */
1471 if (zio != NULL) {
1472 vd->vdev_probe_wanted = B_TRUE;
1473 spa_async_request(spa, SPA_ASYNC_PROBE);
1474 }
1475 }
1476
1477 if (zio != NULL)
1478 zio_add_child(zio, pio);
1479
1480 mutex_exit(&vd->vdev_probe_lock);
1481
1482 if (vps == NULL) {
1483 ASSERT(zio != NULL);
1484 return (NULL);
1485 }
1486
1487 for (int l = 1; l < VDEV_LABELS; l++) {
1488 zio_nowait(zio_read_phys(pio, vd,
1489 vdev_label_offset(vd->vdev_psize, l,
1490 offsetof(vdev_label_t, vl_be)), VDEV_PAD_SIZE,
1491 abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
1492 ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
1493 ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
1494 }
1495
1496 if (zio == NULL)
1497 return (pio);
1498
1499 zio_nowait(pio);
1500 return (NULL);
1501 }
1502
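/*
 * Taskq callback used by vdev_open_children() to open a single child vdev.
 */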
1503 static void
1504 vdev_open_child(void *arg)
1505 {
1506 vdev_t *vd = arg;
1507
1508 vd->vdev_open_thread = curthread;
1509 vd->vdev_open_error = vdev_open(vd);
1510 vd->vdev_open_thread = NULL;
1511 }
1512
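/*
 * Return B_TRUE if this vdev or any of its descendants is backed by a zvol.
 */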
1513 boolean_t
1514 vdev_uses_zvols(vdev_t *vd)
1515 {
1516 if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
1517 strlen(ZVOL_DIR)) == 0)
1518 return (B_TRUE);
1519 for (int c = 0; c < vd->vdev_children; c++)
1520 if (vdev_uses_zvols(vd->vdev_child[c]))
1521 return (B_TRUE);
1522 return (B_FALSE);
1523 }
1524
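/*
 * Open all children of 'vd', in parallel via a taskq when possible, and
 * derive the vdev's nonrot property from its children.
 */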
1525 void
1526 vdev_open_children(vdev_t *vd)
1527 {
1528 taskq_t *tq;
1529 int children = vd->vdev_children;
1530
1531 /*
1532 * in order to handle pools on top of zvols, do the opens
1533 * in a single thread so that the same thread holds the
1534 * spa_namespace_lock
1535 */
1536 if (vdev_uses_zvols(vd)) {
1537 retry_sync:
1538 for (int c = 0; c < children; c++)
1539 vd->vdev_child[c]->vdev_open_error =
1540 vdev_open(vd->vdev_child[c]);
1541 } else {
1542 tq = taskq_create("vdev_open", children, minclsyspri,
1543 children, children, TASKQ_PREPOPULATE);
1544 if (tq == NULL)
1545 goto retry_sync;
1546
1547 for (int c = 0; c < children; c++)
1548 VERIFY(taskq_dispatch(tq, vdev_open_child,
1549 vd->vdev_child[c], TQ_SLEEP) != TASKQID_INVALID);
1550
1551 taskq_destroy(tq);
1552 }
1553
1554 vd->vdev_nonrot = B_TRUE;
1555
1556 for (int c = 0; c < children; c++)
1557 vd->vdev_nonrot &= vd->vdev_child[c]->vdev_nonrot;
1558 }
1559
1560 /*
1561 * Compute the raidz-deflation ratio. Note, we hard-code
1562 * in 128k (1 << 17) because it is the "typical" blocksize.
1563 * Even though SPA_MAXBLOCKSIZE changed, this algorithm can not change,
1564 * otherwise it would inconsistently account for existing bp's.
1565 */
1566 static void
1567 vdev_set_deflate_ratio(vdev_t *vd)
1568 {
1569 if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
1570 vd->vdev_deflate_ratio = (1 << 17) /
1571 (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
1572 }
1573 }
1574
1575 /*
1576 * Prepare a virtual device for access.
1577 */
1578 int
1579 vdev_open(vdev_t *vd)
1580 {
1581 spa_t *spa = vd->vdev_spa;
1582 int error;
1583 uint64_t osize = 0;
1584 uint64_t max_osize = 0;
1585 uint64_t asize, max_asize, psize;
1586 uint64_t ashift = 0;
1587
1588 ASSERT(vd->vdev_open_thread == curthread ||
1589 spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1590 ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
1591 vd->vdev_state == VDEV_STATE_CANT_OPEN ||
1592 vd->vdev_state == VDEV_STATE_OFFLINE);
1593
1594 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
1595 vd->vdev_cant_read = B_FALSE;
1596 vd->vdev_cant_write = B_FALSE;
1597 vd->vdev_min_asize = vdev_get_min_asize(vd);
1598
1599 /*
1600 * If this vdev is not removed, check its fault status. If it's
1601 * faulted, bail out of the open.
1602 */
1603 if (!vd->vdev_removed && vd->vdev_faulted) {
1604 ASSERT(vd->vdev_children == 0);
1605 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
1606 vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
1607 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1608 vd->vdev_label_aux);
1609 return (SET_ERROR(ENXIO));
1610 } else if (vd->vdev_offline) {
1611 ASSERT(vd->vdev_children == 0);
1612 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
1613 return (SET_ERROR(ENXIO));
1614 }
1615
1616 error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);
1617
1618 /*
1619 * Reset the vdev_reopening flag so that we actually close
1620 * the vdev on error.
1621 */
1622 vd->vdev_reopening = B_FALSE;
1623 if (zio_injection_enabled && error == 0)
1624 error = zio_handle_device_injection(vd, NULL, ENXIO);
1625
1626 if (error) {
1627 if (vd->vdev_removed &&
1628 vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
1629 vd->vdev_removed = B_FALSE;
1630
1631 if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
1632 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
1633 vd->vdev_stat.vs_aux);
1634 } else {
1635 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1636 vd->vdev_stat.vs_aux);
1637 }
1638 return (error);
1639 }
1640
1641 vd->vdev_removed = B_FALSE;
1642
1643 /*
1644 * Recheck the faulted flag now that we have confirmed that
1645 * the vdev is accessible. If we're faulted, bail.
1646 */
1647 if (vd->vdev_faulted) {
1648 ASSERT(vd->vdev_children == 0);
1649 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
1650 vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
1651 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1652 vd->vdev_label_aux);
1653 return (SET_ERROR(ENXIO));
1654 }
1655
1656 if (vd->vdev_degraded) {
1657 ASSERT(vd->vdev_children == 0);
1658 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
1659 VDEV_AUX_ERR_EXCEEDED);
1660 } else {
1661 vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
1662 }
1663
1664 /*
1665 * For hole or missing vdevs we just return success.
1666 */
1667 if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
1668 return (0);
1669
1670 for (int c = 0; c < vd->vdev_children; c++) {
1671 if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
1672 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
1673 VDEV_AUX_NONE);
1674 break;
1675 }
1676 }
1677
1678 osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
1679 max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
1680
1681 if (vd->vdev_children == 0) {
1682 if (osize < SPA_MINDEVSIZE) {
1683 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1684 VDEV_AUX_TOO_SMALL);
1685 return (SET_ERROR(EOVERFLOW));
1686 }
1687 psize = osize;
1688 asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
1689 max_asize = max_osize - (VDEV_LABEL_START_SIZE +
1690 VDEV_LABEL_END_SIZE);
1691 } else {
1692 if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
1693 (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
1694 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1695 VDEV_AUX_TOO_SMALL);
1696 return (SET_ERROR(EOVERFLOW));
1697 }
1698 psize = 0;
1699 asize = osize;
1700 max_asize = max_osize;
1701 }
1702
1703 vd->vdev_psize = psize;
1704
1705 /*
1706 * Make sure the allocatable size hasn't shrunk too much.
1707 */
1708 if (asize < vd->vdev_min_asize) {
1709 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1710 VDEV_AUX_BAD_LABEL);
1711 return (SET_ERROR(EINVAL));
1712 }
1713
1714 if (vd->vdev_asize == 0) {
1715 /*
1716 * This is the first-ever open, so use the computed values.
1717 * For compatibility, a different ashift can be requested.
1718 */
1719 vd->vdev_asize = asize;
1720 vd->vdev_max_asize = max_asize;
1721 if (vd->vdev_ashift == 0) {
1722 vd->vdev_ashift = ashift; /* use detected value */
1723 }
1724 if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN ||
1725 vd->vdev_ashift > ASHIFT_MAX)) {
1726 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1727 VDEV_AUX_BAD_ASHIFT);
1728 return (SET_ERROR(EDOM));
1729 }
1730 } else {
1731 /*
1732 * Detect if the alignment requirement has increased.
1733 * We don't want to make the pool unavailable, just
1734 * post an event instead.
1735 */
1736 if (ashift > vd->vdev_top->vdev_ashift &&
1737 vd->vdev_ops->vdev_op_leaf) {
1738 (void) zfs_ereport_post(
1739 FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
1740 spa, vd, NULL, NULL, 0, 0);
1741 }
1742
1743 vd->vdev_max_asize = max_asize;
1744 }
1745
1746 /*
1747 * If all children are healthy we update asize if either:
1748 * The asize has increased, due to a device expansion caused by dynamic
1749 * LUN growth or vdev replacement, and automatic expansion is enabled;
1750 * making the additional space available.
1751 *
1752 * The asize has decreased, due to a device shrink usually caused by a
1753 * vdev replace with a smaller device. This ensures that calculations
1754 * based on max_asize and asize (e.g. esize) are always valid. It's safe
1755 * to do this as we've already validated that asize is greater than
1756 * vdev_min_asize.
1757 */
1758 if (vd->vdev_state == VDEV_STATE_HEALTHY &&
1759 ((asize > vd->vdev_asize &&
1760 (vd->vdev_expanding || spa->spa_autoexpand)) ||
1761 (asize < vd->vdev_asize)))
1762 vd->vdev_asize = asize;
1763
1764 vdev_set_min_asize(vd);
1765
1766 /*
1767 * Ensure we can issue some IO before declaring the
1768 * vdev open for business.
1769 */
1770 if (vd->vdev_ops->vdev_op_leaf &&
1771 (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
1772 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1773 VDEV_AUX_ERR_EXCEEDED);
1774 return (error);
1775 }
1776
1777 /*
1778 * Track the min and max ashift values for normal data devices.
1779 *
1780 * DJB - TBD these should perhaps be tracked per allocation class
1781 * (e.g. spa_min_ashift is used to round up post compression buffers)
1782 */
1783 if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
1784 vd->vdev_alloc_bias == VDEV_BIAS_NONE &&
1785 vd->vdev_aux == NULL) {
1786 if (vd->vdev_ashift > spa->spa_max_ashift)
1787 spa->spa_max_ashift = vd->vdev_ashift;
1788 if (vd->vdev_ashift < spa->spa_min_ashift)
1789 spa->spa_min_ashift = vd->vdev_ashift;
1790 }
1791
1792 /*
1793 * If this is a leaf vdev, assess whether a resilver is needed.
1794 * But don't do this if we are doing a reopen for a scrub, since
1795 * this would just restart the scrub we are already doing.
1796 */
1797 if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen)
1798 dsl_scan_assess_vdev(spa->spa_dsl_pool, vd);
1799
1800 return (0);
1801 }
1802
1803 /*
1804 * Called once the vdevs are all opened, this routine validates the label
1805 * contents. This needs to be done before vdev_load() so that we don't
1806 * inadvertently do repair I/Os to the wrong device.
1807 *
1808 * This function will only return failure if one of the vdevs indicates that it
1809 * has since been destroyed or exported. This is only possible if
1810 * /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
1811 * will be updated but the function will return 0.
1812 */
1813 int
1814 vdev_validate(vdev_t *vd)
1815 {
1816 spa_t *spa = vd->vdev_spa;
1817 nvlist_t *label;
1818 uint64_t guid = 0, aux_guid = 0, top_guid;
1819 uint64_t state;
1820 nvlist_t *nvl;
1821 uint64_t txg;
1822
1823 if (vdev_validate_skip)
1824 return (0);
1825
1826 for (uint64_t c = 0; c < vd->vdev_children; c++)
1827 if (vdev_validate(vd->vdev_child[c]) != 0)
1828 return (SET_ERROR(EBADF));
1829
1830 /*
1831 * If the device has already failed, or was marked offline, don't do
1832 * any further validation. Otherwise, label I/O will fail and we will
1833 * overwrite the previous state.
1834 */
1835 if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
1836 return (0);
1837
1838 /*
1839 * If we are performing an extreme rewind, we allow for a label that
1840 * was modified at a point after the current txg.
1841 * If the config lock is not held, do not check the txg. spa_sync could
1842 * be updating the vdev's label before updating spa_last_synced_txg.
1843 */
1844 if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
1845 spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
1846 txg = UINT64_MAX;
1847 else
1848 txg = spa_last_synced_txg(spa);
1849
1850 if ((label = vdev_label_read_config(vd, txg)) == NULL) {
1851 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1852 VDEV_AUX_BAD_LABEL);
1853 vdev_dbgmsg(vd, "vdev_validate: failed reading config for "
1854 "txg %llu", (u_longlong_t)txg);
1855 return (0);
1856 }
1857
1858 /*
1859 * Determine if this vdev has been split off into another
1860 * pool. If so, then refuse to open it.
1861 */
1862 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
1863 &aux_guid) == 0 && aux_guid == spa_guid(spa)) {
1864 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1865 VDEV_AUX_SPLIT_POOL);
1866 nvlist_free(label);
1867 vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
1868 return (0);
1869 }
1870
1871 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
1872 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1873 VDEV_AUX_CORRUPT_DATA);
1874 nvlist_free(label);
1875 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
1876 ZPOOL_CONFIG_POOL_GUID);
1877 return (0);
1878 }
1879
1880 /*
1881 * If config is not trusted then ignore the spa guid check. This is
1882 * necessary because if the machine crashed during a re-guid the new
1883 * guid might have been written to all of the vdev labels, but not the
1884 * cached config. The check will be performed again once we have the
1885 * trusted config from the MOS.
1886 */
1887 if (spa->spa_trust_config && guid != spa_guid(spa)) {
1888 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1889 VDEV_AUX_CORRUPT_DATA);
1890 nvlist_free(label);
1891 vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
1892 "match config (%llu != %llu)", (u_longlong_t)guid,
1893 (u_longlong_t)spa_guid(spa));
1894 return (0);
1895 }
1896
1897 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
1898 != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
1899 &aux_guid) != 0)
1900 aux_guid = 0;
1901
1902 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
1903 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1904 VDEV_AUX_CORRUPT_DATA);
1905 nvlist_free(label);
1906 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
1907 ZPOOL_CONFIG_GUID);
1908 return (0);
1909 }
1910
1911 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
1912 != 0) {
1913 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1914 VDEV_AUX_CORRUPT_DATA);
1915 nvlist_free(label);
1916 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
1917 ZPOOL_CONFIG_TOP_GUID);
1918 return (0);
1919 }
1920
1921 /*
1922 * If this vdev just became a top-level vdev because its sibling was
1923 * detached, it will have adopted the parent's vdev guid -- but the
1924 * label may or may not be on disk yet. Fortunately, either version
1925 * of the label will have the same top guid, so if we're a top-level
1926 * vdev, we can safely compare to that instead.
1927 * However, if the config comes from a cachefile that failed to update
1928 * after the detach, a top-level vdev will appear as a non top-level
1929 * vdev in the config. Also relax the constraints if we perform an
1930 * extreme rewind.
1931 *
1932 * If we split this vdev off instead, then we also check the
1933 * original pool's guid. We don't want to consider the vdev
1934 * corrupt if it is partway through a split operation.
1935 */
1936 if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) {
1937 boolean_t mismatch = B_FALSE;
1938 if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
1939 if (vd != vd->vdev_top || vd->vdev_guid != top_guid)
1940 mismatch = B_TRUE;
1941 } else {
1942 if (vd->vdev_guid != top_guid &&
1943 vd->vdev_top->vdev_guid != guid)
1944 mismatch = B_TRUE;
1945 }
1946
1947 if (mismatch) {
1948 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1949 VDEV_AUX_CORRUPT_DATA);
1950 nvlist_free(label);
1951 vdev_dbgmsg(vd, "vdev_validate: config guid "
1952 "doesn't match label guid");
1953 vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu",
1954 (u_longlong_t)vd->vdev_guid,
1955 (u_longlong_t)vd->vdev_top->vdev_guid);
1956 vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, "
1957 "aux_guid %llu", (u_longlong_t)guid,
1958 (u_longlong_t)top_guid, (u_longlong_t)aux_guid);
1959 return (0);
1960 }
1961 }
1962
1963 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
1964 &state) != 0) {
1965 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1966 VDEV_AUX_CORRUPT_DATA);
1967 nvlist_free(label);
1968 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
1969 ZPOOL_CONFIG_POOL_STATE);
1970 return (0);
1971 }
1972
1973 nvlist_free(label);
1974
1975 /*
1976 * If this is a verbatim import, no need to check the
1977 * state of the pool.
1978 */
1979 if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
1980 spa_load_state(spa) == SPA_LOAD_OPEN &&
1981 state != POOL_STATE_ACTIVE) {
1982 vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) "
1983 "for spa %s", (u_longlong_t)state, spa->spa_name);
1984 return (SET_ERROR(EBADF));
1985 }
1986
1987 /*
1988 * If we were able to open and validate a vdev that was
1989 * previously marked permanently unavailable, clear that state
1990 * now.
1991 */
1992 if (vd->vdev_not_present)
1993 vd->vdev_not_present = 0;
1994
1995 return (0);
1996 }
1997
1998 static void
1999 vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
2000 {
2001 if (svd->vdev_path != NULL && dvd->vdev_path != NULL) {
2002 if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) {
2003 zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed "
2004 "from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
2005 dvd->vdev_path, svd->vdev_path);
2006 spa_strfree(dvd->vdev_path);
2007 dvd->vdev_path = spa_strdup(svd->vdev_path);
2008 }
2009 } else if (svd->vdev_path != NULL) {
2010 dvd->vdev_path = spa_strdup(svd->vdev_path);
2011 zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'",
2012 (u_longlong_t)dvd->vdev_guid, dvd->vdev_path);
2013 }
2014 }
2015
2016 /*
2017 * Recursively copy vdev paths from one vdev to another. Source and destination
2018 * vdev trees must have the same geometry, otherwise an error is returned. Intended to copy
2019 * paths from userland config into MOS config.
2020 */
2021 int
2022 vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd)
2023 {
2024 if ((svd->vdev_ops == &vdev_missing_ops) ||
2025 (svd->vdev_ishole && dvd->vdev_ishole) ||
2026 (dvd->vdev_ops == &vdev_indirect_ops))
2027 return (0);
2028
2029 if (svd->vdev_ops != dvd->vdev_ops) {
2030 vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s",
2031 svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type);
2032 return (SET_ERROR(EINVAL));
2033 }
2034
2035 if (svd->vdev_guid != dvd->vdev_guid) {
2036 vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != "
2037 "%llu)", (u_longlong_t)svd->vdev_guid,
2038 (u_longlong_t)dvd->vdev_guid);
2039 return (SET_ERROR(EINVAL));
2040 }
2041
2042 if (svd->vdev_children != dvd->vdev_children) {
2043 vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: "
2044 "%llu != %llu", (u_longlong_t)svd->vdev_children,
2045 (u_longlong_t)dvd->vdev_children);
2046 return (SET_ERROR(EINVAL));
2047 }
2048
2049 for (uint64_t i = 0; i < svd->vdev_children; i++) {
2050 int error = vdev_copy_path_strict(svd->vdev_child[i],
2051 dvd->vdev_child[i]);
2052 if (error != 0)
2053 return (error);
2054 }
2055
2056 if (svd->vdev_ops->vdev_op_leaf)
2057 vdev_copy_path_impl(svd, dvd);
2058
2059 return (0);
2060 }
2061
2062 static void
2063 vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd)
2064 {
2065 ASSERT(stvd->vdev_top == stvd);
2066 ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id);
2067
2068 for (uint64_t i = 0; i < dvd->vdev_children; i++) {
2069 vdev_copy_path_search(stvd, dvd->vdev_child[i]);
2070 }
2071
2072 if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd))
2073 return;
2074
2075 /*
2076 * The idea here is that while a vdev can shift positions within
2077 * a top vdev (when replacing, attaching mirror, etc.) it cannot
2078 * step outside of it.
2079 */
2080 vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid);
2081
2082 if (vd == NULL || vd->vdev_ops != dvd->vdev_ops)
2083 return;
2084
2085 ASSERT(vd->vdev_ops->vdev_op_leaf);
2086
2087 vdev_copy_path_impl(vd, dvd);
2088 }
2089
2090 /*
2091 * Recursively copy vdev paths from one root vdev to another. Source and
2092 * destination vdev trees may differ in geometry. For each destination leaf
2093 * vdev, search a vdev with the same guid and top vdev id in the source.
2094 * Intended to copy paths from userland config into MOS config.
2095 */
2096 void
2097 vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd)
2098 {
2099 uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children);
2100 ASSERT(srvd->vdev_ops == &vdev_root_ops);
2101 ASSERT(drvd->vdev_ops == &vdev_root_ops);
2102
2103 for (uint64_t i = 0; i < children; i++) {
2104 vdev_copy_path_search(srvd->vdev_child[i],
2105 drvd->vdev_child[i]);
2106 }
2107 }
2108
2109 /*
2110 * Close a virtual device.
2111 */
2112 void
2113 vdev_close(vdev_t *vd)
2114 {
2115 spa_t *spa = vd->vdev_spa;
2116 vdev_t *pvd = vd->vdev_parent;
2117
2118 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2119
2120 /*
2121 * If our parent is reopening, then we are as well, unless we are
2122 * going offline.
2123 */
2124 if (pvd != NULL && pvd->vdev_reopening)
2125 vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
2126
2127 vd->vdev_ops->vdev_op_close(vd);
2128
2129 vdev_cache_purge(vd);
2130
2131 /*
2132 * We record the previous state before we close it, so that if we are
2133 * doing a reopen(), we don't generate FMA ereports if we notice that
2134 * it's still faulted.
2135 */
2136 vd->vdev_prevstate = vd->vdev_state;
2137
2138 if (vd->vdev_offline)
2139 vd->vdev_state = VDEV_STATE_OFFLINE;
2140 else
2141 vd->vdev_state = VDEV_STATE_CLOSED;
2142 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
2143 }
2144
2145 void
2146 vdev_hold(vdev_t *vd)
2147 {
2148 spa_t *spa = vd->vdev_spa;
2149
2150 ASSERT(spa_is_root(spa));
2151 if (spa->spa_state == POOL_STATE_UNINITIALIZED)
2152 return;
2153
2154 for (int c = 0; c < vd->vdev_children; c++)
2155 vdev_hold(vd->vdev_child[c]);
2156
2157 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_hold != NULL)
2158 vd->vdev_ops->vdev_op_hold(vd);
2159 }
2160
2161 void
2162 vdev_rele(vdev_t *vd)
2163 {
2164 spa_t *spa = vd->vdev_spa;
2165
2166 ASSERT(spa_is_root(spa));
2167 for (int c = 0; c < vd->vdev_children; c++)
2168 vdev_rele(vd->vdev_child[c]);
2169
2170 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_rele != NULL)
2171 vd->vdev_ops->vdev_op_rele(vd);
2172 }
2173
2174 /*
2175 * Reopen all interior vdevs and any unopened leaves. We don't actually
2176 * reopen leaf vdevs which had previously been opened as they might deadlock
2177 * on the spa_config_lock. Instead we only obtain the leaf's physical size.
2178 * If the leaf has never been opened then open it, as usual.
2179 */
2180 void
2181 vdev_reopen(vdev_t *vd)
2182 {
2183 spa_t *spa = vd->vdev_spa;
2184
2185 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2186
2187 /* set the reopening flag unless we're taking the vdev offline */
2188 vd->vdev_reopening = !vd->vdev_offline;
2189 vdev_close(vd);
2190 (void) vdev_open(vd);
2191
2192 /*
2193 * Call vdev_validate() here to make sure we have the same device.
2194 * Otherwise, a device with an invalid label could be successfully
2195 * opened in response to vdev_reopen().
2196 */
2197 if (vd->vdev_aux) {
2198 (void) vdev_validate_aux(vd);
2199 if (vdev_readable(vd) && vdev_writeable(vd) &&
2200 vd->vdev_aux == &spa->spa_l2cache) {
2201 /*
2202 * When reopening we can assume the device label already
2203 * has the l2cache_persistent attribute, since we've
2204 * opened the device in the past and updated the label.
2205 * In case the vdev is present we should evict all ARC
2206 * buffers and pointers to log blocks and reclaim their
2207 * space before restoring its contents to L2ARC.
2208 */
2209 if (l2arc_vdev_present(vd)) {
2210 l2arc_rebuild_vdev(vd, B_TRUE);
2211 } else {
2212 l2arc_add_vdev(spa, vd);
2213 }
2214 spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
2215 }
2216 } else {
2217 (void) vdev_validate(vd);
2218 }
2219
2220 /*
2221 * Reassess parent vdev's health.
2222 */
2223 vdev_propagate_state(vd);
2224 }
2225
2226 int
2227 vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
2228 {
2229 int error;
2230
2231 /*
2232 * Normally, partial opens (e.g. of a mirror) are allowed.
2233 * For a create, however, we want to fail the request if
2234 * there are any components we can't open.
2235 */
2236 error = vdev_open(vd);
2237
2238 if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
2239 vdev_close(vd);
2240 return (error ? error : ENXIO);
2241 }
2242
2243 /*
2244 * Recursively load DTLs and initialize all labels.
2245 */
2246 if ((error = vdev_dtl_load(vd)) != 0 ||
2247 (error = vdev_label_init(vd, txg, isreplacing ?
2248 VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
2249 vdev_close(vd);
2250 return (error);
2251 }
2252
2253 return (0);
2254 }
2255
2256 void
2257 vdev_metaslab_set_size(vdev_t *vd)
2258 {
2259 uint64_t asize = vd->vdev_asize;
2260 uint64_t ms_count = asize >> zfs_vdev_default_ms_shift;
2261 uint64_t ms_shift;
2262
2263 /* BEGIN CSTYLED */
2264 /*
2265 * There are two dimensions to the metaslab sizing calculation:
2266 * the size of the metaslab and the count of metaslabs per vdev.
2267 *
2268 * The default values used below are a good balance between memory
2269 * usage (larger metaslab size means more memory needed for loaded
2270 * metaslabs; more metaslabs means more memory needed for the
2271 * metaslab_t structs), metaslab load time (larger metaslabs take
2272 * longer to load), and metaslab sync time (more metaslabs means
2273 * more time spent syncing all of them).
2274 *
2275 * In general, we aim for zfs_vdev_default_ms_count (200) metaslabs.
2276 * The range of the dimensions are as follows:
2277 *
2278 * 2^29 <= ms_size <= 2^34
2279 * 16 <= ms_count <= 131,072
2280 *
2281 * On the lower end of vdev sizes, we aim for metaslab sizes of
2282 * at least 512MB (2^29) to minimize fragmentation effects when
2283 * testing with smaller devices. However, the count constraint
2284 * of at least 16 metaslabs will override this minimum size goal.
2285 *
2286 * On the upper end of vdev sizes, we aim for a maximum metaslab
2287 * size of 16GB. However, we will cap the total count to 2^17
2288 * metaslabs to keep our memory footprint in check and let the
2289 * metaslab size grow from there if that limit is hit.
2290 *
2291 * The net effect of applying the above constraints is summarized below.
2292 *
2293 * vdev size metaslab count
2294 * --------------|-----------------
2295 * < 8GB ~16
2296 * 8GB - 100GB one per 512MB
2297 * 100GB - 3TB ~200
2298 * 3TB - 2PB one per 16GB
2299 * > 2PB ~131,072
2300 * --------------------------------
2301 *
2302 * Finally, note that all of the above calculate the initial
2303 * number of metaslabs. Expanding a top-level vdev will result
2304 * in additional metaslabs being allocated, making it possible
2305 * to exceed the zfs_vdev_ms_count_limit.
2306 */
2307 /* END CSTYLED */
2308
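/*
 * For illustration (a rough worked example, assuming highbit64() returns
 * the 1-based index of the highest set bit): a 1 TiB vdev has asize = 2^40,
 * so asize >> zfs_vdev_default_ms_shift gives 2048 candidate metaslabs,
 * which exceeds the default count of 200. We therefore use
 * ms_shift = highbit64(2^40 / 200) = 33, i.e. 8 GiB metaslabs, yielding
 * 2^40 >> 33 = 128 metaslabs.
 */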
2309 if (ms_count < zfs_vdev_min_ms_count)
2310 ms_shift = highbit64(asize / zfs_vdev_min_ms_count);
2311 else if (ms_count > zfs_vdev_default_ms_count)
2312 ms_shift = highbit64(asize / zfs_vdev_default_ms_count);
2313 else
2314 ms_shift = zfs_vdev_default_ms_shift;
2315
2316 if (ms_shift < SPA_MAXBLOCKSHIFT) {
2317 ms_shift = SPA_MAXBLOCKSHIFT;
2318 } else if (ms_shift > zfs_vdev_max_ms_shift) {
2319 ms_shift = zfs_vdev_max_ms_shift;
2320 /* cap the total count to constrain memory footprint */
2321 if ((asize >> ms_shift) > zfs_vdev_ms_count_limit)
2322 ms_shift = highbit64(asize / zfs_vdev_ms_count_limit);
2323 }
2324
2325 vd->vdev_ms_shift = ms_shift;
2326 ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT);
2327 }
2328
2329 void
2330 vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
2331 {
2332 ASSERT(vd == vd->vdev_top);
2333 /* indirect vdevs don't have metaslabs or dtls */
2334 ASSERT(vdev_is_concrete(vd) || flags == 0);
2335 ASSERT(ISP2(flags));
2336 ASSERT(spa_writeable(vd->vdev_spa));
2337
2338 if (flags & VDD_METASLAB)
2339 (void) txg_list_add(&vd->vdev_ms_list, arg, txg);
2340
2341 if (flags & VDD_DTL)
2342 (void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
2343
2344 (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
2345 }
2346
2347 void
2348 vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
2349 {
2350 for (int c = 0; c < vd->vdev_children; c++)
2351 vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
2352
2353 if (vd->vdev_ops->vdev_op_leaf)
2354 vdev_dirty(vd->vdev_top, flags, vd, txg);
2355 }
2356
2357 /*
2358 * DTLs.
2359 *
2360 * A vdev's DTL (dirty time log) is the set of transaction groups for which
2361 * the vdev has less than perfect replication. There are four kinds of DTL:
2362 *
2363 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
2364 *
2365 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
2366 *
2367 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
2368 * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
2369 * txgs that was scrubbed.
2370 *
2371 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
2372 * persistent errors or just some device being offline.
2373 * Unlike the other three, the DTL_OUTAGE map is not generally
2374 * maintained; it's only computed when needed, typically to
2375 * determine whether a device can be detached.
2376 *
2377 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
2378 * either has the data or it doesn't.
2379 *
2380 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
2381 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
2382 * if any child is less than fully replicated, then so is its parent.
2383 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
2384 * comprising only those txgs which appear in more than 'maxfaults' children;
2385 * those are the txgs we don't have enough replication to read. For example,
2386 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
2387 * thus, its DTL_MISSING consists of the set of txgs that appear in more than
2388 * two child DTL_MISSING maps.
2389 *
2390 * It should be clear from the above that to compute the DTLs and outage maps
2391 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
2392 * Therefore, that is all we keep on disk. When loading the pool, or after
2393 * a configuration change, we generate all other DTLs from first principles.
2394 */
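/*
 * For illustration (not exhaustive): in a two-way mirror whose children are
 * missing txgs [100,110] and [105,120] respectively, the mirror's DTL_PARTIAL
 * is the union [100,120], while its DTL_MISSING (minref == 2, i.e. both
 * children) is the intersection [105,110] -- the only txgs for which no
 * readable copy remains.
 */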
2395 void
2396 vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2397 {
2398 range_tree_t *rt = vd->vdev_dtl[t];
2399
2400 ASSERT(t < DTL_TYPES);
2401 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2402 ASSERT(spa_writeable(vd->vdev_spa));
2403
2404 mutex_enter(&vd->vdev_dtl_lock);
2405 if (!range_tree_contains(rt, txg, size))
2406 range_tree_add(rt, txg, size);
2407 mutex_exit(&vd->vdev_dtl_lock);
2408 }
2409
2410 boolean_t
2411 vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2412 {
2413 range_tree_t *rt = vd->vdev_dtl[t];
2414 boolean_t dirty = B_FALSE;
2415
2416 ASSERT(t < DTL_TYPES);
2417 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2418
2419 /*
2420 * While we are loading the pool, the DTLs have not been loaded yet.
2421 * Ignore the DTLs and try all devices. This avoids a recursive
2422 * mutex enter on the vdev_dtl_lock, and also makes us try hard
2423 * when loading the pool (relying on the checksum to ensure that
2424 * we get the right data -- note that while loading, we are
2425 * only reading the MOS, which is always checksummed).
2426 */
2427 if (vd->vdev_spa->spa_load_state != SPA_LOAD_NONE)
2428 return (B_FALSE);
2429
2430 mutex_enter(&vd->vdev_dtl_lock);
2431 if (!range_tree_is_empty(rt))
2432 dirty = range_tree_contains(rt, txg, size);
2433 mutex_exit(&vd->vdev_dtl_lock);
2434
2435 return (dirty);
2436 }
2437
2438 boolean_t
2439 vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
2440 {
2441 range_tree_t *rt = vd->vdev_dtl[t];
2442 boolean_t empty;
2443
2444 mutex_enter(&vd->vdev_dtl_lock);
2445 empty = range_tree_is_empty(rt);
2446 mutex_exit(&vd->vdev_dtl_lock);
2447
2448 return (empty);
2449 }
2450
2451 /*
2452 * Returns B_TRUE if vdev determines offset needs to be resilvered.
2453 */
2454 boolean_t
2455 vdev_dtl_need_resilver(vdev_t *vd, uint64_t offset, size_t psize)
2456 {
2457 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2458
2459 if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
2460 vd->vdev_ops->vdev_op_leaf)
2461 return (B_TRUE);
2462
2463 return (vd->vdev_ops->vdev_op_need_resilver(vd, offset, psize));
2464 }
2465
2466 /*
2467 * Returns the lowest txg in the DTL range.
2468 */
2469 static uint64_t
2470 vdev_dtl_min(vdev_t *vd)
2471 {
2472 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
2473 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
2474 ASSERT0(vd->vdev_children);
2475
2476 return (range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1);
2477 }
2478
2479 /*
2480 * Returns the highest txg in the DTL.
2481 */
2482 static uint64_t
2483 vdev_dtl_max(vdev_t *vd)
2484 {
2485 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
2486 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
2487 ASSERT0(vd->vdev_children);
2488
2489 return (range_tree_max(vd->vdev_dtl[DTL_MISSING]));
2490 }
2491
2492 /*
2493 * Determine if a resilvering vdev should remove any DTL entries from
2494 * its range. If the vdev was resilvering for the entire duration of the
2495 * scan then it should excise that range from its DTLs. Otherwise, this
2496 * vdev is considered partially resilvered and should leave its DTL
2497 * entries intact. The comment in vdev_dtl_reassess() describes how we
2498 * excise the DTLs.
2499 */
2500 static boolean_t
2501 vdev_dtl_should_excise(vdev_t *vd)
2502 {
2503 spa_t *spa = vd->vdev_spa;
2504 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
2505
2506 ASSERT0(vd->vdev_children);
2507
2508 if (vd->vdev_state < VDEV_STATE_DEGRADED)
2509 return (B_FALSE);
2510
2511 if (vd->vdev_resilver_deferred)
2512 return (B_FALSE);
2513
2514 if (vd->vdev_resilver_txg == 0 ||
2515 range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
2516 return (B_TRUE);
2517
2518 /*
2519 * When a resilver is initiated the scan will assign the scn_max_txg
2520 * value to the highest txg value that exists in all DTLs. If this
2521 * device's max DTL is not part of this scan (i.e. it is not in
2522 * the range (scn_min_txg, scn_max_txg]), then it is not eligible
2523 * for excision.
2524 */
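/*
 * For example (illustration only): if this device's DTL_MISSING spans
 * txgs [90,200] but the scan covers (80,150], then vdev_dtl_max() == 200
 * exceeds scn_max_txg, the device is considered only partially
 * resilvered, and nothing is excised.
 */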
2525 if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
2526 ASSERT3U(scn->scn_phys.scn_min_txg, <=, vdev_dtl_min(vd));
2527 ASSERT3U(scn->scn_phys.scn_min_txg, <, vd->vdev_resilver_txg);
2528 ASSERT3U(vd->vdev_resilver_txg, <=, scn->scn_phys.scn_max_txg);
2529 return (B_TRUE);
2530 }
2531 return (B_FALSE);
2532 }
2533
2534 /*
2535 * Reassess DTLs after a config change or scrub completion.
2536 */
2537 void
2538 vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
2539 {
2540 spa_t *spa = vd->vdev_spa;
2541 avl_tree_t reftree;
2542 int minref;
2543
2544 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
2545
2546 for (int c = 0; c < vd->vdev_children; c++)
2547 vdev_dtl_reassess(vd->vdev_child[c], txg,
2548 scrub_txg, scrub_done);
2549
2550 if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
2551 return;
2552
2553 if (vd->vdev_ops->vdev_op_leaf) {
2554 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
2555 boolean_t wasempty = B_TRUE;
2556
2557 mutex_enter(&vd->vdev_dtl_lock);
2558
2559 /*
2560 * If requested, pretend the scan completed cleanly.
2561 */
2562 if (zfs_scan_ignore_errors && scn)
2563 scn->scn_phys.scn_errors = 0;
2564
2565 if (scrub_txg != 0 &&
2566 !range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
2567 wasempty = B_FALSE;
2568 zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d "
2569 "dtl:%llu/%llu errors:%llu",
2570 (u_longlong_t)vd->vdev_guid, (u_longlong_t)txg,
2571 (u_longlong_t)scrub_txg, spa->spa_scrub_started,
2572 (u_longlong_t)vdev_dtl_min(vd),
2573 (u_longlong_t)vdev_dtl_max(vd),
2574 (u_longlong_t)(scn ? scn->scn_phys.scn_errors : 0));
2575 }
2576
2577 /*
2578 * If we've completed a scan cleanly then determine
2579 * if this vdev should remove any DTLs. We only want to
2580 * excise regions on vdevs that were available during
2581 * the entire duration of this scan.
2582 */
2583 if (scrub_txg != 0 &&
2584 (spa->spa_scrub_started ||
2585 (scn != NULL && scn->scn_phys.scn_errors == 0)) &&
2586 vdev_dtl_should_excise(vd)) {
2587 /*
2588 * We completed a scrub up to scrub_txg. If we
2589 * did it without rebooting, then the scrub dtl
2590 * will be valid, so excise the old region and
2591 * fold in the scrub dtl. Otherwise, leave the
2592 * dtl as-is if there was an error.
2593 *
2594 * There's a little trick here: to excise the beginning
2595 * of the DTL_MISSING map, we put it into a reference
2596 * tree and then add a segment with refcnt -1 that
2597 * covers the range [0, scrub_txg). This means
2598 * that each txg in that range has refcnt -1 or 0.
2599 * We then add DTL_SCRUB with a refcnt of 2, so that
2600 * entries in the range [0, scrub_txg) will have a
2601 * positive refcnt -- either 1 or 2. We then convert
2602 * the reference tree into the new DTL_MISSING map.
2603 */
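/*
 * Worked example (for illustration): with DTL_MISSING = [50,150],
 * scrub_txg = 100 and an empty DTL_SCRUB, txgs in [50,100) end up
 * with refcnt 1 - 1 = 0 and are excised, while txgs in [100,150]
 * keep refcnt 1 and survive into the regenerated DTL_MISSING. Any
 * txg the scrub failed to repair would appear in DTL_SCRUB with +2
 * and thus be retained as well.
 */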
2604 space_reftree_create(&reftree);
2605 space_reftree_add_map(&reftree,
2606 vd->vdev_dtl[DTL_MISSING], 1);
2607 space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
2608 space_reftree_add_map(&reftree,
2609 vd->vdev_dtl[DTL_SCRUB], 2);
2610 space_reftree_generate_map(&reftree,
2611 vd->vdev_dtl[DTL_MISSING], 1);
2612 space_reftree_destroy(&reftree);
2613
2614 if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
2615 zfs_dbgmsg("update DTL_MISSING:%llu/%llu",
2616 (u_longlong_t)vdev_dtl_min(vd),
2617 (u_longlong_t)vdev_dtl_max(vd));
2618 } else if (!wasempty) {
2619 zfs_dbgmsg("DTL_MISSING is now empty");
2620 }
2621 }
2622 range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
2623 range_tree_walk(vd->vdev_dtl[DTL_MISSING],
2624 range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
2625 if (scrub_done)
2626 range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
2627 range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
2628 if (!vdev_readable(vd))
2629 range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
2630 else
2631 range_tree_walk(vd->vdev_dtl[DTL_MISSING],
2632 range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
2633
2634 /*
2635 * If the vdev was resilvering and no longer has any
2636 * DTLs then reset its resilvering flag.
2637 */
2638 if (vd->vdev_resilver_txg != 0 &&
2639 range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
2640 range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE]))
2641 vd->vdev_resilver_txg = 0;
2642
2643 mutex_exit(&vd->vdev_dtl_lock);
2644
2645 if (txg != 0)
2646 vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
2647 return;
2648 }
2649
2650 mutex_enter(&vd->vdev_dtl_lock);
2651 for (int t = 0; t < DTL_TYPES; t++) {
2652 /* account for child's outage in parent's missing map */
2653 int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
2654 if (t == DTL_SCRUB)
2655 continue; /* leaf vdevs only */
2656 if (t == DTL_PARTIAL)
2657 minref = 1; /* i.e. non-zero */
2658 else if (vd->vdev_nparity != 0)
2659 minref = vd->vdev_nparity + 1; /* RAID-Z */
2660 else
2661 minref = vd->vdev_children; /* any kind of mirror */
2662 space_reftree_create(&reftree);
2663 for (int c = 0; c < vd->vdev_children; c++) {
2664 vdev_t *cvd = vd->vdev_child[c];
2665 mutex_enter(&cvd->vdev_dtl_lock);
2666 space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
2667 mutex_exit(&cvd->vdev_dtl_lock);
2668 }
2669 space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
2670 space_reftree_destroy(&reftree);
2671 }
2672 mutex_exit(&vd->vdev_dtl_lock);
2673 }
2674
2675 int
2676 vdev_dtl_load(vdev_t *vd)
2677 {
2678 spa_t *spa = vd->vdev_spa;
2679 objset_t *mos = spa->spa_meta_objset;
2680 int error = 0;
2681
2682 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
2683 ASSERT(vdev_is_concrete(vd));
2684
2685 error = space_map_open(&vd->vdev_dtl_sm, mos,
2686 vd->vdev_dtl_object, 0, -1ULL, 0);
2687 if (error)
2688 return (error);
2689 ASSERT(vd->vdev_dtl_sm != NULL);
2690
2691 mutex_enter(&vd->vdev_dtl_lock);
2692 error = space_map_load(vd->vdev_dtl_sm,
2693 vd->vdev_dtl[DTL_MISSING], SM_ALLOC);
2694 mutex_exit(&vd->vdev_dtl_lock);
2695
2696 return (error);
2697 }
2698
2699 for (int c = 0; c < vd->vdev_children; c++) {
2700 error = vdev_dtl_load(vd->vdev_child[c]);
2701 if (error != 0)
2702 break;
2703 }
2704
2705 return (error);
2706 }
2707
2708 static void
2709 vdev_zap_allocation_data(vdev_t *vd, dmu_tx_t *tx)
2710 {
2711 spa_t *spa = vd->vdev_spa;
2712 objset_t *mos = spa->spa_meta_objset;
2713 vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
2714 const char *string;
2715
2716 ASSERT(alloc_bias != VDEV_BIAS_NONE);
2717
2718 string =
2719 (alloc_bias == VDEV_BIAS_LOG) ? VDEV_ALLOC_BIAS_LOG :
2720 (alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL :
2721 (alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP : NULL;
2722
2723 ASSERT(string != NULL);
2724 VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS,
2725 1, strlen(string) + 1, string, tx));
2726
2727 if (alloc_bias == VDEV_BIAS_SPECIAL || alloc_bias == VDEV_BIAS_DEDUP) {
2728 spa_activate_allocation_classes(spa, tx);
2729 }
2730 }
2731
2732 void
2733 vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
2734 {
2735 spa_t *spa = vd->vdev_spa;
2736
2737 VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
2738 VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
2739 zapobj, tx));
2740 }
2741
2742 uint64_t
2743 vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
2744 {
2745 spa_t *spa = vd->vdev_spa;
2746 uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
2747 DMU_OT_NONE, 0, tx);
2748
2749 ASSERT(zap != 0);
2750 VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
2751 zap, tx));
2752
2753 return (zap);
2754 }
2755
2756 void
2757 vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
2758 {
2759 if (vd->vdev_ops != &vdev_hole_ops &&
2760 vd->vdev_ops != &vdev_missing_ops &&
2761 vd->vdev_ops != &vdev_root_ops &&
2762 !vd->vdev_top->vdev_removing) {
2763 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
2764 vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
2765 }
2766 if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
2767 vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
2768 if (vd->vdev_alloc_bias != VDEV_BIAS_NONE)
2769 vdev_zap_allocation_data(vd, tx);
2770 }
2771 }
2772
2773 for (uint64_t i = 0; i < vd->vdev_children; i++) {
2774 vdev_construct_zaps(vd->vdev_child[i], tx);
2775 }
2776 }
2777
2778 void
2779 vdev_dtl_sync(vdev_t *vd, uint64_t txg)
2780 {
2781 spa_t *spa = vd->vdev_spa;
2782 range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
2783 objset_t *mos = spa->spa_meta_objset;
2784 range_tree_t *rtsync;
2785 dmu_tx_t *tx;
2786 uint64_t object = space_map_object(vd->vdev_dtl_sm);
2787
2788 ASSERT(vdev_is_concrete(vd));
2789 ASSERT(vd->vdev_ops->vdev_op_leaf);
2790
2791 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
2792
2793 if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
2794 mutex_enter(&vd->vdev_dtl_lock);
2795 space_map_free(vd->vdev_dtl_sm, tx);
2796 space_map_close(vd->vdev_dtl_sm);
2797 vd->vdev_dtl_sm = NULL;
2798 mutex_exit(&vd->vdev_dtl_lock);
2799
2800 /*
2801 * We only destroy the leaf ZAP for detached leaves or for
2802 * removed log devices. Removed data devices handle leaf ZAP
2803 * cleanup later, once cancellation is no longer possible.
2804 */
2805 if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
2806 vd->vdev_top->vdev_islog)) {
2807 vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
2808 vd->vdev_leaf_zap = 0;
2809 }
2810
2811 dmu_tx_commit(tx);
2812 return;
2813 }
2814
2815 if (vd->vdev_dtl_sm == NULL) {
2816 uint64_t new_object;
2817
2818 new_object = space_map_alloc(mos, zfs_vdev_dtl_sm_blksz, tx);
2819 VERIFY3U(new_object, !=, 0);
2820
2821 VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
2822 0, -1ULL, 0));
2823 ASSERT(vd->vdev_dtl_sm != NULL);
2824 }
2825
2826 rtsync = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
2827
2828 mutex_enter(&vd->vdev_dtl_lock);
2829 range_tree_walk(rt, range_tree_add, rtsync);
2830 mutex_exit(&vd->vdev_dtl_lock);
2831
2832 space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx);
2833 space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
2834 range_tree_vacate(rtsync, NULL, NULL);
2835
2836 range_tree_destroy(rtsync);
2837
2838 /*
2839 * If the object for the space map has changed then dirty
2840 * the top level so that we update the config.
2841 */
2842 if (object != space_map_object(vd->vdev_dtl_sm)) {
2843 vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
2844 "new object %llu", (u_longlong_t)txg, spa_name(spa),
2845 (u_longlong_t)object,
2846 (u_longlong_t)space_map_object(vd->vdev_dtl_sm));
2847 vdev_config_dirty(vd->vdev_top);
2848 }
2849
2850 dmu_tx_commit(tx);
2851 }
2852
2853 /*
2854 * Determine whether the specified vdev can be offlined/detached/removed
2855 * without losing data.
2856 */
2857 boolean_t
2858 vdev_dtl_required(vdev_t *vd)
2859 {
2860 spa_t *spa = vd->vdev_spa;
2861 vdev_t *tvd = vd->vdev_top;
2862 uint8_t cant_read = vd->vdev_cant_read;
2863 boolean_t required;
2864
2865 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2866
2867 if (vd == spa->spa_root_vdev || vd == tvd)
2868 return (B_TRUE);
2869
2870 /*
2871 * Temporarily mark the device as unreadable, and then determine
2872 * whether this results in any DTL outages in the top-level vdev.
2873 * If not, we can safely offline/detach/remove the device.
2874 */
2875 vd->vdev_cant_read = B_TRUE;
2876 vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
2877 required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
2878 vd->vdev_cant_read = cant_read;
2879 vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
2880
2881 if (!required && zio_injection_enabled)
2882 required = !!zio_handle_device_injection(vd, NULL, ECHILD);
2883
2884 return (required);
2885 }
2886
2887 /*
2888 * Determine if resilver is needed, and if so the txg range.
2889 */
2890 boolean_t
2891 vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
2892 {
2893 boolean_t needed = B_FALSE;
2894 uint64_t thismin = UINT64_MAX;
2895 uint64_t thismax = 0;
2896
2897 if (vd->vdev_children == 0) {
2898 mutex_enter(&vd->vdev_dtl_lock);
2899 if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
2900 vdev_writeable(vd)) {
2901
2902 thismin = vdev_dtl_min(vd);
2903 thismax = vdev_dtl_max(vd);
2904 needed = B_TRUE;
2905 }
2906 mutex_exit(&vd->vdev_dtl_lock);
2907 } else {
2908 for (int c = 0; c < vd->vdev_children; c++) {
2909 vdev_t *cvd = vd->vdev_child[c];
2910 uint64_t cmin, cmax;
2911
2912 if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
2913 thismin = MIN(thismin, cmin);
2914 thismax = MAX(thismax, cmax);
2915 needed = B_TRUE;
2916 }
2917 }
2918 }
2919
2920 if (needed && minp) {
2921 *minp = thismin;
2922 *maxp = thismax;
2923 }
2924 return (needed);
2925 }
2926
2927 /*
2928 * Gets the checkpoint space map object from the vdev's ZAP.
2929 * Returns the spacemap object, or 0 if it wasn't in the ZAP
2930 * or the ZAP doesn't exist yet.
2931 */
2932 int
2933 vdev_checkpoint_sm_object(vdev_t *vd)
2934 {
2935 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
2936 if (vd->vdev_top_zap == 0) {
2937 return (0);
2938 }
2939
2940 uint64_t sm_obj = 0;
2941 int err = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
2942 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, &sm_obj);
2943
2944 ASSERT(err == 0 || err == ENOENT);
2945
2946 return (sm_obj);
2947 }
2948
2949 int
2950 vdev_load(vdev_t *vd)
2951 {
2952 int error = 0;
2953 /*
2954 * Recursively load all children.
2955 */
2956 for (int c = 0; c < vd->vdev_children; c++) {
2957 error = vdev_load(vd->vdev_child[c]);
2958 if (error != 0) {
2959 return (error);
2960 }
2961 }
2962
2963 vdev_set_deflate_ratio(vd);
2964
2965 /*
2966 * On spa_load path, grab the allocation bias from our zap
2967 */
2968 if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
2969 spa_t *spa = vd->vdev_spa;
2970 char bias_str[64];
2971
2972 if (zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
2973 VDEV_TOP_ZAP_ALLOCATION_BIAS, 1, sizeof (bias_str),
2974 bias_str) == 0) {
2975 ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE);
2976 vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str);
2977 }
2978 }
2979
2980 /*
2981 * If this is a top-level vdev, initialize its metaslabs.
2982 */
2983 if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
2984 vdev_metaslab_group_create(vd);
2985
2986 if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
2987 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2988 VDEV_AUX_CORRUPT_DATA);
2989 vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
2990 "asize=%llu", (u_longlong_t)vd->vdev_ashift,
2991 (u_longlong_t)vd->vdev_asize);
2992 return (SET_ERROR(ENXIO));
2993 }
2994
2995 error = vdev_metaslab_init(vd, 0);
2996 if (error != 0) {
2997 vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
2998 "[error=%d]", error);
2999 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3000 VDEV_AUX_CORRUPT_DATA);
3001 return (error);
3002 }
3003
3004 uint64_t checkpoint_sm_obj = vdev_checkpoint_sm_object(vd);
3005 if (checkpoint_sm_obj != 0) {
3006 objset_t *mos = spa_meta_objset(vd->vdev_spa);
3007 ASSERT(vd->vdev_asize != 0);
3008 ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
3009
3010 error = space_map_open(&vd->vdev_checkpoint_sm,
3011 mos, checkpoint_sm_obj, 0, vd->vdev_asize,
3012 vd->vdev_ashift);
3013 if (error != 0) {
3014 vdev_dbgmsg(vd, "vdev_load: space_map_open "
3015 "failed for checkpoint spacemap (obj %llu) "
3016 "[error=%d]",
3017 (u_longlong_t)checkpoint_sm_obj, error);
3018 return (error);
3019 }
3020 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
3021
3022 /*
3023 * Since the checkpoint_sm contains free entries
3024 * exclusively we can use space_map_allocated() to
3025 * indicate the cumulative checkpointed space that
3026 * has been freed.
3027 */
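/*
 * In other words, because only frees are recorded in this space map,
 * space_map_allocated() is expected to be zero or negative here;
 * negating it yields the non-negative amount of checkpointed space
 * already freed from this vdev.
 */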
3028 vd->vdev_stat.vs_checkpoint_space =
3029 -space_map_allocated(vd->vdev_checkpoint_sm);
3030 vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
3031 vd->vdev_stat.vs_checkpoint_space;
3032 }
3033 }
3034
3035 /*
3036 * If this is a leaf vdev, load its DTL.
3037 */
3038 if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
3039 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3040 VDEV_AUX_CORRUPT_DATA);
3041 vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
3042 "[error=%d]", error);
3043 return (error);
3044 }
3045
3046 uint64_t obsolete_sm_object = vdev_obsolete_sm_object(vd);
3047 if (obsolete_sm_object != 0) {
3048 objset_t *mos = vd->vdev_spa->spa_meta_objset;
3049 ASSERT(vd->vdev_asize != 0);
3050 ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
3051
3052 if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
3053 obsolete_sm_object, 0, vd->vdev_asize, 0))) {
3054 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
3055 VDEV_AUX_CORRUPT_DATA);
3056 vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
3057 "obsolete spacemap (obj %llu) [error=%d]",
3058 (u_longlong_t)obsolete_sm_object, error);
3059 return (error);
3060 }
3061 }
3062
3063 return (0);
3064 }
3065
3066 /*
3067 * The special vdev case is used for hot spares and l2cache devices. Its
3068 * sole purpose is to set the vdev state for the associated vdev. To do this,
3069 * we make sure that we can open the underlying device, then try to read the
3070 * label, and make sure that the label is sane and that it hasn't been
3071 * repurposed to another pool.
3072 */
3073 int
3074 vdev_validate_aux(vdev_t *vd)
3075 {
3076 nvlist_t *label;
3077 uint64_t guid, version;
3078 uint64_t state;
3079
3080 if (!vdev_readable(vd))
3081 return (0);
3082
3083 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
3084 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
3085 VDEV_AUX_CORRUPT_DATA);
3086 return (-1);
3087 }
3088
3089 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
3090 !SPA_VERSION_IS_SUPPORTED(version) ||
3091 nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
3092 guid != vd->vdev_guid ||
3093 nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
3094 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
3095 VDEV_AUX_CORRUPT_DATA);
3096 nvlist_free(label);
3097 return (-1);
3098 }
3099
3100 /*
3101 * We don't actually check the pool state here. If it's in fact in
3102 * use by another pool, we update this fact on the fly when requested.
3103 */
3104 nvlist_free(label);
3105 return (0);
3106 }
3107
3108 static void
3109 vdev_destroy_ms_flush_data(vdev_t *vd, dmu_tx_t *tx)
3110 {
3111 objset_t *mos = spa_meta_objset(vd->vdev_spa);
3112
3113 if (vd->vdev_top_zap == 0)
3114 return;
3115
3116 uint64_t object = 0;
3117 int err = zap_lookup(mos, vd->vdev_top_zap,
3118 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, &object);
3119 if (err == ENOENT)
3120 return;
3121
3122 VERIFY0(dmu_object_free(mos, object, tx));
3123 VERIFY0(zap_remove(mos, vd->vdev_top_zap,
3124 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, tx));
3125 }
3126
3127 /*
3128 * Free the objects used to store this vdev's spacemaps, and the array
3129 * that points to them.
3130 */
3131 void
3132 vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
3133 {
3134 if (vd->vdev_ms_array == 0)
3135 return;
3136
3137 objset_t *mos = vd->vdev_spa->spa_meta_objset;
3138 uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift;
3139 size_t array_bytes = array_count * sizeof (uint64_t);
3140 uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP);
3141 VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0,
3142 array_bytes, smobj_array, 0));
3143
3144 for (uint64_t i = 0; i < array_count; i++) {
3145 uint64_t smobj = smobj_array[i];
3146 if (smobj == 0)
3147 continue;
3148
3149 space_map_free_obj(mos, smobj, tx);
3150 }
3151
3152 kmem_free(smobj_array, array_bytes);
3153 VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
3154 vdev_destroy_ms_flush_data(vd, tx);
3155 vd->vdev_ms_array = 0;
3156 }
3157
3158 static void
3159 vdev_remove_empty_log(vdev_t *vd, uint64_t txg)
3160 {
3161 spa_t *spa = vd->vdev_spa;
3162
3163 ASSERT(vd->vdev_islog);
3164 ASSERT(vd == vd->vdev_top);
3165 ASSERT3U(txg, ==, spa_syncing_txg(spa));
3166
3167 dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
3168
3169 vdev_destroy_spacemaps(vd, tx);
3170 if (vd->vdev_top_zap != 0) {
3171 vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
3172 vd->vdev_top_zap = 0;
3173 }
3174
3175 dmu_tx_commit(tx);
3176 }
3177
3178 void
3179 vdev_sync_done(vdev_t *vd, uint64_t txg)
3180 {
3181 metaslab_t *msp;
3182 boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
3183
3184 ASSERT(vdev_is_concrete(vd));
3185
3186 while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
3187 != NULL)
3188 metaslab_sync_done(msp, txg);
3189
3190 if (reassess)
3191 metaslab_sync_reassess(vd->vdev_mg);
3192 }
3193
3194 void
3195 vdev_sync(vdev_t *vd, uint64_t txg)
3196 {
3197 spa_t *spa = vd->vdev_spa;
3198 vdev_t *lvd;
3199 metaslab_t *msp;
3200
3201 ASSERT3U(txg, ==, spa->spa_syncing_txg);
3202 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3203 if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
3204 ASSERT(vd->vdev_removing ||
3205 vd->vdev_ops == &vdev_indirect_ops);
3206
3207 vdev_indirect_sync_obsolete(vd, tx);
3208
3209 /*
3210 * If the vdev is indirect, it can't have dirty
3211 * metaslabs or DTLs.
3212 */
3213 if (vd->vdev_ops == &vdev_indirect_ops) {
3214 ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
3215 ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
3216 dmu_tx_commit(tx);
3217 return;
3218 }
3219 }
3220
3221 ASSERT(vdev_is_concrete(vd));
3222
3223 if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 &&
3224 !vd->vdev_removing) {
3225 ASSERT(vd == vd->vdev_top);
3226 ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
3227 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
3228 DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
3229 ASSERT(vd->vdev_ms_array != 0);
3230 vdev_config_dirty(vd);
3231 }
3232
3233 while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
3234 metaslab_sync(msp, txg);
3235 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
3236 }
3237
3238 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
3239 vdev_dtl_sync(lvd, txg);
3240
3241 /*
3242 * If this is an empty log device being removed, destroy the
3243 * metadata associated with it.
3244 */
3245 if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
3246 vdev_remove_empty_log(vd, txg);
3247
3248 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
3249 dmu_tx_commit(tx);
3250 }
3251
3252 uint64_t
3253 vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
3254 {
3255 return (vd->vdev_ops->vdev_op_asize(vd, psize));
3256 }
3257
3258 /*
3259 * Mark the given vdev faulted. A faulted vdev behaves as if the device could
3260 * not be opened, and no I/O is attempted.
3261 */
3262 int
3263 vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
3264 {
3265 vdev_t *vd, *tvd;
3266
3267 spa_vdev_state_enter(spa, SCL_NONE);
3268
3269 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
3270 return (spa_vdev_state_exit(spa, NULL, ENODEV));
3271
3272 if (!vd->vdev_ops->vdev_op_leaf)
3273 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
3274
3275 tvd = vd->vdev_top;
3276
3277 /*
3278 * We don't directly use the aux state here, but if we do a
3279 * vdev_reopen(), we need this value to be present to remember why we
3280 * were faulted.
3281 */
3282 vd->vdev_label_aux = aux;
3283
3284 /*
3285 * Faulted state takes precedence over degraded.
3286 */
3287 vd->vdev_delayed_close = B_FALSE;
3288 vd->vdev_faulted = 1ULL;
3289 vd->vdev_degraded = 0ULL;
3290 vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
3291
3292 /*
3293 * If this device has the only valid copy of the data, then
3294 * back off and simply mark the vdev as degraded instead.
3295 */
3296 if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
3297 vd->vdev_degraded = 1ULL;
3298 vd->vdev_faulted = 0ULL;
3299
3300 /*
3301 * If we reopen the device and it's not dead, only then do we
3302 * mark it degraded.
3303 */
3304 vdev_reopen(tvd);
3305
3306 if (vdev_readable(vd))
3307 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
3308 }
3309
3310 return (spa_vdev_state_exit(spa, vd, 0));
3311 }
3312
3313 /*
3314 * Mark the given vdev degraded. A degraded vdev is purely an indication to the
3315 * user that something is wrong. The vdev continues to operate as normal as far
3316 * as I/O is concerned.
3317 */
3318 int
3319 vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
3320 {
3321 vdev_t *vd;
3322
3323 spa_vdev_state_enter(spa, SCL_NONE);
3324
3325 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
3326 return (spa_vdev_state_exit(spa, NULL, ENODEV));
3327
3328 if (!vd->vdev_ops->vdev_op_leaf)
3329 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
3330
3331 /*
3332 * If the vdev is already faulted, then don't do anything.
3333 */
3334 if (vd->vdev_faulted || vd->vdev_degraded)
3335 return (spa_vdev_state_exit(spa, NULL, 0));
3336
3337 vd->vdev_degraded = 1ULL;
3338 if (!vdev_is_dead(vd))
3339 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
3340 aux);
3341
3342 return (spa_vdev_state_exit(spa, vd, 0));
3343 }
3344
3345 /*
3346 * Online the given vdev.
3347 *
3348 * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached
3349 * spare device should be detached when the device finishes resilvering.
3350 * Second, the online should be treated like a 'test' online case, so no FMA
3351 * events are generated if the device fails to open.
3352 */
3353 int
3354 vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
3355 {
3356 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
3357 boolean_t wasoffline;
3358 vdev_state_t oldstate;
3359
3360 spa_vdev_state_enter(spa, SCL_NONE);
3361
3362 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
3363 return (spa_vdev_state_exit(spa, NULL, ENODEV));
3364
3365 if (!vd->vdev_ops->vdev_op_leaf)
3366 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
3367
3368 wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
3369 oldstate = vd->vdev_state;
3370
3371 tvd = vd->vdev_top;
3372 vd->vdev_offline = B_FALSE;
3373 vd->vdev_tmpoffline = B_FALSE;
3374 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
3375 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
3376
3377 /* XXX - L2ARC 1.0 does not support expansion */
3378 if (!vd->vdev_aux) {
3379 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
3380 pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND);
3381 }
3382
3383 vdev_reopen(tvd);
3384 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
3385
3386 if (!vd->vdev_aux) {
3387 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
3388 pvd->vdev_expanding = B_FALSE;
3389 }
3390
3391 if (newstate)
3392 *newstate = vd->vdev_state;
3393 if ((flags & ZFS_ONLINE_UNSPARE) &&
3394 !vdev_is_dead(vd) && vd->vdev_parent &&
3395 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3396 vd->vdev_parent->vdev_child[0] == vd)
3397 vd->vdev_unspare = B_TRUE;
3398
3399 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
3400
3401 /* XXX - L2ARC 1.0 does not support expansion */
3402 if (vd->vdev_aux)
3403 return (spa_vdev_state_exit(spa, vd, ENOTSUP));
3404 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
3405 }
3406
3407 /* Restart initializing if necessary */
3408 mutex_enter(&vd->vdev_initialize_lock);
3409 if (vdev_writeable(vd) &&
3410 vd->vdev_initialize_thread == NULL &&
3411 vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) {
3412 (void) vdev_initialize(vd);
3413 }
3414 mutex_exit(&vd->vdev_initialize_lock);
3415
3416 /* Restart trimming if necessary */
3417 mutex_enter(&vd->vdev_trim_lock);
3418 if (vdev_writeable(vd) &&
3419 vd->vdev_trim_thread == NULL &&
3420 vd->vdev_trim_state == VDEV_TRIM_ACTIVE) {
3421 (void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial,
3422 vd->vdev_trim_secure);
3423 }
3424 mutex_exit(&vd->vdev_trim_lock);
3425
3426 if (wasoffline ||
3427 (oldstate < VDEV_STATE_DEGRADED &&
3428 vd->vdev_state >= VDEV_STATE_DEGRADED))
3429 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
3430
3431 return (spa_vdev_state_exit(spa, vd, 0));
3432 }
3433
3434 static int
3435 vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
3436 {
3437 vdev_t *vd, *tvd;
3438 int error = 0;
3439 uint64_t generation;
3440 metaslab_group_t *mg;
3441
3442 top:
3443 spa_vdev_state_enter(spa, SCL_ALLOC);
3444
3445 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
3446 return (spa_vdev_state_exit(spa, NULL, ENODEV));
3447
3448 if (!vd->vdev_ops->vdev_op_leaf)
3449 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
3450
3451 tvd = vd->vdev_top;
3452 mg = tvd->vdev_mg;
3453 generation = spa->spa_config_generation + 1;
3454
3455 /*
3456 * If the device isn't already offline, try to offline it.
3457 */
3458 if (!vd->vdev_offline) {
3459 /*
3460 * If this device has the only valid copy of some data,
3461 * don't allow it to be offlined. Log devices are always
3462 * expendable.
3463 */
3464 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
3465 vdev_dtl_required(vd))
3466 return (spa_vdev_state_exit(spa, NULL, EBUSY));
3467
3468 /*
3469 * If the top-level is a slog and it has had allocations
3470 * then proceed. We check that the vdev's metaslab group
3471 * is not NULL since it's possible that we may have just
3472 * added this vdev but not yet initialized its metaslabs.
3473 */
3474 if (tvd->vdev_islog && mg != NULL) {
3475 /*
3476 * Prevent any future allocations.
3477 */
3478 metaslab_group_passivate(mg);
3479 (void) spa_vdev_state_exit(spa, vd, 0);
3480
3481 error = spa_reset_logs(spa);
3482
3483 /*
3484 * If the log device was successfully reset but has
3485 * checkpointed data, do not offline it.
3486 */
3487 if (error == 0 &&
3488 tvd->vdev_checkpoint_sm != NULL) {
3489 error = ZFS_ERR_CHECKPOINT_EXISTS;
3490 }
3491
3492 spa_vdev_state_enter(spa, SCL_ALLOC);
3493
3494 /*
3495 * Check to see if the config has changed.
3496 */
3497 if (error || generation != spa->spa_config_generation) {
3498 metaslab_group_activate(mg);
3499 if (error)
3500 return (spa_vdev_state_exit(spa,
3501 vd, error));
3502 (void) spa_vdev_state_exit(spa, vd, 0);
3503 goto top;
3504 }
3505 ASSERT0(tvd->vdev_stat.vs_alloc);
3506 }
3507
3508 /*
3509 * Offline this device and reopen its top-level vdev.
3510 * If the top-level vdev is a log device then just offline
3511 * it. Otherwise, if this action results in the top-level
3512 * vdev becoming unusable, undo it and fail the request.
3513 */
3514 vd->vdev_offline = B_TRUE;
3515 vdev_reopen(tvd);
3516
3517 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
3518 vdev_is_dead(tvd)) {
3519 vd->vdev_offline = B_FALSE;
3520 vdev_reopen(tvd);
3521 return (spa_vdev_state_exit(spa, NULL, EBUSY));
3522 }
3523
3524 /*
3525 * Add the device back into the metaslab rotor so that
3526 * once we online the device it's open for business.
3527 */
3528 if (tvd->vdev_islog && mg != NULL)
3529 metaslab_group_activate(mg);
3530 }
3531
3532 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
3533
3534 return (spa_vdev_state_exit(spa, vd, 0));
3535 }
3536
3537 int
3538 vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
3539 {
3540 int error;
3541
3542 mutex_enter(&spa->spa_vdev_top_lock);
3543 error = vdev_offline_locked(spa, guid, flags);
3544 mutex_exit(&spa->spa_vdev_top_lock);
3545
3546 return (error);
3547 }
3548
3549 /*
3550 * Clear the error counts associated with this vdev. Unlike vdev_online() and
3551 * vdev_offline(), we assume the spa config is locked. We also clear all
3552 * children. If 'vd' is NULL, then the user wants to clear all vdevs.
3553 */
3554 void
3555 vdev_clear(spa_t *spa, vdev_t *vd)
3556 {
3557 vdev_t *rvd = spa->spa_root_vdev;
3558
3559 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
3560
3561 if (vd == NULL)
3562 vd = rvd;
3563
3564 vd->vdev_stat.vs_read_errors = 0;
3565 vd->vdev_stat.vs_write_errors = 0;
3566 vd->vdev_stat.vs_checksum_errors = 0;
3567 vd->vdev_stat.vs_slow_ios = 0;
3568
3569 for (int c = 0; c < vd->vdev_children; c++)
3570 vdev_clear(spa, vd->vdev_child[c]);
3571
3572 /*
3573 * It makes no sense to "clear" an indirect vdev.
3574 */
3575 if (!vdev_is_concrete(vd))
3576 return;
3577
3578 /*
3579 * If we're in the FAULTED state or have experienced failed I/O, then
3580 * clear the persistent state and attempt to reopen the device. We
3581 * also mark the vdev config dirty, so that the new faulted state is
3582 * written out to disk.
3583 */
3584 if (vd->vdev_faulted || vd->vdev_degraded ||
3585 !vdev_readable(vd) || !vdev_writeable(vd)) {
3586
3587 /*
3588 * When reopening in response to a clear event, it may be due to
3589 * a fmadm repair request. In this case, if the device is
3590 * still broken, we want to still post the ereport again.
3591 */
3592 vd->vdev_forcefault = B_TRUE;
3593
3594 vd->vdev_faulted = vd->vdev_degraded = 0ULL;
3595 vd->vdev_cant_read = B_FALSE;
3596 vd->vdev_cant_write = B_FALSE;
3597
3598 vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
3599
3600 vd->vdev_forcefault = B_FALSE;
3601
3602 if (vd != rvd && vdev_writeable(vd->vdev_top))
3603 vdev_state_dirty(vd->vdev_top);
3604
3605 /* If a resilver isn't required, check if vdevs can be culled */
3606 if (vd->vdev_aux == NULL && !vdev_is_dead(vd) &&
3607 !dsl_scan_resilvering(spa->spa_dsl_pool) &&
3608 !dsl_scan_resilver_scheduled(spa->spa_dsl_pool))
3609 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
3610
3611 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
3612 }
3613
3614 /*
3615 * When clearing a FMA-diagnosed fault, we always want to
3616 * unspare the device, as we assume that the original spare was
3617 * done in response to the FMA fault.
3618 */
3619 if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
3620 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3621 vd->vdev_parent->vdev_child[0] == vd)
3622 vd->vdev_unspare = B_TRUE;
3623 }
3624
3625 boolean_t
3626 vdev_is_dead(vdev_t *vd)
3627 {
3628 /*
3629 * Holes and missing devices are always considered "dead".
3630 * This simplifies the code since we don't have to check for
3631 * these types of devices in the various code paths.
3632 * Instead we rely on the fact that we skip over dead devices
3633 * before issuing I/O to them.
3634 */
3635 return (vd->vdev_state < VDEV_STATE_DEGRADED ||
3636 vd->vdev_ops == &vdev_hole_ops ||
3637 vd->vdev_ops == &vdev_missing_ops);
3638 }
3639
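/*
 * A vdev can service reads only if it is alive and reads have not been
 * disabled (vdev_cant_read); writes additionally require that the vdev be
 * concrete and that writes have not been disabled (vdev_cant_write).
 */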
3640 boolean_t
3641 vdev_readable(vdev_t *vd)
3642 {
3643 return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
3644 }
3645
3646 boolean_t
3647 vdev_writeable(vdev_t *vd)
3648 {
3649 return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
3650 vdev_is_concrete(vd));
3651 }
3652
3653 boolean_t
3654 vdev_allocatable(vdev_t *vd)
3655 {
3656 uint64_t state = vd->vdev_state;
3657
3658 /*
3659 * We currently allow allocations from vdevs which may be in the
3660 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
3661 * fails to reopen then we'll catch it later when we're holding
3662 * the proper locks. Note that we have to get the vdev state
3663 * in a local variable because although it changes atomically,
3664 * we're asking two separate questions about it.
3665 */
3666 return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
3667 !vd->vdev_cant_write && vdev_is_concrete(vd) &&
3668 vd->vdev_mg->mg_initialized);
3669 }
3670
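/*
 * Determine whether the given zio may be issued to this vdev: the vdev must
 * not be dead or pending removal, and must be able to accept I/O of the
 * zio's type (read or write).
 */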
3671 boolean_t
3672 vdev_accessible(vdev_t *vd, zio_t *zio)
3673 {
3674 ASSERT(zio->io_vd == vd);
3675
3676 if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
3677 return (B_FALSE);
3678
3679 if (zio->io_type == ZIO_TYPE_READ)
3680 return (!vd->vdev_cant_read);
3681
3682 if (zio->io_type == ZIO_TYPE_WRITE)
3683 return (!vd->vdev_cant_write);
3684
3685 return (B_TRUE);
3686 }
3687
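/*
 * Fold a child's basic I/O counters (ops and bytes per zio type) into the
 * parent's stats, and note whether the child is currently being removed.
 */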
3688 static void
3689 vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
3690 {
3691 for (int t = 0; t < VS_ZIO_TYPES; t++) {
3692 vs->vs_ops[t] += cvs->vs_ops[t];
3693 vs->vs_bytes[t] += cvs->vs_bytes[t];
3694 }
3695
3696 cvs->vs_scan_removing = cvd->vdev_removing;
3697 }
3698
3699 /*
3700 * Get extended stats
3701 */
3702 static void
3703 vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
3704 {
3705 int t, b;
3706 for (t = 0; t < ZIO_TYPES; t++) {
3707 for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++)
3708 vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b];
3709
3710 for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) {
3711 vsx->vsx_total_histo[t][b] +=
3712 cvsx->vsx_total_histo[t][b];
3713 }
3714 }
3715
3716 for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
3717 for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) {
3718 vsx->vsx_queue_histo[t][b] +=
3719 cvsx->vsx_queue_histo[t][b];
3720 }
3721 vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t];
3722 vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t];
3723
3724 for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++)
3725 vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b];
3726
3727 for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++)
3728 vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b];
3729 }
3730
3731 }
3732
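/*
 * Determine whether every offset on this vdev can be represented by a
 * single-word space map entry. This is always true once the two-word
 * space map format (SPA_FEATURE_SPACEMAP_V2) is active.
 */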
3733 boolean_t
3734 vdev_is_spacemap_addressable(vdev_t *vd)
3735 {
3736 if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2))
3737 return (B_TRUE);
3738
3739 /*
3740 * If double-word space map entries are not enabled we assume
3741 * 47 bits of the space map entry are dedicated to the entry's
3742 * offset (see SM_OFFSET_BITS in space_map.h). We then use that
3743 * to calculate the maximum address that can be described by a
3744 * space map entry for the given device.
3745 */
3746 uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS;
3747
3748 if (shift >= 63) /* detect potential overflow */
3749 return (B_TRUE);
3750
3751 return (vd->vdev_asize < (1ULL << shift));
3752 }
3753
3754 /*
3755 * Get statistics for the given vdev.
3756 */
3757 static void
3758 vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
3759 {
3760 int t;
3761 /*
3762 * If we're getting stats on the root vdev, aggregate the I/O counts
3763 * over all top-level vdevs (i.e. the direct children of the root).
3764 */
3765 if (!vd->vdev_ops->vdev_op_leaf) {
3766 if (vs) {
3767 memset(vs->vs_ops, 0, sizeof (vs->vs_ops));
3768 memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes));
3769 }
3770 if (vsx)
3771 memset(vsx, 0, sizeof (*vsx));
3772
3773 for (int c = 0; c < vd->vdev_children; c++) {
3774 vdev_t *cvd = vd->vdev_child[c];
3775 vdev_stat_t *cvs = &cvd->vdev_stat;
3776 vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;
3777
3778 vdev_get_stats_ex_impl(cvd, cvs, cvsx);
3779 if (vs)
3780 vdev_get_child_stat(cvd, vs, cvs);
3781 if (vsx)
3782 vdev_get_child_stat_ex(cvd, vsx, cvsx);
3783
3784 }
3785 } else {
3786 /*
3787 * We're a leaf. Just copy our ZIO active queue stats in. The
3788 * other leaf stats are updated in vdev_stat_update().
3789 */
3790 if (!vsx)
3791 return;
3792
3793 memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex));
3794
3795 for (t = 0; t < ARRAY_SIZE(vd->vdev_queue.vq_class); t++) {
3796 vsx->vsx_active_queue[t] =
3797 vd->vdev_queue.vq_class[t].vqc_active;
3798 vsx->vsx_pend_queue[t] = avl_numnodes(
3799 &vd->vdev_queue.vq_class[t].vqc_queued_tree);
3800 }
3801 }
3802 }
3803
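/*
 * Public entry point: takes vdev_stat_lock, fills in the caller-visible
 * fields (state, sizes, initialize/TRIM progress, fragmentation, etc.), and
 * aggregates the per-queue stats via vdev_get_stats_ex_impl().
 */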
3804 void
3805 vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
3806 {
3807 vdev_t *tvd = vd->vdev_top;
3808 spa_t *spa = vd->vdev_spa;
3809
3810 mutex_enter(&vd->vdev_stat_lock);
3811 if (vs) {
3812 bcopy(&vd->vdev_stat, vs, sizeof (*vs));
3813 vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
3814 vs->vs_state = vd->vdev_state;
3815 vs->vs_rsize = vdev_get_min_asize(vd);
3816 if (vd->vdev_ops->vdev_op_leaf) {
3817 vs->vs_rsize += VDEV_LABEL_START_SIZE +
3818 VDEV_LABEL_END_SIZE;
3819 /*
3820 * Report initializing progress. Since we don't
3821 * have the initializing locks held, this is only
3822 * an estimate (although a fairly accurate one).
3823 */
3824 vs->vs_initialize_bytes_done =
3825 vd->vdev_initialize_bytes_done;
3826 vs->vs_initialize_bytes_est =
3827 vd->vdev_initialize_bytes_est;
3828 vs->vs_initialize_state = vd->vdev_initialize_state;
3829 vs->vs_initialize_action_time =
3830 vd->vdev_initialize_action_time;
3831
3832 /*
3833 * Report manual TRIM progress. Since we don't have
3834 * the manual TRIM locks held, this is only an
3835 * estimate (although a fairly accurate one).
3836 */
3837 vs->vs_trim_notsup = !vd->vdev_has_trim;
3838 vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done;
3839 vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est;
3840 vs->vs_trim_state = vd->vdev_trim_state;
3841 vs->vs_trim_action_time = vd->vdev_trim_action_time;
3842 }
3843 /*
3844 * Report expandable space on top-level, non-auxiliary devices
3845 * only. The expandable space is reported in terms of metaslab
3846 * sized units since that determines how much space the pool
3847 * can expand.
3848 */
3849 if (vd->vdev_aux == NULL && tvd != NULL) {
3850 vs->vs_esize = P2ALIGN(
3851 vd->vdev_max_asize - vd->vdev_asize -
3852 spa->spa_bootsize, 1ULL << tvd->vdev_ms_shift);
3853 }
3854 if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
3855 vdev_is_concrete(vd)) {
3856 vs->vs_fragmentation = (vd->vdev_mg != NULL) ?
3857 vd->vdev_mg->mg_fragmentation : 0;
3858 }
3859 if (vd->vdev_ops->vdev_op_leaf)
3860 vs->vs_resilver_deferred = vd->vdev_resilver_deferred;
3861 }
3862
3863 vdev_get_stats_ex_impl(vd, vs, vsx);
3864 mutex_exit(&vd->vdev_stat_lock);
3865 }
3866
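/*
 * Legacy wrapper around vdev_get_stats_ex() for callers that only need the
 * basic vdev_stat_t.
 */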
3867 void
3868 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
3869 {
3870 return (vdev_get_stats_ex(vd, vs, NULL));
3871 }
3872
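/*
 * Zero this vdev's space accounting (space, dspace, alloc).
 */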
3873 void
3874 vdev_clear_stats(vdev_t *vd)
3875 {
3876 mutex_enter(&vd->vdev_stat_lock);
3877 vd->vdev_stat.vs_space = 0;
3878 vd->vdev_stat.vs_dspace = 0;
3879 vd->vdev_stat.vs_alloc = 0;
3880 mutex_exit(&vd->vdev_stat_lock);
3881 }
3882
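/*
 * Reset the scan-processed counter on this vdev and all of its children,
 * typically at the start of a new scrub or resilver pass.
 */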
3883 void
3884 vdev_scan_stat_init(vdev_t *vd)
3885 {
3886 vdev_stat_t *vs = &vd->vdev_stat;
3887
3888 for (int c = 0; c < vd->vdev_children; c++)
3889 vdev_scan_stat_init(vd->vdev_child[c]);
3890
3891 mutex_enter(&vd->vdev_stat_lock);
3892 vs->vs_scan_processed = 0;
3893 mutex_exit(&vd->vdev_stat_lock);
3894 }
3895
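/*
 * Update I/O statistics (and, on failure, error counts and DTLs) for the
 * vdev associated with a completed zio.
 */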
3896 void
3897 vdev_stat_update(zio_t *zio, uint64_t psize)
3898 {
3899 spa_t *spa = zio->io_spa;
3900 vdev_t *rvd = spa->spa_root_vdev;
3901 vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
3902 vdev_t *pvd;
3903 uint64_t txg = zio->io_txg;
3904 vdev_stat_t *vs = &vd->vdev_stat;
3905 vdev_stat_ex_t *vsx = &vd->vdev_stat_ex;
3906 zio_type_t type = zio->io_type;
3907 int flags = zio->io_flags;
3908
3909 /*
3910 * If this i/o is a gang leader, it didn't do any actual work.
3911 */
3912 if (zio->io_gang_tree)
3913 return;
3914
3915 if (zio->io_error == 0) {
3916 /*
3917 * If this is a root i/o, don't count it -- we've already
3918 * counted the top-level vdevs, and vdev_get_stats() will
3919 * aggregate them when asked. This reduces contention on
3920 * the root vdev_stat_lock and implicitly handles blocks
3921 * that compress away to holes, for which there is no i/o.
3922 * (Holes never create vdev children, so all the counters
3923 * remain zero, which is what we want.)
3924 *
3925 * Note: this only applies to successful i/o (io_error == 0)
3926 * because unlike i/o counts, errors are not additive.
3927 * When reading a ditto block, for example, failure of
3928 * one top-level vdev does not imply a root-level error.
3929 */
3930 if (vd == rvd)
3931 return;
3932
3933 ASSERT(vd == zio->io_vd);
3934
3935 if (flags & ZIO_FLAG_IO_BYPASS)
3936 return;
3937
3938 mutex_enter(&vd->vdev_stat_lock);
3939
3940 if (flags & ZIO_FLAG_IO_REPAIR) {
3941 if (flags & ZIO_FLAG_SCAN_THREAD) {
3942 dsl_scan_phys_t *scn_phys =
3943 &spa->spa_dsl_pool->dp_scan->scn_phys;
3944 uint64_t *processed = &scn_phys->scn_processed;
3945
3946 /* XXX cleanup? */
3947 if (vd->vdev_ops->vdev_op_leaf)
3948 atomic_add_64(processed, psize);
3949 vs->vs_scan_processed += psize;
3950 }
3951
3952 if (flags & ZIO_FLAG_SELF_HEAL)
3953 vs->vs_self_healed += psize;
3954 }
3955
3956 /*
3957 * The bytes/ops/histograms are recorded at the leaf level and
3958 * aggregated into the higher level vdevs in vdev_get_stats().
3959 */
3960 if (vd->vdev_ops->vdev_op_leaf &&
3961 (zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) {
3962 zio_type_t vs_type = type;
3963
3964 /*
3965 * TRIM ops and bytes are reported to user space as
3966 * ZIO_TYPE_IOCTL. This is done to preserve the
3967 * vdev_stat_t structure layout for user space.
3968 */
3969 if (type == ZIO_TYPE_TRIM)
3970 vs_type = ZIO_TYPE_IOCTL;
3971
3972 vs->vs_ops[vs_type]++;
3973 vs->vs_bytes[vs_type] += psize;
3974
3975 if (flags & ZIO_FLAG_DELEGATED) {
3976 vsx->vsx_agg_histo[zio->io_priority]
3977 [RQ_HISTO(zio->io_size)]++;
3978 } else {
3979 vsx->vsx_ind_histo[zio->io_priority]
3980 [RQ_HISTO(zio->io_size)]++;
3981 }
3982
3983 if (zio->io_delta && zio->io_delay) {
3984 vsx->vsx_queue_histo[zio->io_priority]
3985 [L_HISTO(zio->io_delta - zio->io_delay)]++;
3986 vsx->vsx_disk_histo[type]
3987 [L_HISTO(zio->io_delay)]++;
3988 vsx->vsx_total_histo[type]
3989 [L_HISTO(zio->io_delta)]++;
3990 }
3991 }
3992
3993 mutex_exit(&vd->vdev_stat_lock);
3994 return;
3995 }
3996
3997 if (flags & ZIO_FLAG_SPECULATIVE)
3998 return;
3999
4000 /*
4001 * If this is an I/O error that is going to be retried, then ignore the
4002 * error. Otherwise, the user may interpret B_FAILFAST I/O errors as
4003 * hard errors, when in reality they can happen for any number of
4004 * innocuous reasons (bus resets, MPxIO link failure, etc).
4005 */
4006 if (zio->io_error == EIO &&
4007 !(zio->io_flags & ZIO_FLAG_IO_RETRY))
4008 return;
4009
4010 /*
4011 * Intent log writes won't propagate their error to the root
4012 * I/O so don't mark these types of failures as pool-level
4013 * errors.
4014 */
4015 if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
4016 return;
4017
4018 mutex_enter(&vd->vdev_stat_lock);
4019 if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) {
4020 if (zio->io_error == ECKSUM)
4021 vs->vs_checksum_errors++;
4022 else
4023 vs->vs_read_errors++;
4024 }
4025 if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd))
4026 vs->vs_write_errors++;
4027 mutex_exit(&vd->vdev_stat_lock);
4028
4029 if (spa->spa_load_state == SPA_LOAD_NONE &&
4030 type == ZIO_TYPE_WRITE && txg != 0 &&
4031 (!(flags & ZIO_FLAG_IO_REPAIR) ||
4032 (flags & ZIO_FLAG_SCAN_THREAD) ||
4033 spa->spa_claiming)) {
4034 /*
4035 * This is either a normal write (not a repair), or it's
4036 * a repair induced by the scrub thread, or it's a repair
4037 * made by zil_claim() during spa_load() in the first txg.
4038 * In the normal case, we commit the DTL change in the same
4039 * txg as the block was born. In the scrub-induced repair
4040 * case, we know that scrubs run in first-pass syncing context,
4041 * so we commit the DTL change in spa_syncing_txg(spa).
4042 * In the zil_claim() case, we commit in spa_first_txg(spa).
4043 *
4044 * We currently do not make DTL entries for failed spontaneous
4045 * self-healing writes triggered by normal (non-scrubbing)
4046 * reads, because we have no transactional context in which to
4047 * do so -- and it's not clear that it'd be desirable anyway.
4048 */
4049 if (vd->vdev_ops->vdev_op_leaf) {
4050 uint64_t commit_txg = txg;
4051 if (flags & ZIO_FLAG_SCAN_THREAD) {
4052 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
4053 ASSERT(spa_sync_pass(spa) == 1);
4054 vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
4055 commit_txg = spa_syncing_txg(spa);
4056 } else if (spa->spa_claiming) {
4057 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
4058 commit_txg = spa_first_txg(spa);
4059 }
4060 ASSERT(commit_txg >= spa_syncing_txg(spa));
4061 if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
4062 return;
4063 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
4064 vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
4065 vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
4066 }
4067 if (vd != rvd)
4068 vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
4069 }
4070 }
4071
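/*
 * Convert a raw space delta into "deflated" space by applying this vdev's
 * deflate ratio to the delta expressed in SPA_MINBLOCKSIZE units.
 */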
4072 int64_t
4073 vdev_deflated_space(vdev_t *vd, int64_t space)
4074 {
4075 ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
4076 ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
4077
4078 return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
4079 }
4080
4081 /*
4082 * Update the in-core space usage stats for this vdev, its metaslab class,
4083 * and the root vdev.
4084 */
4085 void
4086 vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
4087 int64_t space_delta)
4088 {
4089 int64_t dspace_delta;
4090 spa_t *spa = vd->vdev_spa;
4091 vdev_t *rvd = spa->spa_root_vdev;
4092
4093 ASSERT(vd == vd->vdev_top);
4094
4095 /*
4096 * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
4097 * factor. We must calculate this here and not at the root vdev
4098 * because the root vdev's psize-to-asize is simply the max of its
4099 * children's, thus not accurate enough for us.
4100 */
4101 dspace_delta = vdev_deflated_space(vd, space_delta);
4102
4103 mutex_enter(&vd->vdev_stat_lock);
4104 /* ensure we won't underflow */
4105 if (alloc_delta < 0) {
4106 ASSERT3U(vd->vdev_stat.vs_alloc, >=, -alloc_delta);
4107 }
4108
4109 vd->vdev_stat.vs_alloc += alloc_delta;
4110 vd->vdev_stat.vs_space += space_delta;
4111 vd->vdev_stat.vs_dspace += dspace_delta;
4112 mutex_exit(&vd->vdev_stat_lock);
4113
4114 /* every class but log contributes to root space stats */
4115 if (vd->vdev_mg != NULL && !vd->vdev_islog) {
4116 ASSERT(!vd->vdev_isl2cache);
4117 mutex_enter(&rvd->vdev_stat_lock);
4118 rvd->vdev_stat.vs_alloc += alloc_delta;
4119 rvd->vdev_stat.vs_space += space_delta;
4120 rvd->vdev_stat.vs_dspace += dspace_delta;
4121 mutex_exit(&rvd->vdev_stat_lock);
4122 }
4123 /* Note: metaslab_class_space_update moved to metaslab_space_update */
4124 }
4125
4126 /*
4127 * Mark a top-level vdev's config as dirty, placing it on the dirty list
4128 * so that it will be written out next time the vdev configuration is synced.
4129 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
4130 */
4131 void
4132 vdev_config_dirty(vdev_t *vd)
4133 {
4134 spa_t *spa = vd->vdev_spa;
4135 vdev_t *rvd = spa->spa_root_vdev;
4136 int c;
4137
4138 ASSERT(spa_writeable(spa));
4139
4140 /*
4141 * If this is an aux vdev (as with l2cache and spare devices), then we
4142 * update the vdev config manually and set the sync flag.
4143 */
4144 if (vd->vdev_aux != NULL) {
4145 spa_aux_vdev_t *sav = vd->vdev_aux;
4146 nvlist_t **aux;
4147 uint_t naux;
4148
4149 for (c = 0; c < sav->sav_count; c++) {
4150 if (sav->sav_vdevs[c] == vd)
4151 break;
4152 }
4153
4154 if (c == sav->sav_count) {
4155 /*
4156 * We're being removed. There's nothing more to do.
4157 */
4158 ASSERT(sav->sav_sync == B_TRUE);
4159 return;
4160 }
4161
4162 sav->sav_sync = B_TRUE;
4163
4164 if (nvlist_lookup_nvlist_array(sav->sav_config,
4165 ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
4166 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
4167 ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
4168 }
4169
4170 ASSERT(c < naux);
4171
4172 /*
4173 * Setting the nvlist in the middle of the array is a little
4174 * sketchy, but it will work.
4175 */
4176 nvlist_free(aux[c]);
4177 aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
4178
4179 return;
4180 }
4181
4182 /*
4183 * The dirty list is protected by the SCL_CONFIG lock. The caller
4184 * must either hold SCL_CONFIG as writer, or must be the sync thread
4185 * (which holds SCL_CONFIG as reader). There's only one sync thread,
4186 * so this is sufficient to ensure mutual exclusion.
4187 */
4188 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
4189 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
4190 spa_config_held(spa, SCL_CONFIG, RW_READER)));
4191
4192 if (vd == rvd) {
4193 for (c = 0; c < rvd->vdev_children; c++)
4194 vdev_config_dirty(rvd->vdev_child[c]);
4195 } else {
4196 ASSERT(vd == vd->vdev_top);
4197
4198 if (!list_link_active(&vd->vdev_config_dirty_node) &&
4199 vdev_is_concrete(vd)) {
4200 list_insert_head(&spa->spa_config_dirty_list, vd);
4201 }
4202 }
4203 }
4204
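/*
 * Remove a top-level vdev from the dirty config list once its configuration
 * has been written out.
 */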
4205 void
4206 vdev_config_clean(vdev_t *vd)
4207 {
4208 spa_t *spa = vd->vdev_spa;
4209
4210 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
4211 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
4212 spa_config_held(spa, SCL_CONFIG, RW_READER)));
4213
4214 ASSERT(list_link_active(&vd->vdev_config_dirty_node));
4215 list_remove(&spa->spa_config_dirty_list, vd);
4216 }
4217
4218 /*
4219 * Mark a top-level vdev's state as dirty, so that the next pass of
4220 * spa_sync() can convert this into vdev_config_dirty(). We distinguish
4221 * the state changes from larger config changes because they require
4222 * much less locking, and are often needed for administrative actions.
4223 */
4224 void
4225 vdev_state_dirty(vdev_t *vd)
4226 {
4227 spa_t *spa = vd->vdev_spa;
4228
4229 ASSERT(spa_writeable(spa));
4230 ASSERT(vd == vd->vdev_top);
4231
4232 /*
4233 * The state list is protected by the SCL_STATE lock. The caller
4234 * must either hold SCL_STATE as writer, or must be the sync thread
4235 * (which holds SCL_STATE as reader). There's only one sync thread,
4236 * so this is sufficient to ensure mutual exclusion.
4237 */
4238 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
4239 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
4240 spa_config_held(spa, SCL_STATE, RW_READER)));
4241
4242 if (!list_link_active(&vd->vdev_state_dirty_node) &&
4243 vdev_is_concrete(vd))
4244 list_insert_head(&spa->spa_state_dirty_list, vd);
4245 }
4246
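/*
 * Remove a top-level vdev from the dirty state list once its state change
 * has been synced.
 */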
4247 void
4248 vdev_state_clean(vdev_t *vd)
4249 {
4250 spa_t *spa = vd->vdev_spa;
4251
4252 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
4253 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
4254 spa_config_held(spa, SCL_STATE, RW_READER)));
4255
4256 ASSERT(list_link_active(&vd->vdev_state_dirty_node));
4257 list_remove(&spa->spa_state_dirty_list, vd);
4258 }
4259
4260 /*
4261 * Propagate vdev state up from children to parent.
4262 */
4263 void
4264 vdev_propagate_state(vdev_t *vd)
4265 {
4266 spa_t *spa = vd->vdev_spa;
4267 vdev_t *rvd = spa->spa_root_vdev;
4268 int degraded = 0, faulted = 0;
4269 int corrupted = 0;
4270 vdev_t *child;
4271
4272 if (vd->vdev_children > 0) {
4273 for (int c = 0; c < vd->vdev_children; c++) {
4274 child = vd->vdev_child[c];
4275
4276 /*
4277 * Don't factor holes or indirect vdevs into the
4278 * decision.
4279 */
4280 if (!vdev_is_concrete(child))
4281 continue;
4282
4283 if (!vdev_readable(child) ||
4284 (!vdev_writeable(child) && spa_writeable(spa))) {
4285 /*
4286 * Root special: if there is a top-level log
4287 * device, treat the root vdev as if it were
4288 * degraded.
4289 */
4290 if (child->vdev_islog && vd == rvd)
4291 degraded++;
4292 else
4293 faulted++;
4294 } else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
4295 degraded++;
4296 }
4297
4298 if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
4299 corrupted++;
4300 }
4301
4302 vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
4303
4304 /*
4305 * Root special: if there is a top-level vdev that cannot be
4306 * opened due to corrupted metadata, then propagate the root
4307 * vdev's aux state as 'corrupt' rather than 'insufficient
4308 * replicas'.
4309 */
4310 if (corrupted && vd == rvd &&
4311 rvd->vdev_state == VDEV_STATE_CANT_OPEN)
4312 vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
4313 VDEV_AUX_CORRUPT_DATA);
4314 }
4315
4316 if (vd->vdev_parent)
4317 vdev_propagate_state(vd->vdev_parent);
4318 }
4319
4320 /*
4321 * Set a vdev's state. If this is during an open, we don't update the parent
4322 * state, because we're in the process of opening children depth-first.
4323 * Otherwise, we propagate the change to the parent.
4324 *
4325 * If this routine places a device in a faulted state, an appropriate
4326 * ereport is generated.
4327 */
4328 void
4329 vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
4330 {
4331 uint64_t save_state;
4332 spa_t *spa = vd->vdev_spa;
4333
4334 if (state == vd->vdev_state) {
4335 vd->vdev_stat.vs_aux = aux;
4336 return;
4337 }
4338
4339 save_state = vd->vdev_state;
4340
4341 vd->vdev_state = state;
4342 vd->vdev_stat.vs_aux = aux;
4343
4344 /*
4345 * If we are setting the vdev state to anything but an open state, then
4346 * always close the underlying device unless the device has requested
4347 * a delayed close (i.e. we're about to remove or fault the device).
4348 * Otherwise, we keep accessible but invalid devices open forever.
4349 * We don't call vdev_close() itself, because that implies some extra
4350 * checks (offline, etc) that we don't want here. This is limited to
4351 * leaf devices, because otherwise closing the device will affect other
4352 * children.
4353 */
4354 if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
4355 vd->vdev_ops->vdev_op_leaf)
4356 vd->vdev_ops->vdev_op_close(vd);
4357
4358 /*
4359 * If we have brought this vdev back into service, we need
4360 * to notify fmd so that it can gracefully repair any outstanding
4361 * cases due to a missing device. We do this in all cases, even those
4362 * that probably don't correlate to a repaired fault. This is sure to
4363 * catch all cases, and we let the zfs-retire agent sort it out. If
4364 * this is a transient state it's OK, as the retire agent will
4365 * double-check the state of the vdev before repairing it.
4366 */
4367 if (state == VDEV_STATE_HEALTHY && vd->vdev_ops->vdev_op_leaf &&
4368 vd->vdev_prevstate != state)
4369 zfs_post_state_change(spa, vd);
4370
4371 if (vd->vdev_removed &&
4372 state == VDEV_STATE_CANT_OPEN &&
4373 (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
4374 /*
4375 * If the previous state is set to VDEV_STATE_REMOVED, then this
4376 * device was previously marked removed and someone attempted to
4377 * reopen it. If this failed due to a nonexistent device, then
4378 * keep the device in the REMOVED state. We also allow this if it
4379 * is one of our special test online cases, which are only
4380 * attempting to online the device and shouldn't generate an FMA
4381 * fault.
4382 */
4383 vd->vdev_state = VDEV_STATE_REMOVED;
4384 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
4385 } else if (state == VDEV_STATE_REMOVED) {
4386 vd->vdev_removed = B_TRUE;
4387 } else if (state == VDEV_STATE_CANT_OPEN) {
4388 /*
4389 * If we fail to open a vdev during an import or recovery, we
4390 * mark it as "not available", which signifies that it was
4391 * never there to begin with. Failure to open such a device
4392 * is not considered an error.
4393 */
4394 if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
4395 spa_load_state(spa) == SPA_LOAD_RECOVER) &&
4396 vd->vdev_ops->vdev_op_leaf)
4397 vd->vdev_not_present = 1;
4398
4399 /*
4400 * Post the appropriate ereport. If the 'prevstate' field is
4401 * set to something other than VDEV_STATE_UNKNOWN, it indicates
4402 * that this is part of a vdev_reopen(). In this case, we don't
4403 * want to post the ereport if the device was already in the
4404 * CANT_OPEN state beforehand.
4405 *
4406 * If the 'checkremove' flag is set, then this is an attempt to
4407 * online the device in response to an insertion event. If we
4408 * hit this case, then we have detected an insertion event for a
4409 * faulted or offline device that wasn't in the removed state.
4410 * In this scenario, we don't post an ereport because we are
4411 * about to replace the device, or attempt an online with
4412 * vdev_forcefault, which will generate the fault for us.
4413 */
4414 if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
4415 !vd->vdev_not_present && !vd->vdev_checkremove &&
4416 vd != spa->spa_root_vdev) {
4417 const char *class;
4418
4419 switch (aux) {
4420 case VDEV_AUX_OPEN_FAILED:
4421 class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
4422 break;
4423 case VDEV_AUX_CORRUPT_DATA:
4424 class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
4425 break;
4426 case VDEV_AUX_NO_REPLICAS:
4427 class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
4428 break;
4429 case VDEV_AUX_BAD_GUID_SUM:
4430 class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
4431 break;
4432 case VDEV_AUX_TOO_SMALL:
4433 class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
4434 break;
4435 case VDEV_AUX_BAD_LABEL:
4436 class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
4437 break;
4438 case VDEV_AUX_BAD_ASHIFT:
4439 class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT;
4440 break;
4441 default:
4442 class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
4443 }
4444
4445 (void) zfs_ereport_post(class, spa, vd, NULL, NULL,
4446 save_state, 0);
4447 }
4448
4449 /* Erase any notion of persistent removed state */
4450 vd->vdev_removed = B_FALSE;
4451 } else {
4452 vd->vdev_removed = B_FALSE;
4453 }
4454
4455 if (!isopen && vd->vdev_parent)
4456 vdev_propagate_state(vd->vdev_parent);
4457 }
4458
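/*
 * Returns B_TRUE only if every child of this interior vdev is in the
 * OFFLINE state.
 */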
4459 boolean_t
4460 vdev_children_are_offline(vdev_t *vd)
4461 {
4462 ASSERT(!vd->vdev_ops->vdev_op_leaf);
4463
4464 for (uint64_t i = 0; i < vd->vdev_children; i++) {
4465 if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
4466 return (B_FALSE);
4467 }
4468
4469 return (B_TRUE);
4470 }
4471
4472 /*
4473 * Check the vdev configuration to ensure that it's capable of supporting
4474 * a root pool. We do not support partial configuration.
4475 */
4476 boolean_t
4477 vdev_is_bootable(vdev_t *vd)
4478 {
4479 if (!vd->vdev_ops->vdev_op_leaf) {
4480 char *vdev_type = vd->vdev_ops->vdev_op_type;
4481
4482 if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
4483 return (B_FALSE);
4484 }
4485 }
4486
4487 for (int c = 0; c < vd->vdev_children; c++) {
4488 if (!vdev_is_bootable(vd->vdev_child[c]))
4489 return (B_FALSE);
4490 }
4491 return (B_TRUE);
4492 }
4493
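/*
 * A vdev is "concrete" if it can actually hold allocated data: indirect,
 * hole, missing, and root vdevs are not concrete.
 */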
4494 boolean_t
4495 vdev_is_concrete(vdev_t *vd)
4496 {
4497 vdev_ops_t *ops = vd->vdev_ops;
4498 if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
4499 ops == &vdev_missing_ops || ops == &vdev_root_ops) {
4500 return (B_FALSE);
4501 } else {
4502 return (B_TRUE);
4503 }
4504 }
4505
4506 /*
4507 * Determine if a log device has valid content. If the vdev was
4508 * removed or faulted in the MOS config then we know that
4509 * the content on the log device has already been written to the pool.
4510 */
4511 boolean_t
4512 vdev_log_state_valid(vdev_t *vd)
4513 {
4514 if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
4515 !vd->vdev_removed)
4516 return (B_TRUE);
4517
4518 for (int c = 0; c < vd->vdev_children; c++)
4519 if (vdev_log_state_valid(vd->vdev_child[c]))
4520 return (B_TRUE);
4521
4522 return (B_FALSE);
4523 }
4524
4525 /*
4526 * Expand a vdev if possible.
4527 */
4528 void
4529 vdev_expand(vdev_t *vd, uint64_t txg)
4530 {
4531 ASSERT(vd->vdev_top == vd);
4532 ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
4533 ASSERT(vdev_is_concrete(vd));
4534
4535 vdev_set_deflate_ratio(vd);
4536
4537 if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
4538 vdev_is_concrete(vd)) {
4539 vdev_metaslab_group_create(vd);
4540 VERIFY(vdev_metaslab_init(vd, txg) == 0);
4541 vdev_config_dirty(vd);
4542 }
4543 }
4544
4545 /*
4546 * Split a vdev.
4547 */
4548 void
4549 vdev_split(vdev_t *vd)
4550 {
4551 vdev_t *cvd, *pvd = vd->vdev_parent;
4552
4553 vdev_remove_child(pvd, vd);
4554 vdev_compact_children(pvd);
4555
4556 cvd = pvd->vdev_child[0];
4557 if (pvd->vdev_children == 1) {
4558 vdev_remove_parent(cvd);
4559 cvd->vdev_splitting = B_TRUE;
4560 }
4561 vdev_propagate_state(cvd);
4562 }
4563
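/*
 * Walk the vdev tree and, for each leaf, check whether the oldest active
 * I/O has been outstanding longer than the deadman threshold; if so, panic
 * the system.
 */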
4564 void
4565 vdev_deadman(vdev_t *vd)
4566 {
4567 for (int c = 0; c < vd->vdev_children; c++) {
4568 vdev_t *cvd = vd->vdev_child[c];
4569
4570 vdev_deadman(cvd);
4571 }
4572
4573 if (vd->vdev_ops->vdev_op_leaf) {
4574 vdev_queue_t *vq = &vd->vdev_queue;
4575
4576 mutex_enter(&vq->vq_lock);
4577 if (avl_numnodes(&vq->vq_active_tree) > 0) {
4578 spa_t *spa = vd->vdev_spa;
4579 zio_t *fio;
4580 uint64_t delta;
4581
4582 /*
4583 * Look at the head of all the pending queues;
4584 * if any I/O has been outstanding for longer than
4585 * spa_deadman_synctime, we panic the system.
4586 */
4587 fio = avl_first(&vq->vq_active_tree);
4588 delta = gethrtime() - fio->io_timestamp;
4589 if (delta > spa_deadman_synctime(spa)) {
4590 vdev_dbgmsg(vd, "SLOW IO: zio timestamp "
4591 "%lluns, delta %lluns, last io %lluns",
4592 fio->io_timestamp, (u_longlong_t)delta,
4593 vq->vq_io_complete_ts);
4594 fm_panic("I/O to pool '%s' appears to be "
4595 "hung.", spa_name(spa));
4596 }
4597 }
4598 mutex_exit(&vq->vq_lock);
4599 }
4600 }
4601
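/*
 * Mark a leaf vdev as needing a deferred resilver, and note on the pool
 * that at least one resilver has been deferred.
 */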
4602 void
4603 vdev_defer_resilver(vdev_t *vd)
4604 {
4605 ASSERT(vd->vdev_ops->vdev_op_leaf);
4606
4607 vd->vdev_resilver_deferred = B_TRUE;
4608 vd->vdev_spa->spa_resilver_deferred = B_TRUE;
4609 }
4610
4611 /*
4612 * Clears the resilver deferred flag on all leaf devs under vd. Returns
4613 * B_TRUE if we have devices that need to be resilvered and are available to
4614 * accept resilver I/Os.
4615 */
4616 boolean_t
4617 vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx)
4618 {
4619 boolean_t resilver_needed = B_FALSE;
4620 spa_t *spa = vd->vdev_spa;
4621
4622 for (int c = 0; c < vd->vdev_children; c++) {
4623 vdev_t *cvd = vd->vdev_child[c];
4624 resilver_needed |= vdev_clear_resilver_deferred(cvd, tx);
4625 }
4626
4627 if (vd == spa->spa_root_vdev &&
4628 spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) {
4629 spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
4630 vdev_config_dirty(vd);
4631 spa->spa_resilver_deferred = B_FALSE;
4632 return (resilver_needed);
4633 }
4634
4635 if (!vdev_is_concrete(vd) || vd->vdev_aux ||
4636 !vd->vdev_ops->vdev_op_leaf)
4637 return (resilver_needed);
4638
4639 vd->vdev_resilver_deferred = B_FALSE;
4640
4641 return (!vdev_is_dead(vd) && !vd->vdev_offline &&
4642 vdev_resilver_needed(vd, NULL, NULL));
4643 }
4644
4645 /*
4646 * Translate a logical range to the physical range for the specified vdev_t.
4647 * This function is initially called with a leaf vdev and will walk each
4648 * parent vdev until it reaches a top-level vdev. Once the top-level is
4649 * reached the physical range is initialized and the recursive function
4650 * begins to unwind. As it unwinds it calls the parent's vdev specific
4651 * translation function to do the real conversion.
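 *
 * For example, a caller (vdev_initialize, vdev_trim, etc.) might translate
 * a logical range roughly like this (illustrative sketch only; "offset",
 * "size" and "leaf_vd" are placeholders):
 *
 *	range_seg64_t logical_rs, physical_rs;
 *	logical_rs.rs_start = offset;
 *	logical_rs.rs_end = offset + size;
 *	vdev_xlate(leaf_vd, &logical_rs, &physical_rs);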
4652 */
4653 void
4654 vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
4655 range_seg64_t *physical_rs)
4656 {
4657 /*
4658 * Walk up the vdev tree
4659 */
4660 if (vd != vd->vdev_top) {
4661 vdev_xlate(vd->vdev_parent, logical_rs, physical_rs);
4662 } else {
4663 /*
4664 * We've reached the top-level vdev, initialize the
4665 * physical range to the logical range and start to
4666 * unwind.
4667 */
4668 physical_rs->rs_start = logical_rs->rs_start;
4669 physical_rs->rs_end = logical_rs->rs_end;
4670 return;
4671 }
4672
4673 vdev_t *pvd = vd->vdev_parent;
4674 ASSERT3P(pvd, !=, NULL);
4675 ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);
4676
4677 /*
4678 * As this recursive function unwinds, translate the logical
4679 * range into its physical components by calling the
4680 * vdev specific translate function.
4681 */
4682 range_seg64_t intermediate = { 0 };
4683 pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate);
4684
4685 physical_rs->rs_start = intermediate.rs_start;
4686 physical_rs->rs_end = intermediate.rs_end;
4687 }
4688