1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
25 * Copyright (c) 2017, Intel Corporation.
26 * Copyright 2020 Joyent, Inc.
27 */
28
29 /*
30 * Virtual Device Labels
31 * ---------------------
32 *
33 * The vdev label serves several distinct purposes:
34 *
35 * 1. Uniquely identify this device as part of a ZFS pool and confirm its
36 * identity within the pool.
37 *
38 * 2. Verify that all the devices given in a configuration are present
39 * within the pool.
40 *
41 * 3. Determine the uberblock for the pool.
42 *
43 * 4. In case of an import operation, determine the configuration of the
44 * top-level vdev of which it is a part.
45 *
46 * 5. If an import operation cannot find all the devices in the pool,
47 * provide enough information to the administrator to determine which
48 * devices are missing.
49 *
50 * It is important to note that while the kernel is responsible for writing the
51 * label, it only consumes the information in the first three cases. The
52 * latter information is only consumed in userland when determining the
53 * configuration to import a pool.
54 *
55 *
56 * Label Organization
57 * ------------------
58 *
59 * Before describing the contents of the label, it's important to understand how
60 * the labels are written and updated with respect to the uberblock.
61 *
62 * When the pool configuration is altered, either because it was newly created
63 * or a device was added, we want to update all the labels such that we can deal
64 * with fatal failure at any point. To this end, each disk has two labels which
65 * are updated before and after the uberblock is synced. Assuming we have
66 * labels and an uberblock with the following transaction groups:
67 *
68 * L1 UB L2
69 * +------+ +------+ +------+
70 * | | | | | |
71 * | t10 | | t10 | | t10 |
72 * | | | | | |
73 * +------+ +------+ +------+
74 *
75 * In this stable state, the labels and the uberblock were all updated within
76 * the same transaction group (10). Each label is mirrored and checksummed, so
77 * that we can detect when we fail partway through writing the label.
78 *
79 * In order to identify which labels are valid, the labels are written in the
80 * following manner:
81 *
82 * 1. For each vdev, update 'L1' to the new label
83 * 2. Update the uberblock
84 * 3. For each vdev, update 'L2' to the new label
85 *
86 * Given arbitrary failure, we can determine the correct label to use based on
87 * the transaction group. If we fail after updating L1 but before updating the
88 * UB, we will notice that L1's transaction group is greater than the uberblock's,
89 * so L2 must be valid. If we fail after writing the uberblock but before
90 * writing L2, we will notice that L2's transaction group is less than L1, and
91 * therefore L1 is valid.
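*
* Expressed as a sketch (illustrative only; the real logic lives in
* vdev_label_read_config() and the uberblock selection code below):
*
*	if (L1.txg > UB.txg)		/* died between steps 1 and 2 */
*		use L2;
*	else if (L2.txg < L1.txg)	/* died between steps 2 and 3 */
*		use L1;
*	else				/* all three agree */
*		use either;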
92 *
93 * An added complexity is that not every label is updated when the config
94 * is synced. If we add a single device, we do not want to have to re-write
95 * every label for every device in the pool. This means that both L1 and L2 may
96 * be older than the pool uberblock, because the necessary information is stored
97 * on another vdev.
98 *
99 *
100 * On-disk Format
101 * --------------
102 *
103 * The vdev label consists of two distinct parts, and is wrapped within the
104 * vdev_label_t structure. The label begins with 8k of padding that permits
105 * legacy VTOC disk labels; this padding is otherwise ignored.
106 *
107 * The first half of the label is a packed nvlist which contains pool-wide
108 * properties, per-vdev properties, and configuration information. It is
109 * described in more detail below.
110 *
111 * The latter half of the label consists of a redundant array of uberblocks.
112 * These uberblocks are updated whenever a transaction group is committed,
113 * or when the configuration is updated. When a pool is loaded, we scan each
114 * vdev for the 'best' uberblock.
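*
* Overall, a single label (vdev_label_t, 256K; see vdev_impl.h for the
* authoritative sizes) is laid out as:
*
*	+--------------+------------+------------------+------------------+
*	| vl_pad1 (8K) | vl_be (8K) | vl_vdev_phys     | vl_uberblock     |
*	| VTOC padding | bootenv    | nvlist (112K)    | ring (128K)      |
*	+--------------+------------+------------------+------------------+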
115 *
116 *
117 * Configuration Information
118 * -------------------------
119 *
120 * The nvlist describing the pool and vdev contains the following elements:
121 *
122 * version ZFS on-disk version
123 * name Pool name
124 * state Pool state
125 * txg Transaction group in which this label was written
126 * pool_guid Unique identifier for this pool
127 * vdev_tree An nvlist describing vdev tree.
128 * features_for_read
129 * An nvlist of the features necessary for reading the MOS.
130 *
131 * Each leaf device label also contains the following:
132 *
133 * top_guid Unique ID for top-level vdev in which this is contained
134 * guid Unique ID for the leaf vdev
135 *
136 * The 'vs' configuration follows the format described in 'spa_config.c'.
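*
* An abridged example of the resulting nvlist, with hypothetical values,
* as rendered by a tool such as zdb -l:
*
*	version: 5000
*	name: 'tank'
*	state: 0
*	txg: 4
*	pool_guid: <guid>
*	vdev_tree: ...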
137 */
138
139 #include <sys/zfs_context.h>
140 #include <sys/spa.h>
141 #include <sys/spa_impl.h>
142 #include <sys/dmu.h>
143 #include <sys/zap.h>
144 #include <sys/vdev.h>
145 #include <sys/vdev_impl.h>
146 #include <sys/uberblock_impl.h>
147 #include <sys/metaslab.h>
148 #include <sys/metaslab_impl.h>
149 #include <sys/zio.h>
150 #include <sys/dsl_scan.h>
151 #include <sys/abd.h>
152 #include <sys/fs/zfs.h>
153 #include <sys/byteorder.h>
154 #include <sys/zfs_bootenv.h>
155
156 /*
157 * Basic routines to read and write from a vdev label.
158 * Used throughout the rest of this file.
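*
* A typical caller pattern looks like this (sketch; compare
* vdev_label_read_config() below):
*
*	zio = zio_root(spa, NULL, NULL, flags);
*	vdev_label_read(zio, vd, l, vp_abd,
*	    offsetof(vdev_label_t, vl_vdev_phys),
*	    sizeof (vdev_phys_t), NULL, NULL, flags);
*	error = zio_wait(zio);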
159 */
160 uint64_t
161 vdev_label_offset(uint64_t psize, int l, uint64_t offset)
162 {
163 ASSERT(offset < sizeof (vdev_label_t));
164 ASSERT(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t) == 0);
165
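/*
 * Labels L0 and L1 live at the front of the device and L2 and L3 at
 * the end: with VDEV_LABELS == 4 and a 256K vdev_label_t, L0 and L1
 * sit at offsets 0 and 256K, while L2 and L3 sit at psize - 512K and
 * psize - 256K.
 */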
166 return (offset + l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
167 0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
168 }
169
170 /*
171 * Returns the vdev label number associated with the given offset.
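* For example, an offset of 300K maps to label 1 (300K / 256K == 1),
* while an offset of psize - 100K falls in the trailing
* VDEV_LABEL_END_SIZE region and maps to label 3 (assuming psize is
* much larger than the label area).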
172 */
173 int
174 vdev_label_number(uint64_t psize, uint64_t offset)
175 {
176 int l;
177
178 if (offset >= psize - VDEV_LABEL_END_SIZE) {
179 offset -= psize - VDEV_LABEL_END_SIZE;
180 offset += (VDEV_LABELS / 2) * sizeof (vdev_label_t);
181 }
182 l = offset / sizeof (vdev_label_t);
183 return (l < VDEV_LABELS ? l : -1);
184 }
185
186 static void
187 vdev_label_read(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
188 uint64_t size, zio_done_func_t *done, void *private, int flags)
189 {
190 ASSERT(
191 spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
192 spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
193 ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);
194
195 zio_nowait(zio_read_phys(zio, vd,
196 vdev_label_offset(vd->vdev_psize, l, offset),
197 size, buf, ZIO_CHECKSUM_LABEL, done, private,
198 ZIO_PRIORITY_SYNC_READ, flags, B_TRUE));
199 }
200
201 void
202 vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
203 uint64_t size, zio_done_func_t *done, void *private, int flags)
204 {
205 ASSERT(
206 spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
207 spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
208 ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);
209
210 zio_nowait(zio_write_phys(zio, vd,
211 vdev_label_offset(vd->vdev_psize, l, offset),
212 size, buf, ZIO_CHECKSUM_LABEL, done, private,
213 ZIO_PRIORITY_SYNC_WRITE, flags, B_TRUE));
214 }
215
216 /*
217 * Generate the nvlist representing this vdev's stats
218 */
219 void
220 vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
221 {
222 nvlist_t *nvx;
223 vdev_stat_t *vs;
224 vdev_stat_ex_t *vsx;
225
226 vs = kmem_alloc(sizeof (*vs), KM_SLEEP);
227 vsx = kmem_alloc(sizeof (*vsx), KM_SLEEP);
228
229 vdev_get_stats_ex(vd, vs, vsx);
230 fnvlist_add_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
231 (uint64_t *)vs, sizeof (*vs) / sizeof (uint64_t));
232
233 /*
234 * Add extended stats into a special extended stats nvlist. This keeps
235 * all the extended stats nicely grouped together. The extended stats
236 * nvlist is then added to the main nvlist.
237 */
238 nvx = fnvlist_alloc();
239
240 /* ZIOs in flight to disk */
241 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
242 vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_READ]);
243
244 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
245 vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_WRITE]);
246
247 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
248 vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_READ]);
249
250 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
251 vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_WRITE]);
252
253 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
254 vsx->vsx_active_queue[ZIO_PRIORITY_SCRUB]);
255
256 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
257 vsx->vsx_active_queue[ZIO_PRIORITY_TRIM]);
258
259 /* ZIOs pending */
260 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
261 vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_READ]);
262
263 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
264 vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_WRITE]);
265
266 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
267 vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_READ]);
268
269 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
270 vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_WRITE]);
271
272 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
273 vsx->vsx_pend_queue[ZIO_PRIORITY_SCRUB]);
274
275 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
276 vsx->vsx_pend_queue[ZIO_PRIORITY_TRIM]);
277
278 /* Histograms */
279 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
280 vsx->vsx_total_histo[ZIO_TYPE_READ],
281 ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_READ]));
282
283 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
284 vsx->vsx_total_histo[ZIO_TYPE_WRITE],
285 ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_WRITE]));
286
287 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
288 vsx->vsx_disk_histo[ZIO_TYPE_READ],
289 ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_READ]));
290
291 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
292 vsx->vsx_disk_histo[ZIO_TYPE_WRITE],
293 ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_WRITE]));
294
295 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
296 vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ],
297 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ]));
298
299 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
300 vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE],
301 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE]));
302
303 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
304 vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ],
305 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ]));
306
307 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
308 vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE],
309 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE]));
310
311 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
312 vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB],
313 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB]));
314
315 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
316 vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM],
317 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM]));
318
319 /* Request sizes */
320 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
321 vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ],
322 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ]));
323
324 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
325 vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE],
326 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE]));
327
328 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
329 vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ],
330 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ]));
331
332 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
333 vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE],
334 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE]));
335
336 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
337 vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB],
338 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB]));
339
340 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
341 vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM],
342 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM]));
343
344 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
345 vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ],
346 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ]));
347
348 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
349 vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE],
350 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE]));
351
352 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
353 vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ],
354 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ]));
355
356 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
357 vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE],
358 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE]));
359
360 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
361 vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB],
362 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB]));
363
364 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
365 vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM],
366 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM]));
367
368 /* IO delays */
369 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SLOW_IOS, vs->vs_slow_ios);
370
371 /* Add extended stats nvlist to main nvlist */
372 fnvlist_add_nvlist(nv, ZPOOL_CONFIG_VDEV_STATS_EX, nvx);
373
374 nvlist_free(nvx);
375 kmem_free(vs, sizeof (*vs));
376 kmem_free(vsx, sizeof (*vsx));
377 }
378
379 static void
380 root_vdev_actions_getprogress(vdev_t *vd, nvlist_t *nvl)
381 {
382 spa_t *spa = vd->vdev_spa;
383
384 if (vd != spa->spa_root_vdev)
385 return;
386
387 /* provide either current or previous scan information */
388 pool_scan_stat_t ps;
389 if (spa_scan_get_stats(spa, &ps) == 0) {
390 fnvlist_add_uint64_array(nvl,
391 ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps,
392 sizeof (pool_scan_stat_t) / sizeof (uint64_t));
393 }
394
395 pool_removal_stat_t prs;
396 if (spa_removal_get_stats(spa, &prs) == 0) {
397 fnvlist_add_uint64_array(nvl,
398 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t *)&prs,
399 sizeof (prs) / sizeof (uint64_t));
400 }
401
402 pool_checkpoint_stat_t pcs;
403 if (spa_checkpoint_get_stats(spa, &pcs) == 0) {
404 fnvlist_add_uint64_array(nvl,
405 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t *)&pcs,
406 sizeof (pcs) / sizeof (uint64_t));
407 }
408 }
409
410 /*
411 * Generate the nvlist representing this vdev's config.
412 */
413 nvlist_t *
414 vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
415 vdev_config_flag_t flags)
416 {
417 nvlist_t *nv = NULL;
418 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
419
420 nv = fnvlist_alloc();
421
422 fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, vd->vdev_ops->vdev_op_type);
423 if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)))
424 fnvlist_add_uint64(nv, ZPOOL_CONFIG_ID, vd->vdev_id);
425 fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, vd->vdev_guid);
426
427 if (vd->vdev_path != NULL)
428 fnvlist_add_string(nv, ZPOOL_CONFIG_PATH, vd->vdev_path);
429
430 if (vd->vdev_devid != NULL)
431 fnvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vd->vdev_devid);
432
433 if (vd->vdev_physpath != NULL)
434 fnvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
435 vd->vdev_physpath);
436
437 if (vd->vdev_fru != NULL)
438 fnvlist_add_string(nv, ZPOOL_CONFIG_FRU, vd->vdev_fru);
439
440 if (vd->vdev_nparity != 0) {
441 ASSERT(strcmp(vd->vdev_ops->vdev_op_type,
442 VDEV_TYPE_RAIDZ) == 0);
443
444 /*
445 * Make sure someone hasn't managed to sneak a fancy new vdev
446 * into a crufty old storage pool.
447 */
448 ASSERT(vd->vdev_nparity == 1 ||
449 (vd->vdev_nparity <= 2 &&
450 spa_version(spa) >= SPA_VERSION_RAIDZ2) ||
451 (vd->vdev_nparity <= 3 &&
452 spa_version(spa) >= SPA_VERSION_RAIDZ3));
453
454 /*
455 * Note that we'll add the nparity tag even on storage pools
456 * that only support a single parity device -- older software
457 * will just ignore it.
458 */
459 fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vd->vdev_nparity);
460 }
461
462 if (vd->vdev_wholedisk != -1ULL)
463 fnvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
464 vd->vdev_wholedisk);
465
466 if (vd->vdev_not_present && !(flags & VDEV_CONFIG_MISSING))
467 fnvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1);
468
469 if (vd->vdev_isspare)
470 fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);
471
472 if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)) &&
473 vd == vd->vdev_top) {
474 fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
475 vd->vdev_ms_array);
476 fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
477 vd->vdev_ms_shift);
478 fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
479 fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
480 vd->vdev_asize);
481 fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog);
482 if (vd->vdev_removing) {
483 fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVING,
484 vd->vdev_removing);
485 }
486
487 /* zpool command expects alloc class data */
488 if (getstats && vd->vdev_alloc_bias != VDEV_BIAS_NONE) {
489 const char *bias = NULL;
490
491 switch (vd->vdev_alloc_bias) {
492 case VDEV_BIAS_LOG:
493 bias = VDEV_ALLOC_BIAS_LOG;
494 break;
495 case VDEV_BIAS_SPECIAL:
496 bias = VDEV_ALLOC_BIAS_SPECIAL;
497 break;
498 case VDEV_BIAS_DEDUP:
499 bias = VDEV_ALLOC_BIAS_DEDUP;
500 break;
501 default:
502 ASSERT3U(vd->vdev_alloc_bias, ==,
503 VDEV_BIAS_NONE);
504 }
505 fnvlist_add_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
506 bias);
507 }
508 }
509
510 if (vd->vdev_dtl_sm != NULL) {
511 fnvlist_add_uint64(nv, ZPOOL_CONFIG_DTL,
512 space_map_object(vd->vdev_dtl_sm));
513 }
514
515 if (vic->vic_mapping_object != 0) {
516 fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
517 vic->vic_mapping_object);
518 }
519
520 if (vic->vic_births_object != 0) {
521 fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
522 vic->vic_births_object);
523 }
524
525 if (vic->vic_prev_indirect_vdev != UINT64_MAX) {
526 fnvlist_add_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
527 vic->vic_prev_indirect_vdev);
528 }
529
530 if (vd->vdev_crtxg)
531 fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
532
533 if (flags & VDEV_CONFIG_MOS) {
534 if (vd->vdev_leaf_zap != 0) {
535 ASSERT(vd->vdev_ops->vdev_op_leaf);
536 fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_LEAF_ZAP,
537 vd->vdev_leaf_zap);
538 }
539
540 if (vd->vdev_top_zap != 0) {
541 ASSERT(vd == vd->vdev_top);
542 fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
543 vd->vdev_top_zap);
544 }
545
546 if (vd->vdev_resilver_deferred) {
547 ASSERT(vd->vdev_ops->vdev_op_leaf);
548 ASSERT(spa->spa_resilver_deferred);
549 fnvlist_add_boolean(nv, ZPOOL_CONFIG_RESILVER_DEFER);
550 }
551 }
552
553 if (getstats) {
554 vdev_config_generate_stats(vd, nv);
555
556 root_vdev_actions_getprogress(vd, nv);
557
558 /*
559 * Note: this can be called from open context
560 * (spa_get_stats()), so we need the rwlock to prevent
561 * the mapping from being changed by condensing.
562 */
563 rw_enter(&vd->vdev_indirect_rwlock, RW_READER);
564 if (vd->vdev_indirect_mapping != NULL) {
565 ASSERT(vd->vdev_indirect_births != NULL);
566 vdev_indirect_mapping_t *vim =
567 vd->vdev_indirect_mapping;
568 fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
569 vdev_indirect_mapping_size(vim));
570 }
571 rw_exit(&vd->vdev_indirect_rwlock);
572 if (vd->vdev_mg != NULL &&
573 vd->vdev_mg->mg_fragmentation != ZFS_FRAG_INVALID) {
574 /*
575 * Compute approximately how much memory would be used
576 * for the indirect mapping if this device were to
577 * be removed.
578 *
579 * Note: If the frag metric is invalid, then not
580 * enough metaslabs have been converted to have
581 * histograms.
582 */
583 uint64_t seg_count = 0;
584 uint64_t to_alloc = vd->vdev_stat.vs_alloc;
585
586 /*
587 * There are the same number of allocated segments
588 * as free segments, so we will have at least one
589 * entry per free segment. However, small free
590 * segments (smaller than vdev_removal_max_span)
591 * will be combined with adjacent allocated segments
592 * as a single mapping.
593 */
594 for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
595 if (1ULL << (i + 1) < vdev_removal_max_span) {
596 to_alloc +=
597 vd->vdev_mg->mg_histogram[i] <<
598 (i + 1);
599 } else {
600 seg_count +=
601 vd->vdev_mg->mg_histogram[i];
602 }
603 }
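/*
 * For example, assuming the default vdev_removal_max_span of 32K,
 * free segments smaller than 32K are folded into to_alloc, while
 * each larger free segment contributes one mapping entry to
 * seg_count.
 */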
604
605 /*
606 * The maximum length of a mapping is
607 * zfs_remove_max_segment, so we need at least one entry
608 * per zfs_remove_max_segment of allocated data.
609 */
610 seg_count += to_alloc / zfs_remove_max_segment;
611
612 fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
613 seg_count *
614 sizeof (vdev_indirect_mapping_entry_phys_t));
615 }
616 }
617
618 if (!vd->vdev_ops->vdev_op_leaf) {
619 nvlist_t **child;
620 int c, idx;
621
622 ASSERT(!vd->vdev_ishole);
623
624 /*
625 * Indirect vdevs store the remnants of a removed vdev;
626 * they have no direct children, but are not leaf devices.
627 * The content of an indirect device is stored elsewhere
628 * in the pool. We can avoid a pointless zero-length alloc
629 * and return early here.
630 */
631 if (vd->vdev_children == 0)
632 return (nv);
633
634 child = kmem_alloc(vd->vdev_children * sizeof (nvlist_t *),
635 KM_SLEEP);
636
637 for (c = 0, idx = 0; c < vd->vdev_children; c++) {
638 vdev_t *cvd = vd->vdev_child[c];
639
640 /*
641 * If we're generating an nvlist of removing
642 * vdevs then skip over any device which is
643 * not being removed.
644 */
645 if ((flags & VDEV_CONFIG_REMOVING) &&
646 !cvd->vdev_removing)
647 continue;
648
649 child[idx++] = vdev_config_generate(spa, cvd,
650 getstats, flags);
651 }
652
653 if (idx) {
654 fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
655 child, idx);
656 }
657
658 for (c = 0; c < idx; c++)
659 nvlist_free(child[c]);
660
661 kmem_free(child, vd->vdev_children * sizeof (nvlist_t *));
662
663 } else {
664 const char *aux = NULL;
665
666 if (vd->vdev_offline && !vd->vdev_tmpoffline)
667 fnvlist_add_uint64(nv, ZPOOL_CONFIG_OFFLINE, B_TRUE);
668 if (vd->vdev_resilver_txg != 0)
669 fnvlist_add_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
670 vd->vdev_resilver_txg);
671 if (vd->vdev_faulted)
672 fnvlist_add_uint64(nv, ZPOOL_CONFIG_FAULTED, B_TRUE);
673 if (vd->vdev_degraded)
674 fnvlist_add_uint64(nv, ZPOOL_CONFIG_DEGRADED, B_TRUE);
675 if (vd->vdev_removed)
676 fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVED, B_TRUE);
677 if (vd->vdev_unspare)
678 fnvlist_add_uint64(nv, ZPOOL_CONFIG_UNSPARE, B_TRUE);
679 if (vd->vdev_ishole)
680 fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_HOLE, B_TRUE);
681
682 switch (vd->vdev_stat.vs_aux) {
683 case VDEV_AUX_ERR_EXCEEDED:
684 aux = "err_exceeded";
685 break;
686
687 case VDEV_AUX_EXTERNAL:
688 aux = "external";
689 break;
690 }
691
692 if (aux != NULL)
693 fnvlist_add_string(nv, ZPOOL_CONFIG_AUX_STATE, aux);
694
695 if (vd->vdev_splitting && vd->vdev_orig_guid != 0LL) {
696 fnvlist_add_uint64(nv, ZPOOL_CONFIG_ORIG_GUID,
697 vd->vdev_orig_guid);
698 }
699 }
700
701 return (nv);
702 }
703
704 /*
705 * Generate a view of the top-level vdevs. If we currently have holes
706 * in the namespace, then generate an array which contains a list of holey
707 * vdevs. Additionally, add the number of top-level children that currently
708 * exist.
709 */
710 void
711 vdev_top_config_generate(spa_t *spa, nvlist_t *config)
712 {
713 vdev_t *rvd = spa->spa_root_vdev;
714 uint64_t *array;
715 uint_t c, idx;
716
717 array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP);
718
719 for (c = 0, idx = 0; c < rvd->vdev_children; c++) {
720 vdev_t *tvd = rvd->vdev_child[c];
721
722 if (tvd->vdev_ishole) {
723 array[idx++] = c;
724 }
725 }
726
727 if (idx) {
728 VERIFY(nvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY,
729 array, idx) == 0);
730 }
731
732 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
733 rvd->vdev_children) == 0);
734
735 kmem_free(array, rvd->vdev_children * sizeof (uint64_t));
736 }
737
738 /*
739 * Returns the configuration from the label of the given vdev. For vdevs
740 * which don't have a txg value stored on their label (i.e. spares/cache)
741 * or have not been completely initialized (txg = 0), just return
742 * the configuration from the first valid label we find. Otherwise,
743 * find the most up-to-date label that does not exceed the specified
744 * 'txg' value.
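*
* For example, vdev_inuse() passes a txg of -1ULL so that the newest
* valid label is returned regardless of the txg in which it was
* written.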
745 */
746 nvlist_t *
747 vdev_label_read_config(vdev_t *vd, uint64_t txg)
748 {
749 spa_t *spa = vd->vdev_spa;
750 nvlist_t *config = NULL;
751 vdev_phys_t *vp;
752 abd_t *vp_abd;
753 zio_t *zio;
754 uint64_t best_txg = 0;
755 uint64_t label_txg = 0;
756 int error = 0;
757 int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
758 ZIO_FLAG_SPECULATIVE;
759
760 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
761
762 if (!vdev_readable(vd))
763 return (NULL);
764
765 vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
766 vp = abd_to_buf(vp_abd);
767
768 retry:
769 for (int l = 0; l < VDEV_LABELS; l++) {
770 nvlist_t *label = NULL;
771
772 zio = zio_root(spa, NULL, NULL, flags);
773
774 vdev_label_read(zio, vd, l, vp_abd,
775 offsetof(vdev_label_t, vl_vdev_phys),
776 sizeof (vdev_phys_t), NULL, NULL, flags);
777
778 if (zio_wait(zio) == 0 &&
779 nvlist_unpack(vp->vp_nvlist, sizeof (vp->vp_nvlist),
780 &label, 0) == 0) {
781 /*
782 * Auxiliary vdevs won't have txg values in their
783 * labels and newly added vdevs may not have been
784 * completely initialized so just return the
785 * configuration from the first valid label we
786 * encounter.
787 */
788 error = nvlist_lookup_uint64(label,
789 ZPOOL_CONFIG_POOL_TXG, &label_txg);
790 if ((error || label_txg == 0) && !config) {
791 config = label;
792 break;
793 } else if (label_txg <= txg && label_txg > best_txg) {
794 best_txg = label_txg;
795 nvlist_free(config);
796 config = fnvlist_dup(label);
797 }
798 }
799
800 if (label != NULL) {
801 nvlist_free(label);
802 label = NULL;
803 }
804 }
805
806 if (config == NULL && !(flags & ZIO_FLAG_TRYHARD)) {
807 flags |= ZIO_FLAG_TRYHARD;
808 goto retry;
809 }
810
811 /*
812 * We found a valid label but it didn't pass txg restrictions.
813 */
814 if (config == NULL && label_txg != 0) {
815 vdev_dbgmsg(vd, "label discarded as txg is too large "
816 "(%llu > %llu)", (u_longlong_t)label_txg,
817 (u_longlong_t)txg);
818 }
819
820 abd_free(vp_abd);
821
822 return (config);
823 }
824
825 /*
826 * Determine if a device is in use. The 'spare_guid' parameter will be filled
827 * in with the device guid if this spare is active elsewhere on the system.
828 */
829 static boolean_t
830 vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
831 uint64_t *spare_guid, uint64_t *l2cache_guid)
832 {
833 spa_t *spa = vd->vdev_spa;
834 uint64_t state, pool_guid, device_guid, txg, spare_pool;
835 uint64_t vdtxg = 0;
836 nvlist_t *label;
837
838 if (spare_guid)
839 *spare_guid = 0ULL;
840 if (l2cache_guid)
841 *l2cache_guid = 0ULL;
842
843 /*
844 * Read the label, if any, and perform some basic sanity checks.
845 */
846 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL)
847 return (B_FALSE);
848
849 (void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
850 &vdtxg);
851
852 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
853 &state) != 0 ||
854 nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
855 &device_guid) != 0) {
856 nvlist_free(label);
857 return (B_FALSE);
858 }
859
860 if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
861 (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
862 &pool_guid) != 0 ||
863 nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
864 &txg) != 0)) {
865 nvlist_free(label);
866 return (B_FALSE);
867 }
868
869 nvlist_free(label);
870
871 /*
872 * Check to see if this device indeed belongs to the pool it claims to
873 * be a part of. The only way this is allowed is if the device is a hot
874 * spare (which we check for later on).
875 */
876 if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
877 !spa_guid_exists(pool_guid, device_guid) &&
878 !spa_spare_exists(device_guid, NULL, NULL) &&
879 !spa_l2cache_exists(device_guid, NULL))
880 return (B_FALSE);
881
882 /*
883 * If the transaction group is zero, then this is an initialized (but
884 * unused) label. This is only an error if the create transaction
885 * on-disk is the same as the one we're using now, in which case the
886 * user has attempted to add the same vdev multiple times in the same
887 * transaction.
888 */
889 if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
890 txg == 0 && vdtxg == crtxg)
891 return (B_TRUE);
892
893 /*
894 * Check to see if this is a spare device. We do an explicit check for
895 * spa_has_spare() here because it may be on our pending list of spares
896 * to add. We also check if it is an l2cache device.
897 */
898 if (spa_spare_exists(device_guid, &spare_pool, NULL) ||
899 spa_has_spare(spa, device_guid)) {
900 if (spare_guid)
901 *spare_guid = device_guid;
902
903 switch (reason) {
904 case VDEV_LABEL_CREATE:
905 case VDEV_LABEL_L2CACHE:
906 return (B_TRUE);
907
908 case VDEV_LABEL_REPLACE:
909 return (!spa_has_spare(spa, device_guid) ||
910 spare_pool != 0ULL);
911
912 case VDEV_LABEL_SPARE:
913 return (spa_has_spare(spa, device_guid));
914 }
915 }
916
917 /*
918 * Check to see if this is an l2cache device.
919 */
920 if (spa_l2cache_exists(device_guid, NULL))
921 return (B_TRUE);
922
923 /*
924 * We can't rely on a pool's state if it's been imported
925 * read-only. Instead we look to see if the pool is marked
926 * read-only in the namespace and set the state to active.
927 */
928 if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
929 (spa = spa_by_guid(pool_guid, device_guid)) != NULL &&
930 spa_mode(spa) == FREAD)
931 state = POOL_STATE_ACTIVE;
932
933 /*
934 * If the device is marked ACTIVE, then this device is in use by another
935 * pool on the system.
936 */
937 return (state == POOL_STATE_ACTIVE);
938 }
939
940 /*
941 * Initialize a vdev label. We check to make sure each leaf device is not in
942 * use, and writable. We put down an initial label which we will later
943 * overwrite with a complete label. Note that it's important to do this
944 * sequentially, not in parallel, so that we catch cases of multiple use of the
945 * same leaf vdev in the vdev we're creating -- e.g. mirroring a disk with
946 * itself.
947 */
948 int
949 vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
950 {
951 spa_t *spa = vd->vdev_spa;
952 nvlist_t *label;
953 vdev_phys_t *vp;
954 abd_t *vp_abd;
955 abd_t *bootenv;
956 uberblock_t *ub;
957 abd_t *ub_abd;
958 zio_t *zio;
959 char *buf;
960 size_t buflen;
961 int error;
962 uint64_t spare_guid = 0, l2cache_guid;
963 int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
964
965 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
966
967 for (int c = 0; c < vd->vdev_children; c++)
968 if ((error = vdev_label_init(vd->vdev_child[c],
969 crtxg, reason)) != 0)
970 return (error);
971
972 /* Track the creation time for this vdev */
973 vd->vdev_crtxg = crtxg;
974
975 if (!vd->vdev_ops->vdev_op_leaf || !spa_writeable(spa))
976 return (0);
977
978 /*
979 * Dead vdevs cannot be initialized.
980 */
981 if (vdev_is_dead(vd))
982 return (SET_ERROR(EIO));
983
984 /*
985 * Determine if the vdev is in use.
986 */
987 if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPLIT &&
988 vdev_inuse(vd, crtxg, reason, &spare_guid, &l2cache_guid))
989 return (SET_ERROR(EBUSY));
990
991 /*
992 * If this is a request to add or replace a spare or l2cache device
993 * that is in use elsewhere on the system, then we must update the
994 * guid (which was initialized to a random value) to reflect the
995 * actual GUID (which is shared between multiple pools).
996 */
997 if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_L2CACHE &&
998 spare_guid != 0ULL) {
999 uint64_t guid_delta = spare_guid - vd->vdev_guid;
1000
1001 vd->vdev_guid += guid_delta;
1002
1003 for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
1004 pvd->vdev_guid_sum += guid_delta;
1005
1006 /*
1007 * If this is a replacement, then we want to fallthrough to the
1008 * rest of the code. If we're adding a spare, then it's already
1009 * labeled appropriately and we can just return.
1010 */
1011 if (reason == VDEV_LABEL_SPARE)
1012 return (0);
1013 ASSERT(reason == VDEV_LABEL_REPLACE ||
1014 reason == VDEV_LABEL_SPLIT);
1015 }
1016
1017 if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPARE &&
1018 l2cache_guid != 0ULL) {
1019 uint64_t guid_delta = l2cache_guid - vd->vdev_guid;
1020
1021 vd->vdev_guid += guid_delta;
1022
1023 for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
1024 pvd->vdev_guid_sum += guid_delta;
1025
1026 /*
1027 * If this is a replacement, then we want to fallthrough to the
1028 * rest of the code. If we're adding an l2cache, then it's
1029 * already labeled appropriately and we can just return.
1030 */
1031 if (reason == VDEV_LABEL_L2CACHE)
1032 return (0);
1033 ASSERT(reason == VDEV_LABEL_REPLACE);
1034 }
1035
1036 /*
1037 * Initialize its label.
1038 */
1039 vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
1040 abd_zero(vp_abd, sizeof (vdev_phys_t));
1041 vp = abd_to_buf(vp_abd);
1042
1043 /*
1044 * Generate a label describing the pool and our top-level vdev.
1045 * We mark it as being from txg 0 to indicate that it's not
1046 * really part of an active pool just yet. The labels will
1047 * be written again with a meaningful txg by spa_sync().
1048 */
1049 if (reason == VDEV_LABEL_SPARE ||
1050 (reason == VDEV_LABEL_REMOVE && vd->vdev_isspare)) {
1051 /*
1052 * For inactive hot spares, we generate a special label that
1053 * identifies it as a mutually shared hot spare. We write the
1054 * label if we are adding a hot spare, or if we are removing an
1055 * active hot spare (in which case we want to revert the
1056 * labels).
1057 */
1058 VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1059
1060 VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
1061 spa_version(spa)) == 0);
1062 VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
1063 POOL_STATE_SPARE) == 0);
1064 VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
1065 vd->vdev_guid) == 0);
1066 } else if (reason == VDEV_LABEL_L2CACHE ||
1067 (reason == VDEV_LABEL_REMOVE && vd->vdev_isl2cache)) {
1068 /*
1069 * For level 2 ARC devices, add a special label.
1070 */
1071 VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1072
1073 VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
1074 spa_version(spa)) == 0);
1075 VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
1076 POOL_STATE_L2CACHE) == 0);
1077 VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
1078 vd->vdev_guid) == 0);
1079 } else {
1080 uint64_t txg = 0ULL;
1081
1082 if (reason == VDEV_LABEL_SPLIT)
1083 txg = spa->spa_uberblock.ub_txg;
1084 label = spa_config_generate(spa, vd, txg, B_FALSE);
1085
1086 /*
1087 * Add our creation time. This allows us to detect multiple
1088 * vdev uses as described above, and automatically expires if we
1089 * fail.
1090 */
1091 VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
1092 crtxg) == 0);
1093 }
1094
1095 buf = vp->vp_nvlist;
1096 buflen = sizeof (vp->vp_nvlist);
1097
1098 error = nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP);
1099 if (error != 0) {
1100 nvlist_free(label);
1101 abd_free(vp_abd);
1102 /* EFAULT means nvlist_pack ran out of room */
1103 return (error == EFAULT ? ENAMETOOLONG : EINVAL);
1104 }
1105
1106 /*
1107 * Initialize uberblock template.
1108 */
1109 ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_RING, B_TRUE);
1110 abd_zero(ub_abd, VDEV_UBERBLOCK_RING);
1111 abd_copy_from_buf(ub_abd, &spa->spa_uberblock, sizeof (uberblock_t));
1112 ub = abd_to_buf(ub_abd);
1113 ub->ub_txg = 0;
1114
1115 /* Initialize the 2nd padding area. */
1116 bootenv = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
1117 abd_zero(bootenv, VDEV_PAD_SIZE);
1118
1119 /*
1120 * Write everything in parallel.
1121 */
1122 retry:
1123 zio = zio_root(spa, NULL, NULL, flags);
1124
1125 for (int l = 0; l < VDEV_LABELS; l++) {
1126
1127 vdev_label_write(zio, vd, l, vp_abd,
1128 offsetof(vdev_label_t, vl_vdev_phys),
1129 sizeof (vdev_phys_t), NULL, NULL, flags);
1130
1131 /*
1132 * Skip the 1st padding area.
1133 * Zero out the 2nd padding area, which might hold
1134 * leftover data from a previous filesystem format.
1135 */
1136 vdev_label_write(zio, vd, l, bootenv,
1137 offsetof(vdev_label_t, vl_be),
1138 VDEV_PAD_SIZE, NULL, NULL, flags);
1139
1140 vdev_label_write(zio, vd, l, ub_abd,
1141 offsetof(vdev_label_t, vl_uberblock),
1142 VDEV_UBERBLOCK_RING, NULL, NULL, flags);
1143 }
1144
1145 error = zio_wait(zio);
1146
1147 if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
1148 flags |= ZIO_FLAG_TRYHARD;
1149 goto retry;
1150 }
1151
1152 nvlist_free(label);
1153 abd_free(bootenv);
1154 abd_free(ub_abd);
1155 abd_free(vp_abd);
1156
1157 /*
1158 * If this vdev hasn't been previously identified as a spare, then we
1159 * mark it as such only if a) we are labeling it as a spare, or b) it
1160 * exists as a spare elsewhere in the system. Do the same for
1161 * level 2 ARC devices.
1162 */
1163 if (error == 0 && !vd->vdev_isspare &&
1164 (reason == VDEV_LABEL_SPARE ||
1165 spa_spare_exists(vd->vdev_guid, NULL, NULL)))
1166 spa_spare_add(vd);
1167
1168 if (error == 0 && !vd->vdev_isl2cache &&
1169 (reason == VDEV_LABEL_L2CACHE ||
1170 spa_l2cache_exists(vd->vdev_guid, NULL)))
1171 spa_l2cache_add(vd);
1172
1173 return (error);
1174 }
1175
1176 /*
1177 * Done callback for vdev_label_read_bootenv_impl. If this is the first
1178 * callback to finish, store our abd in the callback pointer. Otherwise, we
1179 * just free our abd and return.
1180 */
1181 static void
1182 vdev_label_read_bootenv_done(zio_t *zio)
1183 {
1184 zio_t *rio = zio->io_private;
1185 abd_t **cbp = rio->io_private;
1186
1187 ASSERT3U(zio->io_size, ==, VDEV_PAD_SIZE);
1188
1189 if (zio->io_error == 0) {
1190 mutex_enter(&rio->io_lock);
1191 if (*cbp == NULL) {
1192 /* Will free this buffer in vdev_label_read_bootenv. */
1193 *cbp = zio->io_abd;
1194 } else {
1195 abd_free(zio->io_abd);
1196 }
1197 mutex_exit(&rio->io_lock);
1198 } else {
1199 abd_free(zio->io_abd);
1200 }
1201 }
1202
1203 static void
1204 vdev_label_read_bootenv_impl(zio_t *zio, vdev_t *vd, int flags)
1205 {
1206 for (int c = 0; c < vd->vdev_children; c++)
1207 vdev_label_read_bootenv_impl(zio, vd->vdev_child[c], flags);
1208
1209 /*
1210 * We just use the first label that has a correct checksum; the
1211 * bootloader should have rewritten them all to be the same on boot,
1212 * and any changes we made since boot have been the same across all
1213 * labels.
1214 */
1215 if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
1216 for (int l = 0; l < VDEV_LABELS; l++) {
1217 vdev_label_read(zio, vd, l,
1218 abd_alloc_linear(VDEV_PAD_SIZE, B_FALSE),
1219 offsetof(vdev_label_t, vl_be), VDEV_PAD_SIZE,
1220 vdev_label_read_bootenv_done, zio, flags);
1221 }
1222 }
1223 }
1224
1225 int
1226 vdev_label_read_bootenv(vdev_t *rvd, nvlist_t *bootenv)
1227 {
1228 nvlist_t *config;
1229 spa_t *spa = rvd->vdev_spa;
1230 abd_t *abd = NULL;
1231 int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
1232 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;
1233
1234 ASSERT(bootenv);
1235 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1236
1237 zio_t *zio = zio_root(spa, NULL, &abd, flags);
1238 vdev_label_read_bootenv_impl(zio, rvd, flags);
1239 int err = zio_wait(zio);
1240
1241 if (abd != NULL) {
1242 char *buf;
1243 vdev_boot_envblock_t *vbe = abd_to_buf(abd);
1244
1245 vbe->vbe_version = ntohll(vbe->vbe_version);
1246 switch (vbe->vbe_version) {
1247 case VB_RAW:
1248 /*
1249 * If we have textual data in vbe_bootenv, create an nvlist
1250 * with key "envmap".
1251 */
1252 fnvlist_add_uint64(bootenv, BOOTENV_VERSION, VB_RAW);
1253 vbe->vbe_bootenv[sizeof (vbe->vbe_bootenv) - 1] = '\0';
1254 fnvlist_add_string(bootenv, GRUB_ENVMAP,
1255 vbe->vbe_bootenv);
1256 break;
1257
1258 case VB_NVLIST:
1259 err = nvlist_unpack(vbe->vbe_bootenv,
1260 sizeof (vbe->vbe_bootenv), &config, 0);
1261 if (err == 0) {
1262 fnvlist_merge(bootenv, config);
1263 nvlist_free(config);
1264 break;
1265 }
1266 /* FALLTHROUGH */
1267 default:
1268 /* Check for FreeBSD zfs bootonce command string */
1269 buf = abd_to_buf(abd);
1270 if (*buf == '\0') {
1271 fnvlist_add_uint64(bootenv, BOOTENV_VERSION,
1272 VB_NVLIST);
1273 break;
1274 }
1275 fnvlist_add_string(bootenv, FREEBSD_BOOTONCE, buf);
1276 }
1277
1278 /*
1279 * abd was allocated in vdev_label_read_bootenv_impl()
1280 */
1281 abd_free(abd);
1282 /*
1283 * If we managed to read any successfully,
1284 * return success.
1285 */
1286 return (0);
1287 }
1288 return (err);
1289 }
1290
1291 int
1292 vdev_label_write_bootenv(vdev_t *vd, nvlist_t *env)
1293 {
1294 zio_t *zio;
1295 spa_t *spa = vd->vdev_spa;
1296 vdev_boot_envblock_t *bootenv;
1297 int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
1298 int error;
1299 size_t nvsize;
1300 char *nvbuf;
1301
1302 error = nvlist_size(env, &nvsize, NV_ENCODE_XDR);
1303 if (error != 0)
1304 return (SET_ERROR(error));
1305
1306 if (nvsize >= sizeof (bootenv->vbe_bootenv)) {
1307 return (SET_ERROR(E2BIG));
1308 }
1309
1310 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1311
1312 error = ENXIO;
1313 for (int c = 0; c < vd->vdev_children; c++) {
1314 int child_err;
1315
1316 child_err = vdev_label_write_bootenv(vd->vdev_child[c], env);
1317 /*
1318 * As long as any of the disks managed to write all of their
1319 * labels successfully, return success.
1320 */
1321 if (child_err == 0)
1322 error = child_err;
1323 }
1324
1325 if (!vd->vdev_ops->vdev_op_leaf || vdev_is_dead(vd) ||
1326 !vdev_writeable(vd)) {
1327 return (error);
1328 }
1329 ASSERT3U(sizeof (*bootenv), ==, VDEV_PAD_SIZE);
1330 abd_t *abd = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
1331 abd_zero(abd, VDEV_PAD_SIZE);
1332
1333 bootenv = abd_borrow_buf_copy(abd, VDEV_PAD_SIZE);
1334 nvbuf = bootenv->vbe_bootenv;
1335 nvsize = sizeof (bootenv->vbe_bootenv);
1336
1337 bootenv->vbe_version = fnvlist_lookup_uint64(env, BOOTENV_VERSION);
1338 switch (bootenv->vbe_version) {
1339 case VB_RAW:
1340 if (nvlist_lookup_string(env, GRUB_ENVMAP, &nvbuf) == 0) {
1341 (void) strlcpy(bootenv->vbe_bootenv, nvbuf, nvsize);
1342 }
1343 error = 0;
1344 break;
1345
1346 case VB_NVLIST:
1347 error = nvlist_pack(env, &nvbuf, &nvsize, NV_ENCODE_XDR,
1348 KM_SLEEP);
1349 break;
1350
1351 default:
1352 error = EINVAL;
1353 break;
1354 }
1355
1356 if (error == 0) {
1357 bootenv->vbe_version = htonll(bootenv->vbe_version);
1358 abd_return_buf_copy(abd, bootenv, VDEV_PAD_SIZE);
1359 } else {
1360 abd_free(abd);
1361 return (SET_ERROR(error));
1362 }
1363
1364 retry:
1365 zio = zio_root(spa, NULL, NULL, flags);
1366 for (int l = 0; l < VDEV_LABELS; l++) {
1367 vdev_label_write(zio, vd, l, abd,
1368 offsetof(vdev_label_t, vl_be),
1369 VDEV_PAD_SIZE, NULL, NULL, flags);
1370 }
1371
1372 error = zio_wait(zio);
1373 if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
1374 flags |= ZIO_FLAG_TRYHARD;
1375 goto retry;
1376 }
1377
1378 abd_free(abd);
1379 return (error);
1380 }
1381
1382 /*
1383 * ==========================================================================
1384 * uberblock load/sync
1385 * ==========================================================================
1386 */
1387
1388 /*
1389 * Consider the following situation: txg is safely synced to disk. We've
1390 * written the first uberblock for txg + 1, and then we lose power. When we
1391 * come back up, we fail to see the uberblock for txg + 1 because, say,
1392 * it was on a mirrored device and the replica to which we wrote txg + 1
1393 * is now offline. If we then make some changes and sync txg + 1, and then
1394 * the missing replica comes back, then for a few seconds we'll have two
1395 * conflicting uberblocks on disk with the same txg. The solution is simple:
1396 * among uberblocks with equal txg, choose the one with the latest timestamp.
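*
* For example, given two uberblocks both at txg 100, the one with the
* later timestamp wins; if the timestamps are also equal, the higher
* MMP sequence number (when valid on both) breaks the tie.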
1397 */
1398 static int
1399 vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2)
1400 {
1401 int cmp = TREE_CMP(ub1->ub_txg, ub2->ub_txg);
1402
1403 if (likely(cmp))
1404 return (cmp);
1405
1406 cmp = TREE_CMP(ub1->ub_timestamp, ub2->ub_timestamp);
1407 if (likely(cmp))
1408 return (cmp);
1409
1410 /*
1411 * If MMP_VALID(ub) && MMP_SEQ_VALID(ub) then the host has an MMP-aware
1412 * ZFS, e.g. zfsonlinux >= 0.7.
1413 *
1414 * If one ub has MMP and the other does not, they were written by
1415 * different hosts, which matters for MMP. So we treat no MMP/no SEQ as
1416 * a 0 value.
1417 *
1418 * Since timestamp and txg are the same if we get this far, either is
1419 * acceptable for importing the pool.
1420 */
1421 unsigned int seq1 = 0;
1422 unsigned int seq2 = 0;
1423
1424 if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1))
1425 seq1 = MMP_SEQ(ub1);
1426
1427 if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2))
1428 seq2 = MMP_SEQ(ub2);
1429
1430 return (TREE_CMP(seq1, seq2));
1431 }
1432
1433 struct ubl_cbdata {
1434 uberblock_t *ubl_ubbest; /* Best uberblock */
1435 vdev_t *ubl_vd; /* vdev associated with the above */
1436 };
1437
1438 static void
1439 vdev_uberblock_load_done(zio_t *zio)
1440 {
1441 vdev_t *vd = zio->io_vd;
1442 spa_t *spa = zio->io_spa;
1443 zio_t *rio = zio->io_private;
1444 uberblock_t *ub = abd_to_buf(zio->io_abd);
1445 struct ubl_cbdata *cbp = rio->io_private;
1446
1447 ASSERT3U(zio->io_size, ==, VDEV_UBERBLOCK_SIZE(vd));
1448
1449 if (zio->io_error == 0 && uberblock_verify(ub) == 0) {
1450 mutex_enter(&rio->io_lock);
1451 if (ub->ub_txg <= spa->spa_load_max_txg &&
1452 vdev_uberblock_compare(ub, cbp->ubl_ubbest) > 0) {
1453 /*
1454 * Keep track of the vdev in which this uberblock
1455 * was found. We will use this information later
1456 * to obtain the config nvlist associated with
1457 * this uberblock.
1458 */
1459 *cbp->ubl_ubbest = *ub;
1460 cbp->ubl_vd = vd;
1461 }
1462 mutex_exit(&rio->io_lock);
1463 }
1464
1465 abd_free(zio->io_abd);
1466 }
1467
1468 static void
1469 vdev_uberblock_load_impl(zio_t *zio, vdev_t *vd, int flags,
1470 struct ubl_cbdata *cbp)
1471 {
1472 for (int c = 0; c < vd->vdev_children; c++)
1473 vdev_uberblock_load_impl(zio, vd->vdev_child[c], flags, cbp);
1474
1475 if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
1476 for (int l = 0; l < VDEV_LABELS; l++) {
1477 for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
1478 vdev_label_read(zio, vd, l,
1479 abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd),
1480 B_TRUE), VDEV_UBERBLOCK_OFFSET(vd, n),
1481 VDEV_UBERBLOCK_SIZE(vd),
1482 vdev_uberblock_load_done, zio, flags);
1483 }
1484 }
1485 }
1486 }
1487
1488 /*
1489 * Reads the 'best' uberblock from disk along with its associated
1490 * configuration. First, we read the uberblock array of each label of each
1491 * vdev, keeping track of the uberblock with the highest txg in each array.
1492 * Then, we read the configuration from the same vdev as the best uberblock.
1493 */
1494 void
1495 vdev_uberblock_load(vdev_t *rvd, uberblock_t *ub, nvlist_t **config)
1496 {
1497 zio_t *zio;
1498 spa_t *spa = rvd->vdev_spa;
1499 struct ubl_cbdata cb;
1500 int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
1501 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;
1502
1503 ASSERT(ub);
1504 ASSERT(config);
1505
1506 bzero(ub, sizeof (uberblock_t));
1507 *config = NULL;
1508
1509 cb.ubl_ubbest = ub;
1510 cb.ubl_vd = NULL;
1511
1512 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1513 zio = zio_root(spa, NULL, &cb, flags);
1514 vdev_uberblock_load_impl(zio, rvd, flags, &cb);
1515 (void) zio_wait(zio);
1516
1517 /*
1518 * It's possible that the best uberblock was discovered on a label
1519 * that has a configuration which was written in a future txg.
1520 * Search all labels on this vdev to find the configuration that
1521 * matches the txg for our uberblock.
1522 */
1523 if (cb.ubl_vd != NULL) {
1524 vdev_dbgmsg(cb.ubl_vd, "best uberblock found for spa %s. "
1525 "txg %llu", spa->spa_name, (u_longlong_t)ub->ub_txg);
1526
1527 *config = vdev_label_read_config(cb.ubl_vd, ub->ub_txg);
1528 if (*config == NULL && spa->spa_extreme_rewind) {
1529 vdev_dbgmsg(cb.ubl_vd, "failed to read label config. "
1530 "Trying again without txg restrictions.");
1531 *config = vdev_label_read_config(cb.ubl_vd, UINT64_MAX);
1532 }
1533 if (*config == NULL) {
1534 vdev_dbgmsg(cb.ubl_vd, "failed to read label config");
1535 }
1536 }
1537 spa_config_exit(spa, SCL_ALL, FTAG);
1538 }
1539
1540 /*
1541 * On success, increment root zio's count of good writes.
1542 * We only get credit for writes to known-visible vdevs; see spa_vdev_add().
1543 */
1544 static void
1545 vdev_uberblock_sync_done(zio_t *zio)
1546 {
1547 uint64_t *good_writes = zio->io_private;
1548
1549 if (zio->io_error == 0 && zio->io_vd->vdev_top->vdev_ms_array != 0)
1550 atomic_inc_64(good_writes);
1551 }
1552
1553 /*
1554 * Write the uberblock to all labels of all leaves of the specified vdev.
1555 */
1556 static void
1557 vdev_uberblock_sync(zio_t *zio, uint64_t *good_writes,
1558 uberblock_t *ub, vdev_t *vd, int flags)
1559 {
1560 for (uint64_t c = 0; c < vd->vdev_children; c++) {
1561 vdev_uberblock_sync(zio, good_writes,
1562 ub, vd->vdev_child[c], flags);
1563 }
1564
1565 if (!vd->vdev_ops->vdev_op_leaf)
1566 return;
1567
1568 if (!vdev_writeable(vd))
1569 return;
1570
1571 int m = spa_multihost(vd->vdev_spa) ? MMP_BLOCKS_PER_LABEL : 0;
1572 int n = ub->ub_txg % (VDEV_UBERBLOCK_COUNT(vd) - m);
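/*
 * Example: with a 1K uberblock (a 128-slot ring) and multihost off,
 * txg 1000 lands in slot 1000 % 128 == 104. With multihost on, the
 * last MMP_BLOCKS_PER_LABEL slots are reserved for mmp writes and
 * the modulus shrinks accordingly.
 */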
1573
1574 /* Copy the uberblock_t into the ABD */
1575 abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
1576 abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
1577 abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));
1578
1579 for (int l = 0; l < VDEV_LABELS; l++)
1580 vdev_label_write(zio, vd, l, ub_abd,
1581 VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
1582 vdev_uberblock_sync_done, good_writes,
1583 flags | ZIO_FLAG_DONT_PROPAGATE);
1584
1585 abd_free(ub_abd);
1586 }
1587
1588 /* Sync the uberblocks to all vdevs in svd[] */
1589 int
1590 vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
1591 {
1592 spa_t *spa = svd[0]->vdev_spa;
1593 zio_t *zio;
1594 uint64_t good_writes = 0;
1595
1596 zio = zio_root(spa, NULL, NULL, flags);
1597
1598 for (int v = 0; v < svdcount; v++)
1599 vdev_uberblock_sync(zio, &good_writes, ub, svd[v], flags);
1600
1601 (void) zio_wait(zio);
1602
1603 /*
1604 * Flush the uberblocks to disk. This ensures that the odd labels
1605 * are no longer needed (because the new uberblocks and the even
1606 * labels are safely on disk), so it is safe to overwrite them.
1607 */
1608 zio = zio_root(spa, NULL, NULL, flags);
1609
1610 for (int v = 0; v < svdcount; v++) {
1611 if (vdev_writeable(svd[v])) {
1612 zio_flush(zio, svd[v]);
1613 }
1614 }
1615
1616 (void) zio_wait(zio);
1617
1618 return (good_writes >= 1 ? 0 : EIO);
1619 }
1620
1621 /*
1622 * On success, increment the count of good writes for our top-level vdev.
1623 */
1624 static void
1625 vdev_label_sync_done(zio_t *zio)
1626 {
1627 uint64_t *good_writes = zio->io_private;
1628
1629 if (zio->io_error == 0)
1630 atomic_inc_64(good_writes);
1631 }
1632
1633 /*
1634 * If there weren't enough good writes, indicate failure to the parent.
1635 */
1636 static void
1637 vdev_label_sync_top_done(zio_t *zio)
1638 {
1639 uint64_t *good_writes = zio->io_private;
1640
1641 if (*good_writes == 0)
1642 zio->io_error = SET_ERROR(EIO);
1643
1644 kmem_free(good_writes, sizeof (uint64_t));
1645 }
1646
1647 /*
1648 * We ignore errors for log and cache devices, simply free the private data.
1649 */
1650 static void
1651 vdev_label_sync_ignore_done(zio_t *zio)
1652 {
1653 kmem_free(zio->io_private, sizeof (uint64_t));
1654 }
1655
1656 /*
1657 * Write all even or odd labels to all leaves of the specified vdev.
1658 */
1659 static void
1660 vdev_label_sync(zio_t *zio, uint64_t *good_writes,
1661 vdev_t *vd, int l, uint64_t txg, int flags)
1662 {
1663 nvlist_t *label;
1664 vdev_phys_t *vp;
1665 abd_t *vp_abd;
1666 char *buf;
1667 size_t buflen;
1668
1669 for (int c = 0; c < vd->vdev_children; c++) {
1670 vdev_label_sync(zio, good_writes,
1671 vd->vdev_child[c], l, txg, flags);
1672 }
1673
1674 if (!vd->vdev_ops->vdev_op_leaf)
1675 return;
1676
1677 if (!vdev_writeable(vd))
1678 return;
1679
1680 /*
1681 * Generate a label describing the top-level config to which we belong.
1682 */
1683 label = spa_config_generate(vd->vdev_spa, vd, txg, B_FALSE);
1684
1685 vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
1686 abd_zero(vp_abd, sizeof (vdev_phys_t));
1687 vp = abd_to_buf(vp_abd);
1688
1689 buf = vp->vp_nvlist;
1690 buflen = sizeof (vp->vp_nvlist);
1691
1692 if (nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP) == 0) {
1693 for (; l < VDEV_LABELS; l += 2) {
1694 vdev_label_write(zio, vd, l, vp_abd,
1695 offsetof(vdev_label_t, vl_vdev_phys),
1696 sizeof (vdev_phys_t),
1697 vdev_label_sync_done, good_writes,
1698 flags | ZIO_FLAG_DONT_PROPAGATE);
1699 }
1700 }
1701
1702 abd_free(vp_abd);
1703 nvlist_free(label);
1704 }
1705
1706 int
1707 vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
1708 {
1709 list_t *dl = &spa->spa_config_dirty_list;
1710 vdev_t *vd;
1711 zio_t *zio;
1712 int error;
1713
1714 /*
1715 * Write the new labels to disk.
1716 */
1717 zio = zio_root(spa, NULL, NULL, flags);
1718
1719 for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) {
1720 uint64_t *good_writes = kmem_zalloc(sizeof (uint64_t),
1721 KM_SLEEP);
1722
1723 ASSERT(!vd->vdev_ishole);
1724
1725 zio_t *vio = zio_null(zio, spa, NULL,
1726 (vd->vdev_islog || vd->vdev_aux != NULL) ?
1727 vdev_label_sync_ignore_done : vdev_label_sync_top_done,
1728 good_writes, flags);
1729 vdev_label_sync(vio, good_writes, vd, l, txg, flags);
1730 zio_nowait(vio);
1731 }
1732
1733 error = zio_wait(zio);
1734
1735 /*
1736 * Flush the new labels to disk.
1737 */
1738 zio = zio_root(spa, NULL, NULL, flags);
1739
1740 for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd))
1741 zio_flush(zio, vd);
1742
1743 (void) zio_wait(zio);
1744
1745 return (error);
1746 }
1747
1748 /*
1749 * Sync the uberblock and any changes to the vdev configuration.
1750 *
1751 * The order of operations is carefully crafted to ensure that
1752 * if the system panics or loses power at any time, the state on disk
1753 * is still transactionally consistent. The in-line comments below
1754 * describe the failure semantics at each stage.
1755 *
1756 * Moreover, vdev_config_sync() is designed to be idempotent: if it fails
1757 * at any time, you can just call it again, and it will resume its work.
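*
* In outline, the steps below are:
*
*	1. flush the write caches of all dirty vdevs
*	2. sync out the even labels (L0, L2), then flush them
*	3. sync the uberblocks to the vdevs in svd[], then flush them
*	4. sync out the odd labels (L1, L3), then flush them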
1758 */
1759 int
1760 vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg)
1761 {
1762 spa_t *spa = svd[0]->vdev_spa;
1763 uberblock_t *ub = &spa->spa_uberblock;
1764 int error = 0;
1765 int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
1766
1767 ASSERT(svdcount != 0);
1768 retry:
1769 /*
1770 * Normally, we don't want to try too hard to write every label and
1771 * uberblock. If there is a flaky disk, we don't want the rest of the
1772 * sync process to block while we retry. But if we can't write a
1773 * single label out, we should retry with ZIO_FLAG_TRYHARD before
1774 * bailing out and declaring the pool faulted.
1775 */
1776 if (error != 0) {
1777 if ((flags & ZIO_FLAG_TRYHARD) != 0)
1778 return (error);
1779 flags |= ZIO_FLAG_TRYHARD;
1780 }
1781
1782 ASSERT(ub->ub_txg <= txg);
1783
1784 /*
1785 * If this isn't a resync due to I/O errors,
1786 * and nothing changed in this transaction group,
1787 * and the vdev configuration hasn't changed,
1788 * then there's nothing to do.
1789 */
1790 if (ub->ub_txg < txg) {
1791 boolean_t changed = uberblock_update(ub, spa->spa_root_vdev,
1792 txg, spa->spa_mmp.mmp_delay);
1793
1794 if (!changed && list_is_empty(&spa->spa_config_dirty_list))
1795 return (0);
1796 }
1797
1798 if (txg > spa_freeze_txg(spa))
1799 return (0);
1800
1801 ASSERT(txg <= spa->spa_final_txg);
1802
1803 /*
1804 * Flush the write cache of every disk that's been written to
1805 * in this transaction group. This ensures that all blocks
1806 * written in this txg will be committed to stable storage
1807 * before any uberblock that references them.
1808 */
1809 zio_t *zio = zio_root(spa, NULL, NULL, flags);
1810
1811 for (vdev_t *vd =
1812 txg_list_head(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)); vd != NULL;
1813 vd = txg_list_next(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)))
1814 zio_flush(zio, vd);
1815
1816 (void) zio_wait(zio);
1817
1818 /*
1819 * Sync out the even labels (L0, L2) for every dirty vdev. If the
1820 * system dies in the middle of this process, that's OK: all of the
1821 * even labels that made it to disk will be newer than any uberblock,
1822 * and will therefore be considered invalid. The odd labels (L1, L3),
1823 * which have not yet been touched, will still be valid. We flush
1824 * the new labels to disk to ensure that all even-label updates
1825 * are committed to stable storage before the uberblock update.
1826 */
1827 if ((error = vdev_label_sync_list(spa, 0, txg, flags)) != 0) {
1828 if ((flags & ZIO_FLAG_TRYHARD) != 0) {
1829 zfs_dbgmsg("vdev_label_sync_list() returned error %d "
1830 "for pool '%s' when syncing out the even labels "
1831 "of dirty vdevs", error, spa_name(spa));
1832 }
1833 goto retry;
1834 }
1835
1836 /*
1837 * Sync the uberblocks to all vdevs in svd[].
1838 * If the system dies in the middle of this step, there are two cases
1839 * to consider, and the on-disk state is consistent either way:
1840 *
1841 * (1) If none of the new uberblocks made it to disk, then the
1842 * previous uberblock will be the newest, and the odd labels
1843 * (which had not yet been touched) will be valid with respect
1844 * to that uberblock.
1845 *
1846 * (2) If one or more new uberblocks made it to disk, then they
1847 * will be the newest, and the even labels (which had all
1848 * been successfully committed) will be valid with respect
1849 * to the new uberblocks.
1850 */
1851 if ((error = vdev_uberblock_sync_list(svd, svdcount, ub, flags)) != 0) {
1852 if ((flags & ZIO_FLAG_TRYHARD) != 0) {
1853 zfs_dbgmsg("vdev_uberblock_sync_list() returned error "
1854 "%d for pool '%s'", error, spa_name(spa));
1855 }
1856 goto retry;
1857 }
1858
1859 if (spa_multihost(spa))
1860 mmp_update_uberblock(spa, ub);
1861
1862 /*
1863 * Sync out odd labels for every dirty vdev. If the system dies
1864 * in the middle of this process, the even labels and the new
1865 * uberblocks will suffice to open the pool. The next time
1866 * the pool is opened, the first thing we'll do -- before any
1867 * user data is modified -- is mark every vdev dirty so that
1868 * all labels will be brought up to date. We flush the new labels
1869 * to disk to ensure that all odd-label updates are committed to
1870 * stable storage before the next transaction group begins.
1871 */
1872 if ((error = vdev_label_sync_list(spa, 1, txg, flags)) != 0) {
1873 if ((flags & ZIO_FLAG_TRYHARD) != 0) {
1874 zfs_dbgmsg("vdev_label_sync_list() returned error %d "
1875 "for pool '%s' when syncing out the odd labels of "
1876 "dirty vdevs", error, spa_name(spa));
1877 }
1878 goto retry;
1879 }
1880
1881 return (0);
1882 }
1883