// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Portions Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>
 */

#include <sys/zfs_context.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_os.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <vm/vm_page.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#include <geom/geom_int.h>

#ifndef g_topology_locked
#define	g_topology_locked()	sx_xlocked(&topology_lock)
#endif

/*
 * Virtual device vector for GEOM.
 */

static g_attrchanged_t vdev_geom_attrchanged;
struct g_class zfs_vdev_class = {
	.name = "ZFS::VDEV",
	.version = G_VERSION,
	.attrchanged = vdev_geom_attrchanged,
};

struct consumer_vdev_elem {
	SLIST_ENTRY(consumer_vdev_elem)	elems;
	vdev_t	*vd;
};

SLIST_HEAD(consumer_priv_t, consumer_vdev_elem);
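/*
 * A consumer may be shared by several vdevs; the list of vdevs using it is
 * stored directly in g_consumer.private, so the list head must fit in a
 * pointer-sized field.
 */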
_Static_assert(
    sizeof (((struct g_consumer *)NULL)->private) ==
    sizeof (struct consumer_priv_t *),
    "consumer_priv_t* can't be stored in g_consumer.private");

DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);

SYSCTL_DECL(_vfs_zfs_vdev);
/* Don't send BIO_FLUSH. */
static int vdev_geom_bio_flush_disable;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RWTUN,
    &vdev_geom_bio_flush_disable, 0, "Disable BIO_FLUSH");
/* Don't send BIO_DELETE. */
static int vdev_geom_bio_delete_disable;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_delete_disable, CTLFLAG_RWTUN,
    &vdev_geom_bio_delete_disable, 0, "Disable BIO_DELETE");

/* Declare local functions */
static void vdev_geom_detach(struct g_consumer *cp, boolean_t open_for_read);

/*
 * Thread-local storage used to indicate when a thread is probing geoms
 * for their guids.  If NULL, this thread is not tasting geoms.  If non-NULL,
 * it is looking for a replacement for the vdev_t* that is its value.
 */
uint_t zfs_geom_probe_vdev_key;

static void
vdev_geom_set_physpath(vdev_t *vd, struct g_consumer *cp,
    boolean_t do_null_update)
{
	boolean_t needs_update = B_FALSE;
	char *physpath;
	int error, physpath_len;

	physpath_len = MAXPATHLEN;
	physpath = g_malloc(physpath_len, M_WAITOK|M_ZERO);
	error = g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath);
	if (error == 0) {
		char *old_physpath;

		/* g_topology lock ensures that vdev has not been closed */
		g_topology_assert();
		old_physpath = vd->vdev_physpath;
		vd->vdev_physpath = spa_strdup(physpath);

		if (old_physpath != NULL) {
			needs_update = (strcmp(old_physpath,
			    vd->vdev_physpath) != 0);
			spa_strfree(old_physpath);
		} else
			needs_update = do_null_update;
	}
	g_free(physpath);

	/*
	 * If the physical path changed, update the config.
	 * Only request an update for previously unset physpaths if
	 * requested by the caller.
	 */
	if (needs_update)
		spa_async_request(vd->vdev_spa, SPA_ASYNC_CONFIG_UPDATE);
}

static void
vdev_geom_attrchanged(struct g_consumer *cp, const char *attr)
{
	struct consumer_priv_t *priv;
	struct consumer_vdev_elem *elem;

	priv = (struct consumer_priv_t *)&cp->private;
	if (SLIST_EMPTY(priv))
		return;

	SLIST_FOREACH(elem, priv, elems) {
		vdev_t *vd = elem->vd;
		if (strcmp(attr, "GEOM::physpath") == 0) {
			vdev_geom_set_physpath(vd, cp, /* null_update */B_TRUE);
			return;
		}
	}
}

static void
vdev_geom_resize(struct g_consumer *cp)
{
	struct consumer_priv_t *priv;
	struct consumer_vdev_elem *elem;
	spa_t *spa;
	vdev_t *vd;

	priv = (struct consumer_priv_t *)&cp->private;
	if (SLIST_EMPTY(priv))
		return;

	SLIST_FOREACH(elem, priv, elems) {
		vd = elem->vd;
		if (vd->vdev_state != VDEV_STATE_HEALTHY)
			continue;
		spa = vd->vdev_spa;
		if (!spa->spa_autoexpand)
			continue;
		vdev_online(spa, vd->vdev_guid, ZFS_ONLINE_EXPAND, NULL);
	}
}

static void
vdev_geom_orphan(struct g_consumer *cp)
{
	struct consumer_priv_t *priv;
	// cppcheck-suppress uninitvar
	struct consumer_vdev_elem *elem;

	g_topology_assert();

	priv = (struct consumer_priv_t *)&cp->private;
	if (SLIST_EMPTY(priv))
		/* Vdev close in progress.  Ignore the event. */
		return;

	/*
	 * Orphan callbacks occur from the GEOM event thread.
	 * Concurrent with this call, new I/O requests may be
	 * working their way through GEOM about to find out
	 * (only once executed by the g_down thread) that we've
	 * been orphaned from our disk provider.  These I/Os
	 * must be retired before we can detach our consumer.
	 * This is most easily achieved by acquiring the
	 * SPA ZIO configuration lock as a writer, but doing
	 * so with the GEOM topology lock held would cause
	 * a lock order reversal.  Instead, rely on the SPA's
	 * async removal support to invoke a close on this
	 * vdev once it is safe to do so.
	 */
	SLIST_FOREACH(elem, priv, elems) {
		// cppcheck-suppress uninitvar
		vdev_t *vd = elem->vd;

		vd->vdev_remove_wanted = B_TRUE;
		spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);
	}
}

static struct g_consumer *
vdev_geom_attach(struct g_provider *pp, vdev_t *vd, boolean_t sanity)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;

	g_topology_assert();

	ZFS_LOG(1, "Attaching to %s.", pp->name);

	if (sanity) {
		if (pp->sectorsize > VDEV_PAD_SIZE || !ISP2(pp->sectorsize)) {
			ZFS_LOG(1, "Failing attach of %s. "
			    "Incompatible sectorsize %d\n",
			    pp->name, pp->sectorsize);
			return (NULL);
		} else if (pp->mediasize < SPA_MINDEVSIZE) {
			ZFS_LOG(1, "Failing attach of %s. "
			    "Incompatible mediasize %ju\n",
			    pp->name, pp->mediasize);
			return (NULL);
		}
	}

	/* Do we already have a geom?  If not, create one. */
	LIST_FOREACH(gp, &zfs_vdev_class.geom, geom) {
		if (gp->flags & G_GEOM_WITHER)
			continue;
		if (strcmp(gp->name, "zfs::vdev") != 0)
			continue;
		break;
	}
	if (gp == NULL) {
		gp = g_new_geomf(&zfs_vdev_class, "zfs::vdev");
		gp->orphan = vdev_geom_orphan;
		gp->attrchanged = vdev_geom_attrchanged;
		gp->resize = vdev_geom_resize;
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error != 0) {
			ZFS_LOG(1, "%s(%d): g_attach failed: %d\n", __func__,
			    __LINE__, error);
			vdev_geom_detach(cp, B_FALSE);
			return (NULL);
		}
		error = g_access(cp, 1, 0, 1);
		if (error != 0) {
			ZFS_LOG(1, "%s(%d): g_access failed: %d\n", __func__,
			    __LINE__, error);
			vdev_geom_detach(cp, B_FALSE);
			return (NULL);
		}
		ZFS_LOG(1, "Created geom and consumer for %s.", pp->name);
	} else {
		/* Check if we are already connected to this provider. */
		LIST_FOREACH(cp, &gp->consumer, consumer) {
			if (cp->provider == pp) {
				ZFS_LOG(1, "Found consumer for %s.", pp->name);
				break;
			}
		}
		if (cp == NULL) {
			cp = g_new_consumer(gp);
			error = g_attach(cp, pp);
			if (error != 0) {
				ZFS_LOG(1, "%s(%d): g_attach failed: %d\n",
				    __func__, __LINE__, error);
				vdev_geom_detach(cp, B_FALSE);
				return (NULL);
			}
			error = g_access(cp, 1, 0, 1);
			if (error != 0) {
				ZFS_LOG(1, "%s(%d): g_access failed: %d\n",
				    __func__, __LINE__, error);
				vdev_geom_detach(cp, B_FALSE);
				return (NULL);
			}
			ZFS_LOG(1, "Created consumer for %s.", pp->name);
		} else {
			error = g_access(cp, 1, 0, 1);
			if (error != 0) {
				ZFS_LOG(1, "%s(%d): g_access failed: %d\n",
				    __func__, __LINE__, error);
				return (NULL);
			}
			ZFS_LOG(1, "Used existing consumer for %s.", pp->name);
		}
	}

	if (vd != NULL)
		vd->vdev_tsd = cp;

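	/*
	 * Allow GEOM to hand bios to this consumer directly, bypassing the
	 * g_up/g_down kernel threads.
	 */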
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	return (cp);
}

static void
vdev_geom_detach(struct g_consumer *cp, boolean_t open_for_read)
{
	struct g_geom *gp;

	g_topology_assert();

	ZFS_LOG(1, "Detaching from %s.",
	    cp->provider && cp->provider->name ? cp->provider->name : "NULL");

	gp = cp->geom;
	if (open_for_read)
		g_access(cp, -1, 0, -1);
	/* Destroy consumer on last close. */
	if (cp->acr == 0 && cp->ace == 0) {
		if (cp->acw > 0)
			g_access(cp, 0, -cp->acw, 0);
		if (cp->provider != NULL) {
			ZFS_LOG(1, "Destroying consumer for %s.",
			    cp->provider->name ? cp->provider->name : "NULL");
			g_detach(cp);
		}
		g_destroy_consumer(cp);
	}
	/* Destroy geom if there are no consumers left. */
	if (LIST_EMPTY(&gp->consumer)) {
		ZFS_LOG(1, "Destroyed geom %s.", gp->name);
		g_wither_geom(gp, ENXIO);
	}
}

static void
vdev_geom_close_locked(vdev_t *vd)
{
	struct g_consumer *cp;
	struct consumer_priv_t *priv;
	struct consumer_vdev_elem *elem, *elem_temp;

	g_topology_assert();

	cp = vd->vdev_tsd;
	vd->vdev_delayed_close = B_FALSE;
	if (cp == NULL)
		return;

	ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
	KASSERT(cp->private != NULL, ("%s: cp->private is NULL", __func__));
	priv = (struct consumer_priv_t *)&cp->private;
	vd->vdev_tsd = NULL;
	SLIST_FOREACH_SAFE(elem, priv, elems, elem_temp) {
		if (elem->vd == vd) {
			SLIST_REMOVE(priv, elem, consumer_vdev_elem, elems);
			g_free(elem);
		}
	}

	vdev_geom_detach(cp, B_TRUE);
}

/*
 * Issue one or more bios to the vdev in parallel.
 * cmds, datas, offsets, errors, and sizes are arrays of length ncmds.  Each
 * I/O operation is described by parallel entries from each array.  There may
 * be more bios actually issued than entries in the arrays.
 */
static void
vdev_geom_io(struct g_consumer *cp, int *cmds, void **datas, off_t *offsets,
    off_t *sizes, int *errors, int ncmds)
{
	struct bio **bios;
	uint8_t *p;
	off_t off, maxio, s, end;
	int i, n_bios, j;
	size_t bios_size;

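	/*
	 * Split each command into bios no larger than maxphys, rounded down
	 * to a multiple of the provider's sector size so every bio stays
	 * sector-aligned.
	 */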
	maxio = maxphys - (maxphys % cp->provider->sectorsize);
	n_bios = 0;

	/* How many bios are required for all commands? */
	for (i = 0; i < ncmds; i++)
		n_bios += (sizes[i] + maxio - 1) / maxio;

	/* Allocate memory for the bios */
	bios_size = n_bios * sizeof (struct bio *);
	bios = kmem_zalloc(bios_size, KM_SLEEP);

	/* Prepare and issue all of the bios */
	for (i = j = 0; i < ncmds; i++) {
		off = offsets[i];
		p = datas[i];
		s = sizes[i];
		end = off + s;
		ASSERT0(off % cp->provider->sectorsize);
		ASSERT0(s % cp->provider->sectorsize);

		for (; off < end; off += maxio, p += maxio, s -= maxio, j++) {
			bios[j] = g_alloc_bio();
			bios[j]->bio_cmd = cmds[i];
			bios[j]->bio_done = NULL;
			bios[j]->bio_offset = off;
			bios[j]->bio_length = MIN(s, maxio);
			bios[j]->bio_data = (caddr_t)p;
			g_io_request(bios[j], cp);
		}
	}
	ASSERT3S(j, ==, n_bios);

	/* Wait for all of the bios to complete, and clean them up */
	for (i = j = 0; i < ncmds; i++) {
		off = offsets[i];
		s = sizes[i];
		end = off + s;

		for (; off < end; off += maxio, s -= maxio, j++) {
			errors[i] = biowait(bios[j], "vdev_geom_io") ||
			    errors[i];
			g_destroy_bio(bios[j]);
		}
	}
	kmem_free(bios, bios_size);
}

/*
 * Read the vdev config from a device.  Return the number of valid labels that
 * were found.  The vdev config will be returned in *configp if and only if at
 * least one valid label was found.
 */
static int
vdev_geom_read_config(struct g_consumer *cp, nvlist_t **configp)
{
	struct g_provider *pp;
	nvlist_t *config;
	vdev_phys_t *vdev_lists[VDEV_LABELS];
	char *buf;
	size_t buflen;
	uint64_t psize, state, txg;
	off_t offsets[VDEV_LABELS];
	off_t size;
	off_t sizes[VDEV_LABELS];
	int cmds[VDEV_LABELS];
	int errors[VDEV_LABELS];
	int l, nlabels;

	g_topology_assert_not();

	pp = cp->provider;
	ZFS_LOG(1, "Reading config from %s...", pp->name);

	psize = pp->mediasize;
	psize = P2ALIGN_TYPED(psize, sizeof (vdev_label_t), uint64_t);

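	/*
	 * Round the vdev_phys_t read size up to a whole number of sectors so
	 * each label read is sector-aligned.
	 */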
	size = sizeof (*vdev_lists[0]) + pp->sectorsize -
	    ((sizeof (*vdev_lists[0]) - 1) % pp->sectorsize) - 1;

	buflen = sizeof (vdev_lists[0]->vp_nvlist);

	/* Create all of the IO requests */
	for (l = 0; l < VDEV_LABELS; l++) {
		cmds[l] = BIO_READ;
		vdev_lists[l] = kmem_alloc(size, KM_SLEEP);
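		/*
		 * Skip the pad and boot-env areas at the start of each label
		 * so the read lands on the embedded vdev_phys_t nvlist.
		 */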
		offsets[l] = vdev_label_offset(psize, l, 0) + VDEV_SKIP_SIZE;
		sizes[l] = size;
		errors[l] = 0;
		ASSERT0(offsets[l] % pp->sectorsize);
	}

	/* Issue the IO requests */
	vdev_geom_io(cp, cmds, (void**)vdev_lists, offsets, sizes, errors,
	    VDEV_LABELS);

	/* Parse the labels */
	config = *configp = NULL;
	nlabels = 0;
	for (l = 0; l < VDEV_LABELS; l++) {
		if (errors[l] != 0)
			continue;

		buf = vdev_lists[l]->vp_nvlist;

		if (nvlist_unpack(buf, buflen, &config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(config);
			continue;
		}

		if (state != POOL_STATE_SPARE &&
		    state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(config);
			continue;
		}

		if (*configp != NULL)
			nvlist_free(*configp);
		*configp = config;
		nlabels++;
	}

	/* Free the label storage */
	for (l = 0; l < VDEV_LABELS; l++)
		kmem_free(vdev_lists[l], size);

	return (nlabels);
}

static void
resize_configs(nvlist_t ***configs, uint64_t *count, uint64_t id)
{
	nvlist_t **new_configs;
	uint64_t i;

	if (id < *count)
		return;
	new_configs = kmem_zalloc((id + 1) * sizeof (nvlist_t *),
	    KM_SLEEP);
	for (i = 0; i < *count; i++)
		new_configs[i] = (*configs)[i];
	if (*configs != NULL)
		kmem_free(*configs, *count * sizeof (void *));
	*configs = new_configs;
	*count = id + 1;
}

static void
process_vdev_config(nvlist_t ***configs, uint64_t *count, nvlist_t *cfg,
    const char *name, uint64_t *known_pool_guid)
{
	nvlist_t *vdev_tree;
	uint64_t pool_guid;
	uint64_t vdev_guid;
	uint64_t id, txg, known_txg;
	const char *pname;

	if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &pname) != 0 ||
	    strcmp(pname, name) != 0)
		goto ignore;

	if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &pool_guid) != 0)
		goto ignore;

	if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_TOP_GUID, &vdev_guid) != 0)
		goto ignore;

	if (nvlist_lookup_nvlist(cfg, ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0)
		goto ignore;

	if (nvlist_lookup_uint64(vdev_tree, ZPOOL_CONFIG_ID, &id) != 0)
		goto ignore;

	txg = fnvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_TXG);

	if (*known_pool_guid != 0) {
		if (pool_guid != *known_pool_guid)
			goto ignore;
	} else
		*known_pool_guid = pool_guid;

	resize_configs(configs, count, id);

	if ((*configs)[id] != NULL) {
		known_txg = fnvlist_lookup_uint64((*configs)[id],
		    ZPOOL_CONFIG_POOL_TXG);
		if (txg <= known_txg)
			goto ignore;
		nvlist_free((*configs)[id]);
	}

	(*configs)[id] = cfg;
	return;

ignore:
	nvlist_free(cfg);
}

int
vdev_geom_read_pool_label(const char *name,
    nvlist_t ***configs, uint64_t *count)
{
	struct g_class *mp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *zcp;
	nvlist_t *vdev_cfg;
	uint64_t pool_guid;
	int nlabels;

	DROP_GIANT();
	g_topology_lock();

	*configs = NULL;
	*count = 0;
	pool_guid = 0;
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp == &zfs_vdev_class)
			continue;
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (gp->flags & G_GEOM_WITHER)
				continue;
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (pp->flags & G_PF_WITHER)
					continue;
				zcp = vdev_geom_attach(pp, NULL, B_TRUE);
				if (zcp == NULL)
					continue;
				g_topology_unlock();
				nlabels = vdev_geom_read_config(zcp, &vdev_cfg);
				g_topology_lock();
				vdev_geom_detach(zcp, B_TRUE);
				if (nlabels == 0)
					continue;
				ZFS_LOG(1, "successfully read vdev config");

				process_vdev_config(configs, count,
				    vdev_cfg, name, &pool_guid);
			}
		}
	}
	g_topology_unlock();
	PICKUP_GIANT();

	return (*count > 0 ? 0 : ENOENT);
}

enum match {
	NO_MATCH = 0,		/* No matching labels found */
	TOPGUID_MATCH = 1,	/* Labels match top guid, not vdev guid */
	ZERO_MATCH = 1,		/* Should never be returned */
	ONE_MATCH = 2,		/* One label matches the vdev_guid */
	TWO_MATCH = 3,		/* Two labels match the vdev_guid */
	THREE_MATCH = 4,	/* Three labels match the vdev_guid */
	FULL_MATCH = 5		/* All labels match the vdev_guid */
};

static enum match
vdev_attach_ok(vdev_t *vd, struct g_provider *pp)
{
	nvlist_t *config;
	uint64_t pool_guid, top_guid, vdev_guid;
	struct g_consumer *cp;
	int nlabels;

	cp = vdev_geom_attach(pp, NULL, B_TRUE);
	if (cp == NULL) {
		ZFS_LOG(1, "Unable to attach tasting instance to %s.",
		    pp->name);
		return (NO_MATCH);
	}
	g_topology_unlock();
	nlabels = vdev_geom_read_config(cp, &config);
	g_topology_lock();
	vdev_geom_detach(cp, B_TRUE);
	if (nlabels == 0) {
		ZFS_LOG(1, "Unable to read config from %s.", pp->name);
		return (NO_MATCH);
	}

	pool_guid = 0;
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid);
	top_guid = 0;
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID, &top_guid);
	vdev_guid = 0;
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid);
	nvlist_free(config);

	/*
	 * Check that the label's pool guid matches the desired guid.
	 * Inactive spares and L2ARCs do not have any pool guid in the label.
	 */
	if (pool_guid != 0 && pool_guid != spa_guid(vd->vdev_spa)) {
		ZFS_LOG(1, "pool guid mismatch for provider %s: %ju != %ju.",
		    pp->name,
		    (uintmax_t)spa_guid(vd->vdev_spa), (uintmax_t)pool_guid);
		return (NO_MATCH);
	}

	/*
	 * Check that the label's vdev guid matches the desired guid.
	 * The second condition handles a possible race on vdev detach, when
	 * the remaining vdev receives the GUID of the destroyed top-level
	 * mirror vdev.
	 */
	if (vdev_guid == vd->vdev_guid) {
		ZFS_LOG(1, "guids match for provider %s.", pp->name);
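		/*
		 * nlabels is between 1 and VDEV_LABELS, so this maps directly
		 * onto ONE_MATCH..FULL_MATCH above.
		 */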
		return (ZERO_MATCH + nlabels);
	} else if (top_guid == vd->vdev_guid && vd == vd->vdev_top) {
		ZFS_LOG(1, "top vdev guid match for provider %s.", pp->name);
		return (TOPGUID_MATCH);
	}
	ZFS_LOG(1, "vdev guid mismatch for provider %s: %ju != %ju.",
	    pp->name, (uintmax_t)vd->vdev_guid, (uintmax_t)vdev_guid);
	return (NO_MATCH);
}

static struct g_consumer *
vdev_geom_attach_by_guids(vdev_t *vd)
{
	struct g_class *mp;
	struct g_geom *gp;
	struct g_provider *pp, *best_pp;
	struct g_consumer *cp;
	const char *vdpath;
	enum match match, best_match;

	g_topology_assert();

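	/* Strip "/dev/" so vdev_path can be compared against provider names. */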
	vdpath = vd->vdev_path + sizeof ("/dev/") - 1;
	cp = NULL;
	best_pp = NULL;
	best_match = NO_MATCH;
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp == &zfs_vdev_class)
			continue;
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (gp->flags & G_GEOM_WITHER)
				continue;
			LIST_FOREACH(pp, &gp->provider, provider) {
				match = vdev_attach_ok(vd, pp);
				if (match > best_match) {
					best_match = match;
					best_pp = pp;
				} else if (match == best_match) {
					if (strcmp(pp->name, vdpath) == 0) {
						best_pp = pp;
					}
				}
				if (match == FULL_MATCH)
					goto out;
			}
		}
	}

out:
	if (best_pp) {
		cp = vdev_geom_attach(best_pp, vd, B_TRUE);
		if (cp == NULL) {
			printf("ZFS WARNING: Unable to attach to %s.\n",
			    best_pp->name);
		}
	}
	return (cp);
}

static struct g_consumer *
vdev_geom_open_by_guids(vdev_t *vd)
{
	struct g_consumer *cp;
	char *buf;
	size_t len;

	g_topology_assert();

	ZFS_LOG(1, "Searching by guids [%ju:%ju].",
	    (uintmax_t)spa_guid(vd->vdev_spa), (uintmax_t)vd->vdev_guid);
	cp = vdev_geom_attach_by_guids(vd);
	if (cp != NULL) {
		len = strlen(cp->provider->name) + strlen("/dev/") + 1;
		buf = kmem_alloc(len, KM_SLEEP);

		snprintf(buf, len, "/dev/%s", cp->provider->name);
		spa_strfree(vd->vdev_path);
		vd->vdev_path = buf;

		ZFS_LOG(1, "Attach by guid [%ju:%ju] succeeded, provider %s.",
		    (uintmax_t)spa_guid(vd->vdev_spa),
		    (uintmax_t)vd->vdev_guid, cp->provider->name);
	} else {
		ZFS_LOG(1, "Search by guid [%ju:%ju] failed.",
		    (uintmax_t)spa_guid(vd->vdev_spa),
		    (uintmax_t)vd->vdev_guid);
	}

	return (cp);
}

static struct g_consumer *
vdev_geom_open_by_path(vdev_t *vd, int check_guid)
{
	struct g_provider *pp;
	struct g_consumer *cp;

	g_topology_assert();

	cp = NULL;
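	/* vdev_path is known to start with "/dev/"; skip it for the lookup. */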
	pp = g_provider_by_name(vd->vdev_path + sizeof ("/dev/") - 1);
	if (pp != NULL) {
		ZFS_LOG(1, "Found provider by name %s.", vd->vdev_path);
		if (!check_guid || vdev_attach_ok(vd, pp) == FULL_MATCH)
			cp = vdev_geom_attach(pp, vd, B_FALSE);
	}

	return (cp);
}

static int
vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	int error, has_trim;
	uint16_t rate;

	/*
	 * Set the TLS to indicate downstack that we
	 * should not access zvols.
	 */
	VERIFY0(tsd_set(zfs_geom_probe_vdev_key, vd));

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || strncmp(vd->vdev_path, "/dev/", 5) != 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	/*
	 * Reopen the device if it's not currently open.  Otherwise,
	 * just update the physical size of the device.
	 */
	if ((cp = vd->vdev_tsd) != NULL) {
		ASSERT(vd->vdev_reopening);
		goto skip_open;
	}

	DROP_GIANT();
	g_topology_lock();
	error = 0;

	if (vd->vdev_spa->spa_is_splitting ||
	    ((vd->vdev_prevstate == VDEV_STATE_UNKNOWN &&
	    (vd->vdev_spa->spa_load_state == SPA_LOAD_NONE ||
	    vd->vdev_spa->spa_load_state == SPA_LOAD_CREATE)))) {
		/*
		 * We are dealing with a vdev that hasn't been previously
		 * opened (since boot), and we are not loading an
		 * existing pool configuration.  This looks like a
		 * vdev add operation to a new or existing pool.
		 * Assume the user really wants to do this, and find
		 * the GEOM provider by its name, ignoring GUID mismatches.
		 *
		 * XXPOLICY: It would be safer to only allow a device
		 * that is unlabeled or labeled but missing
		 * GUID information to be opened in this fashion,
		 * unless we are doing a split, in which case we
		 * should allow any guid.
		 */
		cp = vdev_geom_open_by_path(vd, 0);
	} else {
		/*
		 * Try using the recorded path for this device, but only
		 * accept it if its label data contains the expected GUIDs.
		 */
		cp = vdev_geom_open_by_path(vd, 1);
		if (cp == NULL) {
			/*
			 * The device at vd->vdev_path doesn't have the
			 * expected GUIDs.  The disks might have merely
			 * moved around so try all other GEOM providers
			 * to find one with the right GUIDs.
			 */
			cp = vdev_geom_open_by_guids(vd);
		}
	}

	/* Clear the TLS now that tasting is done */
	VERIFY0(tsd_set(zfs_geom_probe_vdev_key, NULL));

	if (cp == NULL) {
		ZFS_LOG(1, "Vdev %s not found.", vd->vdev_path);
		error = ENOENT;
	} else {
		struct consumer_priv_t *priv;
		struct consumer_vdev_elem *elem;
		int spamode;

		priv = (struct consumer_priv_t *)&cp->private;
		if (cp->private == NULL)
			SLIST_INIT(priv);
		elem = g_malloc(sizeof (*elem), M_WAITOK|M_ZERO);
		elem->vd = vd;
		SLIST_INSERT_HEAD(priv, elem, elems);

		spamode = spa_mode(vd->vdev_spa);
		if (cp->provider->sectorsize > VDEV_PAD_SIZE ||
		    !ISP2(cp->provider->sectorsize)) {
			ZFS_LOG(1, "Provider %s has unsupported sectorsize.",
			    cp->provider->name);

			vdev_geom_close_locked(vd);
			error = EINVAL;
			cp = NULL;
		} else if (cp->acw == 0 && (spamode & FWRITE) != 0) {
			int i;

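			/*
			 * Upgrading to write access can fail transiently,
			 * e.g. if another consumer has not yet dropped its
			 * exclusive access, so retry briefly before giving up.
			 */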
			for (i = 0; i < 5; i++) {
				error = g_access(cp, 0, 1, 0);
				if (error == 0)
					break;
				g_topology_unlock();
				tsleep(vd, 0, "vdev", hz / 2);
				g_topology_lock();
			}
			if (error != 0) {
				printf("ZFS WARNING: Unable to open %s for "
				    "writing (error=%d).\n",
				    cp->provider->name, error);
				vdev_geom_close_locked(vd);
				cp = NULL;
			}
		}
	}

	/* Fetch initial physical path information for this device. */
	if (cp != NULL) {
		vdev_geom_attrchanged(cp, "GEOM::physpath");

		/* Set other GEOM characteristics */
		vdev_geom_set_physpath(vd, cp, /* do_null_update */B_FALSE);
	}

	g_topology_unlock();
	PICKUP_GIANT();
	if (cp == NULL) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		vdev_dbgmsg(vd, "vdev_geom_open: failed to open [error=%d]",
		    error);
		return (error);
	}
skip_open:
	pp = cp->provider;

	/*
	 * Determine the actual size of the device.
	 */
	*max_psize = *psize = pp->mediasize;

	/*
	 * Determine the device's minimum transfer size and preferred
	 * transfer size.
	 */
	*logical_ashift = highbit(MAX(pp->sectorsize, SPA_MINBLOCKSIZE)) - 1;
	*physical_ashift = 0;
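	/*
	 * A power-of-2 stripesize larger than the logical sector size (with
	 * no stripe offset) indicates the device's preferred physical block
	 * size, e.g. a 4 KB physical sector on a 512e drive.
	 */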
	if (pp->stripesize && pp->stripesize > (1 << *logical_ashift) &&
	    ISP2(pp->stripesize) && pp->stripeoffset == 0)
		*physical_ashift = highbit(pp->stripesize) - 1;

	/*
	 * Clear the nowritecache settings, so that on a vdev_reopen()
	 * we will try again.
	 */
	vd->vdev_nowritecache = B_FALSE;

	/* Inform the ZIO pipeline that we are non-rotational. */
	error = g_getattr("GEOM::rotation_rate", cp, &rate);
	if (error == 0 && rate == DISK_RR_NON_ROTATING)
		vd->vdev_nonrot = B_TRUE;
	else
		vd->vdev_nonrot = B_FALSE;

	/* Set when device reports it supports TRIM. */
	error = g_getattr("GEOM::candelete", cp, &has_trim);
	vd->vdev_has_trim = (error == 0 && has_trim);

	/* Set when device reports it supports secure TRIM. */
	/* unavailable on FreeBSD */
	vd->vdev_has_securetrim = B_FALSE;

	return (0);
}

static void
vdev_geom_close(vdev_t *vd)
{
	struct g_consumer *cp;
	boolean_t locked;

	cp = vd->vdev_tsd;

	DROP_GIANT();
	locked = g_topology_locked();
	if (!locked)
		g_topology_lock();

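	/*
	 * Keep the consumer across a reopen only if it is still usable
	 * (not orphaned and its provider reports no error).
	 */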
	if (!vd->vdev_reopening ||
	    (cp != NULL && ((cp->flags & G_CF_ORPHAN) != 0 ||
	    (cp->provider != NULL && cp->provider->error != 0))))
		vdev_geom_close_locked(vd);

	if (!locked)
		g_topology_unlock();
	PICKUP_GIANT();
}

static void
vdev_geom_io_intr(struct bio *bp)
{
	vdev_t *vd;
	zio_t *zio;

	zio = bp->bio_caller1;
	vd = zio->io_vd;
	zio->io_error = bp->bio_error;
	if (zio->io_error == 0 && bp->bio_resid != 0)
		zio->io_error = SET_ERROR(EIO);

	switch (zio->io_error) {
	case ENXIO:
		if (!vd->vdev_remove_wanted) {
			/*
			 * If the provider's error is set, we assume it is
			 * being removed.
			 */
			if (bp->bio_to->error != 0) {
				vd->vdev_remove_wanted = B_TRUE;
				spa_async_request(zio->io_spa,
				    SPA_ASYNC_REMOVE);
			} else if (!vd->vdev_delayed_close) {
				vd->vdev_delayed_close = B_TRUE;
			}
		}
		break;
	}

	/*
	 * We have to split bio freeing into two parts, because the ABD code
	 * cannot be called in this context and vdev_op_io_done is not called
	 * for ZIO_TYPE_FLUSH zios.
	 */
	if (zio->io_type != ZIO_TYPE_READ && zio->io_type != ZIO_TYPE_WRITE) {
		g_destroy_bio(bp);
		zio->io_bio = NULL;
	}
	zio_delay_interrupt(zio);
}

struct vdev_geom_check_unmapped_cb_state {
	int	pages;
	uint_t	end;
};

/*
 * Callback to check the ABD segment size/alignment and count the pages.
 * GEOM requires the data buffer to look virtually contiguous: only the
 * first page of the buffer may start off a page boundary, and only the
 * last page may end off one.  All other physical pages must be full.
 */
static int
vdev_geom_check_unmapped_cb(void *buf, size_t len, void *priv)
{
	struct vdev_geom_check_unmapped_cb_state *s = priv;
	vm_offset_t off = (vm_offset_t)buf & PAGE_MASK;

	if (s->pages != 0 && off != 0)
		return (1);
	if (s->end != 0)
		return (1);
	s->end = (off + len) & PAGE_MASK;
	s->pages += (off + len + PAGE_MASK) >> PAGE_SHIFT;
	return (0);
}

/*
 * Check whether we can use unmapped I/O for this ZIO on this device to
 * avoid copying data between a scattered and/or gang ABD buffer and a
 * linear one.
 */
static int
vdev_geom_check_unmapped(zio_t *zio, struct g_consumer *cp)
{
	struct vdev_geom_check_unmapped_cb_state s;

	/* If unmapped I/O is administratively disabled, respect that. */
	if (!unmapped_buf_allowed)
		return (0);

	/* If the buffer is already linear, then nothing to do here. */
	if (abd_is_linear(zio->io_abd))
		return (0);

	/*
	 * If unmapped I/O is not supported by the GEOM provider,
	 * then we can't do anything and have to copy the data.
	 */
	if ((cp->provider->flags & G_PF_ACCEPT_UNMAPPED) == 0)
		return (0);

	/* Check the buffer chunk sizes/alignments and count pages. */
	s.pages = s.end = 0;
	if (abd_iterate_func(zio->io_abd, 0, zio->io_size,
	    vdev_geom_check_unmapped_cb, &s))
		return (0);
	return (s.pages);
}

/*
 * Callback to translate the ABD segment into an array of physical pages.
 */
static int
vdev_geom_fill_unmap_cb(void *buf, size_t len, void *priv)
{
	struct bio *bp = priv;
	vm_offset_t addr = (vm_offset_t)buf;
	vm_offset_t end = addr + len;

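	/*
	 * The first segment may begin mid-page; record its page offset.
	 * Every later segment must start page-aligned, as enforced by
	 * vdev_geom_check_unmapped_cb().
	 */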
	if (bp->bio_ma_n == 0) {
		bp->bio_ma_offset = addr & PAGE_MASK;
		addr &= ~PAGE_MASK;
	} else {
		ASSERT0(P2PHASE(addr, PAGE_SIZE));
	}
	do {
		bp->bio_ma[bp->bio_ma_n++] =
		    PHYS_TO_VM_PAGE(pmap_kextract(addr));
		addr += PAGE_SIZE;
	} while (addr < end);
	return (0);
}

static void
vdev_geom_io_start(zio_t *zio)
{
	vdev_t *vd;
	struct g_consumer *cp;
	struct bio *bp;

	vd = zio->io_vd;

	if (zio->io_type == ZIO_TYPE_FLUSH) {
		/* XXPOLICY */
		if (!vdev_readable(vd)) {
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return;
		}

		if (zfs_nocacheflush || vdev_geom_bio_flush_disable) {
			zio_execute(zio);
			return;
		}

		if (vd->vdev_nowritecache) {
			zio->io_error = SET_ERROR(ENOTSUP);
			zio_execute(zio);
			return;
		}
	} else if (zio->io_type == ZIO_TYPE_TRIM) {
		if (vdev_geom_bio_delete_disable) {
			zio_execute(zio);
			return;
		}
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ ||
	    zio->io_type == ZIO_TYPE_WRITE ||
	    zio->io_type == ZIO_TYPE_TRIM ||
	    zio->io_type == ZIO_TYPE_FLUSH);

	cp = vd->vdev_tsd;
	if (cp == NULL) {
		zio->io_error = SET_ERROR(ENXIO);
		zio_interrupt(zio);
		return;
	}
	bp = g_alloc_bio();
	bp->bio_caller1 = zio;
	switch (zio->io_type) {
	case ZIO_TYPE_READ:
	case ZIO_TYPE_WRITE:
		zio->io_target_timestamp = zio_handle_io_delay(zio);
		bp->bio_offset = zio->io_offset;
		bp->bio_length = zio->io_size;
		if (zio->io_type == ZIO_TYPE_READ)
			bp->bio_cmd = BIO_READ;
		else
			bp->bio_cmd = BIO_WRITE;

		/*
		 * If possible, represent a scattered and/or gang ABD buffer
		 * to GEOM as an array of physical pages.  This satisfies
		 * GEOM's requirement for a virtually contiguous buffer
		 * without copying.
		 */
		int pgs = vdev_geom_check_unmapped(zio, cp);
		if (pgs > 0) {
			bp->bio_ma = malloc(sizeof (struct vm_page *) * pgs,
			    M_DEVBUF, M_WAITOK);
			bp->bio_ma_n = 0;
			bp->bio_ma_offset = 0;
			abd_iterate_func(zio->io_abd, 0, zio->io_size,
			    vdev_geom_fill_unmap_cb, bp);
			bp->bio_data = unmapped_buf;
			bp->bio_flags |= BIO_UNMAPPED;
		} else {
			if (zio->io_type == ZIO_TYPE_READ) {
				bp->bio_data = abd_borrow_buf(zio->io_abd,
				    zio->io_size);
			} else {
				bp->bio_data = abd_borrow_buf_copy(zio->io_abd,
				    zio->io_size);
			}
		}
		break;
	case ZIO_TYPE_TRIM:
		bp->bio_cmd = BIO_DELETE;
		bp->bio_data = NULL;
		bp->bio_offset = zio->io_offset;
		bp->bio_length = zio->io_size;
		break;
	case ZIO_TYPE_FLUSH:
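		/*
		 * A zero-length flush bio with the offset set to the media
		 * size, matching how g_io_flush() constructs its request.
		 */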
		bp->bio_cmd = BIO_FLUSH;
		bp->bio_data = NULL;
		bp->bio_offset = cp->provider->mediasize;
		bp->bio_length = 0;
		break;
	default:
		panic("invalid zio->io_type: %d\n", zio->io_type);
	}
	bp->bio_done = vdev_geom_io_intr;
	zio->io_bio = bp;

	g_io_request(bp, cp);
}

static void
vdev_geom_io_done(zio_t *zio)
{
	struct bio *bp = zio->io_bio;

	if (zio->io_type != ZIO_TYPE_READ && zio->io_type != ZIO_TYPE_WRITE) {
		ASSERT3P(bp, ==, NULL);
		return;
	}

	if (bp == NULL) {
		if (zio_injection_enabled && zio->io_error == EIO)
			/*
			 * Convert an injected EIO to ENXIO.  This is needed to
			 * work around zio_handle_device_injection_impl() not
			 * currently being able to inject ENXIO directly, while
			 * the assertion below only allows ENXIO here.
			 */
			zio->io_error = SET_ERROR(ENXIO);
		else
			ASSERT3S(zio->io_error, ==, ENXIO);
		return;
	}

	if (bp->bio_ma != NULL) {
		free(bp->bio_ma, M_DEVBUF);
	} else {
		if (zio->io_type == ZIO_TYPE_READ) {
			abd_return_buf_copy(zio->io_abd, bp->bio_data,
			    zio->io_size);
		} else {
			abd_return_buf(zio->io_abd, bp->bio_data,
			    zio->io_size);
		}
	}

	g_destroy_bio(bp);
	zio->io_bio = NULL;
}

static void
vdev_geom_hold(vdev_t *vd)
{
}

static void
vdev_geom_rele(vdev_t *vd)
{
}

vdev_ops_t vdev_disk_ops = {
	.vdev_op_init = NULL,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_geom_open,
	.vdev_op_close = vdev_geom_close,
	.vdev_op_psize_to_asize = vdev_default_asize,
	.vdev_op_asize_to_psize = vdev_default_psize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_geom_io_start,
	.vdev_op_io_done = vdev_geom_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = vdev_geom_hold,
	.vdev_op_rele = vdev_geom_rele,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_DISK,	/* name of this vdev type */
	.vdev_op_leaf = B_TRUE		/* leaf vdev */
};