/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/refcount.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/sunldi.h>
#include <sys/efi_partition.h>
#include <sys/fm/fs/zfs.h>

/*
 * Virtual device vector for disks.
 */

extern ldi_ident_t zfs_li;

static void
vdev_disk_hold(vdev_t *vd)
{
	ddi_devid_t devid;
	char *minor;

	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
		return;

	/*
	 * Only prefetch path and devid info if the device has
	 * never been opened.
	 */
	if (vd->vdev_tsd != NULL)
		return;

	if (vd->vdev_wholedisk == -1ULL) {
		size_t len = strlen(vd->vdev_path) + 3;
		char *buf = kmem_alloc(len, KM_SLEEP);

		(void) snprintf(buf, len, "%ss0", vd->vdev_path);

		(void) ldi_vp_from_name(buf, &vd->vdev_name_vp);
		kmem_free(buf, len);
	}

	if (vd->vdev_name_vp == NULL)
		(void) ldi_vp_from_name(vd->vdev_path, &vd->vdev_name_vp);

	if (vd->vdev_devid != NULL &&
	    ddi_devid_str_decode(vd->vdev_devid, &devid, &minor) == 0) {
		(void) ldi_vp_from_devid(devid, minor, &vd->vdev_devid_vp);
		ddi_devid_str_free(minor);
		ddi_devid_free(devid);
	}
}

static void
vdev_disk_rele(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	if (vd->vdev_name_vp) {
		VN_RELE_ASYNC(vd->vdev_name_vp,
		    dsl_pool_vnrele_taskq(vd->vdev_spa->spa_dsl_pool));
		vd->vdev_name_vp = NULL;
	}
	if (vd->vdev_devid_vp) {
		VN_RELE_ASYNC(vd->vdev_devid_vp,
		    dsl_pool_vnrele_taskq(vd->vdev_spa->spa_dsl_pool));
		vd->vdev_devid_vp = NULL;
	}
}

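/*
 * Determine how much additional space is available beyond the current
 * EFI label: read the GPT via DKIOCGETEFI and compare the device's last
 * LBA against the alternate (backup) GPT header location recorded in the
 * label.  The difference, in bytes, is the amount the whole-disk vdev
 * could grow; vdev_disk_open() adds it to psize to compute max_psize.
 */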
static uint64_t
vdev_disk_get_space(vdev_t *vd, uint64_t capacity, uint_t blksz)
{
	ASSERT(vd->vdev_wholedisk);

	vdev_disk_t *dvd = vd->vdev_tsd;
	dk_efi_t dk_ioc;
	efi_gpt_t *efi;
	uint64_t avail_space = 0;
	int efisize = EFI_LABEL_SIZE * 2;

	dk_ioc.dki_data = kmem_alloc(efisize, KM_SLEEP);
	dk_ioc.dki_lba = 1;
	dk_ioc.dki_length = efisize;
	dk_ioc.dki_data_64 = (uint64_t)(uintptr_t)dk_ioc.dki_data;
	efi = dk_ioc.dki_data;

	if (ldi_ioctl(dvd->vd_lh, DKIOCGETEFI, (intptr_t)&dk_ioc,
	    FKIOCTL, kcred, NULL) == 0) {
		uint64_t efi_altern_lba = LE_64(efi->efi_gpt_AlternateLBA);

		zfs_dbgmsg("vdev %s, capacity %llu, altern lba %llu",
		    vd->vdev_path, capacity, efi_altern_lba);
		if (capacity > efi_altern_lba)
			avail_space = (capacity - efi_altern_lba) * blksz;
	}
	kmem_free(dk_ioc.dki_data, efisize);
	return (avail_space);
}

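/*
 * Open the backing device through LDI and report its size (psize), its
 * maximum possible size (max_psize), and its minimum transfer size
 * (ashift).  On a reopen the existing LDI handle is reused and only the
 * sizes are refreshed.
 */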
static int
vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	spa_t *spa = vd->vdev_spa;
	vdev_disk_t *dvd;
	struct dk_minfo_ext dkmext;
	int error;
	dev_t dev;
	int otyp;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	/*
	 * Reopen the device if it's not currently open. Otherwise,
	 * just update the physical size of the device.
	 */
	if (vd->vdev_tsd != NULL) {
		ASSERT(vd->vdev_reopening);
		dvd = vd->vdev_tsd;
		goto skip_open;
	}

	dvd = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);

	/*
	 * When opening a disk device, we want to preserve the user's original
	 * intent.  We always want to open the device by the path the user gave
	 * us, even if it is one of multiple paths to the same device.  But we
	 * also want to be able to survive disks being removed/recabled.
	 * Therefore the sequence of opening devices is:
	 *
	 * 1. Try opening the device by path.  For legacy pools without the
	 *    'whole_disk' property, attempt to fix the path by appending 's0'.
	 *
	 * 2. If the devid of the device matches the stored value, return
	 *    success.
	 *
	 * 3. Otherwise, the device may have moved.  Try opening the device
	 *    by the devid instead.
	 */
	if (vd->vdev_devid != NULL) {
		if (ddi_devid_str_decode(vd->vdev_devid, &dvd->vd_devid,
		    &dvd->vd_minor) != 0) {
			vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
			return (EINVAL);
		}
	}

	error = EINVAL;		/* presume failure */

	if (vd->vdev_path != NULL) {
		ddi_devid_t devid;

		if (vd->vdev_wholedisk == -1ULL) {
			size_t len = strlen(vd->vdev_path) + 3;
			char *buf = kmem_alloc(len, KM_SLEEP);
			ldi_handle_t lh;

			(void) snprintf(buf, len, "%ss0", vd->vdev_path);

			if (ldi_open_by_name(buf, spa_mode(spa), kcred,
			    &lh, zfs_li) == 0) {
				spa_strfree(vd->vdev_path);
				vd->vdev_path = buf;
				vd->vdev_wholedisk = 1ULL;
				(void) ldi_close(lh, spa_mode(spa), kcred);
			} else {
				kmem_free(buf, len);
			}
		}

		error = ldi_open_by_name(vd->vdev_path, spa_mode(spa), kcred,
		    &dvd->vd_lh, zfs_li);

		/*
		 * Compare the devid to the stored value.
		 */
		if (error == 0 && vd->vdev_devid != NULL &&
		    ldi_get_devid(dvd->vd_lh, &devid) == 0) {
			if (ddi_devid_compare(devid, dvd->vd_devid) != 0) {
				error = EINVAL;
				(void) ldi_close(dvd->vd_lh, spa_mode(spa),
				    kcred);
				dvd->vd_lh = NULL;
			}
			ddi_devid_free(devid);
		}

		/*
		 * If we succeeded in opening the device, but 'vdev_wholedisk'
		 * is not yet set, then this must be a slice.
		 */
		if (error == 0 && vd->vdev_wholedisk == -1ULL)
			vd->vdev_wholedisk = 0;
	}

	/*
	 * If we were unable to open by path, or the devid check fails, open by
	 * devid instead.
	 */
	if (error != 0 && vd->vdev_devid != NULL)
		error = ldi_open_by_devid(dvd->vd_devid, dvd->vd_minor,
		    spa_mode(spa), kcred, &dvd->vd_lh, zfs_li);

	/*
	 * If all else fails, then try opening by physical path (if available)
	 * or the logical path (if we failed due to the devid check).  While
	 * not as reliable as the devid, this will give us something, and the
	 * higher level vdev validation will prevent us from opening the wrong
	 * device.
	 */
	if (error) {
		if (vd->vdev_physpath != NULL &&
		    (dev = ddi_pathname_to_dev_t(vd->vdev_physpath)) != NODEV)
			error = ldi_open_by_dev(&dev, OTYP_BLK, spa_mode(spa),
			    kcred, &dvd->vd_lh, zfs_li);

		/*
		 * Note that we don't support the legacy auto-wholedisk support
		 * as above.  This hasn't been used in a very long time and we
		 * don't need to propagate its oddities to this edge condition.
		 */
		if (error && vd->vdev_path != NULL)
			error = ldi_open_by_name(vd->vdev_path, spa_mode(spa),
			    kcred, &dvd->vd_lh, zfs_li);
	}

	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}

	/*
	 * Once a device is opened, verify that the physical device path (if
	 * available) is up to date.
	 */
	if (ldi_get_dev(dvd->vd_lh, &dev) == 0 &&
	    ldi_get_otyp(dvd->vd_lh, &otyp) == 0) {
		char *physpath, *minorname;

		physpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		minorname = NULL;
		if (ddi_dev_pathname(dev, otyp, physpath) == 0 &&
		    ldi_get_minor_name(dvd->vd_lh, &minorname) == 0 &&
		    (vd->vdev_physpath == NULL ||
		    strcmp(vd->vdev_physpath, physpath) != 0)) {
			if (vd->vdev_physpath)
				spa_strfree(vd->vdev_physpath);
			(void) strlcat(physpath, ":", MAXPATHLEN);
			(void) strlcat(physpath, minorname, MAXPATHLEN);
			vd->vdev_physpath = spa_strdup(physpath);
		}
		if (minorname)
			kmem_free(minorname, strlen(minorname) + 1);
		kmem_free(physpath, MAXPATHLEN);
	}

skip_open:
	/*
	 * Determine the actual size of the device.
	 */
	if (ldi_get_size(dvd->vd_lh, psize) != 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (EINVAL);
	}

	/*
	 * Determine the device's minimum transfer size.
	 * If the ioctl isn't supported, assume DEV_BSIZE.
	 */
	if (ldi_ioctl(dvd->vd_lh, DKIOCGMEDIAINFOEXT, (intptr_t)&dkmext,
	    FKIOCTL, kcred, NULL) != 0)
		dkmext.dki_pbsize = DEV_BSIZE;

	*ashift = highbit(MAX(dkmext.dki_pbsize, SPA_MINBLOCKSIZE)) - 1;

	if (vd->vdev_wholedisk == 1) {
		uint64_t capacity = dkmext.dki_capacity - 1;
		uint64_t blksz = dkmext.dki_lbsize;
		int wce = 1;

		/*
		 * If we own the whole disk, try to enable disk write caching.
		 * We ignore errors because it's OK if we can't do it.
		 */
		(void) ldi_ioctl(dvd->vd_lh, DKIOCSETWCE, (intptr_t)&wce,
		    FKIOCTL, kcred, NULL);

		*max_psize = *psize + vdev_disk_get_space(vd, capacity, blksz);
		zfs_dbgmsg("capacity change: vdev %s, psize %llu, "
		    "max_psize %llu", vd->vdev_path, *psize, *max_psize);
	} else {
		*max_psize = *psize;
	}

	/*
	 * Clear the nowritecache bit, so that on a vdev_reopen() we will
	 * try again.
	 */
	vd->vdev_nowritecache = B_FALSE;

	return (0);
}

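/*
 * Undo vdev_disk_open(): release the decoded devid and minor name and
 * close the LDI handle.  This is a no-op when the vdev is being reopened.
 */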
static void
vdev_disk_close(vdev_t *vd)
{
	vdev_disk_t *dvd = vd->vdev_tsd;

	if (vd->vdev_reopening || dvd == NULL)
		return;

	if (dvd->vd_minor != NULL)
		ddi_devid_str_free(dvd->vd_minor);

	if (dvd->vd_devid != NULL)
		ddi_devid_free(dvd->vd_devid);

	if (dvd->vd_lh != NULL)
		(void) ldi_close(dvd->vd_lh, spa_mode(vd->vdev_spa), kcred);

	vd->vdev_delayed_close = B_FALSE;
	kmem_free(dvd, sizeof (vdev_disk_t));
	vd->vdev_tsd = NULL;
}

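/*
 * Perform a synchronous read or write of 'size' bytes at 'offset' against
 * an already-open LDI handle, bypassing the zio pipeline.  Used, for
 * example, by vdev_disk_read_rootlabel() below to read vdev labels.
 */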
int
vdev_disk_physio(ldi_handle_t vd_lh, caddr_t data, size_t size,
    uint64_t offset, int flags)
{
	buf_t *bp;
	int error = 0;

	if (vd_lh == NULL)
		return (EINVAL);

	ASSERT(flags & B_READ || flags & B_WRITE);

	bp = getrbuf(KM_SLEEP);
	bp->b_flags = flags | B_BUSY | B_NOCACHE | B_FAILFAST;
	bp->b_bcount = size;
	bp->b_un.b_addr = (void *)data;
	bp->b_lblkno = lbtodb(offset);
	bp->b_bufsize = size;

	error = ldi_strategy(vd_lh, bp);
	ASSERT(error == 0);
	if ((error = biowait(bp)) == 0 && bp->b_resid != 0)
		error = EIO;
	freerbuf(bp);

	return (error);
}

static void
vdev_disk_io_intr(buf_t *bp)
{
	vdev_buf_t *vb = (vdev_buf_t *)bp;
	zio_t *zio = vb->vb_io;

	/*
	 * The rest of the zio stack only deals with EIO, ECKSUM, and ENXIO.
	 * Rather than teach the rest of the stack about other error
	 * possibilities (EFAULT, etc), we normalize the error value here.
	 */
	zio->io_error = (geterror(bp) != 0 ? EIO : 0);

	if (zio->io_error == 0 && bp->b_resid != 0)
		zio->io_error = EIO;

	kmem_free(vb, sizeof (vdev_buf_t));

	zio_interrupt(zio);
}

static void
vdev_disk_ioctl_free(zio_t *zio)
{
	kmem_free(zio->io_vsd, sizeof (struct dk_callback));
}

static const zio_vsd_ops_t vdev_disk_vsd_ops = {
	vdev_disk_ioctl_free,
	zio_vsd_default_cksum_report
};

static void
vdev_disk_ioctl_done(void *zio_arg, int error)
{
	zio_t *zio = zio_arg;

	zio->io_error = error;

	zio_interrupt(zio);
}

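/*
 * Start an I/O or ioctl on behalf of the zio pipeline.  Reads and writes
 * are issued asynchronously through ldi_strategy() and complete in
 * vdev_disk_io_intr(); write-cache-flush ioctls complete in
 * vdev_disk_ioctl_done().
 */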
static int
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_disk_t *dvd = vd->vdev_tsd;
	vdev_buf_t *vb;
	struct dk_callback *dkc;
	buf_t *bp;
	int error;

	if (zio->io_type == ZIO_TYPE_IOCTL) {
		/* XXPOLICY */
		if (!vdev_readable(vd)) {
			zio->io_error = ENXIO;
			return (ZIO_PIPELINE_CONTINUE);
		}

		switch (zio->io_cmd) {

		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (vd->vdev_nowritecache) {
				zio->io_error = ENOTSUP;
				break;
			}

			zio->io_vsd = dkc = kmem_alloc(sizeof (*dkc), KM_SLEEP);
			zio->io_vsd_ops = &vdev_disk_vsd_ops;

			dkc->dkc_callback = vdev_disk_ioctl_done;
			dkc->dkc_flag = FLUSH_VOLATILE;
			dkc->dkc_cookie = zio;

			error = ldi_ioctl(dvd->vd_lh, zio->io_cmd,
			    (uintptr_t)dkc, FKIOCTL, kcred, NULL);

			if (error == 0) {
				/*
				 * The ioctl will be done asynchronously,
				 * and will call vdev_disk_ioctl_done()
				 * upon completion.
				 */
				return (ZIO_PIPELINE_STOP);
			}

			if (error == ENOTSUP || error == ENOTTY) {
				/*
				 * If we get ENOTSUP or ENOTTY, we know that
				 * no future attempts will ever succeed.
				 * In this case we set a persistent bit so
				 * that we don't bother with the ioctl in the
				 * future.
				 */
				vd->vdev_nowritecache = B_TRUE;
			}
			zio->io_error = error;

			break;

		default:
			zio->io_error = ENOTSUP;
		}

		return (ZIO_PIPELINE_CONTINUE);
	}

	vb = kmem_alloc(sizeof (vdev_buf_t), KM_SLEEP);

	vb->vb_io = zio;
	bp = &vb->vb_buf;

	bioinit(bp);
	bp->b_flags = B_BUSY | B_NOCACHE |
	    (zio->io_type == ZIO_TYPE_READ ? B_READ : B_WRITE);
	if (!(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
		bp->b_flags |= B_FAILFAST;
	bp->b_bcount = zio->io_size;
	bp->b_un.b_addr = zio->io_data;
	bp->b_lblkno = lbtodb(zio->io_offset);
	bp->b_bufsize = zio->io_size;
	bp->b_iodone = (int (*)())vdev_disk_io_intr;

	/* ldi_strategy() will return non-zero only on programming errors */
	VERIFY(ldi_strategy(dvd->vd_lh, bp) == 0);

	return (ZIO_PIPELINE_STOP);
}

static void
vdev_disk_io_done(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;

	/*
	 * If the device returned EIO, then attempt a DKIOCSTATE ioctl to see
	 * if the device has been removed.  If this is the case, then we
	 * trigger an asynchronous removal of the device.  Otherwise, probe
	 * the device and make sure it's still accessible.
	 */
	if (zio->io_error == EIO && !vd->vdev_remove_wanted) {
		vdev_disk_t *dvd = vd->vdev_tsd;
		int state = DKIO_NONE;

		if (ldi_ioctl(dvd->vd_lh, DKIOCSTATE, (intptr_t)&state,
		    FKIOCTL, kcred, NULL) == 0 && state != DKIO_INSERTED) {
			/*
			 * We post the resource as soon as possible, instead of
			 * when the async removal actually happens, because the
			 * DE is using this information to discard previous I/O
			 * errors.
			 */
			zfs_post_remove(zio->io_spa, vd);
			vd->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		} else if (!vd->vdev_delayed_close) {
			vd->vdev_delayed_close = B_TRUE;
		}
	}
}

vdev_ops_t vdev_disk_ops = {
	vdev_disk_open,
	vdev_disk_close,
	vdev_default_asize,
	vdev_disk_io_start,
	vdev_disk_io_done,
	NULL,
	vdev_disk_hold,
	vdev_disk_rele,
	VDEV_TYPE_DISK,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};

/*
 * Given the root disk device devid or pathname, read the label from
 * the device, and construct a configuration nvlist.
 */
int
vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
{
	ldi_handle_t vd_lh;
	vdev_label_t *label;
	uint64_t s, size;
	int l;
	ddi_devid_t tmpdevid;
	int error = -1;
	char *minor_name;

	/*
	 * Read the device label and build the nvlist.
	 */
	if (devid != NULL && ddi_devid_str_decode(devid, &tmpdevid,
	    &minor_name) == 0) {
		error = ldi_open_by_devid(tmpdevid, minor_name,
		    FREAD, kcred, &vd_lh, zfs_li);
		ddi_devid_free(tmpdevid);
		ddi_devid_str_free(minor_name);
	}

	if (error && (error = ldi_open_by_name(devpath, FREAD, kcred, &vd_lh,
	    zfs_li)))
		return (error);

	if (ldi_get_size(vd_lh, &s)) {
		(void) ldi_close(vd_lh, FREAD, kcred);
		return (EIO);
	}

	size = P2ALIGN_TYPED(s, sizeof (vdev_label_t), uint64_t);
	label = kmem_alloc(sizeof (vdev_label_t), KM_SLEEP);

	*config = NULL;
	for (l = 0; l < VDEV_LABELS; l++) {
		uint64_t offset, state, txg = 0;

		/* read vdev label */
		offset = vdev_label_offset(size, l, 0);
		if (vdev_disk_physio(vd_lh, (caddr_t)label,
		    VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, B_READ) != 0)
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) {
			*config = NULL;
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state >= POOL_STATE_DESTROYED) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		break;
	}

	kmem_free(label, sizeof (vdev_label_t));
	(void) ldi_close(vd_lh, FREAD, kcred);
	if (*config == NULL)
		error = EIDRM;

	return (error);
}