/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2015 Netflix, Inc
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Derived from ata_da.c:
 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/cons.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <geom/geom_disk.h>
#endif /* _KERNEL */

#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#endif /* _KERNEL */

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_iosched.h>

#include <cam/nvme/nvme_all.h>

typedef enum {
	NDA_STATE_NORMAL
} nda_state;

typedef enum {
	NDA_FLAG_OPEN		= 0x0001,
	NDA_FLAG_DIRTY		= 0x0002,
	NDA_FLAG_SCTX_INIT	= 0x0004,
} nda_flags;

typedef enum {
	NDA_Q_4K	= 0x01,
	NDA_Q_NONE	= 0x00,
} nda_quirks;

#define NDA_Q_BIT_STRING	\
	"\020"			\
	"\001Bit 0"

typedef enum {
	NDA_CCB_BUFFER_IO	= 0x01,
	NDA_CCB_DUMP		= 0x02,
	NDA_CCB_TRIM		= 0x03,
	NDA_CCB_TYPE_MASK	= 0x0F,
} nda_ccb_state;

/* Offsets into our private area for storing information */
#define ccb_state	ccb_h.ppriv_field0
#define ccb_bp		ccb_h.ppriv_ptr1	/* For NDA_CCB_BUFFER_IO */
#define ccb_trim	ccb_h.ppriv_ptr1	/* For NDA_CCB_TRIM */

struct nda_softc {
	struct cam_iosched_softc *cam_iosched;
	int		outstanding_cmds;	/* Number of active commands */
	int		refcount;		/* Active xpt_action() calls */
	nda_state	state;
	nda_flags	flags;
	nda_quirks	quirks;
	int		unmappedio;
	quad_t		deletes;
	quad_t		dsm_req;
	uint32_t	nsid;			/* Namespace ID for this nda device */
	struct disk	*disk;
	struct task	sysctl_task;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
#ifdef CAM_TEST_FAILURE
	int		force_read_error;
	int		force_write_error;
	int		periodic_read_error;
	int		periodic_read_count;
#endif
#ifdef CAM_IO_STATS
	struct sysctl_ctx_list	sysctl_stats_ctx;
	struct sysctl_oid	*sysctl_stats_tree;
	u_int		timeouts;
	u_int		errors;
	u_int		invalidations;
#endif
};

struct nda_trim_request {
	union {
		struct nvme_dsm_range	dsm;
		uint8_t			data[NVME_MAX_DSM_TRIM];
	};
	TAILQ_HEAD(, bio) bps;
};

/* Need quirk table */

static	disk_strategy_t	ndastrategy;
static	dumper_t	ndadump;
static	periph_init_t	ndainit;
static	void		ndaasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	void		ndasysctlinit(void *context, int pending);
static	periph_ctor_t	ndaregister;
static	periph_dtor_t	ndacleanup;
static	periph_start_t	ndastart;
static	periph_oninv_t	ndaoninvalidate;
static	void		ndadone(struct cam_periph *periph,
				union ccb *done_ccb);
static	int		ndaerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static	void		ndashutdown(void *arg, int howto);
static	void		ndasuspend(void *arg);

#ifndef NDA_DEFAULT_SEND_ORDERED
#define NDA_DEFAULT_SEND_ORDERED	1
#endif
#ifndef NDA_DEFAULT_TIMEOUT
#define NDA_DEFAULT_TIMEOUT	30	/* Timeout in seconds */
#endif
#ifndef NDA_DEFAULT_RETRY
#define NDA_DEFAULT_RETRY	4
#endif
#ifndef NDA_MAX_TRIM_ENTRIES
#define NDA_MAX_TRIM_ENTRIES \
	(NVME_MAX_DSM_TRIM / sizeof(struct nvme_dsm_range))	/* Number of DSM trims to use, max 256 */
#endif

static SYSCTL_NODE(_kern_cam, OID_AUTO, nda, CTLFLAG_RD, 0,
    "CAM Direct Access Disk driver");

//static int nda_retry_count = NDA_DEFAULT_RETRY;
static int nda_send_ordered = NDA_DEFAULT_SEND_ORDERED;
static int nda_default_timeout = NDA_DEFAULT_TIMEOUT;
static int nda_max_trim_entries = NDA_MAX_TRIM_ENTRIES;
SYSCTL_INT(_kern_cam_nda, OID_AUTO, max_trim, CTLFLAG_RDTUN,
    &nda_max_trim_entries, NDA_MAX_TRIM_ENTRIES,
    "Maximum number of BIO_DELETE to send down as a DSM TRIM.");

/*
 * All NVMe media is non-rotational, so all nvme device instances
 * share this to implement the sysctl.
 */
static int nda_rotating_media = 0;

static struct periph_driver ndadriver =
{
	ndainit, "nda",
	TAILQ_HEAD_INITIALIZER(ndadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(nda, ndadriver);

static MALLOC_DEFINE(M_NVMEDA, "nvme_da", "nvme_da buffers");

/*
 * nice wrappers. Maybe these belong in nvme_all.c instead of
 * here, but this is the only place that uses these. Should
 * we ever grow another NVME periph, we should move them
 * all there wholesale.
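 *
 * Each wrapper fills in the ccb_nvmeio with cam_fill_nvmeio() and then
 * builds the corresponding NVMe command for this namespace.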
 */

static void
nda_nvme_flush(struct nda_softc *softc, struct ccb_nvmeio *nvmeio)
{
	cam_fill_nvmeio(nvmeio,
	    0,			/* retries */
	    ndadone,		/* cbfcnp */
	    CAM_DIR_NONE,	/* flags */
	    NULL,		/* data_ptr */
	    0,			/* dxfer_len */
	    nda_default_timeout * 1000); /* timeout 30s */
	nvme_ns_flush_cmd(&nvmeio->cmd, softc->nsid);
}

static void
nda_nvme_trim(struct nda_softc *softc, struct ccb_nvmeio *nvmeio,
    void *payload, uint32_t num_ranges)
{
	cam_fill_nvmeio(nvmeio,
	    0,			/* retries */
	    ndadone,		/* cbfcnp */
	    CAM_DIR_OUT,	/* flags */
	    payload,		/* data_ptr */
	    num_ranges * sizeof(struct nvme_dsm_range), /* dxfer_len */
	    nda_default_timeout * 1000); /* timeout 30s */
	nvme_ns_trim_cmd(&nvmeio->cmd, softc->nsid, num_ranges);
}

static void
nda_nvme_write(struct nda_softc *softc, struct ccb_nvmeio *nvmeio,
    void *payload, uint64_t lba, uint32_t len, uint32_t count)
{
	cam_fill_nvmeio(nvmeio,
	    0,			/* retries */
	    ndadone,		/* cbfcnp */
	    CAM_DIR_OUT,	/* flags */
	    payload,		/* data_ptr */
	    len,		/* dxfer_len */
	    nda_default_timeout * 1000); /* timeout 30s */
	nvme_ns_write_cmd(&nvmeio->cmd, softc->nsid, lba, count);
}

static void
nda_nvme_rw_bio(struct nda_softc *softc, struct ccb_nvmeio *nvmeio,
    struct bio *bp, uint32_t rwcmd)
{
	int flags = rwcmd == NVME_OPC_READ ? CAM_DIR_IN : CAM_DIR_OUT;
	void *payload;
	uint64_t lba;
	uint32_t count;

	if (bp->bio_flags & BIO_UNMAPPED) {
		flags |= CAM_DATA_BIO;
		payload = bp;
	} else {
		payload = bp->bio_data;
	}

	lba = bp->bio_pblkno;
	count = bp->bio_bcount / softc->disk->d_sectorsize;

	cam_fill_nvmeio(nvmeio,
	    0,			/* retries */
	    ndadone,		/* cbfcnp */
	    flags,		/* flags */
	    payload,		/* data_ptr */
	    bp->bio_bcount,	/* dxfer_len */
	    nda_default_timeout * 1000); /* timeout 30s */
	nvme_ns_rw_cmd(&nvmeio->cmd, rwcmd, softc->nsid, lba, count);
}

static int
ndaopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct nda_softc *softc;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (cam_periph_acquire(periph) != 0) {
		return(ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("ndaopen\n"));

	softc = (struct nda_softc *)periph->softc;
	softc->flags |= NDA_FLAG_OPEN;

	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	return (0);
}

static int
ndaclose(struct disk *dp)
{
	struct cam_periph *periph;
	struct nda_softc *softc;
	union ccb *ccb;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	softc = (struct nda_softc *)periph->softc;
	cam_periph_lock(periph);

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("ndaclose\n"));

	if ((softc->flags & NDA_FLAG_DIRTY) != 0 &&
	    (periph->flags & CAM_PERIPH_INVALID) == 0 &&
	    cam_periph_hold(periph, PRIBIO) == 0) {

		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		nda_nvme_flush(softc, &ccb->nvmeio);
		error = cam_periph_runccb(ccb, ndaerror, /*cam_flags*/0,
		    /*sense_flags*/0, softc->disk->d_devstat);

		if (error != 0)
			xpt_print(periph->path, "Synchronize cache failed\n");
		else
			softc->flags &= ~NDA_FLAG_DIRTY;
		xpt_release_ccb(ccb);
		cam_periph_unhold(periph);
	}

	softc->flags &= ~NDA_FLAG_OPEN;

	while (softc->refcount != 0)
		cam_periph_sleep(periph, &softc->refcount, PRIBIO, "ndaclose", 1);
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}

static void
ndaschedule(struct cam_periph *periph)
{
	struct nda_softc *softc = (struct nda_softc *)periph->softc;

	if (softc->state != NDA_STATE_NORMAL)
		return;

	cam_iosched_schedule(softc->cam_iosched, periph);
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
ndastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct nda_softc *softc;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	softc = (struct nda_softc *)periph->softc;

	cam_periph_lock(periph);

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ndastrategy(%p)\n", bp));

	/*
	 * If the device has been made invalid, error out
	 */
	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	if (bp->bio_cmd == BIO_DELETE)
		softc->deletes++;

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	cam_iosched_queue_work(softc->cam_iosched, bp);

	/*
	 * Schedule ourselves for performing the work.
	 */
	ndaschedule(periph);
	cam_periph_unlock(periph);

	return;
}

static int
ndadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct cam_periph *periph;
	struct nda_softc *softc;
	u_int secsize;
	struct ccb_nvmeio nvmeio;
	struct disk *dp;
	uint64_t lba;
	uint32_t count;
	int error = 0;

	dp = arg;
	periph = dp->d_drv1;
	softc = (struct nda_softc *)periph->softc;
	secsize = softc->disk->d_sectorsize;
	lba = offset / secsize;
	count = length / secsize;

	if ((periph->flags & CAM_PERIPH_INVALID) != 0)
		return (ENXIO);

	/* xpt_get_ccb returns a zero'd allocation for the ccb, mimic that here */
	memset(&nvmeio, 0, sizeof(nvmeio));
	if (length > 0) {
		xpt_setup_ccb(&nvmeio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		nvmeio.ccb_state = NDA_CCB_DUMP;
		nda_nvme_write(softc, &nvmeio, virtual, lba, length, count);
		error = cam_periph_runccb((union ccb *)&nvmeio, cam_periph_error,
		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
		if (error != 0)
			printf("Aborting dump due to I/O error %d.\n", error);

		return (error);
	}

	/* Flush */
	xpt_setup_ccb(&nvmeio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

	nvmeio.ccb_state = NDA_CCB_DUMP;
	nda_nvme_flush(softc, &nvmeio);
	error = cam_periph_runccb((union ccb *)&nvmeio, cam_periph_error,
	    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
	if (error != 0)
		xpt_print(periph->path, "flush cmd failed\n");
	return (error);
}

static void
ndainit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
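	 * Only AC_FOUND_DEVICE is requested here; per-device events are
	 * registered later in ndaregister().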
	 */
	status = xpt_register_async(AC_FOUND_DEVICE, ndaasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("nda: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else if (nda_send_ordered) {

		/* Register our event handlers */
		if ((EVENTHANDLER_REGISTER(power_suspend, ndasuspend,
					   NULL, EVENTHANDLER_PRI_LAST)) == NULL)
			printf("ndainit: power event registration failed!\n");
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, ndashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
			printf("ndainit: shutdown event registration failed!\n");
	}
}

/*
 * Callback from GEOM, called when it has finished cleaning up its
 * resources.
 */
static void
ndadiskgonecb(struct disk *dp)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)dp->d_drv1;

	cam_periph_release(periph);
}

static void
ndaoninvalidate(struct cam_periph *periph)
{
	struct nda_softc *softc;

	softc = (struct nda_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, ndaasync, periph, periph->path);
#ifdef CAM_IO_STATS
	softc->invalidations++;
#endif

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);

	disk_gone(softc->disk);
}

static void
ndacleanup(struct cam_periph *periph)
{
	struct nda_softc *softc;

	softc = (struct nda_softc *)periph->softc;

	cam_periph_unlock(periph);

	cam_iosched_fini(softc->cam_iosched);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & NDA_FLAG_SCTX_INIT) != 0) {
#ifdef CAM_IO_STATS
		if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl stats context\n");
#endif
		if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl context\n");
	}

	disk_destroy(softc->disk);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}

static void
ndaasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		if (cgd->protocol != PROTO_NVME)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
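		 * cam_periph_alloc() invokes our ndaregister()
		 * constructor for the new periph.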
		 */
		status = cam_periph_alloc(ndaregister, ndaoninvalidate,
					  ndacleanup, ndastart,
					  "nda", CAM_PERIPH_BIO,
					  path, ndaasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("ndaasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		break;
	}
	case AC_ADVINFO_CHANGED:
	{
		uintptr_t buftype;

		buftype = (uintptr_t)arg;
		if (buftype == CDAI_TYPE_PHYS_PATH) {
			struct nda_softc *softc;

			softc = periph->softc;
			disk_attr_changed(softc->disk, "GEOM::physpath",
					  M_NOWAIT);
		}
		break;
	}
	case AC_LOST_DEVICE:
	default:
		cam_periph_async(periph, code, path, arg);
		break;
	}
}

static void
ndasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct nda_softc *softc;
	char tmpstr[32], tmpstr2[16];

	periph = (struct cam_periph *)context;

	/* periph was held for us when this task was enqueued */
	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
		cam_periph_release(periph);
		return;
	}

	softc = (struct nda_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM NDA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->flags |= NDA_FLAG_SCTX_INIT;
	softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam_nda), OID_AUTO, tmpstr2,
		CTLFLAG_RD, 0, tmpstr, "device_index");
	if (softc->sysctl_tree == NULL) {
		printf("ndasysctlinit: unable to allocate sysctl tree\n");
		cam_periph_release(periph);
		return;
	}

	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "unmapped_io", CTLFLAG_RD,
		&softc->unmappedio, 0, "Unmapped I/O leaf");

	SYSCTL_ADD_QUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "deletes", CTLFLAG_RD,
		&softc->deletes, "Number of BIO_DELETE requests");

	SYSCTL_ADD_QUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "dsm_req", CTLFLAG_RD,
		&softc->dsm_req, "Number of DSM requests sent to SIM");

	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "rotating", CTLFLAG_RD, &nda_rotating_media, 1,
		"Rotating media");

#ifdef CAM_IO_STATS
	softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats",
		CTLFLAG_RD, 0, "Statistics");
	if (softc->sysctl_stats_tree == NULL) {
		printf("ndasysctlinit: unable to allocate sysctl tree for stats\n");
		cam_periph_release(periph);
		return;
	}
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		OID_AUTO, "timeouts", CTLFLAG_RD,
		&softc->timeouts, 0,
		"Device timeouts reported by the SIM");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		OID_AUTO, "errors", CTLFLAG_RD,
		&softc->errors, 0,
		"Transport errors reported by the SIM.");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		OID_AUTO, "pack_invalidations", CTLFLAG_RD,
		&softc->invalidations, 0,
		"Device pack invalidations.");
#endif

#ifdef CAM_TEST_FAILURE
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "invalidate", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE,
		periph, 0, cam_periph_invalidate_sysctl, "I",
		"Write 1 to invalidate the drive immediately");
#endif

	cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx,
	    softc->sysctl_tree);

	cam_periph_release(periph);
}

static int
ndagetattr(struct bio *bp)
{
	int ret;
	struct cam_periph *periph;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	cam_periph_lock(periph);
	ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
	    periph->path);
	cam_periph_unlock(periph);
	if (ret == 0)
		bp->bio_completed = bp->bio_length;
	return ret;
}

static cam_status
ndaregister(struct cam_periph *periph, void *arg)
{
	struct nda_softc *softc;
	struct disk *disk;
	struct ccb_pathinq cpi;
	const struct nvme_namespace_data *nsd;
	const struct nvme_controller_data *cd;
	char announce_buf[80];
	uint8_t flbas_fmt, lbads, vwc_present;
	u_int maxio;
	int quirks;

	nsd = nvme_get_identify_ns(periph);
	cd = nvme_get_identify_cntrl(periph);

	softc = (struct nda_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT | M_ZERO);

	if (softc == NULL) {
		printf("ndaregister: Unable to probe new device. "
		    "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cam_iosched_init(&softc->cam_iosched, periph) != 0) {
		printf("ndaregister: Unable to probe new device. "
		    "Unable to allocate iosched memory\n");
		free(softc, M_DEVBUF);
		return(CAM_REQ_CMP_ERR);
	}

	/* ident_data parsing */

	periph->softc = softc;

	softc->quirks = NDA_Q_NONE;

	xpt_path_inq(&cpi, periph->path);

	TASK_INIT(&softc->sysctl_task, 0, ndasysctlinit, periph);

	/*
	 * The name space ID is the lun, save it for later I/O
	 */
	softc->nsid = (uint32_t)xpt_path_lun_id(periph->path);

	/*
	 * Register this media as a disk
	 */
	(void)cam_periph_hold(periph, PRIBIO);
	cam_periph_unlock(periph);
	snprintf(announce_buf, sizeof(announce_buf),
	    "kern.cam.nda.%d.quirks", periph->unit_number);
	quirks = softc->quirks;
	TUNABLE_INT_FETCH(announce_buf, &quirks);
	softc->quirks = quirks;
	cam_iosched_set_sort_queue(softc->cam_iosched, 0);
	softc->disk = disk = disk_alloc();
	strlcpy(softc->disk->d_descr, cd->mn,
	    MIN(sizeof(softc->disk->d_descr), sizeof(cd->mn)));
	strlcpy(softc->disk->d_ident, cd->sn,
	    MIN(sizeof(softc->disk->d_ident), sizeof(cd->sn)));
	disk->d_rotation_rate = DISK_RR_NON_ROTATING;
	disk->d_open = ndaopen;
	disk->d_close = ndaclose;
	disk->d_strategy = ndastrategy;
	disk->d_getattr = ndagetattr;
	disk->d_dump = ndadump;
	disk->d_gone = ndadiskgonecb;
	disk->d_name = "nda";
	disk->d_drv1 = periph;
	disk->d_unit = periph->unit_number;
	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
	if (maxio == 0)
		maxio = DFLTPHYS;	/* traditional default */
	else if (maxio > MAXPHYS)
		maxio = MAXPHYS;	/* for safety */
	disk->d_maxsize = maxio;
	flbas_fmt = (nsd->flbas >> NVME_NS_DATA_FLBAS_FORMAT_SHIFT) &
		NVME_NS_DATA_FLBAS_FORMAT_MASK;
	lbads = (nsd->lbaf[flbas_fmt] >> NVME_NS_DATA_LBAF_LBADS_SHIFT) &
		NVME_NS_DATA_LBAF_LBADS_MASK;
	disk->d_sectorsize = 1 << lbads;
	disk->d_mediasize = (off_t)(disk->d_sectorsize * nsd->nsze);
	disk->d_delmaxsize = disk->d_mediasize;
	disk->d_flags = DISKFLAG_DIRECT_COMPLETION;
//	if (cd->oncs.dsm) // XXX broken?
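	/*
	 * The ONCS DSM check above is disabled (see XXX), so BIO_DELETE
	 * support is advertised unconditionally.
	 */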
	disk->d_flags |= DISKFLAG_CANDELETE;
	vwc_present = (cd->vwc >> NVME_CTRLR_DATA_VWC_PRESENT_SHIFT) &
		NVME_CTRLR_DATA_VWC_PRESENT_MASK;
	if (vwc_present)
		disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
		disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
		softc->unmappedio = 1;
	}
	/*
	 * d_ident and d_descr are both far bigger than the length of either
	 * the serial or model number strings.
	 */
	nvme_strvis(disk->d_descr, cd->mn,
	    sizeof(disk->d_descr), NVME_MODEL_NUMBER_LENGTH);
	nvme_strvis(disk->d_ident, cd->sn,
	    sizeof(disk->d_ident), NVME_SERIAL_NUMBER_LENGTH);
	disk->d_hba_vendor = cpi.hba_vendor;
	disk->d_hba_device = cpi.hba_device;
	disk->d_hba_subvendor = cpi.hba_subvendor;
	disk->d_hba_subdevice = cpi.hba_subdevice;
	disk->d_stripesize = disk->d_sectorsize;
	disk->d_stripeoffset = 0;
	disk->d_devstat = devstat_new_entry(periph->periph_name,
	    periph->unit_number, disk->d_sectorsize,
	    DEVSTAT_ALL_SUPPORTED,
	    DEVSTAT_TYPE_DIRECT | XPORT_DEVSTAT_TYPE(cpi.transport),
	    DEVSTAT_PRIORITY_DISK);
	/*
	 * Add alias for older nvd drives to ease transition.
	 */
	/* disk_add_alias(disk, "nvd"); Have reports of this causing problems */

	/*
	 * Acquire a reference to the periph before we register with GEOM.
	 * We'll release this reference once GEOM calls us back (via
	 * ndadiskgonecb()) telling us that our provider has been freed.
	 */
	if (cam_periph_acquire(periph) != 0) {
		xpt_print(periph->path, "%s: lost periph during "
			  "registration!\n", __func__);
		cam_periph_lock(periph);
		return (CAM_REQ_CMP_ERR);
	}
	disk_create(softc->disk, DISK_VERSION);
	cam_periph_lock(periph);
	cam_periph_unhold(periph);

	snprintf(announce_buf, sizeof(announce_buf),
	    "%juMB (%ju %u byte sectors)",
	    (uintmax_t)((uintmax_t)disk->d_mediasize / (1024*1024)),
	    (uintmax_t)disk->d_mediasize / disk->d_sectorsize,
	    disk->d_sectorsize);
	xpt_announce_periph(periph, announce_buf);
	xpt_announce_quirks(periph, softc->quirks, NDA_Q_BIT_STRING);

	/*
	 * Create our sysctl variables, now that we know
	 * we have successfully attached.
	 */
	if (cam_periph_acquire(periph) == 0)
		taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);

	/*
	 * Register for device going away and info about the drive
	 * changing (though with NVMe, it can't)
	 */
	xpt_register_async(AC_LOST_DEVICE | AC_ADVINFO_CHANGED,
	    ndaasync, periph, periph->path);

	softc->state = NDA_STATE_NORMAL;
	return(CAM_REQ_CMP);
}

static void
ndastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct nda_softc *softc = (struct nda_softc *)periph->softc;
	struct ccb_nvmeio *nvmeio = &start_ccb->nvmeio;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ndastart\n"));

	switch (softc->state) {
	case NDA_STATE_NORMAL:
	{
		struct bio *bp;

		bp = cam_iosched_next_bio(softc->cam_iosched);
		CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ndastart: bio %p\n", bp));
		if (bp == NULL) {
			xpt_release_ccb(start_ccb);
			break;
		}

		switch (bp->bio_cmd) {
		case BIO_WRITE:
			softc->flags |= NDA_FLAG_DIRTY;
			/* FALLTHROUGH */
		case BIO_READ:
		{
#ifdef CAM_TEST_FAILURE
			int fail = 0;

			/*
			 * Support the failure ioctls.  If the command is a
			 * read, and there are pending forced read errors, or
			 * if a write and pending write errors, then fail this
			 * operation with EIO.  This is useful for testing
			 * purposes.  Also, support having every Nth read fail.
			 *
			 * This is a rather blunt tool.
			 */
			if (bp->bio_cmd == BIO_READ) {
				if (softc->force_read_error) {
					softc->force_read_error--;
					fail = 1;
				}
				if (softc->periodic_read_error > 0) {
					if (++softc->periodic_read_count >=
					    softc->periodic_read_error) {
						softc->periodic_read_count = 0;
						fail = 1;
					}
				}
			} else {
				if (softc->force_write_error) {
					softc->force_write_error--;
					fail = 1;
				}
			}
			if (fail) {
				biofinish(bp, NULL, EIO);
				xpt_release_ccb(start_ccb);
				ndaschedule(periph);
				return;
			}
#endif
			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
			    round_page(bp->bio_bcount + bp->bio_ma_offset) /
			    PAGE_SIZE == bp->bio_ma_n,
			    ("Short bio %p", bp));
			nda_nvme_rw_bio(softc, &start_ccb->nvmeio, bp,
			    bp->bio_cmd == BIO_READ ? NVME_OPC_READ : NVME_OPC_WRITE);
			break;
		}
		case BIO_DELETE:
		{
			struct nvme_dsm_range *dsm_range, *dsm_end;
			struct nda_trim_request *trim;
			struct bio *bp1;
			int ents;

			trim = malloc(sizeof(*trim), M_NVMEDA, M_ZERO | M_NOWAIT);
			if (trim == NULL) {
				biofinish(bp, NULL, ENOMEM);
				xpt_release_ccb(start_ccb);
				ndaschedule(periph);
				return;
			}
			TAILQ_INIT(&trim->bps);
			bp1 = bp;
			ents = sizeof(trim->data) / sizeof(struct nvme_dsm_range);
			ents = min(ents, nda_max_trim_entries);
			dsm_range = &trim->dsm;
			dsm_end = dsm_range + ents;
			do {
				TAILQ_INSERT_TAIL(&trim->bps, bp1, bio_queue);
				dsm_range->length =
				    htole32(bp1->bio_bcount / softc->disk->d_sectorsize);
				dsm_range->starting_lba =
				    htole64(bp1->bio_offset / softc->disk->d_sectorsize);
				dsm_range++;
				if (dsm_range >= dsm_end)
					break;
				bp1 = cam_iosched_next_trim(softc->cam_iosched);
				/* XXX -- Could collapse adjacent ranges, but we don't for now */
				/* XXX -- Could limit based on total payload size */
			} while (bp1 != NULL);
			start_ccb->ccb_trim = trim;
			softc->dsm_req++;
			nda_nvme_trim(softc, &start_ccb->nvmeio, &trim->dsm,
			    dsm_range - &trim->dsm);
			start_ccb->ccb_state = NDA_CCB_TRIM;
			/*
			 * Note: We can have multiple TRIMs in flight, so we don't call
			 * cam_iosched_submit_trim(softc->cam_iosched);
			 * since that forces the I/O scheduler to only schedule one at a time.
			 * On NVMe drives, this is a performance disaster.
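			 * For the same reason, ndadone() skips the matching
			 * cam_iosched_trim_done() call.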
			 */
			goto out;
		}
		case BIO_FLUSH:
			nda_nvme_flush(softc, nvmeio);
			break;
		}
		start_ccb->ccb_state = NDA_CCB_BUFFER_IO;
		start_ccb->ccb_bp = bp;
out:
		start_ccb->ccb_h.flags |= CAM_UNLOCKED;
		softc->outstanding_cmds++;
		softc->refcount++;
		cam_periph_unlock(periph);
		xpt_action(start_ccb);
		cam_periph_lock(periph);
		softc->refcount--;

		/* May have more work to do, so ensure we stay scheduled */
		ndaschedule(periph);
		break;
	}
	}
}

static void
ndadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct nda_softc *softc;
	struct ccb_nvmeio *nvmeio = &done_ccb->nvmeio;
	struct cam_path *path;
	int state;

	softc = (struct nda_softc *)periph->softc;
	path = done_ccb->ccb_h.path;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("ndadone\n"));

	state = nvmeio->ccb_state & NDA_CCB_TYPE_MASK;
	switch (state) {
	case NDA_CCB_BUFFER_IO:
	case NDA_CCB_TRIM:
	{
		int error;

		cam_periph_lock(periph);
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			error = ndaerror(done_ccb, 0, 0);
			if (error == ERESTART) {
				/* A retry was scheduled, so just return. */
				cam_periph_unlock(periph);
				return;
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(path,
				    /*relsim_flags*/0,
				    /*reduction*/0,
				    /*timeout*/0,
				    /*getcount_only*/0);
		} else {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				panic("REQ_CMP with QFRZN");
			error = 0;
		}
		if (state == NDA_CCB_BUFFER_IO) {
			struct bio *bp;

			bp = (struct bio *)done_ccb->ccb_bp;
			bp->bio_error = error;
			if (error != 0) {
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			} else {
				bp->bio_resid = 0;
			}
			softc->outstanding_cmds--;

			/*
			 * We need to call cam_iosched before we call biodone so that we
			 * don't measure any activity that happens in the completion
			 * routine, which in the case of sendfile can be quite
			 * extensive.
			 */
			cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
			xpt_release_ccb(done_ccb);
			ndaschedule(periph);
			cam_periph_unlock(periph);
			biodone(bp);
		} else { /* state == NDA_CCB_TRIM */
			struct nda_trim_request *trim;
			struct bio *bp1, *bp2;
			TAILQ_HEAD(, bio) queue;

			trim = nvmeio->ccb_trim;
			TAILQ_INIT(&queue);
			TAILQ_CONCAT(&queue, &trim->bps, bio_queue);
			free(trim, M_NVMEDA);

			/*
			 * Since we can have multiple trims in flight, we don't
			 * need to call this here.
			 * cam_iosched_trim_done(softc->cam_iosched);
			 */
			/*
			 * Tell the I/O scheduler that we're finishing the I/O
			 * so we can keep book. The first one we pass in the CCB
			 * which has the timing information. The rest we pass in NULL
			 * so we can keep proper counts.
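			 * bp1 stays on the queue and is completed by biodone()
			 * in the loop below along with the rest.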
			 */
			bp1 = TAILQ_FIRST(&queue);
			cam_iosched_bio_complete(softc->cam_iosched, bp1, done_ccb);
			xpt_release_ccb(done_ccb);
			ndaschedule(periph);
			cam_periph_unlock(periph);
			while ((bp2 = TAILQ_FIRST(&queue)) != NULL) {
				TAILQ_REMOVE(&queue, bp2, bio_queue);
				bp2->bio_error = error;
				if (error != 0) {
					bp2->bio_flags |= BIO_ERROR;
					bp2->bio_resid = bp2->bio_bcount;
				} else
					bp2->bio_resid = 0;
				if (bp1 != bp2)
					cam_iosched_bio_complete(softc->cam_iosched, bp2, NULL);
				biodone(bp2);
			}
		}
		return;
	}
	case NDA_CCB_DUMP:
		/* No-op.  We're polling */
		return;
	default:
		break;
	}
	xpt_release_ccb(done_ccb);
}

static int
ndaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct nda_softc *softc;
	struct cam_periph *periph;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct nda_softc *)periph->softc;

	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
#ifdef CAM_IO_STATS
		softc->timeouts++;
#endif
		break;
	case CAM_REQ_ABORTED:
	case CAM_REQ_CMP_ERR:
	case CAM_REQ_TERMIO:
	case CAM_UNREC_HBA_ERROR:
	case CAM_DATA_RUN_ERR:
	case CAM_ATA_STATUS_ERROR:
#ifdef CAM_IO_STATS
		softc->errors++;
#endif
		break;
	default:
		break;
	}

	return(cam_periph_error(ccb, cam_flags, sense_flags));
}

/*
 * Step through all NDA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
ndaflush(void)
{
	struct cam_periph *periph;
	struct nda_softc *softc;
	union ccb *ccb;
	int error;

	CAM_PERIPH_FOREACH(periph, &ndadriver) {
		softc = (struct nda_softc *)periph->softc;

		if (SCHEDULER_STOPPED()) {
			/*
			 * If we paniced with the lock held or the periph is not
			 * open, do not recurse.  Otherwise, call ndadump since
			 * that avoids the sleep cam_periph_getccb does if no
			 * CCBs are available.
			 */
			if (!cam_periph_owned(periph) &&
			    (softc->flags & NDA_FLAG_OPEN)) {
				ndadump(softc->disk, NULL, 0, 0, 0);
			}
			continue;
		}

		/*
		 * We only sync the cache if the drive is still open
		 */
		cam_periph_lock(periph);
		if ((softc->flags & NDA_FLAG_OPEN) == 0) {
			cam_periph_unlock(periph);
			continue;
		}

		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		nda_nvme_flush(softc, &ccb->nvmeio);
		error = cam_periph_runccb(ccb, ndaerror, /*cam_flags*/0,
		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY,
		    softc->disk->d_devstat);
		if (error != 0)
			xpt_print(periph->path, "Synchronize cache failed\n");
		xpt_release_ccb(ccb);
		cam_periph_unlock(periph);
	}
}

static void
ndashutdown(void *arg, int howto)
{

	ndaflush();
}

static void
ndasuspend(void *arg)
{

	ndaflush();
}