1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2015 Netflix, Inc. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer, 11 * without modification, immediately at the beginning of the file. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 * 27 * Derived from ata_da.c: 28 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org> 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/param.h> 35 36 #ifdef _KERNEL 37 #include <sys/systm.h> 38 #include <sys/kernel.h> 39 #include <sys/bio.h> 40 #include <sys/sysctl.h> 41 #include <sys/taskqueue.h> 42 #include <sys/lock.h> 43 #include <sys/mutex.h> 44 #include <sys/conf.h> 45 #include <sys/devicestat.h> 46 #include <sys/eventhandler.h> 47 #include <sys/malloc.h> 48 #include <sys/cons.h> 49 #include <sys/proc.h> 50 #include <sys/reboot.h> 51 #include <sys/sbuf.h> 52 #include <geom/geom.h> 53 #include <geom/geom_disk.h> 54 #endif /* _KERNEL */ 55 56 #ifndef _KERNEL 57 #include <stdio.h> 58 #include <string.h> 59 #endif /* _KERNEL */ 60 61 #include <cam/cam.h> 62 #include <cam/cam_ccb.h> 63 #include <cam/cam_periph.h> 64 #include <cam/cam_xpt_periph.h> 65 #include <cam/cam_sim.h> 66 #include <cam/cam_iosched.h> 67 68 #include <cam/nvme/nvme_all.h> 69 70 typedef enum { 71 NDA_STATE_NORMAL 72 } nda_state; 73 74 typedef enum { 75 NDA_FLAG_OPEN = 0x0001, 76 NDA_FLAG_DIRTY = 0x0002, 77 NDA_FLAG_SCTX_INIT = 0x0004, 78 } nda_flags; 79 #define NDA_FLAG_STRING \ 80 "\020" \ 81 "\001OPEN" \ 82 "\002DIRTY" \ 83 "\003SCTX_INIT" 84 85 typedef enum { 86 NDA_Q_4K = 0x01, 87 NDA_Q_NONE = 0x00, 88 } nda_quirks; 89 90 #define NDA_Q_BIT_STRING \ 91 "\020" \ 92 "\001Bit 0" 93 94 typedef enum { 95 NDA_CCB_BUFFER_IO = 0x01, 96 NDA_CCB_DUMP = 0x02, 97 NDA_CCB_TRIM = 0x03, 98 NDA_CCB_TYPE_MASK = 0x0F, 99 } nda_ccb_state; 100 101 /* Offsets into our private area for storing information */ 102 #define ccb_state ccb_h.ppriv_field0 103 #define ccb_bp ccb_h.ppriv_ptr1 /* For NDA_CCB_BUFFER_IO */ 104 #define ccb_trim ccb_h.ppriv_ptr1 /* For NDA_CCB_TRIM */ 105 106 struct nda_softc { 107 struct cam_iosched_softc *cam_iosched; 108 int outstanding_cmds; /* Number of active commands */ 109 int refcount; /* Active xpt_action() calls */ 110 nda_state state; 111 nda_flags flags; 112 nda_quirks quirks; 113 int unmappedio; 114 quad_t deletes; 115 uint32_t nsid; /* Namespace ID for this nda device */ 116 struct disk *disk; 117 struct task sysctl_task; 118 struct sysctl_ctx_list sysctl_ctx; 119 struct sysctl_oid *sysctl_tree; 120 uint64_t trim_count; 121 uint64_t trim_ranges; 122 uint64_t trim_lbas; 123 #ifdef CAM_TEST_FAILURE 124 int force_read_error; 125 int force_write_error; 126 int periodic_read_error; 127 int periodic_read_count; 128 #endif 129 #ifdef CAM_IO_STATS 130 struct sysctl_ctx_list sysctl_stats_ctx; 131 struct sysctl_oid *sysctl_stats_tree; 132 u_int timeouts; 133 u_int errors; 134 u_int invalidations; 135 #endif 136 }; 137 138 struct nda_trim_request { 139 struct nvme_dsm_range dsm[NVME_MAX_DSM_TRIM / sizeof(struct nvme_dsm_range)]; 140 TAILQ_HEAD(, bio) bps; 141 }; 142 _Static_assert(NVME_MAX_DSM_TRIM % sizeof(struct nvme_dsm_range) == 0, 143 "NVME_MAX_DSM_TRIM must be an integral number of ranges"); 144 145 /* Need quirk table */ 146 147 static disk_strategy_t ndastrategy; 148 static dumper_t ndadump; 149 static periph_init_t ndainit; 150 static void ndaasync(void *callback_arg, u_int32_t code, 151 struct cam_path *path, void *arg); 152 static void ndasysctlinit(void *context, int pending); 153 static int ndaflagssysctl(SYSCTL_HANDLER_ARGS); 154 static periph_ctor_t ndaregister; 155 static periph_dtor_t ndacleanup; 156 static periph_start_t ndastart; 157 static periph_oninv_t ndaoninvalidate; 158 static void ndadone(struct cam_periph *periph, 159 union ccb *done_ccb); 160 static int ndaerror(union ccb *ccb, u_int32_t cam_flags, 161 u_int32_t sense_flags); 162 static void ndashutdown(void *arg, int howto); 163 static void ndasuspend(void *arg); 164 165 #ifndef NDA_DEFAULT_SEND_ORDERED 166 #define NDA_DEFAULT_SEND_ORDERED 1 167 #endif 168 #ifndef NDA_DEFAULT_TIMEOUT 169 #define NDA_DEFAULT_TIMEOUT 30 /* Timeout in seconds */ 170 #endif 171 #ifndef NDA_DEFAULT_RETRY 172 #define NDA_DEFAULT_RETRY 4 173 #endif 174 #ifndef NDA_MAX_TRIM_ENTRIES 175 #define NDA_MAX_TRIM_ENTRIES (NVME_MAX_DSM_TRIM / sizeof(struct nvme_dsm_range))/* Number of DSM trims to use, max 256 */ 176 #endif 177 178 static SYSCTL_NODE(_kern_cam, OID_AUTO, nda, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 179 "CAM Direct Access Disk driver"); 180 181 //static int nda_retry_count = NDA_DEFAULT_RETRY; 182 static int nda_send_ordered = NDA_DEFAULT_SEND_ORDERED; 183 static int nda_default_timeout = NDA_DEFAULT_TIMEOUT; 184 static int nda_max_trim_entries = NDA_MAX_TRIM_ENTRIES; 185 static int nda_enable_biospeedup = 1; 186 SYSCTL_INT(_kern_cam_nda, OID_AUTO, max_trim, CTLFLAG_RDTUN, 187 &nda_max_trim_entries, NDA_MAX_TRIM_ENTRIES, 188 "Maximum number of BIO_DELETE to send down as a DSM TRIM."); 189 SYSCTL_INT(_kern_cam_nda, OID_AUTO, enable_biospeedup, CTLFLAG_RDTUN, 190 &nda_enable_biospeedup, 0, "Enable BIO_SPEEDUP processing"); 191 192 /* 193 * All NVMe media is non-rotational, so all nvme device instances 194 * share this to implement the sysctl. 195 */ 196 static int nda_rotating_media = 0; 197 198 static struct periph_driver ndadriver = 199 { 200 ndainit, "nda", 201 TAILQ_HEAD_INITIALIZER(ndadriver.units), /* generation */ 0 202 }; 203 204 PERIPHDRIVER_DECLARE(nda, ndadriver); 205 206 static MALLOC_DEFINE(M_NVMEDA, "nvme_da", "nvme_da buffers"); 207 208 /* 209 * nice wrappers. Maybe these belong in nvme_all.c instead of 210 * here, but this is the only place that uses these. Should 211 * we ever grow another NVME periph, we should move them 212 * all there wholesale. 213 */ 214 215 static void 216 nda_nvme_flush(struct nda_softc *softc, struct ccb_nvmeio *nvmeio) 217 { 218 cam_fill_nvmeio(nvmeio, 219 0, /* retries */ 220 ndadone, /* cbfcnp */ 221 CAM_DIR_NONE, /* flags */ 222 NULL, /* data_ptr */ 223 0, /* dxfer_len */ 224 nda_default_timeout * 1000); /* timeout 30s */ 225 nvme_ns_flush_cmd(&nvmeio->cmd, softc->nsid); 226 } 227 228 static void 229 nda_nvme_trim(struct nda_softc *softc, struct ccb_nvmeio *nvmeio, 230 void *payload, uint32_t num_ranges) 231 { 232 cam_fill_nvmeio(nvmeio, 233 0, /* retries */ 234 ndadone, /* cbfcnp */ 235 CAM_DIR_OUT, /* flags */ 236 payload, /* data_ptr */ 237 num_ranges * sizeof(struct nvme_dsm_range), /* dxfer_len */ 238 nda_default_timeout * 1000); /* timeout 30s */ 239 nvme_ns_trim_cmd(&nvmeio->cmd, softc->nsid, num_ranges); 240 } 241 242 static void 243 nda_nvme_write(struct nda_softc *softc, struct ccb_nvmeio *nvmeio, 244 void *payload, uint64_t lba, uint32_t len, uint32_t count) 245 { 246 cam_fill_nvmeio(nvmeio, 247 0, /* retries */ 248 ndadone, /* cbfcnp */ 249 CAM_DIR_OUT, /* flags */ 250 payload, /* data_ptr */ 251 len, /* dxfer_len */ 252 nda_default_timeout * 1000); /* timeout 30s */ 253 nvme_ns_write_cmd(&nvmeio->cmd, softc->nsid, lba, count); 254 } 255 256 static void 257 nda_nvme_rw_bio(struct nda_softc *softc, struct ccb_nvmeio *nvmeio, 258 struct bio *bp, uint32_t rwcmd) 259 { 260 int flags = rwcmd == NVME_OPC_READ ? CAM_DIR_IN : CAM_DIR_OUT; 261 void *payload; 262 uint64_t lba; 263 uint32_t count; 264 265 if (bp->bio_flags & BIO_UNMAPPED) { 266 flags |= CAM_DATA_BIO; 267 payload = bp; 268 } else { 269 payload = bp->bio_data; 270 } 271 272 lba = bp->bio_pblkno; 273 count = bp->bio_bcount / softc->disk->d_sectorsize; 274 275 cam_fill_nvmeio(nvmeio, 276 0, /* retries */ 277 ndadone, /* cbfcnp */ 278 flags, /* flags */ 279 payload, /* data_ptr */ 280 bp->bio_bcount, /* dxfer_len */ 281 nda_default_timeout * 1000); /* timeout 30s */ 282 nvme_ns_rw_cmd(&nvmeio->cmd, rwcmd, softc->nsid, lba, count); 283 } 284 285 static int 286 ndaopen(struct disk *dp) 287 { 288 struct cam_periph *periph; 289 struct nda_softc *softc; 290 int error; 291 292 periph = (struct cam_periph *)dp->d_drv1; 293 if (cam_periph_acquire(periph) != 0) { 294 return(ENXIO); 295 } 296 297 cam_periph_lock(periph); 298 if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { 299 cam_periph_unlock(periph); 300 cam_periph_release(periph); 301 return (error); 302 } 303 304 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, 305 ("ndaopen\n")); 306 307 softc = (struct nda_softc *)periph->softc; 308 softc->flags |= NDA_FLAG_OPEN; 309 310 cam_periph_unhold(periph); 311 cam_periph_unlock(periph); 312 return (0); 313 } 314 315 static int 316 ndaclose(struct disk *dp) 317 { 318 struct cam_periph *periph; 319 struct nda_softc *softc; 320 union ccb *ccb; 321 int error; 322 323 periph = (struct cam_periph *)dp->d_drv1; 324 softc = (struct nda_softc *)periph->softc; 325 cam_periph_lock(periph); 326 327 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, 328 ("ndaclose\n")); 329 330 if ((softc->flags & NDA_FLAG_DIRTY) != 0 && 331 (periph->flags & CAM_PERIPH_INVALID) == 0 && 332 cam_periph_hold(periph, PRIBIO) == 0) { 333 334 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); 335 nda_nvme_flush(softc, &ccb->nvmeio); 336 error = cam_periph_runccb(ccb, ndaerror, /*cam_flags*/0, 337 /*sense_flags*/0, softc->disk->d_devstat); 338 339 if (error != 0) 340 xpt_print(periph->path, "Synchronize cache failed\n"); 341 else 342 softc->flags &= ~NDA_FLAG_DIRTY; 343 xpt_release_ccb(ccb); 344 cam_periph_unhold(periph); 345 } 346 347 softc->flags &= ~NDA_FLAG_OPEN; 348 349 while (softc->refcount != 0) 350 cam_periph_sleep(periph, &softc->refcount, PRIBIO, "ndaclose", 1); 351 KASSERT(softc->outstanding_cmds == 0, 352 ("nda %d outstanding commands", softc->outstanding_cmds)); 353 cam_periph_unlock(periph); 354 cam_periph_release(periph); 355 return (0); 356 } 357 358 static void 359 ndaschedule(struct cam_periph *periph) 360 { 361 struct nda_softc *softc = (struct nda_softc *)periph->softc; 362 363 if (softc->state != NDA_STATE_NORMAL) 364 return; 365 366 cam_iosched_schedule(softc->cam_iosched, periph); 367 } 368 369 /* 370 * Actually translate the requested transfer into one the physical driver 371 * can understand. The transfer is described by a buf and will include 372 * only one physical transfer. 373 */ 374 static void 375 ndastrategy(struct bio *bp) 376 { 377 struct cam_periph *periph; 378 struct nda_softc *softc; 379 380 periph = (struct cam_periph *)bp->bio_disk->d_drv1; 381 softc = (struct nda_softc *)periph->softc; 382 383 cam_periph_lock(periph); 384 385 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ndastrategy(%p)\n", bp)); 386 387 /* 388 * If the device has been made invalid, error out 389 */ 390 if ((periph->flags & CAM_PERIPH_INVALID) != 0) { 391 cam_periph_unlock(periph); 392 biofinish(bp, NULL, ENXIO); 393 return; 394 } 395 396 if (bp->bio_cmd == BIO_DELETE) 397 softc->deletes++; 398 399 /* 400 * Place it in the queue of disk activities for this disk 401 */ 402 cam_iosched_queue_work(softc->cam_iosched, bp); 403 404 /* 405 * Schedule ourselves for performing the work. 406 */ 407 ndaschedule(periph); 408 cam_periph_unlock(periph); 409 410 return; 411 } 412 413 static int 414 ndadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) 415 { 416 struct cam_periph *periph; 417 struct nda_softc *softc; 418 u_int secsize; 419 struct ccb_nvmeio nvmeio; 420 struct disk *dp; 421 uint64_t lba; 422 uint32_t count; 423 int error = 0; 424 425 dp = arg; 426 periph = dp->d_drv1; 427 softc = (struct nda_softc *)periph->softc; 428 secsize = softc->disk->d_sectorsize; 429 lba = offset / secsize; 430 count = length / secsize; 431 432 if ((periph->flags & CAM_PERIPH_INVALID) != 0) 433 return (ENXIO); 434 435 /* xpt_get_ccb returns a zero'd allocation for the ccb, mimic that here */ 436 memset(&nvmeio, 0, sizeof(nvmeio)); 437 if (length > 0) { 438 xpt_setup_ccb(&nvmeio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 439 nvmeio.ccb_state = NDA_CCB_DUMP; 440 nda_nvme_write(softc, &nvmeio, virtual, lba, length, count); 441 error = cam_periph_runccb((union ccb *)&nvmeio, cam_periph_error, 442 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); 443 if (error != 0) 444 printf("Aborting dump due to I/O error %d.\n", error); 445 446 return (error); 447 } 448 449 /* Flush */ 450 xpt_setup_ccb(&nvmeio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 451 452 nvmeio.ccb_state = NDA_CCB_DUMP; 453 nda_nvme_flush(softc, &nvmeio); 454 error = cam_periph_runccb((union ccb *)&nvmeio, cam_periph_error, 455 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); 456 if (error != 0) 457 xpt_print(periph->path, "flush cmd failed\n"); 458 return (error); 459 } 460 461 static void 462 ndainit(void) 463 { 464 cam_status status; 465 466 /* 467 * Install a global async callback. This callback will 468 * receive async callbacks like "new device found". 469 */ 470 status = xpt_register_async(AC_FOUND_DEVICE, ndaasync, NULL, NULL); 471 472 if (status != CAM_REQ_CMP) { 473 printf("nda: Failed to attach master async callback " 474 "due to status 0x%x!\n", status); 475 } else if (nda_send_ordered) { 476 477 /* Register our event handlers */ 478 if ((EVENTHANDLER_REGISTER(power_suspend, ndasuspend, 479 NULL, EVENTHANDLER_PRI_LAST)) == NULL) 480 printf("ndainit: power event registration failed!\n"); 481 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, ndashutdown, 482 NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) 483 printf("ndainit: shutdown event registration failed!\n"); 484 } 485 } 486 487 /* 488 * Callback from GEOM, called when it has finished cleaning up its 489 * resources. 490 */ 491 static void 492 ndadiskgonecb(struct disk *dp) 493 { 494 struct cam_periph *periph; 495 496 periph = (struct cam_periph *)dp->d_drv1; 497 498 cam_periph_release(periph); 499 } 500 501 static void 502 ndaoninvalidate(struct cam_periph *periph) 503 { 504 struct nda_softc *softc; 505 506 softc = (struct nda_softc *)periph->softc; 507 508 /* 509 * De-register any async callbacks. 510 */ 511 xpt_register_async(0, ndaasync, periph, periph->path); 512 #ifdef CAM_IO_STATS 513 softc->invalidations++; 514 #endif 515 516 /* 517 * Return all queued I/O with ENXIO. 518 * XXX Handle any transactions queued to the card 519 * with XPT_ABORT_CCB. 520 */ 521 cam_iosched_flush(softc->cam_iosched, NULL, ENXIO); 522 523 disk_gone(softc->disk); 524 } 525 526 static void 527 ndacleanup(struct cam_periph *periph) 528 { 529 struct nda_softc *softc; 530 531 softc = (struct nda_softc *)periph->softc; 532 533 cam_periph_unlock(periph); 534 535 cam_iosched_fini(softc->cam_iosched); 536 537 /* 538 * If we can't free the sysctl tree, oh well... 539 */ 540 if ((softc->flags & NDA_FLAG_SCTX_INIT) != 0) { 541 #ifdef CAM_IO_STATS 542 if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0) 543 xpt_print(periph->path, 544 "can't remove sysctl stats context\n"); 545 #endif 546 if (sysctl_ctx_free(&softc->sysctl_ctx) != 0) 547 xpt_print(periph->path, 548 "can't remove sysctl context\n"); 549 } 550 551 disk_destroy(softc->disk); 552 free(softc, M_DEVBUF); 553 cam_periph_lock(periph); 554 } 555 556 static void 557 ndaasync(void *callback_arg, u_int32_t code, 558 struct cam_path *path, void *arg) 559 { 560 struct cam_periph *periph; 561 562 periph = (struct cam_periph *)callback_arg; 563 switch (code) { 564 case AC_FOUND_DEVICE: 565 { 566 struct ccb_getdev *cgd; 567 cam_status status; 568 569 cgd = (struct ccb_getdev *)arg; 570 if (cgd == NULL) 571 break; 572 573 if (cgd->protocol != PROTO_NVME) 574 break; 575 576 /* 577 * Allocate a peripheral instance for 578 * this device and start the probe 579 * process. 580 */ 581 status = cam_periph_alloc(ndaregister, ndaoninvalidate, 582 ndacleanup, ndastart, 583 "nda", CAM_PERIPH_BIO, 584 path, ndaasync, 585 AC_FOUND_DEVICE, cgd); 586 587 if (status != CAM_REQ_CMP 588 && status != CAM_REQ_INPROG) 589 printf("ndaasync: Unable to attach to new device " 590 "due to status 0x%x\n", status); 591 break; 592 } 593 case AC_ADVINFO_CHANGED: 594 { 595 uintptr_t buftype; 596 597 buftype = (uintptr_t)arg; 598 if (buftype == CDAI_TYPE_PHYS_PATH) { 599 struct nda_softc *softc; 600 601 softc = periph->softc; 602 disk_attr_changed(softc->disk, "GEOM::physpath", 603 M_NOWAIT); 604 } 605 break; 606 } 607 case AC_LOST_DEVICE: 608 default: 609 cam_periph_async(periph, code, path, arg); 610 break; 611 } 612 } 613 614 static void 615 ndasysctlinit(void *context, int pending) 616 { 617 struct cam_periph *periph; 618 struct nda_softc *softc; 619 char tmpstr[32], tmpstr2[16]; 620 621 periph = (struct cam_periph *)context; 622 623 /* periph was held for us when this task was enqueued */ 624 if ((periph->flags & CAM_PERIPH_INVALID) != 0) { 625 cam_periph_release(periph); 626 return; 627 } 628 629 softc = (struct nda_softc *)periph->softc; 630 snprintf(tmpstr, sizeof(tmpstr), "CAM NDA unit %d", periph->unit_number); 631 snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); 632 633 sysctl_ctx_init(&softc->sysctl_ctx); 634 softc->flags |= NDA_FLAG_SCTX_INIT; 635 softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, 636 SYSCTL_STATIC_CHILDREN(_kern_cam_nda), OID_AUTO, tmpstr2, 637 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr, "device_index"); 638 if (softc->sysctl_tree == NULL) { 639 printf("ndasysctlinit: unable to allocate sysctl tree\n"); 640 cam_periph_release(periph); 641 return; 642 } 643 644 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 645 OID_AUTO, "unmapped_io", CTLFLAG_RD, 646 &softc->unmappedio, 0, "Unmapped I/O leaf"); 647 648 SYSCTL_ADD_QUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 649 OID_AUTO, "deletes", CTLFLAG_RD, 650 &softc->deletes, "Number of BIO_DELETE requests"); 651 652 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, 653 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, 654 "trim_count", CTLFLAG_RD, &softc->trim_count, 655 "Total number of unmap/dsm commands sent"); 656 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, 657 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, 658 "trim_ranges", CTLFLAG_RD, &softc->trim_ranges, 659 "Total number of ranges in unmap/dsm commands"); 660 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, 661 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, 662 "trim_lbas", CTLFLAG_RD, &softc->trim_lbas, 663 "Total lbas in the unmap/dsm commands sent"); 664 665 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 666 OID_AUTO, "rotating", CTLFLAG_RD, &nda_rotating_media, 1, 667 "Rotating media"); 668 669 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 670 OID_AUTO, "flags", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 671 softc, 0, ndaflagssysctl, "A", 672 "Flags for drive"); 673 674 #ifdef CAM_IO_STATS 675 softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx, 676 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats", 677 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Statistics"); 678 if (softc->sysctl_stats_tree == NULL) { 679 printf("ndasysctlinit: unable to allocate sysctl tree for stats\n"); 680 cam_periph_release(periph); 681 return; 682 } 683 SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, 684 SYSCTL_CHILDREN(softc->sysctl_stats_tree), 685 OID_AUTO, "timeouts", CTLFLAG_RD, 686 &softc->timeouts, 0, 687 "Device timeouts reported by the SIM"); 688 SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, 689 SYSCTL_CHILDREN(softc->sysctl_stats_tree), 690 OID_AUTO, "errors", CTLFLAG_RD, 691 &softc->errors, 0, 692 "Transport errors reported by the SIM."); 693 SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, 694 SYSCTL_CHILDREN(softc->sysctl_stats_tree), 695 OID_AUTO, "pack_invalidations", CTLFLAG_RD, 696 &softc->invalidations, 0, 697 "Device pack invalidations."); 698 #endif 699 700 #ifdef CAM_TEST_FAILURE 701 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 702 OID_AUTO, "invalidate", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, 703 periph, 0, cam_periph_invalidate_sysctl, "I", 704 "Write 1 to invalidate the drive immediately"); 705 #endif 706 707 cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx, 708 softc->sysctl_tree); 709 710 cam_periph_release(periph); 711 } 712 713 static int 714 ndaflagssysctl(SYSCTL_HANDLER_ARGS) 715 { 716 struct sbuf sbuf; 717 struct nda_softc *softc = arg1; 718 int error; 719 720 sbuf_new_for_sysctl(&sbuf, NULL, 0, req); 721 if (softc->flags != 0) 722 sbuf_printf(&sbuf, "0x%b", (unsigned)softc->flags, NDA_FLAG_STRING); 723 else 724 sbuf_printf(&sbuf, "0"); 725 error = sbuf_finish(&sbuf); 726 sbuf_delete(&sbuf); 727 728 return (error); 729 } 730 731 static int 732 ndagetattr(struct bio *bp) 733 { 734 int ret; 735 struct cam_periph *periph; 736 737 if (g_handleattr_int(bp, "GEOM::canspeedup", nda_enable_biospeedup)) 738 return (EJUSTRETURN); 739 740 periph = (struct cam_periph *)bp->bio_disk->d_drv1; 741 cam_periph_lock(periph); 742 ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, 743 periph->path); 744 cam_periph_unlock(periph); 745 if (ret == 0) 746 bp->bio_completed = bp->bio_length; 747 return ret; 748 } 749 750 static cam_status 751 ndaregister(struct cam_periph *periph, void *arg) 752 { 753 struct nda_softc *softc; 754 struct disk *disk; 755 struct ccb_pathinq cpi; 756 const struct nvme_namespace_data *nsd; 757 const struct nvme_controller_data *cd; 758 char announce_buf[80]; 759 uint8_t flbas_fmt, lbads, vwc_present; 760 u_int maxio; 761 int quirks; 762 763 nsd = nvme_get_identify_ns(periph); 764 cd = nvme_get_identify_cntrl(periph); 765 766 softc = (struct nda_softc *)malloc(sizeof(*softc), M_DEVBUF, 767 M_NOWAIT | M_ZERO); 768 769 if (softc == NULL) { 770 printf("ndaregister: Unable to probe new device. " 771 "Unable to allocate softc\n"); 772 return(CAM_REQ_CMP_ERR); 773 } 774 775 if (cam_iosched_init(&softc->cam_iosched, periph) != 0) { 776 printf("ndaregister: Unable to probe new device. " 777 "Unable to allocate iosched memory\n"); 778 free(softc, M_DEVBUF); 779 return(CAM_REQ_CMP_ERR); 780 } 781 782 /* ident_data parsing */ 783 784 periph->softc = softc; 785 786 softc->quirks = NDA_Q_NONE; 787 788 xpt_path_inq(&cpi, periph->path); 789 790 TASK_INIT(&softc->sysctl_task, 0, ndasysctlinit, periph); 791 792 /* 793 * The name space ID is the lun, save it for later I/O 794 */ 795 softc->nsid = (uint32_t)xpt_path_lun_id(periph->path); 796 797 /* 798 * Register this media as a disk 799 */ 800 (void)cam_periph_hold(periph, PRIBIO); 801 cam_periph_unlock(periph); 802 snprintf(announce_buf, sizeof(announce_buf), 803 "kern.cam.nda.%d.quirks", periph->unit_number); 804 quirks = softc->quirks; 805 TUNABLE_INT_FETCH(announce_buf, &quirks); 806 softc->quirks = quirks; 807 cam_iosched_set_sort_queue(softc->cam_iosched, 0); 808 softc->disk = disk = disk_alloc(); 809 disk->d_rotation_rate = DISK_RR_NON_ROTATING; 810 disk->d_open = ndaopen; 811 disk->d_close = ndaclose; 812 disk->d_strategy = ndastrategy; 813 disk->d_getattr = ndagetattr; 814 disk->d_dump = ndadump; 815 disk->d_gone = ndadiskgonecb; 816 disk->d_name = "nda"; 817 disk->d_drv1 = periph; 818 disk->d_unit = periph->unit_number; 819 maxio = cpi.maxio; /* Honor max I/O size of SIM */ 820 if (maxio == 0) 821 maxio = DFLTPHYS; /* traditional default */ 822 else if (maxio > MAXPHYS) 823 maxio = MAXPHYS; /* for safety */ 824 disk->d_maxsize = maxio; 825 flbas_fmt = (nsd->flbas >> NVME_NS_DATA_FLBAS_FORMAT_SHIFT) & 826 NVME_NS_DATA_FLBAS_FORMAT_MASK; 827 lbads = (nsd->lbaf[flbas_fmt] >> NVME_NS_DATA_LBAF_LBADS_SHIFT) & 828 NVME_NS_DATA_LBAF_LBADS_MASK; 829 disk->d_sectorsize = 1 << lbads; 830 disk->d_mediasize = (off_t)(disk->d_sectorsize * nsd->nsze); 831 disk->d_delmaxsize = disk->d_mediasize; 832 disk->d_flags = DISKFLAG_DIRECT_COMPLETION; 833 if (nvme_ctrlr_has_dataset_mgmt(cd)) 834 disk->d_flags |= DISKFLAG_CANDELETE; 835 vwc_present = (cd->vwc >> NVME_CTRLR_DATA_VWC_PRESENT_SHIFT) & 836 NVME_CTRLR_DATA_VWC_PRESENT_MASK; 837 if (vwc_present) 838 disk->d_flags |= DISKFLAG_CANFLUSHCACHE; 839 if ((cpi.hba_misc & PIM_UNMAPPED) != 0) { 840 disk->d_flags |= DISKFLAG_UNMAPPED_BIO; 841 softc->unmappedio = 1; 842 } 843 /* 844 * d_ident and d_descr are both far bigger than the length of either 845 * the serial or model number strings. 846 */ 847 cam_strvis(disk->d_descr, cd->mn, 848 NVME_MODEL_NUMBER_LENGTH, sizeof(disk->d_descr)); 849 cam_strvis(disk->d_ident, cd->sn, 850 NVME_SERIAL_NUMBER_LENGTH, sizeof(disk->d_ident)); 851 disk->d_hba_vendor = cpi.hba_vendor; 852 disk->d_hba_device = cpi.hba_device; 853 disk->d_hba_subvendor = cpi.hba_subvendor; 854 disk->d_hba_subdevice = cpi.hba_subdevice; 855 snprintf(disk->d_attachment, sizeof(disk->d_attachment), 856 "%s%d", cpi.dev_name, cpi.unit_number); 857 disk->d_stripesize = disk->d_sectorsize; 858 disk->d_stripeoffset = 0; 859 disk->d_devstat = devstat_new_entry(periph->periph_name, 860 periph->unit_number, disk->d_sectorsize, 861 DEVSTAT_ALL_SUPPORTED, 862 DEVSTAT_TYPE_DIRECT | XPORT_DEVSTAT_TYPE(cpi.transport), 863 DEVSTAT_PRIORITY_DISK); 864 /* 865 * Add alias for older nvd drives to ease transition. 866 */ 867 /* disk_add_alias(disk, "nvd"); Have reports of this causing problems */ 868 869 /* 870 * Acquire a reference to the periph before we register with GEOM. 871 * We'll release this reference once GEOM calls us back (via 872 * ndadiskgonecb()) telling us that our provider has been freed. 873 */ 874 if (cam_periph_acquire(periph) != 0) { 875 xpt_print(periph->path, "%s: lost periph during " 876 "registration!\n", __func__); 877 cam_periph_lock(periph); 878 return (CAM_REQ_CMP_ERR); 879 } 880 disk_create(softc->disk, DISK_VERSION); 881 cam_periph_lock(periph); 882 cam_periph_unhold(periph); 883 884 snprintf(announce_buf, sizeof(announce_buf), 885 "%juMB (%ju %u byte sectors)", 886 (uintmax_t)((uintmax_t)disk->d_mediasize / (1024*1024)), 887 (uintmax_t)disk->d_mediasize / disk->d_sectorsize, 888 disk->d_sectorsize); 889 xpt_announce_periph(periph, announce_buf); 890 xpt_announce_quirks(periph, softc->quirks, NDA_Q_BIT_STRING); 891 892 /* 893 * Create our sysctl variables, now that we know 894 * we have successfully attached. 895 */ 896 if (cam_periph_acquire(periph) == 0) 897 taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); 898 899 /* 900 * Register for device going away and info about the drive 901 * changing (though with NVMe, it can't) 902 */ 903 xpt_register_async(AC_LOST_DEVICE | AC_ADVINFO_CHANGED, 904 ndaasync, periph, periph->path); 905 906 softc->state = NDA_STATE_NORMAL; 907 return(CAM_REQ_CMP); 908 } 909 910 static void 911 ndastart(struct cam_periph *periph, union ccb *start_ccb) 912 { 913 struct nda_softc *softc = (struct nda_softc *)periph->softc; 914 struct ccb_nvmeio *nvmeio = &start_ccb->nvmeio; 915 916 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ndastart\n")); 917 918 switch (softc->state) { 919 case NDA_STATE_NORMAL: 920 { 921 struct bio *bp; 922 923 bp = cam_iosched_next_bio(softc->cam_iosched); 924 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ndastart: bio %p\n", bp)); 925 if (bp == NULL) { 926 xpt_release_ccb(start_ccb); 927 break; 928 } 929 930 switch (bp->bio_cmd) { 931 case BIO_WRITE: 932 softc->flags |= NDA_FLAG_DIRTY; 933 /* FALLTHROUGH */ 934 case BIO_READ: 935 { 936 #ifdef CAM_TEST_FAILURE 937 int fail = 0; 938 939 /* 940 * Support the failure ioctls. If the command is a 941 * read, and there are pending forced read errors, or 942 * if a write and pending write errors, then fail this 943 * operation with EIO. This is useful for testing 944 * purposes. Also, support having every Nth read fail. 945 * 946 * This is a rather blunt tool. 947 */ 948 if (bp->bio_cmd == BIO_READ) { 949 if (softc->force_read_error) { 950 softc->force_read_error--; 951 fail = 1; 952 } 953 if (softc->periodic_read_error > 0) { 954 if (++softc->periodic_read_count >= 955 softc->periodic_read_error) { 956 softc->periodic_read_count = 0; 957 fail = 1; 958 } 959 } 960 } else { 961 if (softc->force_write_error) { 962 softc->force_write_error--; 963 fail = 1; 964 } 965 } 966 if (fail) { 967 biofinish(bp, NULL, EIO); 968 xpt_release_ccb(start_ccb); 969 ndaschedule(periph); 970 return; 971 } 972 #endif 973 KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 || 974 round_page(bp->bio_bcount + bp->bio_ma_offset) / 975 PAGE_SIZE == bp->bio_ma_n, 976 ("Short bio %p", bp)); 977 nda_nvme_rw_bio(softc, &start_ccb->nvmeio, bp, bp->bio_cmd == BIO_READ ? 978 NVME_OPC_READ : NVME_OPC_WRITE); 979 break; 980 } 981 case BIO_DELETE: 982 { 983 struct nvme_dsm_range *dsm_range, *dsm_end; 984 struct nda_trim_request *trim; 985 struct bio *bp1; 986 int ents; 987 uint32_t totalcount = 0, ranges = 0; 988 989 trim = malloc(sizeof(*trim), M_NVMEDA, M_ZERO | M_NOWAIT); 990 if (trim == NULL) { 991 biofinish(bp, NULL, ENOMEM); 992 xpt_release_ccb(start_ccb); 993 ndaschedule(periph); 994 return; 995 } 996 TAILQ_INIT(&trim->bps); 997 bp1 = bp; 998 ents = min(nitems(trim->dsm), nda_max_trim_entries); 999 dsm_range = trim->dsm; 1000 dsm_end = dsm_range + ents; 1001 do { 1002 TAILQ_INSERT_TAIL(&trim->bps, bp1, bio_queue); 1003 dsm_range->length = 1004 htole32(bp1->bio_bcount / softc->disk->d_sectorsize); 1005 dsm_range->starting_lba = 1006 htole64(bp1->bio_offset / softc->disk->d_sectorsize); 1007 ranges++; 1008 totalcount += dsm_range->length; 1009 dsm_range++; 1010 if (dsm_range >= dsm_end) 1011 break; 1012 bp1 = cam_iosched_next_trim(softc->cam_iosched); 1013 /* XXX -- Could collapse adjacent ranges, but we don't for now */ 1014 /* XXX -- Could limit based on total payload size */ 1015 } while (bp1 != NULL); 1016 start_ccb->ccb_trim = trim; 1017 nda_nvme_trim(softc, &start_ccb->nvmeio, trim->dsm, 1018 dsm_range - trim->dsm); 1019 start_ccb->ccb_state = NDA_CCB_TRIM; 1020 softc->trim_count++; 1021 softc->trim_ranges += ranges; 1022 softc->trim_lbas += totalcount; 1023 /* 1024 * Note: We can have multiple TRIMs in flight, so we don't call 1025 * cam_iosched_submit_trim(softc->cam_iosched); 1026 * since that forces the I/O scheduler to only schedule one at a time. 1027 * On NVMe drives, this is a performance disaster. 1028 */ 1029 goto out; 1030 } 1031 case BIO_FLUSH: 1032 nda_nvme_flush(softc, nvmeio); 1033 break; 1034 default: 1035 biofinish(bp, NULL, EOPNOTSUPP); 1036 xpt_release_ccb(start_ccb); 1037 ndaschedule(periph); 1038 return; 1039 } 1040 start_ccb->ccb_state = NDA_CCB_BUFFER_IO; 1041 start_ccb->ccb_bp = bp; 1042 out: 1043 start_ccb->ccb_h.flags |= CAM_UNLOCKED; 1044 softc->outstanding_cmds++; 1045 softc->refcount++; /* For submission only */ 1046 cam_periph_unlock(periph); 1047 xpt_action(start_ccb); 1048 cam_periph_lock(periph); 1049 softc->refcount--; /* Submission done */ 1050 1051 /* May have more work to do, so ensure we stay scheduled */ 1052 ndaschedule(periph); 1053 break; 1054 } 1055 } 1056 } 1057 1058 static void 1059 ndadone(struct cam_periph *periph, union ccb *done_ccb) 1060 { 1061 struct nda_softc *softc; 1062 struct ccb_nvmeio *nvmeio = &done_ccb->nvmeio; 1063 struct cam_path *path; 1064 int state; 1065 1066 softc = (struct nda_softc *)periph->softc; 1067 path = done_ccb->ccb_h.path; 1068 1069 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("ndadone\n")); 1070 1071 state = nvmeio->ccb_state & NDA_CCB_TYPE_MASK; 1072 switch (state) { 1073 case NDA_CCB_BUFFER_IO: 1074 case NDA_CCB_TRIM: 1075 { 1076 int error; 1077 1078 cam_periph_lock(periph); 1079 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1080 error = ndaerror(done_ccb, 0, 0); 1081 if (error == ERESTART) { 1082 /* A retry was scheduled, so just return. */ 1083 cam_periph_unlock(periph); 1084 return; 1085 } 1086 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 1087 cam_release_devq(path, 1088 /*relsim_flags*/0, 1089 /*reduction*/0, 1090 /*timeout*/0, 1091 /*getcount_only*/0); 1092 } else { 1093 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 1094 panic("REQ_CMP with QFRZN"); 1095 error = 0; 1096 } 1097 if (state == NDA_CCB_BUFFER_IO) { 1098 struct bio *bp; 1099 1100 bp = (struct bio *)done_ccb->ccb_bp; 1101 bp->bio_error = error; 1102 if (error != 0) { 1103 bp->bio_resid = bp->bio_bcount; 1104 bp->bio_flags |= BIO_ERROR; 1105 } else { 1106 bp->bio_resid = 0; 1107 } 1108 softc->outstanding_cmds--; 1109 1110 /* 1111 * We need to call cam_iosched before we call biodone so that we 1112 * don't measure any activity that happens in the completion 1113 * routine, which in the case of sendfile can be quite 1114 * extensive. 1115 */ 1116 cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb); 1117 xpt_release_ccb(done_ccb); 1118 ndaschedule(periph); 1119 cam_periph_unlock(periph); 1120 biodone(bp); 1121 } else { /* state == NDA_CCB_TRIM */ 1122 struct nda_trim_request *trim; 1123 struct bio *bp1, *bp2; 1124 TAILQ_HEAD(, bio) queue; 1125 1126 trim = nvmeio->ccb_trim; 1127 TAILQ_INIT(&queue); 1128 TAILQ_CONCAT(&queue, &trim->bps, bio_queue); 1129 free(trim, M_NVMEDA); 1130 1131 /* 1132 * Since we can have multiple trims in flight, we don't 1133 * need to call this here. 1134 * cam_iosched_trim_done(softc->cam_iosched); 1135 */ 1136 /* 1137 * The the I/O scheduler that we're finishing the I/O 1138 * so we can keep book. The first one we pass in the CCB 1139 * which has the timing information. The rest we pass in NULL 1140 * so we can keep proper counts. 1141 */ 1142 bp1 = TAILQ_FIRST(&queue); 1143 cam_iosched_bio_complete(softc->cam_iosched, bp1, done_ccb); 1144 xpt_release_ccb(done_ccb); 1145 softc->outstanding_cmds--; 1146 ndaschedule(periph); 1147 cam_periph_unlock(periph); 1148 while ((bp2 = TAILQ_FIRST(&queue)) != NULL) { 1149 TAILQ_REMOVE(&queue, bp2, bio_queue); 1150 bp2->bio_error = error; 1151 if (error != 0) { 1152 bp2->bio_flags |= BIO_ERROR; 1153 bp2->bio_resid = bp1->bio_bcount; 1154 } else 1155 bp2->bio_resid = 0; 1156 if (bp1 != bp2) 1157 cam_iosched_bio_complete(softc->cam_iosched, bp2, NULL); 1158 biodone(bp2); 1159 } 1160 } 1161 return; 1162 } 1163 case NDA_CCB_DUMP: 1164 /* No-op. We're polling */ 1165 return; 1166 default: 1167 break; 1168 } 1169 xpt_release_ccb(done_ccb); 1170 } 1171 1172 static int 1173 ndaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) 1174 { 1175 struct nda_softc *softc; 1176 struct cam_periph *periph; 1177 1178 periph = xpt_path_periph(ccb->ccb_h.path); 1179 softc = (struct nda_softc *)periph->softc; 1180 1181 switch (ccb->ccb_h.status & CAM_STATUS_MASK) { 1182 case CAM_CMD_TIMEOUT: 1183 #ifdef CAM_IO_STATS 1184 softc->timeouts++; 1185 #endif 1186 break; 1187 case CAM_REQ_ABORTED: 1188 case CAM_REQ_CMP_ERR: 1189 case CAM_REQ_TERMIO: 1190 case CAM_UNREC_HBA_ERROR: 1191 case CAM_DATA_RUN_ERR: 1192 case CAM_ATA_STATUS_ERROR: 1193 #ifdef CAM_IO_STATS 1194 softc->errors++; 1195 #endif 1196 break; 1197 default: 1198 break; 1199 } 1200 1201 return(cam_periph_error(ccb, cam_flags, sense_flags)); 1202 } 1203 1204 /* 1205 * Step through all NDA peripheral drivers, and if the device is still open, 1206 * sync the disk cache to physical media. 1207 */ 1208 static void 1209 ndaflush(void) 1210 { 1211 struct cam_periph *periph; 1212 struct nda_softc *softc; 1213 union ccb *ccb; 1214 int error; 1215 1216 CAM_PERIPH_FOREACH(periph, &ndadriver) { 1217 softc = (struct nda_softc *)periph->softc; 1218 1219 if (SCHEDULER_STOPPED()) { 1220 /* 1221 * If we paniced with the lock held or the periph is not 1222 * open, do not recurse. Otherwise, call ndadump since 1223 * that avoids the sleeping cam_periph_getccb does if no 1224 * CCBs are available. 1225 */ 1226 if (!cam_periph_owned(periph) && 1227 (softc->flags & NDA_FLAG_OPEN)) { 1228 ndadump(softc->disk, NULL, 0, 0, 0); 1229 } 1230 continue; 1231 } 1232 1233 /* 1234 * We only sync the cache if the drive is still open 1235 */ 1236 cam_periph_lock(periph); 1237 if ((softc->flags & NDA_FLAG_OPEN) == 0) { 1238 cam_periph_unlock(periph); 1239 continue; 1240 } 1241 1242 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); 1243 nda_nvme_flush(softc, &ccb->nvmeio); 1244 error = cam_periph_runccb(ccb, ndaerror, /*cam_flags*/0, 1245 /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY, 1246 softc->disk->d_devstat); 1247 if (error != 0) 1248 xpt_print(periph->path, "Synchronize cache failed\n"); 1249 xpt_release_ccb(ccb); 1250 cam_periph_unlock(periph); 1251 } 1252 } 1253 1254 static void 1255 ndashutdown(void *arg, int howto) 1256 { 1257 1258 ndaflush(); 1259 } 1260 1261 static void 1262 ndasuspend(void *arg) 1263 { 1264 1265 ndaflush(); 1266 } 1267