/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2015 Netflix, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * derived from ata_xpt.c: Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/nvme/nvme_all.h>
#include <machine/stdarg.h>	/* for xpt_print below */
#include "opt_cam.h"

struct nvme_quirk_entry {
	u_int quirks;
#define CAM_QUIRK_MAXTAGS 1
	u_int mintags;
	u_int maxtags;
};

/* Not even sure why we need this */
static periph_init_t nvme_probe_periph_init;

static struct periph_driver nvme_probe_driver =
{
	nvme_probe_periph_init, "nvme_probe",
	TAILQ_HEAD_INITIALIZER(nvme_probe_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(nvme_probe, nvme_probe_driver);

typedef enum {
	NVME_PROBE_IDENTIFY,
	NVME_PROBE_DONE,
	NVME_PROBE_INVALID,
	NVME_PROBE_RESET
} nvme_probe_action;

static char *nvme_probe_action_text[] = {
	"NVME_PROBE_IDENTIFY",
	"NVME_PROBE_DONE",
	"NVME_PROBE_INVALID",
	"NVME_PROBE_RESET",
};

#define NVME_PROBE_SET_ACTION(softc, newaction)				\
do {									\
	char **text;							\
	text = nvme_probe_action_text;					\
	CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE,		\
	    ("Probe %s to %s\n", text[(softc)->action],			\
	    text[(newaction)]));					\
	(softc)->action = (newaction);					\
} while(0)

typedef enum {
	NVME_PROBE_NO_ANNOUNCE = 0x04
} nvme_probe_flags;

typedef struct {
	TAILQ_HEAD(, ccb_hdr) request_ccbs;
	nvme_probe_action action;
	nvme_probe_flags flags;
	int restart;
	struct cam_periph *periph;
} nvme_probe_softc;

static struct nvme_quirk_entry nvme_quirk_table[] =
{
	{
//		{
//		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
//		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
//		},
		.quirks = 0, .mintags = 0, .maxtags = 0
	},
};

static const int nvme_quirk_table_size =
	sizeof(nvme_quirk_table) / sizeof(*nvme_quirk_table);

static cam_status	nvme_probe_register(struct cam_periph *periph,
				      void *arg);
static void	 nvme_probe_schedule(struct cam_periph *nvme_probe_periph);
static void	 nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb);
static void	 nvme_probe_cleanup(struct cam_periph *periph);
//static void	 nvme_find_quirk(struct cam_ed *device);
static void	 nvme_scan_lun(struct cam_periph *periph,
			       struct cam_path *path, cam_flags flags,
			       union ccb *ccb);
static struct cam_ed *
		 nvme_alloc_device(struct cam_eb *bus, struct cam_et *target,
				   lun_id_t lun_id);
static void	 nvme_device_transport(struct cam_path *path);
static void	 nvme_dev_async(u_int32_t async_code,
				struct cam_eb *bus,
				struct cam_et *target,
				struct cam_ed *device,
				void *async_arg);
static void	 nvme_action(union ccb *start_ccb);
static void	 nvme_announce_periph(struct cam_periph *periph);
static void	 nvme_proto_announce(struct cam_ed *device);
static void	 nvme_proto_denounce(struct cam_ed *device);
static void	 nvme_proto_debug_out(union ccb *ccb);

static struct xpt_xport_ops nvme_xport_ops = {
	.alloc_device = nvme_alloc_device,
	.action = nvme_action,
	.async = nvme_dev_async,
	.announce = nvme_announce_periph,
};
#define NVME_XPT_XPORT(x, X)			\
static struct xpt_xport nvme_xport_ ## x = {	\
	.xport = XPORT_ ## X,			\
	.name = #x,				\
	.ops = &nvme_xport_ops,			\
};						\
CAM_XPT_XPORT(nvme_xport_ ## x);

NVME_XPT_XPORT(nvme, NVME);

#undef NVME_XPT_XPORT

static struct xpt_proto_ops nvme_proto_ops = {
	.announce = nvme_proto_announce,
	.denounce = nvme_proto_denounce,
	.debug_out = nvme_proto_debug_out,
};
static struct xpt_proto nvme_proto = {
	.proto = PROTO_NVME,
	.name = "nvme",
	.ops = &nvme_proto_ops,
};
CAM_XPT_PROTO(nvme_proto);

static void
nvme_probe_periph_init()
{

}

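/*
 * The nvme_probe periph is attached by nvme_scan_lun() below.  Registration
 * allocates the probe softc and queues the CCB that requested the scan;
 * nvme_probe_schedule() then arranges for nvme_probe_start() to run and
 * finish the probe.
 */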
static cam_status
nvme_probe_register(struct cam_periph *periph, void *arg)
{
	union ccb *request_ccb;	/* CCB representing the probe request */
	cam_status status;
	nvme_probe_softc *softc;

	request_ccb = (union ccb *)arg;
	if (request_ccb == NULL) {
		printf("nvme_probe_register: no probe CCB, "
		       "can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (nvme_probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_ZERO | M_NOWAIT);

	if (softc == NULL) {
		printf("nvme_probe_register: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}
	TAILQ_INIT(&softc->request_ccbs);
	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
			  periph_links.tqe);
	softc->flags = 0;
	periph->softc = softc;
	softc->periph = periph;
	softc->action = NVME_PROBE_INVALID;
	status = cam_periph_acquire(periph);
	if (status != CAM_REQ_CMP) {
		return (status);
	}
	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n"));

//	nvme_device_transport(periph->path);
	nvme_probe_schedule(periph);

	return(CAM_REQ_CMP);
}

static void
nvme_probe_schedule(struct cam_periph *periph)
{
	union ccb *ccb;
	nvme_probe_softc *softc;

	softc = (nvme_probe_softc *)periph->softc;
	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);

	NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY);

	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
		softc->flags |= NVME_PROBE_NO_ANNOUNCE;
	else
		softc->flags &= ~NVME_PROBE_NO_ANNOUNCE;

	xpt_schedule(periph, CAM_PRIORITY_XPT);
}

static void
nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ccb_nvmeio *nvmeio;
	struct ccb_scsiio *csio;
	nvme_probe_softc *softc;
	struct cam_path *path;
	const struct nvme_namespace_data *nvme_data;
	lun_id_t lun;

	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_start\n"));

	softc = (nvme_probe_softc *)periph->softc;
	path = start_ccb->ccb_h.path;
	nvmeio = &start_ccb->nvmeio;
	csio = &start_ccb->csio;
	nvme_data = periph->path->device->nvme_data;

	if (softc->restart) {
		softc->restart = 0;
		if (periph->path->device->flags & CAM_DEV_UNCONFIGURED)
			NVME_PROBE_SET_ACTION(softc, NVME_PROBE_RESET);
		else
			NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY);
	}

	/*
	 * Other transports have to ask their SIM to do a lot of action.
	 * NVMe doesn't, so don't do the dance. Just do things
	 * directly.
	 */
	switch (softc->action) {
	case NVME_PROBE_RESET:
		/* FALLTHROUGH */
	case NVME_PROBE_IDENTIFY:
		nvme_device_transport(path);
		/*
		 * Test for lun == CAM_LUN_WILDCARD is lame, but
		 * appears to be necessary here. XXX
		 */
		lun = xpt_path_lun_id(periph->path);
		if (lun == CAM_LUN_WILDCARD ||
		    periph->path->device->flags & CAM_DEV_UNCONFIGURED) {
			path->device->flags &= ~CAM_DEV_UNCONFIGURED;
			xpt_acquire_device(path->device);
			start_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action(start_ccb);
			xpt_async(AC_FOUND_DEVICE, path, start_ccb);
		}
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_DONE);
		break;
	default:
		panic("nvme_probe_start: invalid action state 0x%x\n", softc->action);
	}
	/*
	 * Probing is now done. We need to complete any lingering items
	 * in the queue, though there shouldn't be any.
	 */
	xpt_release_ccb(start_ccb);
	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n"));
	while ((start_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs))) {
		TAILQ_REMOVE(&softc->request_ccbs,
		    &start_ccb->ccb_h, periph_links.tqe);
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(start_ccb);
	}
	cam_periph_invalidate(periph);
	cam_periph_release_locked(periph);
}

static void
nvme_probe_cleanup(struct cam_periph *periph)
{

	free(periph->softc, M_CAMXPT);
}

#if 0
/* XXX should be used, don't delete */
static void
nvme_find_quirk(struct cam_ed *device)
{
	struct nvme_quirk_entry *quirk;
	caddr_t match;

	match = cam_quirkmatch((caddr_t)&device->nvme_data,
			       (caddr_t)nvme_quirk_table,
			       nvme_quirk_table_size,
			       sizeof(*nvme_quirk_table), nvme_identify_match);

	if (match == NULL)
		panic("xpt_find_quirk: device didn't match wildcard entry!!");

	quirk = (struct nvme_quirk_entry *)match;
	device->quirk = quirk;
	if (quirk->quirks & CAM_QUIRK_MAXTAGS) {
		device->mintags = quirk->mintags;
		device->maxtags = quirk->maxtags;
	}
}
#endif

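/*
 * Scan a single LUN.  If an nvme_probe periph already exists on this path,
 * queue the request CCB to it and ask it to restart; otherwise allocate a
 * new probe periph, which kicks the probe off via nvme_probe_register().
 */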
static void
nvme_scan_lun(struct cam_periph *periph, struct cam_path *path,
	     cam_flags flags, union ccb *request_ccb)
{
	struct ccb_pathinq cpi;
	cam_status status;
	struct cam_periph *old_periph;
	int lock;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun\n"));

	xpt_path_inq(&cpi, path);

	if (cpi.ccb_h.status != CAM_REQ_CMP) {
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = cpi.ccb_h.status;
			xpt_done(request_ccb);
		}
		return;
	}

	if (xpt_path_lun_id(path) == CAM_LUN_WILDCARD) {
		CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun ignoring bus\n"));
		request_ccb->ccb_h.status = CAM_REQ_CMP;	/* XXX signal error ? */
		xpt_done(request_ccb);
		return;
	}

	lock = (xpt_path_owned(path) == 0);
	if (lock)
		xpt_path_lock(path);
	if ((old_periph = cam_periph_find(path, "nvme_probe")) != NULL) {
		if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) {
			nvme_probe_softc *softc;

			softc = (nvme_probe_softc *)old_periph->softc;
			TAILQ_INSERT_TAIL(&softc->request_ccbs,
			    &request_ccb->ccb_h, periph_links.tqe);
			softc->restart = 1;
			CAM_DEBUG(path, CAM_DEBUG_TRACE,
			    ("restarting nvme_probe device\n"));
		} else {
			request_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			CAM_DEBUG(path, CAM_DEBUG_TRACE,
			    ("Failing to restart nvme_probe device\n"));
			xpt_done(request_ccb);
		}
	} else {
		CAM_DEBUG(path, CAM_DEBUG_TRACE,
		    ("Adding nvme_probe device\n"));
		status = cam_periph_alloc(nvme_probe_register, NULL, nvme_probe_cleanup,
					  nvme_probe_start, "nvme_probe",
					  CAM_PERIPH_BIO,
					  request_ccb->ccb_h.path, NULL, 0,
					  request_ccb);

		if (status != CAM_REQ_CMP) {
			xpt_print(path, "xpt_scan_lun: cam_alloc_periph "
			    "returned an error, can't continue probe\n");
			request_ccb->ccb_h.status = status;
			xpt_done(request_ccb);
		}
	}
	if (lock)
		xpt_path_unlock(path);
}

static struct cam_ed *
nvme_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
	struct nvme_quirk_entry *quirk;
	struct cam_ed *device;

	device = xpt_alloc_device(bus, target, lun_id);
	if (device == NULL)
		return (NULL);

	/*
	 * Take the default quirk entry until we have inquiry
	 * data from nvme and can determine a better quirk to use.
	 */
	quirk = &nvme_quirk_table[nvme_quirk_table_size - 1];
	device->quirk = (void *)quirk;
	device->mintags = 0;
	device->maxtags = 0;
	device->inq_flags = 0;
	device->queue_flags = 0;
	device->device_id = NULL;	/* XXX Need to set this somewhere */
	device->device_id_len = 0;
	device->serial_num = NULL;	/* XXX Need to set this somewhere */
	device->serial_num_len = 0;
	return (device);
}

static void
nvme_device_transport(struct cam_path *path)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cts;
	/* XXX get data from nvme namespace and other info ??? */

	/* Get transport information from the SIM */
	xpt_path_inq(&cpi, path);

	path->device->transport = cpi.transport;
	path->device->transport_version = cpi.transport_version;

	path->device->protocol = cpi.protocol;
	path->device->protocol_version = cpi.protocol_version;

	/* Tell the controller what we think */
	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE);
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cts.transport = path->device->transport;
	cts.transport_version = path->device->transport_version;
	cts.protocol = path->device->protocol;
	cts.protocol_version = path->device->protocol_version;
	cts.proto_specific.valid = 0;
	cts.xport_specific.valid = 0;
	xpt_action((union ccb *)&cts);
}

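/*
 * Handle XPT_DEV_ADVINFO requests.  Fetches return the cached device ID,
 * serial number, physical path, controller data, or namespace data; only
 * the physical path may be stored.
 */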
static void
nvme_dev_advinfo(union ccb *start_ccb)
{
	struct cam_ed *device;
	struct ccb_dev_advinfo *cdai;
	off_t amt;

	start_ccb->ccb_h.status = CAM_REQ_INVALID;
	device = start_ccb->ccb_h.path->device;
	cdai = &start_ccb->cdai;
	switch(cdai->buftype) {
	case CDAI_TYPE_SCSI_DEVID:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		cdai->provsiz = device->device_id_len;
		if (device->device_id_len == 0)
			break;
		amt = device->device_id_len;
		if (cdai->provsiz > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->device_id, amt);
		break;
	case CDAI_TYPE_SERIAL_NUM:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		cdai->provsiz = device->serial_num_len;
		if (device->serial_num_len == 0)
			break;
		amt = device->serial_num_len;
		if (cdai->provsiz > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->serial_num, amt);
		break;
	case CDAI_TYPE_PHYS_PATH:
		if (cdai->flags & CDAI_FLAG_STORE) {
			if (device->physpath != NULL)
				free(device->physpath, M_CAMXPT);
			device->physpath_len = cdai->bufsiz;
			/* Clear existing buffer if zero length */
			if (cdai->bufsiz == 0)
				break;
			device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT);
			if (device->physpath == NULL) {
				start_ccb->ccb_h.status = CAM_REQ_ABORTED;
				return;
			}
			memcpy(device->physpath, cdai->buf, cdai->bufsiz);
		} else {
			cdai->provsiz = device->physpath_len;
			if (device->physpath_len == 0)
				break;
			amt = device->physpath_len;
			if (cdai->provsiz > cdai->bufsiz)
				amt = cdai->bufsiz;
			memcpy(cdai->buf, device->physpath, amt);
		}
		break;
	case CDAI_TYPE_NVME_CNTRL:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		amt = sizeof(struct nvme_controller_data);
		cdai->provsiz = amt;
		if (amt > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->nvme_cdata, amt);
		break;
	case CDAI_TYPE_NVME_NS:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		amt = sizeof(struct nvme_namespace_data);
		cdai->provsiz = amt;
		if (amt > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->nvme_data, amt);
		break;
	default:
		return;
	}
	start_ccb->ccb_h.status = CAM_REQ_CMP;

	if (cdai->flags & CDAI_FLAG_STORE) {
		xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path,
			  (void *)(uintptr_t)cdai->buftype);
	}
}

static void
nvme_action(union ccb *start_ccb)
{
	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("nvme_action: func= %#x\n", start_ccb->ccb_h.func_code));

	switch (start_ccb->ccb_h.func_code) {
	case XPT_SCAN_BUS:
	case XPT_SCAN_TGT:
	case XPT_SCAN_LUN:
		nvme_scan_lun(start_ccb->ccb_h.path->periph,
			      start_ccb->ccb_h.path, start_ccb->crcn.flags,
			      start_ccb);
		break;
	case XPT_DEV_ADVINFO:
		nvme_dev_advinfo(start_ccb);
		break;

	default:
		xpt_action_default(start_ccb);
		break;
	}
}

/*
 * Handle any per-device event notifications that require action by the XPT.
 */
static void
nvme_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
	      struct cam_ed *device, void *async_arg)
{

	/*
	 * We only need to handle events for real devices.
	 */
	if (target->target_id == CAM_TARGET_WILDCARD
	 || device->lun_id == CAM_LUN_WILDCARD)
		return;

	if (async_code == AC_LOST_DEVICE &&
	    (device->flags & CAM_DEV_UNCONFIGURED) == 0) {
		device->flags |= CAM_DEV_UNCONFIGURED;
		xpt_release_device(device);
	}
}

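/*
 * Announce the periph: ask the SIM for the current transport settings and
 * report the NVMe spec version along with the negotiated and maximum PCIe
 * link width and speed.
 */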
static void
nvme_announce_periph(struct cam_periph *periph)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cts;
	struct cam_path *path = periph->path;
	struct ccb_trans_settings_nvme *nvmex;

	cam_periph_assert(periph, MA_OWNED);

	/* Ask the SIM for connection details */
	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		return;
	nvmex = &cts.xport_specific.nvme;

	/* Ask the SIM for its base transfer speed */
	xpt_path_inq(&cpi, periph->path);
	printf("%s%d: nvme version %d.%d x%d (max x%d) lanes PCIe Gen%d (max Gen%d) link",
	    periph->periph_name, periph->unit_number,
	    NVME_MAJOR(nvmex->spec),
	    NVME_MINOR(nvmex->spec),
	    nvmex->lanes, nvmex->max_lanes,
	    nvmex->speed, nvmex->max_speed);
	printf("\n");
}

static void
nvme_proto_announce(struct cam_ed *device)
{
	struct sbuf sb;
	char buffer[120];

	sbuf_new(&sb, buffer, sizeof(buffer), SBUF_FIXEDLEN);
	nvme_print_ident(device->nvme_cdata, device->nvme_data, &sb);
	sbuf_finish(&sb);
	sbuf_putbuf(&sb);
}

static void
nvme_proto_denounce(struct cam_ed *device)
{

	nvme_proto_announce(device);
}

static void
nvme_proto_debug_out(union ccb *ccb)
{
	char cdb_str[(sizeof(struct nvme_command) * 3) + 1];

	if (ccb->ccb_h.func_code != XPT_NVME_IO)
		return;

	CAM_DEBUG(ccb->ccb_h.path,
	    CAM_DEBUG_CDB,("%s. NCB: %s\n", nvme_op_string(&ccb->nvmeio.cmd),
		nvme_cmd_string(&ccb->nvmeio.cmd, cdb_str, sizeof(cdb_str))));
}