/*-
 * Copyright (c) 2015 Netflix, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * derived from ata_xpt.c: Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/nvme/nvme_all.h>
#include <machine/stdarg.h>	/* for xpt_print below */
#include "opt_cam.h"

struct nvme_quirk_entry {
	u_int quirks;
#define CAM_QUIRK_MAXTAGS 1
	u_int mintags;
	u_int maxtags;
};

/* Not even sure why we need this */
static periph_init_t nvme_probe_periph_init;

static struct periph_driver nvme_probe_driver =
{
	nvme_probe_periph_init, "nvme_probe",
	TAILQ_HEAD_INITIALIZER(nvme_probe_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(nvme_probe, nvme_probe_driver);

typedef enum {
	NVME_PROBE_IDENTIFY,
	NVME_PROBE_DONE,
	NVME_PROBE_INVALID,
	NVME_PROBE_RESET
} nvme_probe_action;

static char *nvme_probe_action_text[] = {
	"NVME_PROBE_IDENTIFY",
	"NVME_PROBE_DONE",
	"NVME_PROBE_INVALID",
	"NVME_PROBE_RESET",
};

#define NVME_PROBE_SET_ACTION(softc, newaction)				\
do {									\
	char **text;							\
	text = nvme_probe_action_text;					\
	CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE,		\
	    ("Probe %s to %s\n", text[(softc)->action],			\
	    text[(newaction)]));					\
	(softc)->action = (newaction);					\
} while(0)

typedef enum {
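	/*
	 * Set when the caller already expects an inquiry change
	 * (CAM_EXPECT_INQ_CHANGE), so the device need not be re-announced.
	 */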
	NVME_PROBE_NO_ANNOUNCE = 0x04
} nvme_probe_flags;

typedef struct {
	TAILQ_HEAD(, ccb_hdr) request_ccbs;
	nvme_probe_action action;
	nvme_probe_flags flags;
	int		restart;
	struct cam_periph *periph;
} nvme_probe_softc;

static struct nvme_quirk_entry nvme_quirk_table[] =
{
	{
//		{
//		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
//		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
//		},
		.quirks = 0, .mintags = 0, .maxtags = 0
	},
};

static const int nvme_quirk_table_size =
	sizeof(nvme_quirk_table) / sizeof(*nvme_quirk_table);

static cam_status	nvme_probe_register(struct cam_periph *periph,
				      void *arg);
static void	 nvme_probe_schedule(struct cam_periph *nvme_probe_periph);
static void	 nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb);
static void	 nvme_probe_cleanup(struct cam_periph *periph);
//static void	 nvme_find_quirk(struct cam_ed *device);
static void	 nvme_scan_lun(struct cam_periph *periph,
			       struct cam_path *path, cam_flags flags,
			       union ccb *ccb);
static struct cam_ed *
		 nvme_alloc_device(struct cam_eb *bus, struct cam_et *target,
				   lun_id_t lun_id);
static void	 nvme_device_transport(struct cam_path *path);
static void	 nvme_dev_async(u_int32_t async_code,
				struct cam_eb *bus,
				struct cam_et *target,
				struct cam_ed *device,
				void *async_arg);
static void	 nvme_action(union ccb *start_ccb);
static void	 nvme_announce_periph(struct cam_periph *periph);
static void	 nvme_proto_announce(struct cam_ed *device);
static void	 nvme_proto_denounce(struct cam_ed *device);
static void	 nvme_proto_debug_out(union ccb *ccb);

static struct xpt_xport_ops nvme_xport_ops = {
	.alloc_device = nvme_alloc_device,
	.action = nvme_action,
	.async = nvme_dev_async,
	.announce = nvme_announce_periph,
};
#define NVME_XPT_XPORT(x, X)			\
static struct xpt_xport nvme_xport_ ## x = {	\
	.xport = XPORT_ ## X,			\
	.name = #x,				\
	.ops = &nvme_xport_ops,			\
};						\
CAM_XPT_XPORT(nvme_xport_ ## x);

NVME_XPT_XPORT(nvme, NVME);

#undef NVME_XPT_XPORT

static struct xpt_proto_ops nvme_proto_ops = {
	.announce = nvme_proto_announce,
	.denounce = nvme_proto_denounce,
	.debug_out = nvme_proto_debug_out,
};
static struct xpt_proto nvme_proto = {
	.proto = PROTO_NVME,
	.name = "nvme",
	.ops = &nvme_proto_ops,
};
CAM_XPT_PROTO(nvme_proto);

static void
nvme_probe_periph_init(void)
{

}

static cam_status
nvme_probe_register(struct cam_periph *periph, void *arg)
{
	union ccb *request_ccb;	/* CCB representing the probe request */
	cam_status status;
	nvme_probe_softc *softc;

	request_ccb = (union ccb *)arg;
	if (request_ccb == NULL) {
		printf("nvme_probe_register: no probe CCB, "
		       "can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (nvme_probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_ZERO | M_NOWAIT);

	if (softc == NULL) {
		printf("nvme_probe_register: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}
	TAILQ_INIT(&softc->request_ccbs);
	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
			  periph_links.tqe);
	softc->flags = 0;
	periph->softc = softc;
	softc->periph = periph;
	softc->action = NVME_PROBE_INVALID;
	status = cam_periph_acquire(periph);
	if (status != CAM_REQ_CMP) {
		return (status);
	}
	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n"));

//	nvme_device_transport(periph->path);
	nvme_probe_schedule(periph);

	return(CAM_REQ_CMP);
}

static void
nvme_probe_schedule(struct cam_periph *periph)
{
	union ccb *ccb;
	nvme_probe_softc *softc;

	softc = (nvme_probe_softc *)periph->softc;
	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);

	NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY);

	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
		softc->flags |= NVME_PROBE_NO_ANNOUNCE;
	else
		softc->flags &= ~NVME_PROBE_NO_ANNOUNCE;

	xpt_schedule(periph, CAM_PRIORITY_XPT);
}

static void
nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ccb_nvmeio *nvmeio;
	struct ccb_scsiio *csio;
	nvme_probe_softc *softc;
	struct cam_path *path;
	const struct nvme_namespace_data *nvme_data;
	lun_id_t lun;

	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_start\n"));

	softc = (nvme_probe_softc *)periph->softc;
	path = start_ccb->ccb_h.path;
	nvmeio = &start_ccb->nvmeio;
	csio = &start_ccb->csio;
	nvme_data = periph->path->device->nvme_data;

	if (softc->restart) {
		softc->restart = 0;
		if (periph->path->device->flags & CAM_DEV_UNCONFIGURED)
			NVME_PROBE_SET_ACTION(softc, NVME_PROBE_RESET);
		else
			NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY);
	}

	/*
	 * Other transports have to ask their SIM to do a lot of action.
	 * NVMe doesn't, so don't do the dance. Just do things
	 * directly.
	 */
	switch (softc->action) {
	case NVME_PROBE_RESET:
		/* FALLTHROUGH */
	case NVME_PROBE_IDENTIFY:
		nvme_device_transport(path);
		/*
		 * Test for lun == CAM_LUN_WILDCARD is lame, but
		 * appears to be necessary here. XXX
		 */
		lun = xpt_path_lun_id(periph->path);
		if (lun == CAM_LUN_WILDCARD ||
		    periph->path->device->flags & CAM_DEV_UNCONFIGURED) {
			path->device->flags &= ~CAM_DEV_UNCONFIGURED;
			xpt_acquire_device(path->device);
			start_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action(start_ccb);
			xpt_async(AC_FOUND_DEVICE, path, start_ccb);
		}
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_DONE);
		break;
	default:
		panic("nvme_probe_start: invalid action state 0x%x\n", softc->action);
	}
	/*
	 * Probing is now done. We need to complete any lingering items
	 * in the queue, though there shouldn't be any.
	 */
	xpt_release_ccb(start_ccb);
	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n"));
	while ((start_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs))) {
		TAILQ_REMOVE(&softc->request_ccbs,
		    &start_ccb->ccb_h, periph_links.tqe);
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(start_ccb);
	}
	cam_periph_invalidate(periph);
	/* Can't release periph since we hit a (possibly bogus) assertion */
//	cam_periph_release_locked(periph);
}

static void
nvme_probe_cleanup(struct cam_periph *periph)
{

	free(periph->softc, M_CAMXPT);
}

#if 0
/* XXX should be used, don't delete */
static void
nvme_find_quirk(struct cam_ed *device)
{
	struct nvme_quirk_entry *quirk;
	caddr_t	match;

	match = cam_quirkmatch((caddr_t)&device->nvme_data,
			       (caddr_t)nvme_quirk_table,
			       nvme_quirk_table_size,
			       sizeof(*nvme_quirk_table), nvme_identify_match);

	if (match == NULL)
		panic("xpt_find_quirk: device didn't match wildcard entry!!");

	quirk = (struct nvme_quirk_entry *)match;
	device->quirk = quirk;
	if (quirk->quirks & CAM_QUIRK_MAXTAGS) {
		device->mintags = quirk->mintags;
		device->maxtags = quirk->maxtags;
	}
}
#endif

static void
nvme_scan_lun(struct cam_periph *periph, struct cam_path *path,
	     cam_flags flags, union ccb *request_ccb)
{
	struct ccb_pathinq cpi;
	cam_status status;
	struct cam_periph *old_periph;
	int lock;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun\n"));

	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	if (cpi.ccb_h.status != CAM_REQ_CMP) {
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = cpi.ccb_h.status;
			xpt_done(request_ccb);
		}
		return;
	}

	if (xpt_path_lun_id(path) == CAM_LUN_WILDCARD) {
		CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun ignoring bus\n"));
		request_ccb->ccb_h.status = CAM_REQ_CMP;	/* XXX signal error ? */
		xpt_done(request_ccb);
		return;
	}

	lock = (xpt_path_owned(path) == 0);
	if (lock)
		xpt_path_lock(path);
	if ((old_periph = cam_periph_find(path, "nvme_probe")) != NULL) {
		if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) {
			nvme_probe_softc *softc;

			softc = (nvme_probe_softc *)old_periph->softc;
			TAILQ_INSERT_TAIL(&softc->request_ccbs,
			    &request_ccb->ccb_h, periph_links.tqe);
			softc->restart = 1;
			CAM_DEBUG(path, CAM_DEBUG_TRACE,
			    ("restarting nvme_probe device\n"));
		} else {
			request_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			CAM_DEBUG(path, CAM_DEBUG_TRACE,
			    ("Failing to restart nvme_probe device\n"));
			xpt_done(request_ccb);
		}
	} else {
		CAM_DEBUG(path, CAM_DEBUG_TRACE,
		    ("Adding nvme_probe device\n"));
		status = cam_periph_alloc(nvme_probe_register, NULL, nvme_probe_cleanup,
					  nvme_probe_start, "nvme_probe",
					  CAM_PERIPH_BIO,
					  request_ccb->ccb_h.path, NULL, 0,
					  request_ccb);

		if (status != CAM_REQ_CMP) {
			xpt_print(path, "nvme_scan_lun: cam_periph_alloc "
			    "returned an error, can't continue probe\n");
			request_ccb->ccb_h.status = status;
			xpt_done(request_ccb);
		}
	}
	if (lock)
		xpt_path_unlock(path);
}

static struct cam_ed *
nvme_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
	struct nvme_quirk_entry *quirk;
	struct cam_ed *device;

	device = xpt_alloc_device(bus, target, lun_id);
	if (device == NULL)
		return (NULL);

	/*
	 * Take the default quirk entry until we have inquiry
	 * data from nvme and can determine a better quirk to use.
	 */
	quirk = &nvme_quirk_table[nvme_quirk_table_size - 1];
	device->quirk = (void *)quirk;
	device->mintags = 0;
	device->maxtags = 0;
	device->inq_flags = 0;
	device->queue_flags = 0;
	device->device_id = NULL;	/* XXX Need to set this somewhere */
	device->device_id_len = 0;
	device->serial_num = NULL;	/* XXX Need to set this somewhere */
	device->serial_num_len = 0;
	return (device);
}

static void
nvme_device_transport(struct cam_path *path)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cts;
	/* XXX get data from nvme namespace and other info ??? */

	/* Get transport information from the SIM */
	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	path->device->transport = cpi.transport;
	path->device->transport_version = cpi.transport_version;

	path->device->protocol = cpi.protocol;
	path->device->protocol_version = cpi.protocol_version;

	/* Tell the controller what we think */
	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE);
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cts.transport = path->device->transport;
	cts.transport_version = path->device->transport_version;
	cts.protocol = path->device->protocol;
	cts.protocol_version = path->device->protocol_version;
	cts.proto_specific.valid = 0;
	cts.xport_specific.valid = 0;
	xpt_action((union ccb *)&cts);
}

static void
nvme_dev_advinfo(union ccb *start_ccb)
{
	struct cam_ed *device;
	struct ccb_dev_advinfo *cdai;
	off_t amt;

	start_ccb->ccb_h.status = CAM_REQ_INVALID;
	device = start_ccb->ccb_h.path->device;
	cdai = &start_ccb->cdai;
	switch(cdai->buftype) {
	case CDAI_TYPE_SCSI_DEVID:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		cdai->provsiz = device->device_id_len;
		if (device->device_id_len == 0)
			break;
		amt = device->device_id_len;
		if (cdai->provsiz > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->device_id, amt);
		break;
	case CDAI_TYPE_SERIAL_NUM:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		cdai->provsiz = device->serial_num_len;
		if (device->serial_num_len == 0)
			break;
		amt = device->serial_num_len;
		if (cdai->provsiz > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->serial_num, amt);
		break;
	case CDAI_TYPE_PHYS_PATH:
		if (cdai->flags & CDAI_FLAG_STORE) {
			if (device->physpath != NULL)
				free(device->physpath, M_CAMXPT);
			device->physpath_len = cdai->bufsiz;
			/* Clear existing buffer if zero length */
			if (cdai->bufsiz == 0)
				break;
			device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT);
			if (device->physpath == NULL) {
				start_ccb->ccb_h.status = CAM_REQ_ABORTED;
				return;
			}
			memcpy(device->physpath, cdai->buf, cdai->bufsiz);
		} else {
			cdai->provsiz = device->physpath_len;
			if (device->physpath_len == 0)
				break;
			amt = device->physpath_len;
			if (cdai->provsiz > cdai->bufsiz)
				amt = cdai->bufsiz;
			memcpy(cdai->buf, device->physpath, amt);
		}
		break;
	case CDAI_TYPE_NVME_CNTRL:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		amt = sizeof(struct nvme_controller_data);
		cdai->provsiz = amt;
		if (amt > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->nvme_cdata, amt);
		break;
	case CDAI_TYPE_NVME_NS:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		amt = sizeof(struct nvme_namespace_data);
		cdai->provsiz = amt;
		if (amt > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->nvme_data, amt);
		break;
	default:
		return;
	}
	start_ccb->ccb_h.status = CAM_REQ_CMP;

	if (cdai->flags & CDAI_FLAG_STORE) {
		xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path,
			  (void *)(uintptr_t)cdai->buftype);
	}
}

static void
nvme_action(union ccb *start_ccb)
{
	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("nvme_action: func= %#x\n", start_ccb->ccb_h.func_code));

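	/*
	 * Only bus/target/lun scans and advanced-info requests are handled
	 * here; every other CCB falls through to the default XPT handler.
	 */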
	switch (start_ccb->ccb_h.func_code) {
	case XPT_SCAN_BUS:
	case XPT_SCAN_TGT:
	case XPT_SCAN_LUN:
		nvme_scan_lun(start_ccb->ccb_h.path->periph,
		    start_ccb->ccb_h.path, start_ccb->crcn.flags,
		    start_ccb);
		break;
	case XPT_DEV_ADVINFO:
		nvme_dev_advinfo(start_ccb);
		break;

	default:
		xpt_action_default(start_ccb);
		break;
	}
}

/*
 * Handle any per-device event notifications that require action by the XPT.
 */
static void
nvme_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
	      struct cam_ed *device, void *async_arg)
{

	/*
	 * We only need to handle events for real devices.
	 */
	if (target->target_id == CAM_TARGET_WILDCARD
	 || device->lun_id == CAM_LUN_WILDCARD)
		return;

	if (async_code == AC_LOST_DEVICE &&
	    (device->flags & CAM_DEV_UNCONFIGURED) == 0) {
		device->flags |= CAM_DEV_UNCONFIGURED;
		xpt_release_device(device);
	}
}

static void
nvme_announce_periph(struct cam_periph *periph)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cts;
	struct cam_path *path = periph->path;
	struct ccb_trans_settings_nvme *nvmex;

	cam_periph_assert(periph, MA_OWNED);

	/* Ask the SIM for connection details */
	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		return;
	nvmex = &cts.xport_specific.nvme;

	/* Ask the SIM for its base transfer speed */
	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	printf("%s%d: nvme version %d.%d x%d (max x%d) lanes PCIe Gen%d (max Gen%d) link",
	    periph->periph_name, periph->unit_number,
	    NVME_MAJOR(nvmex->spec),
	    NVME_MINOR(nvmex->spec),
	    nvmex->lanes, nvmex->max_lanes,
	    nvmex->speed, nvmex->max_speed);
	printf("\n");
}

static void
nvme_proto_announce(struct cam_ed *device)
{
	struct sbuf sb;
	char buffer[120];

	sbuf_new(&sb, buffer, sizeof(buffer), SBUF_FIXEDLEN);
	nvme_print_ident(device->nvme_cdata, device->nvme_data, &sb);
	sbuf_finish(&sb);
	sbuf_putbuf(&sb);
}

static void
nvme_proto_denounce(struct cam_ed *device)
{

	nvme_proto_announce(device);
}

static void
nvme_proto_debug_out(union ccb *ccb)
{
	char cdb_str[(sizeof(struct nvme_command) * 3) + 1];

	if (ccb->ccb_h.func_code != XPT_NVME_IO)
		return;

	CAM_DEBUG(ccb->ccb_h.path,
	    CAM_DEBUG_CDB,("%s. NCB: %s\n", nvme_op_string(&ccb->nvmeio.cmd),
		nvme_cmd_string(&ccb->nvmeio.cmd, cdb_str, sizeof(cdb_str))));
}