/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * derived from ata_xpt.c: Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/sbuf.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/nvme/nvme_all.h>
#include <machine/stdarg.h>	/* for xpt_print below */

struct nvme_quirk_entry {
	u_int quirks;
#define CAM_QUIRK_MAXTAGS 1
	u_int mintags;
	u_int maxtags;
};

/* Not even sure why we need this */
static periph_init_t nvme_probe_periph_init;

static struct periph_driver nvme_probe_driver =
{
	nvme_probe_periph_init, "nvme_probe",
	TAILQ_HEAD_INITIALIZER(nvme_probe_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(nvme_probe, nvme_probe_driver);

typedef enum {
	NVME_PROBE_IDENTIFY_CD,
	NVME_PROBE_IDENTIFY_NS,
	NVME_PROBE_DONE,
	NVME_PROBE_INVALID
} nvme_probe_action;

static char *nvme_probe_action_text[] = {
	"NVME_PROBE_IDENTIFY_CD",
	"NVME_PROBE_IDENTIFY_NS",
	"NVME_PROBE_DONE",
	"NVME_PROBE_INVALID"
};

#define NVME_PROBE_SET_ACTION(softc, newaction)				\
do {									\
	char **text;							\
	text = nvme_probe_action_text;					\
	CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE,		\
	    ("Probe %s to %s\n", text[(softc)->action],			\
	    text[(newaction)]));					\
	(softc)->action = (newaction);					\
} while(0)

typedef enum {
	NVME_PROBE_NO_ANNOUNCE = 0x04
} nvme_probe_flags;

typedef struct {
	TAILQ_HEAD(, ccb_hdr) request_ccbs;
	union {
		struct nvme_controller_data cd;
		struct nvme_namespace_data ns;
	};
	nvme_probe_action action;
	nvme_probe_flags flags;
	int restart;
	struct cam_periph *periph;
} nvme_probe_softc;

static struct nvme_quirk_entry nvme_quirk_table[] =
{
	{
//		{
//		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
//		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
//		},
		.quirks = 0, .mintags = 0, .maxtags = 0
	},
};

static const int nvme_quirk_table_size =
	sizeof(nvme_quirk_table) / sizeof(*nvme_quirk_table);

static cam_status	nvme_probe_register(struct cam_periph *periph,
				      void *arg);
static void	 nvme_probe_schedule(struct cam_periph *nvme_probe_periph);
static void	 nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb);
static void	 nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb);
static void	 nvme_probe_cleanup(struct cam_periph *periph);
//static void	 nvme_find_quirk(struct cam_ed *device);
static void	 nvme_scan_lun(struct cam_periph *periph,
			       struct cam_path *path, cam_flags flags,
			       union ccb *ccb);
static struct cam_ed *
		 nvme_alloc_device(struct cam_eb *bus, struct cam_et *target,
				   lun_id_t lun_id);
static void	 nvme_device_transport(struct cam_path *path);
static void	 nvme_dev_async(uint32_t async_code,
				struct cam_eb *bus,
				struct cam_et *target,
				struct cam_ed *device,
				void *async_arg);
static void	 nvme_action(union ccb *start_ccb);
static void	 nvme_announce_periph_sbuf(struct cam_periph *periph,
		    struct sbuf *sb);
static void	 nvme_proto_announce_sbuf(struct cam_ed *device,
		    struct sbuf *sb);
static void	 nvme_proto_denounce_sbuf(struct cam_ed *device,
		    struct sbuf *sb);
static void	 nvme_proto_debug_out(union ccb *ccb);

static struct xpt_xport_ops nvme_xport_ops = {
	.alloc_device = nvme_alloc_device,
	.action = nvme_action,
	.async = nvme_dev_async,
	.announce_sbuf = nvme_announce_periph_sbuf,
};
#define NVME_XPT_XPORT(x, X)			\
static struct xpt_xport nvme_xport_ ## x = {	\
	.xport = XPORT_ ## X,			\
	.name = #x,				\
	.ops = &nvme_xport_ops,			\
};						\
CAM_XPT_XPORT(nvme_xport_ ## x);

NVME_XPT_XPORT(nvme, NVME);
NVME_XPT_XPORT(nvmf, NVMF);

#undef NVME_XPT_XPORT

static struct xpt_proto_ops nvme_proto_ops = {
	.announce_sbuf = nvme_proto_announce_sbuf,
	.denounce_sbuf = nvme_proto_denounce_sbuf,
	.debug_out = nvme_proto_debug_out,
};
static struct xpt_proto nvme_proto = {
	.proto = PROTO_NVME,
	.name = "nvme",
	.ops = &nvme_proto_ops,
};
CAM_XPT_PROTO(nvme_proto);

static void
nvme_probe_periph_init(void)
{
}

static cam_status
nvme_probe_register(struct cam_periph *periph, void *arg)
{
	union ccb *request_ccb;	/* CCB representing the probe request */
	nvme_probe_softc *softc;

	request_ccb = (union ccb *)arg;
	if (request_ccb == NULL) {
		printf("nvme_probe_register: no probe CCB, "
		    "can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (nvme_probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_ZERO | M_NOWAIT);

	if (softc == NULL) {
		printf("nvme_probe_register: Unable to probe new device. "
		    "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}
	TAILQ_INIT(&softc->request_ccbs);
	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
	    periph_links.tqe);
	softc->flags = 0;
	periph->softc = softc;
	softc->periph = periph;
	softc->action = NVME_PROBE_INVALID;
	if (cam_periph_acquire(periph) != 0)
		return (CAM_REQ_CMP_ERR);

	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n"));

//	nvme_device_transport(periph->path);
	nvme_probe_schedule(periph);

	return(CAM_REQ_CMP);
}

static void
nvme_probe_schedule(struct cam_periph *periph)
{
	union ccb *ccb;
	nvme_probe_softc *softc;

	softc = (nvme_probe_softc *)periph->softc;
	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);

	NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD);

	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
		softc->flags |= NVME_PROBE_NO_ANNOUNCE;
	else
		softc->flags &= ~NVME_PROBE_NO_ANNOUNCE;

	xpt_schedule(periph, CAM_PRIORITY_XPT);
}

static void
nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ccb_nvmeio *nvmeio;
	nvme_probe_softc *softc;
	lun_id_t lun;

	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_start\n"));

	softc = (nvme_probe_softc *)periph->softc;
	nvmeio = &start_ccb->nvmeio;
	lun = xpt_path_lun_id(periph->path);

	if (softc->restart) {
		softc->restart = 0;
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD);
	}

	switch (softc->action) {
	case NVME_PROBE_IDENTIFY_CD:
		cam_fill_nvmeadmin(nvmeio,
		    0,			/* retries */
		    nvme_probe_done,	/* cbfcnp */
		    CAM_DIR_IN,		/* flags */
		    (uint8_t *)&softc->cd,	/* data_ptr */
		    sizeof(softc->cd),		/* dxfer_len */
		    30 * 1000);		/* timeout 30s */
		nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, 0,
		    1, 0, 0, 0, 0, 0);
		break;
	case NVME_PROBE_IDENTIFY_NS:
		cam_fill_nvmeadmin(nvmeio,
		    0,			/* retries */
		    nvme_probe_done,	/* cbfcnp */
		    CAM_DIR_IN,		/* flags */
		    (uint8_t *)&softc->ns,	/* data_ptr */
		    sizeof(softc->ns),		/* dxfer_len */
		    30 * 1000);		/* timeout 30s */
		nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, lun,
		    0, 0, 0, 0, 0, 0);
		break;
	default:
		panic("nvme_probe_start: invalid action state 0x%x\n", softc->action);
	}
	start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
	xpt_action(start_ccb);
}

/*
 * Completion handler for the probe IDENTIFY commands.  A failure marks
 * the device as lost.  On success the controller identify data (and the
 * serial number) or the namespace identify data (plus a SCSI VPD device
 * ID built from the NGUID/EUI64 fields) is cached in the cam_ed, the new
 * device is announced, and any queued probe request CCBs are completed.
 */
static void
nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct nvme_namespace_data *nvme_data;
	struct nvme_controller_data *nvme_cdata;
	nvme_probe_softc *softc;
	struct cam_path *path;
	struct scsi_vpd_device_id *did;
	struct scsi_vpd_id_descriptor *idd;
	uint32_t priority;
	int found = 1, e, g, len;

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_done\n"));

	softc = (nvme_probe_softc *)periph->softc;
	path = done_ccb->ccb_h.path;
	priority = done_ccb->ccb_h.pinfo.priority;

	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if (cam_periph_error(done_ccb,
		    0, softc->restart ? (SF_NO_RECOVERY | SF_NO_RETRY) : 0
		    ) == ERESTART) {
out:
			/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
			cam_release_devq(path, 0, 0, 0, FALSE);
			return;
		}
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
		}

		/*
		 * If we get to this point, we got an error status back
		 * from the inquiry and the error status doesn't require
		 * automatically retrying the command.  Therefore, the
		 * inquiry failed.  If we had inquiry information before
		 * for this device, but this latest inquiry command failed,
		 * the device has probably gone away.  If this device isn't
		 * already marked unconfigured, notify the peripheral
		 * drivers that this device is no more.
		 */
device_fail:	if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
			xpt_async(AC_LOST_DEVICE, path, NULL);
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_INVALID);
		found = 0;
		goto done;
	}
	if (softc->restart)
		goto done;
	switch (softc->action) {
	case NVME_PROBE_IDENTIFY_CD:
		nvme_controller_data_swapbytes(&softc->cd);

		nvme_cdata = path->device->nvme_cdata;
		if (nvme_cdata == NULL) {
			nvme_cdata = malloc(sizeof(*nvme_cdata), M_CAMXPT,
			    M_NOWAIT);
			if (nvme_cdata == NULL) {
				xpt_print(path, "Can't allocate memory");
				goto device_fail;
			}
		}
		bcopy(&softc->cd, nvme_cdata, sizeof(*nvme_cdata));
		path->device->nvme_cdata = nvme_cdata;

		/* Save/update serial number. */
		if (path->device->serial_num != NULL) {
			free(path->device->serial_num, M_CAMXPT);
			path->device->serial_num = NULL;
			path->device->serial_num_len = 0;
		}
		path->device->serial_num = (uint8_t *)
		    malloc(NVME_SERIAL_NUMBER_LENGTH + 1, M_CAMXPT, M_NOWAIT);
		if (path->device->serial_num != NULL) {
			cam_strvis_flag(path->device->serial_num,
			    nvme_cdata->sn, sizeof(nvme_cdata->sn),
			    NVME_SERIAL_NUMBER_LENGTH + 1,
			    CAM_STRVIS_FLAG_NONASCII_SPC);

			path->device->serial_num_len =
			    strlen(path->device->serial_num);
		}

//		nvme_find_quirk(path->device);
		nvme_device_transport(path);
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_NS);
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		goto out;
	case NVME_PROBE_IDENTIFY_NS:
		nvme_namespace_data_swapbytes(&softc->ns);

		/* Check that the namespace exists. */
		if (softc->ns.nsze == 0)
			goto device_fail;

		nvme_data = path->device->nvme_data;
		if (nvme_data == NULL) {
			nvme_data = malloc(sizeof(*nvme_data), M_CAMXPT,
			    M_NOWAIT);
			if (nvme_data == NULL) {
				xpt_print(path, "Can't allocate memory");
				goto device_fail;
			}
		}
		bcopy(&softc->ns, nvme_data, sizeof(*nvme_data));
		path->device->nvme_data = nvme_data;

		/* Save/update device_id based on NGUID and/or EUI64. */
		if (path->device->device_id != NULL) {
			free(path->device->device_id, M_CAMXPT);
			path->device->device_id = NULL;
			path->device->device_id_len = 0;
		}
		len = 0;
		for (g = 0; g < sizeof(nvme_data->nguid); g++) {
			if (nvme_data->nguid[g] != 0)
				break;
		}
		if (g < sizeof(nvme_data->nguid))
			len += sizeof(struct scsi_vpd_id_descriptor) + 16;
		for (e = 0; e < sizeof(nvme_data->eui64); e++) {
			if (nvme_data->eui64[e] != 0)
				break;
		}
		if (e < sizeof(nvme_data->eui64))
			len += sizeof(struct scsi_vpd_id_descriptor) + 8;
		if (len > 0) {
			path->device->device_id = (uint8_t *)
			    malloc(SVPD_DEVICE_ID_HDR_LEN + len,
			    M_CAMXPT, M_NOWAIT);
		}
		if (path->device->device_id != NULL) {
			did = (struct scsi_vpd_device_id *)path->device->device_id;
			did->device = SID_QUAL_LU_CONNECTED | T_DIRECT;
			did->page_code = SVPD_DEVICE_ID;
			scsi_ulto2b(len, did->length);
			idd = (struct scsi_vpd_id_descriptor *)(did + 1);
			if (g < sizeof(nvme_data->nguid)) {
				idd->proto_codeset = SVPD_ID_CODESET_BINARY;
				idd->id_type = SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64;
				idd->length = 16;
				bcopy(nvme_data->nguid, idd->identifier, 16);
				idd = (struct scsi_vpd_id_descriptor *)
				    &idd->identifier[16];
			}
			if (e < sizeof(nvme_data->eui64)) {
				idd->proto_codeset = SVPD_ID_CODESET_BINARY;
				idd->id_type = SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64;
				idd->length = 8;
				bcopy(nvme_data->eui64, idd->identifier, 8);
			}
			path->device->device_id_len = SVPD_DEVICE_ID_HDR_LEN + len;
		}

		if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) {
			path->device->flags &= ~CAM_DEV_UNCONFIGURED;
			xpt_acquire_device(path->device);
			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action(done_ccb);
			xpt_async(AC_FOUND_DEVICE, path, done_ccb);
		}
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_DONE);
		break;
	default:
		panic("nvme_probe_done: invalid action state 0x%x\n", softc->action);
	}
done:
	if (softc->restart) {
		softc->restart = 0;
		xpt_release_ccb(done_ccb);
		nvme_probe_schedule(periph);
		goto out;
	}
	xpt_release_ccb(done_ccb);
	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n"));
	while ((done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs))) {
		TAILQ_REMOVE(&softc->request_ccbs,
		    &done_ccb->ccb_h, periph_links.tqe);
		done_ccb->ccb_h.status = found ? CAM_REQ_CMP : CAM_REQ_CMP_ERR;
		xpt_done(done_ccb);
	}
	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
	cam_release_devq(path, 0, 0, 0, FALSE);
	cam_periph_invalidate(periph);
	cam_periph_release_locked(periph);
}

static void
nvme_probe_cleanup(struct cam_periph *periph)
{

	free(periph->softc, M_CAMXPT);
}

#if 0
/* XXX should be used, don't delete */
static void
nvme_find_quirk(struct cam_ed *device)
{
	struct nvme_quirk_entry *quirk;
	caddr_t match;

	match = cam_quirkmatch((caddr_t)&device->nvme_data,
			       (caddr_t)nvme_quirk_table,
			       nvme_quirk_table_size,
			       sizeof(*nvme_quirk_table), nvme_identify_match);

	if (match == NULL)
		panic("xpt_find_quirk: device didn't match wildcard entry!!");

	quirk = (struct nvme_quirk_entry *)match;
	device->quirk = quirk;
	if (quirk->quirks & CAM_QUIRK_MAXTAGS) {
		device->mintags = quirk->mintags;
		device->maxtags = quirk->maxtags;
	}
}
#endif

static void
nvme_scan_lun(struct cam_periph *periph, struct cam_path *path,
	     cam_flags flags, union ccb *request_ccb)
{
	struct ccb_pathinq cpi;
	cam_status status;
	struct cam_periph *old_periph;
	int lock;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun\n"));

	xpt_path_inq(&cpi, path);

	if (cpi.ccb_h.status != CAM_REQ_CMP) {
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = cpi.ccb_h.status;
			xpt_done(request_ccb);
		}
		return;
	}

	if (xpt_path_lun_id(path) == CAM_LUN_WILDCARD) {
		CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun ignoring bus\n"));
		request_ccb->ccb_h.status = CAM_REQ_CMP;	/* XXX signal error ? */
		xpt_done(request_ccb);
		return;
	}

	lock = (xpt_path_owned(path) == 0);
	if (lock)
		xpt_path_lock(path);
	if ((old_periph = cam_periph_find(path, "nvme_probe")) != NULL) {
		if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) {
			nvme_probe_softc *softc;

			softc = (nvme_probe_softc *)old_periph->softc;
			TAILQ_INSERT_TAIL(&softc->request_ccbs,
			    &request_ccb->ccb_h, periph_links.tqe);
			softc->restart = 1;
			CAM_DEBUG(path, CAM_DEBUG_TRACE,
			    ("restarting nvme_probe device\n"));
		} else {
			request_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			CAM_DEBUG(path, CAM_DEBUG_TRACE,
			    ("Failing to restart nvme_probe device\n"));
			xpt_done(request_ccb);
		}
	} else {
		CAM_DEBUG(path, CAM_DEBUG_TRACE,
		    ("Adding nvme_probe device\n"));
		status = cam_periph_alloc(nvme_probe_register, NULL, nvme_probe_cleanup,
		    nvme_probe_start, "nvme_probe",
		    CAM_PERIPH_BIO,
		    request_ccb->ccb_h.path, NULL, 0,
		    request_ccb);

		if (status != CAM_REQ_CMP) {
			xpt_print(path, "nvme_scan_lun: cam_periph_alloc "
			    "returned an error, can't continue probe\n");
			request_ccb->ccb_h.status = status;
			xpt_done(request_ccb);
		}
	}
	if (lock)
		xpt_path_unlock(path);
}
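
/*
 * xpt_xport alloc_device hook: allocate a cam_ed and start it out with
 * the default (wildcard) quirk entry and empty identification data; the
 * probe state machine above fills in the real controller/namespace data.
 */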
static struct cam_ed *
nvme_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
	struct nvme_quirk_entry *quirk;
	struct cam_ed *device;

	device = xpt_alloc_device(bus, target, lun_id);
	if (device == NULL)
		return (NULL);

	/*
	 * Take the default quirk entry until we have inquiry
	 * data from nvme and can determine a better quirk to use.
	 */
	quirk = &nvme_quirk_table[nvme_quirk_table_size - 1];
	device->quirk = (void *)quirk;
	device->mintags = 0;
	device->maxtags = 0;
	device->inq_flags = 0;
	device->queue_flags = 0;
	device->device_id = NULL;
	device->device_id_len = 0;
	device->serial_num = NULL;
	device->serial_num_len = 0;
	return (device);
}

static void
nvme_device_transport(struct cam_path *path)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cts;
	/* XXX get data from nvme namespace and other info ??? */

	/* Get transport information from the SIM */
	xpt_path_inq(&cpi, path);

	path->device->transport = cpi.transport;
	path->device->transport_version = cpi.transport_version;

	path->device->protocol = cpi.protocol;
	path->device->protocol_version = cpi.protocol_version;

	/* Tell the controller what we think */
	memset(&cts, 0, sizeof(cts));
	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE);
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cts.transport = path->device->transport;
	cts.transport_version = path->device->transport_version;
	cts.protocol = path->device->protocol;
	cts.protocol_version = path->device->protocol_version;
	cts.proto_specific.valid = 0;
	cts.xport_specific.valid = 0;
	xpt_action((union ccb *)&cts);
}
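
/*
 * XPT_DEV_ADVINFO handler: report the cached SCSI-style device ID, serial
 * number, physical path and raw NVMe controller/namespace identify data;
 * only the physical path may be stored.
 */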
static void
nvme_dev_advinfo(union ccb *start_ccb)
{
	struct cam_ed *device;
	struct ccb_dev_advinfo *cdai;
	off_t amt;

	xpt_path_assert(start_ccb->ccb_h.path, MA_OWNED);
	start_ccb->ccb_h.status = CAM_REQ_INVALID;
	device = start_ccb->ccb_h.path->device;
	cdai = &start_ccb->cdai;
	switch(cdai->buftype) {
	case CDAI_TYPE_SCSI_DEVID:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		cdai->provsiz = device->device_id_len;
		if (device->device_id_len == 0)
			break;
		amt = device->device_id_len;
		if (cdai->provsiz > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->device_id, amt);
		break;
	case CDAI_TYPE_SERIAL_NUM:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		cdai->provsiz = device->serial_num_len;
		if (device->serial_num_len == 0)
			break;
		amt = device->serial_num_len;
		if (cdai->provsiz > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->serial_num, amt);
		break;
	case CDAI_TYPE_PHYS_PATH:
		if (cdai->flags & CDAI_FLAG_STORE) {
			if (device->physpath != NULL) {
				free(device->physpath, M_CAMXPT);
				device->physpath = NULL;
				device->physpath_len = 0;
			}
			/* Clear existing buffer if zero length */
			if (cdai->bufsiz == 0)
				break;
			device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT);
			if (device->physpath == NULL) {
				start_ccb->ccb_h.status = CAM_REQ_ABORTED;
				return;
			}
			device->physpath_len = cdai->bufsiz;
			memcpy(device->physpath, cdai->buf, cdai->bufsiz);
		} else {
			cdai->provsiz = device->physpath_len;
			if (device->physpath_len == 0)
				break;
			amt = device->physpath_len;
			if (cdai->provsiz > cdai->bufsiz)
				amt = cdai->bufsiz;
			memcpy(cdai->buf, device->physpath, amt);
		}
		break;
	case CDAI_TYPE_NVME_CNTRL:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		amt = sizeof(struct nvme_controller_data);
		cdai->provsiz = amt;
		if (amt > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->nvme_cdata, amt);
		break;
	case CDAI_TYPE_NVME_NS:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		amt = sizeof(struct nvme_namespace_data);
		cdai->provsiz = amt;
		if (amt > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->nvme_data, amt);
		break;
	default:
		return;
	}
	start_ccb->ccb_h.status = CAM_REQ_CMP;

	if (cdai->flags & CDAI_FLAG_STORE) {
		xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path,
			  (void *)(uintptr_t)cdai->buftype);
	}
}

static void
nvme_action(union ccb *start_ccb)
{
	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("nvme_action: func= %#x\n", start_ccb->ccb_h.func_code));

	switch (start_ccb->ccb_h.func_code) {
	case XPT_SCAN_BUS:
	case XPT_SCAN_TGT:
	case XPT_SCAN_LUN:
		nvme_scan_lun(start_ccb->ccb_h.path->periph,
		    start_ccb->ccb_h.path, start_ccb->crcn.flags,
		    start_ccb);
		break;
	case XPT_DEV_ADVINFO:
		nvme_dev_advinfo(start_ccb);
		break;

	default:
		xpt_action_default(start_ccb);
		break;
	}
}

/*
 * Handle any per-device event notifications that require action by the XPT.
 */
static void
nvme_dev_async(uint32_t async_code, struct cam_eb *bus, struct cam_et *target,
	      struct cam_ed *device, void *async_arg)
{

	/*
	 * We only need to handle events for real devices.
	 */
	if (target->target_id == CAM_TARGET_WILDCARD
	 || device->lun_id == CAM_LUN_WILDCARD)
		return;

	if (async_code == AC_LOST_DEVICE &&
	    (device->flags & CAM_DEV_UNCONFIGURED) == 0) {
		device->flags |= CAM_DEV_UNCONFIGURED;
		xpt_release_device(device);
	}
}

static void
nvme_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cts;
	struct cam_path *path = periph->path;
	struct ccb_trans_settings_nvme *nvmex;

	cam_periph_assert(periph, MA_OWNED);

	/* Ask the SIM for connection details */
	memset(&cts, 0, sizeof(cts));
	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		return;

	/* Ask the SIM for its base transfer speed */
	xpt_path_inq(&cpi, periph->path);
	sbuf_printf(sb, "%s%d: nvme version %d.%d",
	    periph->periph_name, periph->unit_number,
	    NVME_MAJOR(cts.protocol_version),
	    NVME_MINOR(cts.protocol_version));
	if (cts.transport == XPORT_NVME) {
		/* The PCIe link settings are transport-specific. */
		nvmex = &cts.xport_specific.nvme;
		if (nvmex->valid & CTS_NVME_VALID_LINK)
			sbuf_printf(sb,
			    " x%d (max x%d) lanes PCIe Gen%d (max Gen%d) link",
			    nvmex->lanes, nvmex->max_lanes,
			    nvmex->speed, nvmex->max_speed);
	}
	sbuf_putc(sb, '\n');
}

static void
nvme_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb)
{
	nvme_print_ident(device->nvme_cdata, device->nvme_data, sb);
}

static void
nvme_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb)
{
	nvme_print_ident_short(device->nvme_cdata, device->nvme_data, sb);
}
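
/*
 * Decode and log the NVMe command carried by an I/O or admin CCB when
 * CAM_DEBUG_CDB tracing is enabled.
 */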
static void
nvme_proto_debug_out(union ccb *ccb)
{
	char cdb_str[(sizeof(struct nvme_command) * 3) + 1];

	if (ccb->ccb_h.func_code != XPT_NVME_IO &&
	    ccb->ccb_h.func_code != XPT_NVME_ADMIN)
		return;

	CAM_DEBUG(ccb->ccb_h.path,
	    CAM_DEBUG_CDB,("%s. NCB: %s\n", nvme_op_string(&ccb->nvmeio.cmd,
		ccb->ccb_h.func_code == XPT_NVME_ADMIN),
		nvme_cmd_string(&ccb->nvmeio.cmd, cdb_str, sizeof(cdb_str))));
}