/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * derived from ata_xpt.c: Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/sbuf.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/nvme/nvme_all.h>
#include <machine/stdarg.h>	/* for xpt_print below */

/*
 * Per-device quirk entry.  Mirrors the shape of the SCSI/ATA quirk tables
 * even though no NVMe quirks are currently defined (see nvme_quirk_table
 * below, which contains only a catch-all zero entry).
 */
struct nvme_quirk_entry {
	u_int quirks;		/* bitmask of CAM_QUIRK_* below */
#define CAM_QUIRK_MAXTAGS 1
	u_int mintags;		/* only meaningful when CAM_QUIRK_MAXTAGS set */
	u_int maxtags;
};

/* Not even sure why we need this */
static periph_init_t nvme_probe_periph_init;

/*
 * Driver registration for the transient "nvme_probe" periph that runs the
 * IDENTIFY state machine when a new device/namespace is scanned.
 */
static struct periph_driver nvme_probe_driver =
{
	nvme_probe_periph_init, "nvme_probe",
	TAILQ_HEAD_INITIALIZER(nvme_probe_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(nvme_probe, nvme_probe_driver);

/*
 * Probe state machine states: IDENTIFY controller first, then IDENTIFY
 * the namespace, then done.  INVALID is the initial/error state.
 */
typedef enum {
	NVME_PROBE_IDENTIFY_CD,
	NVME_PROBE_IDENTIFY_NS,
	NVME_PROBE_DONE,
	NVME_PROBE_INVALID
} nvme_probe_action;

/* Text names for the states above, indexed by nvme_probe_action. */
static char *nvme_probe_action_text[] = {
	"NVME_PROBE_IDENTIFY_CD",
	"NVME_PROBE_IDENTIFY_NS",
	"NVME_PROBE_DONE",
	"NVME_PROBE_INVALID"
};

/*
 * Record (and CAM_DEBUG-log) a probe state transition.  Evaluates softc
 * more than once; callers only ever pass a plain pointer.
 */
#define NVME_PROBE_SET_ACTION(softc, newaction)	\
do {									\
	char **text;							\
	text = nvme_probe_action_text;					\
	CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE,		\
	    ("Probe %s to %s\n", text[(softc)->action],			\
	    text[(newaction)]));					\
	(softc)->action = (newaction);					\
} while(0)

typedef enum {
	NVME_PROBE_NO_ANNOUNCE = 0x04
} nvme_probe_flags;

/*
 * Softc of the probe periph.  The anonymous union holds whichever IDENTIFY
 * payload the current state fetches (controller data or namespace data --
 * only one is live at a time).
 */
typedef struct {
	TAILQ_HEAD(, ccb_hdr) request_ccbs;	/* queued XPT_SCAN_* requests */
	union {
		struct nvme_controller_data	cd;
		struct nvme_namespace_data	ns;
	};
	nvme_probe_action	action;		/* current state */
	nvme_probe_flags	flags;
	int			restart;	/* re-run probe from the top */
	struct cam_periph	*periph;	/* back pointer for SET_ACTION */
} nvme_probe_softc;

/* Single catch-all entry; no real NVMe quirks are defined yet. */
static struct nvme_quirk_entry nvme_quirk_table[] =
{
	{
//		{
//		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
//		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
//		},
		.quirks = 0, .mintags = 0, .maxtags = 0
	},
};

static const int nvme_quirk_table_size =
	sizeof(nvme_quirk_table) / sizeof(*nvme_quirk_table);

static cam_status	nvme_probe_register(struct cam_periph *periph,
				      void *arg);
static void	 nvme_probe_schedule(struct cam_periph *nvme_probe_periph);
static void	 nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb);
static void	 nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb);
static void	 nvme_probe_cleanup(struct cam_periph *periph);
//static void	 nvme_find_quirk(struct cam_ed *device);
static void	 nvme_scan_lun(struct cam_periph *periph,
			       struct cam_path *path, cam_flags flags,
			       union ccb *ccb);
static struct cam_ed *
		 nvme_alloc_device(struct cam_eb *bus, struct cam_et *target,
				   lun_id_t lun_id);
static void	 nvme_device_transport(struct cam_path *path);
static void	 nvme_dev_async(uint32_t async_code,
				struct cam_eb *bus,
				struct cam_et *target,
				struct cam_ed *device,
				void *async_arg);
static void	 nvme_action(union ccb *start_ccb);
static void	 nvme_announce_periph_sbuf(struct cam_periph *periph,
    struct sbuf *sb);
static void	 nvme_proto_announce_sbuf(struct cam_ed *device,
    struct sbuf *sb);
static void	 nvme_proto_denounce_sbuf(struct cam_ed *device,
    struct sbuf *sb);
static void	 nvme_proto_debug_out(union ccb
*ccb);

/* Transport hooks this file provides to the XPT core. */
static struct xpt_xport_ops nvme_xport_ops = {
	.alloc_device = nvme_alloc_device,
	.action = nvme_action,
	.async = nvme_dev_async,
	.announce_sbuf = nvme_announce_periph_sbuf,
};
/* Register one xpt_xport instance per transport enum value. */
#define NVME_XPT_XPORT(x, X)			\
static struct xpt_xport nvme_xport_ ## x = {	\
	.xport = XPORT_ ## X,			\
	.name = #x,				\
	.ops = &nvme_xport_ops,			\
};						\
CAM_XPT_XPORT(nvme_xport_ ## x);

NVME_XPT_XPORT(nvme, NVME);

#undef NVME_XPT_XPORT

/* Protocol hooks (announce/denounce/debug output) for PROTO_NVME. */
static struct xpt_proto_ops nvme_proto_ops = {
	.announce_sbuf = nvme_proto_announce_sbuf,
	.denounce_sbuf = nvme_proto_denounce_sbuf,
	.debug_out = nvme_proto_debug_out,
};
static struct xpt_proto nvme_proto = {
	.proto = PROTO_NVME,
	.name = "nvme",
	.ops = &nvme_proto_ops,
};
CAM_XPT_PROTO(nvme_proto);

/* Nothing to do at periph-driver init time. */
static void
nvme_probe_periph_init(void)
{
}

/*
 * Periph "register" callback invoked by cam_periph_alloc() from
 * nvme_scan_lun().  Allocates the probe softc, queues the triggering scan
 * CCB on it, takes a periph reference, and kicks off the state machine.
 *
 * arg is the XPT_SCAN_* request CCB; returns CAM_REQ_CMP on success or
 * CAM_REQ_CMP_ERR on allocation/acquire failure.
 */
static cam_status
nvme_probe_register(struct cam_periph *periph, void *arg)
{
	union ccb *request_ccb;	/* CCB representing the probe request */
	nvme_probe_softc *softc;

	request_ccb = (union ccb *)arg;
	if (request_ccb == NULL) {
		printf("nvme_probe_register: no probe CCB, "
		       "can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (nvme_probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_ZERO | M_NOWAIT);

	if (softc == NULL) {
		printf("nvme_probe_register: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}
	TAILQ_INIT(&softc->request_ccbs);
	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
	    periph_links.tqe);
	softc->flags = 0;
	periph->softc = softc;
	softc->periph = periph;
	softc->action = NVME_PROBE_INVALID;
	/* Reference dropped in nvme_probe_done() when the probe finishes. */
	if (cam_periph_acquire(periph) != 0)
		return (CAM_REQ_CMP_ERR);

	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n"));

//	nvme_device_transport(periph->path);
	nvme_probe_schedule(periph);

	return(CAM_REQ_CMP);
}

/*
 * Arm the state machine: reset to IDENTIFY_CD, latch the announce
 * suppression flag from the first queued request CCB, and schedule the
 * periph so nvme_probe_start() runs.
 */
static void
nvme_probe_schedule(struct cam_periph *periph)
{
	union ccb *ccb;
	nvme_probe_softc *softc;

	softc = (nvme_probe_softc *)periph->softc;
	/* At least one request CCB is always queued before we get here. */
	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);

	NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD);

	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
		softc->flags |= NVME_PROBE_NO_ANNOUNCE;
	else
		softc->flags &= ~NVME_PROBE_NO_ANNOUNCE;

	xpt_schedule(periph, CAM_PRIORITY_XPT);
}

/*
 * Periph "start" callback: build and dispatch the admin IDENTIFY command
 * for the current state.  CAM_DEV_QFREEZE holds the device queue frozen
 * until nvme_probe_done() releases it, so probe commands are not
 * interleaved with normal I/O.
 */
static void
nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ccb_nvmeio *nvmeio;
	nvme_probe_softc *softc;
	lun_id_t lun;

	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_start\n"));

	softc = (nvme_probe_softc *)periph->softc;
	nvmeio = &start_ccb->nvmeio;
	lun = xpt_path_lun_id(periph->path);

	/* A rescan request arrived mid-probe: start over from the top. */
	if (softc->restart) {
		softc->restart = 0;
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD);
	}

	switch (softc->action) {
	case NVME_PROBE_IDENTIFY_CD:
		cam_fill_nvmeadmin(nvmeio,
		    0,			/* retries */
		    nvme_probe_done,	/* cbfcnp */
		    CAM_DIR_IN,		/* flags */
		    (uint8_t *)&softc->cd,	/* data_ptr */
		    sizeof(softc->cd),	/* dxfer_len */
		    30 * 1000);		/* timeout 30s */
		/* nsid 0, cdw10 = 1: IDENTIFY the controller. */
		nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, 0,
		    1, 0, 0, 0, 0, 0);
		break;
	case NVME_PROBE_IDENTIFY_NS:
		cam_fill_nvmeadmin(nvmeio,
		    0,			/* retries */
		    nvme_probe_done,	/* cbfcnp */
		    CAM_DIR_IN,		/* flags */
		    (uint8_t *)&softc->ns,	/* data_ptr */
		    sizeof(softc->ns),	/* dxfer_len */
		    30 * 1000);		/* timeout 30s */
		/* nsid = lun, cdw10 = 0: IDENTIFY that namespace. */
		nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, lun,
		    0, 0, 0, 0, 0, 0);
		break;
	default:
		panic("nvme_probe_start: invalid action state 0x%x\n", softc->action);
	}
	/* Freeze is released in nvme_probe_done() via cam_release_devq(). */
	start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
	xpt_action(start_ccb);
}

/*
 * Completion handler for the probe IDENTIFY commands.  Advances the state
 * machine (controller data -> namespace data -> done), caches the results
 * on the cam_ed, synthesizes a SCSI VPD device-id page from NGUID/EUI64,
 * and on success announces the device (AC_FOUND_DEVICE).
 *
 * Control flow is goto-based and order-sensitive:
 *   out:         release the CAM_DEV_QFREEZE count and return (probe
 *                continues via a rescheduled start or a retry).
 *   device_fail: the device is gone/unusable -- signal AC_LOST_DEVICE,
 *                mark the probe failed, fall into done.
 *   done:        complete (or restart) the queued scan request CCBs and
 *                tear the probe periph down.
 */
static void
nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct nvme_namespace_data *nvme_data;
	struct nvme_controller_data *nvme_cdata;
	nvme_probe_softc *softc;
	struct cam_path *path;
	struct scsi_vpd_device_id *did;
	struct scsi_vpd_id_descriptor *idd;
	uint32_t priority;
	int found = 1, e, g, len;

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_done\n"));

	softc = (nvme_probe_softc *)periph->softc;
	path = done_ccb->ccb_h.path;
	priority = done_ccb->ccb_h.pinfo.priority;

	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/* When restarting, don't bother with recovery/retries. */
		if (cam_periph_error(done_ccb,
			0, softc->restart ? (SF_NO_RECOVERY | SF_NO_RETRY) : 0
		    ) == ERESTART) {
out:
			/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
			cam_release_devq(path, 0, 0, 0, FALSE);
			return;
		}
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
		}

		/*
		 * If we get to this point, we got an error status back
		 * from the inquiry and the error status doesn't require
		 * automatically retrying the command.  Therefore, the
		 * inquiry failed.  If we had inquiry information before
		 * for this device, but this latest inquiry command failed,
		 * the device has probably gone away.  If this device isn't
		 * already marked unconfigured, notify the peripheral
		 * drivers that this device is no more.
		 */
device_fail:	if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
			xpt_async(AC_LOST_DEVICE, path, NULL);
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_INVALID);
		found = 0;
		goto done;
	}
	if (softc->restart)
		goto done;
	switch (softc->action) {
	case NVME_PROBE_IDENTIFY_CD:
		/* IDENTIFY data arrives little-endian; fix up in place. */
		nvme_controller_data_swapbytes(&softc->cd);

		nvme_cdata = path->device->nvme_cdata;
		if (nvme_cdata == NULL) {
			nvme_cdata = malloc(sizeof(*nvme_cdata), M_CAMXPT,
			    M_NOWAIT);
			if (nvme_cdata == NULL) {
				xpt_print(path, "Can't allocate memory");
				goto device_fail;
			}
		}
		bcopy(&softc->cd, nvme_cdata, sizeof(*nvme_cdata));
		path->device->nvme_cdata = nvme_cdata;

		/* Save/update serial number. */
		if (path->device->serial_num != NULL) {
			free(path->device->serial_num, M_CAMXPT);
			path->device->serial_num = NULL;
			path->device->serial_num_len = 0;
		}
		path->device->serial_num = (uint8_t *)
		    malloc(NVME_SERIAL_NUMBER_LENGTH + 1, M_CAMXPT, M_NOWAIT);
		/* A missing serial number is tolerated, not fatal. */
		if (path->device->serial_num != NULL) {
			cam_strvis_flag(path->device->serial_num,
			    nvme_cdata->sn, sizeof(nvme_cdata->sn),
			    NVME_SERIAL_NUMBER_LENGTH + 1,
			    CAM_STRVIS_FLAG_NONASCII_SPC);

			path->device->serial_num_len =
			    strlen(path->device->serial_num);
		}

//		nvme_find_quirk(path->device);
		nvme_device_transport(path);
		/* Controller identified; go fetch the namespace data. */
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_NS);
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		goto out;
	case NVME_PROBE_IDENTIFY_NS:
		nvme_namespace_data_swapbytes(&softc->ns);

		/* Check that the namespace exists. */
		if (softc->ns.nsze == 0)
			goto device_fail;

		nvme_data = path->device->nvme_data;
		if (nvme_data == NULL) {
			nvme_data = malloc(sizeof(*nvme_data), M_CAMXPT,
			    M_NOWAIT);
			if (nvme_data == NULL) {
				xpt_print(path, "Can't allocate memory");
				goto device_fail;
			}
		}
		bcopy(&softc->ns, nvme_data, sizeof(*nvme_data));
		path->device->nvme_data = nvme_data;

		/* Save/update device_id based on NGUID and/or EUI64. */
		if (path->device->device_id != NULL) {
			free(path->device->device_id, M_CAMXPT);
			path->device->device_id = NULL;
			path->device->device_id_len = 0;
		}
		/* An all-zero NGUID/EUI64 means "not implemented". */
		len = 0;
		for (g = 0; g < sizeof(nvme_data->nguid); g++) {
			if (nvme_data->nguid[g] != 0)
				break;
		}
		if (g < sizeof(nvme_data->nguid))
			len += sizeof(struct scsi_vpd_id_descriptor) + 16;
		for (e = 0; e < sizeof(nvme_data->eui64); e++) {
			if (nvme_data->eui64[e] != 0)
				break;
		}
		if (e < sizeof(nvme_data->eui64))
			len += sizeof(struct scsi_vpd_id_descriptor) + 8;
		if (len > 0) {
			path->device->device_id = (uint8_t *)
			    malloc(SVPD_DEVICE_ID_HDR_LEN + len,
			    M_CAMXPT, M_NOWAIT);
		}
		/*
		 * Build a synthetic SCSI VPD page 0x83 so CDAI_TYPE_SCSI_DEVID
		 * consumers get a unique ID; both descriptors are emitted as
		 * binary EUI64-type identifiers (16-byte form for NGUID).
		 */
		if (path->device->device_id != NULL) {
			did = (struct scsi_vpd_device_id *)path->device->device_id;
			did->device = SID_QUAL_LU_CONNECTED | T_DIRECT;
			did->page_code = SVPD_DEVICE_ID;
			scsi_ulto2b(len, did->length);
			idd = (struct scsi_vpd_id_descriptor *)(did + 1);
			if (g < sizeof(nvme_data->nguid)) {
				idd->proto_codeset = SVPD_ID_CODESET_BINARY;
				idd->id_type = SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64;
				idd->length = 16;
				bcopy(nvme_data->nguid, idd->identifier, 16);
				idd = (struct scsi_vpd_id_descriptor *)
				    &idd->identifier[16];
			}
			if (e < sizeof(nvme_data->eui64)) {
				idd->proto_codeset = SVPD_ID_CODESET_BINARY;
				idd->id_type = SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64;
				idd->length = 8;
				bcopy(nvme_data->eui64, idd->identifier, 8);
			}
			path->device->device_id_len = SVPD_DEVICE_ID_HDR_LEN + len;
		}

		if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) {
			path->device->flags &= ~CAM_DEV_UNCONFIGURED;
			xpt_acquire_device(path->device);
			/* Reuse done_ccb as the GDEV_TYPE arg for the async. */
			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action(done_ccb);
			xpt_async(AC_FOUND_DEVICE, path, done_ccb);
		}
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_DONE);
		break;
	default:
		panic("nvme_probe_done: invalid action state 0x%x\n", softc->action);
	}
done:
	/* A rescan was queued while we ran: run the whole probe again. */
	if (softc->restart) {
		softc->restart = 0;
		xpt_release_ccb(done_ccb);
		nvme_probe_schedule(periph);
		goto out;
	}
	xpt_release_ccb(done_ccb);
	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n"));
	/* Complete every scan request that was waiting on this probe. */
	while ((done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs))) {
		TAILQ_REMOVE(&softc->request_ccbs,
		    &done_ccb->ccb_h, periph_links.tqe);
		done_ccb->ccb_h.status = found ? CAM_REQ_CMP : CAM_REQ_CMP_ERR;
		xpt_done(done_ccb);
	}
	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
	cam_release_devq(path, 0, 0, 0, FALSE);
	/* Probe periph is single-use: destroy it and drop our reference. */
	cam_periph_invalidate(periph);
	cam_periph_release_locked(periph);
}

/* Periph "cleanup" callback: free the softc allocated in register. */
static void
nvme_probe_cleanup(struct cam_periph *periph)
{

	free(periph->softc, M_CAMXPT);
}

#if 0
/* XXX should be used, don't delete */
/*
 * Match the device against nvme_quirk_table and hang the winning entry
 * off the cam_ed.  Currently compiled out; the catch-all entry is applied
 * in nvme_alloc_device() instead.
 */
static void
nvme_find_quirk(struct cam_ed *device)
{
	struct nvme_quirk_entry *quirk;
	caddr_t	match;

	match = cam_quirkmatch((caddr_t)&device->nvme_data,
			       (caddr_t)nvme_quirk_table,
			       nvme_quirk_table_size,
			       sizeof(*nvme_quirk_table), nvme_identify_match);

	if (match == NULL)
		panic("xpt_find_quirk: device didn't match wildcard entry!!");

	quirk = (struct nvme_quirk_entry *)match;
	device->quirk = quirk;
	if (quirk->quirks & CAM_QUIRK_MAXTAGS) {
		device->mintags = quirk->mintags;
		device->maxtags = quirk->maxtags;
	}
}
#endif

/*
 * Handle XPT_SCAN_{BUS,TGT,LUN} for NVMe: (re)start a probe of the LUN.
 * If a probe periph already exists and is valid, queue the request on it
 * and flag a restart; otherwise allocate a fresh nvme_probe periph, which
 * takes ownership of request_ccb via nvme_probe_register().
 */
static void
nvme_scan_lun(struct cam_periph *periph, struct cam_path *path,
    cam_flags flags, union ccb *request_ccb)
{
	struct ccb_pathinq cpi;
	cam_status status;
	struct cam_periph *old_periph;
	int lock;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun\n"));

	xpt_path_inq(&cpi, path);

	if (cpi.ccb_h.status != CAM_REQ_CMP) {
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = cpi.ccb_h.status;
			xpt_done(request_ccb);
		}
		return;
	}

	/* Wildcard LUN = a bus-level scan; nothing to probe here. */
	if (xpt_path_lun_id(path) == CAM_LUN_WILDCARD) {
		CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun ignoring bus\n"));
		request_ccb->ccb_h.status = CAM_REQ_CMP;	/* XXX signal error ?
								 */
		xpt_done(request_ccb);
		return;
	}

	/* Take the path lock only if the caller doesn't already hold it. */
	lock = (xpt_path_owned(path) == 0);
	if (lock)
		xpt_path_lock(path);
	if ((old_periph = cam_periph_find(path, "nvme_probe")) != NULL) {
		if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) {
			nvme_probe_softc *softc;

			/* Probe in flight: queue this request and restart it. */
			softc = (nvme_probe_softc *)old_periph->softc;
			TAILQ_INSERT_TAIL(&softc->request_ccbs,
			    &request_ccb->ccb_h, periph_links.tqe);
			softc->restart = 1;
			CAM_DEBUG(path, CAM_DEBUG_TRACE,
			    ("restarting nvme_probe device\n"));
		} else {
			request_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			CAM_DEBUG(path, CAM_DEBUG_TRACE,
			    ("Failing to restart nvme_probe device\n"));
			xpt_done(request_ccb);
		}
	} else {
		CAM_DEBUG(path, CAM_DEBUG_TRACE,
		    ("Adding nvme_probe device\n"));
		status = cam_periph_alloc(nvme_probe_register, NULL, nvme_probe_cleanup,
		    nvme_probe_start, "nvme_probe",
		    CAM_PERIPH_BIO,
		    request_ccb->ccb_h.path, NULL, 0,
		    request_ccb);

		if (status != CAM_REQ_CMP) {
			xpt_print(path, "xpt_scan_lun: cam_alloc_periph "
			    "returned an error, can't continue probe\n");
			request_ccb->ccb_h.status = status;
			xpt_done(request_ccb);
		}
	}
	if (lock)
		xpt_path_unlock(path);
}

/*
 * Transport "alloc_device" hook: create the cam_ed for a new NVMe
 * namespace and initialize it with defaults and the wildcard quirk entry.
 * Returns NULL if xpt_alloc_device() fails.
 */
static struct cam_ed *
nvme_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
	struct nvme_quirk_entry *quirk;
	struct cam_ed *device;

	device = xpt_alloc_device(bus, target, lun_id);
	if (device == NULL)
		return (NULL);

	/*
	 * Take the default quirk entry until we have inquiry
	 * data from nvme and can determine a better quirk to use.
	 */
	quirk = &nvme_quirk_table[nvme_quirk_table_size - 1];
	device->quirk = (void *)quirk;
	device->mintags = 0;
	device->maxtags = 0;
	device->inq_flags = 0;
	device->queue_flags = 0;
	device->device_id = NULL;
	device->device_id_len = 0;
	device->serial_num = NULL;
	device->serial_num_len = 0;
	return (device);
}

/*
 * Copy transport/protocol identity from the SIM's path inquiry onto the
 * device, then push those settings back down via XPT_SET_TRAN_SETTINGS.
 */
static void
nvme_device_transport(struct cam_path *path)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cts;
	/* XXX get data from nvme namespace and other info ??? */

	/* Get transport information from the SIM */
	xpt_path_inq(&cpi, path);

	path->device->transport = cpi.transport;
	path->device->transport_version = cpi.transport_version;

	path->device->protocol = cpi.protocol;
	path->device->protocol_version = cpi.protocol_version;

	/* Tell the controller what we think */
	memset(&cts, 0, sizeof(cts));
	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE);
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cts.transport = path->device->transport;
	cts.transport_version = path->device->transport_version;
	cts.protocol = path->device->protocol;
	cts.protocol_version = path->device->protocol_version;
	cts.proto_specific.valid = 0;
	cts.xport_specific.valid = 0;
	xpt_action((union ccb *)&cts);
}

/*
 * XPT_DEV_ADVINFO handler: fetch (and for PHYS_PATH, store) the advisory
 * info cached on the device -- device ID, serial number, physical path,
 * and raw NVMe controller/namespace data.  Fetches report the full size in
 * cdai->provsiz and copy at most cdai->bufsiz bytes.  Status is left
 * CAM_REQ_INVALID for unsupported buftype/store combinations.
 */
static void
nvme_dev_advinfo(union ccb *start_ccb)
{
	struct cam_ed *device;
	struct ccb_dev_advinfo *cdai;
	off_t amt;

	xpt_path_assert(start_ccb->ccb_h.path, MA_OWNED);
	start_ccb->ccb_h.status = CAM_REQ_INVALID;
	device = start_ccb->ccb_h.path->device;
	cdai = &start_ccb->cdai;
	switch(cdai->buftype) {
	case CDAI_TYPE_SCSI_DEVID:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		cdai->provsiz = device->device_id_len;
		if (device->device_id_len == 0)
			break;
		amt = device->device_id_len;
		if (cdai->provsiz > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->device_id, amt);
		break;
	case CDAI_TYPE_SERIAL_NUM:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		cdai->provsiz = device->serial_num_len;
		if (device->serial_num_len == 0)
			break;
		amt = device->serial_num_len;
		if (cdai->provsiz > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->serial_num, amt);
		break;
	case CDAI_TYPE_PHYS_PATH:
		if (cdai->flags & CDAI_FLAG_STORE) {
			/* Replace any previously stored physical path. */
			if (device->physpath != NULL) {
				free(device->physpath, M_CAMXPT);
				device->physpath = NULL;
				device->physpath_len = 0;
			}
			/* Clear existing buffer if zero length */
			if (cdai->bufsiz == 0)
				break;
			device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT);
			if (device->physpath == NULL) {
				start_ccb->ccb_h.status = CAM_REQ_ABORTED;
				return;
			}
			device->physpath_len = cdai->bufsiz;
			memcpy(device->physpath, cdai->buf, cdai->bufsiz);
		} else {
			cdai->provsiz = device->physpath_len;
			if (device->physpath_len == 0)
				break;
			amt = device->physpath_len;
			if (cdai->provsiz > cdai->bufsiz)
				amt = cdai->bufsiz;
			memcpy(cdai->buf, device->physpath, amt);
		}
		break;
	case CDAI_TYPE_NVME_CNTRL:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		amt = sizeof(struct nvme_controller_data);
		cdai->provsiz = amt;
		if (amt > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->nvme_cdata, amt);
		break;
	case CDAI_TYPE_NVME_NS:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		amt = sizeof(struct nvme_namespace_data);
		cdai->provsiz = amt;
		if (amt > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->nvme_data, amt);
		break;
	default:
		return;
	}
	start_ccb->ccb_h.status = CAM_REQ_CMP;

	/* A successful store changes advisory info: notify listeners. */
	if (cdai->flags & CDAI_FLAG_STORE) {
		xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path,
			  (void *)(uintptr_t)cdai->buftype);
	}
}

/*
 * Transport "action" hook: dispatch the CCBs this transport implements
 * itself (scans and advisory info); everything else goes to the default
 * XPT handler.
 */
static void
nvme_action(union ccb *start_ccb)
{
	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("nvme_action: func= %#x\n", start_ccb->ccb_h.func_code));

	switch (start_ccb->ccb_h.func_code) {
	case XPT_SCAN_BUS:
	case XPT_SCAN_TGT:
	case XPT_SCAN_LUN:
		nvme_scan_lun(start_ccb->ccb_h.path->periph,
		    start_ccb->ccb_h.path, start_ccb->crcn.flags,
		    start_ccb);
		break;
	case XPT_DEV_ADVINFO:
		nvme_dev_advinfo(start_ccb);
		break;

	default:
		xpt_action_default(start_ccb);
		break;
	}
}

/*
 * Handle any per-device event notifications that require action by the XPT.
 */
static void
nvme_dev_async(uint32_t async_code, struct cam_eb *bus, struct cam_et *target,
	       struct cam_ed *device, void *async_arg)
{

	/*
	 * We only need to handle events for real devices.
	 */
	if (target->target_id == CAM_TARGET_WILDCARD
	 || device->lun_id == CAM_LUN_WILDCARD)
		return;

	/* Lost device: mark it unconfigured and drop the XPT's reference. */
	if (async_code == AC_LOST_DEVICE &&
	    (device->flags & CAM_DEV_UNCONFIGURED) == 0) {
		device->flags |= CAM_DEV_UNCONFIGURED;
		xpt_release_device(device);
	}
}

/*
 * Transport announce hook: append one line of the form
 * "nda0: nvme version 1.4 x4 (max x4) lanes PCIe Gen3 (max Gen4) link"
 * to sb.  Link details come from the SIM's current transfer settings and
 * are only printed when the SIM marks them valid.
 */
static void
nvme_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cts;
	struct cam_path *path = periph->path;
	struct ccb_trans_settings_nvme *nvmex;

	cam_periph_assert(periph, MA_OWNED);

	/* Ask the SIM for connection details */
	memset(&cts, 0, sizeof(cts));
	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		return;

	/* Ask the SIM for its base transfer speed */
	xpt_path_inq(&cpi, periph->path);
	sbuf_printf(sb, "%s%d: nvme version %d.%d",
	    periph->periph_name, periph->unit_number,
	    NVME_MAJOR(cts.protocol_version),
	    NVME_MINOR(cts.protocol_version));
	if (cts.transport == XPORT_NVME) {
		nvmex = &cts.proto_specific.nvme;
		if (nvmex->valid & CTS_NVME_VALID_LINK)
			sbuf_printf(sb,
			    " x%d (max x%d) lanes PCIe Gen%d (max Gen%d) link",
			    nvmex->lanes, nvmex->max_lanes,
			    nvmex->speed, nvmex->max_speed);
	}
	sbuf_putc(sb, '\n');
}

/* Protocol announce hook: full model/serial/firmware identification line. */
static void
nvme_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb)
{
	nvme_print_ident(device->nvme_cdata, device->nvme_data, sb);
}

/* Protocol denounce hook: short identification, used on device departure. */
static void
nvme_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb)
{
	nvme_print_ident_short(device->nvme_cdata, device->nvme_data, sb);
}

/*
 * CAM_DEBUG_CDB output for NVMe CCBs: print the opcode name and a hex
 * dump of the submission-queue entry.  Ignores non-NVMe CCBs.
 */
static void
nvme_proto_debug_out(union ccb *ccb)
{
	char cdb_str[(sizeof(struct nvme_command) * 3) + 1];

	if (ccb->ccb_h.func_code != XPT_NVME_IO &&
	    ccb->ccb_h.func_code != XPT_NVME_ADMIN)
		return;

	CAM_DEBUG(ccb->ccb_h.path,
	    CAM_DEBUG_CDB,("%s. NCB: %s\n", nvme_op_string(&ccb->nvmeio.cmd,
		ccb->ccb_h.func_code == XPT_NVME_ADMIN),
		nvme_cmd_string(&ccb->nvmeio.cmd, cdb_str, sizeof(cdb_str))));
}