1 /*- 2 * Copyright (c) 2016 Netflix, Inc. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer, 9 * without modification, immediately at the beginning of the file. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 20 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 21 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/smp.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "nvme_private.h"

#define ccb_accb_ptr spriv_ptr0
#define ccb_ctrlr_ptr spriv_ptr1

static void nvme_sim_action(struct cam_sim *sim, union ccb *ccb);
static void nvme_sim_poll(struct cam_sim *sim);

/* Convenience accessors: a SIM's softc, and the controller it wraps. */
#define sim2softc(sim)  ((struct nvme_sim_softc *)cam_sim_softc(sim))
#define sim2ctrlr(sim)  (sim2softc(sim)->s_ctrlr)

/*
 * Per-controller SIM state: one CAM SIM and one wildcard path are
 * created for each NVMe controller registered with this consumer.
 */
struct nvme_sim_softc
{
        struct nvme_controller  *s_ctrlr;       /* backing NVMe controller */
        struct cam_sim          *s_sim;         /* CAM SIM for this controller */
        struct cam_path         *s_path;        /* wildcard path, for async events */
};

/*
 * Completion callback for NVMe requests submitted via nvme_sim_nvmeio().
 * Runs when the controller completes the command; ccb_arg is the CCB
 * that originated the request.
 */
static void
nvme_sim_nvmeio_done(void *ccb_arg, const struct nvme_completion *cpl)
{
        union ccb *ccb = (union ccb *)ccb_arg;

        /*
         * Let the periph know the completion, and let it sort out what
         * it means. Report an error or success based on SC and SCT.
         * We do not try to fetch additional data from the error log,
         * though maybe we should in the future.
         */
        /*
         * NOTE(review): the assignments below replace the whole status
         * field (rather than or-ing in a status code), which also clears
         * any flag bits other than the CAM_SIM_QUEUED cleared above —
         * presumably intentional; confirm against CAM status conventions.
         */
        memcpy(&ccb->nvmeio.cpl, cpl, sizeof(*cpl));
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        if (nvme_completion_is_error(cpl)) {
                ccb->ccb_h.status = CAM_NVME_STATUS_ERROR;
                xpt_done(ccb);
        } else {
                ccb->ccb_h.status = CAM_REQ_CMP;
                /* Success path can complete without queueing to the doneq. */
                xpt_done_direct(ccb);
        }
}

/*
 * Translate an XPT_NVME_IO / XPT_NVME_ADMIN CCB into an nvme_request and
 * submit it to the controller.  The CCB is completed asynchronously from
 * nvme_sim_nvmeio_done(), or immediately here if no request could be
 * allocated.
 */
static void
nvme_sim_nvmeio(struct cam_sim *sim, union ccb *ccb)
{
        struct ccb_nvmeio *nvmeio = &ccb->nvmeio;
        struct nvme_request *req;
        void *payload;
        uint32_t size;
        struct nvme_controller *ctrlr;

        ctrlr = sim2ctrlr(sim);
        payload = nvmeio->data_ptr;
        size = nvmeio->dxfer_len;
        /*
         * SG LIST ???  Pick the request flavor matching the CCB's data
         * transfer type: bio, CCB-described S/G list, no payload, or a
         * plain kernel virtual address.
         */
        if ((nvmeio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
                req = nvme_allocate_request_bio((struct bio *)payload,
                    M_NOWAIT, nvme_sim_nvmeio_done, ccb);
        else if ((nvmeio->ccb_h.flags & CAM_DATA_SG) == CAM_DATA_SG)
                req = nvme_allocate_request_ccb(ccb, M_NOWAIT,
                    nvme_sim_nvmeio_done, ccb);
        else if (payload == NULL)
                req = nvme_allocate_request_null(M_NOWAIT, nvme_sim_nvmeio_done,
                    ccb);
        else
                req = nvme_allocate_request_vaddr(payload, size, M_NOWAIT,
                    nvme_sim_nvmeio_done, ccb);
        if (req == NULL) {
                /* Out of requests right now: fail the CCB back to CAM. */
                nvmeio->ccb_h.status = CAM_RESRC_UNAVAIL;
                xpt_done(ccb);
                return;
        }
        ccb->ccb_h.status |= CAM_SIM_QUEUED;

        /* The CCB carries a fully formed NVMe SQE; copy it verbatim. */
        memcpy(&req->cmd, &ccb->nvmeio.cmd, sizeof(ccb->nvmeio.cmd));

        if (ccb->ccb_h.func_code == XPT_NVME_IO)
                nvme_ctrlr_submit_io_request(ctrlr, req);
        else
                nvme_ctrlr_submit_admin_request(ctrlr, req);
}

/*
 * Report the controller's current PCIe link bandwidth in kB/s, computed
 * from the Link Status register (per-lane rate for gen1..gen4, scaled by
 * lane count).
 */
static uint32_t
nvme_link_kBps(struct nvme_controller *ctrlr)
{
        /* link[i]: per-lane kB/s for PCIe gen i; link[0] is a sentinel. */
        uint32_t speed, lanes, link[] = { 1, 250000, 500000, 985000, 1970000 };
        uint32_t status;

        status = pcie_read_config(ctrlr->dev, PCIER_LINK_STA, 2);
        speed = status & PCIEM_LINK_STA_SPEED;
        lanes = (status & PCIEM_LINK_STA_WIDTH) >> 4;
        /*
         * Failsafe on link speed indicator. If it is insane report the number of
         * lanes as the speed. Not 100% accurate, but may be diagnostic.
         */
        if (speed >= nitems(link))
                speed = 0;
        return link[speed] * lanes;
}

/*
 * CAM action entry point for the NVMe SIM.  Services the subset of XPT
 * requests CAM sends at NVMe SIMs; NVMe I/O and admin CCBs are submitted
 * asynchronously, everything else is completed inline via xpt_done().
 */
static void
nvme_sim_action(struct cam_sim *sim, union ccb *ccb)
{
        struct nvme_controller *ctrlr;

        CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
            ("nvme_sim_action: func= %#x\n",
                ccb->ccb_h.func_code));

        ctrlr = sim2ctrlr(sim);

        switch (ccb->ccb_h.func_code) {
        case XPT_CALC_GEOMETRY:         /* Calculate Geometry Totally nuts ? XXX */
                /*
                 * Only meaningful for old-school SCSI disks since only the SCSI
                 * da driver generates them. Reject all these that slip through.
                 */
                /*FALLTHROUGH*/
        case XPT_ABORT:                 /* Abort the specified CCB */
                ccb->ccb_h.status = CAM_REQ_INVALID;
                break;
        case XPT_SET_TRAN_SETTINGS:
                /*
                 * NVMe doesn't really have different transfer settings, but
                 * other parts of CAM think failure here is a big deal.
                 */
                ccb->ccb_h.status = CAM_REQ_CMP;
                break;
        case XPT_PATH_INQ:              /* Path routing inquiry */
        {
                struct ccb_pathinq *cpi = &ccb->cpi;
                device_t dev = ctrlr->dev;

                /*
                 * For devices that are reported as children of the AHCI
                 * controller, which has no access to the config space for this
                 * controller, report the AHCI controller's data.
                 */
                if (ctrlr->quirks & QUIRK_AHCI)
                        dev = device_get_parent(dev);
                cpi->version_num = 1;
                cpi->hba_inquiry = 0;
                cpi->target_sprt = 0;
                /* Unmapped I/O is fine; periph drives scanning, not xpt. */
                cpi->hba_misc = PIM_UNMAPPED | PIM_NOSCAN;
                cpi->hba_eng_cnt = 0;
                cpi->max_target = 0;
                /* Namespaces map onto LUNs: nn is the namespace count. */
                cpi->max_lun = ctrlr->cdata.nn;
                cpi->maxio = ctrlr->max_xfer_size;
                cpi->initiator_id = 0;
                cpi->bus_id = cam_sim_bus(sim);
                cpi->base_transfer_speed = nvme_link_kBps(ctrlr);
                strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
                strlcpy(cpi->hba_vid, "NVMe", HBA_IDLEN);
                strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
                cpi->unit_number = cam_sim_unit(sim);
                cpi->transport = XPORT_NVME;            /* XXX XPORT_PCIE ? */
                /* Version register doubles as transport/protocol version. */
                cpi->transport_version = nvme_mmio_read_4(ctrlr, vs);
                cpi->protocol = PROTO_NVME;
                cpi->protocol_version = nvme_mmio_read_4(ctrlr, vs);
                cpi->xport_specific.nvme.nsid = xpt_path_lun_id(ccb->ccb_h.path);
                cpi->xport_specific.nvme.domain = pci_get_domain(dev);
                cpi->xport_specific.nvme.bus = pci_get_bus(dev);
                cpi->xport_specific.nvme.slot = pci_get_slot(dev);
                cpi->xport_specific.nvme.function = pci_get_function(dev);
                cpi->xport_specific.nvme.extra = 0;
                strlcpy(cpi->xport_specific.nvme.dev_name, device_get_nameunit(dev),
                    sizeof(cpi->xport_specific.nvme.dev_name));
                cpi->hba_vendor = pci_get_vendor(dev);
                cpi->hba_device = pci_get_device(dev);
                cpi->hba_subvendor = pci_get_subvendor(dev);
                cpi->hba_subdevice = pci_get_subdevice(dev);
                cpi->ccb_h.status = CAM_REQ_CMP;
                break;
        }
        case XPT_GET_TRAN_SETTINGS:     /* Get transport settings */
        {
                struct ccb_trans_settings *cts;
                struct ccb_trans_settings_nvme *nvmep;
                struct ccb_trans_settings_nvme *nvmex;
                device_t dev;
                uint32_t status, caps, flags;

                dev = ctrlr->dev;
                cts = &ccb->cts;
                nvmex = &cts->xport_specific.nvme;
                nvmep = &cts->proto_specific.nvme;

                nvmex->spec = nvme_mmio_read_4(ctrlr, vs);
                nvmex->valid = CTS_NVME_VALID_SPEC;
                if ((ctrlr->quirks & QUIRK_AHCI) == 0) {
                        /* AHCI redirect makes it impossible to query */
                        status = pcie_read_config(dev, PCIER_LINK_STA, 2);
                        caps = pcie_read_config(dev, PCIER_LINK_CAP, 2);
                        flags = pcie_read_config(dev, PCIER_FLAGS, 2);
                        /* Only endpoints report meaningful link status here. */
                        if ((flags & PCIEM_FLAGS_TYPE) == PCIEM_TYPE_ENDPOINT) {
                                nvmex->valid |= CTS_NVME_VALID_LINK;
                                nvmex->speed = status & PCIEM_LINK_STA_SPEED;
                                nvmex->lanes = (status & PCIEM_LINK_STA_WIDTH) >> 4;
                                nvmex->max_speed = caps & PCIEM_LINK_CAP_MAX_SPEED;
                                nvmex->max_lanes = (caps & PCIEM_LINK_CAP_MAX_WIDTH) >> 4;
                        }
                }

                /* XXX these should be something else maybe ? */
                nvmep->valid = CTS_NVME_VALID_SPEC;
                nvmep->spec = nvmex->spec;

                cts->transport = XPORT_NVME;
                cts->transport_version = nvmex->spec;
                cts->protocol = PROTO_NVME;
                cts->protocol_version = nvmex->spec;
                cts->ccb_h.status = CAM_REQ_CMP;
                break;
        }
        case XPT_TERM_IO:               /* Terminate the I/O process */
                /*
                 * every driver handles this, but nothing generates it. Assume
                 * it's OK to just say 'that worked'.
                 */
                /*FALLTHROUGH*/
        case XPT_RESET_DEV:             /* Bus Device Reset the specified device */
        case XPT_RESET_BUS:             /* Reset the specified bus */
                /*
                 * NVMe doesn't really support physically resetting the bus. It's part
                 * of the bus scanning dance, so return success to tell the process to
                 * proceed.
                 */
                ccb->ccb_h.status = CAM_REQ_CMP;
                break;
        case XPT_NVME_IO:               /* Execute the requested I/O operation */
                if (ctrlr->is_failed) {
                        /*
                         * I/O came in while we were failing the drive, so drop
                         * it. Once failure is complete, we'll be destroyed.
                         */
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                }
                nvme_sim_nvmeio(sim, ccb);
                return;                 /* no done */
        case XPT_NVME_ADMIN:            /* or Admin operation */
                if (ctrlr->is_failed_admin) {
                        /*
                         * Admin request came in when we can't send admin
                         * commands, so drop it. Once failure is complete, we'll
                         * be destroyed.
                         */
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                }
                nvme_sim_nvmeio(sim, ccb);
                return;                 /* no done */
        default:
                ccb->ccb_h.status = CAM_REQ_INVALID;
                break;
        }
        xpt_done(ccb);
}

/*
 * CAM poll entry point: run the controller's completion processing so
 * queued CCBs can finish when interrupts are unavailable (dump/kdb).
 */
static void
nvme_sim_poll(struct cam_sim *sim)
{

        nvme_ctrlr_poll(sim2ctrlr(sim));
}

/*
 * Consumer callback: a new NVMe controller has attached.  Allocate a
 * devq sized to the controller's outstanding-I/O limit, create and
 * register a SIM plus a wildcard path for async events.  Returns the
 * softc used as the cookie for later callbacks, or NULL on failure
 * (all partially created resources are unwound).
 */
static void *
nvme_sim_new_controller(struct nvme_controller *ctrlr)
{
        struct nvme_sim_softc *sc;
        struct cam_devq *devq;
        int max_trans;

        max_trans = ctrlr->max_hw_pend_io;
        devq = cam_simq_alloc(max_trans);
        if (devq == NULL)
                return (NULL);

        sc = malloc(sizeof(*sc), M_NVME, M_ZERO | M_WAITOK);
        sc->s_ctrlr = ctrlr;

        sc->s_sim = cam_sim_alloc(nvme_sim_action, nvme_sim_poll,
            "nvme", sc, device_get_unit(ctrlr->dev),
            NULL, max_trans, max_trans, devq);
        if (sc->s_sim == NULL) {
                printf("Failed to allocate a sim\n");
                cam_simq_free(devq);
                goto err1;
        }
        if (xpt_bus_register(sc->s_sim, ctrlr->dev, 0) != CAM_SUCCESS) {
                printf("Failed to create a bus\n");
                goto err2;
        }
        if (xpt_create_path(&sc->s_path, /*periph*/NULL, cam_sim_path(sc->s_sim),
            CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
                printf("Failed to create a path\n");
                goto err3;
        }

        return (sc);

err3:
        xpt_bus_deregister(cam_sim_path(sc->s_sim));
err2:
        /* cam_sim_free with free_devq TRUE also releases devq. */
        cam_sim_free(sc->s_sim, /*free_devq*/TRUE);
err1:
        free(sc, M_NVME);
        return (NULL);
}

/*
 * Consumer callback: a namespace appeared (or changed) on a controller
 * we registered.  Kick off an async rescan of the corresponding LUN.
 * Returns sc_arg on success, NULL if the rescan could not be queued.
 */
static void *
nvme_sim_ns_change(struct nvme_namespace *ns, void *sc_arg)
{
        struct nvme_sim_softc *sc = sc_arg;
        union ccb *ccb;

        ccb = xpt_alloc_ccb_nowait();
        if (ccb == NULL) {
                printf("unable to alloc CCB for rescan\n");
                return (NULL);
        }

        /*
         * We map the NVMe namespace idea onto the CAM unit LUN. For
         * each new namespace, we create a new CAM path for it. We then
         * rescan the path to get it to enumerate.
         */
        if (xpt_create_path(&ccb->ccb_h.path, /*periph*/NULL,
            cam_sim_path(sc->s_sim), 0, ns->id) != CAM_REQ_CMP) {
                printf("unable to create path for rescan\n");
                xpt_free_ccb(ccb);
                return (NULL);
        }
        /* xpt_rescan takes ownership of the CCB (and its path). */
        xpt_rescan(ccb);

        return (sc_arg);
}

/*
 * Consumer callback: the controller has failed.  Announce device loss,
 * tear down the path, bus and SIM, and free the softc.
 */
static void
nvme_sim_controller_fail(void *ctrlr_arg)
{
        struct nvme_sim_softc *sc = ctrlr_arg;

        xpt_async(AC_LOST_DEVICE, sc->s_path, NULL);
        xpt_free_path(sc->s_path);
        xpt_bus_deregister(cam_sim_path(sc->s_sim));
        cam_sim_free(sc->s_sim, /*free_devq*/TRUE);
        free(sc, M_NVME);
}

/*
 * Handle for nvme_unregister_consumer() at unload time.
 * NOTE(review): not referenced outside this file — could be static.
 */
struct nvme_consumer *consumer_cookie;

/*
 * Register this SIM as an NVMe consumer at boot, unless the legacy nvd
 * front end was selected instead.
 */
static void
nvme_sim_init(void)
{
        if (nvme_use_nvd)
                return;

        consumer_cookie = nvme_register_consumer(nvme_sim_ns_change,
            nvme_sim_new_controller, NULL, nvme_sim_controller_fail);
}

SYSINIT(nvme_sim_register, SI_SUB_DRIVERS, SI_ORDER_ANY,
    nvme_sim_init, NULL);

/* Unregister the consumer on unload; mirrors nvme_sim_init(). */
static void
nvme_sim_uninit(void)
{
        if (nvme_use_nvd)
                return;
        /* XXX Cleanup */

        nvme_unregister_consumer(consumer_cookie);
}

SYSUNINIT(nvme_sim_unregister, SI_SUB_DRIVERS, SI_ORDER_ANY,
    nvme_sim_uninit, NULL);