/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/module.h>

#include <vm/uma.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

struct nvme_consumer {
	uint32_t		id;
	nvme_cons_ns_fn_t	ns_fn;
	nvme_cons_ctrlr_fn_t	ctrlr_fn;
	nvme_cons_async_fn_t	async_fn;
	nvme_cons_fail_fn_t	fail_fn;
};

struct nvme_consumer nvme_consumer[NVME_MAX_CONSUMERS];
#define	INVALID_CONSUMER_ID	0xFFFF

uma_zone_t	nvme_request_zone;
int32_t		nvme_retry_count;

MALLOC_DEFINE(M_NVME, "nvme", "nvme(4) memory allocations");

static int	nvme_probe(device_t);
static int	nvme_attach(device_t);
static int	nvme_detach(device_t);
static int	nvme_modevent(module_t mod, int type, void *arg);

static devclass_t nvme_devclass;

static device_method_t nvme_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nvme_probe),
	DEVMETHOD(device_attach,	nvme_attach),
	DEVMETHOD(device_detach,	nvme_detach),
	{ 0, 0 }
};

static driver_t nvme_pci_driver = {
	"nvme",
	nvme_pci_methods,
	sizeof(struct nvme_controller),
};

DRIVER_MODULE(nvme, pci, nvme_pci_driver, nvme_devclass, nvme_modevent, 0);
MODULE_VERSION(nvme, 1);

static struct _pcsid
{
	u_int32_t	type;
	const char	*desc;
} pci_ids[] = {
	{ 0x01118086,		"NVMe Controller" },
	{ CHATHAM_PCI_ID,	"Chatham Prototype NVMe Controller" },
	{ IDT32_PCI_ID,		"IDT NVMe Controller (32 channel)" },
	{ IDT8_PCI_ID,		"IDT NVMe Controller (8 channel)" },
	{ 0x00000000,		NULL }
};

static int
nvme_probe(device_t device)
{
	struct _pcsid	*ep;
	u_int32_t	type;

	type = pci_get_devid(device);
	ep = pci_ids;

	while (ep->type && ep->type != type)
		++ep;

	if (ep->desc) {
		device_set_desc(device, ep->desc);
		return (BUS_PROBE_DEFAULT);
	}

#if defined(PCIS_STORAGE_NVM)
	if (pci_get_class(device) == PCIC_STORAGE &&
	    pci_get_subclass(device) == PCIS_STORAGE_NVM &&
	    pci_get_progif(device) == PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0) {
		device_set_desc(device, "Generic NVMe Device");
		return (BUS_PROBE_GENERIC);
	}
#endif

	return (ENXIO);
}
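/*
 * Note on the ID table above: pci_get_devid() packs the PCI device ID
 * into the upper 16 bits and the vendor ID into the lower 16 bits, so
 * 0x01118086 matches device 0x0111 from vendor 0x8086 (Intel).  A new
 * controller would be matched by adding an entry before the NULL
 * terminator (the ID below is hypothetical):
 *
 *	{ 0x12345678,	"Example NVMe Controller" },
 *
 * The class-based match returns BUS_PROBE_GENERIC, a lower priority
 * than BUS_PROBE_DEFAULT, so a driver with a device-specific match can
 * still claim the device.
 */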
static void
nvme_init(void)
{
	uint32_t	i;

	nvme_request_zone = uma_zcreate("nvme_request",
	    sizeof(struct nvme_request), NULL, NULL, NULL, NULL, 0, 0);

	for (i = 0; i < NVME_MAX_CONSUMERS; i++)
		nvme_consumer[i].id = INVALID_CONSUMER_ID;
}

SYSINIT(nvme_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_init, NULL);

static void
nvme_uninit(void)
{
	uma_zdestroy(nvme_request_zone);
}

SYSUNINIT(nvme_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_uninit, NULL);

static void
nvme_load(void)
{
}

static void
nvme_unload(void)
{
}

static void
nvme_shutdown(void)
{
	device_t		*devlist;
	struct nvme_controller	*ctrlr;
	union cc_register	cc;
	union csts_register	csts;
	int			dev, devcount;

	if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
		return;

	for (dev = 0; dev < devcount; dev++) {
		/*
		 * Only notify the controller of shutdown when a real
		 * shutdown is in process, not when a module unload occurs.
		 * At least some controllers (Chatham, for one) do not let
		 * you re-enable the controller after a shutdown
		 * notification has been received.
		 */
		ctrlr = DEVICE2SOFTC(devlist[dev]);
		cc.raw = nvme_mmio_read_4(ctrlr, cc);
		cc.bits.shn = NVME_SHN_NORMAL;
		nvme_mmio_write_4(ctrlr, cc, cc.raw);
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
		while (csts.bits.shst != NVME_SHST_COMPLETE) {
			DELAY(5);
			csts.raw = nvme_mmio_read_4(ctrlr, csts);
		}
	}

	free(devlist, M_TEMP);
}
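/*
 * The wait loop in nvme_shutdown() spins until CSTS.SHST reports that
 * shutdown processing is complete.  A minimal sketch of a bounded
 * variant of that loop, assuming a hypothetical NVME_SHUTDOWN_TIMEOUT_US
 * constant that this driver does not define (compiled out, for
 * illustration only):
 */
#if 0
		int timeout_us = NVME_SHUTDOWN_TIMEOUT_US;

		csts.raw = nvme_mmio_read_4(ctrlr, csts);
		while (csts.bits.shst != NVME_SHST_COMPLETE && timeout_us > 0) {
			DELAY(5);
			timeout_us -= 5;
			csts.raw = nvme_mmio_read_4(ctrlr, csts);
		}
		if (csts.bits.shst != NVME_SHST_COMPLETE)
			printf("nvme: shutdown did not complete in time\n");
#endif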
static int
nvme_modevent(module_t mod, int type, void *arg)
{

	switch (type) {
	case MOD_LOAD:
		nvme_load();
		break;
	case MOD_UNLOAD:
		nvme_unload();
		break;
	case MOD_SHUTDOWN:
		nvme_shutdown();
		break;
	default:
		break;
	}

	return (0);
}

void
nvme_dump_command(struct nvme_command *cmd)
{
	printf(
	    "opc:%x f:%x r1:%x cid:%x nsid:%x r2:%x r3:%x mptr:%jx "
	    "prp1:%jx prp2:%jx cdw:%x %x %x %x %x %x\n",
	    cmd->opc, cmd->fuse, cmd->rsvd1, cmd->cid, cmd->nsid,
	    cmd->rsvd2, cmd->rsvd3,
	    (uintmax_t)cmd->mptr, (uintmax_t)cmd->prp1, (uintmax_t)cmd->prp2,
	    cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14,
	    cmd->cdw15);
}

void
nvme_dump_completion(struct nvme_completion *cpl)
{
	printf("cdw0:%08x sqhd:%04x sqid:%04x "
	    "cid:%04x p:%x sc:%02x sct:%x m:%x dnr:%x\n",
	    cpl->cdw0, cpl->sqhd, cpl->sqid,
	    cpl->cid, cpl->status.p, cpl->status.sc, cpl->status.sct,
	    cpl->status.m, cpl->status.dnr);
}

void
nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct nvme_tracker	*tr = arg;
	uint32_t		cur_nseg;

	/*
	 * If the mapping operation failed, return immediately.  The caller
	 * is responsible for detecting the error status and failing the
	 * tracker manually.
	 */
	if (error != 0)
		return;

	/*
	 * Note that we specified PAGE_SIZE for alignment and max segment
	 * size when creating the bus dma tags.  So here we can safely just
	 * transfer each segment to its associated PRP entry.
	 */
	tr->req->cmd.prp1 = seg[0].ds_addr;

	if (nseg == 2) {
		tr->req->cmd.prp2 = seg[1].ds_addr;
	} else if (nseg > 2) {
		cur_nseg = 1;
		tr->req->cmd.prp2 = (uint64_t)tr->prp_bus_addr;
		while (cur_nseg < nseg) {
			tr->prp[cur_nseg-1] =
			    (uint64_t)seg[cur_nseg].ds_addr;
			cur_nseg++;
		}
	}

	nvme_qpair_submit_tracker(tr->qpair, tr);
}
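/*
 * Worked example for nvme_payload_map() (illustrative values): a
 * page-aligned transfer that maps to three bus_dma segments produces
 *
 *	tr->req->cmd.prp1 = seg[0].ds_addr;	first data page
 *	tr->req->cmd.prp2 = tr->prp_bus_addr;	bus address of the PRP list
 *	tr->prp[0] = seg[1].ds_addr;		PRP list entry for page 1
 *	tr->prp[1] = seg[2].ds_addr;		PRP list entry for page 2
 *
 * i.e. prp2 holds a second data pointer only for two-segment transfers;
 * for anything larger it holds the address of the PRP list instead.
 */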
static int
nvme_attach(device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);
	int			status;

	status = nvme_ctrlr_construct(ctrlr, dev);

	if (status != 0)
		return (status);

	/*
	 * Reset controller twice to ensure we do a transition from cc.en==1
	 * to cc.en==0.  This is because we don't really know what status
	 * the controller was left in when boot handed off to OS.
	 */
	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0)
		return (status);

	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0)
		return (status);

	nvme_sysctl_initialize_ctrlr(ctrlr);

	ctrlr->config_hook.ich_func = nvme_ctrlr_start_config_hook;
	ctrlr->config_hook.ich_arg = ctrlr;

	config_intrhook_establish(&ctrlr->config_hook);

	return (0);
}

static int
nvme_detach(device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);

	nvme_ctrlr_destruct(ctrlr, dev);
	return (0);
}

static void
nvme_notify_consumer(struct nvme_consumer *cons)
{
	device_t		*devlist;
	struct nvme_controller	*ctrlr;
	struct nvme_namespace	*ns;
	void			*ctrlr_cookie;
	int			dev_idx, ns_idx, devcount;

	if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
		return;

	for (dev_idx = 0; dev_idx < devcount; dev_idx++) {
		ctrlr = DEVICE2SOFTC(devlist[dev_idx]);
		if (cons->ctrlr_fn != NULL)
			ctrlr_cookie = (*cons->ctrlr_fn)(ctrlr);
		else
			ctrlr_cookie = NULL;
		ctrlr->cons_cookie[cons->id] = ctrlr_cookie;
		for (ns_idx = 0; ns_idx < ctrlr->cdata.nn; ns_idx++) {
			ns = &ctrlr->ns[ns_idx];
			if (cons->ns_fn != NULL)
				ns->cons_cookie[cons->id] =
				    (*cons->ns_fn)(ns, ctrlr_cookie);
		}
	}

	free(devlist, M_TEMP);
}

void
nvme_notify_async_consumers(struct nvme_controller *ctrlr,
			    const struct nvme_completion *async_cpl,
			    uint32_t log_page_id, void *log_page_buffer,
			    uint32_t log_page_size)
{
	struct nvme_consumer	*cons;
	uint32_t		i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID && cons->async_fn != NULL)
			(*cons->async_fn)(ctrlr->cons_cookie[i], async_cpl,
			    log_page_id, log_page_buffer, log_page_size);
	}
}

void
nvme_notify_fail_consumers(struct nvme_controller *ctrlr)
{
	struct nvme_consumer	*cons;
	uint32_t		i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID && cons->fail_fn != NULL)
			cons->fail_fn(ctrlr->cons_cookie[i]);
	}
}

struct nvme_consumer *
nvme_register_consumer(nvme_cons_ns_fn_t ns_fn, nvme_cons_ctrlr_fn_t ctrlr_fn,
		       nvme_cons_async_fn_t async_fn,
		       nvme_cons_fail_fn_t fail_fn)
{
	int i;

	/*
	 * TODO: add locking around consumer registration.  Not an issue
	 * right now since we only have one nvme consumer - nvd(4).
	 */
	for (i = 0; i < NVME_MAX_CONSUMERS; i++)
		if (nvme_consumer[i].id == INVALID_CONSUMER_ID) {
			nvme_consumer[i].id = i;
			nvme_consumer[i].ns_fn = ns_fn;
			nvme_consumer[i].ctrlr_fn = ctrlr_fn;
			nvme_consumer[i].async_fn = async_fn;
			nvme_consumer[i].fail_fn = fail_fn;

			nvme_notify_consumer(&nvme_consumer[i]);
			return (&nvme_consumer[i]);
		}

	printf("nvme(4): consumer not registered - no slots available\n");
	return (NULL);
}

void
nvme_unregister_consumer(struct nvme_consumer *consumer)
{

	consumer->id = INVALID_CONSUMER_ID;
}

void
nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_completion_poll_status	*status = arg;

	/*
	 * Copy status into the argument passed by the caller, so that
	 * the caller can check the status to determine if the request
	 * passed or failed.
	 */
	memcpy(&status->cpl, cpl, sizeof(*cpl));
	wmb();
	status->done = TRUE;
}
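/*
 * Example (hypothetical, compiled out): the synchronous-wait pattern
 * that nvme_completion_poll_cb() supports.  A caller passes the callback
 * and a stack-allocated poll status as a request's completion callback
 * and argument, then spins on status.done.  The submission call is
 * elided here; any of the driver's command helpers would fill that role.
 */
#if 0
	struct nvme_completion_poll_status	status;

	status.done = FALSE;
	/* ...submit a request with (nvme_completion_poll_cb, &status)... */
	while (status.done == FALSE)
		DELAY(5);

	if (nvme_completion_is_error(&status.cpl))
		printf("command failed\n");
#endif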