/*-
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/module.h>

#include <vm/uma.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

struct nvme_consumer {
	uint32_t		id;
	nvme_cons_ns_fn_t	ns_fn;
	nvme_cons_ctrlr_fn_t	ctrlr_fn;
	nvme_cons_async_fn_t	async_fn;
	nvme_cons_fail_fn_t	fail_fn;
};

struct nvme_consumer nvme_consumer[NVME_MAX_CONSUMERS];
#define	INVALID_CONSUMER_ID	0xFFFF

uma_zone_t	nvme_request_zone;
int32_t		nvme_retry_count;

MALLOC_DEFINE(M_NVME, "nvme", "nvme(4) memory allocations");

static int	nvme_probe(device_t);
static int	nvme_attach(device_t);
static int	nvme_detach(device_t);
static int	nvme_shutdown(device_t);
static int	nvme_modevent(module_t mod, int type, void *arg);

static devclass_t nvme_devclass;

static device_method_t nvme_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nvme_probe),
	DEVMETHOD(device_attach,	nvme_attach),
	DEVMETHOD(device_detach,	nvme_detach),
	DEVMETHOD(device_shutdown,	nvme_shutdown),
	{ 0, 0 }
};

static driver_t nvme_pci_driver = {
	"nvme",
	nvme_pci_methods,
	sizeof(struct nvme_controller),
};

DRIVER_MODULE(nvme, pci, nvme_pci_driver, nvme_devclass, nvme_modevent, 0);
MODULE_VERSION(nvme, 1);

static struct _pcsid
{
	uint32_t	devid;
	int		match_subdevice;
	uint16_t	subdevice;
	const char	*desc;
} pci_ids[] = {
	{ 0x01118086,	0, 0,      "NVMe Controller" },
	{ IDT32_PCI_ID,	0, 0,      "IDT NVMe Controller (32 channel)" },
	{ IDT8_PCI_ID,	0, 0,      "IDT NVMe Controller (8 channel)" },
	{ 0x09538086,	1, 0x3702, "DC P3700 SSD" },
	{ 0x09538086,	1, 0x3703, "DC P3700 SSD [2.5\" SFF]" },
	{ 0x09538086,	1, 0x3704, "DC P3500 SSD [Add-in Card]" },
	{ 0x09538086,	1, 0x3705, "DC P3500 SSD [2.5\" SFF]" },
	{ 0x09538086,	1, 0x3709, "DC P3600 SSD [Add-in Card]" },
	{ 0x09538086,	1, 0x370a, "DC P3600 SSD [2.5\" SFF]" },
	{ 0x00000000,	0, 0,      NULL }
};

static int
nvme_match(uint32_t devid, uint16_t subdevice, struct _pcsid *ep)
{
	/*
	 * Match on device ID; honor the subdevice ID only when this table
	 * entry asks for it.
	 */
	if (devid != ep->devid)
		return 0;

	if (!ep->match_subdevice)
		return 1;

	if (subdevice == ep->subdevice)
		return 1;
	else
		return 0;
}

static int
nvme_probe(device_t device)
{
	struct _pcsid	*ep;
	uint32_t	devid;
	uint16_t	subdevice;

	devid = pci_get_devid(device);
	subdevice = pci_get_subdevice(device);
	ep = pci_ids;

	while (ep->devid) {
		if (nvme_match(devid, subdevice, ep))
			break;
		++ep;
	}

	if (ep->desc) {
		device_set_desc(device, ep->desc);
		return (BUS_PROBE_DEFAULT);
	}

#if defined(PCIS_STORAGE_NVM)
	if (pci_get_class(device)    == PCIC_STORAGE &&
	    pci_get_subclass(device) == PCIS_STORAGE_NVM &&
	    pci_get_progif(device)   == PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0) {
		device_set_desc(device, "Generic NVMe Device");
		return (BUS_PROBE_GENERIC);
	}
#endif

	return (ENXIO);
}

static void
nvme_init(void)
{
	uint32_t	i;

	nvme_request_zone = uma_zcreate("nvme_request",
	    sizeof(struct nvme_request), NULL, NULL, NULL, NULL, 0, 0);

	for (i = 0; i < NVME_MAX_CONSUMERS; i++)
		nvme_consumer[i].id = INVALID_CONSUMER_ID;
}

SYSINIT(nvme_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_init, NULL);

static void
nvme_uninit(void)
{
	uma_zdestroy(nvme_request_zone);
}

SYSUNINIT(nvme_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_uninit, NULL);

static void
nvme_load(void)
{
}

static void
nvme_unload(void)
{
}

static int
nvme_shutdown(device_t dev)
{
	struct nvme_controller	*ctrlr;

	ctrlr = DEVICE2SOFTC(dev);
	nvme_ctrlr_shutdown(ctrlr);

	return (0);
}

static int
nvme_modevent(module_t mod, int type, void *arg)
{

	switch (type) {
	case MOD_LOAD:
		nvme_load();
		break;
	case MOD_UNLOAD:
		nvme_unload();
		break;
	default:
		break;
	}

	return (0);
}

void
nvme_dump_command(struct nvme_command *cmd)
{
	printf(
"opc:%x f:%x r1:%x cid:%x nsid:%x r2:%x r3:%x mptr:%jx prp1:%jx prp2:%jx cdw:%x %x %x %x %x %x\n",
	    cmd->opc, cmd->fuse, cmd->rsvd1, cmd->cid, cmd->nsid,
	    cmd->rsvd2, cmd->rsvd3,
	    (uintmax_t)cmd->mptr, (uintmax_t)cmd->prp1, (uintmax_t)cmd->prp2,
	    cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14,
	    cmd->cdw15);
}

void
nvme_dump_completion(struct nvme_completion *cpl)
{
	printf("cdw0:%08x sqhd:%04x sqid:%04x "
	    "cid:%04x p:%x sc:%02x sct:%x m:%x dnr:%x\n",
	    cpl->cdw0, cpl->sqhd, cpl->sqid,
	    cpl->cid, cpl->status.p, cpl->status.sc, cpl->status.sct,
	    cpl->status.m, cpl->status.dnr);
}

static int
nvme_attach(device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);
	int			status;

	status = nvme_ctrlr_construct(ctrlr, dev);

	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	/*
	 * Enable busmastering so the completion status messages can
	 * be busmastered back to the host.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Reset controller twice to ensure we do a transition from cc.en==1
	 * to cc.en==0.  This is because we don't really know what status
	 * the controller was left in when boot handed off to OS.
	 */
	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	ctrlr->config_hook.ich_func = nvme_ctrlr_start_config_hook;
	ctrlr->config_hook.ich_arg = ctrlr;

	config_intrhook_establish(&ctrlr->config_hook);

	return (0);
}

static int
nvme_detach(device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);

	nvme_ctrlr_destruct(ctrlr, dev);
	pci_disable_busmaster(dev);
	return (0);
}

static void
nvme_notify(struct nvme_consumer *cons,
	    struct nvme_controller *ctrlr)
{
	struct nvme_namespace	*ns;
	void			*ctrlr_cookie;
	int			cmpset, ns_idx;

	/*
	 * The consumer may register itself after the nvme devices
	 * have registered with the kernel, but before the
	 * driver has completed initialization.  In that case,
	 * return here, and when initialization completes, the
	 * controller will make sure the consumer gets notified.
	 */
	if (!ctrlr->is_initialized)
		return;

	cmpset = atomic_cmpset_32(&ctrlr->notification_sent, 0, 1);

	if (cmpset == 0)
		return;

	if (cons->ctrlr_fn != NULL)
		ctrlr_cookie = (*cons->ctrlr_fn)(ctrlr);
	else
		ctrlr_cookie = NULL;
	ctrlr->cons_cookie[cons->id] = ctrlr_cookie;
	if (ctrlr->is_failed) {
		if (cons->fail_fn != NULL)
			(*cons->fail_fn)(ctrlr_cookie);
		/*
		 * Do not notify consumers about the namespaces of a
		 * failed controller.
		 */
		return;
	}
	for (ns_idx = 0; ns_idx < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); ns_idx++) {
		ns = &ctrlr->ns[ns_idx];
		if (ns->data.nsze == 0)
			continue;
		if (cons->ns_fn != NULL)
			ns->cons_cookie[cons->id] =
			    (*cons->ns_fn)(ns, ctrlr_cookie);
	}
}

void
nvme_notify_new_controller(struct nvme_controller *ctrlr)
{
	int i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		if (nvme_consumer[i].id != INVALID_CONSUMER_ID) {
			nvme_notify(&nvme_consumer[i], ctrlr);
		}
	}
}

static void
nvme_notify_new_consumer(struct nvme_consumer *cons)
{
	device_t		*devlist;
	struct nvme_controller	*ctrlr;
	int			dev_idx, devcount;

	if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
		return;

	for (dev_idx = 0; dev_idx < devcount; dev_idx++) {
		ctrlr = DEVICE2SOFTC(devlist[dev_idx]);
		nvme_notify(cons, ctrlr);
	}

	free(devlist, M_TEMP);
}

void
nvme_notify_async_consumers(struct nvme_controller *ctrlr,
			    const struct nvme_completion *async_cpl,
			    uint32_t log_page_id, void *log_page_buffer,
			    uint32_t log_page_size)
{
	struct nvme_consumer	*cons;
	uint32_t		i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID && cons->async_fn != NULL)
			(*cons->async_fn)(ctrlr->cons_cookie[i], async_cpl,
			    log_page_id, log_page_buffer, log_page_size);
	}
}

void
nvme_notify_fail_consumers(struct nvme_controller *ctrlr)
{
	struct nvme_consumer	*cons;
	uint32_t		i;

	/*
	 * This controller failed during initialization (i.e. IDENTIFY
	 * command failed or timed out).  Do not notify any nvme
	 * consumers of the failure here, since the consumer does not
	 * even know about the controller yet.
	 */
	if (!ctrlr->is_initialized)
		return;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID && cons->fail_fn != NULL)
			cons->fail_fn(ctrlr->cons_cookie[i]);
	}
}

struct nvme_consumer *
nvme_register_consumer(nvme_cons_ns_fn_t ns_fn, nvme_cons_ctrlr_fn_t ctrlr_fn,
		       nvme_cons_async_fn_t async_fn,
		       nvme_cons_fail_fn_t fail_fn)
{
	int i;

	/*
	 * TODO: add locking around consumer registration.  Not an issue
	 *  right now since we only have one nvme consumer - nvd(4).
	 */
	for (i = 0; i < NVME_MAX_CONSUMERS; i++)
		if (nvme_consumer[i].id == INVALID_CONSUMER_ID) {
			nvme_consumer[i].id = i;
			nvme_consumer[i].ns_fn = ns_fn;
			nvme_consumer[i].ctrlr_fn = ctrlr_fn;
			nvme_consumer[i].async_fn = async_fn;
			nvme_consumer[i].fail_fn = fail_fn;

			nvme_notify_new_consumer(&nvme_consumer[i]);
			return (&nvme_consumer[i]);
		}

	printf("nvme(4): consumer not registered - no slots available\n");
	return (NULL);
}

void
nvme_unregister_consumer(struct nvme_consumer *consumer)
{

	consumer->id = INVALID_CONSUMER_ID;
}

void
nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_completion_poll_status	*status = arg;

	/*
	 * Copy status into the argument passed by the caller, so that
	 * the caller can check the status to determine if the
	 * request passed or failed.
	 */
	memcpy(&status->cpl, cpl, sizeof(*cpl));
	wmb();
	status->done = TRUE;
}