1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * NVM Express device driver 4 * Copyright (c) 2011-2014, Intel Corporation. 5 */ 6 7 #include <linux/acpi.h> 8 #include <linux/async.h> 9 #include <linux/blkdev.h> 10 #include <linux/blk-mq-dma.h> 11 #include <linux/blk-integrity.h> 12 #include <linux/dmi.h> 13 #include <linux/init.h> 14 #include <linux/interrupt.h> 15 #include <linux/io.h> 16 #include <linux/kstrtox.h> 17 #include <linux/memremap.h> 18 #include <linux/mm.h> 19 #include <linux/module.h> 20 #include <linux/mutex.h> 21 #include <linux/nodemask.h> 22 #include <linux/once.h> 23 #include <linux/pci.h> 24 #include <linux/suspend.h> 25 #include <linux/t10-pi.h> 26 #include <linux/types.h> 27 #include <linux/io-64-nonatomic-lo-hi.h> 28 #include <linux/io-64-nonatomic-hi-lo.h> 29 #include <linux/sed-opal.h> 30 31 #include "trace.h" 32 #include "nvme.h" 33 34 #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes) 35 #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion)) 36 37 /* Optimisation for I/Os between 4k and 128k */ 38 #define NVME_SMALL_POOL_SIZE 256 39 40 /* 41 * Arbitrary upper bound. 42 */ 43 #define NVME_MAX_BYTES SZ_8M 44 #define NVME_MAX_NR_DESCRIPTORS 5 45 46 /* 47 * For data SGLs we support a single descriptors worth of SGL entries. 48 * For PRPs, segments don't matter at all. 49 */ 50 #define NVME_MAX_SEGS \ 51 (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc)) 52 53 /* 54 * For metadata SGLs, only the small descriptor is supported, and the first 55 * entry is the segment descriptor, which for the data pointer sits in the SQE. 56 */ 57 #define NVME_MAX_META_SEGS \ 58 ((NVME_SMALL_POOL_SIZE / sizeof(struct nvme_sgl_desc)) - 1) 59 60 /* 61 * The last entry is used to link to the next descriptor. 62 */ 63 #define PRPS_PER_PAGE \ 64 (((NVME_CTRL_PAGE_SIZE / sizeof(__le64))) - 1) 65 66 /* 67 * I/O could be non-aligned both at the beginning and end. 68 */ 69 #define MAX_PRP_RANGE \ 70 (NVME_MAX_BYTES + 2 * (NVME_CTRL_PAGE_SIZE - 1)) 71 72 static_assert(MAX_PRP_RANGE / NVME_CTRL_PAGE_SIZE <= 73 (1 /* prp1 */ + NVME_MAX_NR_DESCRIPTORS * PRPS_PER_PAGE)); 74 75 static int use_threaded_interrupts; 76 module_param(use_threaded_interrupts, int, 0444); 77 78 static bool use_cmb_sqes = true; 79 module_param(use_cmb_sqes, bool, 0444); 80 MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes"); 81 82 static unsigned int max_host_mem_size_mb = 128; 83 module_param(max_host_mem_size_mb, uint, 0444); 84 MODULE_PARM_DESC(max_host_mem_size_mb, 85 "Maximum Host Memory Buffer (HMB) size per controller (in MiB)"); 86 87 static unsigned int sgl_threshold = SZ_32K; 88 module_param(sgl_threshold, uint, 0644); 89 MODULE_PARM_DESC(sgl_threshold, 90 "Use SGLs when average request segment size is larger or equal to " 91 "this size. 
Use 0 to disable SGLs."); 92 93 #define NVME_PCI_MIN_QUEUE_SIZE 2 94 #define NVME_PCI_MAX_QUEUE_SIZE 4095 95 static int io_queue_depth_set(const char *val, const struct kernel_param *kp); 96 static const struct kernel_param_ops io_queue_depth_ops = { 97 .set = io_queue_depth_set, 98 .get = param_get_uint, 99 }; 100 101 static unsigned int io_queue_depth = 1024; 102 module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644); 103 MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2 and < 4096"); 104 105 static int io_queue_count_set(const char *val, const struct kernel_param *kp) 106 { 107 unsigned int n; 108 int ret; 109 110 ret = kstrtouint(val, 10, &n); 111 if (ret != 0 || n > blk_mq_num_possible_queues(0)) 112 return -EINVAL; 113 return param_set_uint(val, kp); 114 } 115 116 static const struct kernel_param_ops io_queue_count_ops = { 117 .set = io_queue_count_set, 118 .get = param_get_uint, 119 }; 120 121 static unsigned int write_queues; 122 module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644); 123 MODULE_PARM_DESC(write_queues, 124 "Number of queues to use for writes. If not set, reads and writes " 125 "will share a queue set."); 126 127 static unsigned int poll_queues; 128 module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644); 129 MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO."); 130 131 static bool noacpi; 132 module_param(noacpi, bool, 0444); 133 MODULE_PARM_DESC(noacpi, "disable acpi bios quirks"); 134 135 struct nvme_dev; 136 struct nvme_queue; 137 138 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); 139 static void nvme_delete_io_queues(struct nvme_dev *dev); 140 static void nvme_update_attrs(struct nvme_dev *dev); 141 142 struct nvme_descriptor_pools { 143 struct dma_pool *large; 144 struct dma_pool *small; 145 }; 146 147 /* 148 * Represents an NVM Express device. Each nvme_dev is a PCI function. 
149 */ 150 struct nvme_dev { 151 struct nvme_queue *queues; 152 struct blk_mq_tag_set tagset; 153 struct blk_mq_tag_set admin_tagset; 154 u32 __iomem *dbs; 155 struct device *dev; 156 unsigned online_queues; 157 unsigned max_qid; 158 unsigned io_queues[HCTX_MAX_TYPES]; 159 unsigned int num_vecs; 160 u32 q_depth; 161 int io_sqes; 162 u32 db_stride; 163 void __iomem *bar; 164 unsigned long bar_mapped_size; 165 struct mutex shutdown_lock; 166 bool subsystem; 167 u64 cmb_size; 168 bool cmb_use_sqes; 169 u32 cmbsz; 170 u32 cmbloc; 171 struct nvme_ctrl ctrl; 172 u32 last_ps; 173 bool hmb; 174 struct sg_table *hmb_sgt; 175 mempool_t *dmavec_mempool; 176 177 /* shadow doorbell buffer support: */ 178 __le32 *dbbuf_dbs; 179 dma_addr_t dbbuf_dbs_dma_addr; 180 __le32 *dbbuf_eis; 181 dma_addr_t dbbuf_eis_dma_addr; 182 183 /* host memory buffer support: */ 184 u64 host_mem_size; 185 u32 nr_host_mem_descs; 186 u32 host_mem_descs_size; 187 dma_addr_t host_mem_descs_dma; 188 struct nvme_host_mem_buf_desc *host_mem_descs; 189 void **host_mem_desc_bufs; 190 unsigned int nr_allocated_queues; 191 unsigned int nr_write_queues; 192 unsigned int nr_poll_queues; 193 struct nvme_descriptor_pools descriptor_pools[]; 194 }; 195 196 static int io_queue_depth_set(const char *val, const struct kernel_param *kp) 197 { 198 return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE, 199 NVME_PCI_MAX_QUEUE_SIZE); 200 } 201 202 static inline unsigned int sq_idx(unsigned int qid, u32 stride) 203 { 204 return qid * 2 * stride; 205 } 206 207 static inline unsigned int cq_idx(unsigned int qid, u32 stride) 208 { 209 return (qid * 2 + 1) * stride; 210 } 211 212 static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl) 213 { 214 return container_of(ctrl, struct nvme_dev, ctrl); 215 } 216 217 /* 218 * An NVM Express queue. Each device has at least two (one for admin 219 * commands and one for I/O commands). 220 */ 221 struct nvme_queue { 222 struct nvme_dev *dev; 223 struct nvme_descriptor_pools descriptor_pools; 224 spinlock_t sq_lock; 225 void *sq_cmds; 226 /* only used for poll queues: */ 227 spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; 228 struct nvme_completion *cqes; 229 dma_addr_t sq_dma_addr; 230 dma_addr_t cq_dma_addr; 231 u32 __iomem *q_db; 232 u32 q_depth; 233 u16 cq_vector; 234 u16 sq_tail; 235 u16 last_sq_tail; 236 u16 cq_head; 237 u16 qid; 238 u8 cq_phase; 239 u8 sqes; 240 unsigned long flags; 241 #define NVMEQ_ENABLED 0 242 #define NVMEQ_SQ_CMB 1 243 #define NVMEQ_DELETE_ERROR 2 244 #define NVMEQ_POLLED 3 245 __le32 *dbbuf_sq_db; 246 __le32 *dbbuf_cq_db; 247 __le32 *dbbuf_sq_ei; 248 __le32 *dbbuf_cq_ei; 249 struct completion delete_done; 250 }; 251 252 /* bits for iod->flags */ 253 enum nvme_iod_flags { 254 /* this command has been aborted by the timeout handler */ 255 IOD_ABORTED = 1U << 0, 256 257 /* uses the small descriptor pool */ 258 IOD_SMALL_DESCRIPTOR = 1U << 1, 259 260 /* single segment dma mapping */ 261 IOD_SINGLE_SEGMENT = 1U << 2, 262 263 /* Metadata using non-coalesced MPTR */ 264 IOD_SINGLE_META_SEGMENT = 1U << 5, 265 }; 266 267 struct nvme_dma_vec { 268 dma_addr_t addr; 269 unsigned int len; 270 }; 271 272 /* 273 * The nvme_iod describes the data in an I/O. 
274 */ 275 struct nvme_iod { 276 struct nvme_request req; 277 struct nvme_command cmd; 278 u8 flags; 279 u8 nr_descriptors; 280 281 unsigned int total_len; 282 struct dma_iova_state dma_state; 283 void *descriptors[NVME_MAX_NR_DESCRIPTORS]; 284 struct nvme_dma_vec *dma_vecs; 285 unsigned int nr_dma_vecs; 286 287 dma_addr_t meta_dma; 288 unsigned int meta_total_len; 289 struct dma_iova_state meta_dma_state; 290 struct nvme_sgl_desc *meta_descriptor; 291 }; 292 293 static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev) 294 { 295 return dev->nr_allocated_queues * 8 * dev->db_stride; 296 } 297 298 static void nvme_dbbuf_dma_alloc(struct nvme_dev *dev) 299 { 300 unsigned int mem_size = nvme_dbbuf_size(dev); 301 302 if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP)) 303 return; 304 305 if (dev->dbbuf_dbs) { 306 /* 307 * Clear the dbbuf memory so the driver doesn't observe stale 308 * values from the previous instantiation. 309 */ 310 memset(dev->dbbuf_dbs, 0, mem_size); 311 memset(dev->dbbuf_eis, 0, mem_size); 312 return; 313 } 314 315 dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size, 316 &dev->dbbuf_dbs_dma_addr, 317 GFP_KERNEL); 318 if (!dev->dbbuf_dbs) 319 goto fail; 320 dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size, 321 &dev->dbbuf_eis_dma_addr, 322 GFP_KERNEL); 323 if (!dev->dbbuf_eis) 324 goto fail_free_dbbuf_dbs; 325 return; 326 327 fail_free_dbbuf_dbs: 328 dma_free_coherent(dev->dev, mem_size, dev->dbbuf_dbs, 329 dev->dbbuf_dbs_dma_addr); 330 dev->dbbuf_dbs = NULL; 331 fail: 332 dev_warn(dev->dev, "unable to allocate dma for dbbuf\n"); 333 } 334 335 static void nvme_dbbuf_dma_free(struct nvme_dev *dev) 336 { 337 unsigned int mem_size = nvme_dbbuf_size(dev); 338 339 if (dev->dbbuf_dbs) { 340 dma_free_coherent(dev->dev, mem_size, 341 dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr); 342 dev->dbbuf_dbs = NULL; 343 } 344 if (dev->dbbuf_eis) { 345 dma_free_coherent(dev->dev, mem_size, 346 dev->dbbuf_eis, dev->dbbuf_eis_dma_addr); 347 dev->dbbuf_eis = NULL; 348 } 349 } 350 351 static void nvme_dbbuf_init(struct nvme_dev *dev, 352 struct nvme_queue *nvmeq, int qid) 353 { 354 if (!dev->dbbuf_dbs || !qid) 355 return; 356 357 nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)]; 358 nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)]; 359 nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)]; 360 nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; 361 } 362 363 static void nvme_dbbuf_free(struct nvme_queue *nvmeq) 364 { 365 if (!nvmeq->qid) 366 return; 367 368 nvmeq->dbbuf_sq_db = NULL; 369 nvmeq->dbbuf_cq_db = NULL; 370 nvmeq->dbbuf_sq_ei = NULL; 371 nvmeq->dbbuf_cq_ei = NULL; 372 } 373 374 static void nvme_dbbuf_set(struct nvme_dev *dev) 375 { 376 struct nvme_command c = { }; 377 unsigned int i; 378 379 if (!dev->dbbuf_dbs) 380 return; 381 382 c.dbbuf.opcode = nvme_admin_dbbuf; 383 c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr); 384 c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); 385 386 if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { 387 dev_warn(dev->ctrl.device, "unable to set dbbuf\n"); 388 /* Free memory and continue on */ 389 nvme_dbbuf_dma_free(dev); 390 391 for (i = 1; i <= dev->online_queues; i++) 392 nvme_dbbuf_free(&dev->queues[i]); 393 } 394 } 395 396 static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old) 397 { 398 return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old); 399 } 400 401 /* Update dbbuf and return true if an MMIO is required */ 402 static bool 
nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db, 403 volatile __le32 *dbbuf_ei) 404 { 405 if (dbbuf_db) { 406 u16 old_value, event_idx; 407 408 /* 409 * Ensure that the queue is written before updating 410 * the doorbell in memory 411 */ 412 wmb(); 413 414 old_value = le32_to_cpu(*dbbuf_db); 415 *dbbuf_db = cpu_to_le32(value); 416 417 /* 418 * Ensure that the doorbell is updated before reading the event 419 * index from memory. The controller needs to provide similar 420 * ordering to ensure the event index is updated before reading 421 * the doorbell. 422 */ 423 mb(); 424 425 event_idx = le32_to_cpu(*dbbuf_ei); 426 if (!nvme_dbbuf_need_event(event_idx, value, old_value)) 427 return false; 428 } 429 430 return true; 431 } 432 433 static struct nvme_descriptor_pools * 434 nvme_setup_descriptor_pools(struct nvme_dev *dev, unsigned numa_node) 435 { 436 struct nvme_descriptor_pools *pools = &dev->descriptor_pools[numa_node]; 437 size_t small_align = NVME_SMALL_POOL_SIZE; 438 439 if (pools->small) 440 return pools; /* already initialized */ 441 442 pools->large = dma_pool_create_node("nvme descriptor page", dev->dev, 443 NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE, 0, numa_node); 444 if (!pools->large) 445 return ERR_PTR(-ENOMEM); 446 447 if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512) 448 small_align = 512; 449 450 pools->small = dma_pool_create_node("nvme descriptor small", dev->dev, 451 NVME_SMALL_POOL_SIZE, small_align, 0, numa_node); 452 if (!pools->small) { 453 dma_pool_destroy(pools->large); 454 pools->large = NULL; 455 return ERR_PTR(-ENOMEM); 456 } 457 458 return pools; 459 } 460 461 static void nvme_release_descriptor_pools(struct nvme_dev *dev) 462 { 463 unsigned i; 464 465 for (i = 0; i < nr_node_ids; i++) { 466 struct nvme_descriptor_pools *pools = &dev->descriptor_pools[i]; 467 468 dma_pool_destroy(pools->large); 469 dma_pool_destroy(pools->small); 470 } 471 } 472 473 static int nvme_init_hctx_common(struct blk_mq_hw_ctx *hctx, void *data, 474 unsigned qid) 475 { 476 struct nvme_dev *dev = to_nvme_dev(data); 477 struct nvme_queue *nvmeq = &dev->queues[qid]; 478 struct nvme_descriptor_pools *pools; 479 struct blk_mq_tags *tags; 480 481 tags = qid ? 
dev->tagset.tags[qid - 1] : dev->admin_tagset.tags[0]; 482 WARN_ON(tags != hctx->tags); 483 pools = nvme_setup_descriptor_pools(dev, hctx->numa_node); 484 if (IS_ERR(pools)) 485 return PTR_ERR(pools); 486 487 nvmeq->descriptor_pools = *pools; 488 hctx->driver_data = nvmeq; 489 return 0; 490 } 491 492 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 493 unsigned int hctx_idx) 494 { 495 WARN_ON(hctx_idx != 0); 496 return nvme_init_hctx_common(hctx, data, 0); 497 } 498 499 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 500 unsigned int hctx_idx) 501 { 502 return nvme_init_hctx_common(hctx, data, hctx_idx + 1); 503 } 504 505 static int nvme_pci_init_request(struct blk_mq_tag_set *set, 506 struct request *req, unsigned int hctx_idx, 507 unsigned int numa_node) 508 { 509 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 510 511 nvme_req(req)->ctrl = set->driver_data; 512 nvme_req(req)->cmd = &iod->cmd; 513 return 0; 514 } 515 516 static int queue_irq_offset(struct nvme_dev *dev) 517 { 518 /* if we have more than 1 vec, admin queue offsets us by 1 */ 519 if (dev->num_vecs > 1) 520 return 1; 521 522 return 0; 523 } 524 525 static void nvme_pci_map_queues(struct blk_mq_tag_set *set) 526 { 527 struct nvme_dev *dev = to_nvme_dev(set->driver_data); 528 int i, qoff, offset; 529 530 offset = queue_irq_offset(dev); 531 for (i = 0, qoff = 0; i < set->nr_maps; i++) { 532 struct blk_mq_queue_map *map = &set->map[i]; 533 534 map->nr_queues = dev->io_queues[i]; 535 if (!map->nr_queues) { 536 BUG_ON(i == HCTX_TYPE_DEFAULT); 537 continue; 538 } 539 540 /* 541 * The poll queue(s) doesn't have an IRQ (and hence IRQ 542 * affinity), so use the regular blk-mq cpu mapping 543 */ 544 map->queue_offset = qoff; 545 if (i != HCTX_TYPE_POLL && offset) 546 blk_mq_map_hw_queues(map, dev->dev, offset); 547 else 548 blk_mq_map_queues(map); 549 qoff += map->nr_queues; 550 offset += map->nr_queues; 551 } 552 } 553 554 /* 555 * Write sq tail if we are asked to, or if the next command would wrap. 
556 */ 557 static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq) 558 { 559 if (!write_sq) { 560 u16 next_tail = nvmeq->sq_tail + 1; 561 562 if (next_tail == nvmeq->q_depth) 563 next_tail = 0; 564 if (next_tail != nvmeq->last_sq_tail) 565 return; 566 } 567 568 if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail, 569 nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei)) 570 writel(nvmeq->sq_tail, nvmeq->q_db); 571 nvmeq->last_sq_tail = nvmeq->sq_tail; 572 } 573 574 static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq, 575 struct nvme_command *cmd) 576 { 577 memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes), 578 absolute_pointer(cmd), sizeof(*cmd)); 579 if (++nvmeq->sq_tail == nvmeq->q_depth) 580 nvmeq->sq_tail = 0; 581 } 582 583 static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx) 584 { 585 struct nvme_queue *nvmeq = hctx->driver_data; 586 587 spin_lock(&nvmeq->sq_lock); 588 if (nvmeq->sq_tail != nvmeq->last_sq_tail) 589 nvme_write_sq_db(nvmeq, true); 590 spin_unlock(&nvmeq->sq_lock); 591 } 592 593 enum nvme_use_sgl { 594 SGL_UNSUPPORTED, 595 SGL_SUPPORTED, 596 SGL_FORCED, 597 }; 598 599 static inline bool nvme_pci_metadata_use_sgls(struct request *req) 600 { 601 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 602 struct nvme_dev *dev = nvmeq->dev; 603 604 if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl)) 605 return false; 606 return req->nr_integrity_segments > 1 || 607 nvme_req(req)->flags & NVME_REQ_USERCMD; 608 } 609 610 static inline enum nvme_use_sgl nvme_pci_use_sgls(struct nvme_dev *dev, 611 struct request *req) 612 { 613 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 614 615 if (nvmeq->qid && nvme_ctrl_sgl_supported(&dev->ctrl)) { 616 if (nvme_req(req)->flags & NVME_REQ_USERCMD) 617 return SGL_FORCED; 618 if (req->nr_integrity_segments > 1) 619 return SGL_FORCED; 620 return SGL_SUPPORTED; 621 } 622 623 return SGL_UNSUPPORTED; 624 } 625 626 static unsigned int nvme_pci_avg_seg_size(struct request *req) 627 { 628 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 629 unsigned int nseg; 630 631 if (blk_rq_dma_map_coalesce(&iod->dma_state)) 632 nseg = 1; 633 else 634 nseg = blk_rq_nr_phys_segments(req); 635 return DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg); 636 } 637 638 static inline struct dma_pool *nvme_dma_pool(struct nvme_queue *nvmeq, 639 struct nvme_iod *iod) 640 { 641 if (iod->flags & IOD_SMALL_DESCRIPTOR) 642 return nvmeq->descriptor_pools.small; 643 return nvmeq->descriptor_pools.large; 644 } 645 646 static inline bool nvme_pci_cmd_use_meta_sgl(struct nvme_command *cmd) 647 { 648 return (cmd->common.flags & NVME_CMD_SGL_ALL) == NVME_CMD_SGL_METASEG; 649 } 650 651 static inline bool nvme_pci_cmd_use_sgl(struct nvme_command *cmd) 652 { 653 return cmd->common.flags & 654 (NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG); 655 } 656 657 static inline dma_addr_t nvme_pci_first_desc_dma_addr(struct nvme_command *cmd) 658 { 659 if (nvme_pci_cmd_use_sgl(cmd)) 660 return le64_to_cpu(cmd->common.dptr.sgl.addr); 661 return le64_to_cpu(cmd->common.dptr.prp2); 662 } 663 664 static void nvme_free_descriptors(struct request *req) 665 { 666 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 667 const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; 668 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 669 dma_addr_t dma_addr = nvme_pci_first_desc_dma_addr(&iod->cmd); 670 int i; 671 672 if (iod->nr_descriptors == 1) { 673 dma_pool_free(nvme_dma_pool(nvmeq, iod), iod->descriptors[0], 674 dma_addr); 675 return; 676 } 677 678 for (i = 0; i < 
iod->nr_descriptors; i++) { 679 __le64 *prp_list = iod->descriptors[i]; 680 dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]); 681 682 dma_pool_free(nvmeq->descriptor_pools.large, prp_list, 683 dma_addr); 684 dma_addr = next_dma_addr; 685 } 686 } 687 688 static void nvme_free_prps(struct request *req) 689 { 690 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 691 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 692 unsigned int i; 693 694 for (i = 0; i < iod->nr_dma_vecs; i++) 695 dma_unmap_page(nvmeq->dev->dev, iod->dma_vecs[i].addr, 696 iod->dma_vecs[i].len, rq_dma_dir(req)); 697 mempool_free(iod->dma_vecs, nvmeq->dev->dmavec_mempool); 698 } 699 700 static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge, 701 struct nvme_sgl_desc *sg_list) 702 { 703 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 704 enum dma_data_direction dir = rq_dma_dir(req); 705 unsigned int len = le32_to_cpu(sge->length); 706 struct device *dma_dev = nvmeq->dev->dev; 707 unsigned int i; 708 709 if (sge->type == (NVME_SGL_FMT_DATA_DESC << 4)) { 710 dma_unmap_page(dma_dev, le64_to_cpu(sge->addr), len, dir); 711 return; 712 } 713 714 for (i = 0; i < len / sizeof(*sg_list); i++) 715 dma_unmap_page(dma_dev, le64_to_cpu(sg_list[i].addr), 716 le32_to_cpu(sg_list[i].length), dir); 717 } 718 719 static void nvme_unmap_metadata(struct request *req) 720 { 721 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 722 enum dma_data_direction dir = rq_dma_dir(req); 723 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 724 struct device *dma_dev = nvmeq->dev->dev; 725 struct nvme_sgl_desc *sge = iod->meta_descriptor; 726 727 if (iod->flags & IOD_SINGLE_META_SEGMENT) { 728 dma_unmap_page(dma_dev, iod->meta_dma, 729 rq_integrity_vec(req).bv_len, 730 rq_dma_dir(req)); 731 return; 732 } 733 734 if (!blk_rq_integrity_dma_unmap(req, dma_dev, &iod->meta_dma_state, 735 iod->meta_total_len)) { 736 if (nvme_pci_cmd_use_meta_sgl(&iod->cmd)) 737 nvme_free_sgls(req, sge, &sge[1]); 738 else 739 dma_unmap_page(dma_dev, iod->meta_dma, 740 iod->meta_total_len, dir); 741 } 742 743 if (iod->meta_descriptor) 744 dma_pool_free(nvmeq->descriptor_pools.small, 745 iod->meta_descriptor, iod->meta_dma); 746 } 747 748 static void nvme_unmap_data(struct request *req) 749 { 750 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 751 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 752 struct device *dma_dev = nvmeq->dev->dev; 753 754 if (iod->flags & IOD_SINGLE_SEGMENT) { 755 static_assert(offsetof(union nvme_data_ptr, prp1) == 756 offsetof(union nvme_data_ptr, sgl.addr)); 757 dma_unmap_page(dma_dev, le64_to_cpu(iod->cmd.common.dptr.prp1), 758 iod->total_len, rq_dma_dir(req)); 759 return; 760 } 761 762 if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) { 763 if (nvme_pci_cmd_use_sgl(&iod->cmd)) 764 nvme_free_sgls(req, iod->descriptors[0], 765 &iod->cmd.common.dptr.sgl); 766 else 767 nvme_free_prps(req); 768 } 769 770 if (iod->nr_descriptors) 771 nvme_free_descriptors(req); 772 } 773 774 static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev, 775 struct blk_dma_iter *iter) 776 { 777 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 778 779 if (iter->len) 780 return true; 781 if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter)) 782 return false; 783 if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) { 784 iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr; 785 iod->dma_vecs[iod->nr_dma_vecs].len = iter->len; 786 iod->nr_dma_vecs++; 787 } 788 return true; 789 } 790 
791 static blk_status_t nvme_pci_setup_data_prp(struct request *req, 792 struct blk_dma_iter *iter) 793 { 794 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 795 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 796 unsigned int length = blk_rq_payload_bytes(req); 797 dma_addr_t prp1_dma, prp2_dma = 0; 798 unsigned int prp_len, i; 799 __le64 *prp_list; 800 801 if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(nvmeq->dev->dev)) { 802 iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool, 803 GFP_ATOMIC); 804 if (!iod->dma_vecs) 805 return BLK_STS_RESOURCE; 806 iod->dma_vecs[0].addr = iter->addr; 807 iod->dma_vecs[0].len = iter->len; 808 iod->nr_dma_vecs = 1; 809 } 810 811 /* 812 * PRP1 always points to the start of the DMA transfers. 813 * 814 * This is the only PRP (except for the list entries) that could be 815 * non-aligned. 816 */ 817 prp1_dma = iter->addr; 818 prp_len = min(length, NVME_CTRL_PAGE_SIZE - 819 (iter->addr & (NVME_CTRL_PAGE_SIZE - 1))); 820 iod->total_len += prp_len; 821 iter->addr += prp_len; 822 iter->len -= prp_len; 823 length -= prp_len; 824 if (!length) 825 goto done; 826 827 if (!nvme_pci_prp_iter_next(req, nvmeq->dev->dev, iter)) { 828 if (WARN_ON_ONCE(!iter->status)) 829 goto bad_sgl; 830 goto done; 831 } 832 833 /* 834 * PRP2 is usually a list, but can point to data if all data to be 835 * transferred fits into PRP1 + PRP2: 836 */ 837 if (length <= NVME_CTRL_PAGE_SIZE) { 838 prp2_dma = iter->addr; 839 iod->total_len += length; 840 goto done; 841 } 842 843 if (DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE) <= 844 NVME_SMALL_POOL_SIZE / sizeof(__le64)) 845 iod->flags |= IOD_SMALL_DESCRIPTOR; 846 847 prp_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC, 848 &prp2_dma); 849 if (!prp_list) { 850 iter->status = BLK_STS_RESOURCE; 851 goto done; 852 } 853 iod->descriptors[iod->nr_descriptors++] = prp_list; 854 855 i = 0; 856 for (;;) { 857 prp_list[i++] = cpu_to_le64(iter->addr); 858 prp_len = min(length, NVME_CTRL_PAGE_SIZE); 859 if (WARN_ON_ONCE(iter->len < prp_len)) 860 goto bad_sgl; 861 862 iod->total_len += prp_len; 863 iter->addr += prp_len; 864 iter->len -= prp_len; 865 length -= prp_len; 866 if (!length) 867 break; 868 869 if (!nvme_pci_prp_iter_next(req, nvmeq->dev->dev, iter)) { 870 if (WARN_ON_ONCE(!iter->status)) 871 goto bad_sgl; 872 goto done; 873 } 874 875 /* 876 * If we've filled the entire descriptor, allocate a new that is 877 * pointed to be the last entry in the previous PRP list. To 878 * accommodate for that move the last actual entry to the new 879 * descriptor. 880 */ 881 if (i == NVME_CTRL_PAGE_SIZE >> 3) { 882 __le64 *old_prp_list = prp_list; 883 dma_addr_t prp_list_dma; 884 885 prp_list = dma_pool_alloc(nvmeq->descriptor_pools.large, 886 GFP_ATOMIC, &prp_list_dma); 887 if (!prp_list) { 888 iter->status = BLK_STS_RESOURCE; 889 goto done; 890 } 891 iod->descriptors[iod->nr_descriptors++] = prp_list; 892 893 prp_list[0] = old_prp_list[i - 1]; 894 old_prp_list[i - 1] = cpu_to_le64(prp_list_dma); 895 i = 1; 896 } 897 } 898 899 done: 900 /* 901 * nvme_unmap_data uses the DPT field in the SQE to tear down the 902 * mapping, so initialize it even for failures. 
903 */ 904 iod->cmd.common.dptr.prp1 = cpu_to_le64(prp1_dma); 905 iod->cmd.common.dptr.prp2 = cpu_to_le64(prp2_dma); 906 if (unlikely(iter->status)) 907 nvme_unmap_data(req); 908 return iter->status; 909 910 bad_sgl: 911 dev_err_once(nvmeq->dev->dev, 912 "Incorrectly formed request for payload:%d nents:%d\n", 913 blk_rq_payload_bytes(req), blk_rq_nr_phys_segments(req)); 914 return BLK_STS_IOERR; 915 } 916 917 static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge, 918 struct blk_dma_iter *iter) 919 { 920 sge->addr = cpu_to_le64(iter->addr); 921 sge->length = cpu_to_le32(iter->len); 922 sge->type = NVME_SGL_FMT_DATA_DESC << 4; 923 } 924 925 static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge, 926 dma_addr_t dma_addr, int entries) 927 { 928 sge->addr = cpu_to_le64(dma_addr); 929 sge->length = cpu_to_le32(entries * sizeof(*sge)); 930 sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; 931 } 932 933 static blk_status_t nvme_pci_setup_data_sgl(struct request *req, 934 struct blk_dma_iter *iter) 935 { 936 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 937 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 938 unsigned int entries = blk_rq_nr_phys_segments(req); 939 struct nvme_sgl_desc *sg_list; 940 dma_addr_t sgl_dma; 941 unsigned int mapped = 0; 942 943 /* set the transfer type as SGL */ 944 iod->cmd.common.flags = NVME_CMD_SGL_METABUF; 945 946 if (entries == 1 || blk_rq_dma_map_coalesce(&iod->dma_state)) { 947 nvme_pci_sgl_set_data(&iod->cmd.common.dptr.sgl, iter); 948 iod->total_len += iter->len; 949 return BLK_STS_OK; 950 } 951 952 if (entries <= NVME_SMALL_POOL_SIZE / sizeof(*sg_list)) 953 iod->flags |= IOD_SMALL_DESCRIPTOR; 954 955 sg_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC, 956 &sgl_dma); 957 if (!sg_list) 958 return BLK_STS_RESOURCE; 959 iod->descriptors[iod->nr_descriptors++] = sg_list; 960 961 do { 962 if (WARN_ON_ONCE(mapped == entries)) { 963 iter->status = BLK_STS_IOERR; 964 break; 965 } 966 nvme_pci_sgl_set_data(&sg_list[mapped++], iter); 967 iod->total_len += iter->len; 968 } while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, &iod->dma_state, 969 iter)); 970 971 nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped); 972 if (unlikely(iter->status)) 973 nvme_unmap_data(req); 974 return iter->status; 975 } 976 977 static blk_status_t nvme_pci_setup_data_simple(struct request *req, 978 enum nvme_use_sgl use_sgl) 979 { 980 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 981 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 982 struct bio_vec bv = req_bvec(req); 983 unsigned int prp1_offset = bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1); 984 bool prp_possible = prp1_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2; 985 dma_addr_t dma_addr; 986 987 if (!use_sgl && !prp_possible) 988 return BLK_STS_AGAIN; 989 if (is_pci_p2pdma_page(bv.bv_page)) 990 return BLK_STS_AGAIN; 991 992 dma_addr = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0); 993 if (dma_mapping_error(nvmeq->dev->dev, dma_addr)) 994 return BLK_STS_RESOURCE; 995 iod->total_len = bv.bv_len; 996 iod->flags |= IOD_SINGLE_SEGMENT; 997 998 if (use_sgl == SGL_FORCED || !prp_possible) { 999 iod->cmd.common.flags = NVME_CMD_SGL_METABUF; 1000 iod->cmd.common.dptr.sgl.addr = cpu_to_le64(dma_addr); 1001 iod->cmd.common.dptr.sgl.length = cpu_to_le32(bv.bv_len); 1002 iod->cmd.common.dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4; 1003 } else { 1004 unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - prp1_offset; 1005 1006 iod->cmd.common.dptr.prp1 = cpu_to_le64(dma_addr); 1007 iod->cmd.common.dptr.prp2 
= 0; 1008 if (bv.bv_len > first_prp_len) 1009 iod->cmd.common.dptr.prp2 = 1010 cpu_to_le64(dma_addr + first_prp_len); 1011 } 1012 1013 return BLK_STS_OK; 1014 } 1015 1016 static blk_status_t nvme_map_data(struct request *req) 1017 { 1018 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1019 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 1020 struct nvme_dev *dev = nvmeq->dev; 1021 enum nvme_use_sgl use_sgl = nvme_pci_use_sgls(dev, req); 1022 struct blk_dma_iter iter; 1023 blk_status_t ret; 1024 1025 /* 1026 * Try to skip the DMA iterator for single segment requests, as that 1027 * significantly improves performances for small I/O sizes. 1028 */ 1029 if (blk_rq_nr_phys_segments(req) == 1) { 1030 ret = nvme_pci_setup_data_simple(req, use_sgl); 1031 if (ret != BLK_STS_AGAIN) 1032 return ret; 1033 } 1034 1035 if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter)) 1036 return iter.status; 1037 1038 if (use_sgl == SGL_FORCED || 1039 (use_sgl == SGL_SUPPORTED && 1040 (sgl_threshold && nvme_pci_avg_seg_size(req) >= sgl_threshold))) 1041 return nvme_pci_setup_data_sgl(req, &iter); 1042 return nvme_pci_setup_data_prp(req, &iter); 1043 } 1044 1045 static blk_status_t nvme_pci_setup_meta_sgls(struct request *req) 1046 { 1047 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 1048 unsigned int entries = req->nr_integrity_segments; 1049 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1050 struct nvme_dev *dev = nvmeq->dev; 1051 struct nvme_sgl_desc *sg_list; 1052 struct blk_dma_iter iter; 1053 dma_addr_t sgl_dma; 1054 int i = 0; 1055 1056 if (!blk_rq_integrity_dma_map_iter_start(req, dev->dev, 1057 &iod->meta_dma_state, &iter)) 1058 return iter.status; 1059 1060 if (blk_rq_dma_map_coalesce(&iod->meta_dma_state)) 1061 entries = 1; 1062 1063 /* 1064 * The NVMe MPTR descriptor has an implicit length that the host and 1065 * device must agree on to avoid data/memory corruption. We trust the 1066 * kernel allocated correctly based on the format's parameters, so use 1067 * the more efficient MPTR to avoid extra dma pool allocations for the 1068 * SGL indirection. 1069 * 1070 * But for user commands, we don't necessarily know what they do, so 1071 * the driver can't validate the metadata buffer size. The SGL 1072 * descriptor provides an explicit length, so we're relying on that 1073 * mechanism to catch any misunderstandings between the application and 1074 * device. 
1075 */ 1076 if (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD)) { 1077 iod->cmd.common.metadata = cpu_to_le64(iter.addr); 1078 iod->meta_total_len = iter.len; 1079 iod->meta_dma = iter.addr; 1080 iod->meta_descriptor = NULL; 1081 return BLK_STS_OK; 1082 } 1083 1084 sg_list = dma_pool_alloc(nvmeq->descriptor_pools.small, GFP_ATOMIC, 1085 &sgl_dma); 1086 if (!sg_list) 1087 return BLK_STS_RESOURCE; 1088 1089 iod->meta_descriptor = sg_list; 1090 iod->meta_dma = sgl_dma; 1091 iod->cmd.common.flags = NVME_CMD_SGL_METASEG; 1092 iod->cmd.common.metadata = cpu_to_le64(sgl_dma); 1093 if (entries == 1) { 1094 iod->meta_total_len = iter.len; 1095 nvme_pci_sgl_set_data(sg_list, &iter); 1096 return BLK_STS_OK; 1097 } 1098 1099 sgl_dma += sizeof(*sg_list); 1100 do { 1101 nvme_pci_sgl_set_data(&sg_list[++i], &iter); 1102 iod->meta_total_len += iter.len; 1103 } while (blk_rq_integrity_dma_map_iter_next(req, dev->dev, &iter)); 1104 1105 nvme_pci_sgl_set_seg(sg_list, sgl_dma, i); 1106 if (unlikely(iter.status)) 1107 nvme_unmap_metadata(req); 1108 return iter.status; 1109 } 1110 1111 static blk_status_t nvme_pci_setup_meta_mptr(struct request *req) 1112 { 1113 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1114 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 1115 struct bio_vec bv = rq_integrity_vec(req); 1116 1117 iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0); 1118 if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma)) 1119 return BLK_STS_IOERR; 1120 iod->cmd.common.metadata = cpu_to_le64(iod->meta_dma); 1121 iod->flags |= IOD_SINGLE_META_SEGMENT; 1122 return BLK_STS_OK; 1123 } 1124 1125 static blk_status_t nvme_map_metadata(struct request *req) 1126 { 1127 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1128 1129 if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) && 1130 nvme_pci_metadata_use_sgls(req)) 1131 return nvme_pci_setup_meta_sgls(req); 1132 return nvme_pci_setup_meta_mptr(req); 1133 } 1134 1135 static blk_status_t nvme_prep_rq(struct request *req) 1136 { 1137 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1138 blk_status_t ret; 1139 1140 iod->flags = 0; 1141 iod->nr_descriptors = 0; 1142 iod->total_len = 0; 1143 iod->meta_total_len = 0; 1144 1145 ret = nvme_setup_cmd(req->q->queuedata, req); 1146 if (ret) 1147 return ret; 1148 1149 if (blk_rq_nr_phys_segments(req)) { 1150 ret = nvme_map_data(req); 1151 if (ret) 1152 goto out_free_cmd; 1153 } 1154 1155 if (blk_integrity_rq(req)) { 1156 ret = nvme_map_metadata(req); 1157 if (ret) 1158 goto out_unmap_data; 1159 } 1160 1161 nvme_start_request(req); 1162 return BLK_STS_OK; 1163 out_unmap_data: 1164 if (blk_rq_nr_phys_segments(req)) 1165 nvme_unmap_data(req); 1166 out_free_cmd: 1167 nvme_cleanup_cmd(req); 1168 return ret; 1169 } 1170 1171 static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, 1172 const struct blk_mq_queue_data *bd) 1173 { 1174 struct nvme_queue *nvmeq = hctx->driver_data; 1175 struct nvme_dev *dev = nvmeq->dev; 1176 struct request *req = bd->rq; 1177 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1178 blk_status_t ret; 1179 1180 /* 1181 * We should not need to do this, but we're still using this to 1182 * ensure we can drain requests on a dying queue. 
1183 */ 1184 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) 1185 return BLK_STS_IOERR; 1186 1187 if (unlikely(!nvme_check_ready(&dev->ctrl, req, true))) 1188 return nvme_fail_nonready_command(&dev->ctrl, req); 1189 1190 ret = nvme_prep_rq(req); 1191 if (unlikely(ret)) 1192 return ret; 1193 spin_lock(&nvmeq->sq_lock); 1194 nvme_sq_copy_cmd(nvmeq, &iod->cmd); 1195 nvme_write_sq_db(nvmeq, bd->last); 1196 spin_unlock(&nvmeq->sq_lock); 1197 return BLK_STS_OK; 1198 } 1199 1200 static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct rq_list *rqlist) 1201 { 1202 struct request *req; 1203 1204 if (rq_list_empty(rqlist)) 1205 return; 1206 1207 spin_lock(&nvmeq->sq_lock); 1208 while ((req = rq_list_pop(rqlist))) { 1209 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1210 1211 nvme_sq_copy_cmd(nvmeq, &iod->cmd); 1212 } 1213 nvme_write_sq_db(nvmeq, true); 1214 spin_unlock(&nvmeq->sq_lock); 1215 } 1216 1217 static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req) 1218 { 1219 /* 1220 * We should not need to do this, but we're still using this to 1221 * ensure we can drain requests on a dying queue. 1222 */ 1223 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) 1224 return false; 1225 if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true))) 1226 return false; 1227 1228 return nvme_prep_rq(req) == BLK_STS_OK; 1229 } 1230 1231 static void nvme_queue_rqs(struct rq_list *rqlist) 1232 { 1233 struct rq_list submit_list = { }; 1234 struct rq_list requeue_list = { }; 1235 struct nvme_queue *nvmeq = NULL; 1236 struct request *req; 1237 1238 while ((req = rq_list_pop(rqlist))) { 1239 if (nvmeq && nvmeq != req->mq_hctx->driver_data) 1240 nvme_submit_cmds(nvmeq, &submit_list); 1241 nvmeq = req->mq_hctx->driver_data; 1242 1243 if (nvme_prep_rq_batch(nvmeq, req)) 1244 rq_list_add_tail(&submit_list, req); 1245 else 1246 rq_list_add_tail(&requeue_list, req); 1247 } 1248 1249 if (nvmeq) 1250 nvme_submit_cmds(nvmeq, &submit_list); 1251 *rqlist = requeue_list; 1252 } 1253 1254 static __always_inline void nvme_pci_unmap_rq(struct request *req) 1255 { 1256 if (blk_integrity_rq(req)) 1257 nvme_unmap_metadata(req); 1258 if (blk_rq_nr_phys_segments(req)) 1259 nvme_unmap_data(req); 1260 } 1261 1262 static void nvme_pci_complete_rq(struct request *req) 1263 { 1264 nvme_pci_unmap_rq(req); 1265 nvme_complete_rq(req); 1266 } 1267 1268 static void nvme_pci_complete_batch(struct io_comp_batch *iob) 1269 { 1270 nvme_complete_batch(iob, nvme_pci_unmap_rq); 1271 } 1272 1273 /* We read the CQE phase first to check if the rest of the entry is valid */ 1274 static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq) 1275 { 1276 struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head]; 1277 1278 return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase; 1279 } 1280 1281 static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq) 1282 { 1283 u16 head = nvmeq->cq_head; 1284 1285 if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, 1286 nvmeq->dbbuf_cq_ei)) 1287 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); 1288 } 1289 1290 static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq) 1291 { 1292 if (!nvmeq->qid) 1293 return nvmeq->dev->admin_tagset.tags[0]; 1294 return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; 1295 } 1296 1297 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, 1298 struct io_comp_batch *iob, u16 idx) 1299 { 1300 struct nvme_completion *cqe = &nvmeq->cqes[idx]; 1301 __u16 command_id = READ_ONCE(cqe->command_id); 1302 
struct request *req; 1303 1304 /* 1305 * AEN requests are special as they don't time out and can 1306 * survive any kind of queue freeze and often don't respond to 1307 * aborts. We don't even bother to allocate a struct request 1308 * for them but rather special case them here. 1309 */ 1310 if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) { 1311 nvme_complete_async_event(&nvmeq->dev->ctrl, 1312 cqe->status, &cqe->result); 1313 return; 1314 } 1315 1316 req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id); 1317 if (unlikely(!req)) { 1318 dev_warn(nvmeq->dev->ctrl.device, 1319 "invalid id %d completed on queue %d\n", 1320 command_id, le16_to_cpu(cqe->sq_id)); 1321 return; 1322 } 1323 1324 trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); 1325 if (!nvme_try_complete_req(req, cqe->status, cqe->result) && 1326 !blk_mq_add_to_batch(req, iob, 1327 nvme_req(req)->status != NVME_SC_SUCCESS, 1328 nvme_pci_complete_batch)) 1329 nvme_pci_complete_rq(req); 1330 } 1331 1332 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) 1333 { 1334 u32 tmp = nvmeq->cq_head + 1; 1335 1336 if (tmp == nvmeq->q_depth) { 1337 nvmeq->cq_head = 0; 1338 nvmeq->cq_phase ^= 1; 1339 } else { 1340 nvmeq->cq_head = tmp; 1341 } 1342 } 1343 1344 static inline bool nvme_poll_cq(struct nvme_queue *nvmeq, 1345 struct io_comp_batch *iob) 1346 { 1347 bool found = false; 1348 1349 while (nvme_cqe_pending(nvmeq)) { 1350 found = true; 1351 /* 1352 * load-load control dependency between phase and the rest of 1353 * the cqe requires a full read memory barrier 1354 */ 1355 dma_rmb(); 1356 nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head); 1357 nvme_update_cq_head(nvmeq); 1358 } 1359 1360 if (found) 1361 nvme_ring_cq_doorbell(nvmeq); 1362 return found; 1363 } 1364 1365 static irqreturn_t nvme_irq(int irq, void *data) 1366 { 1367 struct nvme_queue *nvmeq = data; 1368 DEFINE_IO_COMP_BATCH(iob); 1369 1370 if (nvme_poll_cq(nvmeq, &iob)) { 1371 if (!rq_list_empty(&iob.req_list)) 1372 nvme_pci_complete_batch(&iob); 1373 return IRQ_HANDLED; 1374 } 1375 return IRQ_NONE; 1376 } 1377 1378 static irqreturn_t nvme_irq_check(int irq, void *data) 1379 { 1380 struct nvme_queue *nvmeq = data; 1381 1382 if (nvme_cqe_pending(nvmeq)) 1383 return IRQ_WAKE_THREAD; 1384 return IRQ_NONE; 1385 } 1386 1387 /* 1388 * Poll for completions for any interrupt driven queue 1389 * Can be called from any context. 
1390 */ 1391 static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) 1392 { 1393 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 1394 1395 WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); 1396 1397 disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 1398 spin_lock(&nvmeq->cq_poll_lock); 1399 nvme_poll_cq(nvmeq, NULL); 1400 spin_unlock(&nvmeq->cq_poll_lock); 1401 enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 1402 } 1403 1404 static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) 1405 { 1406 struct nvme_queue *nvmeq = hctx->driver_data; 1407 bool found; 1408 1409 if (!nvme_cqe_pending(nvmeq)) 1410 return 0; 1411 1412 spin_lock(&nvmeq->cq_poll_lock); 1413 found = nvme_poll_cq(nvmeq, iob); 1414 spin_unlock(&nvmeq->cq_poll_lock); 1415 1416 return found; 1417 } 1418 1419 static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) 1420 { 1421 struct nvme_dev *dev = to_nvme_dev(ctrl); 1422 struct nvme_queue *nvmeq = &dev->queues[0]; 1423 struct nvme_command c = { }; 1424 1425 c.common.opcode = nvme_admin_async_event; 1426 c.common.command_id = NVME_AQ_BLK_MQ_DEPTH; 1427 1428 spin_lock(&nvmeq->sq_lock); 1429 nvme_sq_copy_cmd(nvmeq, &c); 1430 nvme_write_sq_db(nvmeq, true); 1431 spin_unlock(&nvmeq->sq_lock); 1432 } 1433 1434 static int nvme_pci_subsystem_reset(struct nvme_ctrl *ctrl) 1435 { 1436 struct nvme_dev *dev = to_nvme_dev(ctrl); 1437 int ret = 0; 1438 1439 /* 1440 * Taking the shutdown_lock ensures the BAR mapping is not being 1441 * altered by reset_work. Holding this lock before the RESETTING state 1442 * change, if successful, also ensures nvme_remove won't be able to 1443 * proceed to iounmap until we're done. 1444 */ 1445 mutex_lock(&dev->shutdown_lock); 1446 if (!dev->bar_mapped_size) { 1447 ret = -ENODEV; 1448 goto unlock; 1449 } 1450 1451 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { 1452 ret = -EBUSY; 1453 goto unlock; 1454 } 1455 1456 writel(NVME_SUBSYS_RESET, dev->bar + NVME_REG_NSSR); 1457 nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE); 1458 1459 /* 1460 * Read controller status to flush the previous write and trigger a 1461 * pcie read error. 1462 */ 1463 readl(dev->bar + NVME_REG_CSTS); 1464 unlock: 1465 mutex_unlock(&dev->shutdown_lock); 1466 return ret; 1467 } 1468 1469 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) 1470 { 1471 struct nvme_command c = { }; 1472 1473 c.delete_queue.opcode = opcode; 1474 c.delete_queue.qid = cpu_to_le16(id); 1475 1476 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 1477 } 1478 1479 static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, 1480 struct nvme_queue *nvmeq, s16 vector) 1481 { 1482 struct nvme_command c = { }; 1483 int flags = NVME_QUEUE_PHYS_CONTIG; 1484 1485 if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) 1486 flags |= NVME_CQ_IRQ_ENABLED; 1487 1488 /* 1489 * Note: we (ab)use the fact that the prp fields survive if no data 1490 * is attached to the request. 
1491 */ 1492 c.create_cq.opcode = nvme_admin_create_cq; 1493 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); 1494 c.create_cq.cqid = cpu_to_le16(qid); 1495 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 1496 c.create_cq.cq_flags = cpu_to_le16(flags); 1497 c.create_cq.irq_vector = cpu_to_le16(vector); 1498 1499 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 1500 } 1501 1502 static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, 1503 struct nvme_queue *nvmeq) 1504 { 1505 struct nvme_ctrl *ctrl = &dev->ctrl; 1506 struct nvme_command c = { }; 1507 int flags = NVME_QUEUE_PHYS_CONTIG; 1508 1509 /* 1510 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't 1511 * set. Since URGENT priority is zeroes, it makes all queues 1512 * URGENT. 1513 */ 1514 if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ) 1515 flags |= NVME_SQ_PRIO_MEDIUM; 1516 1517 /* 1518 * Note: we (ab)use the fact that the prp fields survive if no data 1519 * is attached to the request. 1520 */ 1521 c.create_sq.opcode = nvme_admin_create_sq; 1522 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); 1523 c.create_sq.sqid = cpu_to_le16(qid); 1524 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 1525 c.create_sq.sq_flags = cpu_to_le16(flags); 1526 c.create_sq.cqid = cpu_to_le16(qid); 1527 1528 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 1529 } 1530 1531 static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) 1532 { 1533 return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); 1534 } 1535 1536 static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) 1537 { 1538 return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); 1539 } 1540 1541 static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error) 1542 { 1543 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 1544 1545 dev_warn(nvmeq->dev->ctrl.device, 1546 "Abort status: 0x%x", nvme_req(req)->status); 1547 atomic_inc(&nvmeq->dev->ctrl.abort_limit); 1548 blk_mq_free_request(req); 1549 return RQ_END_IO_NONE; 1550 } 1551 1552 static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) 1553 { 1554 /* If true, indicates loss of adapter communication, possibly by a 1555 * NVMe Subsystem reset. 1556 */ 1557 bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); 1558 1559 /* If there is a reset/reinit ongoing, we shouldn't reset again. */ 1560 switch (nvme_ctrl_state(&dev->ctrl)) { 1561 case NVME_CTRL_RESETTING: 1562 case NVME_CTRL_CONNECTING: 1563 return false; 1564 default: 1565 break; 1566 } 1567 1568 /* We shouldn't reset unless the controller is on fatal error state 1569 * _or_ if we lost the communication with it. 1570 */ 1571 if (!(csts & NVME_CSTS_CFS) && !nssro) 1572 return false; 1573 1574 return true; 1575 } 1576 1577 static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) 1578 { 1579 /* Read a config register to help see what died. 
*/ 1580 u16 pci_status; 1581 int result; 1582 1583 result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, 1584 &pci_status); 1585 if (result == PCIBIOS_SUCCESSFUL) 1586 dev_warn(dev->ctrl.device, 1587 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", 1588 csts, pci_status); 1589 else 1590 dev_warn(dev->ctrl.device, 1591 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", 1592 csts, result); 1593 1594 if (csts != ~0) 1595 return; 1596 1597 dev_warn(dev->ctrl.device, 1598 "Does your device have a faulty power saving mode enabled?\n"); 1599 dev_warn(dev->ctrl.device, 1600 "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off pcie_port_pm=off\" and report a bug\n"); 1601 } 1602 1603 static enum blk_eh_timer_return nvme_timeout(struct request *req) 1604 { 1605 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1606 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 1607 struct nvme_dev *dev = nvmeq->dev; 1608 struct request *abort_req; 1609 struct nvme_command cmd = { }; 1610 struct pci_dev *pdev = to_pci_dev(dev->dev); 1611 u32 csts = readl(dev->bar + NVME_REG_CSTS); 1612 u8 opcode; 1613 1614 /* 1615 * Shutdown the device immediately if we see it is disconnected. This 1616 * unblocks PCIe error handling if the nvme driver is waiting in 1617 * error_resume for a device that has been removed. We can't unbind the 1618 * driver while the driver's error callback is waiting to complete, so 1619 * we're relying on a timeout to break that deadlock if a removal 1620 * occurs while reset work is running. 1621 */ 1622 if (pci_dev_is_disconnected(pdev)) 1623 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 1624 if (nvme_state_terminal(&dev->ctrl)) 1625 goto disable; 1626 1627 /* If PCI error recovery process is happening, we cannot reset or 1628 * the recovery mechanism will surely fail. 1629 */ 1630 mb(); 1631 if (pci_channel_offline(pdev)) 1632 return BLK_EH_RESET_TIMER; 1633 1634 /* 1635 * Reset immediately if the controller is failed 1636 */ 1637 if (nvme_should_reset(dev, csts)) { 1638 nvme_warn_reset(dev, csts); 1639 goto disable; 1640 } 1641 1642 /* 1643 * Did we miss an interrupt? 1644 */ 1645 if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) 1646 nvme_poll(req->mq_hctx, NULL); 1647 else 1648 nvme_poll_irqdisable(nvmeq); 1649 1650 if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) { 1651 dev_warn(dev->ctrl.device, 1652 "I/O tag %d (%04x) QID %d timeout, completion polled\n", 1653 req->tag, nvme_cid(req), nvmeq->qid); 1654 return BLK_EH_DONE; 1655 } 1656 1657 /* 1658 * Shutdown immediately if controller times out while starting. The 1659 * reset work will see the pci device disabled when it gets the forced 1660 * cancellation error. All outstanding requests are completed on 1661 * shutdown, so we return BLK_EH_DONE. 
1662 */ 1663 switch (nvme_ctrl_state(&dev->ctrl)) { 1664 case NVME_CTRL_CONNECTING: 1665 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 1666 fallthrough; 1667 case NVME_CTRL_DELETING: 1668 dev_warn_ratelimited(dev->ctrl.device, 1669 "I/O tag %d (%04x) QID %d timeout, disable controller\n", 1670 req->tag, nvme_cid(req), nvmeq->qid); 1671 nvme_req(req)->flags |= NVME_REQ_CANCELLED; 1672 nvme_dev_disable(dev, true); 1673 return BLK_EH_DONE; 1674 case NVME_CTRL_RESETTING: 1675 return BLK_EH_RESET_TIMER; 1676 default: 1677 break; 1678 } 1679 1680 /* 1681 * Shutdown the controller immediately and schedule a reset if the 1682 * command was already aborted once before and still hasn't been 1683 * returned to the driver, or if this is the admin queue. 1684 */ 1685 opcode = nvme_req(req)->cmd->common.opcode; 1686 if (!nvmeq->qid || (iod->flags & IOD_ABORTED)) { 1687 dev_warn(dev->ctrl.device, 1688 "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n", 1689 req->tag, nvme_cid(req), opcode, 1690 nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid); 1691 nvme_req(req)->flags |= NVME_REQ_CANCELLED; 1692 goto disable; 1693 } 1694 1695 if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { 1696 atomic_inc(&dev->ctrl.abort_limit); 1697 return BLK_EH_RESET_TIMER; 1698 } 1699 iod->flags |= IOD_ABORTED; 1700 1701 cmd.abort.opcode = nvme_admin_abort_cmd; 1702 cmd.abort.cid = nvme_cid(req); 1703 cmd.abort.sqid = cpu_to_le16(nvmeq->qid); 1704 1705 dev_warn(nvmeq->dev->ctrl.device, 1706 "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, aborting req_op:%s(%u) size:%u\n", 1707 req->tag, nvme_cid(req), opcode, nvme_get_opcode_str(opcode), 1708 nvmeq->qid, blk_op_str(req_op(req)), req_op(req), 1709 blk_rq_bytes(req)); 1710 1711 abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd), 1712 BLK_MQ_REQ_NOWAIT); 1713 if (IS_ERR(abort_req)) { 1714 atomic_inc(&dev->ctrl.abort_limit); 1715 return BLK_EH_RESET_TIMER; 1716 } 1717 nvme_init_request(abort_req, &cmd); 1718 1719 abort_req->end_io = abort_endio; 1720 abort_req->end_io_data = NULL; 1721 blk_execute_rq_nowait(abort_req, false); 1722 1723 /* 1724 * The aborted req will be completed on receiving the abort req. 1725 * We enable the timer again. If hit twice, it'll cause a device reset, 1726 * as the device then is in a faulty state. 
1727 */ 1728 return BLK_EH_RESET_TIMER; 1729 1730 disable: 1731 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) { 1732 if (nvme_state_terminal(&dev->ctrl)) 1733 nvme_dev_disable(dev, true); 1734 return BLK_EH_DONE; 1735 } 1736 1737 nvme_dev_disable(dev, false); 1738 if (nvme_try_sched_reset(&dev->ctrl)) 1739 nvme_unquiesce_io_queues(&dev->ctrl); 1740 return BLK_EH_DONE; 1741 } 1742 1743 static void nvme_free_queue(struct nvme_queue *nvmeq) 1744 { 1745 dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), 1746 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 1747 if (!nvmeq->sq_cmds) 1748 return; 1749 1750 if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { 1751 pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), 1752 nvmeq->sq_cmds, SQ_SIZE(nvmeq)); 1753 } else { 1754 dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), 1755 nvmeq->sq_cmds, nvmeq->sq_dma_addr); 1756 } 1757 } 1758 1759 static void nvme_free_queues(struct nvme_dev *dev, int lowest) 1760 { 1761 int i; 1762 1763 for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { 1764 dev->ctrl.queue_count--; 1765 nvme_free_queue(&dev->queues[i]); 1766 } 1767 } 1768 1769 static void nvme_suspend_queue(struct nvme_dev *dev, unsigned int qid) 1770 { 1771 struct nvme_queue *nvmeq = &dev->queues[qid]; 1772 1773 if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) 1774 return; 1775 1776 /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */ 1777 mb(); 1778 1779 nvmeq->dev->online_queues--; 1780 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) 1781 nvme_quiesce_admin_queue(&nvmeq->dev->ctrl); 1782 if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) 1783 pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq); 1784 } 1785 1786 static void nvme_suspend_io_queues(struct nvme_dev *dev) 1787 { 1788 int i; 1789 1790 for (i = dev->ctrl.queue_count - 1; i > 0; i--) 1791 nvme_suspend_queue(dev, i); 1792 } 1793 1794 /* 1795 * Called only on a device that has been disabled and after all other threads 1796 * that can check this device's completion queues have synced, except 1797 * nvme_poll(). This is the last chance for the driver to see a natural 1798 * completion before nvme_cancel_request() terminates all incomplete requests. 
1799 */ 1800 static void nvme_reap_pending_cqes(struct nvme_dev *dev) 1801 { 1802 int i; 1803 1804 for (i = dev->ctrl.queue_count - 1; i > 0; i--) { 1805 spin_lock(&dev->queues[i].cq_poll_lock); 1806 nvme_poll_cq(&dev->queues[i], NULL); 1807 spin_unlock(&dev->queues[i].cq_poll_lock); 1808 } 1809 } 1810 1811 static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, 1812 int entry_size) 1813 { 1814 int q_depth = dev->q_depth; 1815 unsigned q_size_aligned = roundup(q_depth * entry_size, 1816 NVME_CTRL_PAGE_SIZE); 1817 1818 if (q_size_aligned * nr_io_queues > dev->cmb_size) { 1819 u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); 1820 1821 mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE); 1822 q_depth = div_u64(mem_per_q, entry_size); 1823 1824 /* 1825 * Ensure the reduced q_depth is above some threshold where it 1826 * would be better to map queues in system memory with the 1827 * original depth 1828 */ 1829 if (q_depth < 64) 1830 return -ENOMEM; 1831 } 1832 1833 return q_depth; 1834 } 1835 1836 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, 1837 int qid) 1838 { 1839 struct pci_dev *pdev = to_pci_dev(dev->dev); 1840 1841 if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { 1842 nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); 1843 if (nvmeq->sq_cmds) { 1844 nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, 1845 nvmeq->sq_cmds); 1846 if (nvmeq->sq_dma_addr) { 1847 set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); 1848 return 0; 1849 } 1850 1851 pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq)); 1852 } 1853 } 1854 1855 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), 1856 &nvmeq->sq_dma_addr, GFP_KERNEL); 1857 if (!nvmeq->sq_cmds) 1858 return -ENOMEM; 1859 return 0; 1860 } 1861 1862 static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) 1863 { 1864 struct nvme_queue *nvmeq = &dev->queues[qid]; 1865 1866 if (dev->ctrl.queue_count > qid) 1867 return 0; 1868 1869 nvmeq->sqes = qid ? 
dev->io_sqes : NVME_ADM_SQES; 1870 nvmeq->q_depth = depth; 1871 nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), 1872 &nvmeq->cq_dma_addr, GFP_KERNEL); 1873 if (!nvmeq->cqes) 1874 goto free_nvmeq; 1875 1876 if (nvme_alloc_sq_cmds(dev, nvmeq, qid)) 1877 goto free_cqdma; 1878 1879 nvmeq->dev = dev; 1880 spin_lock_init(&nvmeq->sq_lock); 1881 spin_lock_init(&nvmeq->cq_poll_lock); 1882 nvmeq->cq_head = 0; 1883 nvmeq->cq_phase = 1; 1884 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 1885 nvmeq->qid = qid; 1886 dev->ctrl.queue_count++; 1887 1888 return 0; 1889 1890 free_cqdma: 1891 dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, 1892 nvmeq->cq_dma_addr); 1893 free_nvmeq: 1894 return -ENOMEM; 1895 } 1896 1897 static int queue_request_irq(struct nvme_queue *nvmeq) 1898 { 1899 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 1900 int nr = nvmeq->dev->ctrl.instance; 1901 1902 if (use_threaded_interrupts) { 1903 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, 1904 nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 1905 } else { 1906 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, 1907 NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 1908 } 1909 } 1910 1911 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) 1912 { 1913 struct nvme_dev *dev = nvmeq->dev; 1914 1915 nvmeq->sq_tail = 0; 1916 nvmeq->last_sq_tail = 0; 1917 nvmeq->cq_head = 0; 1918 nvmeq->cq_phase = 1; 1919 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 1920 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); 1921 nvme_dbbuf_init(dev, nvmeq, qid); 1922 dev->online_queues++; 1923 wmb(); /* ensure the first interrupt sees the initialization */ 1924 } 1925 1926 /* 1927 * Try getting shutdown_lock while setting up IO queues. 1928 */ 1929 static int nvme_setup_io_queues_trylock(struct nvme_dev *dev) 1930 { 1931 /* 1932 * Give up if the lock is being held by nvme_dev_disable. 1933 */ 1934 if (!mutex_trylock(&dev->shutdown_lock)) 1935 return -ENODEV; 1936 1937 /* 1938 * Controller is in wrong state, fail early. 1939 */ 1940 if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) { 1941 mutex_unlock(&dev->shutdown_lock); 1942 return -ENODEV; 1943 } 1944 1945 return 0; 1946 } 1947 1948 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) 1949 { 1950 struct nvme_dev *dev = nvmeq->dev; 1951 int result; 1952 u16 vector = 0; 1953 1954 clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 1955 1956 /* 1957 * A queue's vector matches the queue identifier unless the controller 1958 * has only one vector available. 1959 */ 1960 if (!polled) 1961 vector = dev->num_vecs == 1 ? 
0 : qid; 1962 else 1963 set_bit(NVMEQ_POLLED, &nvmeq->flags); 1964 1965 result = adapter_alloc_cq(dev, qid, nvmeq, vector); 1966 if (result) 1967 return result; 1968 1969 result = adapter_alloc_sq(dev, qid, nvmeq); 1970 if (result < 0) 1971 return result; 1972 if (result) 1973 goto release_cq; 1974 1975 nvmeq->cq_vector = vector; 1976 1977 result = nvme_setup_io_queues_trylock(dev); 1978 if (result) 1979 return result; 1980 nvme_init_queue(nvmeq, qid); 1981 if (!polled) { 1982 result = queue_request_irq(nvmeq); 1983 if (result < 0) 1984 goto release_sq; 1985 } 1986 1987 set_bit(NVMEQ_ENABLED, &nvmeq->flags); 1988 mutex_unlock(&dev->shutdown_lock); 1989 return result; 1990 1991 release_sq: 1992 dev->online_queues--; 1993 mutex_unlock(&dev->shutdown_lock); 1994 adapter_delete_sq(dev, qid); 1995 release_cq: 1996 adapter_delete_cq(dev, qid); 1997 return result; 1998 } 1999 2000 static const struct blk_mq_ops nvme_mq_admin_ops = { 2001 .queue_rq = nvme_queue_rq, 2002 .complete = nvme_pci_complete_rq, 2003 .init_hctx = nvme_admin_init_hctx, 2004 .init_request = nvme_pci_init_request, 2005 .timeout = nvme_timeout, 2006 }; 2007 2008 static const struct blk_mq_ops nvme_mq_ops = { 2009 .queue_rq = nvme_queue_rq, 2010 .queue_rqs = nvme_queue_rqs, 2011 .complete = nvme_pci_complete_rq, 2012 .commit_rqs = nvme_commit_rqs, 2013 .init_hctx = nvme_init_hctx, 2014 .init_request = nvme_pci_init_request, 2015 .map_queues = nvme_pci_map_queues, 2016 .timeout = nvme_timeout, 2017 .poll = nvme_poll, 2018 }; 2019 2020 static void nvme_dev_remove_admin(struct nvme_dev *dev) 2021 { 2022 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { 2023 /* 2024 * If the controller was reset during removal, it's possible 2025 * user requests may be waiting on a stopped queue. Start the 2026 * queue to flush these to completion. 2027 */ 2028 nvme_unquiesce_admin_queue(&dev->ctrl); 2029 nvme_remove_admin_tag_set(&dev->ctrl); 2030 } 2031 } 2032 2033 static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) 2034 { 2035 return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); 2036 } 2037 2038 static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size) 2039 { 2040 struct pci_dev *pdev = to_pci_dev(dev->dev); 2041 2042 if (size <= dev->bar_mapped_size) 2043 return 0; 2044 if (size > pci_resource_len(pdev, 0)) 2045 return -ENOMEM; 2046 if (dev->bar) 2047 iounmap(dev->bar); 2048 dev->bar = ioremap(pci_resource_start(pdev, 0), size); 2049 if (!dev->bar) { 2050 dev->bar_mapped_size = 0; 2051 return -ENOMEM; 2052 } 2053 dev->bar_mapped_size = size; 2054 dev->dbs = dev->bar + NVME_REG_DBS; 2055 2056 return 0; 2057 } 2058 2059 static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) 2060 { 2061 int result; 2062 u32 aqa; 2063 struct nvme_queue *nvmeq; 2064 2065 result = nvme_remap_bar(dev, db_bar_size(dev, 0)); 2066 if (result < 0) 2067 return result; 2068 2069 dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? 2070 NVME_CAP_NSSRC(dev->ctrl.cap) : 0; 2071 2072 if (dev->subsystem && 2073 (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) 2074 writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); 2075 2076 /* 2077 * If the device has been passed off to us in an enabled state, just 2078 * clear the enabled bit. The spec says we should set the 'shutdown 2079 * notification bits', but doing so may cause the device to complete 2080 * commands to the admin queue ... and we don't know what memory that 2081 * might be pointing at! 
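 *
 * Roughly speaking, nvme_disable_ctrl(..., false) clears CC.EN and then
 * waits for CSTS.RDY to read back as zero (bounded by the CAP.TO
 * timeout), so the controller stops writing completions into host
 * memory before the admin queue registers are reprogrammed below.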
2082 */ 2083 result = nvme_disable_ctrl(&dev->ctrl, false); 2084 if (result < 0) { 2085 struct pci_dev *pdev = to_pci_dev(dev->dev); 2086 2087 /* 2088 * The NVMe Controller Reset method did not get an expected 2089 * CSTS.RDY transition, so something with the device appears to 2090 * be stuck. Use the lower level and bigger hammer PCIe 2091 * Function Level Reset to attempt restoring the device to its 2092 * initial state, and try again. 2093 */ 2094 result = pcie_reset_flr(pdev, false); 2095 if (result < 0) 2096 return result; 2097 2098 pci_restore_state(pdev); 2099 result = nvme_disable_ctrl(&dev->ctrl, false); 2100 if (result < 0) 2101 return result; 2102 2103 dev_info(dev->ctrl.device, 2104 "controller reset completed after pcie flr\n"); 2105 } 2106 2107 result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); 2108 if (result) 2109 return result; 2110 2111 dev->ctrl.numa_node = dev_to_node(dev->dev); 2112 2113 nvmeq = &dev->queues[0]; 2114 aqa = nvmeq->q_depth - 1; 2115 aqa |= aqa << 16; 2116 2117 writel(aqa, dev->bar + NVME_REG_AQA); 2118 lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); 2119 lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); 2120 2121 result = nvme_enable_ctrl(&dev->ctrl); 2122 if (result) 2123 return result; 2124 2125 nvmeq->cq_vector = 0; 2126 nvme_init_queue(nvmeq, 0); 2127 result = queue_request_irq(nvmeq); 2128 if (result) { 2129 dev->online_queues--; 2130 return result; 2131 } 2132 2133 set_bit(NVMEQ_ENABLED, &nvmeq->flags); 2134 return result; 2135 } 2136 2137 static int nvme_create_io_queues(struct nvme_dev *dev) 2138 { 2139 unsigned i, max, rw_queues; 2140 int ret = 0; 2141 2142 for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { 2143 if (nvme_alloc_queue(dev, i, dev->q_depth)) { 2144 ret = -ENOMEM; 2145 break; 2146 } 2147 } 2148 2149 max = min(dev->max_qid, dev->ctrl.queue_count - 1); 2150 if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) { 2151 rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] + 2152 dev->io_queues[HCTX_TYPE_READ]; 2153 } else { 2154 rw_queues = max; 2155 } 2156 2157 for (i = dev->online_queues; i <= max; i++) { 2158 bool polled = i > rw_queues; 2159 2160 ret = nvme_create_queue(&dev->queues[i], i, polled); 2161 if (ret) 2162 break; 2163 } 2164 2165 /* 2166 * Ignore failing Create SQ/CQ commands, we can continue with less 2167 * than the desired amount of queues, and even a controller without 2168 * I/O queues can still be used to issue admin commands. This might 2169 * be useful to upgrade a buggy firmware for example. 2170 */ 2171 return ret >= 0 ? 
0 : ret; 2172 } 2173 2174 static u64 nvme_cmb_size_unit(struct nvme_dev *dev) 2175 { 2176 u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK; 2177 2178 return 1ULL << (12 + 4 * szu); 2179 } 2180 2181 static u32 nvme_cmb_size(struct nvme_dev *dev) 2182 { 2183 return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK; 2184 } 2185 2186 static void nvme_map_cmb(struct nvme_dev *dev) 2187 { 2188 u64 size, offset; 2189 resource_size_t bar_size; 2190 struct pci_dev *pdev = to_pci_dev(dev->dev); 2191 int bar; 2192 2193 if (dev->cmb_size) 2194 return; 2195 2196 if (NVME_CAP_CMBS(dev->ctrl.cap)) 2197 writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC); 2198 2199 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); 2200 if (!dev->cmbsz) 2201 return; 2202 dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); 2203 2204 size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev); 2205 offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc); 2206 bar = NVME_CMB_BIR(dev->cmbloc); 2207 bar_size = pci_resource_len(pdev, bar); 2208 2209 if (offset > bar_size) 2210 return; 2211 2212 /* 2213 * Controllers may support a CMB size larger than their BAR, for 2214 * example, due to being behind a bridge. Reduce the CMB to the 2215 * reported size of the BAR 2216 */ 2217 size = min(size, bar_size - offset); 2218 2219 if (!IS_ALIGNED(size, memremap_compat_align()) || 2220 !IS_ALIGNED(pci_resource_start(pdev, bar), 2221 memremap_compat_align())) 2222 return; 2223 2224 /* 2225 * Tell the controller about the host side address mapping the CMB, 2226 * and enable CMB decoding for the NVMe 1.4+ scheme: 2227 */ 2228 if (NVME_CAP_CMBS(dev->ctrl.cap)) { 2229 hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE | 2230 (pci_bus_address(pdev, bar) + offset), 2231 dev->bar + NVME_REG_CMBMSC); 2232 } 2233 2234 if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { 2235 dev_warn(dev->ctrl.device, 2236 "failed to register the CMB\n"); 2237 hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC); 2238 return; 2239 } 2240 2241 dev->cmb_size = size; 2242 dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); 2243 2244 if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == 2245 (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) 2246 pci_p2pmem_publish(pdev, true); 2247 } 2248 2249 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) 2250 { 2251 u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; 2252 u64 dma_addr = dev->host_mem_descs_dma; 2253 struct nvme_command c = { }; 2254 int ret; 2255 2256 c.features.opcode = nvme_admin_set_features; 2257 c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); 2258 c.features.dword11 = cpu_to_le32(bits); 2259 c.features.dword12 = cpu_to_le32(host_mem_size); 2260 c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr)); 2261 c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr)); 2262 c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); 2263 2264 ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 2265 if (ret) { 2266 dev_warn(dev->ctrl.device, 2267 "failed to set host mem (err %d, flags %#x).\n", 2268 ret, bits); 2269 } else 2270 dev->hmb = bits & NVME_HOST_MEM_ENABLE; 2271 2272 return ret; 2273 } 2274 2275 static void nvme_free_host_mem_multi(struct nvme_dev *dev) 2276 { 2277 int i; 2278 2279 for (i = 0; i < dev->nr_host_mem_descs; i++) { 2280 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; 2281 size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE; 2282 2283 dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], 2284 le64_to_cpu(desc->addr), 
2285 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 2286 } 2287 2288 kfree(dev->host_mem_desc_bufs); 2289 dev->host_mem_desc_bufs = NULL; 2290 } 2291 2292 static void nvme_free_host_mem(struct nvme_dev *dev) 2293 { 2294 if (dev->hmb_sgt) 2295 dma_free_noncontiguous(dev->dev, dev->host_mem_size, 2296 dev->hmb_sgt, DMA_BIDIRECTIONAL); 2297 else 2298 nvme_free_host_mem_multi(dev); 2299 2300 dma_free_coherent(dev->dev, dev->host_mem_descs_size, 2301 dev->host_mem_descs, dev->host_mem_descs_dma); 2302 dev->host_mem_descs = NULL; 2303 dev->host_mem_descs_size = 0; 2304 dev->nr_host_mem_descs = 0; 2305 } 2306 2307 static int nvme_alloc_host_mem_single(struct nvme_dev *dev, u64 size) 2308 { 2309 dev->hmb_sgt = dma_alloc_noncontiguous(dev->dev, size, 2310 DMA_BIDIRECTIONAL, GFP_KERNEL, 0); 2311 if (!dev->hmb_sgt) 2312 return -ENOMEM; 2313 2314 dev->host_mem_descs = dma_alloc_coherent(dev->dev, 2315 sizeof(*dev->host_mem_descs), &dev->host_mem_descs_dma, 2316 GFP_KERNEL); 2317 if (!dev->host_mem_descs) { 2318 dma_free_noncontiguous(dev->dev, size, dev->hmb_sgt, 2319 DMA_BIDIRECTIONAL); 2320 dev->hmb_sgt = NULL; 2321 return -ENOMEM; 2322 } 2323 dev->host_mem_size = size; 2324 dev->host_mem_descs_size = sizeof(*dev->host_mem_descs); 2325 dev->nr_host_mem_descs = 1; 2326 2327 dev->host_mem_descs[0].addr = 2328 cpu_to_le64(dev->hmb_sgt->sgl->dma_address); 2329 dev->host_mem_descs[0].size = cpu_to_le32(size / NVME_CTRL_PAGE_SIZE); 2330 return 0; 2331 } 2332 2333 static int nvme_alloc_host_mem_multi(struct nvme_dev *dev, u64 preferred, 2334 u32 chunk_size) 2335 { 2336 struct nvme_host_mem_buf_desc *descs; 2337 u32 max_entries, len, descs_size; 2338 dma_addr_t descs_dma; 2339 int i = 0; 2340 void **bufs; 2341 u64 size, tmp; 2342 2343 tmp = (preferred + chunk_size - 1); 2344 do_div(tmp, chunk_size); 2345 max_entries = tmp; 2346 2347 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) 2348 max_entries = dev->ctrl.hmmaxd; 2349 2350 descs_size = max_entries * sizeof(*descs); 2351 descs = dma_alloc_coherent(dev->dev, descs_size, &descs_dma, 2352 GFP_KERNEL); 2353 if (!descs) 2354 goto out; 2355 2356 bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL); 2357 if (!bufs) 2358 goto out_free_descs; 2359 2360 for (size = 0; size < preferred && i < max_entries; size += len) { 2361 dma_addr_t dma_addr; 2362 2363 len = min_t(u64, chunk_size, preferred - size); 2364 bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, 2365 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 2366 if (!bufs[i]) 2367 break; 2368 2369 descs[i].addr = cpu_to_le64(dma_addr); 2370 descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE); 2371 i++; 2372 } 2373 2374 if (!size) 2375 goto out_free_bufs; 2376 2377 dev->nr_host_mem_descs = i; 2378 dev->host_mem_size = size; 2379 dev->host_mem_descs = descs; 2380 dev->host_mem_descs_dma = descs_dma; 2381 dev->host_mem_descs_size = descs_size; 2382 dev->host_mem_desc_bufs = bufs; 2383 return 0; 2384 2385 out_free_bufs: 2386 kfree(bufs); 2387 out_free_descs: 2388 dma_free_coherent(dev->dev, descs_size, descs, descs_dma); 2389 out: 2390 dev->host_mem_descs = NULL; 2391 return -ENOMEM; 2392 } 2393 2394 static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) 2395 { 2396 unsigned long dma_merge_boundary = dma_get_merge_boundary(dev->dev); 2397 u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES); 2398 u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); 2399 u64 chunk_size; 2400 2401 /* 2402 * If there is an IOMMU that can merge pages, try a 
virtually 2403 * non-contiguous allocation for a single segment first. 2404 */ 2405 if (dma_merge_boundary && (PAGE_SIZE & dma_merge_boundary) == 0) { 2406 if (!nvme_alloc_host_mem_single(dev, preferred)) 2407 return 0; 2408 } 2409 2410 /* start big and work our way down */ 2411 for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) { 2412 if (!nvme_alloc_host_mem_multi(dev, preferred, chunk_size)) { 2413 if (!min || dev->host_mem_size >= min) 2414 return 0; 2415 nvme_free_host_mem(dev); 2416 } 2417 } 2418 2419 return -ENOMEM; 2420 } 2421 2422 static int nvme_setup_host_mem(struct nvme_dev *dev) 2423 { 2424 u64 max = (u64)max_host_mem_size_mb * SZ_1M; 2425 u64 preferred = (u64)dev->ctrl.hmpre * 4096; 2426 u64 min = (u64)dev->ctrl.hmmin * 4096; 2427 u32 enable_bits = NVME_HOST_MEM_ENABLE; 2428 int ret; 2429 2430 if (!dev->ctrl.hmpre) 2431 return 0; 2432 2433 preferred = min(preferred, max); 2434 if (min > max) { 2435 dev_warn(dev->ctrl.device, 2436 "min host memory (%lld MiB) above limit (%d MiB).\n", 2437 min >> ilog2(SZ_1M), max_host_mem_size_mb); 2438 nvme_free_host_mem(dev); 2439 return 0; 2440 } 2441 2442 /* 2443 * If we already have a buffer allocated check if we can reuse it. 2444 */ 2445 if (dev->host_mem_descs) { 2446 if (dev->host_mem_size >= min) 2447 enable_bits |= NVME_HOST_MEM_RETURN; 2448 else 2449 nvme_free_host_mem(dev); 2450 } 2451 2452 if (!dev->host_mem_descs) { 2453 if (nvme_alloc_host_mem(dev, min, preferred)) { 2454 dev_warn(dev->ctrl.device, 2455 "failed to allocate host memory buffer.\n"); 2456 return 0; /* controller must work without HMB */ 2457 } 2458 2459 dev_info(dev->ctrl.device, 2460 "allocated %lld MiB host memory buffer (%u segment%s).\n", 2461 dev->host_mem_size >> ilog2(SZ_1M), 2462 dev->nr_host_mem_descs, 2463 str_plural(dev->nr_host_mem_descs)); 2464 } 2465 2466 ret = nvme_set_host_mem(dev, enable_bits); 2467 if (ret) 2468 nvme_free_host_mem(dev); 2469 return ret; 2470 } 2471 2472 static ssize_t cmb_show(struct device *dev, struct device_attribute *attr, 2473 char *buf) 2474 { 2475 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2476 2477 return sysfs_emit(buf, "cmbloc : 0x%08x\ncmbsz : 0x%08x\n", 2478 ndev->cmbloc, ndev->cmbsz); 2479 } 2480 static DEVICE_ATTR_RO(cmb); 2481 2482 static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr, 2483 char *buf) 2484 { 2485 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2486 2487 return sysfs_emit(buf, "%u\n", ndev->cmbloc); 2488 } 2489 static DEVICE_ATTR_RO(cmbloc); 2490 2491 static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr, 2492 char *buf) 2493 { 2494 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2495 2496 return sysfs_emit(buf, "%u\n", ndev->cmbsz); 2497 } 2498 static DEVICE_ATTR_RO(cmbsz); 2499 2500 static ssize_t hmb_show(struct device *dev, struct device_attribute *attr, 2501 char *buf) 2502 { 2503 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2504 2505 return sysfs_emit(buf, "%d\n", ndev->hmb); 2506 } 2507 2508 static ssize_t hmb_store(struct device *dev, struct device_attribute *attr, 2509 const char *buf, size_t count) 2510 { 2511 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2512 bool new; 2513 int ret; 2514 2515 if (kstrtobool(buf, &new) < 0) 2516 return -EINVAL; 2517 2518 if (new == ndev->hmb) 2519 return count; 2520 2521 if (new) { 2522 ret = nvme_setup_host_mem(ndev); 2523 } else { 2524 ret = nvme_set_host_mem(ndev, 0); 2525 if (!ret) 2526 nvme_free_host_mem(ndev); 2527 } 2528 
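	/*
	 * Illustrative usage (the sysfs path is an example, typically
	 * /sys/class/nvme/nvme0/hmb): writing 0 disables the HMB on the
	 * controller and frees the host memory backing it, writing 1
	 * re-runs the same setup path used at probe and reset time.
	 */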
2529 if (ret < 0) 2530 return ret; 2531 2532 return count; 2533 } 2534 static DEVICE_ATTR_RW(hmb); 2535 2536 static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj, 2537 struct attribute *a, int n) 2538 { 2539 struct nvme_ctrl *ctrl = 2540 dev_get_drvdata(container_of(kobj, struct device, kobj)); 2541 struct nvme_dev *dev = to_nvme_dev(ctrl); 2542 2543 if (a == &dev_attr_cmb.attr || 2544 a == &dev_attr_cmbloc.attr || 2545 a == &dev_attr_cmbsz.attr) { 2546 if (!dev->cmbsz) 2547 return 0; 2548 } 2549 if (a == &dev_attr_hmb.attr && !ctrl->hmpre) 2550 return 0; 2551 2552 return a->mode; 2553 } 2554 2555 static struct attribute *nvme_pci_attrs[] = { 2556 &dev_attr_cmb.attr, 2557 &dev_attr_cmbloc.attr, 2558 &dev_attr_cmbsz.attr, 2559 &dev_attr_hmb.attr, 2560 NULL, 2561 }; 2562 2563 static const struct attribute_group nvme_pci_dev_attrs_group = { 2564 .attrs = nvme_pci_attrs, 2565 .is_visible = nvme_pci_attrs_are_visible, 2566 }; 2567 2568 static const struct attribute_group *nvme_pci_dev_attr_groups[] = { 2569 &nvme_dev_attrs_group, 2570 &nvme_pci_dev_attrs_group, 2571 NULL, 2572 }; 2573 2574 static void nvme_update_attrs(struct nvme_dev *dev) 2575 { 2576 sysfs_update_group(&dev->ctrl.device->kobj, &nvme_pci_dev_attrs_group); 2577 } 2578 2579 /* 2580 * nirqs is the number of interrupts available for write and read 2581 * queues. The core already reserved an interrupt for the admin queue. 2582 */ 2583 static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs) 2584 { 2585 struct nvme_dev *dev = affd->priv; 2586 unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues; 2587 2588 /* 2589 * If there is no interrupt available for queues, ensure that 2590 * the default queue is set to 1. The affinity set size is 2591 * also set to one, but the irq core ignores it for this case. 2592 * 2593 * If only one interrupt is available or 'write_queue' == 0, combine 2594 * write and read queues. 2595 * 2596 * If 'write_queues' > 0, ensure it leaves room for at least one read 2597 * queue. 2598 */ 2599 if (!nrirqs) { 2600 nrirqs = 1; 2601 nr_read_queues = 0; 2602 } else if (nrirqs == 1 || !nr_write_queues) { 2603 nr_read_queues = 0; 2604 } else if (nr_write_queues >= nrirqs) { 2605 nr_read_queues = 1; 2606 } else { 2607 nr_read_queues = nrirqs - nr_write_queues; 2608 } 2609 2610 dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; 2611 affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; 2612 dev->io_queues[HCTX_TYPE_READ] = nr_read_queues; 2613 affd->set_size[HCTX_TYPE_READ] = nr_read_queues; 2614 affd->nr_sets = nr_read_queues ? 2 : 1; 2615 } 2616 2617 static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) 2618 { 2619 struct pci_dev *pdev = to_pci_dev(dev->dev); 2620 struct irq_affinity affd = { 2621 .pre_vectors = 1, 2622 .calc_sets = nvme_calc_irq_sets, 2623 .priv = dev, 2624 }; 2625 unsigned int irq_queues, poll_queues; 2626 unsigned int flags = PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY; 2627 2628 /* 2629 * Poll queues don't need interrupts, but we need at least one I/O queue 2630 * left over for non-polled I/O. 2631 */ 2632 poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1); 2633 dev->io_queues[HCTX_TYPE_POLL] = poll_queues; 2634 2635 /* 2636 * Initialize for the single interrupt case, will be updated in 2637 * nvme_calc_irq_sets(). 
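 *
 * As a worked example with illustrative numbers: if nvme_calc_irq_sets()
 * is later called with nrirqs == 8 and write_queues == 2, it ends up
 * with 2 default (write) queues and 6 read queues in two affinity sets;
 * with write_queues == 0 all 8 interrupts go to a single default set.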
2638 */ 2639 dev->io_queues[HCTX_TYPE_DEFAULT] = 1; 2640 dev->io_queues[HCTX_TYPE_READ] = 0; 2641 2642 /* 2643 * We need interrupts for the admin queue and each non-polled I/O queue, 2644 * but some Apple controllers require all queues to use the first 2645 * vector. 2646 */ 2647 irq_queues = 1; 2648 if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)) 2649 irq_queues += (nr_io_queues - poll_queues); 2650 if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI) 2651 flags &= ~PCI_IRQ_MSI; 2652 return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, flags, 2653 &affd); 2654 } 2655 2656 static unsigned int nvme_max_io_queues(struct nvme_dev *dev) 2657 { 2658 /* 2659 * If tags are shared with the admin queue (Apple bug), then 2660 * make sure we only use one IO queue. 2661 */ 2662 if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) 2663 return 1; 2664 return blk_mq_num_possible_queues(0) + dev->nr_write_queues + 2665 dev->nr_poll_queues; 2666 } 2667 2668 static int nvme_setup_io_queues(struct nvme_dev *dev) 2669 { 2670 struct nvme_queue *adminq = &dev->queues[0]; 2671 struct pci_dev *pdev = to_pci_dev(dev->dev); 2672 unsigned int nr_io_queues; 2673 unsigned long size; 2674 int result; 2675 2676 /* 2677 * Sample the module parameters once at reset time so that we have 2678 * stable values to work with. 2679 */ 2680 dev->nr_write_queues = write_queues; 2681 dev->nr_poll_queues = poll_queues; 2682 2683 nr_io_queues = dev->nr_allocated_queues - 1; 2684 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); 2685 if (result < 0) 2686 return result; 2687 2688 if (nr_io_queues == 0) 2689 return 0; 2690 2691 /* 2692 * Free IRQ resources as soon as the NVMEQ_ENABLED bit transitions 2693 * from set to unset. If there is a window before it is truly freed, 2694 * pci_free_irq_vectors() jumping into this window will crash. 2695 * Also take the lock to avoid racing with pci_free_irq_vectors() in 2696 * the nvme_dev_disable() path. 2697 */ 2698 result = nvme_setup_io_queues_trylock(dev); 2699 if (result) 2700 return result; 2701 if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) 2702 pci_free_irq(pdev, 0, adminq); 2703 2704 if (dev->cmb_use_sqes) { 2705 result = nvme_cmb_qdepth(dev, nr_io_queues, 2706 sizeof(struct nvme_command)); 2707 if (result > 0) { 2708 dev->q_depth = result; 2709 dev->ctrl.sqsize = result - 1; 2710 } else { 2711 dev->cmb_use_sqes = false; 2712 } 2713 } 2714 2715 do { 2716 size = db_bar_size(dev, nr_io_queues); 2717 result = nvme_remap_bar(dev, size); 2718 if (!result) 2719 break; 2720 if (!--nr_io_queues) { 2721 result = -ENOMEM; 2722 goto out_unlock; 2723 } 2724 } while (1); 2725 adminq->q_db = dev->dbs; 2726 2727 retry: 2728 /* Deregister the admin queue's interrupt */ 2729 if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) 2730 pci_free_irq(pdev, 0, adminq); 2731 2732 /* 2733 * If we enabled MSI-X early because INTx is not usable, disable it 2734 * again before setting up the full range we need. 2735 */ 2736 pci_free_irq_vectors(pdev); 2737 2738 result = nvme_setup_irqs(dev, nr_io_queues); 2739 if (result <= 0) { 2740 result = -EIO; 2741 goto out_unlock; 2742 } 2743 2744 dev->num_vecs = result; 2745 result = max(result - 1, 1); 2746 dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; 2747 2748 /* 2749 * Should investigate if there's a performance win from allocating 2750 * more queues than interrupt vectors; it might allow the submission 2751 * path to scale better, even if the receive path is limited by the 2752 * number of interrupts.
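 *
 * For reference, with illustrative numbers: if nvme_setup_irqs() above
 * returned 17 vectors (1 for the admin queue plus 16 for I/O) and 4
 * poll queues were reserved, then num_vecs == 17, the non-polled I/O
 * queues are capped at 16, and max_qid == 16 + 4 == 20.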
2753 */ 2754 result = queue_request_irq(adminq); 2755 if (result) 2756 goto out_unlock; 2757 set_bit(NVMEQ_ENABLED, &adminq->flags); 2758 mutex_unlock(&dev->shutdown_lock); 2759 2760 result = nvme_create_io_queues(dev); 2761 if (result || dev->online_queues < 2) 2762 return result; 2763 2764 if (dev->online_queues - 1 < dev->max_qid) { 2765 nr_io_queues = dev->online_queues - 1; 2766 nvme_delete_io_queues(dev); 2767 result = nvme_setup_io_queues_trylock(dev); 2768 if (result) 2769 return result; 2770 nvme_suspend_io_queues(dev); 2771 goto retry; 2772 } 2773 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", 2774 dev->io_queues[HCTX_TYPE_DEFAULT], 2775 dev->io_queues[HCTX_TYPE_READ], 2776 dev->io_queues[HCTX_TYPE_POLL]); 2777 return 0; 2778 out_unlock: 2779 mutex_unlock(&dev->shutdown_lock); 2780 return result; 2781 } 2782 2783 static enum rq_end_io_ret nvme_del_queue_end(struct request *req, 2784 blk_status_t error) 2785 { 2786 struct nvme_queue *nvmeq = req->end_io_data; 2787 2788 blk_mq_free_request(req); 2789 complete(&nvmeq->delete_done); 2790 return RQ_END_IO_NONE; 2791 } 2792 2793 static enum rq_end_io_ret nvme_del_cq_end(struct request *req, 2794 blk_status_t error) 2795 { 2796 struct nvme_queue *nvmeq = req->end_io_data; 2797 2798 if (error) 2799 set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 2800 2801 return nvme_del_queue_end(req, error); 2802 } 2803 2804 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) 2805 { 2806 struct request_queue *q = nvmeq->dev->ctrl.admin_q; 2807 struct request *req; 2808 struct nvme_command cmd = { }; 2809 2810 cmd.delete_queue.opcode = opcode; 2811 cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); 2812 2813 req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT); 2814 if (IS_ERR(req)) 2815 return PTR_ERR(req); 2816 nvme_init_request(req, &cmd); 2817 2818 if (opcode == nvme_admin_delete_cq) 2819 req->end_io = nvme_del_cq_end; 2820 else 2821 req->end_io = nvme_del_queue_end; 2822 req->end_io_data = nvmeq; 2823 2824 init_completion(&nvmeq->delete_done); 2825 blk_execute_rq_nowait(req, false); 2826 return 0; 2827 } 2828 2829 static bool __nvme_delete_io_queues(struct nvme_dev *dev, u8 opcode) 2830 { 2831 int nr_queues = dev->online_queues - 1, sent = 0; 2832 unsigned long timeout; 2833 2834 retry: 2835 timeout = NVME_ADMIN_TIMEOUT; 2836 while (nr_queues > 0) { 2837 if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) 2838 break; 2839 nr_queues--; 2840 sent++; 2841 } 2842 while (sent) { 2843 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; 2844 2845 timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, 2846 timeout); 2847 if (timeout == 0) 2848 return false; 2849 2850 sent--; 2851 if (nr_queues) 2852 goto retry; 2853 } 2854 return true; 2855 } 2856 2857 static void nvme_delete_io_queues(struct nvme_dev *dev) 2858 { 2859 if (__nvme_delete_io_queues(dev, nvme_admin_delete_sq)) 2860 __nvme_delete_io_queues(dev, nvme_admin_delete_cq); 2861 } 2862 2863 static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev) 2864 { 2865 if (dev->io_queues[HCTX_TYPE_POLL]) 2866 return 3; 2867 if (dev->io_queues[HCTX_TYPE_READ]) 2868 return 2; 2869 return 1; 2870 } 2871 2872 static bool nvme_pci_update_nr_queues(struct nvme_dev *dev) 2873 { 2874 if (!dev->ctrl.tagset) { 2875 nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, 2876 nvme_pci_nr_maps(dev), sizeof(struct nvme_iod)); 2877 return true; 2878 } 2879 2880 /* Give up if we are racing with nvme_dev_disable() */ 2881 if 
(!mutex_trylock(&dev->shutdown_lock)) 2882 return false; 2883 2884 /* Check if nvme_dev_disable() has been executed already */ 2885 if (!dev->online_queues) { 2886 mutex_unlock(&dev->shutdown_lock); 2887 return false; 2888 } 2889 2890 blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); 2891 /* free previously allocated queues that are no longer usable */ 2892 nvme_free_queues(dev, dev->online_queues); 2893 mutex_unlock(&dev->shutdown_lock); 2894 return true; 2895 } 2896 2897 static int nvme_pci_enable(struct nvme_dev *dev) 2898 { 2899 int result = -ENOMEM; 2900 struct pci_dev *pdev = to_pci_dev(dev->dev); 2901 unsigned int flags = PCI_IRQ_ALL_TYPES; 2902 2903 if (pci_enable_device_mem(pdev)) 2904 return result; 2905 2906 pci_set_master(pdev); 2907 2908 if (readl(dev->bar + NVME_REG_CSTS) == -1) { 2909 result = -ENODEV; 2910 goto disable; 2911 } 2912 2913 /* 2914 * Some devices and/or platforms don't advertise or work with INTx 2915 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll 2916 * adjust this later. 2917 */ 2918 if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI) 2919 flags &= ~PCI_IRQ_MSI; 2920 result = pci_alloc_irq_vectors(pdev, 1, 1, flags); 2921 if (result < 0) 2922 goto disable; 2923 2924 dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 2925 2926 dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, 2927 io_queue_depth); 2928 dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); 2929 dev->dbs = dev->bar + 4096; 2930 2931 /* 2932 * Some Apple controllers require a non-standard SQE size. 2933 * Interestingly they also seem to ignore the CC:IOSQES register 2934 * so we don't bother updating it here. 2935 */ 2936 if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES) 2937 dev->io_sqes = 7; 2938 else 2939 dev->io_sqes = NVME_NVM_IOSQES; 2940 2941 if (dev->ctrl.quirks & NVME_QUIRK_QDEPTH_ONE) { 2942 dev->q_depth = 2; 2943 } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && 2944 (pdev->device == 0xa821 || pdev->device == 0xa822) && 2945 NVME_CAP_MQES(dev->ctrl.cap) == 0) { 2946 dev->q_depth = 64; 2947 dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " 2948 "set queue depth=%u\n", dev->q_depth); 2949 } 2950 2951 /* 2952 * Controllers with the shared tags quirk need the IO queue to be 2953 * big enough so that we get 32 tags for the admin queue 2954 */ 2955 if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) && 2956 (dev->q_depth < (NVME_AQ_DEPTH + 2))) { 2957 dev->q_depth = NVME_AQ_DEPTH + 2; 2958 dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", 2959 dev->q_depth); 2960 } 2961 dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ 2962 2963 nvme_map_cmb(dev); 2964 2965 pci_save_state(pdev); 2966 2967 result = nvme_pci_configure_admin_queue(dev); 2968 if (result) 2969 goto free_irq; 2970 return result; 2971 2972 free_irq: 2973 pci_free_irq_vectors(pdev); 2974 disable: 2975 pci_disable_device(pdev); 2976 return result; 2977 } 2978 2979 static void nvme_dev_unmap(struct nvme_dev *dev) 2980 { 2981 if (dev->bar) 2982 iounmap(dev->bar); 2983 pci_release_mem_regions(to_pci_dev(dev->dev)); 2984 } 2985 2986 static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev) 2987 { 2988 struct pci_dev *pdev = to_pci_dev(dev->dev); 2989 u32 csts; 2990 2991 if (!pci_is_enabled(pdev) || !pci_device_is_present(pdev)) 2992 return true; 2993 if (pdev->error_state != pci_channel_io_normal) 2994 return true; 2995 2996 csts = readl(dev->bar + NVME_REG_CSTS); 2997 return (csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY); 2998 } 2999 3000 static void 
nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 3001 { 3002 enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl); 3003 struct pci_dev *pdev = to_pci_dev(dev->dev); 3004 bool dead; 3005 3006 mutex_lock(&dev->shutdown_lock); 3007 dead = nvme_pci_ctrl_is_dead(dev); 3008 if (state == NVME_CTRL_LIVE || state == NVME_CTRL_RESETTING) { 3009 if (pci_is_enabled(pdev)) 3010 nvme_start_freeze(&dev->ctrl); 3011 /* 3012 * Give the controller a chance to complete all entered requests 3013 * if doing a safe shutdown. 3014 */ 3015 if (!dead && shutdown) 3016 nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); 3017 } 3018 3019 nvme_quiesce_io_queues(&dev->ctrl); 3020 3021 if (!dead && dev->ctrl.queue_count > 0) { 3022 nvme_delete_io_queues(dev); 3023 nvme_disable_ctrl(&dev->ctrl, shutdown); 3024 nvme_poll_irqdisable(&dev->queues[0]); 3025 } 3026 nvme_suspend_io_queues(dev); 3027 nvme_suspend_queue(dev, 0); 3028 pci_free_irq_vectors(pdev); 3029 if (pci_is_enabled(pdev)) 3030 pci_disable_device(pdev); 3031 nvme_reap_pending_cqes(dev); 3032 3033 nvme_cancel_tagset(&dev->ctrl); 3034 nvme_cancel_admin_tagset(&dev->ctrl); 3035 3036 /* 3037 * The driver will not be starting up queues again if shutting down so 3038 * must flush all entered requests to their failed completion to avoid 3039 * deadlocking blk-mq hot-cpu notifier. 3040 */ 3041 if (shutdown) { 3042 nvme_unquiesce_io_queues(&dev->ctrl); 3043 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) 3044 nvme_unquiesce_admin_queue(&dev->ctrl); 3045 } 3046 mutex_unlock(&dev->shutdown_lock); 3047 } 3048 3049 static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) 3050 { 3051 if (!nvme_wait_reset(&dev->ctrl)) 3052 return -EBUSY; 3053 nvme_dev_disable(dev, shutdown); 3054 return 0; 3055 } 3056 3057 static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev) 3058 { 3059 size_t alloc_size = sizeof(struct nvme_dma_vec) * NVME_MAX_SEGS; 3060 3061 dev->dmavec_mempool = mempool_create_node(1, 3062 mempool_kmalloc, mempool_kfree, 3063 (void *)alloc_size, GFP_KERNEL, 3064 dev_to_node(dev->dev)); 3065 if (!dev->dmavec_mempool) 3066 return -ENOMEM; 3067 return 0; 3068 } 3069 3070 static void nvme_free_tagset(struct nvme_dev *dev) 3071 { 3072 if (dev->tagset.tags) 3073 nvme_remove_io_tag_set(&dev->ctrl); 3074 dev->ctrl.tagset = NULL; 3075 } 3076 3077 /* pairs with nvme_pci_alloc_dev */ 3078 static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) 3079 { 3080 struct nvme_dev *dev = to_nvme_dev(ctrl); 3081 3082 nvme_free_tagset(dev); 3083 put_device(dev->dev); 3084 kfree(dev->queues); 3085 kfree(dev); 3086 } 3087 3088 static void nvme_reset_work(struct work_struct *work) 3089 { 3090 struct nvme_dev *dev = 3091 container_of(work, struct nvme_dev, ctrl.reset_work); 3092 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); 3093 int result; 3094 3095 if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) { 3096 dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n", 3097 dev->ctrl.state); 3098 result = -ENODEV; 3099 goto out; 3100 } 3101 3102 /* 3103 * If we're called to reset a live controller first shut it down before 3104 * moving on. 
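 *
 * The overall flow below is roughly: disable the (possibly live)
 * controller, re-enable the PCI function and admin queue, move to
 * CONNECTING, re-establish the HMB and I/O queues, and finally mark the
 * controller LIVE; any failure drops to the out label, which marks the
 * controller DELETING and then DEAD.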
3105 */ 3106 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 3107 nvme_dev_disable(dev, false); 3108 nvme_sync_queues(&dev->ctrl); 3109 3110 mutex_lock(&dev->shutdown_lock); 3111 result = nvme_pci_enable(dev); 3112 if (result) 3113 goto out_unlock; 3114 nvme_unquiesce_admin_queue(&dev->ctrl); 3115 mutex_unlock(&dev->shutdown_lock); 3116 3117 /* 3118 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the 3119 * initializing procedure here. 3120 */ 3121 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { 3122 dev_warn(dev->ctrl.device, 3123 "failed to mark controller CONNECTING\n"); 3124 result = -EBUSY; 3125 goto out; 3126 } 3127 3128 result = nvme_init_ctrl_finish(&dev->ctrl, was_suspend); 3129 if (result) 3130 goto out; 3131 3132 if (nvme_ctrl_meta_sgl_supported(&dev->ctrl)) 3133 dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS; 3134 else 3135 dev->ctrl.max_integrity_segments = 1; 3136 3137 nvme_dbbuf_dma_alloc(dev); 3138 3139 result = nvme_setup_host_mem(dev); 3140 if (result < 0) 3141 goto out; 3142 3143 nvme_update_attrs(dev); 3144 3145 result = nvme_setup_io_queues(dev); 3146 if (result) 3147 goto out; 3148 3149 /* 3150 * Freeze and update the number of I/O queues as those might have 3151 * changed. If there are no I/O queues left after this reset, keep the 3152 * controller around but remove all namespaces. 3153 */ 3154 if (dev->online_queues > 1) { 3155 nvme_dbbuf_set(dev); 3156 nvme_unquiesce_io_queues(&dev->ctrl); 3157 nvme_wait_freeze(&dev->ctrl); 3158 if (!nvme_pci_update_nr_queues(dev)) 3159 goto out; 3160 nvme_unfreeze(&dev->ctrl); 3161 } else { 3162 dev_warn(dev->ctrl.device, "IO queues lost\n"); 3163 nvme_mark_namespaces_dead(&dev->ctrl); 3164 nvme_unquiesce_io_queues(&dev->ctrl); 3165 nvme_remove_namespaces(&dev->ctrl); 3166 nvme_free_tagset(dev); 3167 } 3168 3169 /* 3170 * If only admin queue live, keep it to do further investigation or 3171 * recovery. 3172 */ 3173 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { 3174 dev_warn(dev->ctrl.device, 3175 "failed to mark controller live state\n"); 3176 result = -ENODEV; 3177 goto out; 3178 } 3179 3180 nvme_start_ctrl(&dev->ctrl); 3181 return; 3182 3183 out_unlock: 3184 mutex_unlock(&dev->shutdown_lock); 3185 out: 3186 /* 3187 * Set state to deleting now to avoid blocking nvme_wait_reset(), which 3188 * may be holding this pci_dev's device lock. 
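 *
 * Marking the namespaces dead and then unquiescing the I/O queues below
 * lets any requests queued while the reset ran complete with an error
 * instead of hanging on a controller that will not come back.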
3189 */ 3190 dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n", 3191 result); 3192 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 3193 nvme_dev_disable(dev, true); 3194 nvme_sync_queues(&dev->ctrl); 3195 nvme_mark_namespaces_dead(&dev->ctrl); 3196 nvme_unquiesce_io_queues(&dev->ctrl); 3197 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); 3198 } 3199 3200 static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) 3201 { 3202 *val = readl(to_nvme_dev(ctrl)->bar + off); 3203 return 0; 3204 } 3205 3206 static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) 3207 { 3208 writel(val, to_nvme_dev(ctrl)->bar + off); 3209 return 0; 3210 } 3211 3212 static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) 3213 { 3214 *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off); 3215 return 0; 3216 } 3217 3218 static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) 3219 { 3220 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); 3221 3222 return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); 3223 } 3224 3225 static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl) 3226 { 3227 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); 3228 struct nvme_subsystem *subsys = ctrl->subsys; 3229 3230 dev_err(ctrl->device, 3231 "VID:DID %04x:%04x model:%.*s firmware:%.*s\n", 3232 pdev->vendor, pdev->device, 3233 nvme_strlen(subsys->model, sizeof(subsys->model)), 3234 subsys->model, nvme_strlen(subsys->firmware_rev, 3235 sizeof(subsys->firmware_rev)), 3236 subsys->firmware_rev); 3237 } 3238 3239 static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl) 3240 { 3241 struct nvme_dev *dev = to_nvme_dev(ctrl); 3242 3243 return dma_pci_p2pdma_supported(dev->dev); 3244 } 3245 3246 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 3247 .name = "pcie", 3248 .module = THIS_MODULE, 3249 .flags = NVME_F_METADATA_SUPPORTED, 3250 .dev_attr_groups = nvme_pci_dev_attr_groups, 3251 .reg_read32 = nvme_pci_reg_read32, 3252 .reg_write32 = nvme_pci_reg_write32, 3253 .reg_read64 = nvme_pci_reg_read64, 3254 .free_ctrl = nvme_pci_free_ctrl, 3255 .submit_async_event = nvme_pci_submit_async_event, 3256 .subsystem_reset = nvme_pci_subsystem_reset, 3257 .get_address = nvme_pci_get_address, 3258 .print_device_info = nvme_pci_print_device_info, 3259 .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma, 3260 }; 3261 3262 static int nvme_dev_map(struct nvme_dev *dev) 3263 { 3264 struct pci_dev *pdev = to_pci_dev(dev->dev); 3265 3266 if (pci_request_mem_regions(pdev, "nvme")) 3267 return -ENODEV; 3268 3269 if (nvme_remap_bar(dev, NVME_REG_DBS + 4096)) 3270 goto release; 3271 3272 return 0; 3273 release: 3274 pci_release_mem_regions(pdev); 3275 return -ENODEV; 3276 } 3277 3278 static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) 3279 { 3280 if (pdev->vendor == 0x144d && pdev->device == 0xa802) { 3281 /* 3282 * Several Samsung devices seem to drop off the PCIe bus 3283 * randomly when APST is on and uses the deepest sleep state. 3284 * This has been observed on a Samsung "SM951 NVMe SAMSUNG 3285 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD 3286 * 950 PRO 256GB", but it seems to be restricted to two Dell 3287 * laptops. 
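 *
 * NVME_QUIRK_NO_DEEPEST_PS keeps APST enabled but avoids the deepest
 * non-operational power state, which appears to be enough to keep
 * these drives on the bus.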
3288 */ 3289 if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") && 3290 (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || 3291 dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) 3292 return NVME_QUIRK_NO_DEEPEST_PS; 3293 } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { 3294 /* 3295 * Samsung SSD 960 EVO drops off the PCIe bus after system 3296 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as 3297 * within few minutes after bootup on a Coffee Lake board - 3298 * ASUS PRIME Z370-A 3299 */ 3300 if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && 3301 (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || 3302 dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) 3303 return NVME_QUIRK_NO_APST; 3304 } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 || 3305 pdev->device == 0xa808 || pdev->device == 0xa809)) || 3306 (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) { 3307 /* 3308 * Forcing to use host managed nvme power settings for 3309 * lowest idle power with quick resume latency on 3310 * Samsung and Toshiba SSDs based on suspend behavior 3311 * on Coffee Lake board for LENOVO C640 3312 */ 3313 if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) && 3314 dmi_match(DMI_BOARD_NAME, "LNVNB161216")) 3315 return NVME_QUIRK_SIMPLE_SUSPEND; 3316 } else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 || 3317 pdev->device == 0x500f)) { 3318 /* 3319 * Exclude some Kingston NV1 and A2000 devices from 3320 * NVME_QUIRK_SIMPLE_SUSPEND. Do a full suspend to save a 3321 * lot of energy with s2idle sleep on some TUXEDO platforms. 3322 */ 3323 if (dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") || 3324 dmi_match(DMI_BOARD_NAME, "NS5x_7xAU") || 3325 dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") || 3326 dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1")) 3327 return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND; 3328 } else if (pdev->vendor == 0x144d && pdev->device == 0xa80d) { 3329 /* 3330 * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND 3331 * because of high power consumption (> 2 Watt) in s2idle 3332 * sleep. Only some boards with Intel CPU are affected. 3333 * (Note for testing: Samsung 990 Evo Plus has same PCI ID) 3334 */ 3335 if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") || 3336 dmi_match(DMI_BOARD_NAME, "GMxPXxx") || 3337 dmi_match(DMI_BOARD_NAME, "GXxMRXx") || 3338 dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") || 3339 dmi_match(DMI_BOARD_NAME, "PH4PG31") || 3340 dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") || 3341 dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71")) 3342 return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND; 3343 } 3344 3345 /* 3346 * NVMe SSD drops off the PCIe bus after system idle 3347 * for 10 hours on a Lenovo N60z board. 
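 *
 * Unlike NVME_QUIRK_NO_DEEPEST_PS above, NVME_QUIRK_NO_APST disables
 * autonomous power state transitions for the device entirely.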
3348 */ 3349 if (dmi_match(DMI_BOARD_NAME, "LXKT-ZXEG-N6")) 3350 return NVME_QUIRK_NO_APST; 3351 3352 return 0; 3353 } 3354 3355 static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, 3356 const struct pci_device_id *id) 3357 { 3358 unsigned long quirks = id->driver_data; 3359 int node = dev_to_node(&pdev->dev); 3360 struct nvme_dev *dev; 3361 int ret = -ENOMEM; 3362 3363 dev = kzalloc_node(struct_size(dev, descriptor_pools, nr_node_ids), 3364 GFP_KERNEL, node); 3365 if (!dev) 3366 return ERR_PTR(-ENOMEM); 3367 INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); 3368 mutex_init(&dev->shutdown_lock); 3369 3370 dev->nr_write_queues = write_queues; 3371 dev->nr_poll_queues = poll_queues; 3372 dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1; 3373 dev->queues = kcalloc_node(dev->nr_allocated_queues, 3374 sizeof(struct nvme_queue), GFP_KERNEL, node); 3375 if (!dev->queues) 3376 goto out_free_dev; 3377 3378 dev->dev = get_device(&pdev->dev); 3379 3380 quirks |= check_vendor_combination_bug(pdev); 3381 if (!noacpi && 3382 !(quirks & NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND) && 3383 acpi_storage_d3(&pdev->dev)) { 3384 /* 3385 * Some systems use a bios work around to ask for D3 on 3386 * platforms that support kernel managed suspend. 3387 */ 3388 dev_info(&pdev->dev, 3389 "platform quirk: setting simple suspend\n"); 3390 quirks |= NVME_QUIRK_SIMPLE_SUSPEND; 3391 } 3392 ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, 3393 quirks); 3394 if (ret) 3395 goto out_put_device; 3396 3397 if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48) 3398 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); 3399 else 3400 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3401 dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1); 3402 dma_set_max_seg_size(&pdev->dev, 0xffffffff); 3403 3404 /* 3405 * Limit the max command size to prevent iod->sg allocations going 3406 * over a single page. 3407 */ 3408 dev->ctrl.max_hw_sectors = min_t(u32, 3409 NVME_MAX_BYTES >> SECTOR_SHIFT, 3410 dma_opt_mapping_size(&pdev->dev) >> 9); 3411 dev->ctrl.max_segments = NVME_MAX_SEGS; 3412 dev->ctrl.max_integrity_segments = 1; 3413 return dev; 3414 3415 out_put_device: 3416 put_device(dev->dev); 3417 kfree(dev->queues); 3418 out_free_dev: 3419 kfree(dev); 3420 return ERR_PTR(ret); 3421 } 3422 3423 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 3424 { 3425 struct nvme_dev *dev; 3426 int result = -ENOMEM; 3427 3428 dev = nvme_pci_alloc_dev(pdev, id); 3429 if (IS_ERR(dev)) 3430 return PTR_ERR(dev); 3431 3432 result = nvme_add_ctrl(&dev->ctrl); 3433 if (result) 3434 goto out_put_ctrl; 3435 3436 result = nvme_dev_map(dev); 3437 if (result) 3438 goto out_uninit_ctrl; 3439 3440 result = nvme_pci_alloc_iod_mempool(dev); 3441 if (result) 3442 goto out_dev_unmap; 3443 3444 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 3445 3446 result = nvme_pci_enable(dev); 3447 if (result) 3448 goto out_release_iod_mempool; 3449 3450 result = nvme_alloc_admin_tag_set(&dev->ctrl, &dev->admin_tagset, 3451 &nvme_mq_admin_ops, sizeof(struct nvme_iod)); 3452 if (result) 3453 goto out_disable; 3454 3455 /* 3456 * Mark the controller as connecting before sending admin commands to 3457 * allow the timeout handler to do the right thing. 
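 *
 * Roughly, the timeout handler checks the controller state: while the
 * controller is still CONNECTING a timed-out admin command disables the
 * device and fails the command rather than scheduling yet another
 * reset, which avoids reset loops during a broken bring-up.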
3458 */ 3459 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { 3460 dev_warn(dev->ctrl.device, 3461 "failed to mark controller CONNECTING\n"); 3462 result = -EBUSY; 3463 goto out_disable; 3464 } 3465 3466 result = nvme_init_ctrl_finish(&dev->ctrl, false); 3467 if (result) 3468 goto out_disable; 3469 3470 if (nvme_ctrl_meta_sgl_supported(&dev->ctrl)) 3471 dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS; 3472 else 3473 dev->ctrl.max_integrity_segments = 1; 3474 3475 nvme_dbbuf_dma_alloc(dev); 3476 3477 result = nvme_setup_host_mem(dev); 3478 if (result < 0) 3479 goto out_disable; 3480 3481 nvme_update_attrs(dev); 3482 3483 result = nvme_setup_io_queues(dev); 3484 if (result) 3485 goto out_disable; 3486 3487 if (dev->online_queues > 1) { 3488 nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, 3489 nvme_pci_nr_maps(dev), sizeof(struct nvme_iod)); 3490 nvme_dbbuf_set(dev); 3491 } 3492 3493 if (!dev->ctrl.tagset) 3494 dev_warn(dev->ctrl.device, "IO queues not created\n"); 3495 3496 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { 3497 dev_warn(dev->ctrl.device, 3498 "failed to mark controller live state\n"); 3499 result = -ENODEV; 3500 goto out_disable; 3501 } 3502 3503 pci_set_drvdata(pdev, dev); 3504 3505 nvme_start_ctrl(&dev->ctrl); 3506 nvme_put_ctrl(&dev->ctrl); 3507 flush_work(&dev->ctrl.scan_work); 3508 return 0; 3509 3510 out_disable: 3511 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 3512 nvme_dev_disable(dev, true); 3513 nvme_free_host_mem(dev); 3514 nvme_dev_remove_admin(dev); 3515 nvme_dbbuf_dma_free(dev); 3516 nvme_free_queues(dev, 0); 3517 out_release_iod_mempool: 3518 mempool_destroy(dev->dmavec_mempool); 3519 out_dev_unmap: 3520 nvme_dev_unmap(dev); 3521 out_uninit_ctrl: 3522 nvme_uninit_ctrl(&dev->ctrl); 3523 out_put_ctrl: 3524 nvme_put_ctrl(&dev->ctrl); 3525 return result; 3526 } 3527 3528 static void nvme_reset_prepare(struct pci_dev *pdev) 3529 { 3530 struct nvme_dev *dev = pci_get_drvdata(pdev); 3531 3532 /* 3533 * We don't need to check the return value from waiting for the reset 3534 * state as pci_dev device lock is held, making it impossible to race 3535 * with ->remove(). 3536 */ 3537 nvme_disable_prepare_reset(dev, false); 3538 nvme_sync_queues(&dev->ctrl); 3539 } 3540 3541 static void nvme_reset_done(struct pci_dev *pdev) 3542 { 3543 struct nvme_dev *dev = pci_get_drvdata(pdev); 3544 3545 if (!nvme_try_sched_reset(&dev->ctrl)) 3546 flush_work(&dev->ctrl.reset_work); 3547 } 3548 3549 static void nvme_shutdown(struct pci_dev *pdev) 3550 { 3551 struct nvme_dev *dev = pci_get_drvdata(pdev); 3552 3553 nvme_disable_prepare_reset(dev, true); 3554 } 3555 3556 /* 3557 * The driver's remove may be called on a device in a partially initialized 3558 * state. This function must not have any dependencies on the device state in 3559 * order to proceed. 
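 *
 * For surprise removal (!pci_device_is_present() below), the controller
 * is marked DEAD and disabled up front so that the namespace removal
 * that follows does not block on I/O to hardware that is already gone.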
3560 */ 3561 static void nvme_remove(struct pci_dev *pdev) 3562 { 3563 struct nvme_dev *dev = pci_get_drvdata(pdev); 3564 3565 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 3566 pci_set_drvdata(pdev, NULL); 3567 3568 if (!pci_device_is_present(pdev)) { 3569 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); 3570 nvme_dev_disable(dev, true); 3571 } 3572 3573 flush_work(&dev->ctrl.reset_work); 3574 nvme_stop_ctrl(&dev->ctrl); 3575 nvme_remove_namespaces(&dev->ctrl); 3576 nvme_dev_disable(dev, true); 3577 nvme_free_host_mem(dev); 3578 nvme_dev_remove_admin(dev); 3579 nvme_dbbuf_dma_free(dev); 3580 nvme_free_queues(dev, 0); 3581 mempool_destroy(dev->dmavec_mempool); 3582 nvme_release_descriptor_pools(dev); 3583 nvme_dev_unmap(dev); 3584 nvme_uninit_ctrl(&dev->ctrl); 3585 } 3586 3587 #ifdef CONFIG_PM_SLEEP 3588 static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps) 3589 { 3590 return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps); 3591 } 3592 3593 static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps) 3594 { 3595 return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL); 3596 } 3597 3598 static int nvme_resume(struct device *dev) 3599 { 3600 struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 3601 struct nvme_ctrl *ctrl = &ndev->ctrl; 3602 3603 if (ndev->last_ps == U32_MAX || 3604 nvme_set_power_state(ctrl, ndev->last_ps) != 0) 3605 goto reset; 3606 if (ctrl->hmpre && nvme_setup_host_mem(ndev)) 3607 goto reset; 3608 3609 return 0; 3610 reset: 3611 return nvme_try_sched_reset(ctrl); 3612 } 3613 3614 static int nvme_suspend(struct device *dev) 3615 { 3616 struct pci_dev *pdev = to_pci_dev(dev); 3617 struct nvme_dev *ndev = pci_get_drvdata(pdev); 3618 struct nvme_ctrl *ctrl = &ndev->ctrl; 3619 int ret = -EBUSY; 3620 3621 ndev->last_ps = U32_MAX; 3622 3623 /* 3624 * The platform does not remove power for a kernel managed suspend so 3625 * use host managed nvme power settings for lowest idle power if 3626 * possible. This should have quicker resume latency than a full device 3627 * shutdown. But if the firmware is involved after the suspend or the 3628 * device does not support any non-default power states, shut down the 3629 * device fully. 3630 * 3631 * If ASPM is not enabled for the device, shut down the device and allow 3632 * the PCI bus layer to put it into D3 in order to take the PCIe link 3633 * down, so as to allow the platform to achieve its minimum low-power 3634 * state (which may not be possible if the link is up). 3635 */ 3636 if (pm_suspend_via_firmware() || !ctrl->npss || 3637 !pcie_aspm_enabled(pdev) || 3638 (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) 3639 return nvme_disable_prepare_reset(ndev, true); 3640 3641 nvme_start_freeze(ctrl); 3642 nvme_wait_freeze(ctrl); 3643 nvme_sync_queues(ctrl); 3644 3645 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) 3646 goto unfreeze; 3647 3648 /* 3649 * Host memory access may not be successful in a system suspend state, 3650 * but the specification allows the controller to access memory in a 3651 * non-operational power state. 3652 */ 3653 if (ndev->hmb) { 3654 ret = nvme_set_host_mem(ndev, 0); 3655 if (ret < 0) 3656 goto unfreeze; 3657 } 3658 3659 ret = nvme_get_power_state(ctrl, &ndev->last_ps); 3660 if (ret < 0) 3661 goto unfreeze; 3662 3663 /* 3664 * A saved state prevents pci pm from generically controlling the 3665 * device's power. If we're using protocol specific settings, we don't 3666 * want pci interfering. 
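 *
 * ctrl->npss is the 0's based index of the last power state the drive
 * reports, so the nvme_set_power_state() call below requests the
 * deepest (lowest power) state the device supports.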
3667 */ 3668 pci_save_state(pdev); 3669 3670 ret = nvme_set_power_state(ctrl, ctrl->npss); 3671 if (ret < 0) 3672 goto unfreeze; 3673 3674 if (ret) { 3675 /* discard the saved state */ 3676 pci_load_saved_state(pdev, NULL); 3677 3678 /* 3679 * Clearing npss forces a controller reset on resume. The 3680 * correct value will be rediscovered then. 3681 */ 3682 ret = nvme_disable_prepare_reset(ndev, true); 3683 ctrl->npss = 0; 3684 } 3685 unfreeze: 3686 nvme_unfreeze(ctrl); 3687 return ret; 3688 } 3689 3690 static int nvme_simple_suspend(struct device *dev) 3691 { 3692 struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 3693 3694 return nvme_disable_prepare_reset(ndev, true); 3695 } 3696 3697 static int nvme_simple_resume(struct device *dev) 3698 { 3699 struct pci_dev *pdev = to_pci_dev(dev); 3700 struct nvme_dev *ndev = pci_get_drvdata(pdev); 3701 3702 return nvme_try_sched_reset(&ndev->ctrl); 3703 } 3704 3705 static const struct dev_pm_ops nvme_dev_pm_ops = { 3706 .suspend = nvme_suspend, 3707 .resume = nvme_resume, 3708 .freeze = nvme_simple_suspend, 3709 .thaw = nvme_simple_resume, 3710 .poweroff = nvme_simple_suspend, 3711 .restore = nvme_simple_resume, 3712 }; 3713 #endif /* CONFIG_PM_SLEEP */ 3714 3715 static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, 3716 pci_channel_state_t state) 3717 { 3718 struct nvme_dev *dev = pci_get_drvdata(pdev); 3719 3720 /* 3721 * A frozen channel requires a reset. When detected, this method will 3722 * shutdown the controller to quiesce. The controller will be restarted 3723 * after the slot reset through driver's slot_reset callback. 3724 */ 3725 switch (state) { 3726 case pci_channel_io_normal: 3727 return PCI_ERS_RESULT_CAN_RECOVER; 3728 case pci_channel_io_frozen: 3729 dev_warn(dev->ctrl.device, 3730 "frozen state error detected, reset controller\n"); 3731 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) { 3732 nvme_dev_disable(dev, true); 3733 return PCI_ERS_RESULT_DISCONNECT; 3734 } 3735 nvme_dev_disable(dev, false); 3736 return PCI_ERS_RESULT_NEED_RESET; 3737 case pci_channel_io_perm_failure: 3738 dev_warn(dev->ctrl.device, 3739 "failure state error detected, request disconnect\n"); 3740 return PCI_ERS_RESULT_DISCONNECT; 3741 } 3742 return PCI_ERS_RESULT_NEED_RESET; 3743 } 3744 3745 static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) 3746 { 3747 struct nvme_dev *dev = pci_get_drvdata(pdev); 3748 3749 dev_info(dev->ctrl.device, "restart after slot reset\n"); 3750 pci_restore_state(pdev); 3751 if (nvme_try_sched_reset(&dev->ctrl)) 3752 nvme_unquiesce_io_queues(&dev->ctrl); 3753 return PCI_ERS_RESULT_RECOVERED; 3754 } 3755 3756 static void nvme_error_resume(struct pci_dev *pdev) 3757 { 3758 struct nvme_dev *dev = pci_get_drvdata(pdev); 3759 3760 flush_work(&dev->ctrl.reset_work); 3761 } 3762 3763 static const struct pci_error_handlers nvme_err_handler = { 3764 .error_detected = nvme_error_detected, 3765 .slot_reset = nvme_slot_reset, 3766 .resume = nvme_error_resume, 3767 .reset_prepare = nvme_reset_prepare, 3768 .reset_done = nvme_reset_done, 3769 }; 3770 3771 static const struct pci_device_id nvme_id_table[] = { 3772 { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */ 3773 .driver_data = NVME_QUIRK_STRIPE_SIZE | 3774 NVME_QUIRK_DEALLOCATE_ZEROES, }, 3775 { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */ 3776 .driver_data = NVME_QUIRK_STRIPE_SIZE | 3777 NVME_QUIRK_DEALLOCATE_ZEROES, }, 3778 { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */ 3779 .driver_data = NVME_QUIRK_STRIPE_SIZE | 3780 
NVME_QUIRK_IGNORE_DEV_SUBNQN | 3781 NVME_QUIRK_BOGUS_NID, }, 3782 { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */ 3783 .driver_data = NVME_QUIRK_STRIPE_SIZE, }, 3784 { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ 3785 .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 3786 NVME_QUIRK_MEDIUM_PRIO_SQ | 3787 NVME_QUIRK_NO_TEMP_THRESH_CHANGE | 3788 NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3789 { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ 3790 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3791 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ 3792 .driver_data = NVME_QUIRK_IDENTIFY_CNS | 3793 NVME_QUIRK_DISABLE_WRITE_ZEROES | 3794 NVME_QUIRK_BOGUS_NID, }, 3795 { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */ 3796 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3797 { PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */ 3798 .driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, }, 3799 { PCI_DEVICE(0x126f, 0x1001), /* Silicon Motion generic */ 3800 .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 3801 NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3802 { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */ 3803 .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 3804 NVME_QUIRK_BOGUS_NID, }, 3805 { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ 3806 .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | 3807 NVME_QUIRK_BOGUS_NID, }, 3808 { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ 3809 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 3810 NVME_QUIRK_NO_NS_DESC_LIST, }, 3811 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ 3812 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3813 { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ 3814 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3815 { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ 3816 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3817 { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ 3818 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3819 { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ 3820 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 3821 NVME_QUIRK_DISABLE_WRITE_ZEROES| 3822 NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3823 { PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */ 3824 .driver_data = NVME_QUIRK_BROKEN_MSI }, 3825 { PCI_DEVICE(0x15b7, 0x5009), /* Sandisk SN550 */ 3826 .driver_data = NVME_QUIRK_BROKEN_MSI | 3827 NVME_QUIRK_NO_DEEPEST_PS }, 3828 { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */ 3829 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3830 { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ 3831 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | 3832 NVME_QUIRK_BOGUS_NID, }, 3833 { PCI_DEVICE(0x1987, 0x5019), /* phison E19 */ 3834 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3835 { PCI_DEVICE(0x1987, 0x5021), /* Phison E21 */ 3836 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3837 { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ 3838 .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | 3839 NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3840 { PCI_DEVICE(0x1cc1, 0x33f8), /* ADATA IM2P33F8ABR1 1 TB */ 3841 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3842 { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ 3843 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | 3844 NVME_QUIRK_BOGUS_NID, }, 3845 { PCI_DEVICE(0x10ec, 0x5763), /* ADATA SX6000PNP */ 3846 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3847 { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ 3848 .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 3849 NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3850 { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD 
		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN },
	{ PCI_DEVICE(0x1344, 0x6001),	/* Micron Nitro NVMe */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1c5c, 0x1504),	/* SK Hynix PC400 */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x1c5c, 0x174a),	/* SK Hynix P31 SSD */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1c5c, 0x1D59),	/* SK Hynix BC901 */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x15b7, 0x2001),	/* Sandisk Skyhawk */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x1d97, 0x2263),	/* SPCC */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x144d, 0xa80b),	/* Samsung PM9B1 256G and 512G */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES |
				NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x144d, 0xa809),	/* Samsung MZALQ256HBJD 256G */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x144d, 0xa802),	/* Samsung SM953 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1cc4, 0x6303),	/* UMIS RPJTJ512MGE1QDY 512G */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x1cc4, 0x6302),	/* UMIS RPJTJ256MGE1QDY 256G */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x2646, 0x2262),	/* KINGSTON SKC2000 NVMe SSD */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
	{ PCI_DEVICE(0x2646, 0x2263),	/* KINGSTON A2000 NVMe SSD */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
	{ PCI_DEVICE(0x2646, 0x5013),	/* Kingston KC3000, Kingston FURY Renegade */
		.driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, },
	{ PCI_DEVICE(0x2646, 0x5018),	/* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x2646, 0x5016),	/* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x2646, 0x501A),	/* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x2646, 0x501B),	/* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x2646, 0x501E),	/* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x1f40, 0x1202),	/* Netac Technologies Co. NV3000 NVMe SSD */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1f40, 0x5236),	/* Netac Technologies Co. NV7000 NVMe SSD */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1e4B, 0x1001),	/* MAXIO MAP1001 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1e4B, 0x1002),	/* MAXIO MAP1002 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1e4B, 0x1202),	/* MAXIO MAP1202 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1e4B, 0x1602),	/* MAXIO MAP1602 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1cc1, 0x5350),	/* ADATA XPG GAMMIX S50 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1dbe, 0x5216),	/* Acer/INNOGRIT FA100/5216 NVMe SSD */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1dbe, 0x5236),	/* ADATA XPG GAMMIX S70 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1e49, 0x0021),	/* ZHITAI TiPro5000 NVMe SSD */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
	{ PCI_DEVICE(0x1e49, 0x0041),	/* ZHITAI TiPro7000 NVMe SSD */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
	{ PCI_DEVICE(0x025e, 0xf1ac),	/* SOLIDIGM P44 pro SSDPFKKW020X7 */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
	{ PCI_DEVICE(0xc0a9, 0x540a),	/* Crucial P2 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1d97, 0x2263),	/* Lexar NM610 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1d97, 0x1d97),	/* Lexar NM620 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1d97, 0x2269),	/* Lexar NM760 */
		.driver_data = NVME_QUIRK_BOGUS_NID |
				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
	{ PCI_DEVICE(0x10ec, 0x5763),	/* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1e4b, 0x1602),	/* HS-SSD-FUTURE 2048G */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x10ec, 0x5765),	/* TEAMGROUP MP33 2TB SSD */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
		/*
		 * Fix for the Apple controller found in the MacBook8,1 and
		 * some MacBook7,1 to avoid controller resets and data loss.
		 */
		.driver_data = NVME_QUIRK_SINGLE_VECTOR |
				NVME_QUIRK_QDEPTH_ONE },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
		.driver_data = NVME_QUIRK_SINGLE_VECTOR |
				NVME_QUIRK_128_BYTES_SQES |
				NVME_QUIRK_SHARED_TAGS |
				NVME_QUIRK_SKIP_CID_GEN |
				NVME_QUIRK_IDENTIFY_CNS },
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= nvme_remove,
	.shutdown	= nvme_shutdown,
	.driver		= {
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
#ifdef CONFIG_PM_SLEEP
		.pm		= &nvme_dev_pm_ops,
#endif
	},
	.sriov_configure = pci_sriov_configure_simple,
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	/*
	 * These admin commands are built directly into submission queue
	 * entries, which are 64 bytes wide, so their layout must match
	 * that size exactly.
	 */
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);

	return pci_register_driver(&nvme_driver);
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	flush_workqueue(nvme_wq);
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
MODULE_DESCRIPTION("NVMe host PCIe transport driver");
module_init(nvme_init);
module_exit(nvme_exit);