// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/acpi.h>
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kstrtox.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/pci.h>
#include <linux/suspend.h>
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>

#include "trace.h"
#include "nvme.h"

#define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
#define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))

#define SGES_PER_PAGE	(NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))

/*
 * These can be higher, but we need to ensure that any command doesn't
 * require an sg allocation that needs more than a page of data.
 */
#define NVME_MAX_KB_SZ		8192
#define NVME_MAX_SEGS		128
#define NVME_MAX_META_SEGS	15
#define NVME_MAX_NR_ALLOCATIONS	5

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0444);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0444);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static unsigned int max_host_mem_size_mb = 128;
module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");

static unsigned int sgl_threshold = SZ_32K;
module_param(sgl_threshold, uint, 0644);
MODULE_PARM_DESC(sgl_threshold,
	"Use SGLs when the average request segment size is greater than or "
	"equal to this size. Use 0 to disable SGLs.");

#define NVME_PCI_MIN_QUEUE_SIZE 2
#define NVME_PCI_MAX_QUEUE_SIZE 4095
static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
	.set = io_queue_depth_set,
	.get = param_get_uint,
};

static unsigned int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2 and < 4096");

static int io_queue_count_set(const char *val, const struct kernel_param *kp)
{
	unsigned int n;
	int ret;

	ret = kstrtouint(val, 10, &n);
	if (ret != 0 || n > num_possible_cpus())
		return -EINVAL;
	return param_set_uint(val, kp);
}

static const struct kernel_param_ops io_queue_count_ops = {
	.set = io_queue_count_set,
	.get = param_get_uint,
};

static unsigned int write_queues;
module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644);
MODULE_PARM_DESC(write_queues,
	"Number of queues to use for writes. If not set, reads and writes "
	"will share a queue set.");
static unsigned int poll_queues;
module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");

static bool noacpi;
module_param(noacpi, bool, 0444);
MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");

struct nvme_dev;
struct nvme_queue;

static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
static void nvme_delete_io_queues(struct nvme_dev *dev);
static void nvme_update_attrs(struct nvme_dev *dev);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue *queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned online_queues;
	unsigned max_qid;
	unsigned io_queues[HCTX_MAX_TYPES];
	unsigned int num_vecs;
	u32 q_depth;
	int io_sqes;
	u32 db_stride;
	void __iomem *bar;
	unsigned long bar_mapped_size;
	struct mutex shutdown_lock;
	bool subsystem;
	u64 cmb_size;
	bool cmb_use_sqes;
	u32 cmbsz;
	u32 cmbloc;
	struct nvme_ctrl ctrl;
	u32 last_ps;
	bool hmb;
	struct sg_table *hmb_sgt;

	mempool_t *iod_mempool;
	mempool_t *iod_meta_mempool;

	/* shadow doorbell buffer support: */
	__le32 *dbbuf_dbs;
	dma_addr_t dbbuf_dbs_dma_addr;
	__le32 *dbbuf_eis;
	dma_addr_t dbbuf_eis_dma_addr;

	/* host memory buffer support: */
	u64 host_mem_size;
	u32 nr_host_mem_descs;
	u32 host_mem_descs_size;
	dma_addr_t host_mem_descs_dma;
	struct nvme_host_mem_buf_desc *host_mem_descs;
	void **host_mem_desc_bufs;
	unsigned int nr_allocated_queues;
	unsigned int nr_write_queues;
	unsigned int nr_poll_queues;
};

static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE,
			NVME_PCI_MAX_QUEUE_SIZE);
}

static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
	return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
	return (qid * 2 + 1) * stride;
}

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}
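/*
 * Illustrative note (not in the original source): sq_idx()/cq_idx() above
 * compute word offsets into the doorbell region, where each queue owns an
 * SQ tail/CQ head doorbell pair.  Assuming db_stride == 1 (CAP.DSTRD == 0),
 * the admin queue uses dbs[0]/dbs[1], and I/O queue 1 uses
 * dbs[sq_idx(1, 1)] == dbs[2] and dbs[cq_idx(1, 1)] == dbs[3].
 */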
/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	spinlock_t sq_lock;
	void *sq_cmds;
	/* only used for poll queues: */
	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
	struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u32 q_depth;
	u16 cq_vector;
	u16 sq_tail;
	u16 last_sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 sqes;
	unsigned long flags;
#define NVMEQ_ENABLED		0
#define NVMEQ_SQ_CMB		1
#define NVMEQ_DELETE_ERROR	2
#define NVMEQ_POLLED		3
	__le32 *dbbuf_sq_db;
	__le32 *dbbuf_cq_db;
	__le32 *dbbuf_sq_ei;
	__le32 *dbbuf_cq_ei;
	struct completion delete_done;
};

union nvme_descriptor {
	struct nvme_sgl_desc	*sg_list;
	__le64			*prp_list;
};

/*
 * The nvme_iod describes the data in an I/O.
 *
 * The sg pointer contains the list of PRP/SGL chunk allocations in addition
 * to the actual struct scatterlist.
 */
struct nvme_iod {
	struct nvme_request req;
	struct nvme_command cmd;
	bool aborted;
	s8 nr_allocations;	/* PRP list pool allocations. 0 means small
				   pool in use */
	unsigned int dma_len;	/* length of single DMA segment mapping */
	dma_addr_t first_dma;
	dma_addr_t meta_dma;
	struct sg_table sgt;
	struct sg_table meta_sgt;
	union nvme_descriptor meta_list;
	union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS];
};

static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
{
	return dev->nr_allocated_queues * 8 * dev->db_stride;
}

static void nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev);

	if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP))
		return;

	if (dev->dbbuf_dbs) {
		/*
		 * Clear the dbbuf memory so the driver doesn't observe stale
		 * values from the previous instantiation.
		 */
		memset(dev->dbbuf_dbs, 0, mem_size);
		memset(dev->dbbuf_eis, 0, mem_size);
		return;
	}

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		goto fail;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis)
		goto fail_free_dbbuf_dbs;
	return;

fail_free_dbbuf_dbs:
	dma_free_coherent(dev->dev, mem_size, dev->dbbuf_dbs,
			  dev->dbbuf_dbs_dma_addr);
	dev->dbbuf_dbs = NULL;
fail:
	dev_warn(dev->dev, "unable to allocate dma for dbbuf\n");
}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev);

	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
{
	if (!nvmeq->qid)
		return;

	nvmeq->dbbuf_sq_db = NULL;
	nvmeq->dbbuf_cq_db = NULL;
	nvmeq->dbbuf_sq_ei = NULL;
	nvmeq->dbbuf_cq_ei = NULL;
}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{
	struct nvme_command c = { };
	unsigned int i;

	if (!dev->dbbuf_dbs)
		return;

	c.dbbuf.opcode = nvme_admin_dbbuf;
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
		/* Free memory and continue on */
		nvme_dbbuf_dma_free(dev);

		for (i = 1; i <= dev->online_queues; i++)
			nvme_dbbuf_free(&dev->queues[i]);
	}
}

static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}
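/*
 * Worked example (illustrative, not from the original source): the unsigned
 * wrap-around arithmetic above asks whether event_idx lies in [old, new_idx),
 * i.e. whether this doorbell update crosses the index the controller asked
 * to be notified at.  With old = 3 and new_idx = 6:
 *   - event_idx = 5: (u16)(6 - 5 - 1) = 0 < (u16)(6 - 3) = 3, so an MMIO
 *     doorbell write is due;
 *   - event_idx = 8: (u16)(6 - 8 - 1) = 0xfffd is not < 3, so the event
 *     index was not crossed and the MMIO write can be skipped.
 * The u16 casts keep the comparison correct when the indices wrap past
 * 0xffff.
 */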
/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
					      volatile __le32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value, event_idx;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = le32_to_cpu(*dbbuf_db);
		*dbbuf_db = cpu_to_le32(value);

		/*
		 * Ensure that the doorbell is updated before reading the event
		 * index from memory.  The controller needs to provide similar
		 * ordering to ensure the event index is updated before reading
		 * the doorbell.
		 */
		mb();

		event_idx = le32_to_cpu(*dbbuf_ei);
		if (!nvme_dbbuf_need_event(event_idx, value, old_value))
			return false;
	}

	return true;
}

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_pci_npages_prp(void)
{
	unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
	unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);

	return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
}
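/*
 * Worked example (illustrative, assuming NVME_CTRL_PAGE_SIZE == 4096):
 * max_bytes = 8192 KiB + 4 KiB = 8,392,704 bytes, so nprps = 2049 PRP
 * entries.  Each PRP list page holds 4096/8 - 1 = 511 entries plus a chain
 * pointer, and DIV_ROUND_UP(8 * 2049, 4096 - 8) = 5, which matches
 * NVME_MAX_NR_ALLOCATIONS defined near the top of this file.
 */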
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = to_nvme_dev(data);
	struct nvme_queue *nvmeq = &dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);

	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = to_nvme_dev(data);
	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_pci_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_req(req)->ctrl = set->driver_data;
	nvme_req(req)->cmd = &iod->cmd;
	return 0;
}

static int queue_irq_offset(struct nvme_dev *dev)
{
	/* if we have more than 1 vec, admin queue offsets us by 1 */
	if (dev->num_vecs > 1)
		return 1;

	return 0;
}

static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_dev *dev = to_nvme_dev(set->driver_data);
	int i, qoff, offset;

	offset = queue_irq_offset(dev);
	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = dev->io_queues[i];
		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/*
		 * The poll queue(s) don't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping.
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL && offset)
			blk_mq_map_hw_queues(map, dev->dev, offset);
		else
			blk_mq_map_queues(map);
		qoff += map->nr_queues;
		offset += map->nr_queues;
	}
}

/*
 * Write sq tail if we are asked to, or if the next command would wrap.
 */
static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
{
	if (!write_sq) {
		u16 next_tail = nvmeq->sq_tail + 1;

		if (next_tail == nvmeq->q_depth)
			next_tail = 0;
		if (next_tail != nvmeq->last_sq_tail)
			return;
	}

	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
		writel(nvmeq->sq_tail, nvmeq->q_db);
	nvmeq->last_sq_tail = nvmeq->sq_tail;
}

static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq,
				    struct nvme_command *cmd)
{
	memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
		absolute_pointer(cmd), sizeof(*cmd));
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
}

static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	spin_lock(&nvmeq->sq_lock);
	if (nvmeq->sq_tail != nvmeq->last_sq_tail)
		nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}

static inline bool nvme_pci_metadata_use_sgls(struct nvme_dev *dev,
					      struct request *req)
{
	if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl))
		return false;
	return req->nr_integrity_segments > 1 ||
	       nvme_req(req)->flags & NVME_REQ_USERCMD;
}

static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
				     int nseg)
{
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	unsigned int avg_seg_size;

	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);

	if (!nvme_ctrl_sgl_supported(&dev->ctrl))
		return false;
	if (!nvmeq->qid)
		return false;
	if (nvme_pci_metadata_use_sgls(dev, req))
		return true;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return nvme_req(req)->flags & NVME_REQ_USERCMD;
	return true;
}

static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
{
	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	dma_addr_t dma_addr = iod->first_dma;
	int i;

	for (i = 0; i < iod->nr_allocations; i++) {
		__le64 *prp_list = iod->list[i].prp_list;
		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);

		dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
		dma_addr = next_dma_addr;
	}
}

static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	if (iod->dma_len) {
		dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
			       rq_dma_dir(req));
		return;
	}

	WARN_ON_ONCE(!iod->sgt.nents);

	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);

	if (iod->nr_allocations == 0)
		dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list,
			      iod->first_dma);
	else if (iod->nr_allocations == 1)
		dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list,
			      iod->first_dma);
	else
		nvme_free_prps(dev, req);
	mempool_free(iod->sgt.sgl, dev->iod_mempool);
}

static void nvme_print_sgl(struct scatterlist *sgl, int nents)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);
		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
			"dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));
	}
}
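/*
 * Illustrative sketch of the PRP layout built below (not in the original
 * source), assuming NVME_CTRL_PAGE_SIZE == 4096:
 *   - an 8 KiB, page-aligned transfer needs two entries: prp1 points at the
 *     first page and prp2 directly at the second;
 *   - a 16 KiB transfer needs four: prp1 covers the first page and prp2
 *     points at a PRP list (from prp_small_pool, since 3 <= 32 entries)
 *     holding the remaining three page addresses;
 *   - past 512 entries, a list page's last slot is repurposed as a chain
 *     pointer to the next list page.
 */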
static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sgt.sgl;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
	__le64 *prp_list;
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (NVME_CTRL_PAGE_SIZE - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= NVME_CTRL_PAGE_SIZE) {
		iod->first_dma = dma_addr;
		goto done;
	}

	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->nr_allocations = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->nr_allocations = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->nr_allocations = -1;
		return BLK_STS_RESOURCE;
	}
	iod->list[0].prp_list = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
			__le64 *old_prp_list = prp_list;

			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				goto free_prps;
			iod->list[iod->nr_allocations++].prp_list = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= NVME_CTRL_PAGE_SIZE;
		dma_addr += NVME_CTRL_PAGE_SIZE;
		length -= NVME_CTRL_PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		if (unlikely(dma_len < 0))
			goto bad_sgl;
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}
done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
	return BLK_STS_OK;
free_prps:
	nvme_free_prps(dev, req);
	return BLK_STS_RESOURCE;
bad_sgl:
	WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
	     "Invalid SGL for payload:%d nents:%d\n",
	     blk_rq_payload_bytes(req), iod->sgt.nents);
	return BLK_STS_IOERR;
}

static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
		struct scatterlist *sg)
{
	sge->addr = cpu_to_le64(sg_dma_address(sg));
	sge->length = cpu_to_le32(sg_dma_len(sg));
	sge->type = NVME_SGL_FMT_DATA_DESC << 4;
}

static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
		dma_addr_t dma_addr, int entries)
{
	sge->addr = cpu_to_le64(dma_addr);
	sge->length = cpu_to_le32(entries * sizeof(*sge));
	sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
}
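/*
 * Illustrative note (not in the original source): an nvme_sgl_desc is 16
 * bytes.  For a request with, say, four DMA segments, nvme_pci_setup_sgls()
 * below writes a last-segment descriptor into the command's dptr.sgl whose
 * address points at one contiguous array of four data descriptors
 * (length = 4 * 16 = 64 bytes), so the controller needs only a single extra
 * fetch to walk the whole list.
 */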
static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	struct nvme_sgl_desc *sg_list;
	struct scatterlist *sg = iod->sgt.sgl;
	unsigned int entries = iod->sgt.nents;
	dma_addr_t sgl_dma;
	int i = 0;

	/* setting the transfer type as SGL */
	cmd->flags = NVME_CMD_SGL_METABUF;

	if (entries == 1) {
		nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
		return BLK_STS_OK;
	}

	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
		pool = dev->prp_small_pool;
		iod->nr_allocations = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->nr_allocations = 1;
	}

	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list) {
		iod->nr_allocations = -1;
		return BLK_STS_RESOURCE;
	}

	iod->list[0].sg_list = sg_list;
	iod->first_dma = sgl_dma;

	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
	do {
		nvme_pci_sgl_set_data(&sg_list[i++], sg);
		sg = sg_next(sg);
	} while (--entries > 0);

	return BLK_STS_OK;
}

static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
	if (bv->bv_len > first_prp_len)
		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
	else
		cmnd->dptr.prp2 = 0;
	return BLK_STS_OK;
}
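/*
 * Worked example for the fast path above (illustrative, assuming
 * NVME_CTRL_PAGE_SIZE == 4096): a single 4 KiB page-aligned bio_vec maps to
 * prp1 alone with prp2 = 0; an 8 KiB aligned bio_vec exceeds first_prp_len,
 * so prp2 is set to the address of the second controller page.  Anything
 * spanning more than two controller pages is filtered out by the caller,
 * nvme_map_data(), and goes through the full scatterlist path instead.
 */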
static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->flags = NVME_CMD_SGL_METABUF;
	cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
	cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
	cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
	return BLK_STS_OK;
}

static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret = BLK_STS_RESOURCE;
	int rc;

	if (blk_rq_nr_phys_segments(req) == 1) {
		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
		struct bio_vec bv = req_bvec(req);

		if (!is_pci_p2pdma_page(bv.bv_page)) {
			if (!nvme_pci_metadata_use_sgls(dev, req) &&
			    (bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
			     bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
				return nvme_setup_prp_simple(dev, req,
							     &cmnd->rw, &bv);

			if (nvmeq->qid && sgl_threshold &&
			    nvme_ctrl_sgl_supported(&dev->ctrl))
				return nvme_setup_sgl_simple(dev, req,
							     &cmnd->rw, &bv);
		}
	}

	iod->dma_len = 0;
	iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
	if (!iod->sgt.sgl)
		return BLK_STS_RESOURCE;
	sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
	iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
	if (!iod->sgt.orig_nents)
		goto out_free_sg;

	rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
			     DMA_ATTR_NO_WARN);
	if (rc) {
		if (rc == -EREMOTEIO)
			ret = BLK_STS_TARGET;
		goto out_free_sg;
	}

	if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
	else
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
	if (ret != BLK_STS_OK)
		goto out_unmap_sg;
	return BLK_STS_OK;

out_unmap_sg:
	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
out_free_sg:
	mempool_free(iod->sgt.sgl, dev->iod_mempool);
	return ret;
}

static blk_status_t nvme_pci_setup_meta_sgls(struct nvme_dev *dev,
					     struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_rw_command *cmnd = &iod->cmd.rw;
	struct nvme_sgl_desc *sg_list;
	struct scatterlist *sgl, *sg;
	unsigned int entries;
	dma_addr_t sgl_dma;
	int rc, i;

	iod->meta_sgt.sgl = mempool_alloc(dev->iod_meta_mempool, GFP_ATOMIC);
	if (!iod->meta_sgt.sgl)
		return BLK_STS_RESOURCE;

	sg_init_table(iod->meta_sgt.sgl, req->nr_integrity_segments);
	iod->meta_sgt.orig_nents = blk_rq_map_integrity_sg(req,
							   iod->meta_sgt.sgl);
	if (!iod->meta_sgt.orig_nents)
		goto out_free_sg;

	rc = dma_map_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req),
			     DMA_ATTR_NO_WARN);
	if (rc)
		goto out_free_sg;

	sg_list = dma_pool_alloc(dev->prp_small_pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list)
		goto out_unmap_sg;

	entries = iod->meta_sgt.nents;
	iod->meta_list.sg_list = sg_list;
	iod->meta_dma = sgl_dma;

	cmnd->flags = NVME_CMD_SGL_METASEG;
	cmnd->metadata = cpu_to_le64(sgl_dma);

	sgl = iod->meta_sgt.sgl;
	if (entries == 1) {
		nvme_pci_sgl_set_data(sg_list, sgl);
		return BLK_STS_OK;
	}

	sgl_dma += sizeof(*sg_list);
	nvme_pci_sgl_set_seg(sg_list, sgl_dma, entries);
	for_each_sg(sgl, sg, entries, i)
		nvme_pci_sgl_set_data(&sg_list[i + 1], sg);

	return BLK_STS_OK;

out_unmap_sg:
	dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0);
out_free_sg:
	mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool);
	return BLK_STS_RESOURCE;
}

static blk_status_t nvme_pci_setup_meta_mptr(struct nvme_dev *dev,
					     struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct bio_vec bv = rq_integrity_vec(req);
	struct nvme_command *cmnd = &iod->cmd;

	iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->meta_dma))
		return BLK_STS_IOERR;
	cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
	return BLK_STS_OK;
}

static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req)
{
	if (nvme_pci_metadata_use_sgls(dev, req))
		return nvme_pci_setup_meta_sgls(dev, req);
	return nvme_pci_setup_meta_mptr(dev, req);
}

static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	iod->aborted = false;
	iod->nr_allocations = -1;
	iod->sgt.nents = 0;
	iod->meta_sgt.nents = 0;

	ret = nvme_setup_cmd(req->q->queuedata, req);
	if (ret)
		return ret;

	if (blk_rq_nr_phys_segments(req)) {
		ret = nvme_map_data(dev, req, &iod->cmd);
		if (ret)
			goto out_free_cmd;
	}

	if (blk_integrity_rq(req)) {
		ret = nvme_map_metadata(dev, req);
		if (ret)
			goto out_unmap_data;
	}

	nvme_start_request(req);
	return BLK_STS_OK;
out_unmap_data:
	if (blk_rq_nr_phys_segments(req))
		nvme_unmap_data(dev, req);
out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}
/*
 * NOTE: ns is NULL when called on the admin queue.
 */
static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	/*
	 * We should not need to do this, but we're still using this to
	 * ensure we can drain requests on a dying queue.
	 */
	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
		return BLK_STS_IOERR;

	if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
		return nvme_fail_nonready_command(&dev->ctrl, req);

	ret = nvme_prep_rq(dev, req);
	if (unlikely(ret))
		return ret;
	spin_lock(&nvmeq->sq_lock);
	nvme_sq_copy_cmd(nvmeq, &iod->cmd);
	nvme_write_sq_db(nvmeq, bd->last);
	spin_unlock(&nvmeq->sq_lock);
	return BLK_STS_OK;
}

static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct rq_list *rqlist)
{
	struct request *req;

	spin_lock(&nvmeq->sq_lock);
	while ((req = rq_list_pop(rqlist))) {
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

		nvme_sq_copy_cmd(nvmeq, &iod->cmd);
	}
	nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}

static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
{
	/*
	 * We should not need to do this, but we're still using this to
	 * ensure we can drain requests on a dying queue.
	 */
	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
		return false;
	if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
		return false;

	return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
}

static void nvme_queue_rqs(struct rq_list *rqlist)
{
	struct rq_list submit_list = { };
	struct rq_list requeue_list = { };
	struct nvme_queue *nvmeq = NULL;
	struct request *req;

	while ((req = rq_list_pop(rqlist))) {
		if (nvmeq && nvmeq != req->mq_hctx->driver_data)
			nvme_submit_cmds(nvmeq, &submit_list);
		nvmeq = req->mq_hctx->driver_data;

		if (nvme_prep_rq_batch(nvmeq, req))
			rq_list_add_tail(&submit_list, req);
		else
			rq_list_add_tail(&requeue_list, req);
	}

	if (nvmeq)
		nvme_submit_cmds(nvmeq, &submit_list);
	*rqlist = requeue_list;
}

static __always_inline void nvme_unmap_metadata(struct nvme_dev *dev,
						struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	if (!iod->meta_sgt.nents) {
		dma_unmap_page(dev->dev, iod->meta_dma,
			       rq_integrity_vec(req).bv_len,
			       rq_dma_dir(req));
		return;
	}

	dma_pool_free(dev->prp_small_pool, iod->meta_list.sg_list,
		      iod->meta_dma);
	dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0);
	mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool);
}

static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;

	if (blk_integrity_rq(req))
		nvme_unmap_metadata(dev, req);

	if (blk_rq_nr_phys_segments(req))
		nvme_unmap_data(dev, req);
}

static void nvme_pci_complete_rq(struct request *req)
{
	nvme_pci_unmap_rq(req);
	nvme_complete_rq(req);
}

static void nvme_pci_complete_batch(struct io_comp_batch *iob)
{
	nvme_complete_batch(iob, nvme_pci_unmap_rq);
}
/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
	struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];

	return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
}

static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
{
	u16 head = nvmeq->cq_head;

	if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
					      nvmeq->dbbuf_cq_ei))
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
}

static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
{
	if (!nvmeq->qid)
		return nvmeq->dev->admin_tagset.tags[0];
	return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
}

static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
				   struct io_comp_batch *iob, u16 idx)
{
	struct nvme_completion *cqe = &nvmeq->cqes[idx];
	__u16 command_id = READ_ONCE(cqe->command_id);
	struct request *req;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
		nvme_complete_async_event(&nvmeq->dev->ctrl,
				cqe->status, &cqe->result);
		return;
	}

	req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
	if (unlikely(!req)) {
		dev_warn(nvmeq->dev->ctrl.device,
			"invalid id %d completed on queue %d\n",
			command_id, le16_to_cpu(cqe->sq_id));
		return;
	}

	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
	if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
	    !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
				 nvme_pci_complete_batch))
		nvme_pci_complete_rq(req);
}

static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
	u32 tmp = nvmeq->cq_head + 1;

	if (tmp == nvmeq->q_depth) {
		nvmeq->cq_head = 0;
		nvmeq->cq_phase ^= 1;
	} else {
		nvmeq->cq_head = tmp;
	}
}
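/*
 * Illustrative note (not in the original source): cq_phase starts at 1 and
 * the controller writes phase = 1 into each CQE on its first pass through
 * the zero-initialized ring.  Once cq_head wraps back to 0, cq_phase flips
 * to 0, so entries left over from the previous pass (still carrying
 * phase = 1) no longer match in nvme_cqe_pending() and stale completions
 * are never consumed twice.
 */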
static inline bool nvme_poll_cq(struct nvme_queue *nvmeq,
				struct io_comp_batch *iob)
{
	bool found = false;

	while (nvme_cqe_pending(nvmeq)) {
		found = true;
		/*
		 * load-load control dependency between phase and the rest of
		 * the cqe requires a full read memory barrier
		 */
		dma_rmb();
		nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head);
		nvme_update_cq_head(nvmeq);
	}

	if (found)
		nvme_ring_cq_doorbell(nvmeq);
	return found;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	DEFINE_IO_COMP_BATCH(iob);

	if (nvme_poll_cq(nvmeq, &iob)) {
		if (!rq_list_empty(&iob.req_list))
			nvme_pci_complete_batch(&iob);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;

	if (nvme_cqe_pending(nvmeq))
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
}

/*
 * Poll for completions for any interrupt driven queue.
 * Can be called from any context.
 */
static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
{
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);

	WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));

	disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
	nvme_poll_cq(nvmeq, NULL);
	enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
}

static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct nvme_queue *nvmeq = hctx->driver_data;
	bool found;

	if (!nvme_cqe_pending(nvmeq))
		return 0;

	spin_lock(&nvmeq->cq_poll_lock);
	found = nvme_poll_cq(nvmeq, iob);
	spin_unlock(&nvmeq->cq_poll_lock);

	return found;
}

static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);
	struct nvme_queue *nvmeq = &dev->queues[0];
	struct nvme_command c = { };

	c.common.opcode = nvme_admin_async_event;
	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;

	spin_lock(&nvmeq->sq_lock);
	nvme_sq_copy_cmd(nvmeq, &c);
	nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}

static int nvme_pci_subsystem_reset(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);
	int ret = 0;

	/*
	 * Taking the shutdown_lock ensures the BAR mapping is not being
	 * altered by reset_work.  Holding this lock before the RESETTING state
	 * change, if successful, also ensures nvme_remove won't be able to
	 * proceed to iounmap until we're done.
	 */
	mutex_lock(&dev->shutdown_lock);
	if (!dev->bar_mapped_size) {
		ret = -ENODEV;
		goto unlock;
	}

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
		ret = -EBUSY;
		goto unlock;
	}

	writel(NVME_SUBSYS_RESET, dev->bar + NVME_REG_NSSR);
	nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE);

	/*
	 * Read controller status to flush the previous write and trigger a
	 * PCIe read error.
	 */
	readl(dev->bar + NVME_REG_CSTS);
unlock:
	mutex_unlock(&dev->shutdown_lock);
	return ret;
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c = { };

	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}
static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
		struct nvme_queue *nvmeq, s16 vector)
{
	struct nvme_command c = { };
	int flags = NVME_QUEUE_PHYS_CONTIG;

	if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
		flags |= NVME_CQ_IRQ_ENABLED;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(vector);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
		struct nvme_queue *nvmeq)
{
	struct nvme_ctrl *ctrl = &dev->ctrl;
	struct nvme_command c = { };
	int flags = NVME_QUEUE_PHYS_CONTIG;

	/*
	 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
	 * set.  Since URGENT priority is zero, it makes all queues
	 * URGENT.
	 */
	if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
		flags |= NVME_SQ_PRIO_MEDIUM;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
{
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;

	dev_warn(nvmeq->dev->ctrl.device,
		 "Abort status: 0x%x", nvme_req(req)->status);
	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
	blk_mq_free_request(req);
	return RQ_END_IO_NONE;
}

static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{
	/*
	 * If true, indicates loss of adapter communication, possibly caused
	 * by an NVMe Subsystem reset.
	 */
	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);

	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
	switch (nvme_ctrl_state(&dev->ctrl)) {
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	default:
		break;
	}

	/*
	 * We shouldn't reset unless the controller is in a fatal error state
	 * _or_ we lost communication with it.
	 */
	if (!(csts & NVME_CSTS_CFS) && !nssro)
		return false;

	return true;
}
static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
{
	/* Read a config register to help see what died. */
	u16 pci_status;
	int result;

	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
				      &pci_status);
	if (result == PCIBIOS_SUCCESSFUL)
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
			 csts, pci_status);
	else
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
			 csts, result);

	if (csts != ~0)
		return;

	dev_warn(dev->ctrl.device,
		 "Does your device have a faulty power saving mode enabled?\n");
	dev_warn(dev->ctrl.device,
		 "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off pcie_port_pm=off\" and report a bug\n");
}

static enum blk_eh_timer_return nvme_timeout(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *abort_req;
	struct nvme_command cmd = { };
	u32 csts = readl(dev->bar + NVME_REG_CSTS);
	u8 opcode;

	if (nvme_state_terminal(&dev->ctrl))
		goto disable;

	/*
	 * If the PCI error recovery process is happening, we cannot reset or
	 * the recovery mechanism will surely fail.
	 */
	mb();
	if (pci_channel_offline(to_pci_dev(dev->dev)))
		return BLK_EH_RESET_TIMER;

	/*
	 * Reset immediately if the controller has failed.
	 */
	if (nvme_should_reset(dev, csts)) {
		nvme_warn_reset(dev, csts);
		goto disable;
	}

	/*
	 * Did we miss an interrupt?
	 */
	if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
		nvme_poll(req->mq_hctx, NULL);
	else
		nvme_poll_irqdisable(nvmeq);

	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
		dev_warn(dev->ctrl.device,
			 "I/O tag %d (%04x) QID %d timeout, completion polled\n",
			 req->tag, nvme_cid(req), nvmeq->qid);
		return BLK_EH_DONE;
	}

	/*
	 * Shutdown immediately if controller times out while starting.  The
	 * reset work will see the pci device disabled when it gets the forced
	 * cancellation error.  All outstanding requests are completed on
	 * shutdown, so we return BLK_EH_DONE.
	 */
	switch (nvme_ctrl_state(&dev->ctrl)) {
	case NVME_CTRL_CONNECTING:
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
		fallthrough;
	case NVME_CTRL_DELETING:
		dev_warn_ratelimited(dev->ctrl.device,
			 "I/O tag %d (%04x) QID %d timeout, disable controller\n",
			 req->tag, nvme_cid(req), nvmeq->qid);
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		nvme_dev_disable(dev, true);
		return BLK_EH_DONE;
	case NVME_CTRL_RESETTING:
		return BLK_EH_RESET_TIMER;
	default:
		break;
	}
	/*
	 * Shutdown the controller immediately and schedule a reset if the
	 * command was already aborted once before and still hasn't been
	 * returned to the driver, or if this is the admin queue.
	 */
	opcode = nvme_req(req)->cmd->common.opcode;
	if (!nvmeq->qid || iod->aborted) {
		dev_warn(dev->ctrl.device,
			 "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n",
			 req->tag, nvme_cid(req), opcode,
			 nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid);
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		goto disable;
	}

	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}
	iod->aborted = true;

	cmd.abort.opcode = nvme_admin_abort_cmd;
	cmd.abort.cid = nvme_cid(req);
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);

	dev_warn(nvmeq->dev->ctrl.device,
		 "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, aborting req_op:%s(%u) size:%u\n",
		 req->tag, nvme_cid(req), opcode, nvme_get_opcode_str(opcode),
		 nvmeq->qid, blk_op_str(req_op(req)), req_op(req),
		 blk_rq_bytes(req));

	abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
					 BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(abort_req)) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}
	nvme_init_request(abort_req, &cmd);

	abort_req->end_io = abort_endio;
	abort_req->end_io_data = NULL;
	blk_execute_rq_nowait(abort_req, false);

	/*
	 * The aborted req will be completed on receiving the abort req.
	 * We enable the timer again.  If hit twice, it'll cause a device reset,
	 * as the device then is in a faulty state.
	 */
	return BLK_EH_RESET_TIMER;

disable:
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
		if (nvme_state_terminal(&dev->ctrl))
			nvme_dev_disable(dev, true);
		return BLK_EH_DONE;
	}

	nvme_dev_disable(dev, false);
	if (nvme_try_sched_reset(&dev->ctrl))
		nvme_unquiesce_io_queues(&dev->ctrl);
	return BLK_EH_DONE;
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
			  (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	if (!nvmeq->sq_cmds)
		return;

	if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
		pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
				nvmeq->sq_cmds, SQ_SIZE(nvmeq));
	} else {
		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
				  nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	}
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
		dev->ctrl.queue_count--;
		nvme_free_queue(&dev->queues[i]);
	}
}

static void nvme_suspend_queue(struct nvme_dev *dev, unsigned int qid)
{
	struct nvme_queue *nvmeq = &dev->queues[qid];

	if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
		return;

	/* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */
	mb();

	nvmeq->dev->online_queues--;
	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
		nvme_quiesce_admin_queue(&nvmeq->dev->ctrl);
	if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
		pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq);
}

static void nvme_suspend_io_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
		nvme_suspend_queue(dev, i);
}
/*
 * Called only on a device that has been disabled and after all other threads
 * that can check this device's completion queues have synced, except
 * nvme_poll().  This is the last chance for the driver to see a natural
 * completion before nvme_cancel_request() terminates all incomplete requests.
 */
static void nvme_reap_pending_cqes(struct nvme_dev *dev)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
		spin_lock(&dev->queues[i].cq_poll_lock);
		nvme_poll_cq(&dev->queues[i], NULL);
		spin_unlock(&dev->queues[i].cq_poll_lock);
	}
}

static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
			   int entry_size)
{
	int q_depth = dev->q_depth;
	unsigned q_size_aligned = roundup(q_depth * entry_size,
					  NVME_CTRL_PAGE_SIZE);

	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);

		mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE);
		q_depth = div_u64(mem_per_q, entry_size);

		/*
		 * Ensure the reduced q_depth is above some threshold where it
		 * would be better to map queues in system memory with the
		 * original depth.
		 */
		if (q_depth < 64)
			return -ENOMEM;
	}

	return q_depth;
}
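/*
 * Worked example (illustrative, not from the original source): with a 1 MiB
 * CMB, 8 I/O queues, 64-byte SQ entries and a requested depth of 4095, the
 * page-aligned per-queue size (8 * 256 KiB) overflows the CMB, so each queue
 * is capped at mem_per_q = 128 KiB and q_depth drops to 128 KiB / 64 = 2048.
 */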
static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
				int qid)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
		if (nvmeq->sq_cmds) {
			nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
							nvmeq->sq_cmds);
			if (nvmeq->sq_dma_addr) {
				set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
				return 0;
			}

			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
		}
	}

	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
				&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		return -ENOMEM;
	return 0;
}

static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
{
	struct nvme_queue *nvmeq = &dev->queues[qid];

	if (dev->ctrl.queue_count > qid)
		return 0;

	nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES;
	nvmeq->q_depth = depth;
	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
					 &nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;

	if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
		goto free_cqdma;

	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->sq_lock);
	spin_lock_init(&nvmeq->cq_poll_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->qid = qid;
	dev->ctrl.queue_count++;

	return 0;

free_cqdma:
	dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
			  nvmeq->cq_dma_addr);
free_nvmeq:
	return -ENOMEM;
}

static int queue_request_irq(struct nvme_queue *nvmeq)
{
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
	int nr = nvmeq->dev->ctrl.instance;

	if (use_threaded_interrupts) {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
				nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	} else {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
				NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	}
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->last_sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
	nvme_dbbuf_init(dev, nvmeq, qid);
	dev->online_queues++;
	wmb(); /* ensure the first interrupt sees the initialization */
}

/*
 * Try getting shutdown_lock while setting up IO queues.
 */
static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
{
	/*
	 * Give up if the lock is being held by nvme_dev_disable.
	 */
	if (!mutex_trylock(&dev->shutdown_lock))
		return -ENODEV;

	/*
	 * Controller is in wrong state, fail early.
	 */
	if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) {
		mutex_unlock(&dev->shutdown_lock);
		return -ENODEV;
	}

	return 0;
}
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;
	u16 vector = 0;

	clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);

	/*
	 * A queue's vector matches the queue identifier unless the controller
	 * has only one vector available.
	 */
	if (!polled)
		vector = dev->num_vecs == 1 ? 0 : qid;
	else
		set_bit(NVMEQ_POLLED, &nvmeq->flags);

	result = adapter_alloc_cq(dev, qid, nvmeq, vector);
	if (result)
		return result;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		return result;
	if (result)
		goto release_cq;

	nvmeq->cq_vector = vector;

	result = nvme_setup_io_queues_trylock(dev);
	if (result)
		return result;
	nvme_init_queue(nvmeq, qid);
	if (!polled) {
		result = queue_request_irq(nvmeq);
		if (result < 0)
			goto release_sq;
	}

	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
	mutex_unlock(&dev->shutdown_lock);
	return result;

release_sq:
	dev->online_queues--;
	mutex_unlock(&dev->shutdown_lock);
	adapter_delete_sq(dev, qid);
release_cq:
	adapter_delete_cq(dev, qid);
	return result;
}

static const struct blk_mq_ops nvme_mq_admin_ops = {
	.queue_rq	= nvme_queue_rq,
	.complete	= nvme_pci_complete_rq,
	.init_hctx	= nvme_admin_init_hctx,
	.init_request	= nvme_pci_init_request,
	.timeout	= nvme_timeout,
};

static const struct blk_mq_ops nvme_mq_ops = {
	.queue_rq	= nvme_queue_rq,
	.queue_rqs	= nvme_queue_rqs,
	.complete	= nvme_pci_complete_rq,
	.commit_rqs	= nvme_commit_rqs,
	.init_hctx	= nvme_init_hctx,
	.init_request	= nvme_pci_init_request,
	.map_queues	= nvme_pci_map_queues,
	.timeout	= nvme_timeout,
	.poll		= nvme_poll,
};

static void nvme_dev_remove_admin(struct nvme_dev *dev)
{
	if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
		/*
		 * If the controller was reset during removal, it's possible
		 * user requests may be waiting on a stopped queue.  Start the
		 * queue to flush these to completion.
		 */
		nvme_unquiesce_admin_queue(&dev->ctrl);
		nvme_remove_admin_tag_set(&dev->ctrl);
	}
}

static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
	return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
}
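/*
 * Worked example (illustrative): with db_stride == 1 each queue pair owns an
 * 8-byte doorbell slot, so for 4 I/O queues plus the admin queue the BAR
 * must cover NVME_REG_DBS (0x1000) + 5 * 8 = 0x1028 bytes before the
 * doorbells run out; nvme_remap_bar() below remaps the BAR whenever this
 * size grows.
 */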
static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (size <= dev->bar_mapped_size)
		return 0;
	if (size > pci_resource_len(pdev, 0))
		return -ENOMEM;
	if (dev->bar)
		iounmap(dev->bar);
	dev->bar = ioremap(pci_resource_start(pdev, 0), size);
	if (!dev->bar) {
		dev->bar_mapped_size = 0;
		return -ENOMEM;
	}
	dev->bar_mapped_size = size;
	dev->dbs = dev->bar + NVME_REG_DBS;

	return 0;
}

static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	struct nvme_queue *nvmeq;

	result = nvme_remap_bar(dev, db_bar_size(dev, 0));
	if (result < 0)
		return result;

	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
				NVME_CAP_NSSRC(dev->ctrl.cap) : 0;

	if (dev->subsystem &&
	    (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
		writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);

	/*
	 * If the device has been passed off to us in an enabled state, just
	 * clear the enabled bit.  The spec says we should set the 'shutdown
	 * notification bits', but doing so may cause the device to complete
	 * commands to the admin queue ... and we don't know what memory that
	 * might be pointing at!
	 */
	result = nvme_disable_ctrl(&dev->ctrl, false);
	if (result < 0)
		return result;

	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
	if (result)
		return result;

	dev->ctrl.numa_node = dev_to_node(dev->dev);

	nvmeq = &dev->queues[0];
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	writel(aqa, dev->bar + NVME_REG_AQA);
	lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
	lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);

	result = nvme_enable_ctrl(&dev->ctrl);
	if (result)
		return result;

	nvmeq->cq_vector = 0;
	nvme_init_queue(nvmeq, 0);
	result = queue_request_irq(nvmeq);
	if (result) {
		dev->online_queues--;
		return result;
	}

	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
	return result;
}

static int nvme_create_io_queues(struct nvme_dev *dev)
{
	unsigned i, max, rw_queues;
	int ret = 0;

	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
		if (nvme_alloc_queue(dev, i, dev->q_depth)) {
			ret = -ENOMEM;
			break;
		}
	}

	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
	if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) {
		rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] +
				dev->io_queues[HCTX_TYPE_READ];
	} else {
		rw_queues = max;
	}

	for (i = dev->online_queues; i <= max; i++) {
		bool polled = i > rw_queues;

		ret = nvme_create_queue(&dev->queues[i], i, polled);
		if (ret)
			break;
	}

	/*
	 * Ignore failing Create SQ/CQ commands, we can continue with less
	 * than the desired amount of queues, and even a controller without
	 * I/O queues can still be used to issue admin commands.  This might
	 * be useful to upgrade a buggy firmware for example.
	 */
	return ret >= 0 ? 0 : ret;
}

static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
{
	u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;

	return 1ULL << (12 + 4 * szu);
}

static u32 nvme_cmb_size(struct nvme_dev *dev)
{
	return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
}

static void nvme_map_cmb(struct nvme_dev *dev)
{
	u64 size, offset;
	resource_size_t bar_size;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int bar;

	if (dev->cmb_size)
		return;

	if (NVME_CAP_CMBS(dev->ctrl.cap))
		writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);

	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
	if (!dev->cmbsz)
		return;
	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);

	size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
	offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
	bar = NVME_CMB_BIR(dev->cmbloc);
	bar_size = pci_resource_len(pdev, bar);

	if (offset > bar_size)
		return;

	/*
	 * Controllers may support a CMB size larger than their BAR, for
	 * example, due to being behind a bridge.  Reduce the CMB to the
	 * reported size of the BAR.
	 */
	size = min(size, bar_size - offset);

	if (!IS_ALIGNED(size, memremap_compat_align()) ||
	    !IS_ALIGNED(pci_resource_start(pdev, bar),
			memremap_compat_align()))
		return;

	/*
	 * Tell the controller about the host side address mapping the CMB,
	 * and enable CMB decoding for the NVMe 1.4+ scheme:
	 */
	if (NVME_CAP_CMBS(dev->ctrl.cap)) {
		hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
			     (pci_bus_address(pdev, bar) + offset),
			     dev->bar + NVME_REG_CMBMSC);
	}

	if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
		dev_warn(dev->ctrl.device,
			 "failed to register the CMB\n");
		hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC);
		return;
	}

	dev->cmb_size = size;
	dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS);

	if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
			(NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
		pci_p2pmem_publish(pdev, true);

	nvme_update_attrs(dev);
}
Reduce the CMB to the 1988 * reported size of the BAR 1989 */ 1990 size = min(size, bar_size - offset); 1991 1992 if (!IS_ALIGNED(size, memremap_compat_align()) || 1993 !IS_ALIGNED(pci_resource_start(pdev, bar), 1994 memremap_compat_align())) 1995 return; 1996 1997 /* 1998 * Tell the controller about the host side address mapping the CMB, 1999 * and enable CMB decoding for the NVMe 1.4+ scheme: 2000 */ 2001 if (NVME_CAP_CMBS(dev->ctrl.cap)) { 2002 hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE | 2003 (pci_bus_address(pdev, bar) + offset), 2004 dev->bar + NVME_REG_CMBMSC); 2005 } 2006 2007 if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { 2008 dev_warn(dev->ctrl.device, 2009 "failed to register the CMB\n"); 2010 hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC); 2011 return; 2012 } 2013 2014 dev->cmb_size = size; 2015 dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); 2016 2017 if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == 2018 (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) 2019 pci_p2pmem_publish(pdev, true); 2020 2021 nvme_update_attrs(dev); 2022 } 2023 2024 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) 2025 { 2026 u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; 2027 u64 dma_addr = dev->host_mem_descs_dma; 2028 struct nvme_command c = { }; 2029 int ret; 2030 2031 c.features.opcode = nvme_admin_set_features; 2032 c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); 2033 c.features.dword11 = cpu_to_le32(bits); 2034 c.features.dword12 = cpu_to_le32(host_mem_size); 2035 c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr)); 2036 c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr)); 2037 c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); 2038 2039 ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 2040 if (ret) { 2041 dev_warn(dev->ctrl.device, 2042 "failed to set host mem (err %d, flags %#x).\n", 2043 ret, bits); 2044 } else 2045 dev->hmb = bits & NVME_HOST_MEM_ENABLE; 2046 2047 return ret; 2048 } 2049 2050 static void nvme_free_host_mem_multi(struct nvme_dev *dev) 2051 { 2052 int i; 2053 2054 for (i = 0; i < dev->nr_host_mem_descs; i++) { 2055 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; 2056 size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE; 2057 2058 dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], 2059 le64_to_cpu(desc->addr), 2060 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 2061 } 2062 2063 kfree(dev->host_mem_desc_bufs); 2064 dev->host_mem_desc_bufs = NULL; 2065 } 2066 2067 static void nvme_free_host_mem(struct nvme_dev *dev) 2068 { 2069 if (dev->hmb_sgt) 2070 dma_free_noncontiguous(dev->dev, dev->host_mem_size, 2071 dev->hmb_sgt, DMA_BIDIRECTIONAL); 2072 else 2073 nvme_free_host_mem_multi(dev); 2074 2075 dma_free_coherent(dev->dev, dev->host_mem_descs_size, 2076 dev->host_mem_descs, dev->host_mem_descs_dma); 2077 dev->host_mem_descs = NULL; 2078 dev->host_mem_descs_size = 0; 2079 dev->nr_host_mem_descs = 0; 2080 } 2081 2082 static int nvme_alloc_host_mem_single(struct nvme_dev *dev, u64 size) 2083 { 2084 dev->hmb_sgt = dma_alloc_noncontiguous(dev->dev, size, 2085 DMA_BIDIRECTIONAL, GFP_KERNEL, 0); 2086 if (!dev->hmb_sgt) 2087 return -ENOMEM; 2088 2089 dev->host_mem_descs = dma_alloc_coherent(dev->dev, 2090 sizeof(*dev->host_mem_descs), &dev->host_mem_descs_dma, 2091 GFP_KERNEL); 2092 if (!dev->host_mem_descs) { 2093 dma_free_noncontiguous(dev->dev, size, dev->hmb_sgt, 2094 DMA_BIDIRECTIONAL); 2095 dev->hmb_sgt = NULL; 2096 return -ENOMEM; 2097 } 2098 dev->host_mem_size = 
size; 2099 dev->host_mem_descs_size = sizeof(*dev->host_mem_descs); 2100 dev->nr_host_mem_descs = 1; 2101 2102 dev->host_mem_descs[0].addr = 2103 cpu_to_le64(dev->hmb_sgt->sgl->dma_address); 2104 dev->host_mem_descs[0].size = cpu_to_le32(size / NVME_CTRL_PAGE_SIZE); 2105 return 0; 2106 } 2107 2108 static int nvme_alloc_host_mem_multi(struct nvme_dev *dev, u64 preferred, 2109 u32 chunk_size) 2110 { 2111 struct nvme_host_mem_buf_desc *descs; 2112 u32 max_entries, len, descs_size; 2113 dma_addr_t descs_dma; 2114 int i = 0; 2115 void **bufs; 2116 u64 size, tmp; 2117 2118 tmp = (preferred + chunk_size - 1); 2119 do_div(tmp, chunk_size); 2120 max_entries = tmp; 2121 2122 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) 2123 max_entries = dev->ctrl.hmmaxd; 2124 2125 descs_size = max_entries * sizeof(*descs); 2126 descs = dma_alloc_coherent(dev->dev, descs_size, &descs_dma, 2127 GFP_KERNEL); 2128 if (!descs) 2129 goto out; 2130 2131 bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL); 2132 if (!bufs) 2133 goto out_free_descs; 2134 2135 for (size = 0; size < preferred && i < max_entries; size += len) { 2136 dma_addr_t dma_addr; 2137 2138 len = min_t(u64, chunk_size, preferred - size); 2139 bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, 2140 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 2141 if (!bufs[i]) 2142 break; 2143 2144 descs[i].addr = cpu_to_le64(dma_addr); 2145 descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE); 2146 i++; 2147 } 2148 2149 if (!size) 2150 goto out_free_bufs; 2151 2152 dev->nr_host_mem_descs = i; 2153 dev->host_mem_size = size; 2154 dev->host_mem_descs = descs; 2155 dev->host_mem_descs_dma = descs_dma; 2156 dev->host_mem_descs_size = descs_size; 2157 dev->host_mem_desc_bufs = bufs; 2158 return 0; 2159 2160 out_free_bufs: 2161 kfree(bufs); 2162 out_free_descs: 2163 dma_free_coherent(dev->dev, descs_size, descs, descs_dma); 2164 out: 2165 dev->host_mem_descs = NULL; 2166 return -ENOMEM; 2167 } 2168 2169 static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) 2170 { 2171 unsigned long dma_merge_boundary = dma_get_merge_boundary(dev->dev); 2172 u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES); 2173 u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); 2174 u64 chunk_size; 2175 2176 /* 2177 * If there is an IOMMU that can merge pages, try a virtually 2178 * non-contiguous allocation for a single segment first. 
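* dma_get_merge_boundary() returns non-zero only when the DMA API can merge
* pages into a single IOVA segment; one merged segment needs only a single
* HMB descriptor, which also sidesteps the HMMAXD descriptor-count limit.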
2179 */ 2180 if (dma_merge_boundary && (PAGE_SIZE & dma_merge_boundary) == 0) { 2181 if (!nvme_alloc_host_mem_single(dev, preferred)) 2182 return 0; 2183 } 2184 2185 /* start big and work our way down */ 2186 for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) { 2187 if (!nvme_alloc_host_mem_multi(dev, preferred, chunk_size)) { 2188 if (!min || dev->host_mem_size >= min) 2189 return 0; 2190 nvme_free_host_mem(dev); 2191 } 2192 } 2193 2194 return -ENOMEM; 2195 } 2196 2197 static int nvme_setup_host_mem(struct nvme_dev *dev) 2198 { 2199 u64 max = (u64)max_host_mem_size_mb * SZ_1M; 2200 u64 preferred = (u64)dev->ctrl.hmpre * 4096; 2201 u64 min = (u64)dev->ctrl.hmmin * 4096; 2202 u32 enable_bits = NVME_HOST_MEM_ENABLE; 2203 int ret; 2204 2205 if (!dev->ctrl.hmpre) 2206 return 0; 2207 2208 preferred = min(preferred, max); 2209 if (min > max) { 2210 dev_warn(dev->ctrl.device, 2211 "min host memory (%lld MiB) above limit (%d MiB).\n", 2212 min >> ilog2(SZ_1M), max_host_mem_size_mb); 2213 nvme_free_host_mem(dev); 2214 return 0; 2215 } 2216 2217 /* 2218 * If we already have a buffer allocated check if we can reuse it. 2219 */ 2220 if (dev->host_mem_descs) { 2221 if (dev->host_mem_size >= min) 2222 enable_bits |= NVME_HOST_MEM_RETURN; 2223 else 2224 nvme_free_host_mem(dev); 2225 } 2226 2227 if (!dev->host_mem_descs) { 2228 if (nvme_alloc_host_mem(dev, min, preferred)) { 2229 dev_warn(dev->ctrl.device, 2230 "failed to allocate host memory buffer.\n"); 2231 return 0; /* controller must work without HMB */ 2232 } 2233 2234 dev_info(dev->ctrl.device, 2235 "allocated %lld MiB host memory buffer (%u segment%s).\n", 2236 dev->host_mem_size >> ilog2(SZ_1M), 2237 dev->nr_host_mem_descs, 2238 str_plural(dev->nr_host_mem_descs)); 2239 } 2240 2241 ret = nvme_set_host_mem(dev, enable_bits); 2242 if (ret) 2243 nvme_free_host_mem(dev); 2244 return ret; 2245 } 2246 2247 static ssize_t cmb_show(struct device *dev, struct device_attribute *attr, 2248 char *buf) 2249 { 2250 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2251 2252 return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz : x%08x\n", 2253 ndev->cmbloc, ndev->cmbsz); 2254 } 2255 static DEVICE_ATTR_RO(cmb); 2256 2257 static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr, 2258 char *buf) 2259 { 2260 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2261 2262 return sysfs_emit(buf, "%u\n", ndev->cmbloc); 2263 } 2264 static DEVICE_ATTR_RO(cmbloc); 2265 2266 static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr, 2267 char *buf) 2268 { 2269 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2270 2271 return sysfs_emit(buf, "%u\n", ndev->cmbsz); 2272 } 2273 static DEVICE_ATTR_RO(cmbsz); 2274 2275 static ssize_t hmb_show(struct device *dev, struct device_attribute *attr, 2276 char *buf) 2277 { 2278 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2279 2280 return sysfs_emit(buf, "%d\n", ndev->hmb); 2281 } 2282 2283 static ssize_t hmb_store(struct device *dev, struct device_attribute *attr, 2284 const char *buf, size_t count) 2285 { 2286 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2287 bool new; 2288 int ret; 2289 2290 if (kstrtobool(buf, &new) < 0) 2291 return -EINVAL; 2292 2293 if (new == ndev->hmb) 2294 return count; 2295 2296 if (new) { 2297 ret = nvme_setup_host_mem(ndev); 2298 } else { 2299 ret = nvme_set_host_mem(ndev, 0); 2300 if (!ret) 2301 nvme_free_host_mem(ndev); 2302 } 2303 2304 if (ret < 0) 2305 return ret; 2306 2307 return count; 2308 } 2309 
static DEVICE_ATTR_RW(hmb);
2310
2311 static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj,
2312 struct attribute *a, int n)
2313 {
2314 struct nvme_ctrl *ctrl =
2315 dev_get_drvdata(container_of(kobj, struct device, kobj));
2316 struct nvme_dev *dev = to_nvme_dev(ctrl);
2317
2318 if (a == &dev_attr_cmb.attr ||
2319 a == &dev_attr_cmbloc.attr ||
2320 a == &dev_attr_cmbsz.attr) {
2321 if (!dev->cmbsz)
2322 return 0;
2323 }
2324 if (a == &dev_attr_hmb.attr && !ctrl->hmpre)
2325 return 0;
2326
2327 return a->mode;
2328 }
2329
2330 static struct attribute *nvme_pci_attrs[] = {
2331 &dev_attr_cmb.attr,
2332 &dev_attr_cmbloc.attr,
2333 &dev_attr_cmbsz.attr,
2334 &dev_attr_hmb.attr,
2335 NULL,
2336 };
2337
2338 static const struct attribute_group nvme_pci_dev_attrs_group = {
2339 .attrs = nvme_pci_attrs,
2340 .is_visible = nvme_pci_attrs_are_visible,
2341 };
2342
2343 static const struct attribute_group *nvme_pci_dev_attr_groups[] = {
2344 &nvme_dev_attrs_group,
2345 &nvme_pci_dev_attrs_group,
2346 NULL,
2347 };
2348
2349 static void nvme_update_attrs(struct nvme_dev *dev)
2350 {
2351 sysfs_update_group(&dev->ctrl.device->kobj, &nvme_pci_dev_attrs_group);
2352 }
2353
2354 /*
2355 * nrirqs is the number of interrupts available for write and read
2356 * queues. The core already reserved an interrupt for the admin queue.
2357 */
2358 static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
2359 {
2360 struct nvme_dev *dev = affd->priv;
2361 unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;
2362
2363 /*
2364 * If there is no interrupt available for queues, ensure that
2365 * the default queue is set to 1. The affinity set size is
2366 * also set to one, but the irq core ignores it for this case.
2367 *
2368 * If only one interrupt is available or 'write_queues' == 0, combine
2369 * write and read queues.
2370 *
2371 * If 'write_queues' > 0, ensure it leaves room for at least one read
2372 * queue.
2373 */
2374 if (!nrirqs) {
2375 nrirqs = 1;
2376 nr_read_queues = 0;
2377 } else if (nrirqs == 1 || !nr_write_queues) {
2378 nr_read_queues = 0;
2379 } else if (nr_write_queues >= nrirqs) {
2380 nr_read_queues = 1;
2381 } else {
2382 nr_read_queues = nrirqs - nr_write_queues;
2383 }
2384
2385 dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
2386 affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
2387 dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
2388 affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
2389 affd->nr_sets = nr_read_queues ? 2 : 1;
2390 }
2391
2392 static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
2393 {
2394 struct pci_dev *pdev = to_pci_dev(dev->dev);
2395 struct irq_affinity affd = {
2396 .pre_vectors = 1,
2397 .calc_sets = nvme_calc_irq_sets,
2398 .priv = dev,
2399 };
2400 unsigned int irq_queues, poll_queues;
2401 unsigned int flags = PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY;
2402
2403 /*
2404 * Poll queues don't need interrupts, but we need at least one I/O queue
2405 * left over for non-polled I/O.
2406 */
2407 poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1);
2408 dev->io_queues[HCTX_TYPE_POLL] = poll_queues;
2409
2410 /*
2411 * Initialize for the single interrupt case, will be updated in
2412 * nvme_calc_irq_sets().
2413 */
2414 dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
2415 dev->io_queues[HCTX_TYPE_READ] = 0;
2416
2417 /*
2418 * We need interrupts for the admin queue and each non-polled I/O queue,
2419 * but some Apple controllers require all queues to use the first
2420 * vector.
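* For example, with nr_io_queues == 8 and poll_queues == 2 the code below
* requests 1 + (8 - 2) = 7 vectors, while NVME_QUIRK_SINGLE_VECTOR limits
* the request to a single vector.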
2421 */
2422 irq_queues = 1;
2423 if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
2424 irq_queues += (nr_io_queues - poll_queues);
2425 if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
2426 flags &= ~PCI_IRQ_MSI;
2427 return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, flags,
2428 &affd);
2429 }
2430
2431 static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
2432 {
2433 /*
2434 * If tags are shared with admin queue (Apple bug), then
2435 * make sure we only use one IO queue.
2436 */
2437 if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
2438 return 1;
2439 return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues;
2440 }
2441
2442 static int nvme_setup_io_queues(struct nvme_dev *dev)
2443 {
2444 struct nvme_queue *adminq = &dev->queues[0];
2445 struct pci_dev *pdev = to_pci_dev(dev->dev);
2446 unsigned int nr_io_queues;
2447 unsigned long size;
2448 int result;
2449
2450 /*
2451 * Sample the module parameters once at reset time so that we have
2452 * stable values to work with.
2453 */
2454 dev->nr_write_queues = write_queues;
2455 dev->nr_poll_queues = poll_queues;
2456
2457 nr_io_queues = dev->nr_allocated_queues - 1;
2458 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
2459 if (result < 0)
2460 return result;
2461
2462 if (nr_io_queues == 0)
2463 return 0;
2464
2465 /*
2466 * Free IRQ resources as soon as the NVMEQ_ENABLED bit transitions
2467 * from set to unset. If there is a window before the vector is truly
2468 * freed, pci_free_irq_vectors() jumping into this window will crash.
2469 * Take the lock to avoid racing with pci_free_irq_vectors() in the
2470 * nvme_dev_disable() path.
2471 */
2472 result = nvme_setup_io_queues_trylock(dev);
2473 if (result)
2474 return result;
2475 if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
2476 pci_free_irq(pdev, 0, adminq);
2477
2478 if (dev->cmb_use_sqes) {
2479 result = nvme_cmb_qdepth(dev, nr_io_queues,
2480 sizeof(struct nvme_command));
2481 if (result > 0) {
2482 dev->q_depth = result;
2483 dev->ctrl.sqsize = result - 1;
2484 } else {
2485 dev->cmb_use_sqes = false;
2486 }
2487 }
2488
2489 do {
2490 size = db_bar_size(dev, nr_io_queues);
2491 result = nvme_remap_bar(dev, size);
2492 if (!result)
2493 break;
2494 if (!--nr_io_queues) {
2495 result = -ENOMEM;
2496 goto out_unlock;
2497 }
2498 } while (1);
2499 adminq->q_db = dev->dbs;
2500
2501 retry:
2502 /* Deregister the admin queue's interrupt */
2503 if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
2504 pci_free_irq(pdev, 0, adminq);
2505
2506 /*
2507 * If we enabled MSI-X early because INTx is not available, disable it
2508 * again before setting up the full range we need.
2509 */
2510 pci_free_irq_vectors(pdev);
2511
2512 result = nvme_setup_irqs(dev, nr_io_queues);
2513 if (result <= 0) {
2514 result = -EIO;
2515 goto out_unlock;
2516 }
2517
2518 dev->num_vecs = result;
2519 result = max(result - 1, 1);
2520 dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
2521
2522 /*
2523 * Should investigate if there's a performance win from allocating
2524 * more queues than interrupt vectors; it might allow the submission
2525 * path to scale better, even if the receive path is limited by the
2526 * number of interrupts.
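* (As written, max_qid above allows one I/O queue per I/O vector plus the
* poll queues, which consume no vectors.)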
2527 */ 2528 result = queue_request_irq(adminq); 2529 if (result) 2530 goto out_unlock; 2531 set_bit(NVMEQ_ENABLED, &adminq->flags); 2532 mutex_unlock(&dev->shutdown_lock); 2533 2534 result = nvme_create_io_queues(dev); 2535 if (result || dev->online_queues < 2) 2536 return result; 2537 2538 if (dev->online_queues - 1 < dev->max_qid) { 2539 nr_io_queues = dev->online_queues - 1; 2540 nvme_delete_io_queues(dev); 2541 result = nvme_setup_io_queues_trylock(dev); 2542 if (result) 2543 return result; 2544 nvme_suspend_io_queues(dev); 2545 goto retry; 2546 } 2547 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", 2548 dev->io_queues[HCTX_TYPE_DEFAULT], 2549 dev->io_queues[HCTX_TYPE_READ], 2550 dev->io_queues[HCTX_TYPE_POLL]); 2551 return 0; 2552 out_unlock: 2553 mutex_unlock(&dev->shutdown_lock); 2554 return result; 2555 } 2556 2557 static enum rq_end_io_ret nvme_del_queue_end(struct request *req, 2558 blk_status_t error) 2559 { 2560 struct nvme_queue *nvmeq = req->end_io_data; 2561 2562 blk_mq_free_request(req); 2563 complete(&nvmeq->delete_done); 2564 return RQ_END_IO_NONE; 2565 } 2566 2567 static enum rq_end_io_ret nvme_del_cq_end(struct request *req, 2568 blk_status_t error) 2569 { 2570 struct nvme_queue *nvmeq = req->end_io_data; 2571 2572 if (error) 2573 set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 2574 2575 return nvme_del_queue_end(req, error); 2576 } 2577 2578 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) 2579 { 2580 struct request_queue *q = nvmeq->dev->ctrl.admin_q; 2581 struct request *req; 2582 struct nvme_command cmd = { }; 2583 2584 cmd.delete_queue.opcode = opcode; 2585 cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); 2586 2587 req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT); 2588 if (IS_ERR(req)) 2589 return PTR_ERR(req); 2590 nvme_init_request(req, &cmd); 2591 2592 if (opcode == nvme_admin_delete_cq) 2593 req->end_io = nvme_del_cq_end; 2594 else 2595 req->end_io = nvme_del_queue_end; 2596 req->end_io_data = nvmeq; 2597 2598 init_completion(&nvmeq->delete_done); 2599 blk_execute_rq_nowait(req, false); 2600 return 0; 2601 } 2602 2603 static bool __nvme_delete_io_queues(struct nvme_dev *dev, u8 opcode) 2604 { 2605 int nr_queues = dev->online_queues - 1, sent = 0; 2606 unsigned long timeout; 2607 2608 retry: 2609 timeout = NVME_ADMIN_TIMEOUT; 2610 while (nr_queues > 0) { 2611 if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) 2612 break; 2613 nr_queues--; 2614 sent++; 2615 } 2616 while (sent) { 2617 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; 2618 2619 timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, 2620 timeout); 2621 if (timeout == 0) 2622 return false; 2623 2624 sent--; 2625 if (nr_queues) 2626 goto retry; 2627 } 2628 return true; 2629 } 2630 2631 static void nvme_delete_io_queues(struct nvme_dev *dev) 2632 { 2633 if (__nvme_delete_io_queues(dev, nvme_admin_delete_sq)) 2634 __nvme_delete_io_queues(dev, nvme_admin_delete_cq); 2635 } 2636 2637 static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev) 2638 { 2639 if (dev->io_queues[HCTX_TYPE_POLL]) 2640 return 3; 2641 if (dev->io_queues[HCTX_TYPE_READ]) 2642 return 2; 2643 return 1; 2644 } 2645 2646 static bool nvme_pci_update_nr_queues(struct nvme_dev *dev) 2647 { 2648 if (!dev->ctrl.tagset) { 2649 nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, 2650 nvme_pci_nr_maps(dev), sizeof(struct nvme_iod)); 2651 return true; 2652 } 2653 2654 /* Give up if we are racing with nvme_dev_disable() */ 2655 if 
(!mutex_trylock(&dev->shutdown_lock)) 2656 return false; 2657 2658 /* Check if nvme_dev_disable() has been executed already */ 2659 if (!dev->online_queues) { 2660 mutex_unlock(&dev->shutdown_lock); 2661 return false; 2662 } 2663 2664 blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); 2665 /* free previously allocated queues that are no longer usable */ 2666 nvme_free_queues(dev, dev->online_queues); 2667 mutex_unlock(&dev->shutdown_lock); 2668 return true; 2669 } 2670 2671 static int nvme_pci_enable(struct nvme_dev *dev) 2672 { 2673 int result = -ENOMEM; 2674 struct pci_dev *pdev = to_pci_dev(dev->dev); 2675 unsigned int flags = PCI_IRQ_ALL_TYPES; 2676 2677 if (pci_enable_device_mem(pdev)) 2678 return result; 2679 2680 pci_set_master(pdev); 2681 2682 if (readl(dev->bar + NVME_REG_CSTS) == -1) { 2683 result = -ENODEV; 2684 goto disable; 2685 } 2686 2687 /* 2688 * Some devices and/or platforms don't advertise or work with INTx 2689 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll 2690 * adjust this later. 2691 */ 2692 if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI) 2693 flags &= ~PCI_IRQ_MSI; 2694 result = pci_alloc_irq_vectors(pdev, 1, 1, flags); 2695 if (result < 0) 2696 goto disable; 2697 2698 dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 2699 2700 dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, 2701 io_queue_depth); 2702 dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); 2703 dev->dbs = dev->bar + 4096; 2704 2705 /* 2706 * Some Apple controllers require a non-standard SQE size. 2707 * Interestingly they also seem to ignore the CC:IOSQES register 2708 * so we don't bother updating it here. 2709 */ 2710 if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES) 2711 dev->io_sqes = 7; 2712 else 2713 dev->io_sqes = NVME_NVM_IOSQES; 2714 2715 if (dev->ctrl.quirks & NVME_QUIRK_QDEPTH_ONE) { 2716 dev->q_depth = 2; 2717 } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && 2718 (pdev->device == 0xa821 || pdev->device == 0xa822) && 2719 NVME_CAP_MQES(dev->ctrl.cap) == 0) { 2720 dev->q_depth = 64; 2721 dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " 2722 "set queue depth=%u\n", dev->q_depth); 2723 } 2724 2725 /* 2726 * Controllers with the shared tags quirk need the IO queue to be 2727 * big enough so that we get 32 tags for the admin queue 2728 */ 2729 if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) && 2730 (dev->q_depth < (NVME_AQ_DEPTH + 2))) { 2731 dev->q_depth = NVME_AQ_DEPTH + 2; 2732 dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", 2733 dev->q_depth); 2734 } 2735 dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ 2736 2737 nvme_map_cmb(dev); 2738 2739 pci_save_state(pdev); 2740 2741 result = nvme_pci_configure_admin_queue(dev); 2742 if (result) 2743 goto free_irq; 2744 return result; 2745 2746 free_irq: 2747 pci_free_irq_vectors(pdev); 2748 disable: 2749 pci_disable_device(pdev); 2750 return result; 2751 } 2752 2753 static void nvme_dev_unmap(struct nvme_dev *dev) 2754 { 2755 if (dev->bar) 2756 iounmap(dev->bar); 2757 pci_release_mem_regions(to_pci_dev(dev->dev)); 2758 } 2759 2760 static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev) 2761 { 2762 struct pci_dev *pdev = to_pci_dev(dev->dev); 2763 u32 csts; 2764 2765 if (!pci_is_enabled(pdev) || !pci_device_is_present(pdev)) 2766 return true; 2767 if (pdev->error_state != pci_channel_io_normal) 2768 return true; 2769 2770 csts = readl(dev->bar + NVME_REG_CSTS); 2771 return (csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY); 2772 } 2773 2774 static void 
nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 2775 { 2776 enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl); 2777 struct pci_dev *pdev = to_pci_dev(dev->dev); 2778 bool dead; 2779 2780 mutex_lock(&dev->shutdown_lock); 2781 dead = nvme_pci_ctrl_is_dead(dev); 2782 if (state == NVME_CTRL_LIVE || state == NVME_CTRL_RESETTING) { 2783 if (pci_is_enabled(pdev)) 2784 nvme_start_freeze(&dev->ctrl); 2785 /* 2786 * Give the controller a chance to complete all entered requests 2787 * if doing a safe shutdown. 2788 */ 2789 if (!dead && shutdown) 2790 nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); 2791 } 2792 2793 nvme_quiesce_io_queues(&dev->ctrl); 2794 2795 if (!dead && dev->ctrl.queue_count > 0) { 2796 nvme_delete_io_queues(dev); 2797 nvme_disable_ctrl(&dev->ctrl, shutdown); 2798 nvme_poll_irqdisable(&dev->queues[0]); 2799 } 2800 nvme_suspend_io_queues(dev); 2801 nvme_suspend_queue(dev, 0); 2802 pci_free_irq_vectors(pdev); 2803 if (pci_is_enabled(pdev)) 2804 pci_disable_device(pdev); 2805 nvme_reap_pending_cqes(dev); 2806 2807 nvme_cancel_tagset(&dev->ctrl); 2808 nvme_cancel_admin_tagset(&dev->ctrl); 2809 2810 /* 2811 * The driver will not be starting up queues again if shutting down so 2812 * must flush all entered requests to their failed completion to avoid 2813 * deadlocking blk-mq hot-cpu notifier. 2814 */ 2815 if (shutdown) { 2816 nvme_unquiesce_io_queues(&dev->ctrl); 2817 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) 2818 nvme_unquiesce_admin_queue(&dev->ctrl); 2819 } 2820 mutex_unlock(&dev->shutdown_lock); 2821 } 2822 2823 static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) 2824 { 2825 if (!nvme_wait_reset(&dev->ctrl)) 2826 return -EBUSY; 2827 nvme_dev_disable(dev, shutdown); 2828 return 0; 2829 } 2830 2831 static int nvme_setup_prp_pools(struct nvme_dev *dev) 2832 { 2833 size_t small_align = 256; 2834 2835 dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, 2836 NVME_CTRL_PAGE_SIZE, 2837 NVME_CTRL_PAGE_SIZE, 0); 2838 if (!dev->prp_page_pool) 2839 return -ENOMEM; 2840 2841 if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512) 2842 small_align = 512; 2843 2844 /* Optimisation for I/Os between 4k and 128k */ 2845 dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, 2846 256, small_align, 0); 2847 if (!dev->prp_small_pool) { 2848 dma_pool_destroy(dev->prp_page_pool); 2849 return -ENOMEM; 2850 } 2851 return 0; 2852 } 2853 2854 static void nvme_release_prp_pools(struct nvme_dev *dev) 2855 { 2856 dma_pool_destroy(dev->prp_page_pool); 2857 dma_pool_destroy(dev->prp_small_pool); 2858 } 2859 2860 static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev) 2861 { 2862 size_t meta_size = sizeof(struct scatterlist) * (NVME_MAX_META_SEGS + 1); 2863 size_t alloc_size = sizeof(struct scatterlist) * NVME_MAX_SEGS; 2864 2865 dev->iod_mempool = mempool_create_node(1, 2866 mempool_kmalloc, mempool_kfree, 2867 (void *)alloc_size, GFP_KERNEL, 2868 dev_to_node(dev->dev)); 2869 if (!dev->iod_mempool) 2870 return -ENOMEM; 2871 2872 dev->iod_meta_mempool = mempool_create_node(1, 2873 mempool_kmalloc, mempool_kfree, 2874 (void *)meta_size, GFP_KERNEL, 2875 dev_to_node(dev->dev)); 2876 if (!dev->iod_meta_mempool) 2877 goto free; 2878 2879 return 0; 2880 free: 2881 mempool_destroy(dev->iod_mempool); 2882 return -ENOMEM; 2883 } 2884 2885 static void nvme_free_tagset(struct nvme_dev *dev) 2886 { 2887 if (dev->tagset.tags) 2888 nvme_remove_io_tag_set(&dev->ctrl); 2889 dev->ctrl.tagset = NULL; 2890 } 2891 2892 /* pairs with 
nvme_pci_alloc_dev */
2893 static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
2894 {
2895 struct nvme_dev *dev = to_nvme_dev(ctrl);
2896
2897 nvme_free_tagset(dev);
2898 put_device(dev->dev);
2899 kfree(dev->queues);
2900 kfree(dev);
2901 }
2902
2903 static void nvme_reset_work(struct work_struct *work)
2904 {
2905 struct nvme_dev *dev =
2906 container_of(work, struct nvme_dev, ctrl.reset_work);
2907 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
2908 int result;
2909
2910 if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) {
2911 dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
2912 dev->ctrl.state);
2913 result = -ENODEV;
2914 goto out;
2915 }
2916
2917 /*
2918 * If we're called to reset a live controller, first shut it down
2919 * before moving on.
2920 */
2921 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
2922 nvme_dev_disable(dev, false);
2923 nvme_sync_queues(&dev->ctrl);
2924
2925 mutex_lock(&dev->shutdown_lock);
2926 result = nvme_pci_enable(dev);
2927 if (result)
2928 goto out_unlock;
2929 nvme_unquiesce_admin_queue(&dev->ctrl);
2930 mutex_unlock(&dev->shutdown_lock);
2931
2932 /*
2933 * Use the CONNECTING state, introduced by the nvme-fc/rdma transports,
2934 * to mark the initializing procedure here.
2935 */
2936 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
2937 dev_warn(dev->ctrl.device,
2938 "failed to mark controller CONNECTING\n");
2939 result = -EBUSY;
2940 goto out;
2941 }
2942
2943 result = nvme_init_ctrl_finish(&dev->ctrl, was_suspend);
2944 if (result)
2945 goto out;
2946
2947 if (nvme_ctrl_meta_sgl_supported(&dev->ctrl))
2948 dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS;
2949 else
2950 dev->ctrl.max_integrity_segments = 1;
2951
2952 nvme_dbbuf_dma_alloc(dev);
2953
2954 result = nvme_setup_host_mem(dev);
2955 if (result < 0)
2956 goto out;
2957
2958 result = nvme_setup_io_queues(dev);
2959 if (result)
2960 goto out;
2961
2962 /*
2963 * Freeze and update the number of I/O queues as those might have
2964 * changed. If there are no I/O queues left after this reset, keep the
2965 * controller around but remove all namespaces.
2966 */
2967 if (dev->online_queues > 1) {
2968 nvme_dbbuf_set(dev);
2969 nvme_unquiesce_io_queues(&dev->ctrl);
2970 nvme_wait_freeze(&dev->ctrl);
2971 if (!nvme_pci_update_nr_queues(dev))
2972 goto out;
2973 nvme_unfreeze(&dev->ctrl);
2974 } else {
2975 dev_warn(dev->ctrl.device, "IO queues lost\n");
2976 nvme_mark_namespaces_dead(&dev->ctrl);
2977 nvme_unquiesce_io_queues(&dev->ctrl);
2978 nvme_remove_namespaces(&dev->ctrl);
2979 nvme_free_tagset(dev);
2980 }
2981
2982 /*
2983 * If only the admin queue is alive, keep it to allow further
2984 * investigation or recovery.
2985 */
2986 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
2987 dev_warn(dev->ctrl.device,
2988 "failed to mark controller live state\n");
2989 result = -ENODEV;
2990 goto out;
2991 }
2992
2993 nvme_start_ctrl(&dev->ctrl);
2994 return;
2995
2996 out_unlock:
2997 mutex_unlock(&dev->shutdown_lock);
2998 out:
2999 /*
3000 * Set state to deleting now to avoid blocking nvme_wait_reset(), which
3001 * may be holding this pci_dev's device lock.
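* DELETING is a terminal state for nvme_wait_reset(), so it can give up
* and return instead of sleeping forever.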
3002 */ 3003 dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n", 3004 result); 3005 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 3006 nvme_dev_disable(dev, true); 3007 nvme_sync_queues(&dev->ctrl); 3008 nvme_mark_namespaces_dead(&dev->ctrl); 3009 nvme_unquiesce_io_queues(&dev->ctrl); 3010 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); 3011 } 3012 3013 static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) 3014 { 3015 *val = readl(to_nvme_dev(ctrl)->bar + off); 3016 return 0; 3017 } 3018 3019 static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) 3020 { 3021 writel(val, to_nvme_dev(ctrl)->bar + off); 3022 return 0; 3023 } 3024 3025 static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) 3026 { 3027 *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off); 3028 return 0; 3029 } 3030 3031 static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) 3032 { 3033 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); 3034 3035 return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); 3036 } 3037 3038 static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl) 3039 { 3040 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); 3041 struct nvme_subsystem *subsys = ctrl->subsys; 3042 3043 dev_err(ctrl->device, 3044 "VID:DID %04x:%04x model:%.*s firmware:%.*s\n", 3045 pdev->vendor, pdev->device, 3046 nvme_strlen(subsys->model, sizeof(subsys->model)), 3047 subsys->model, nvme_strlen(subsys->firmware_rev, 3048 sizeof(subsys->firmware_rev)), 3049 subsys->firmware_rev); 3050 } 3051 3052 static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl) 3053 { 3054 struct nvme_dev *dev = to_nvme_dev(ctrl); 3055 3056 return dma_pci_p2pdma_supported(dev->dev); 3057 } 3058 3059 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 3060 .name = "pcie", 3061 .module = THIS_MODULE, 3062 .flags = NVME_F_METADATA_SUPPORTED, 3063 .dev_attr_groups = nvme_pci_dev_attr_groups, 3064 .reg_read32 = nvme_pci_reg_read32, 3065 .reg_write32 = nvme_pci_reg_write32, 3066 .reg_read64 = nvme_pci_reg_read64, 3067 .free_ctrl = nvme_pci_free_ctrl, 3068 .submit_async_event = nvme_pci_submit_async_event, 3069 .subsystem_reset = nvme_pci_subsystem_reset, 3070 .get_address = nvme_pci_get_address, 3071 .print_device_info = nvme_pci_print_device_info, 3072 .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma, 3073 }; 3074 3075 static int nvme_dev_map(struct nvme_dev *dev) 3076 { 3077 struct pci_dev *pdev = to_pci_dev(dev->dev); 3078 3079 if (pci_request_mem_regions(pdev, "nvme")) 3080 return -ENODEV; 3081 3082 if (nvme_remap_bar(dev, NVME_REG_DBS + 4096)) 3083 goto release; 3084 3085 return 0; 3086 release: 3087 pci_release_mem_regions(pdev); 3088 return -ENODEV; 3089 } 3090 3091 static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) 3092 { 3093 if (pdev->vendor == 0x144d && pdev->device == 0xa802) { 3094 /* 3095 * Several Samsung devices seem to drop off the PCIe bus 3096 * randomly when APST is on and uses the deepest sleep state. 3097 * This has been observed on a Samsung "SM951 NVMe SAMSUNG 3098 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD 3099 * 950 PRO 256GB", but it seems to be restricted to two Dell 3100 * laptops. 
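* (NVME_QUIRK_NO_DEEPEST_PS keeps APST enabled but leaves the deepest
* power state out of the transition table the driver programs.)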
3101 */
3102 if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
3103 (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
3104 dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
3105 return NVME_QUIRK_NO_DEEPEST_PS;
3106 } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
3107 /*
3108 * Samsung SSD 960 EVO drops off the PCIe bus after system
3109 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
3110 * within a few minutes after bootup on a Coffee Lake board,
3111 * ASUS PRIME Z370-A.
3112 */
3113 if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
3114 (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
3115 dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
3116 return NVME_QUIRK_NO_APST;
3117 } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 ||
3118 pdev->device == 0xa808 || pdev->device == 0xa809)) ||
3119 (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) {
3120 /*
3121 * Force host managed nvme power settings for lowest idle power
3122 * with quick resume latency on Samsung and Toshiba SSDs, based
3123 * on the suspend behavior observed on a Coffee Lake board for
3124 * the LENOVO C640.
3125 */
3126 if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
3127 dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
3128 return NVME_QUIRK_SIMPLE_SUSPEND;
3129 } else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 ||
3130 pdev->device == 0x500f)) {
3131 /*
3132 * Exclude some Kingston NV1 and A2000 devices from
3133 * NVME_QUIRK_SIMPLE_SUSPEND. Do a full suspend to save a
3134 * lot of energy with s2idle sleep on some TUXEDO platforms.
3135 */
3136 if (dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
3137 dmi_match(DMI_BOARD_NAME, "NS5x_7xAU") ||
3138 dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") ||
3139 dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"))
3140 return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
3141 } else if (pdev->vendor == 0x144d && pdev->device == 0xa80d) {
3142 /*
3143 * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND
3144 * because of high power consumption (> 2 Watt) in s2idle
3145 * sleep. Only some boards with an Intel CPU are affected.
3146 */
3147 if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
3148 dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
3149 dmi_match(DMI_BOARD_NAME, "GXxMRXx") ||
3150 dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
3151 dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
3152 dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
3153 return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
3154 }
3155
3156 /*
3157 * NVMe SSD drops off the PCIe bus after system idle
3158 * for 10 hours on a Lenovo N60z board.
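* (Unlike NVME_QUIRK_NO_DEEPEST_PS above, NVME_QUIRK_NO_APST turns off
* autonomous power state transitions altogether.)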
3159 */
3160 if (dmi_match(DMI_BOARD_NAME, "LXKT-ZXEG-N6"))
3161 return NVME_QUIRK_NO_APST;
3162
3163 return 0;
3164 }
3165
3166 static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
3167 const struct pci_device_id *id)
3168 {
3169 unsigned long quirks = id->driver_data;
3170 int node = dev_to_node(&pdev->dev);
3171 struct nvme_dev *dev;
3172 int ret = -ENOMEM;
3173
3174 dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
3175 if (!dev)
3176 return ERR_PTR(-ENOMEM);
3177 INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
3178 mutex_init(&dev->shutdown_lock);
3179
3180 dev->nr_write_queues = write_queues;
3181 dev->nr_poll_queues = poll_queues;
3182 dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1;
3183 dev->queues = kcalloc_node(dev->nr_allocated_queues,
3184 sizeof(struct nvme_queue), GFP_KERNEL, node);
3185 if (!dev->queues)
3186 goto out_free_dev;
3187
3188 dev->dev = get_device(&pdev->dev);
3189
3190 quirks |= check_vendor_combination_bug(pdev);
3191 if (!noacpi &&
3192 !(quirks & NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND) &&
3193 acpi_storage_d3(&pdev->dev)) {
3194 /*
3195 * Some systems use a BIOS workaround to ask for D3 on
3196 * platforms that support kernel-managed suspend.
3197 */
3198 dev_info(&pdev->dev,
3199 "platform quirk: setting simple suspend\n");
3200 quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
3201 }
3202 ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
3203 quirks);
3204 if (ret)
3205 goto out_put_device;
3206
3207 if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
3208 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
3209 else
3210 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3211 dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1);
3212 dma_set_max_seg_size(&pdev->dev, 0xffffffff);
3213
3214 /*
3215 * Limit the max command size to prevent iod->sg allocations going
3216 * over a single page.
3217 */
3218 dev->ctrl.max_hw_sectors = min_t(u32,
3219 NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9);
3220 dev->ctrl.max_segments = NVME_MAX_SEGS;
3221 dev->ctrl.max_integrity_segments = 1;
3222 return dev;
3223
3224 out_put_device:
3225 put_device(dev->dev);
3226 kfree(dev->queues);
3227 out_free_dev:
3228 kfree(dev);
3229 return ERR_PTR(ret);
3230 }
3231
3232 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3233 {
3234 struct nvme_dev *dev;
3235 int result = -ENOMEM;
3236
3237 dev = nvme_pci_alloc_dev(pdev, id);
3238 if (IS_ERR(dev))
3239 return PTR_ERR(dev);
3240
3241 result = nvme_add_ctrl(&dev->ctrl);
3242 if (result)
3243 goto out_put_ctrl;
3244
3245 result = nvme_dev_map(dev);
3246 if (result)
3247 goto out_uninit_ctrl;
3248
3249 result = nvme_setup_prp_pools(dev);
3250 if (result)
3251 goto out_dev_unmap;
3252
3253 result = nvme_pci_alloc_iod_mempool(dev);
3254 if (result)
3255 goto out_release_prp_pools;
3256
3257 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
3258
3259 result = nvme_pci_enable(dev);
3260 if (result)
3261 goto out_release_iod_mempool;
3262
3263 result = nvme_alloc_admin_tag_set(&dev->ctrl, &dev->admin_tagset,
3264 &nvme_mq_admin_ops, sizeof(struct nvme_iod));
3265 if (result)
3266 goto out_disable;
3267
3268 /*
3269 * Mark the controller as connecting before sending admin commands to
3270 * allow the timeout handler to do the right thing.
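* (While CONNECTING, a timed-out admin command makes nvme_timeout() disable
* the controller and fail the request instead of scheduling another reset.)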
3271 */ 3272 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { 3273 dev_warn(dev->ctrl.device, 3274 "failed to mark controller CONNECTING\n"); 3275 result = -EBUSY; 3276 goto out_disable; 3277 } 3278 3279 result = nvme_init_ctrl_finish(&dev->ctrl, false); 3280 if (result) 3281 goto out_disable; 3282 3283 if (nvme_ctrl_meta_sgl_supported(&dev->ctrl)) 3284 dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS; 3285 else 3286 dev->ctrl.max_integrity_segments = 1; 3287 3288 nvme_dbbuf_dma_alloc(dev); 3289 3290 result = nvme_setup_host_mem(dev); 3291 if (result < 0) 3292 goto out_disable; 3293 3294 result = nvme_setup_io_queues(dev); 3295 if (result) 3296 goto out_disable; 3297 3298 if (dev->online_queues > 1) { 3299 nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, 3300 nvme_pci_nr_maps(dev), sizeof(struct nvme_iod)); 3301 nvme_dbbuf_set(dev); 3302 } 3303 3304 if (!dev->ctrl.tagset) 3305 dev_warn(dev->ctrl.device, "IO queues not created\n"); 3306 3307 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { 3308 dev_warn(dev->ctrl.device, 3309 "failed to mark controller live state\n"); 3310 result = -ENODEV; 3311 goto out_disable; 3312 } 3313 3314 pci_set_drvdata(pdev, dev); 3315 3316 nvme_start_ctrl(&dev->ctrl); 3317 nvme_put_ctrl(&dev->ctrl); 3318 flush_work(&dev->ctrl.scan_work); 3319 return 0; 3320 3321 out_disable: 3322 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 3323 nvme_dev_disable(dev, true); 3324 nvme_free_host_mem(dev); 3325 nvme_dev_remove_admin(dev); 3326 nvme_dbbuf_dma_free(dev); 3327 nvme_free_queues(dev, 0); 3328 out_release_iod_mempool: 3329 mempool_destroy(dev->iod_mempool); 3330 mempool_destroy(dev->iod_meta_mempool); 3331 out_release_prp_pools: 3332 nvme_release_prp_pools(dev); 3333 out_dev_unmap: 3334 nvme_dev_unmap(dev); 3335 out_uninit_ctrl: 3336 nvme_uninit_ctrl(&dev->ctrl); 3337 out_put_ctrl: 3338 nvme_put_ctrl(&dev->ctrl); 3339 return result; 3340 } 3341 3342 static void nvme_reset_prepare(struct pci_dev *pdev) 3343 { 3344 struct nvme_dev *dev = pci_get_drvdata(pdev); 3345 3346 /* 3347 * We don't need to check the return value from waiting for the reset 3348 * state as pci_dev device lock is held, making it impossible to race 3349 * with ->remove(). 3350 */ 3351 nvme_disable_prepare_reset(dev, false); 3352 nvme_sync_queues(&dev->ctrl); 3353 } 3354 3355 static void nvme_reset_done(struct pci_dev *pdev) 3356 { 3357 struct nvme_dev *dev = pci_get_drvdata(pdev); 3358 3359 if (!nvme_try_sched_reset(&dev->ctrl)) 3360 flush_work(&dev->ctrl.reset_work); 3361 } 3362 3363 static void nvme_shutdown(struct pci_dev *pdev) 3364 { 3365 struct nvme_dev *dev = pci_get_drvdata(pdev); 3366 3367 nvme_disable_prepare_reset(dev, true); 3368 } 3369 3370 /* 3371 * The driver's remove may be called on a device in a partially initialized 3372 * state. This function must not have any dependencies on the device state in 3373 * order to proceed. 
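* (Surprise hot-removal is the typical case: pci_device_is_present() below
* fails and the device is disabled immediately.)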
3374 */ 3375 static void nvme_remove(struct pci_dev *pdev) 3376 { 3377 struct nvme_dev *dev = pci_get_drvdata(pdev); 3378 3379 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 3380 pci_set_drvdata(pdev, NULL); 3381 3382 if (!pci_device_is_present(pdev)) { 3383 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); 3384 nvme_dev_disable(dev, true); 3385 } 3386 3387 flush_work(&dev->ctrl.reset_work); 3388 nvme_stop_ctrl(&dev->ctrl); 3389 nvme_remove_namespaces(&dev->ctrl); 3390 nvme_dev_disable(dev, true); 3391 nvme_free_host_mem(dev); 3392 nvme_dev_remove_admin(dev); 3393 nvme_dbbuf_dma_free(dev); 3394 nvme_free_queues(dev, 0); 3395 mempool_destroy(dev->iod_mempool); 3396 mempool_destroy(dev->iod_meta_mempool); 3397 nvme_release_prp_pools(dev); 3398 nvme_dev_unmap(dev); 3399 nvme_uninit_ctrl(&dev->ctrl); 3400 } 3401 3402 #ifdef CONFIG_PM_SLEEP 3403 static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps) 3404 { 3405 return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps); 3406 } 3407 3408 static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps) 3409 { 3410 return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL); 3411 } 3412 3413 static int nvme_resume(struct device *dev) 3414 { 3415 struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 3416 struct nvme_ctrl *ctrl = &ndev->ctrl; 3417 3418 if (ndev->last_ps == U32_MAX || 3419 nvme_set_power_state(ctrl, ndev->last_ps) != 0) 3420 goto reset; 3421 if (ctrl->hmpre && nvme_setup_host_mem(ndev)) 3422 goto reset; 3423 3424 return 0; 3425 reset: 3426 return nvme_try_sched_reset(ctrl); 3427 } 3428 3429 static int nvme_suspend(struct device *dev) 3430 { 3431 struct pci_dev *pdev = to_pci_dev(dev); 3432 struct nvme_dev *ndev = pci_get_drvdata(pdev); 3433 struct nvme_ctrl *ctrl = &ndev->ctrl; 3434 int ret = -EBUSY; 3435 3436 ndev->last_ps = U32_MAX; 3437 3438 /* 3439 * The platform does not remove power for a kernel managed suspend so 3440 * use host managed nvme power settings for lowest idle power if 3441 * possible. This should have quicker resume latency than a full device 3442 * shutdown. But if the firmware is involved after the suspend or the 3443 * device does not support any non-default power states, shut down the 3444 * device fully. 3445 * 3446 * If ASPM is not enabled for the device, shut down the device and allow 3447 * the PCI bus layer to put it into D3 in order to take the PCIe link 3448 * down, so as to allow the platform to achieve its minimum low-power 3449 * state (which may not be possible if the link is up). 3450 */ 3451 if (pm_suspend_via_firmware() || !ctrl->npss || 3452 !pcie_aspm_enabled(pdev) || 3453 (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) 3454 return nvme_disable_prepare_reset(ndev, true); 3455 3456 nvme_start_freeze(ctrl); 3457 nvme_wait_freeze(ctrl); 3458 nvme_sync_queues(ctrl); 3459 3460 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) 3461 goto unfreeze; 3462 3463 /* 3464 * Host memory access may not be successful in a system suspend state, 3465 * but the specification allows the controller to access memory in a 3466 * non-operational power state. 3467 */ 3468 if (ndev->hmb) { 3469 ret = nvme_set_host_mem(ndev, 0); 3470 if (ret < 0) 3471 goto unfreeze; 3472 } 3473 3474 ret = nvme_get_power_state(ctrl, &ndev->last_ps); 3475 if (ret < 0) 3476 goto unfreeze; 3477 3478 /* 3479 * A saved state prevents pci pm from generically controlling the 3480 * device's power. If we're using protocol specific settings, we don't 3481 * want pci interfering. 
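* (pci_save_state() below sets pdev->state_saved, which tells the PCI PM
* core to skip its default power-state handling during suspend.)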
3482 */ 3483 pci_save_state(pdev); 3484 3485 ret = nvme_set_power_state(ctrl, ctrl->npss); 3486 if (ret < 0) 3487 goto unfreeze; 3488 3489 if (ret) { 3490 /* discard the saved state */ 3491 pci_load_saved_state(pdev, NULL); 3492 3493 /* 3494 * Clearing npss forces a controller reset on resume. The 3495 * correct value will be rediscovered then. 3496 */ 3497 ret = nvme_disable_prepare_reset(ndev, true); 3498 ctrl->npss = 0; 3499 } 3500 unfreeze: 3501 nvme_unfreeze(ctrl); 3502 return ret; 3503 } 3504 3505 static int nvme_simple_suspend(struct device *dev) 3506 { 3507 struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 3508 3509 return nvme_disable_prepare_reset(ndev, true); 3510 } 3511 3512 static int nvme_simple_resume(struct device *dev) 3513 { 3514 struct pci_dev *pdev = to_pci_dev(dev); 3515 struct nvme_dev *ndev = pci_get_drvdata(pdev); 3516 3517 return nvme_try_sched_reset(&ndev->ctrl); 3518 } 3519 3520 static const struct dev_pm_ops nvme_dev_pm_ops = { 3521 .suspend = nvme_suspend, 3522 .resume = nvme_resume, 3523 .freeze = nvme_simple_suspend, 3524 .thaw = nvme_simple_resume, 3525 .poweroff = nvme_simple_suspend, 3526 .restore = nvme_simple_resume, 3527 }; 3528 #endif /* CONFIG_PM_SLEEP */ 3529 3530 static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, 3531 pci_channel_state_t state) 3532 { 3533 struct nvme_dev *dev = pci_get_drvdata(pdev); 3534 3535 /* 3536 * A frozen channel requires a reset. When detected, this method will 3537 * shutdown the controller to quiesce. The controller will be restarted 3538 * after the slot reset through driver's slot_reset callback. 3539 */ 3540 switch (state) { 3541 case pci_channel_io_normal: 3542 return PCI_ERS_RESULT_CAN_RECOVER; 3543 case pci_channel_io_frozen: 3544 dev_warn(dev->ctrl.device, 3545 "frozen state error detected, reset controller\n"); 3546 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) { 3547 nvme_dev_disable(dev, true); 3548 return PCI_ERS_RESULT_DISCONNECT; 3549 } 3550 nvme_dev_disable(dev, false); 3551 return PCI_ERS_RESULT_NEED_RESET; 3552 case pci_channel_io_perm_failure: 3553 dev_warn(dev->ctrl.device, 3554 "failure state error detected, request disconnect\n"); 3555 return PCI_ERS_RESULT_DISCONNECT; 3556 } 3557 return PCI_ERS_RESULT_NEED_RESET; 3558 } 3559 3560 static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) 3561 { 3562 struct nvme_dev *dev = pci_get_drvdata(pdev); 3563 3564 dev_info(dev->ctrl.device, "restart after slot reset\n"); 3565 pci_restore_state(pdev); 3566 if (!nvme_try_sched_reset(&dev->ctrl)) 3567 nvme_unquiesce_io_queues(&dev->ctrl); 3568 return PCI_ERS_RESULT_RECOVERED; 3569 } 3570 3571 static void nvme_error_resume(struct pci_dev *pdev) 3572 { 3573 struct nvme_dev *dev = pci_get_drvdata(pdev); 3574 3575 flush_work(&dev->ctrl.reset_work); 3576 } 3577 3578 static const struct pci_error_handlers nvme_err_handler = { 3579 .error_detected = nvme_error_detected, 3580 .slot_reset = nvme_slot_reset, 3581 .resume = nvme_error_resume, 3582 .reset_prepare = nvme_reset_prepare, 3583 .reset_done = nvme_reset_done, 3584 }; 3585 3586 static const struct pci_device_id nvme_id_table[] = { 3587 { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */ 3588 .driver_data = NVME_QUIRK_STRIPE_SIZE | 3589 NVME_QUIRK_DEALLOCATE_ZEROES, }, 3590 { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */ 3591 .driver_data = NVME_QUIRK_STRIPE_SIZE | 3592 NVME_QUIRK_DEALLOCATE_ZEROES, }, 3593 { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */ 3594 .driver_data = NVME_QUIRK_STRIPE_SIZE | 3595 
NVME_QUIRK_IGNORE_DEV_SUBNQN | 3596 NVME_QUIRK_BOGUS_NID, }, 3597 { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */ 3598 .driver_data = NVME_QUIRK_STRIPE_SIZE, }, 3599 { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ 3600 .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 3601 NVME_QUIRK_MEDIUM_PRIO_SQ | 3602 NVME_QUIRK_NO_TEMP_THRESH_CHANGE | 3603 NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3604 { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ 3605 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3606 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ 3607 .driver_data = NVME_QUIRK_IDENTIFY_CNS | 3608 NVME_QUIRK_DISABLE_WRITE_ZEROES | 3609 NVME_QUIRK_BOGUS_NID, }, 3610 { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */ 3611 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3612 { PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */ 3613 .driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, }, 3614 { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */ 3615 .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 3616 NVME_QUIRK_BOGUS_NID, }, 3617 { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ 3618 .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | 3619 NVME_QUIRK_BOGUS_NID, }, 3620 { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ 3621 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 3622 NVME_QUIRK_NO_NS_DESC_LIST, }, 3623 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ 3624 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3625 { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ 3626 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3627 { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ 3628 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3629 { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ 3630 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3631 { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ 3632 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 3633 NVME_QUIRK_DISABLE_WRITE_ZEROES| 3634 NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3635 { PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */ 3636 .driver_data = NVME_QUIRK_BROKEN_MSI }, 3637 { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */ 3638 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3639 { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ 3640 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | 3641 NVME_QUIRK_BOGUS_NID, }, 3642 { PCI_DEVICE(0x1987, 0x5019), /* phison E19 */ 3643 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3644 { PCI_DEVICE(0x1987, 0x5021), /* Phison E21 */ 3645 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3646 { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ 3647 .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | 3648 NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3649 { PCI_DEVICE(0x1cc1, 0x33f8), /* ADATA IM2P33F8ABR1 1 TB */ 3650 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3651 { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ 3652 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | 3653 NVME_QUIRK_BOGUS_NID, }, 3654 { PCI_DEVICE(0x10ec, 0x5763), /* ADATA SX6000PNP */ 3655 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3656 { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ 3657 .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 3658 NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3659 { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */ 3660 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN }, 3661 { PCI_DEVICE(0x1344, 0x6001), /* Micron Nitro NVMe */ 3662 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3663 { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ 3664 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3665 { 
PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */ 3666 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3667 { PCI_DEVICE(0x1c5c, 0x1D59), /* SK Hynix BC901 */ 3668 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3669 { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */ 3670 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3671 { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */ 3672 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3673 { PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */ 3674 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES | 3675 NVME_QUIRK_BOGUS_NID, }, 3676 { PCI_DEVICE(0x144d, 0xa809), /* Samsung MZALQ256HBJD 256G */ 3677 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3678 { PCI_DEVICE(0x144d, 0xa802), /* Samsung SM953 */ 3679 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3680 { PCI_DEVICE(0x1cc4, 0x6303), /* UMIS RPJTJ512MGE1QDY 512G */ 3681 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3682 { PCI_DEVICE(0x1cc4, 0x6302), /* UMIS RPJTJ256MGE1QDY 256G */ 3683 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3684 { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */ 3685 .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, 3686 { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ 3687 .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, 3688 { PCI_DEVICE(0x2646, 0x5013), /* Kingston KC3000, Kingston FURY Renegade */ 3689 .driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, }, 3690 { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */ 3691 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3692 { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */ 3693 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3694 { PCI_DEVICE(0x2646, 0x501A), /* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */ 3695 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3696 { PCI_DEVICE(0x2646, 0x501B), /* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */ 3697 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3698 { PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */ 3699 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3700 { PCI_DEVICE(0x1f40, 0x1202), /* Netac Technologies Co. NV3000 NVMe SSD */ 3701 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3702 { PCI_DEVICE(0x1f40, 0x5236), /* Netac Technologies Co. 
NV7000 NVMe SSD */ 3703 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3704 { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */ 3705 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3706 { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */ 3707 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3708 { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */ 3709 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3710 { PCI_DEVICE(0x1e4B, 0x1602), /* MAXIO MAP1602 */ 3711 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3712 { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */ 3713 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3714 { PCI_DEVICE(0x1dbe, 0x5216), /* Acer/INNOGRIT FA100/5216 NVMe SSD */ 3715 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3716 { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */ 3717 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3718 { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */ 3719 .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, 3720 { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */ 3721 .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, 3722 { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */ 3723 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3724 { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */ 3725 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3726 { PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */ 3727 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3728 { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */ 3729 .driver_data = NVME_QUIRK_BOGUS_NID | 3730 NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3731 { PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */ 3732 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3733 { PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G */ 3734 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3735 { PCI_DEVICE(0x10ec, 0x5765), /* TEAMGROUP MP33 2TB SSD */ 3736 .driver_data = NVME_QUIRK_BOGUS_NID, }, 3737 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), 3738 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3739 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), 3740 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3741 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061), 3742 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3743 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00), 3744 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3745 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01), 3746 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3747 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02), 3748 .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 3749 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), 3750 /* 3751 * Fix for the Apple controller found in the MacBook8,1 and 3752 * some MacBook7,1 to avoid controller resets and data loss. 
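* (NVME_QUIRK_SINGLE_VECTOR makes nvme_setup_irqs() request one interrupt
* for all queues, and NVME_QUIRK_QDEPTH_ONE clamps q_depth to 2 in
* nvme_pci_enable(), i.e. a single usable submission queue entry.)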
3753 */ 3754 .driver_data = NVME_QUIRK_SINGLE_VECTOR | 3755 NVME_QUIRK_QDEPTH_ONE }, 3756 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, 3757 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005), 3758 .driver_data = NVME_QUIRK_SINGLE_VECTOR | 3759 NVME_QUIRK_128_BYTES_SQES | 3760 NVME_QUIRK_SHARED_TAGS | 3761 NVME_QUIRK_SKIP_CID_GEN | 3762 NVME_QUIRK_IDENTIFY_CNS }, 3763 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 3764 { 0, } 3765 }; 3766 MODULE_DEVICE_TABLE(pci, nvme_id_table); 3767 3768 static struct pci_driver nvme_driver = { 3769 .name = "nvme", 3770 .id_table = nvme_id_table, 3771 .probe = nvme_probe, 3772 .remove = nvme_remove, 3773 .shutdown = nvme_shutdown, 3774 .driver = { 3775 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 3776 #ifdef CONFIG_PM_SLEEP 3777 .pm = &nvme_dev_pm_ops, 3778 #endif 3779 }, 3780 .sriov_configure = pci_sriov_configure_simple, 3781 .err_handler = &nvme_err_handler, 3782 }; 3783 3784 static int __init nvme_init(void) 3785 { 3786 BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64); 3787 BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); 3788 BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); 3789 BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2); 3790 BUILD_BUG_ON(NVME_MAX_SEGS > SGES_PER_PAGE); 3791 BUILD_BUG_ON(sizeof(struct scatterlist) * NVME_MAX_SEGS > PAGE_SIZE); 3792 BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_ALLOCATIONS); 3793 3794 return pci_register_driver(&nvme_driver); 3795 } 3796 3797 static void __exit nvme_exit(void) 3798 { 3799 pci_unregister_driver(&nvme_driver); 3800 flush_workqueue(nvme_wq); 3801 } 3802 3803 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); 3804 MODULE_LICENSE("GPL"); 3805 MODULE_VERSION("1.0"); 3806 MODULE_DESCRIPTION("NVMe host PCIe transport driver"); 3807 module_init(nvme_init); 3808 module_exit(nvme_exit); 3809