Lines matching "blk-ctrl"
1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2011-2014, Intel Corporation.
10 #include <linux/blk-mq-dma.h>
11 #include <linux/blk-integrity.h>
25 #include <linux/t10-pi.h>
27 #include <linux/io-64-nonatomic-lo-hi.h>
28 #include <linux/io-64-nonatomic-hi-lo.h>
29 #include <linux/sed-opal.h>
34 #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
35 #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
58 ((NVME_SMALL_POOL_SIZE / sizeof(struct nvme_sgl_desc)) - 1)
64 (((NVME_CTRL_PAGE_SIZE / sizeof(__le64))) - 1)
67 * I/O could be non-aligned at both the beginning and the end.
70 (NVME_MAX_BYTES + 2 * (NVME_CTRL_PAGE_SIZE - 1))
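The worst-case sizing above allows for a transfer that neither starts nor ends on a controller-page boundary: such an I/O spills into one extra NVME_CTRL_PAGE_SIZE page at each end, and each descriptor page holds (NVME_CTRL_PAGE_SIZE / sizeof(__le64)) - 1 usable PRP entries (the last slot is reserved for chaining). A standalone sketch of the arithmetic follows; NVME_CTRL_PAGE_SIZE matches the driver's 4 KiB, but the NVME_MAX_BYTES value here is an illustrative assumption, not the driver's constant.

/* Userspace sketch of the worst-case PRP sizing arithmetic.
 * sizeof(unsigned long long) stands in for sizeof(__le64). */
#include <stdio.h>

#define NVME_CTRL_PAGE_SIZE 4096ul
#define NVME_MAX_BYTES      (2ul * 1024 * 1024)        /* assumed example */
#define PRPS_PER_PAGE       (NVME_CTRL_PAGE_SIZE / sizeof(unsigned long long) - 1)

int main(void)
{
        /* Non-aligned at both ends: one extra page on each side. */
        unsigned long worst = NVME_MAX_BYTES + 2 * (NVME_CTRL_PAGE_SIZE - 1);
        unsigned long prps  = (worst + NVME_CTRL_PAGE_SIZE - 1) / NVME_CTRL_PAGE_SIZE;
        unsigned long lists = (prps + PRPS_PER_PAGE - 1) / PRPS_PER_PAGE;

        printf("%lu bytes spanned -> %lu PRP entries, %lu list pages\n",
               worst, prps, lists);
        return 0;
}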
112 return -EINVAL; in io_queue_count_set()
171 struct nvme_ctrl ctrl; member
212 static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl) in to_nvme_dev() argument
214 return container_of(ctrl, struct nvme_dev, ctrl); in to_nvme_dev()
252 /* bits for iod->flags */
263 /* Metadata using non-coalesced MPTR */
295 return dev->nr_allocated_queues * 8 * dev->db_stride; in nvme_dbbuf_size()
302 if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP)) in nvme_dbbuf_dma_alloc()
305 if (dev->dbbuf_dbs) { in nvme_dbbuf_dma_alloc()
310 memset(dev->dbbuf_dbs, 0, mem_size); in nvme_dbbuf_dma_alloc()
311 memset(dev->dbbuf_eis, 0, mem_size); in nvme_dbbuf_dma_alloc()
315 dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size, in nvme_dbbuf_dma_alloc()
316 &dev->dbbuf_dbs_dma_addr, in nvme_dbbuf_dma_alloc()
318 if (!dev->dbbuf_dbs) in nvme_dbbuf_dma_alloc()
320 dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size, in nvme_dbbuf_dma_alloc()
321 &dev->dbbuf_eis_dma_addr, in nvme_dbbuf_dma_alloc()
323 if (!dev->dbbuf_eis) in nvme_dbbuf_dma_alloc()
328 dma_free_coherent(dev->dev, mem_size, dev->dbbuf_dbs, in nvme_dbbuf_dma_alloc()
329 dev->dbbuf_dbs_dma_addr); in nvme_dbbuf_dma_alloc()
330 dev->dbbuf_dbs = NULL; in nvme_dbbuf_dma_alloc()
332 dev_warn(dev->dev, "unable to allocate dma for dbbuf\n"); in nvme_dbbuf_dma_alloc()
339 if (dev->dbbuf_dbs) { in nvme_dbbuf_dma_free()
340 dma_free_coherent(dev->dev, mem_size, in nvme_dbbuf_dma_free()
341 dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr); in nvme_dbbuf_dma_free()
342 dev->dbbuf_dbs = NULL; in nvme_dbbuf_dma_free()
344 if (dev->dbbuf_eis) { in nvme_dbbuf_dma_free()
345 dma_free_coherent(dev->dev, mem_size, in nvme_dbbuf_dma_free()
346 dev->dbbuf_eis, dev->dbbuf_eis_dma_addr); in nvme_dbbuf_dma_free()
347 dev->dbbuf_eis = NULL; in nvme_dbbuf_dma_free()
354 if (!dev->dbbuf_dbs || !qid) in nvme_dbbuf_init()
357 nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
358 nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
359 nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
360 nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
365 if (!nvmeq->qid) in nvme_dbbuf_free()
368 nvmeq->dbbuf_sq_db = NULL; in nvme_dbbuf_free()
369 nvmeq->dbbuf_cq_db = NULL; in nvme_dbbuf_free()
370 nvmeq->dbbuf_sq_ei = NULL; in nvme_dbbuf_free()
371 nvmeq->dbbuf_cq_ei = NULL; in nvme_dbbuf_free()
379 if (!dev->dbbuf_dbs) in nvme_dbbuf_set()
383 c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr); in nvme_dbbuf_set()
384 c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); in nvme_dbbuf_set()
386 if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { in nvme_dbbuf_set()
387 dev_warn(dev->ctrl.device, "unable to set dbbuf\n"); in nvme_dbbuf_set()
391 for (i = 1; i <= dev->online_queues; i++) in nvme_dbbuf_set()
392 nvme_dbbuf_free(&dev->queues[i]); in nvme_dbbuf_set()
398 return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old); in nvme_dbbuf_need_event()
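nvme_dbbuf_need_event() is the shadow-doorbell test from the NVMe spec: ring the real doorbell only if the controller's event index falls inside the window the update just moved through. The unsigned 16-bit casts make the comparison wraparound-safe. A minimal userspace sketch of the same expression, with values chosen only to exercise the wrap:

#include <stdio.h>
#include <stdint.h>

/* Same expression as the driver: true if event_idx lies in the
 * wraparound window (old, new_idx]. */
static int need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
        /* No wrap: old=10, new=12; event_idx=11 lies in the window. */
        printf("%d\n", need_event(11, 12, 10));   /* 1 */
        /* Wrap: old=65534, new=1; event_idx=0 is still detected. */
        printf("%d\n", need_event(0, 1, 65534));  /* 1 */
        /* event_idx outside the window: no doorbell needed. */
        printf("%d\n", need_event(20, 12, 10));   /* 0 */
        return 0;
}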
436 struct nvme_descriptor_pools *pools = &dev->descriptor_pools[numa_node]; in nvme_setup_descriptor_pools()
439 if (pools->small) in nvme_setup_descriptor_pools()
442 pools->large = dma_pool_create_node("nvme descriptor page", dev->dev, in nvme_setup_descriptor_pools()
444 if (!pools->large) in nvme_setup_descriptor_pools()
445 return ERR_PTR(-ENOMEM); in nvme_setup_descriptor_pools()
447 if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512) in nvme_setup_descriptor_pools()
450 pools->small = dma_pool_create_node("nvme descriptor small", dev->dev, in nvme_setup_descriptor_pools()
452 if (!pools->small) { in nvme_setup_descriptor_pools()
453 dma_pool_destroy(pools->large); in nvme_setup_descriptor_pools()
454 pools->large = NULL; in nvme_setup_descriptor_pools()
455 return ERR_PTR(-ENOMEM); in nvme_setup_descriptor_pools()
466 struct nvme_descriptor_pools *pools = &dev->descriptor_pools[i]; in nvme_release_descriptor_pools()
468 dma_pool_destroy(pools->large); in nvme_release_descriptor_pools()
469 dma_pool_destroy(pools->small); in nvme_release_descriptor_pools()
477 struct nvme_queue *nvmeq = &dev->queues[qid]; in nvme_init_hctx_common()
481 tags = qid ? dev->tagset.tags[qid - 1] : dev->admin_tagset.tags[0]; in nvme_init_hctx_common()
482 WARN_ON(tags != hctx->tags); in nvme_init_hctx_common()
483 pools = nvme_setup_descriptor_pools(dev, hctx->numa_node); in nvme_init_hctx_common()
487 nvmeq->descriptor_pools = *pools; in nvme_init_hctx_common()
488 hctx->driver_data = nvmeq; in nvme_init_hctx_common()
511 nvme_req(req)->ctrl = set->driver_data; in nvme_pci_init_request()
512 nvme_req(req)->cmd = &iod->cmd; in nvme_pci_init_request()
519 if (dev->num_vecs > 1) in queue_irq_offset()
527 struct nvme_dev *dev = to_nvme_dev(set->driver_data); in nvme_pci_map_queues()
531 for (i = 0, qoff = 0; i < set->nr_maps; i++) { in nvme_pci_map_queues()
532 struct blk_mq_queue_map *map = &set->map[i]; in nvme_pci_map_queues()
534 map->nr_queues = dev->io_queues[i]; in nvme_pci_map_queues()
535 if (!map->nr_queues) { in nvme_pci_map_queues()
542 * affinity), so use the regular blk-mq cpu mapping in nvme_pci_map_queues()
544 map->queue_offset = qoff; in nvme_pci_map_queues()
546 blk_mq_map_hw_queues(map, dev->dev, offset); in nvme_pci_map_queues()
549 qoff += map->nr_queues; in nvme_pci_map_queues()
550 offset += map->nr_queues; in nvme_pci_map_queues()
560 u16 next_tail = nvmeq->sq_tail + 1; in nvme_write_sq_db()
562 if (next_tail == nvmeq->q_depth) in nvme_write_sq_db()
564 if (next_tail != nvmeq->last_sq_tail) in nvme_write_sq_db()
568 if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail, in nvme_write_sq_db()
569 nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei)) in nvme_write_sq_db()
570 writel(nvmeq->sq_tail, nvmeq->q_db); in nvme_write_sq_db()
571 nvmeq->last_sq_tail = nvmeq->sq_tail; in nvme_write_sq_db()
577 memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes), in nvme_sq_copy_cmd()
579 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_sq_copy_cmd()
580 nvmeq->sq_tail = 0; in nvme_sq_copy_cmd()
585 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_commit_rqs()
587 spin_lock(&nvmeq->sq_lock); in nvme_commit_rqs()
588 if (nvmeq->sq_tail != nvmeq->last_sq_tail) in nvme_commit_rqs()
590 spin_unlock(&nvmeq->sq_lock); in nvme_commit_rqs()
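Submission batches in the SQ ring and defers the doorbell: nvme_write_sq_db() skips the MMIO write while more requests are coming (bd->last is false), and nvme_commit_rqs() flushes any deferred tail. Below is a simplified userspace sketch of that batching; the doorbell write is stubbed with printf(), and the shadow-doorbell check from nvme_dbbuf_update_and_check_event() is omitted.

#include <stdio.h>
#include <string.h>

#define Q_DEPTH 8

struct sq {
        char cmds[Q_DEPTH][64];         /* 64-byte SQEs */
        unsigned tail, last_tail;
};

static void doorbell_write(unsigned tail)       /* stands in for writel() */
{
        printf("doorbell <- %u\n", tail);
}

static void sq_copy_cmd(struct sq *q, const char *cmd)
{
        memcpy(q->cmds[q->tail], cmd, 64);
        if (++q->tail == Q_DEPTH)
                q->tail = 0;
}

static void write_sq_db(struct sq *q, int write_sq)
{
        if (!write_sq) {
                unsigned next = q->tail + 1 == Q_DEPTH ? 0 : q->tail + 1;

                if (next != q->last_tail)
                        return;         /* room left: keep batching */
        }
        doorbell_write(q->tail);
        q->last_tail = q->tail;
}

int main(void)
{
        struct sq q = { .tail = 0, .last_tail = 0 };
        char cmd[64] = { 0 };

        sq_copy_cmd(&q, cmd);
        write_sq_db(&q, 0);     /* batched: no doorbell yet */
        sq_copy_cmd(&q, cmd);
        write_sq_db(&q, 1);     /* last request: doorbell <- 2 */
        return 0;
}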
601 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_pci_metadata_use_sgls()
602 struct nvme_dev *dev = nvmeq->dev; in nvme_pci_metadata_use_sgls()
604 if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl)) in nvme_pci_metadata_use_sgls()
606 return req->nr_integrity_segments > 1 || in nvme_pci_metadata_use_sgls()
607 nvme_req(req)->flags & NVME_REQ_USERCMD; in nvme_pci_metadata_use_sgls()
613 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_pci_use_sgls()
615 if (nvmeq->qid && nvme_ctrl_sgl_supported(&dev->ctrl)) { in nvme_pci_use_sgls()
616 if (nvme_req(req)->flags & NVME_REQ_USERCMD) in nvme_pci_use_sgls()
618 if (req->nr_integrity_segments > 1) in nvme_pci_use_sgls()
631 if (blk_rq_dma_map_coalesce(&iod->dma_state)) in nvme_pci_avg_seg_size()
641 if (iod->flags & IOD_SMALL_DESCRIPTOR) in nvme_dma_pool()
642 return nvmeq->descriptor_pools.small; in nvme_dma_pool()
643 return nvmeq->descriptor_pools.large; in nvme_dma_pool()
648 return (cmd->common.flags & NVME_CMD_SGL_ALL) == NVME_CMD_SGL_METASEG; in nvme_pci_cmd_use_meta_sgl()
653 return cmd->common.flags & in nvme_pci_cmd_use_sgl()
660 return le64_to_cpu(cmd->common.dptr.sgl.addr); in nvme_pci_first_desc_dma_addr()
661 return le64_to_cpu(cmd->common.dptr.prp2); in nvme_pci_first_desc_dma_addr()
666 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_free_descriptors()
667 const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; in nvme_free_descriptors()
669 dma_addr_t dma_addr = nvme_pci_first_desc_dma_addr(&iod->cmd); in nvme_free_descriptors()
672 if (iod->nr_descriptors == 1) { in nvme_free_descriptors()
673 dma_pool_free(nvme_dma_pool(nvmeq, iod), iod->descriptors[0], in nvme_free_descriptors()
678 for (i = 0; i < iod->nr_descriptors; i++) { in nvme_free_descriptors()
679 __le64 *prp_list = iod->descriptors[i]; in nvme_free_descriptors()
682 dma_pool_free(nvmeq->descriptor_pools.large, prp_list, in nvme_free_descriptors()
691 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_free_prps()
694 for (i = 0; i < iod->nr_dma_vecs; i++) in nvme_free_prps()
695 dma_unmap_page(nvmeq->dev->dev, iod->dma_vecs[i].addr, in nvme_free_prps()
696 iod->dma_vecs[i].len, rq_dma_dir(req)); in nvme_free_prps()
697 mempool_free(iod->dma_vecs, nvmeq->dev->dmavec_mempool); in nvme_free_prps()
703 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_free_sgls()
705 unsigned int len = le32_to_cpu(sge->length); in nvme_free_sgls()
706 struct device *dma_dev = nvmeq->dev->dev; in nvme_free_sgls()
709 if (sge->type == (NVME_SGL_FMT_DATA_DESC << 4)) { in nvme_free_sgls()
710 dma_unmap_page(dma_dev, le64_to_cpu(sge->addr), len, dir); in nvme_free_sgls()
721 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_unmap_metadata()
724 struct device *dma_dev = nvmeq->dev->dev; in nvme_unmap_metadata()
725 struct nvme_sgl_desc *sge = iod->meta_descriptor; in nvme_unmap_metadata()
727 if (iod->flags & IOD_SINGLE_META_SEGMENT) { in nvme_unmap_metadata()
728 dma_unmap_page(dma_dev, iod->meta_dma, in nvme_unmap_metadata()
734 if (!blk_rq_integrity_dma_unmap(req, dma_dev, &iod->meta_dma_state, in nvme_unmap_metadata()
735 iod->meta_total_len)) { in nvme_unmap_metadata()
736 if (nvme_pci_cmd_use_meta_sgl(&iod->cmd)) in nvme_unmap_metadata()
739 dma_unmap_page(dma_dev, iod->meta_dma, in nvme_unmap_metadata()
740 iod->meta_total_len, dir); in nvme_unmap_metadata()
743 if (iod->meta_descriptor) in nvme_unmap_metadata()
744 dma_pool_free(nvmeq->descriptor_pools.small, in nvme_unmap_metadata()
745 iod->meta_descriptor, iod->meta_dma); in nvme_unmap_metadata()
751 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_unmap_data()
752 struct device *dma_dev = nvmeq->dev->dev; in nvme_unmap_data()
754 if (iod->flags & IOD_SINGLE_SEGMENT) { in nvme_unmap_data()
757 dma_unmap_page(dma_dev, le64_to_cpu(iod->cmd.common.dptr.prp1), in nvme_unmap_data()
758 iod->total_len, rq_dma_dir(req)); in nvme_unmap_data()
762 if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) { in nvme_unmap_data()
763 if (nvme_pci_cmd_use_sgl(&iod->cmd)) in nvme_unmap_data()
764 nvme_free_sgls(req, iod->descriptors[0], in nvme_unmap_data()
765 &iod->cmd.common.dptr.sgl); in nvme_unmap_data()
770 if (iod->nr_descriptors) in nvme_unmap_data()
779 if (iter->len) in nvme_pci_prp_iter_next()
781 if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter)) in nvme_pci_prp_iter_next()
783 if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) { in nvme_pci_prp_iter_next()
784 iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr; in nvme_pci_prp_iter_next()
785 iod->dma_vecs[iod->nr_dma_vecs].len = iter->len; in nvme_pci_prp_iter_next()
786 iod->nr_dma_vecs++; in nvme_pci_prp_iter_next()
795 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_pci_setup_data_prp()
801 if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(nvmeq->dev->dev)) { in nvme_pci_setup_data_prp()
802 iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool, in nvme_pci_setup_data_prp()
804 if (!iod->dma_vecs) in nvme_pci_setup_data_prp()
806 iod->dma_vecs[0].addr = iter->addr; in nvme_pci_setup_data_prp()
807 iod->dma_vecs[0].len = iter->len; in nvme_pci_setup_data_prp()
808 iod->nr_dma_vecs = 1; in nvme_pci_setup_data_prp()
815 * non-aligned. in nvme_pci_setup_data_prp()
817 prp1_dma = iter->addr; in nvme_pci_setup_data_prp()
818 prp_len = min(length, NVME_CTRL_PAGE_SIZE - in nvme_pci_setup_data_prp()
819 (iter->addr & (NVME_CTRL_PAGE_SIZE - 1))); in nvme_pci_setup_data_prp()
820 iod->total_len += prp_len; in nvme_pci_setup_data_prp()
821 iter->addr += prp_len; in nvme_pci_setup_data_prp()
822 iter->len -= prp_len; in nvme_pci_setup_data_prp()
823 length -= prp_len; in nvme_pci_setup_data_prp()
827 if (!nvme_pci_prp_iter_next(req, nvmeq->dev->dev, iter)) { in nvme_pci_setup_data_prp()
828 if (WARN_ON_ONCE(!iter->status)) in nvme_pci_setup_data_prp()
838 prp2_dma = iter->addr; in nvme_pci_setup_data_prp()
839 iod->total_len += length; in nvme_pci_setup_data_prp()
845 iod->flags |= IOD_SMALL_DESCRIPTOR; in nvme_pci_setup_data_prp()
850 iter->status = BLK_STS_RESOURCE; in nvme_pci_setup_data_prp()
853 iod->descriptors[iod->nr_descriptors++] = prp_list; in nvme_pci_setup_data_prp()
857 prp_list[i++] = cpu_to_le64(iter->addr); in nvme_pci_setup_data_prp()
859 if (WARN_ON_ONCE(iter->len < prp_len)) in nvme_pci_setup_data_prp()
862 iod->total_len += prp_len; in nvme_pci_setup_data_prp()
863 iter->addr += prp_len; in nvme_pci_setup_data_prp()
864 iter->len -= prp_len; in nvme_pci_setup_data_prp()
865 length -= prp_len; in nvme_pci_setup_data_prp()
869 if (!nvme_pci_prp_iter_next(req, nvmeq->dev->dev, iter)) { in nvme_pci_setup_data_prp()
870 if (WARN_ON_ONCE(!iter->status)) in nvme_pci_setup_data_prp()
885 prp_list = dma_pool_alloc(nvmeq->descriptor_pools.large, in nvme_pci_setup_data_prp()
888 iter->status = BLK_STS_RESOURCE; in nvme_pci_setup_data_prp()
891 iod->descriptors[iod->nr_descriptors++] = prp_list; in nvme_pci_setup_data_prp()
893 prp_list[0] = old_prp_list[i - 1]; in nvme_pci_setup_data_prp()
894 old_prp_list[i - 1] = cpu_to_le64(prp_list_dma); in nvme_pci_setup_data_prp()
904 iod->cmd.common.dptr.prp1 = cpu_to_le64(prp1_dma); in nvme_pci_setup_data_prp()
905 iod->cmd.common.dptr.prp2 = cpu_to_le64(prp2_dma); in nvme_pci_setup_data_prp()
906 if (unlikely(iter->status)) in nvme_pci_setup_data_prp()
908 return iter->status; in nvme_pci_setup_data_prp()
911 dev_err_once(nvmeq->dev->dev, in nvme_pci_setup_data_prp()
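When the PRP count overflows one descriptor page, the code above chains pages: the new page's slot 0 takes over the displaced last entry (prp_list[0] = old_prp_list[i - 1]) and the freed last slot becomes a pointer to the new page. A sketch of that chaining with 4-entry pages and made-up bus addresses; a real 4 KiB descriptor page holds 511 entries plus the chain slot.

#include <stdio.h>
#include <stdint.h>

#define ENTRIES 4       /* per descriptor page; 511 for a 4 KiB page */

int main(void)
{
        uint64_t pages[2][ENTRIES];
        uint64_t page1_dma = 0xbeef000;  /* pretend bus address of pages[1] */
        uint64_t *list = pages[0];
        int i = 0, n;

        for (n = 0; n < 6; n++) {
                if (i == ENTRIES) {
                        uint64_t *next = pages[1];

                        /* Move the displaced entry, then chain. */
                        next[0] = list[ENTRIES - 1];
                        list[ENTRIES - 1] = page1_dma;
                        list = next;
                        i = 1;
                }
                list[i++] = 0x100000 + (uint64_t)n * 4096;      /* PRP n */
        }
        printf("page0 last slot chains to %#llx\n",
               (unsigned long long)pages[0][ENTRIES - 1]);
        return 0;
}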
920 sge->addr = cpu_to_le64(iter->addr); in nvme_pci_sgl_set_data()
921 sge->length = cpu_to_le32(iter->len); in nvme_pci_sgl_set_data()
922 sge->type = NVME_SGL_FMT_DATA_DESC << 4; in nvme_pci_sgl_set_data()
928 sge->addr = cpu_to_le64(dma_addr); in nvme_pci_sgl_set_seg()
929 sge->length = cpu_to_le32(entries * sizeof(*sge)); in nvme_pci_sgl_set_seg()
930 sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; in nvme_pci_sgl_set_seg()
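These two helpers fill 16-byte NVMe SGL descriptors: a Data Block descriptor per mapped range, and a Last Segment descriptor in the command's dptr pointing at the descriptor list. A host-endian userspace sketch follows; the driver stores the fields little-endian via cpu_to_le64/32, and the type codes 0h and 3h in the high nibble of the last byte follow the NVMe spec.

#include <stdio.h>
#include <stdint.h>

struct sgl_desc {
        uint64_t addr;
        uint32_t length;
        uint8_t  rsvd[3];
        uint8_t  type;          /* descriptor type in the high nibble */
};

enum { SGL_FMT_DATA_DESC = 0x0, SGL_FMT_LAST_SEG_DESC = 0x3 };

static void sgl_set_data(struct sgl_desc *sge, uint64_t addr, uint32_t len)
{
        sge->addr = addr;       /* driver uses cpu_to_le64() here */
        sge->length = len;
        sge->type = SGL_FMT_DATA_DESC << 4;
}

static void sgl_set_seg(struct sgl_desc *sge, uint64_t list_dma, int entries)
{
        sge->addr = list_dma;
        sge->length = entries * sizeof(*sge);   /* segment length in bytes */
        sge->type = SGL_FMT_LAST_SEG_DESC << 4;
}

int main(void)
{
        struct sgl_desc sgl[2], seg;

        sgl_set_data(&sgl[0], 0x1000, 4096);
        sgl_set_data(&sgl[1], 0x3000, 512);
        sgl_set_seg(&seg, 0x8000, 2);   /* dptr points at the 2-entry list */
        printf("segment covers %u bytes of descriptors\n",
               (unsigned)seg.length);
        return 0;
}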
937 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_pci_setup_data_sgl()
944 iod->cmd.common.flags = NVME_CMD_SGL_METABUF; in nvme_pci_setup_data_sgl()
946 if (entries == 1 || blk_rq_dma_map_coalesce(&iod->dma_state)) { in nvme_pci_setup_data_sgl()
947 nvme_pci_sgl_set_data(&iod->cmd.common.dptr.sgl, iter); in nvme_pci_setup_data_sgl()
948 iod->total_len += iter->len; in nvme_pci_setup_data_sgl()
953 iod->flags |= IOD_SMALL_DESCRIPTOR; in nvme_pci_setup_data_sgl()
959 iod->descriptors[iod->nr_descriptors++] = sg_list; in nvme_pci_setup_data_sgl()
963 iter->status = BLK_STS_IOERR; in nvme_pci_setup_data_sgl()
967 iod->total_len += iter->len; in nvme_pci_setup_data_sgl()
968 } while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, &iod->dma_state, in nvme_pci_setup_data_sgl()
971 nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped); in nvme_pci_setup_data_sgl()
972 if (unlikely(iter->status)) in nvme_pci_setup_data_sgl()
974 return iter->status; in nvme_pci_setup_data_sgl()
981 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_pci_setup_data_simple()
983 unsigned int prp1_offset = bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1); in nvme_pci_setup_data_simple()
992 dma_addr = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0); in nvme_pci_setup_data_simple()
993 if (dma_mapping_error(nvmeq->dev->dev, dma_addr)) in nvme_pci_setup_data_simple()
995 iod->total_len = bv.bv_len; in nvme_pci_setup_data_simple()
996 iod->flags |= IOD_SINGLE_SEGMENT; in nvme_pci_setup_data_simple()
999 iod->cmd.common.flags = NVME_CMD_SGL_METABUF; in nvme_pci_setup_data_simple()
1000 iod->cmd.common.dptr.sgl.addr = cpu_to_le64(dma_addr); in nvme_pci_setup_data_simple()
1001 iod->cmd.common.dptr.sgl.length = cpu_to_le32(bv.bv_len); in nvme_pci_setup_data_simple()
1002 iod->cmd.common.dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4; in nvme_pci_setup_data_simple()
1004 unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - prp1_offset; in nvme_pci_setup_data_simple()
1006 iod->cmd.common.dptr.prp1 = cpu_to_le64(dma_addr); in nvme_pci_setup_data_simple()
1007 iod->cmd.common.dptr.prp2 = 0; in nvme_pci_setup_data_simple()
1009 iod->cmd.common.dptr.prp2 = in nvme_pci_setup_data_simple()
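The simple path above avoids descriptor allocation entirely for a single small segment: prp1 takes the (possibly unaligned) start address and prp2 is filled only when the segment crosses into a second controller page; anything larger falls back to the full PRP/SGL paths. A sketch of that decision, assuming 4 KiB controller pages and a segment that fits within two of them:

#include <stdio.h>
#include <stdint.h>

#define CTRL_PAGE_SIZE 4096u

/* Returns how many PRP entries a single segment needs (1 or 2)
 * and fills prp1/prp2 the way the simple path does. */
static int setup_prps_simple(uint64_t dma_addr, uint32_t len,
                             uint64_t *prp1, uint64_t *prp2)
{
        uint32_t off = dma_addr & (CTRL_PAGE_SIZE - 1);
        uint32_t first = CTRL_PAGE_SIZE - off;

        *prp1 = dma_addr;
        if (len <= first) {
                *prp2 = 0;
                return 1;
        }
        *prp2 = dma_addr + first;       /* second page, page-aligned */
        return 2;
}

int main(void)
{
        uint64_t p1, p2;
        int n = setup_prps_simple(0x10200, 8192 - 0x200, &p1, &p2);

        printf("%d PRPs: prp1=%#llx prp2=%#llx\n", n,
               (unsigned long long)p1, (unsigned long long)p2);
        return 0;
}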
1019 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_map_data()
1020 struct nvme_dev *dev = nvmeq->dev; in nvme_map_data()
1035 if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter)) in nvme_map_data()
1047 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_pci_setup_meta_iter()
1048 unsigned int entries = req->nr_integrity_segments; in nvme_pci_setup_meta_iter()
1050 struct nvme_dev *dev = nvmeq->dev; in nvme_pci_setup_meta_iter()
1056 if (!blk_rq_integrity_dma_map_iter_start(req, dev->dev, in nvme_pci_setup_meta_iter()
1057 &iod->meta_dma_state, &iter)) in nvme_pci_setup_meta_iter()
1060 if (blk_rq_dma_map_coalesce(&iod->meta_dma_state)) in nvme_pci_setup_meta_iter()
1079 if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl) || in nvme_pci_setup_meta_iter()
1080 (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD))) { in nvme_pci_setup_meta_iter()
1081 iod->cmd.common.metadata = cpu_to_le64(iter.addr); in nvme_pci_setup_meta_iter()
1082 iod->meta_total_len = iter.len; in nvme_pci_setup_meta_iter()
1083 iod->meta_dma = iter.addr; in nvme_pci_setup_meta_iter()
1084 iod->meta_descriptor = NULL; in nvme_pci_setup_meta_iter()
1088 sg_list = dma_pool_alloc(nvmeq->descriptor_pools.small, GFP_ATOMIC, in nvme_pci_setup_meta_iter()
1093 iod->meta_descriptor = sg_list; in nvme_pci_setup_meta_iter()
1094 iod->meta_dma = sgl_dma; in nvme_pci_setup_meta_iter()
1095 iod->cmd.common.flags = NVME_CMD_SGL_METASEG; in nvme_pci_setup_meta_iter()
1096 iod->cmd.common.metadata = cpu_to_le64(sgl_dma); in nvme_pci_setup_meta_iter()
1098 iod->meta_total_len = iter.len; in nvme_pci_setup_meta_iter()
1106 iod->meta_total_len += iter.len; in nvme_pci_setup_meta_iter()
1107 } while (blk_rq_integrity_dma_map_iter_next(req, dev->dev, &iter)); in nvme_pci_setup_meta_iter()
1118 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_pci_setup_meta_mptr()
1124 iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0); in nvme_pci_setup_meta_mptr()
1125 if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma)) in nvme_pci_setup_meta_mptr()
1127 iod->cmd.common.metadata = cpu_to_le64(iod->meta_dma); in nvme_pci_setup_meta_mptr()
1128 iod->flags |= IOD_SINGLE_META_SEGMENT; in nvme_pci_setup_meta_mptr()
1136 if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) && in nvme_map_metadata()
1147 iod->flags = 0; in nvme_prep_rq()
1148 iod->nr_descriptors = 0; in nvme_prep_rq()
1149 iod->total_len = 0; in nvme_prep_rq()
1150 iod->meta_total_len = 0; in nvme_prep_rq()
1152 ret = nvme_setup_cmd(req->q->queuedata, req); in nvme_prep_rq()
1181 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_queue_rq()
1182 struct nvme_dev *dev = nvmeq->dev; in nvme_queue_rq()
1183 struct request *req = bd->rq; in nvme_queue_rq()
1191 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) in nvme_queue_rq()
1194 if (unlikely(!nvme_check_ready(&dev->ctrl, req, true))) in nvme_queue_rq()
1195 return nvme_fail_nonready_command(&dev->ctrl, req); in nvme_queue_rq()
1200 spin_lock(&nvmeq->sq_lock); in nvme_queue_rq()
1201 nvme_sq_copy_cmd(nvmeq, &iod->cmd); in nvme_queue_rq()
1202 nvme_write_sq_db(nvmeq, bd->last); in nvme_queue_rq()
1203 spin_unlock(&nvmeq->sq_lock); in nvme_queue_rq()
1214 spin_lock(&nvmeq->sq_lock); in nvme_submit_cmds()
1218 nvme_sq_copy_cmd(nvmeq, &iod->cmd); in nvme_submit_cmds()
1221 spin_unlock(&nvmeq->sq_lock); in nvme_submit_cmds()
1230 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) in nvme_prep_rq_batch()
1232 if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true))) in nvme_prep_rq_batch()
1246 if (nvmeq && nvmeq != req->mq_hctx->driver_data) in nvme_queue_rqs()
1248 nvmeq = req->mq_hctx->driver_data; in nvme_queue_rqs()
1283 struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head]; in nvme_cqe_pending()
1285 return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase; in nvme_cqe_pending()
1290 u16 head = nvmeq->cq_head; in nvme_ring_cq_doorbell()
1292 if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, in nvme_ring_cq_doorbell()
1293 nvmeq->dbbuf_cq_ei)) in nvme_ring_cq_doorbell()
1294 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); in nvme_ring_cq_doorbell()
1299 if (!nvmeq->qid) in nvme_queue_tagset()
1300 return nvmeq->dev->admin_tagset.tags[0]; in nvme_queue_tagset()
1301 return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; in nvme_queue_tagset()
1307 struct nvme_completion *cqe = &nvmeq->cqes[idx]; in nvme_handle_cqe()
1308 __u16 command_id = READ_ONCE(cqe->command_id); in nvme_handle_cqe()
1317 if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) { in nvme_handle_cqe()
1318 nvme_complete_async_event(&nvmeq->dev->ctrl, in nvme_handle_cqe()
1319 cqe->status, &cqe->result); in nvme_handle_cqe()
1325 dev_warn(nvmeq->dev->ctrl.device, in nvme_handle_cqe()
1327 command_id, le16_to_cpu(cqe->sq_id)); in nvme_handle_cqe()
1331 trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); in nvme_handle_cqe()
1332 if (!nvme_try_complete_req(req, cqe->status, cqe->result) && in nvme_handle_cqe()
1334 nvme_req(req)->status != NVME_SC_SUCCESS, in nvme_handle_cqe()
1341 u32 tmp = nvmeq->cq_head + 1; in nvme_update_cq_head()
1343 if (tmp == nvmeq->q_depth) { in nvme_update_cq_head()
1344 nvmeq->cq_head = 0; in nvme_update_cq_head()
1345 nvmeq->cq_phase ^= 1; in nvme_update_cq_head()
1347 nvmeq->cq_head = tmp; in nvme_update_cq_head()
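Completion reaping never reads a head pointer from the device; it relies on the phase bit. Bit 0 of each CQE's status toggles every pass the controller makes around the queue, so an entry is new exactly when its phase matches cq_phase, and the expected phase flips whenever cq_head wraps, as in nvme_update_cq_head() above. A self-contained sketch of that consumer loop:

#include <stdio.h>
#include <stdint.h>

#define Q_DEPTH 4

struct cqe { uint16_t status; };        /* bit 0 = phase */

struct cq {
        struct cqe cqes[Q_DEPTH];
        uint16_t head;
        uint8_t  phase;                 /* starts at 1 after reset */
};

static int cqe_pending(struct cq *q)
{
        return (q->cqes[q->head].status & 1) == q->phase;
}

static void update_head(struct cq *q)
{
        if (++q->head == Q_DEPTH) {
                q->head = 0;
                q->phase ^= 1;          /* next lap uses the other phase */
        }
}

int main(void)
{
        struct cq q = { .phase = 1 };
        int i;

        /* "Device" posts three completions with the current phase. */
        for (i = 0; i < 3; i++)
                q.cqes[i].status = 1;

        while (cqe_pending(&q)) {
                printf("reaped CQE at head %u\n", (unsigned)q.head);
                update_head(&q);
        }
        return 0;
}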
1359 * load-load control dependency between phase and the rest of in nvme_poll_cq()
1363 nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head); in nvme_poll_cq()
1400 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); in nvme_poll_irqdisable()
1402 WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); in nvme_poll_irqdisable()
1404 disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); in nvme_poll_irqdisable()
1405 spin_lock(&nvmeq->cq_poll_lock); in nvme_poll_irqdisable()
1407 spin_unlock(&nvmeq->cq_poll_lock); in nvme_poll_irqdisable()
1408 enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); in nvme_poll_irqdisable()
1413 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_poll()
1419 spin_lock(&nvmeq->cq_poll_lock); in nvme_poll()
1421 spin_unlock(&nvmeq->cq_poll_lock); in nvme_poll()
1426 static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) in nvme_pci_submit_async_event() argument
1428 struct nvme_dev *dev = to_nvme_dev(ctrl); in nvme_pci_submit_async_event()
1429 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_pci_submit_async_event()
1435 spin_lock(&nvmeq->sq_lock); in nvme_pci_submit_async_event()
1438 spin_unlock(&nvmeq->sq_lock); in nvme_pci_submit_async_event()
1441 static int nvme_pci_subsystem_reset(struct nvme_ctrl *ctrl) in nvme_pci_subsystem_reset() argument
1443 struct nvme_dev *dev = to_nvme_dev(ctrl); in nvme_pci_subsystem_reset()
1452 mutex_lock(&dev->shutdown_lock); in nvme_pci_subsystem_reset()
1453 if (!dev->bar_mapped_size) { in nvme_pci_subsystem_reset()
1454 ret = -ENODEV; in nvme_pci_subsystem_reset()
1458 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { in nvme_pci_subsystem_reset()
1459 ret = -EBUSY; in nvme_pci_subsystem_reset()
1463 writel(NVME_SUBSYS_RESET, dev->bar + NVME_REG_NSSR); in nvme_pci_subsystem_reset()
1464 nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE); in nvme_pci_subsystem_reset()
1470 readl(dev->bar + NVME_REG_CSTS); in nvme_pci_subsystem_reset()
1472 mutex_unlock(&dev->shutdown_lock); in nvme_pci_subsystem_reset()
1483 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_delete_queue()
1492 if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) in adapter_alloc_cq()
1500 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); in adapter_alloc_cq()
1502 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_cq()
1506 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_alloc_cq()
1512 struct nvme_ctrl *ctrl = &dev->ctrl; in adapter_alloc_sq() local
1517 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't in adapter_alloc_sq()
1521 if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ) in adapter_alloc_sq()
1529 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); in adapter_alloc_sq()
1531 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_sq()
1535 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_alloc_sq()
1550 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in abort_endio()
1552 dev_warn(nvmeq->dev->ctrl.device, in abort_endio()
1553 "Abort status: 0x%x", nvme_req(req)->status); in abort_endio()
1554 atomic_inc(&nvmeq->dev->ctrl.abort_limit); in abort_endio()
1564 bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); in nvme_should_reset()
1567 switch (nvme_ctrl_state(&dev->ctrl)) { in nvme_should_reset()
1590 result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, in nvme_warn_reset()
1593 dev_warn(dev->ctrl.device, in nvme_warn_reset()
1597 dev_warn(dev->ctrl.device, in nvme_warn_reset()
1604 dev_warn(dev->ctrl.device, in nvme_warn_reset()
1606 dev_warn(dev->ctrl.device, in nvme_warn_reset()
1613 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_timeout()
1614 struct nvme_dev *dev = nvmeq->dev; in nvme_timeout()
1617 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_timeout()
1618 u32 csts = readl(dev->bar + NVME_REG_CSTS); in nvme_timeout()
1630 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); in nvme_timeout()
1631 if (nvme_state_terminal(&dev->ctrl)) in nvme_timeout()
1652 if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) in nvme_timeout()
1653 nvme_poll(req->mq_hctx, NULL); in nvme_timeout()
1658 dev_warn(dev->ctrl.device, in nvme_timeout()
1660 req->tag, nvme_cid(req), nvmeq->qid); in nvme_timeout()
1670 switch (nvme_ctrl_state(&dev->ctrl)) { in nvme_timeout()
1672 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); in nvme_timeout()
1675 dev_warn_ratelimited(dev->ctrl.device, in nvme_timeout()
1677 req->tag, nvme_cid(req), nvmeq->qid); in nvme_timeout()
1678 nvme_req(req)->flags |= NVME_REQ_CANCELLED; in nvme_timeout()
1692 opcode = nvme_req(req)->cmd->common.opcode; in nvme_timeout()
1693 if (!nvmeq->qid || (iod->flags & IOD_ABORTED)) { in nvme_timeout()
1694 dev_warn(dev->ctrl.device, in nvme_timeout()
1696 req->tag, nvme_cid(req), opcode, in nvme_timeout()
1697 nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid); in nvme_timeout()
1698 nvme_req(req)->flags |= NVME_REQ_CANCELLED; in nvme_timeout()
1702 if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { in nvme_timeout()
1703 atomic_inc(&dev->ctrl.abort_limit); in nvme_timeout()
1706 iod->flags |= IOD_ABORTED; in nvme_timeout()
1710 cmd.abort.sqid = cpu_to_le16(nvmeq->qid); in nvme_timeout()
1712 dev_warn(nvmeq->dev->ctrl.device, in nvme_timeout()
1714 req->tag, nvme_cid(req), opcode, nvme_get_opcode_str(opcode), in nvme_timeout()
1715 nvmeq->qid, blk_op_str(req_op(req)), req_op(req), in nvme_timeout()
1718 abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd), in nvme_timeout()
1721 atomic_inc(&dev->ctrl.abort_limit); in nvme_timeout()
1726 abort_req->end_io = abort_endio; in nvme_timeout()
1727 abort_req->end_io_data = NULL; in nvme_timeout()
1738 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) { in nvme_timeout()
1739 if (nvme_state_terminal(&dev->ctrl)) in nvme_timeout()
1745 if (nvme_try_sched_reset(&dev->ctrl)) in nvme_timeout()
1746 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_timeout()
1752 dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), in nvme_free_queue()
1753 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); in nvme_free_queue()
1754 if (!nvmeq->sq_cmds) in nvme_free_queue()
1757 if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { in nvme_free_queue()
1758 pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), in nvme_free_queue()
1759 nvmeq->sq_cmds, SQ_SIZE(nvmeq)); in nvme_free_queue()
1761 dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), in nvme_free_queue()
1762 nvmeq->sq_cmds, nvmeq->sq_dma_addr); in nvme_free_queue()
1770 for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { in nvme_free_queues()
1771 dev->ctrl.queue_count--; in nvme_free_queues()
1772 nvme_free_queue(&dev->queues[i]); in nvme_free_queues()
1778 struct nvme_queue *nvmeq = &dev->queues[qid]; in nvme_suspend_queue()
1780 if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) in nvme_suspend_queue()
1786 nvmeq->dev->online_queues--; in nvme_suspend_queue()
1787 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) in nvme_suspend_queue()
1788 nvme_quiesce_admin_queue(&nvmeq->dev->ctrl); in nvme_suspend_queue()
1789 if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) in nvme_suspend_queue()
1790 pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq); in nvme_suspend_queue()
1797 for (i = dev->ctrl.queue_count - 1; i > 0; i--) in nvme_suspend_io_queues()
1811 for (i = dev->ctrl.queue_count - 1; i > 0; i--) { in nvme_reap_pending_cqes()
1812 spin_lock(&dev->queues[i].cq_poll_lock); in nvme_reap_pending_cqes()
1813 nvme_poll_cq(&dev->queues[i], NULL); in nvme_reap_pending_cqes()
1814 spin_unlock(&dev->queues[i].cq_poll_lock); in nvme_reap_pending_cqes()
1821 int q_depth = dev->q_depth; in nvme_cmb_qdepth()
1825 if (q_size_aligned * nr_io_queues > dev->cmb_size) { in nvme_cmb_qdepth()
1826 u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); in nvme_cmb_qdepth()
1837 return -ENOMEM; in nvme_cmb_qdepth()
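nvme_cmb_qdepth() shrinks the queue depth when submission queues must fit inside the controller memory buffer: the per-queue budget is cmb_size split across the I/O queues and rounded down to whole pages. A sketch of that arithmetic; the page granularity and the 64-entry floor are assumptions based on the mainline helper, and -1 stands in for -ENOMEM.

#include <stdio.h>
#include <stdint.h>

#define PAGE 4096u

static unsigned long round_up_(unsigned long x, unsigned long a)
{
        return (x + a - 1) / a * a;
}

/* Shrink q_depth so nr_io_queues SQs of entry_size bytes fit in
 * cmb_size bytes; refuse absurdly shallow queues. */
static int cmb_qdepth(unsigned q_depth, unsigned nr_io_queues,
                      unsigned entry_size, uint64_t cmb_size)
{
        unsigned long q_bytes = round_up_(q_depth * entry_size, PAGE);

        if ((uint64_t)q_bytes * nr_io_queues > cmb_size) {
                uint64_t per_q = cmb_size / nr_io_queues / PAGE * PAGE;

                q_depth = per_q / entry_size;
                if (q_depth < 64)
                        return -1;      /* -ENOMEM in the driver */
        }
        return q_depth;
}

int main(void)
{
        /* 8 queues of 1024 64-byte SQEs need 512 KiB; a 256 KiB CMB
         * forces the depth down. */
        printf("depth = %d\n", cmb_qdepth(1024, 8, 64, 256 * 1024));
        return 0;
}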
1846 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_alloc_sq_cmds()
1848 if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { in nvme_alloc_sq_cmds()
1849 nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); in nvme_alloc_sq_cmds()
1850 if (nvmeq->sq_cmds) { in nvme_alloc_sq_cmds()
1851 nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, in nvme_alloc_sq_cmds()
1852 nvmeq->sq_cmds); in nvme_alloc_sq_cmds()
1853 if (nvmeq->sq_dma_addr) { in nvme_alloc_sq_cmds()
1854 set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); in nvme_alloc_sq_cmds()
1858 pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq)); in nvme_alloc_sq_cmds()
1862 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), in nvme_alloc_sq_cmds()
1863 &nvmeq->sq_dma_addr, GFP_KERNEL); in nvme_alloc_sq_cmds()
1864 if (!nvmeq->sq_cmds) in nvme_alloc_sq_cmds()
1865 return -ENOMEM; in nvme_alloc_sq_cmds()
1871 struct nvme_queue *nvmeq = &dev->queues[qid]; in nvme_alloc_queue()
1873 if (dev->ctrl.queue_count > qid) in nvme_alloc_queue()
1876 nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES; in nvme_alloc_queue()
1877 nvmeq->q_depth = depth; in nvme_alloc_queue()
1878 nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), in nvme_alloc_queue()
1879 &nvmeq->cq_dma_addr, GFP_KERNEL); in nvme_alloc_queue()
1880 if (!nvmeq->cqes) in nvme_alloc_queue()
1886 nvmeq->dev = dev; in nvme_alloc_queue()
1887 spin_lock_init(&nvmeq->sq_lock); in nvme_alloc_queue()
1888 spin_lock_init(&nvmeq->cq_poll_lock); in nvme_alloc_queue()
1889 nvmeq->cq_head = 0; in nvme_alloc_queue()
1890 nvmeq->cq_phase = 1; in nvme_alloc_queue()
1891 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_alloc_queue()
1892 nvmeq->qid = qid; in nvme_alloc_queue()
1893 dev->ctrl.queue_count++; in nvme_alloc_queue()
1898 dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, in nvme_alloc_queue()
1899 nvmeq->cq_dma_addr); in nvme_alloc_queue()
1901 return -ENOMEM; in nvme_alloc_queue()
1906 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); in queue_request_irq()
1907 int nr = nvmeq->dev->ctrl.instance; in queue_request_irq()
1910 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, in queue_request_irq()
1911 nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); in queue_request_irq()
1913 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, in queue_request_irq()
1914 NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); in queue_request_irq()
1920 struct nvme_dev *dev = nvmeq->dev; in nvme_init_queue()
1922 nvmeq->sq_tail = 0; in nvme_init_queue()
1923 nvmeq->last_sq_tail = 0; in nvme_init_queue()
1924 nvmeq->cq_head = 0; in nvme_init_queue()
1925 nvmeq->cq_phase = 1; in nvme_init_queue()
1926 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_init_queue()
1927 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); in nvme_init_queue()
1929 dev->online_queues++; in nvme_init_queue()
1941 if (!mutex_trylock(&dev->shutdown_lock)) in nvme_setup_io_queues_trylock()
1942 return -ENODEV; in nvme_setup_io_queues_trylock()
1947 if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) { in nvme_setup_io_queues_trylock()
1948 mutex_unlock(&dev->shutdown_lock); in nvme_setup_io_queues_trylock()
1949 return -ENODEV; in nvme_setup_io_queues_trylock()
1957 struct nvme_dev *dev = nvmeq->dev; in nvme_create_queue()
1961 clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); in nvme_create_queue()
1968 vector = dev->num_vecs == 1 ? 0 : qid; in nvme_create_queue()
1970 set_bit(NVMEQ_POLLED, &nvmeq->flags); in nvme_create_queue()
1982 nvmeq->cq_vector = vector; in nvme_create_queue()
1994 set_bit(NVMEQ_ENABLED, &nvmeq->flags); in nvme_create_queue()
1995 mutex_unlock(&dev->shutdown_lock); in nvme_create_queue()
1999 dev->online_queues--; in nvme_create_queue()
2000 mutex_unlock(&dev->shutdown_lock); in nvme_create_queue()
2029 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { in nvme_dev_remove_admin()
2035 nvme_unquiesce_admin_queue(&dev->ctrl); in nvme_dev_remove_admin()
2036 nvme_remove_admin_tag_set(&dev->ctrl); in nvme_dev_remove_admin()
2042 return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); in db_bar_size()
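db_bar_size() is why nvme_remap_bar() exists: every queue (admin plus I/O) owns a 4-byte SQ tail doorbell and a 4-byte CQ head doorbell, spaced by the CAP.DSTRD stride, so the BAR mapping must grow with the queue count. A sketch of the doorbell layout per the NVMe register map, where doorbells start at offset 0x1000:

#include <stdio.h>

#define NVME_REG_DBS 0x1000u

/* Byte offset of a queue's SQ tail or CQ head doorbell;
 * db_stride == 1 << CAP.DSTRD, in dwords. */
static unsigned doorbell_off(unsigned qid, int is_cq, unsigned db_stride)
{
        return NVME_REG_DBS + (2 * qid + is_cq) * 4 * db_stride;
}

static unsigned db_bar_size(unsigned nr_io_queues, unsigned db_stride)
{
        /* admin queue + nr_io_queues, two 4-byte doorbells each */
        return NVME_REG_DBS + (nr_io_queues + 1) * 8 * db_stride;
}

int main(void)
{
        unsigned stride = 1;    /* DSTRD = 0 on most controllers */

        printf("q1 SQ doorbell at %#x, CQ at %#x\n",
               doorbell_off(1, 0, stride), doorbell_off(1, 1, stride));
        printf("BAR size for 64 I/O queues: %#x\n",
               db_bar_size(64, stride));
        return 0;
}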
2047 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_remap_bar()
2049 if (size <= dev->bar_mapped_size) in nvme_remap_bar()
2052 return -ENOMEM; in nvme_remap_bar()
2053 if (dev->bar) in nvme_remap_bar()
2054 iounmap(dev->bar); in nvme_remap_bar()
2055 dev->bar = ioremap(pci_resource_start(pdev, 0), size); in nvme_remap_bar()
2056 if (!dev->bar) { in nvme_remap_bar()
2057 dev->bar_mapped_size = 0; in nvme_remap_bar()
2058 return -ENOMEM; in nvme_remap_bar()
2060 dev->bar_mapped_size = size; in nvme_remap_bar()
2061 dev->dbs = dev->bar + NVME_REG_DBS; in nvme_remap_bar()
2076 dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? in nvme_pci_configure_admin_queue()
2077 NVME_CAP_NSSRC(dev->ctrl.cap) : 0; in nvme_pci_configure_admin_queue()
2079 if (dev->subsystem && in nvme_pci_configure_admin_queue()
2080 (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) in nvme_pci_configure_admin_queue()
2081 writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); in nvme_pci_configure_admin_queue()
2090 result = nvme_disable_ctrl(&dev->ctrl, false); in nvme_pci_configure_admin_queue()
2092 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_pci_configure_admin_queue()
2106 result = nvme_disable_ctrl(&dev->ctrl, false); in nvme_pci_configure_admin_queue()
2110 dev_info(dev->ctrl.device, in nvme_pci_configure_admin_queue()
2118 dev->ctrl.numa_node = dev_to_node(dev->dev); in nvme_pci_configure_admin_queue()
2120 nvmeq = &dev->queues[0]; in nvme_pci_configure_admin_queue()
2121 aqa = nvmeq->q_depth - 1; in nvme_pci_configure_admin_queue()
2124 writel(aqa, dev->bar + NVME_REG_AQA); in nvme_pci_configure_admin_queue()
2125 lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); in nvme_pci_configure_admin_queue()
2126 lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); in nvme_pci_configure_admin_queue()
2128 result = nvme_enable_ctrl(&dev->ctrl); in nvme_pci_configure_admin_queue()
2132 nvmeq->cq_vector = 0; in nvme_pci_configure_admin_queue()
2136 dev->online_queues--; in nvme_pci_configure_admin_queue()
2140 set_bit(NVMEQ_ENABLED, &nvmeq->flags); in nvme_pci_configure_admin_queue()
2149 for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { in nvme_create_io_queues()
2150 if (nvme_alloc_queue(dev, i, dev->q_depth)) { in nvme_create_io_queues()
2151 ret = -ENOMEM; in nvme_create_io_queues()
2156 max = min(dev->max_qid, dev->ctrl.queue_count - 1); in nvme_create_io_queues()
2157 if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) { in nvme_create_io_queues()
2158 rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] + in nvme_create_io_queues()
2159 dev->io_queues[HCTX_TYPE_READ]; in nvme_create_io_queues()
2164 for (i = dev->online_queues; i <= max; i++) { in nvme_create_io_queues()
2167 ret = nvme_create_queue(&dev->queues[i], i, polled); in nvme_create_io_queues()
2183 u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK; in nvme_cmb_size_unit()
2190 return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK; in nvme_cmb_size()
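nvme_cmb_size_unit() and nvme_cmb_size() decode the CMBSZ register: SZ (bits 31:12) counts units and SZU (bits 11:8) selects the unit size, 4 KiB scaled by a factor of 16 per step. A compact userspace sketch of the decode:

#include <stdio.h>
#include <stdint.h>

/* CMBSZ decode per the spec: size = SZ * (4 KiB << (4 * SZU)). */
static uint64_t cmb_bytes(uint32_t cmbsz)
{
        uint8_t  szu  = (cmbsz >> 8) & 0xf;
        uint32_t sz   = cmbsz >> 12;
        uint64_t unit = 1ULL << (12 + 4 * szu);

        return unit * sz;
}

int main(void)
{
        /* SZ = 2 units, SZU = 1 (64 KiB units) -> 128 KiB CMB */
        uint32_t cmbsz = (2u << 12) | (1u << 8);

        printf("CMB size: %llu bytes\n",
               (unsigned long long)cmb_bytes(cmbsz));
        return 0;
}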
2197 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_map_cmb()
2200 if (dev->cmb_size) in nvme_map_cmb()
2203 if (NVME_CAP_CMBS(dev->ctrl.cap)) in nvme_map_cmb()
2204 writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC); in nvme_map_cmb()
2206 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); in nvme_map_cmb()
2207 if (!dev->cmbsz) in nvme_map_cmb()
2209 dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); in nvme_map_cmb()
2212 offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc); in nvme_map_cmb()
2213 bar = NVME_CMB_BIR(dev->cmbloc); in nvme_map_cmb()
2224 size = min(size, bar_size - offset); in nvme_map_cmb()
2235 if (NVME_CAP_CMBS(dev->ctrl.cap)) { in nvme_map_cmb()
2238 dev->bar + NVME_REG_CMBMSC); in nvme_map_cmb()
2242 dev_warn(dev->ctrl.device, in nvme_map_cmb()
2244 hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC); in nvme_map_cmb()
2248 dev->cmb_size = size; in nvme_map_cmb()
2249 dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); in nvme_map_cmb()
2251 if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == in nvme_map_cmb()
2258 u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; in nvme_set_host_mem()
2259 u64 dma_addr = dev->host_mem_descs_dma; in nvme_set_host_mem()
2269 c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); in nvme_set_host_mem()
2271 ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in nvme_set_host_mem()
2273 dev_warn(dev->ctrl.device, in nvme_set_host_mem()
2277 dev->hmb = bits & NVME_HOST_MEM_ENABLE; in nvme_set_host_mem()
2286 for (i = 0; i < dev->nr_host_mem_descs; i++) { in nvme_free_host_mem_multi()
2287 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; in nvme_free_host_mem_multi()
2288 size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE; in nvme_free_host_mem_multi()
2290 dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], in nvme_free_host_mem_multi()
2291 le64_to_cpu(desc->addr), in nvme_free_host_mem_multi()
2295 kfree(dev->host_mem_desc_bufs); in nvme_free_host_mem_multi()
2296 dev->host_mem_desc_bufs = NULL; in nvme_free_host_mem_multi()
2301 if (dev->hmb_sgt) in nvme_free_host_mem()
2302 dma_free_noncontiguous(dev->dev, dev->host_mem_size, in nvme_free_host_mem()
2303 dev->hmb_sgt, DMA_BIDIRECTIONAL); in nvme_free_host_mem()
2307 dma_free_coherent(dev->dev, dev->host_mem_descs_size, in nvme_free_host_mem()
2308 dev->host_mem_descs, dev->host_mem_descs_dma); in nvme_free_host_mem()
2309 dev->host_mem_descs = NULL; in nvme_free_host_mem()
2310 dev->host_mem_descs_size = 0; in nvme_free_host_mem()
2311 dev->nr_host_mem_descs = 0; in nvme_free_host_mem()
2316 dev->hmb_sgt = dma_alloc_noncontiguous(dev->dev, size, in nvme_alloc_host_mem_single()
2318 if (!dev->hmb_sgt) in nvme_alloc_host_mem_single()
2319 return -ENOMEM; in nvme_alloc_host_mem_single()
2321 dev->host_mem_descs = dma_alloc_coherent(dev->dev, in nvme_alloc_host_mem_single()
2322 sizeof(*dev->host_mem_descs), &dev->host_mem_descs_dma, in nvme_alloc_host_mem_single()
2324 if (!dev->host_mem_descs) { in nvme_alloc_host_mem_single()
2325 dma_free_noncontiguous(dev->dev, size, dev->hmb_sgt, in nvme_alloc_host_mem_single()
2327 dev->hmb_sgt = NULL; in nvme_alloc_host_mem_single()
2328 return -ENOMEM; in nvme_alloc_host_mem_single()
2330 dev->host_mem_size = size; in nvme_alloc_host_mem_single()
2331 dev->host_mem_descs_size = sizeof(*dev->host_mem_descs); in nvme_alloc_host_mem_single()
2332 dev->nr_host_mem_descs = 1; in nvme_alloc_host_mem_single()
2334 dev->host_mem_descs[0].addr = in nvme_alloc_host_mem_single()
2335 cpu_to_le64(dev->hmb_sgt->sgl->dma_address); in nvme_alloc_host_mem_single()
2336 dev->host_mem_descs[0].size = cpu_to_le32(size / NVME_CTRL_PAGE_SIZE); in nvme_alloc_host_mem_single()
2350 tmp = (preferred + chunk_size - 1); in nvme_alloc_host_mem_multi()
2354 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) in nvme_alloc_host_mem_multi()
2355 max_entries = dev->ctrl.hmmaxd; in nvme_alloc_host_mem_multi()
2358 descs = dma_alloc_coherent(dev->dev, descs_size, &descs_dma, in nvme_alloc_host_mem_multi()
2370 len = min_t(u64, chunk_size, preferred - size); in nvme_alloc_host_mem_multi()
2371 bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, in nvme_alloc_host_mem_multi()
2384 dev->nr_host_mem_descs = i; in nvme_alloc_host_mem_multi()
2385 dev->host_mem_size = size; in nvme_alloc_host_mem_multi()
2386 dev->host_mem_descs = descs; in nvme_alloc_host_mem_multi()
2387 dev->host_mem_descs_dma = descs_dma; in nvme_alloc_host_mem_multi()
2388 dev->host_mem_descs_size = descs_size; in nvme_alloc_host_mem_multi()
2389 dev->host_mem_desc_bufs = bufs; in nvme_alloc_host_mem_multi()
2395 dma_free_coherent(dev->dev, descs_size, descs, descs_dma); in nvme_alloc_host_mem_multi()
2397 dev->host_mem_descs = NULL; in nvme_alloc_host_mem_multi()
2398 return -ENOMEM; in nvme_alloc_host_mem_multi()
2403 unsigned long dma_merge_boundary = dma_get_merge_boundary(dev->dev); in nvme_alloc_host_mem()
2405 u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); in nvme_alloc_host_mem()
2410 * non-contiguous allocation for a single segment first. in nvme_alloc_host_mem()
2420 if (!min || dev->host_mem_size >= min) in nvme_alloc_host_mem()
2426 return -ENOMEM; in nvme_alloc_host_mem()
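nvme_alloc_host_mem() works down from the controller's preferred HMB size, halving the chunk size until an allocation attempt succeeds and the total covers hmmin (the "!min || host_mem_size >= min" test above). A minimal sketch of that retry shape; the allocator is stubbed, the start size and chunk floor are simplified, and partial multi-chunk results are not modeled.

#include <stdio.h>
#include <stdint.h>

/* Stub: pretend contiguous allocations above 4 MiB fail, as a
 * fragmented system might. */
static int try_alloc(uint64_t chunk, uint64_t want, uint64_t *got)
{
        if (chunk > 4ull * 1024 * 1024)
                return 0;
        *got = want;            /* assume enough small chunks exist */
        return 1;
}

static int alloc_host_mem(uint64_t min, uint64_t preferred, uint64_t floor)
{
        uint64_t chunk, got;

        for (chunk = preferred; chunk >= floor; chunk /= 2) {
                if (!try_alloc(chunk, preferred, &got))
                        continue;
                if (!min || got >= min) {
                        printf("HMB: %llu bytes in %llu-byte chunks\n",
                               (unsigned long long)got,
                               (unsigned long long)chunk);
                        return 0;
                }
        }
        return -1;      /* -ENOMEM in the driver */
}

int main(void)
{
        return alloc_host_mem(4ull << 20, 32ull << 20, 2ull << 20);
}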
2432 u64 preferred = (u64)dev->ctrl.hmpre * 4096; in nvme_setup_host_mem()
2433 u64 min = (u64)dev->ctrl.hmmin * 4096; in nvme_setup_host_mem()
2437 if (!dev->ctrl.hmpre) in nvme_setup_host_mem()
2442 dev_warn(dev->ctrl.device, in nvme_setup_host_mem()
2452 if (dev->host_mem_descs) { in nvme_setup_host_mem()
2453 if (dev->host_mem_size >= min) in nvme_setup_host_mem()
2459 if (!dev->host_mem_descs) { in nvme_setup_host_mem()
2461 dev_warn(dev->ctrl.device, in nvme_setup_host_mem()
2466 dev_info(dev->ctrl.device, in nvme_setup_host_mem()
2468 dev->host_mem_size >> ilog2(SZ_1M), in nvme_setup_host_mem()
2469 dev->nr_host_mem_descs, in nvme_setup_host_mem()
2470 str_plural(dev->nr_host_mem_descs)); in nvme_setup_host_mem()
2485 ndev->cmbloc, ndev->cmbsz); in cmb_show()
2494 return sysfs_emit(buf, "%u\n", ndev->cmbloc); in cmbloc_show()
2503 return sysfs_emit(buf, "%u\n", ndev->cmbsz); in cmbsz_show()
2512 return sysfs_emit(buf, "%d\n", ndev->hmb); in hmb_show()
2523 return -EINVAL; in hmb_store()
2525 if (new == ndev->hmb) in hmb_store()
2546 struct nvme_ctrl *ctrl = in nvme_pci_attrs_are_visible() local
2548 struct nvme_dev *dev = to_nvme_dev(ctrl); in nvme_pci_attrs_are_visible()
2553 if (!dev->cmbsz) in nvme_pci_attrs_are_visible()
2556 if (a == &dev_attr_hmb.attr && !ctrl->hmpre) in nvme_pci_attrs_are_visible()
2559 return a->mode; in nvme_pci_attrs_are_visible()
2583 sysfs_update_group(&dev->ctrl.device->kobj, &nvme_pci_dev_attrs_group); in nvme_update_attrs()
2592 struct nvme_dev *dev = affd->priv; in nvme_calc_irq_sets()
2593 unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues; in nvme_calc_irq_sets()
2614 nr_read_queues = nrirqs - nr_write_queues; in nvme_calc_irq_sets()
2617 dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; in nvme_calc_irq_sets()
2618 affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; in nvme_calc_irq_sets()
2619 dev->io_queues[HCTX_TYPE_READ] = nr_read_queues; in nvme_calc_irq_sets()
2620 affd->set_size[HCTX_TYPE_READ] = nr_read_queues; in nvme_calc_irq_sets()
2621 affd->nr_sets = nr_read_queues ? 2 : 1; in nvme_calc_irq_sets()
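nvme_calc_irq_sets() divides the non-poll interrupt vectors into two affinity sets, default (write) and read: everything shares one set when only one vector or no write queues were requested, at least one read queue survives when writes would consume every vector, and otherwise the remainder goes to reads. A sketch mirroring that branch structure:

#include <stdio.h>

static void calc_irq_sets(unsigned nrirqs, unsigned nr_write_queues,
                          unsigned *nr_default, unsigned *nr_read)
{
        unsigned nr_read_queues;

        if (!nrirqs)
                nrirqs = 1;
        if (nrirqs == 1 || !nr_write_queues)
                nr_read_queues = 0;     /* one shared set */
        else if (nr_write_queues >= nrirqs)
                nr_read_queues = 1;     /* keep at least one read queue */
        else
                nr_read_queues = nrirqs - nr_write_queues;

        *nr_default = nrirqs - nr_read_queues;
        *nr_read = nr_read_queues;
}

int main(void)
{
        unsigned d, r;

        calc_irq_sets(8, 3, &d, &r);
        printf("8 vectors, 3 write queues -> %u default, %u read\n", d, r);
        calc_irq_sets(2, 4, &d, &r);
        printf("2 vectors, 4 write queues -> %u default, %u read\n", d, r);
        return 0;
}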
2626 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_setup_irqs()
2637 * left over for non-polled I/O. in nvme_setup_irqs()
2639 poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1); in nvme_setup_irqs()
2640 dev->io_queues[HCTX_TYPE_POLL] = poll_queues; in nvme_setup_irqs()
2646 dev->io_queues[HCTX_TYPE_DEFAULT] = 1; in nvme_setup_irqs()
2647 dev->io_queues[HCTX_TYPE_READ] = 0; in nvme_setup_irqs()
2650 * We need interrupts for the admin queue and each non-polled I/O queue, in nvme_setup_irqs()
2655 if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)) in nvme_setup_irqs()
2656 irq_queues += (nr_io_queues - poll_queues); in nvme_setup_irqs()
2657 if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI) in nvme_setup_irqs()
2669 if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) in nvme_max_io_queues()
2671 return blk_mq_num_possible_queues(0) + dev->nr_write_queues + in nvme_max_io_queues()
2672 dev->nr_poll_queues; in nvme_max_io_queues()
2677 struct nvme_queue *adminq = &dev->queues[0]; in nvme_setup_io_queues()
2678 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_setup_io_queues()
2687 dev->nr_write_queues = write_queues; in nvme_setup_io_queues()
2688 dev->nr_poll_queues = poll_queues; in nvme_setup_io_queues()
2690 nr_io_queues = dev->nr_allocated_queues - 1; in nvme_setup_io_queues()
2691 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); in nvme_setup_io_queues()
2708 if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) in nvme_setup_io_queues()
2711 if (dev->cmb_use_sqes) { in nvme_setup_io_queues()
2715 dev->q_depth = result; in nvme_setup_io_queues()
2716 dev->ctrl.sqsize = result - 1; in nvme_setup_io_queues()
2718 dev->cmb_use_sqes = false; in nvme_setup_io_queues()
2727 if (!--nr_io_queues) { in nvme_setup_io_queues()
2728 result = -ENOMEM; in nvme_setup_io_queues()
2732 adminq->q_db = dev->dbs; in nvme_setup_io_queues()
2736 if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) in nvme_setup_io_queues()
2747 result = -EIO; in nvme_setup_io_queues()
2751 dev->num_vecs = result; in nvme_setup_io_queues()
2752 result = max(result - 1, 1); in nvme_setup_io_queues()
2753 dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; in nvme_setup_io_queues()
2764 set_bit(NVMEQ_ENABLED, &adminq->flags); in nvme_setup_io_queues()
2765 mutex_unlock(&dev->shutdown_lock); in nvme_setup_io_queues()
2768 if (result || dev->online_queues < 2) in nvme_setup_io_queues()
2771 if (dev->online_queues - 1 < dev->max_qid) { in nvme_setup_io_queues()
2772 nr_io_queues = dev->online_queues - 1; in nvme_setup_io_queues()
2780 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", in nvme_setup_io_queues()
2781 dev->io_queues[HCTX_TYPE_DEFAULT], in nvme_setup_io_queues()
2782 dev->io_queues[HCTX_TYPE_READ], in nvme_setup_io_queues()
2783 dev->io_queues[HCTX_TYPE_POLL]); in nvme_setup_io_queues()
2786 mutex_unlock(&dev->shutdown_lock); in nvme_setup_io_queues()
2793 struct nvme_queue *nvmeq = req->end_io_data; in nvme_del_queue_end()
2796 complete(&nvmeq->delete_done); in nvme_del_queue_end()
2803 struct nvme_queue *nvmeq = req->end_io_data; in nvme_del_cq_end()
2806 set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); in nvme_del_cq_end()
2813 struct request_queue *q = nvmeq->dev->ctrl.admin_q; in nvme_delete_queue()
2818 cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); in nvme_delete_queue()
2826 req->end_io = nvme_del_cq_end; in nvme_delete_queue()
2828 req->end_io = nvme_del_queue_end; in nvme_delete_queue()
2829 req->end_io_data = nvmeq; in nvme_delete_queue()
2831 init_completion(&nvmeq->delete_done); in nvme_delete_queue()
2838 int nr_queues = dev->online_queues - 1, sent = 0; in __nvme_delete_io_queues()
2844 if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) in __nvme_delete_io_queues()
2846 nr_queues--; in __nvme_delete_io_queues()
2850 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; in __nvme_delete_io_queues()
2852 timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, in __nvme_delete_io_queues()
2857 sent--; in __nvme_delete_io_queues()
2872 if (dev->io_queues[HCTX_TYPE_POLL]) in nvme_pci_nr_maps()
2874 if (dev->io_queues[HCTX_TYPE_READ]) in nvme_pci_nr_maps()
2881 if (!dev->ctrl.tagset) { in nvme_pci_update_nr_queues()
2882 nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, in nvme_pci_update_nr_queues()
2888 if (!mutex_trylock(&dev->shutdown_lock)) in nvme_pci_update_nr_queues()
2892 if (!dev->online_queues) { in nvme_pci_update_nr_queues()
2893 mutex_unlock(&dev->shutdown_lock); in nvme_pci_update_nr_queues()
2897 blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); in nvme_pci_update_nr_queues()
2899 nvme_free_queues(dev, dev->online_queues); in nvme_pci_update_nr_queues()
2900 mutex_unlock(&dev->shutdown_lock); in nvme_pci_update_nr_queues()
2906 int result = -ENOMEM; in nvme_pci_enable()
2907 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_pci_enable()
2915 if (readl(dev->bar + NVME_REG_CSTS) == -1) { in nvme_pci_enable()
2916 result = -ENODEV; in nvme_pci_enable()
2922 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll in nvme_pci_enable()
2925 if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI) in nvme_pci_enable()
2931 dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); in nvme_pci_enable()
2933 dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, in nvme_pci_enable()
2935 dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); in nvme_pci_enable()
2936 dev->dbs = dev->bar + 4096; in nvme_pci_enable()
2939 * Some Apple controllers require a non-standard SQE size. in nvme_pci_enable()
2943 if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES) in nvme_pci_enable()
2944 dev->io_sqes = 7; in nvme_pci_enable()
2946 dev->io_sqes = NVME_NVM_IOSQES; in nvme_pci_enable()
2948 if (dev->ctrl.quirks & NVME_QUIRK_QDEPTH_ONE) { in nvme_pci_enable()
2949 dev->q_depth = 2; in nvme_pci_enable()
2950 } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && in nvme_pci_enable()
2951 (pdev->device == 0xa821 || pdev->device == 0xa822) && in nvme_pci_enable()
2952 NVME_CAP_MQES(dev->ctrl.cap) == 0) { in nvme_pci_enable()
2953 dev->q_depth = 64; in nvme_pci_enable()
2954 dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " in nvme_pci_enable()
2955 "set queue depth=%u\n", dev->q_depth); in nvme_pci_enable()
2962 if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) && in nvme_pci_enable()
2963 (dev->q_depth < (NVME_AQ_DEPTH + 2))) { in nvme_pci_enable()
2964 dev->q_depth = NVME_AQ_DEPTH + 2; in nvme_pci_enable()
2965 dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", in nvme_pci_enable()
2966 dev->q_depth); in nvme_pci_enable()
2968 dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ in nvme_pci_enable()
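Queue depth is derived from CAP.MQES, which is zero's-based (value + 1 entries), and sqsize is stored zero's-based again, as the comment above notes. A small decode sketch; the clamp here merely stands in for the driver's io_queue_depth and quirk limits.

#include <stdio.h>
#include <stdint.h>

/* CAP.MQES is bits 15:0 of the 64-bit CAP register, zero's based. */
static unsigned cap_mqes(uint64_t cap) { return cap & 0xffff; }

int main(void)
{
        uint64_t cap = 0x03ff;          /* MQES = 0x3ff -> 1024 entries */
        unsigned q_depth = cap_mqes(cap) + 1;

        if (q_depth > 1024)             /* placeholder for driver clamps */
                q_depth = 1024;
        printf("q_depth=%u sqsize=%u (zero's based)\n",
               q_depth, q_depth - 1);
        return 0;
}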
2988 if (dev->bar) in nvme_dev_unmap()
2989 iounmap(dev->bar); in nvme_dev_unmap()
2990 pci_release_mem_regions(to_pci_dev(dev->dev)); in nvme_dev_unmap()
2995 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_pci_ctrl_is_dead()
3000 if (pdev->error_state != pci_channel_io_normal) in nvme_pci_ctrl_is_dead()
3003 csts = readl(dev->bar + NVME_REG_CSTS); in nvme_pci_ctrl_is_dead()
3009 enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl); in nvme_dev_disable()
3010 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_dev_disable()
3013 mutex_lock(&dev->shutdown_lock); in nvme_dev_disable()
3017 nvme_start_freeze(&dev->ctrl); in nvme_dev_disable()
3023 nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); in nvme_dev_disable()
3026 nvme_quiesce_io_queues(&dev->ctrl); in nvme_dev_disable()
3028 if (!dead && dev->ctrl.queue_count > 0) { in nvme_dev_disable()
3030 nvme_disable_ctrl(&dev->ctrl, shutdown); in nvme_dev_disable()
3031 nvme_poll_irqdisable(&dev->queues[0]); in nvme_dev_disable()
3040 nvme_cancel_tagset(&dev->ctrl); in nvme_dev_disable()
3041 nvme_cancel_admin_tagset(&dev->ctrl); in nvme_dev_disable()
3046 * deadlocking blk-mq hot-cpu notifier. in nvme_dev_disable()
3049 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_dev_disable()
3050 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) in nvme_dev_disable()
3051 nvme_unquiesce_admin_queue(&dev->ctrl); in nvme_dev_disable()
3053 mutex_unlock(&dev->shutdown_lock); in nvme_dev_disable()
3058 if (!nvme_wait_reset(&dev->ctrl)) in nvme_disable_prepare_reset()
3059 return -EBUSY; in nvme_disable_prepare_reset()
3068 dev->dmavec_mempool = mempool_create_node(1, in nvme_pci_alloc_iod_mempool()
3071 dev_to_node(dev->dev)); in nvme_pci_alloc_iod_mempool()
3072 if (!dev->dmavec_mempool) in nvme_pci_alloc_iod_mempool()
3073 return -ENOMEM; in nvme_pci_alloc_iod_mempool()
3079 if (dev->tagset.tags) in nvme_free_tagset()
3080 nvme_remove_io_tag_set(&dev->ctrl); in nvme_free_tagset()
3081 dev->ctrl.tagset = NULL; in nvme_free_tagset()
3085 static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) in nvme_pci_free_ctrl() argument
3087 struct nvme_dev *dev = to_nvme_dev(ctrl); in nvme_pci_free_ctrl()
3090 put_device(dev->dev); in nvme_pci_free_ctrl()
3091 kfree(dev->queues); in nvme_pci_free_ctrl()
3098 container_of(work, struct nvme_dev, ctrl.reset_work); in nvme_reset_work()
3099 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); in nvme_reset_work()
3102 if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) { in nvme_reset_work()
3103 dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n", in nvme_reset_work()
3104 dev->ctrl.state); in nvme_reset_work()
3105 result = -ENODEV; in nvme_reset_work()
3113 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) in nvme_reset_work()
3115 nvme_sync_queues(&dev->ctrl); in nvme_reset_work()
3117 mutex_lock(&dev->shutdown_lock); in nvme_reset_work()
3121 nvme_unquiesce_admin_queue(&dev->ctrl); in nvme_reset_work()
3122 mutex_unlock(&dev->shutdown_lock); in nvme_reset_work()
3125 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the in nvme_reset_work()
3128 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { in nvme_reset_work()
3129 dev_warn(dev->ctrl.device, in nvme_reset_work()
3131 result = -EBUSY; in nvme_reset_work()
3135 result = nvme_init_ctrl_finish(&dev->ctrl, was_suspend); in nvme_reset_work()
3139 if (nvme_ctrl_meta_sgl_supported(&dev->ctrl)) in nvme_reset_work()
3140 dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS; in nvme_reset_work()
3142 dev->ctrl.max_integrity_segments = 1; in nvme_reset_work()
3161 if (dev->online_queues > 1) { in nvme_reset_work()
3163 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_reset_work()
3164 nvme_wait_freeze(&dev->ctrl); in nvme_reset_work()
3167 nvme_unfreeze(&dev->ctrl); in nvme_reset_work()
3169 dev_warn(dev->ctrl.device, "IO queues lost\n"); in nvme_reset_work()
3170 nvme_mark_namespaces_dead(&dev->ctrl); in nvme_reset_work()
3171 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_reset_work()
3172 nvme_remove_namespaces(&dev->ctrl); in nvme_reset_work()
3180 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { in nvme_reset_work()
3181 dev_warn(dev->ctrl.device, in nvme_reset_work()
3183 result = -ENODEV; in nvme_reset_work()
3187 nvme_start_ctrl(&dev->ctrl); in nvme_reset_work()
3191 mutex_unlock(&dev->shutdown_lock); in nvme_reset_work()
3197 dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n", in nvme_reset_work()
3199 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); in nvme_reset_work()
3201 nvme_sync_queues(&dev->ctrl); in nvme_reset_work()
3202 nvme_mark_namespaces_dead(&dev->ctrl); in nvme_reset_work()
3203 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_reset_work()
3204 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); in nvme_reset_work()
static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	*val = readl(to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	writel(val, to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	*val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
	return 0;
}
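/*
 * Sketch of what the io-64-nonatomic-lo-hi fallback does on targets
 * without a native 64-bit MMIO read: two 32-bit reads, low word first,
 * stitched together.  Illustrative, not the kernel's exact implementation.
 */
static inline u64 example_lo_hi_readq(const volatile void __iomem *addr)
{
	u32 low = readl(addr);
	u32 high = readl(addr + 4);

	return low + ((u64)high << 32);
}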
static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);

	return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
}
static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
{
	struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
	struct nvme_subsystem *subsys = ctrl->subsys;

	dev_err(ctrl->device,
		"VID:DID %x:%x model:%.*s firmware:%.*s\n",
		pdev->vendor, pdev->device,
		nvme_strlen(subsys->model, sizeof(subsys->model)),
		subsys->model, nvme_strlen(subsys->firmware_rev,
					   sizeof(subsys->firmware_rev)),
		subsys->firmware_rev);
}
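/*
 * The model/firmware fields above are fixed-width, space-padded strings
 * per the NVMe spec; nvme_strlen() computes the length with the padding
 * trimmed so it can feed the "%.*s" precision.  A minimal stand-alone
 * equivalent might look like this (illustrative, not the driver's helper):
 */
static int example_trimmed_len(const char *s, int len)
{
	while (len > 0 && s[len - 1] == ' ')
		len--;
	return len;
}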
static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);

	return dma_pci_p2pdma_supported(dev->dev);
}
static int nvme_dev_map(struct nvme_dev *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_request_mem_regions(pdev, "nvme"))
		return -ENODEV;

	if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
		goto release;

	return 0;
release:
	pci_release_mem_regions(pdev);
	return -ENODEV;
}
static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
{
	if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
		/* ... */
	} else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
		/*
		 * Samsung SSD 960 EVO drops off the PCIe bus after system
		 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
		 * within few minutes after bootup on a Coffee Lake board -
		 * ASUS PRIME Z370-A
		 */
		if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
		    (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
		     dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
			return NVME_QUIRK_NO_APST;
	} else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 ||
		    pdev->device == 0xa808 || pdev->device == 0xa809)) ||
		   (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) {
		/* ... */
	} else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 ||
		   pdev->device == 0x500f)) {
		/* ... */
	} else if (pdev->vendor == 0x144d && pdev->device == 0xa80d) {
		/* ... */
		if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") /* ... */)
			return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
	}

	if (dmi_match(DMI_BOARD_NAME, "LXKT-ZXEG-N6"))
		return NVME_QUIRK_NO_APST;

	return 0;
}
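/*
 * Sketch of the quirk-matching idiom used above: combine PCI IDs with
 * DMI board matching so a workaround only fires on the exact platform
 * that needs it.  The IDs and board name below are made up for
 * illustration.
 */
static unsigned long example_platform_quirk(struct pci_dev *pdev)
{
	if (pdev->vendor == 0x1234 && pdev->device == 0x5678 &&
	    dmi_match(DMI_BOARD_NAME, "EXAMPLE-BOARD"))
		return NVME_QUIRK_NO_DEEPEST_PS;
	return 0;
}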
static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	unsigned long quirks = id->driver_data;
	int node = dev_to_node(&pdev->dev);
	struct nvme_dev *dev;
	int ret = -ENOMEM;

	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
	if (!dev)
		return ERR_PTR(-ENOMEM);
	INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
	mutex_init(&dev->shutdown_lock);

	dev->nr_write_queues = write_queues;
	dev->nr_poll_queues = poll_queues;
	dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1;
	dev->queues = kcalloc_node(dev->nr_allocated_queues,
			sizeof(struct nvme_queue), GFP_KERNEL, node);
	if (!dev->queues)
		goto out_free_dev;

	dev->dev = get_device(&pdev->dev);

	quirks |= check_vendor_combination_bug(pdev);
	if (!noacpi &&
	    !(quirks & NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND) &&
	    acpi_storage_d3(&pdev->dev)) {
		/* ... */
		dev_info(&pdev->dev,
			 "platform quirk: setting simple suspend\n");
		quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
	}
	ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
			     quirks);
	if (ret)
		goto out_put_device;

	if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	else
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1);
	dma_set_max_seg_size(&pdev->dev, 0xffffffff);

	/*
	 * Limit the max command size to prevent iod->sg allocations going
	 * over a single page.
	 */
	dev->ctrl.max_hw_sectors = min_t(u32,
			/* ... */
			dma_opt_mapping_size(&pdev->dev) >> 9);
	dev->ctrl.max_segments = NVME_MAX_SEGS;
	dev->ctrl.max_integrity_segments = 1;
	return dev;

out_put_device:
	put_device(dev->dev);
	kfree(dev->queues);
out_free_dev:
	kfree(dev);
	return ERR_PTR(ret);
}
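/*
 * Note on the DMA setup above: the driver ignores the return values
 * because a 64-bit mask is not expected to fail on PCI; the 48-bit case
 * only exists for controllers with the NVME_QUIRK_DMA_ADDRESS_BITS_48
 * erratum.  Hardware that may genuinely be limited typically uses a
 * fallback chain like this (illustrative sketch):
 */
static int example_dma_setup(struct device *dev)
{
	/* prefer the full 64-bit mask, fall back to 32-bit addressing */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return 0;
}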
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct nvme_dev *dev;
	int result = -ENOMEM;

	dev = nvme_pci_alloc_dev(pdev, id);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	result = nvme_add_ctrl(&dev->ctrl);
	if (result)
		goto out_put_ctrl;
	/* ... */
	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
	/* ... */
	result = nvme_alloc_admin_tag_set(&dev->ctrl, &dev->admin_tagset,
			/* ... */);
	if (result)
		goto out_disable;

	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
		dev_warn(dev->ctrl.device,
			"failed to mark controller CONNECTING\n");
		result = -EBUSY;
		goto out_disable;
	}

	result = nvme_init_ctrl_finish(&dev->ctrl, false);
	if (result)
		goto out_disable;

	if (nvme_ctrl_meta_sgl_supported(&dev->ctrl))
		dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS;
	else
		dev->ctrl.max_integrity_segments = 1;
	/* ... */
	if (dev->online_queues > 1) {
		nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
				/* ... */);
		/* ... */
	}

	if (!dev->ctrl.tagset)
		dev_warn(dev->ctrl.device, "IO queues not created\n");

	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
		dev_warn(dev->ctrl.device,
			"failed to mark controller live state\n");
		result = -ENODEV;
		goto out_disable;
	}
	/* ... */
	nvme_start_ctrl(&dev->ctrl);
	nvme_put_ctrl(&dev->ctrl);
	flush_work(&dev->ctrl.scan_work);
	return 0;

out_disable:
	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
	/* ... */
	mempool_destroy(dev->dmavec_mempool);
	/* ... */
	nvme_uninit_ctrl(&dev->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&dev->ctrl);
	return result;
}
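/*
 * The error unwinding above follows the usual kernel goto-ladder idiom:
 * each acquired resource gets a label, and a failure jumps to the label
 * that releases everything acquired so far, in reverse order.  A minimal
 * stand-alone shape (illustrative only; in real code the buffers would
 * be handed off to the device's private state on success):
 */
static int example_probe_shape(void)
{
	void *a, *b;
	int err = -ENOMEM;

	a = kzalloc(16, GFP_KERNEL);
	if (!a)
		return err;
	b = kzalloc(16, GFP_KERNEL);
	if (!b)
		goto out_free_a;

	/* ... a and b would be stashed in private data here ... */
	return 0;

out_free_a:
	kfree(a);
	return err;
}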
static void nvme_reset_prepare(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	/*
	 * No need to check the return value from waiting for the reset state:
	 * the pci_dev device lock is held, so it is impossible to race
	 * with ->remove().
	 */
	nvme_disable_prepare_reset(dev, false);
	nvme_sync_queues(&dev->ctrl);
}

static void nvme_reset_done(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	if (!nvme_try_sched_reset(&dev->ctrl))
		flush_work(&dev->ctrl.reset_work);
}
static void nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
	pci_set_drvdata(pdev, NULL);

	if (!pci_device_is_present(pdev)) {
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
		nvme_dev_disable(dev, true);
	}

	flush_work(&dev->ctrl.reset_work);
	nvme_stop_ctrl(&dev->ctrl);
	nvme_remove_namespaces(&dev->ctrl);
	/* ... */
	mempool_destroy(dev->dmavec_mempool);
	/* ... */
	nvme_uninit_ctrl(&dev->ctrl);
}
static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps)
{
	return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps);
}

static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps)
{
	return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL);
}
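/*
 * Background for the two wrappers above: NVMe Get/Set Features with
 * NVME_FEAT_POWER_MGMT carries the power state in the low bits of
 * command dword 11 (set) and completion dword 0 (get), which is why a
 * plain u32 suffices.  Illustrative round-trip using the wrappers
 * (hypothetical helper name):
 */
static int example_power_roundtrip(struct nvme_ctrl *ctrl)
{
	u32 ps;
	int ret;

	ret = nvme_get_power_state(ctrl, &ps);
	if (ret)
		return ret;
	/* re-apply the same state; effectively a no-op on success */
	return nvme_set_power_state(ctrl, ps);
}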
static int nvme_resume(struct device *dev)
{
	struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
	struct nvme_ctrl *ctrl = &ndev->ctrl;

	if (ndev->last_ps == U32_MAX ||
	    nvme_set_power_state(ctrl, ndev->last_ps) != 0)
		goto reset;
	if (ctrl->hmpre && nvme_setup_host_mem(ndev))
		goto reset;
	return 0;

reset:
	return nvme_try_sched_reset(ctrl);
}
static int nvme_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);
	struct nvme_ctrl *ctrl = &ndev->ctrl;
	int ret = -EBUSY;

	ndev->last_ps = U32_MAX;

	/*
	 * Use host managed nvme power settings for lowest idle power if
	 * possible.  But if the firmware is involved after the suspend or the
	 * device does not support any non-default power states, shut down the
	 * device fully.
	 *
	 * If ASPM is not enabled for the device, shut it down and allow the
	 * PCI bus layer to put it into D3 in order to take the PCIe link
	 * down, so as to allow the platform to achieve its minimum low-power
	 * state.
	 */
	if (pm_suspend_via_firmware() || !ctrl->npss ||
	    !pcie_aspm_enabled(pdev) ||
	    (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
		return nvme_disable_prepare_reset(ndev, true);

	nvme_start_freeze(ctrl);
	nvme_wait_freeze(ctrl);
	nvme_sync_queues(ctrl);

	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
		goto unfreeze;

	/*
	 * Host memory access may not be successful in a system suspend state,
	 * but the specification allows the controller to access memory in a
	 * non-operational power state.
	 */
	if (ndev->hmb) {
		ret = nvme_set_host_mem(ndev, 0);
		if (ret < 0)
			goto unfreeze;
	}

	ret = nvme_get_power_state(ctrl, &ndev->last_ps);
	if (ret < 0)
		goto unfreeze;
	/* ... */
	ret = nvme_set_power_state(ctrl, ctrl->npss);
	if (ret < 0)
		goto unfreeze;
	if (ret) {
		/* ... */
		ret = nvme_disable_prepare_reset(ndev, true);
		ctrl->npss = 0;
	}
unfreeze:
	nvme_unfreeze(ctrl);
	return ret;
}
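/*
 * Summary of the suspend policy above, for quick reference: prefer
 * host-managed power states (save the current PS, release the HMB, set
 * NPSS) because resume then only needs a Set Features command; fall back
 * to a full controller shutdown whenever firmware sits in the suspend
 * path, no non-default power states exist, ASPM is off, or a quirk
 * demands it.
 */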
static int nvme_simple_resume(struct device *dev)
{
	struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));

	return nvme_try_sched_reset(&ndev->ctrl);
}
static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
		pci_channel_state_t state)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	switch (state) {
	/* ... */
	case pci_channel_io_frozen:
		dev_warn(dev->ctrl.device,
			"frozen state error detected, reset controller\n");
		if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
			nvme_dev_disable(dev, true);
			return PCI_ERS_RESULT_DISCONNECT;
		}
		nvme_dev_disable(dev, false);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_warn(dev->ctrl.device,
			"failure state error detected, request disconnect\n");
		return PCI_ERS_RESULT_DISCONNECT;
	/* ... */
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	dev_info(dev->ctrl.device, "restart after slot reset\n");
	pci_restore_state(pdev);
	if (nvme_try_sched_reset(&dev->ctrl))
		nvme_unquiesce_io_queues(&dev->ctrl);
	return PCI_ERS_RESULT_RECOVERED;
}
static void nvme_error_resume(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	flush_work(&dev->ctrl.reset_work);
}
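/*
 * The three callbacks above, together with the reset_prepare/reset_done
 * pair earlier, are wired up through the standard AER recovery table.
 * A sketch of its shape (the field names are the real struct
 * pci_error_handlers members; the instance name here is ours):
 */
static const struct pci_error_handlers example_err_handler = {
	.error_detected	= nvme_error_detected,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
	.reset_prepare	= nvme_reset_prepare,
	.reset_done	= nvme_reset_done,
};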
	{ PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
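/*
 * Each entry in the table above binds a VID:DID pair to a set of quirk
 * flags through driver_data; the PCI core matches entries at probe time
 * and nvme_pci_alloc_dev() picks the flags up via id->driver_data.
 * Illustrative entry with a made-up ID:
 */
static const struct pci_device_id example_id_table[] = {
	{ PCI_DEVICE(0x1234, 0x5678),	/* hypothetical device */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
	{ 0, }
};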