// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver (PCIe transport): excerpted lines, grouped by
 * the function they belong to; surrounding context is elided.
 *
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/t10-pi.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>

#define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
#define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))

/* io_queue_count_set() */
	return -EINVAL;

/* cq_idx() */
	return (qid * 2 + 1) * stride;
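/*
 * Editor's note: each queue pair owns two doorbells in the BAR, the
 * submission doorbell at (qid * 2) * stride and the completion doorbell at
 * (qid * 2 + 1) * stride; sq_idx()/cq_idx() compute the matching slots in
 * the dbbuf shadow-doorbell arrays as well.
 */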
#define NVMEQ_SQ_CMB	1

/* nvme_dbbuf_size() */
	return dev->nr_allocated_queues * 8 * dev->db_stride;
/* nvme_dbbuf_dma_alloc() */
	if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP))
		return;

	if (dev->dbbuf_dbs) {
		memset(dev->dbbuf_dbs, 0, mem_size);
		memset(dev->dbbuf_eis, 0, mem_size);
		return;
	}

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		goto fail;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis)
		goto fail_free_dbbuf_dbs;
	return;

fail_free_dbbuf_dbs:
	dma_free_coherent(dev->dev, mem_size, dev->dbbuf_dbs,
			  dev->dbbuf_dbs_dma_addr);
	dev->dbbuf_dbs = NULL;
fail:
	dev_warn(dev->dev, "unable to allocate dma for dbbuf\n");

/* nvme_dbbuf_dma_free() */
	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}

/* nvme_dbbuf_init() */
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];

/* nvme_dbbuf_free() */
	if (!nvmeq->qid)
		return;

	nvmeq->dbbuf_sq_db = NULL;
	nvmeq->dbbuf_cq_db = NULL;
	nvmeq->dbbuf_sq_ei = NULL;
	nvmeq->dbbuf_cq_ei = NULL;

/* nvme_dbbuf_set() */
	if (!dev->dbbuf_dbs)
		return;

	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
		/* Free memory and continue on */
		for (i = 1; i <= dev->online_queues; i++)
			nvme_dbbuf_free(&dev->queues[i]);
	}

/* nvme_dbbuf_need_event() */
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
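/*
 * Editor's note: this is the classic virtio-style event-index test done in
 * 16-bit modular arithmetic, so it stays correct across ring wraparound.
 * Worked example: with old = 0xfffe, new_idx = 0x0001 and event_idx =
 * 0xffff, (u16)(0x0001 - 0xffff - 1) = 0x0001 and (u16)(0x0001 - 0xfffe) =
 * 0x0003; 1 < 3 means the event index was passed between old and new, so
 * the real doorbell must be rung.
 */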
/* nvme_pci_npages_prp() */
	return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
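/*
 * Editor's note: with NVME_CTRL_PAGE_SIZE = 4096 each PRP list page holds
 * 512 eight-byte entries, and the last entry of a full page is reused as a
 * chain pointer to the next list page, leaving 511 usable slots; hence the
 * divide by (NVME_CTRL_PAGE_SIZE - 8) above.
 */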
/* nvme_admin_init_hctx() */
	struct nvme_queue *nvmeq = &dev->queues[0];

	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);

	hctx->driver_data = nvmeq;

/* nvme_init_hctx() */
	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;

/* nvme_pci_init_request() */
	nvme_req(req)->ctrl = set->driver_data;
	nvme_req(req)->cmd = &iod->cmd;

/* queue_irq_offset() */
	/* if we have more than 1 vec, admin queue offsets us by 1 */
	if (dev->num_vecs > 1)
		return 1;

/* nvme_pci_map_queues() */
	struct nvme_dev *dev = to_nvme_dev(set->driver_data);

	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = dev->io_queues[i];
		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}
		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL && offset)
			blk_mq_map_hw_queues(map, dev->dev, offset);
		else
			blk_mq_map_queues(map);
		qoff += map->nr_queues;
		offset += map->nr_queues;
	}

/* nvme_write_sq_db() */
	if (!write_sq) {
		u16 next_tail = nvmeq->sq_tail + 1;

		if (next_tail == nvmeq->q_depth)
			next_tail = 0;
		if (next_tail != nvmeq->last_sq_tail)
			return;
	}

	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
		writel(nvmeq->sq_tail, nvmeq->q_db);
	nvmeq->last_sq_tail = nvmeq->sq_tail;

/* nvme_sq_copy_cmd() */
	memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
	       cmd, sizeof(*cmd));
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
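/*
 * Editor's note: SQ doorbell writes are deferred; nvme_write_sq_db() only
 * hits the MMIO register when the caller marks the end of a batch (or the
 * tail wraps), which coalesces doorbell writes across queued commands, and
 * with the dbbuf shadow doorbell enabled even that write is skipped unless
 * the device's event index says it is needed.
 */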
/* nvme_commit_rqs() */
	struct nvme_queue *nvmeq = hctx->driver_data;

	spin_lock(&nvmeq->sq_lock);
	if (nvmeq->sq_tail != nvmeq->last_sq_tail)
		nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);

/* nvme_pci_metadata_use_sgls() */
	if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl))
		return false;
	return req->nr_integrity_segments > 1 ||
	       nvme_req(req)->flags & NVME_REQ_USERCMD;

/* nvme_pci_use_sgls() */
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;

	if (!nvme_ctrl_sgl_supported(&dev->ctrl))
		return false;
	if (!nvmeq->qid)
		return false;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return nvme_req(req)->flags & NVME_REQ_USERCMD;

/* nvme_free_prps() */
	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
	dma_addr_t dma_addr = iod->first_dma;
	int i;

	for (i = 0; i < iod->nr_allocations; i++) {
		__le64 *prp_list = iod->list[i].prp_list;
		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);

		dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
		dma_addr = next_dma_addr;
	}

/* nvme_unmap_data() */
	if (iod->dma_len) {
		dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
			       rq_dma_dir(req));
		return;
	}

	WARN_ON_ONCE(!iod->sgt.nents);

	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);

	if (iod->nr_allocations == 0)
		dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list,
			      iod->first_dma);
	else if (iod->nr_allocations == 1)
		dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list,
			      iod->first_dma);
	else
		nvme_free_prps(dev, req);
	mempool_free(iod->sgt.sgl, dev->iod_mempool);

/* nvme_print_sgl() */
		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));

/* nvme_pci_setup_prps() */
	struct scatterlist *sg = iod->sgt.sgl;
	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);

	length -= (NVME_CTRL_PAGE_SIZE - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
	if (dma_len)
		dma_addr += (NVME_CTRL_PAGE_SIZE - offset);

	if (length <= NVME_CTRL_PAGE_SIZE) {
		iod->first_dma = dma_addr;
		goto done;
	}

	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->nr_allocations = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->nr_allocations = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->nr_allocations = -1;
		return BLK_STS_RESOURCE;
	}
	iod->list[0].prp_list = prp_list;
	iod->first_dma = prp_dma;
	for (;;) {
		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
			__le64 *old_prp_list = prp_list;

			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				goto free_prps;
			iod->list[iod->nr_allocations++].prp_list = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= NVME_CTRL_PAGE_SIZE;
		dma_addr += NVME_CTRL_PAGE_SIZE;
		length -= NVME_CTRL_PAGE_SIZE;
		if (length <= 0)
			break;
	}
done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
	return BLK_STS_OK;
free_prps:
	nvme_free_prps(dev, req);
	return BLK_STS_RESOURCE;
bad_sgl:
	WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
	     "Invalid SGL for payload:%d nents:%d\n",
	     blk_rq_payload_bytes(req), iod->sgt.nents);
	return BLK_STS_IOERR;
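/*
 * Editor's note: in the loop above, when a PRP list page fills up, its last
 * slot is turned into a chain pointer to a freshly allocated page: the
 * displaced entry moves to prp_list[0] of the new page and the old slot
 * receives cpu_to_le64(prp_dma), matching the PRP list chaining rule in the
 * NVMe specification.
 */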
/* nvme_pci_sgl_set_data() */
	sge->addr = cpu_to_le64(sg_dma_address(sg));
	sge->length = cpu_to_le32(sg_dma_len(sg));
	sge->type = NVME_SGL_FMT_DATA_DESC << 4;

/* nvme_pci_sgl_set_seg() */
	sge->addr = cpu_to_le64(dma_addr);
	sge->length = cpu_to_le32(entries * sizeof(*sge));
	sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
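/*
 * Editor's note: the upper nibble of the last byte of an NVMe SGL
 * descriptor carries the descriptor type, hence the "<< 4" above: a data
 * block descriptor for plain data and a last-segment descriptor that points
 * at the final array of data descriptors.
 */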
/* nvme_pci_setup_sgls() */
	struct scatterlist *sg = iod->sgt.sgl;
	unsigned int entries = iod->sgt.nents;

	/* setting the transfer type as SGL */
	cmd->flags = NVME_CMD_SGL_METABUF;

	if (entries == 1) {
		nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
		return BLK_STS_OK;
	}
	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
		pool = dev->prp_small_pool;
		iod->nr_allocations = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->nr_allocations = 1;
	}
	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list) {
		iod->nr_allocations = -1;
		return BLK_STS_RESOURCE;
	}
	iod->list[0].sg_list = sg_list;
	iod->first_dma = sgl_dma;

	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
	do {
		nvme_pci_sgl_set_data(&sg_list[i++], sg);
		sg = sg_next(sg);
	} while (--entries > 0);

/* nvme_setup_prp_simple() */
	unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
	if (bv->bv_len > first_prp_len)
		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
	else
		cmnd->dptr.prp2 = 0;
	return BLK_STS_OK;

/* nvme_setup_sgl_simple() */
	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->flags = NVME_CMD_SGL_METABUF;
	cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
	cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
	cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
	return BLK_STS_OK;
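/*
 * Editor's note: the two *_simple() helpers above are fast paths for
 * requests with a single physical segment; they map the bio_vec directly
 * and place it inline in the command, skipping the scatterlist and
 * descriptor-pool allocations entirely.
 */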
/* nvme_map_data() */
	if (blk_rq_nr_phys_segments(req) == 1) {
		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
		struct bio_vec bv = req_bvec(req);

		if ((bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
		     bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
			return nvme_setup_prp_simple(dev, req,
						     &cmnd->rw, &bv);
		if (nvmeq->qid && sgl_threshold &&
		    nvme_ctrl_sgl_supported(&dev->ctrl))
			return nvme_setup_sgl_simple(dev, req,
						     &cmnd->rw, &bv);
	}

	iod->dma_len = 0;
	iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
	if (!iod->sgt.sgl)
		return BLK_STS_RESOURCE;
	sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
	iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
	if (!iod->sgt.orig_nents)
		goto out_free_sg;

	rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
			     DMA_ATTR_NO_WARN);
	if (rc) {
		if (rc == -EREMOTEIO)
			ret = BLK_STS_TARGET;
		goto out_free_sg;
	}

	if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
	else
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);

out_unmap_sg:
	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
out_free_sg:
	mempool_free(iod->sgt.sgl, dev->iod_mempool);

/* nvme_pci_setup_meta_sgls() */
	struct nvme_rw_command *cmnd = &iod->cmd.rw;

	iod->meta_sgt.sgl = mempool_alloc(dev->iod_meta_mempool, GFP_ATOMIC);
	if (!iod->meta_sgt.sgl)
		return BLK_STS_RESOURCE;

	sg_init_table(iod->meta_sgt.sgl, req->nr_integrity_segments);
	iod->meta_sgt.orig_nents = blk_rq_map_integrity_sg(req,
							   iod->meta_sgt.sgl);
	if (!iod->meta_sgt.orig_nents)
		goto out_free_sg;

	rc = dma_map_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req),
			     DMA_ATTR_NO_WARN);
	if (rc)
		goto out_free_sg;

	sg_list = dma_pool_alloc(dev->prp_small_pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list)
		goto out_unmap_sg;

	entries = iod->meta_sgt.nents;
	iod->meta_list.sg_list = sg_list;
	iod->meta_dma = sgl_dma;

	cmnd->flags = NVME_CMD_SGL_METASEG;
	cmnd->metadata = cpu_to_le64(sgl_dma);

	sgl = iod->meta_sgt.sgl;
	if (entries == 1) {
		nvme_pci_sgl_set_data(sg_list, sgl);
		return BLK_STS_OK;
	}
	for_each_sg(sgl, sg, entries, i)
		nvme_pci_sgl_set_data(&sg_list[i + 1], sg);
	return BLK_STS_OK;

out_unmap_sg:
	dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0);
out_free_sg:
	mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool);
	return BLK_STS_RESOURCE;

/* nvme_pci_setup_meta_mptr() */
	struct nvme_command *cmnd = &iod->cmd;

	iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->meta_dma))
		return BLK_STS_IOERR;
	cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
	return BLK_STS_OK;

/* nvme_prep_rq() */
	iod->aborted = false;
	iod->nr_allocations = -1;
	iod->sgt.nents = 0;
	iod->meta_sgt.nents = 0;

	ret = nvme_setup_cmd(req->q->queuedata, req);
	if (ret)
		return ret;

	ret = nvme_map_data(dev, req, &iod->cmd);
/* nvme_queue_rq() */
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;

	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
		return BLK_STS_IOERR;

	if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
		return nvme_fail_nonready_command(&dev->ctrl, req);

	spin_lock(&nvmeq->sq_lock);
	nvme_sq_copy_cmd(nvmeq, &iod->cmd);
	nvme_write_sq_db(nvmeq, bd->last);
	spin_unlock(&nvmeq->sq_lock);

/* nvme_submit_cmds() */
	spin_lock(&nvmeq->sq_lock);
	nvme_sq_copy_cmd(nvmeq, &iod->cmd);
	nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);

/* nvme_prep_rq_batch() */
	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
		return false;
	if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
		return false;

	return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;

/* nvme_queue_rqs() */
	if (nvmeq && nvmeq != req->mq_hctx->driver_data)
		nvme_submit_cmds(nvmeq, rqlist);
	nvmeq = req->mq_hctx->driver_data;

/* nvme_unmap_metadata() */
	if (!iod->meta_sgt.nents) {
		dma_unmap_page(dev->dev, iod->meta_dma,
			       rq_integrity_vec(req).bv_len,
			       rq_dma_dir(req));
		return;
	}

	dma_pool_free(dev->prp_small_pool, iod->meta_list.sg_list,
		      iod->meta_dma);
	dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0);
	mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool);

/* nvme_pci_unmap_rq() */
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
/* nvme_cqe_pending() */
	struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];

	return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;

/* nvme_ring_cq_doorbell() */
	u16 head = nvmeq->cq_head;

	if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
					      nvmeq->dbbuf_cq_ei))
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);

/* nvme_queue_tagset() */
	if (!nvmeq->qid)
		return nvmeq->dev->admin_tagset.tags[0];
	return nvmeq->dev->tagset.tags[nvmeq->qid - 1];

/* nvme_handle_cqe() */
	struct nvme_completion *cqe = &nvmeq->cqes[idx];
	__u16 command_id = READ_ONCE(cqe->command_id);

	if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
		nvme_complete_async_event(&nvmeq->dev->ctrl,
					  cqe->status, &cqe->result);
		return;
	}

	req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
	if (unlikely(!req)) {
		dev_warn(nvmeq->dev->ctrl.device,
			 "invalid id %d completed on queue %d\n",
			 command_id, le16_to_cpu(cqe->sq_id));
		return;
	}

	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
	if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
	    !blk_mq_add_to_batch(req, iob,
				 nvme_req(req)->status != NVME_SC_SUCCESS,
				 nvme_pci_complete_batch))
		nvme_pci_complete_rq(req);

/* nvme_update_cq_head() */
	u32 tmp = nvmeq->cq_head + 1;

	if (tmp == nvmeq->q_depth) {
		nvmeq->cq_head = 0;
		nvmeq->cq_phase ^= 1;
	} else {
		nvmeq->cq_head = tmp;
	}
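/*
 * Editor's note: the controller inverts the phase tag it posts in each CQE
 * every time it wraps the completion queue, so flipping cq_phase on the
 * driver's own wrap keeps nvme_cqe_pending()'s "(status & 1) == cq_phase"
 * test meaning "this entry is new" without any head/tail exchange with the
 * device.
 */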
/* nvme_poll_cq() */
	/*
	 * load-load control dependency between phase and the rest of
	 * the cqe requires a full read memory barrier
	 */
	dma_rmb();
	nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head);

/* nvme_poll_irqdisable() */
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);

	WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));

	disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
	nvme_poll_cq(nvmeq, NULL);
	enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));

/* nvme_poll() */
	struct nvme_queue *nvmeq = hctx->driver_data;

	spin_lock(&nvmeq->cq_poll_lock);
	found = nvme_poll_cq(nvmeq, iob);
	spin_unlock(&nvmeq->cq_poll_lock);

/* nvme_pci_submit_async_event() */
	struct nvme_queue *nvmeq = &dev->queues[0];

	spin_lock(&nvmeq->sq_lock);
	nvme_sq_copy_cmd(nvmeq, &c);
	nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);

/* nvme_pci_subsystem_reset() */
	mutex_lock(&dev->shutdown_lock);
	if (!dev->bar_mapped_size) {
		ret = -ENODEV;
		goto unlock;
	}
	if (!nvme_wait_reset(&dev->ctrl)) {
		ret = -EBUSY;
		goto unlock;
	}

	writel(NVME_SUBSYS_RESET, dev->bar + NVME_REG_NSSR);

	/* read to flush the posted write before unlocking */
	readl(dev->bar + NVME_REG_CSTS);
unlock:
	mutex_unlock(&dev->shutdown_lock);
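/*
 * Editor's note: NVME_SUBSYS_RESET is the magic value 0x4E564D65, the ASCII
 * bytes "NVMe"; writing it to the NSSR register requests an NVM subsystem
 * reset, and the trailing CSTS read flushes the posted MMIO write toward
 * the device.
 */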
/* adapter_delete_queue() */
	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);

/* adapter_alloc_cq() */
	if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
		flags |= NVME_CQ_IRQ_ENABLED;

	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);

/* adapter_alloc_sq() */
	struct nvme_ctrl *ctrl = &dev->ctrl;

	/*
	 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
	 * set. Since URGENT priority is zeroes, it makes all queues
	 * URGENT.
	 */
	if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
		flags |= NVME_SQ_PRIO_MEDIUM;

	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
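/*
 * Editor's note: QSIZE in the create-queue commands is a zero's based
 * field per the NVMe spec, which is why q_depth - 1 is written for both
 * the submission and completion queue above.
 */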
/* abort_endio() */
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;

	dev_warn(nvmeq->dev->ctrl.device,
		 "Abort status: 0x%x", nvme_req(req)->status);
	atomic_inc(&nvmeq->dev->ctrl.abort_limit);

/* nvme_should_reset() */
	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);

	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
	switch (nvme_ctrl_state(&dev->ctrl)) {
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	default:
		break;
	}

/* nvme_warn_reset() */
	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
				      &pci_status);
	if (result == PCIBIOS_SUCCESSFUL)
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
			 csts, pci_status);
	else
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
			 csts, result);

	if (csts != ~0)
		return;

	dev_warn(dev->ctrl.device,
		 "Does your device have a faulty power saving mode enabled?\n");
	dev_warn(dev->ctrl.device,
		 "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");

/* nvme_timeout() */
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	u32 csts = readl(dev->bar + NVME_REG_CSTS);

	if (pci_dev_is_disconnected(pdev))
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
	if (nvme_state_terminal(&dev->ctrl))
		nvme_dev_disable(dev, true);

	/* Did we miss an interrupt? */
	if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
		nvme_poll(req->mq_hctx, NULL);
	else
		nvme_poll_irqdisable(nvmeq);

	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
		dev_warn(dev->ctrl.device,
			 "I/O tag %d (%04x) QID %d timeout, completion polled\n",
			 req->tag, nvme_cid(req), nvmeq->qid);
		return BLK_EH_DONE;
	}

	switch (nvme_ctrl_state(&dev->ctrl)) {
	case NVME_CTRL_CONNECTING:
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
		fallthrough;
	case NVME_CTRL_DELETING:
		dev_warn_ratelimited(dev->ctrl.device,
			 "I/O tag %d (%04x) QID %d timeout, disable controller\n",
			 req->tag, nvme_cid(req), nvmeq->qid);
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		nvme_dev_disable(dev, true);
		return BLK_EH_DONE;
	default:
		break;
	}

	opcode = nvme_req(req)->cmd->common.opcode;
	if (!nvmeq->qid || iod->aborted) {
		dev_warn(dev->ctrl.device,
			 "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n",
			 req->tag, nvme_cid(req), opcode,
			 nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid);
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		goto disable;
	}

	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}
	iod->aborted = true;

	cmd.abort.opcode = nvme_admin_abort_cmd;
	cmd.abort.cid = nvme_cid(req);
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);

	dev_warn(nvmeq->dev->ctrl.device,
		 "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, aborting req_op:%s(%u) size:%u\n",
		 req->tag, nvme_cid(req), opcode, nvme_get_opcode_str(opcode),
		 nvmeq->qid, blk_op_str(req_op(req)), req_op(req),
		 blk_rq_bytes(req));

	abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
					 BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(abort_req)) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}

	abort_req->end_io = abort_endio;
	abort_req->end_io_data = NULL;
	blk_execute_rq_nowait(abort_req, false);

	/*
	 * The aborted req will be completed on receiving the abort req.
	 * We enable the timer again. If hit twice, it'll cause a device reset,
	 * as the device then is in a faulty state.
	 */
	return BLK_EH_RESET_TIMER;

disable:
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
		if (nvme_state_terminal(&dev->ctrl))
			nvme_dev_disable(dev, true);
		return BLK_EH_DONE;
	}

	nvme_dev_disable(dev, false);
	if (nvme_try_sched_reset(&dev->ctrl))
		nvme_unquiesce_io_queues(&dev->ctrl);
	return BLK_EH_DONE;
/* nvme_free_queue() */
	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
			  (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	if (!nvmeq->sq_cmds)
		return;

	if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
		pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
				nvmeq->sq_cmds, SQ_SIZE(nvmeq));
	} else {
		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
				  nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	}

/* nvme_free_queues() */
	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
		dev->ctrl.queue_count--;
		nvme_free_queue(&dev->queues[i]);
	}

/* nvme_suspend_queue() */
	struct nvme_queue *nvmeq = &dev->queues[qid];

	if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
		return;

	nvmeq->dev->online_queues--;
	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
		nvme_quiesce_admin_queue(&nvmeq->dev->ctrl);
	if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
		pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq);

/* nvme_suspend_io_queues() */
	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
		nvme_suspend_queue(dev, i);

/* nvme_reap_pending_cqes() */
	for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
		spin_lock(&dev->queues[i].cq_poll_lock);
		nvme_poll_cq(&dev->queues[i], NULL);
		spin_unlock(&dev->queues[i].cq_poll_lock);
	}

/* nvme_cmb_qdepth() */
	int q_depth = dev->q_depth;

	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);

		if (q_depth < 64)
			return -ENOMEM;
	}

/* nvme_alloc_sq_cmds() */
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
		if (nvmeq->sq_cmds) {
			nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
							nvmeq->sq_cmds);
			if (nvmeq->sq_dma_addr) {
				set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
				return 0;
			}

			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
		}
	}

	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
					    &nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		return -ENOMEM;
	return 0;

/* nvme_alloc_queue() */
	struct nvme_queue *nvmeq = &dev->queues[qid];

	if (dev->ctrl.queue_count > qid)
		return 0;

	nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES;
	nvmeq->q_depth = depth;
	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
					 &nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;

	if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
		goto free_cqdma;

	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->sq_lock);
	spin_lock_init(&nvmeq->cq_poll_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->qid = qid;
	dev->ctrl.queue_count++;

	return 0;

free_cqdma:
	dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
			  nvmeq->cq_dma_addr);
free_nvmeq:
	return -ENOMEM;
/* queue_request_irq() */
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
	int nr = nvmeq->dev->ctrl.instance;

	if (use_threaded_interrupts) {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
				nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	} else {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
				NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	}

/* nvme_init_queue() */
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->last_sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
	nvme_dbbuf_init(dev, nvmeq, qid);
	dev->online_queues++;

/* nvme_setup_io_queues_trylock() */
	/* Give up if the lock is being held by nvme_dev_disable(). */
	if (!mutex_trylock(&dev->shutdown_lock))
		return -ENODEV;

	/* Controller is in wrong state, fail early. */
	if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) {
		mutex_unlock(&dev->shutdown_lock);
		return -ENODEV;
	}

/* nvme_create_queue() */
	struct nvme_dev *dev = nvmeq->dev;

	clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);

	/*
	 * A queue's vector matches the queue identifier unless the controller
	 * has only one vector available.
	 */
	if (!polled)
		vector = dev->num_vecs == 1 ? 0 : qid;
	else
		set_bit(NVMEQ_POLLED, &nvmeq->flags);

	nvmeq->cq_vector = vector;

	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
	mutex_unlock(&dev->shutdown_lock);
	return result;

release_sq:
	dev->online_queues--;
	mutex_unlock(&dev->shutdown_lock);

/* nvme_dev_remove_admin() */
	if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
		nvme_unquiesce_admin_queue(&dev->ctrl);
		nvme_remove_admin_tag_set(&dev->ctrl);
	}

/* db_bar_size() */
	return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
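/*
 * Editor's note: each queue pair needs a 4-byte submission and a 4-byte
 * completion doorbell, padded to the controller's doorbell stride (hence
 * the factor 8 * db_stride), and the "+ 1" accounts for the admin queue
 * pair that sits in front of the I/O queues.
 */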
/* nvme_remap_bar() */
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (size <= dev->bar_mapped_size)
		return 0;
	if (size > pci_resource_len(pdev, 0))
		return -ENOMEM;
	if (dev->bar)
		iounmap(dev->bar);
	dev->bar = ioremap(pci_resource_start(pdev, 0), size);
	if (!dev->bar) {
		dev->bar_mapped_size = 0;
		return -ENOMEM;
	}
	dev->bar_mapped_size = size;
	dev->dbs = dev->bar + NVME_REG_DBS;

	return 0;

/* nvme_pci_configure_admin_queue() */
	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
				NVME_CAP_NSSRC(dev->ctrl.cap) : 0;

	if (dev->subsystem &&
	    (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
		writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);

	result = nvme_disable_ctrl(&dev->ctrl, false);
	if (result < 0)
		return result;

	dev->ctrl.numa_node = dev_to_node(dev->dev);

	nvmeq = &dev->queues[0];
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	writel(aqa, dev->bar + NVME_REG_AQA);
	lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
	lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);

	result = nvme_enable_ctrl(&dev->ctrl);
	if (result)
		return result;

	nvmeq->cq_vector = 0;
	result = queue_request_irq(nvmeq);
	if (result) {
		dev->online_queues--;
		return result;
	}

	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
/* nvme_create_io_queues() */
	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
		if (nvme_alloc_queue(dev, i, dev->q_depth)) {
			ret = -ENOMEM;
			break;
		}
	}

	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
	if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) {
		rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] +
				dev->io_queues[HCTX_TYPE_READ];
	} else {
		rw_queues = max;
	}

	for (i = dev->online_queues; i <= max; i++) {
		bool polled = i > rw_queues;

		ret = nvme_create_queue(&dev->queues[i], i, polled);
		if (ret)
			break;
	}

/* nvme_cmb_size_unit() */
	u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;

	return 1ULL << (12 + 4 * szu);
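/*
 * Editor's note: CMBSZ.SZU encodes the controller memory buffer size unit
 * as a power of 16 starting at 4 KiB, so 1ULL << (12 + 4 * szu) yields
 * 4 KiB for szu = 0, 64 KiB for szu = 1, up to 64 GiB for szu = 6.
 */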
/* nvme_cmb_size() */
	return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;

/* nvme_map_cmb() */
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (dev->cmb_size)
		return;

	if (NVME_CAP_CMBS(dev->ctrl.cap))
		writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);

	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
	if (!dev->cmbsz)
		return;
	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);

	offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
	bar = NVME_CMB_BIR(dev->cmbloc);
	bar_size = pci_resource_len(pdev, bar);

	/*
	 * Controllers may support a CMB size larger than their BAR; reduce
	 * the CMB to the reported size of the BAR.
	 */
	size = min(size, bar_size - offset);

	if (NVME_CAP_CMBS(dev->ctrl.cap)) {
		u64 dma_addr = pci_bus_address(pdev, bar) + offset;

		hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE | dma_addr,
			     dev->bar + NVME_REG_CMBMSC);
	}

	if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
		dev_warn(dev->ctrl.device,
			 "failed to register the CMB\n");
		hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC);
		return;
	}

	dev->cmb_size = size;
	dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS);

	if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
			(NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
		pci_p2pmem_publish(pdev, true);
/* nvme_set_host_mem() */
	u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT;
	u64 dma_addr = dev->host_mem_descs_dma;

	c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs);

	ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
	if (ret) {
		dev_warn(dev->ctrl.device,
			 "failed to set host mem (err %d, flags %#x).\n",
			 ret, bits);
	} else
		dev->hmb = bits & NVME_HOST_MEM_ENABLE;

/* nvme_free_host_mem_multi() */
	for (i = 0; i < dev->nr_host_mem_descs; i++) {
		struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
		size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE;

		dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
			       le64_to_cpu(desc->addr),
			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
	}

	kfree(dev->host_mem_desc_bufs);
	dev->host_mem_desc_bufs = NULL;

/* nvme_free_host_mem() */
	if (dev->hmb_sgt)
		dma_free_noncontiguous(dev->dev, dev->host_mem_size,
				       dev->hmb_sgt, DMA_BIDIRECTIONAL);
	else
		nvme_free_host_mem_multi(dev);

	dma_free_coherent(dev->dev, dev->host_mem_descs_size,
			  dev->host_mem_descs, dev->host_mem_descs_dma);
	dev->host_mem_descs = NULL;
	dev->host_mem_descs_size = 0;
	dev->nr_host_mem_descs = 0;

/* nvme_alloc_host_mem_single() */
	dev->hmb_sgt = dma_alloc_noncontiguous(dev->dev, size,
				DMA_BIDIRECTIONAL, GFP_KERNEL, 0);
	if (!dev->hmb_sgt)
		return -ENOMEM;

	dev->host_mem_descs = dma_alloc_coherent(dev->dev,
			sizeof(*dev->host_mem_descs), &dev->host_mem_descs_dma,
			GFP_KERNEL);
	if (!dev->host_mem_descs) {
		dma_free_noncontiguous(dev->dev, size, dev->hmb_sgt,
				       DMA_BIDIRECTIONAL);
		dev->hmb_sgt = NULL;
		return -ENOMEM;
	}
	dev->host_mem_size = size;
	dev->host_mem_descs_size = sizeof(*dev->host_mem_descs);
	dev->nr_host_mem_descs = 1;

	dev->host_mem_descs[0].addr =
		cpu_to_le64(dev->hmb_sgt->sgl->dma_address);
	dev->host_mem_descs[0].size = cpu_to_le32(size / NVME_CTRL_PAGE_SIZE);
	return 0;

/* nvme_alloc_host_mem_multi() */
	tmp = (preferred + chunk_size - 1);
	do_div(tmp, chunk_size);
	max_entries = tmp;

	if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
		max_entries = dev->ctrl.hmmaxd;

	descs = dma_alloc_coherent(dev->dev, descs_size, &descs_dma,
				   GFP_KERNEL);
	if (!descs)
		goto out;

	for (size = 0; size < preferred && i < max_entries; size += len) {
		len = min_t(u64, chunk_size, preferred - size);
		bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
				DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
		if (!bufs[i])
			break;

		descs[i].addr = cpu_to_le64(dma_addr);
		descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE);
		i++;
	}

	if (!size)
		goto out_free_descs;

	dev->nr_host_mem_descs = i;
	dev->host_mem_size = size;
	dev->host_mem_descs = descs;
	dev->host_mem_descs_dma = descs_dma;
	dev->host_mem_descs_size = descs_size;
	dev->host_mem_desc_bufs = bufs;
	return 0;

out_free_descs:
	dma_free_coherent(dev->dev, descs_size, descs, descs_dma);
out:
	dev->host_mem_descs = NULL;
	return -ENOMEM;
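/*
 * Editor's note: the multi-descriptor path above builds the host memory
 * buffer from up to hmmaxd DMA chunks of chunk_size bytes each; the caller
 * (nvme_alloc_host_mem(), below) retries with the chunk size halved until
 * the controller's minimum is satisfied or the size floor is reached.
 */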
/* nvme_alloc_host_mem() */
	unsigned long dma_merge_boundary = dma_get_merge_boundary(dev->dev);
	u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);

	/*
	 * If there is an IOMMU that can merge pages, try a virtually
	 * non-contiguous allocation for a single segment first.
	 */
	if (dma_merge_boundary && (PAGE_SIZE & dma_merge_boundary) == 0) {
		if (!nvme_alloc_host_mem_single(dev, preferred))
			return 0;
	}

	/* start big and work our way down */
	for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) {
		if (!nvme_alloc_host_mem_multi(dev, preferred, chunk_size)) {
			if (!min || dev->host_mem_size >= min)
				return 0;
			nvme_free_host_mem(dev);
		}
	}
	return -ENOMEM;

/* nvme_setup_host_mem() */
	u64 preferred = (u64)dev->ctrl.hmpre * 4096;
	u64 min = (u64)dev->ctrl.hmmin * 4096;

	if (!dev->ctrl.hmpre)
		return 0;

	if (min > max) {
		dev_warn(dev->ctrl.device,
			 "min host memory (%lld MiB) above limit (%d MiB).\n",
			 min >> ilog2(SZ_1M), max_host_mem_size_mb);
		nvme_free_host_mem(dev);
		return 0;
	}

	/* If we already have a buffer allocated check if we can reuse it. */
	if (dev->host_mem_descs) {
		if (dev->host_mem_size >= min)
			enable_bits |= NVME_HOST_MEM_RETURN;
		else
			nvme_free_host_mem(dev);
	}

	if (!dev->host_mem_descs) {
		if (nvme_alloc_host_mem(dev, min, preferred)) {
			dev_warn(dev->ctrl.device,
				 "failed to allocate host memory buffer.\n");
			return 0; /* controller must work without HMB */
		}

		dev_info(dev->ctrl.device,
			 "allocated %lld MiB host memory buffer (%u segment%s).\n",
			 dev->host_mem_size >> ilog2(SZ_1M),
			 dev->nr_host_mem_descs,
			 str_plural(dev->nr_host_mem_descs));
	}

/* cmb_show() */
	return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz  : x%08x\n",
			  ndev->cmbloc, ndev->cmbsz);

/* cmbloc_show() */
	return sysfs_emit(buf, "%u\n", ndev->cmbloc);

/* cmbsz_show() */
	return sysfs_emit(buf, "%u\n", ndev->cmbsz);

/* hmb_show() */
	return sysfs_emit(buf, "%d\n", ndev->hmb);

/* hmb_store() */
	if (kstrtobool(buf, &new) < 0)
		return -EINVAL;

	if (new == ndev->hmb)
		return count;

/* nvme_pci_attrs_are_visible() */
	if (a == &dev_attr_cmb.attr || a == &dev_attr_cmbloc.attr ||
	    a == &dev_attr_cmbsz.attr) {
		if (!dev->cmbsz)
			return 0;
	}
	if (a == &dev_attr_hmb.attr && !ctrl->hmpre)
		return 0;

	return a->mode;

/* nvme_update_attrs() */
	sysfs_update_group(&dev->ctrl.device->kobj, &nvme_pci_dev_attrs_group);
/* nvme_calc_irq_sets() */
	struct nvme_dev *dev = affd->priv;
	unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;

	/*
	 * If there is no interrupt available for queues, ensure that
	 * the default queue is set to 1. The affinity set size is
	 * also set to one, but the irq core ignores it for this case.
	 */
	if (!nrirqs) {
		nrirqs = 1;
		nr_read_queues = 0;
	} else if (nrirqs == 1 || !nr_write_queues) {
		nr_read_queues = 0;
	} else if (nr_write_queues >= nrirqs) {
		nr_read_queues = 1;
	} else {
		nr_read_queues = nrirqs - nr_write_queues;
	}

	dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
	affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
	dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
	affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
	affd->nr_sets = nr_read_queues ? 2 : 1;
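/*
 * Editor's note: this callback re-partitions the granted interrupt vectors
 * into a write (default) set and a read set each time
 * pci_alloc_irq_vectors_affinity() has to retry with fewer vectors, so the
 * two affinity sets always sum to the vector count actually granted.
 */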
/* nvme_setup_irqs() */
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct irq_affinity affd = {
		.pre_vectors	= 1,
		.calc_sets	= nvme_calc_irq_sets,
		.priv		= dev,
	};

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O
	 * queue left over for non-polled I/O.
	 */
	poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1);
	dev->io_queues[HCTX_TYPE_POLL] = poll_queues;

	/* Initialize for the single interrupt case. */
	dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
	dev->io_queues[HCTX_TYPE_READ] = 0;

	/*
	 * We need interrupts for the admin queue and each non-polled I/O queue,
	 * but some Apple controllers require all queues to use the first
	 * vector.
	 */
	irq_queues = 1;
	if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
		irq_queues += (nr_io_queues - poll_queues);
	if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
		flags &= ~PCI_IRQ_MSI;
	return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, flags,
					      &affd);

/* nvme_max_io_queues() */
	/*
	 * If tags are shared with the admin queue (Apple bug), then
	 * make sure we only use one I/O queue.
	 */
	if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
		return 1;
	return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues;
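/*
 * Editor's note: one queue per possible CPU plus the user-requested write
 * and poll queues bounds the worst case; the counts actually used are
 * negotiated with the controller later in nvme_setup_io_queues().
 */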
/* nvme_setup_io_queues() */
	struct nvme_queue *adminq = &dev->queues[0];
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	/*
	 * Sample the module parameters once, since we now depend on them for
	 * the rest of this function and during reset.
	 */
	dev->nr_write_queues = write_queues;
	dev->nr_poll_queues = poll_queues;

	nr_io_queues = dev->nr_allocated_queues - 1;
	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
	if (result < 0)
		return result;

	if (nr_io_queues == 0)
		return 0;

	result = nvme_setup_io_queues_trylock(dev);
	if (result)
		return result;
	if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
		pci_free_irq(pdev, 0, adminq);

	if (dev->cmb_use_sqes) {
		result = nvme_cmb_qdepth(dev, nr_io_queues,
				sizeof(struct nvme_command));
		if (result > 0) {
			dev->q_depth = result;
			dev->ctrl.sqsize = result - 1;
		} else {
			dev->cmb_use_sqes = false;
		}
	}

	do {
		size = db_bar_size(dev, nr_io_queues);
		result = nvme_remap_bar(dev, size);
		if (!result)
			break;
		if (!--nr_io_queues) {
			result = -ENOMEM;
			goto out_unlock;
		}
	} while (1);
	adminq->q_db = dev->dbs;

retry:
	/* Deregister the admin queue's interrupt */
	if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
		pci_free_irq(pdev, 0, adminq);

	result = nvme_setup_irqs(dev, nr_io_queues);
	if (result <= 0) {
		result = -EIO;
		goto out_unlock;
	}

	dev->num_vecs = result;
	result = max(result - 1, 1);
	dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];

	result = queue_request_irq(adminq);
	if (result)
		goto out_unlock;
	set_bit(NVMEQ_ENABLED, &adminq->flags);
	mutex_unlock(&dev->shutdown_lock);

	result = nvme_create_io_queues(dev);
	if (result || dev->online_queues < 2)
		return result;

	result = nvme_setup_io_queues_trylock(dev);
	if (result)
		return result;
	if (dev->online_queues - 1 < dev->max_qid) {
		nr_io_queues = dev->online_queues - 1;
		nvme_delete_io_queues(dev);
		nvme_suspend_io_queues(dev);
		goto retry;
	}
	dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
		 dev->io_queues[HCTX_TYPE_DEFAULT],
		 dev->io_queues[HCTX_TYPE_READ],
		 dev->io_queues[HCTX_TYPE_POLL]);
	mutex_unlock(&dev->shutdown_lock);
	return 0;

out_unlock:
	mutex_unlock(&dev->shutdown_lock);
	return result;

/* nvme_del_queue_end() */
	struct nvme_queue *nvmeq = req->end_io_data;

	complete(&nvmeq->delete_done);

/* nvme_del_cq_end() */
	struct nvme_queue *nvmeq = req->end_io_data;

	if (error)
		set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);

/* nvme_delete_queue() */
	struct request_queue *q = nvmeq->dev->ctrl.admin_q;

	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);

	if (opcode == nvme_admin_delete_cq)
		req->end_io = nvme_del_cq_end;
	else
		req->end_io = nvme_del_queue_end;
	req->end_io_data = nvmeq;

	init_completion(&nvmeq->delete_done);

/* __nvme_delete_io_queues() */
	int nr_queues = dev->online_queues - 1, sent = 0;

	while (nr_queues > 0) {
		if (nvme_delete_queue(&dev->queues[nr_queues], opcode))
			break;
		nr_queues--;
		sent++;
	}
	while (sent) {
		struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent];

		timeout = wait_for_completion_io_timeout(&nvmeq->delete_done,
							 timeout);
		if (timeout == 0)
			return false;

		sent--;
	}

/* nvme_pci_nr_maps() */
	if (dev->io_queues[HCTX_TYPE_POLL])
		return 3;
	if (dev->io_queues[HCTX_TYPE_READ])
		return 2;
	return 1;
/* nvme_pci_update_nr_queues() */
	if (!dev->ctrl.tagset) {
		nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
				nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
		return true;
	}

	/* Give up if we are racing with nvme_dev_disable() */
	if (!mutex_trylock(&dev->shutdown_lock))
		return false;

	/* Check if GSI for online queues could not be allocated */
	if (!dev->online_queues) {
		mutex_unlock(&dev->shutdown_lock);
		return false;
	}

	blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
	/* free previously allocated queues that are no longer usable */
	nvme_free_queues(dev, dev->online_queues);
	mutex_unlock(&dev->shutdown_lock);
	return true;

/* nvme_pci_enable() */
	int result = -ENOMEM;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (readl(dev->bar + NVME_REG_CSTS) == -1) {
		result = -ENODEV;
		goto disable;
	}

	/*
	 * Some devices and/or platforms don't advertise or work with INTx
	 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
	 * adjust this later.
	 */
	if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
		flags &= ~PCI_IRQ_MSI;
	result = pci_alloc_irq_vectors(pdev, 1, 1, flags);
	if (result < 0)
		goto disable;

	dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);

	dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
				io_queue_depth);
	dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
	dev->dbs = dev->bar + 4096;

	/*
	 * Some Apple controllers require a non-standard SQE size.
	 * Interestingly they also seem to ignore the CC:IOSQES register
	 * so we don't bother updating it here.
	 */
	if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES)
		dev->io_sqes = 7;
	else
		dev->io_sqes = NVME_NVM_IOSQES;

	if (dev->ctrl.quirks & NVME_QUIRK_QDEPTH_ONE) {
		dev->q_depth = 2;
	} else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
		   (pdev->device == 0xa821 || pdev->device == 0xa822) &&
		   NVME_CAP_MQES(dev->ctrl.cap) == 0) {
		dev->q_depth = 64;
		dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, "
			"set queue depth=%u\n", dev->q_depth);
	}

	/*
	 * Controllers with the shared tags quirk need the IO queue to be big
	 * enough so that we get 32 tags for the admin queue.
	 */
	if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) &&
	    (dev->q_depth < (NVME_AQ_DEPTH + 2))) {
		dev->q_depth = NVME_AQ_DEPTH + 2;
		dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n",
			 dev->q_depth);
	}
	dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
/* nvme_dev_unmap() */
	if (dev->bar)
		iounmap(dev->bar);
	pci_release_mem_regions(to_pci_dev(dev->dev));

/* nvme_pci_ctrl_is_dead() */
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (!pci_is_enabled(pdev) || !pci_device_is_present(pdev))
		return true;
	if (pdev->error_state != pci_channel_io_normal)
		return true;

	csts = readl(dev->bar + NVME_REG_CSTS);
	return (csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY);

/* nvme_dev_disable() */
	enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl);
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	mutex_lock(&dev->shutdown_lock);
	if (state == NVME_CTRL_LIVE || state == NVME_CTRL_RESETTING) {
		if (pci_is_enabled(pdev))
			nvme_start_freeze(&dev->ctrl);
		/*
		 * Give the controller a chance to complete all entered
		 * requests if doing a safe shutdown.
		 */
		if (!dead && shutdown)
			nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
	}

	nvme_quiesce_io_queues(&dev->ctrl);

	if (!dead && dev->ctrl.queue_count > 0) {
		nvme_delete_io_queues(dev);
		nvme_disable_ctrl(&dev->ctrl, shutdown);
		nvme_poll_irqdisable(&dev->queues[0]);
	}

	nvme_cancel_tagset(&dev->ctrl);
	nvme_cancel_admin_tagset(&dev->ctrl);

	/*
	 * The driver will not be starting up queues again if shutting down so
	 * must flush all entered requests to their failed completion to avoid
	 * deadlocking blk-mq hot-cpu notifier.
	 */
	if (shutdown) {
		nvme_unquiesce_io_queues(&dev->ctrl);
		if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
			nvme_unquiesce_admin_queue(&dev->ctrl);
	}
	mutex_unlock(&dev->shutdown_lock);

/* nvme_disable_prepare_reset() */
	if (!nvme_wait_reset(&dev->ctrl))
		return -EBUSY;
	nvme_dev_disable(dev, shutdown);
	return 0;

/* nvme_setup_prp_pools() */
	dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
					     NVME_CTRL_PAGE_SIZE,
					     NVME_CTRL_PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512)
		small_align = 512;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
					      256, small_align, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;

/* nvme_release_prp_pools() */
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);

/* nvme_pci_alloc_iod_mempool() */
	size_t meta_size = sizeof(struct scatterlist) * (NVME_MAX_META_SEGS + 1);

	dev->iod_mempool = mempool_create_node(1,
			mempool_kmalloc, mempool_kfree,
			(void *)alloc_size, GFP_KERNEL,
			dev_to_node(dev->dev));
	if (!dev->iod_mempool)
		return -ENOMEM;

	dev->iod_meta_mempool = mempool_create_node(1,
			mempool_kmalloc, mempool_kfree,
			(void *)meta_size, GFP_KERNEL,
			dev_to_node(dev->dev));
	if (!dev->iod_meta_mempool)
		goto free;

	return 0;
free:
	mempool_destroy(dev->iod_mempool);
	return -ENOMEM;
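/*
 * Editor's note: these mempools guarantee forward progress for the
 * worst-case scatterlist allocations when GFP_ATOMIC pool allocations fail
 * in the I/O path; a minimum pool size of 1 is enough because mempool users
 * block until an element is returned.
 */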
/* nvme_free_tagset() */
	if (dev->tagset.tags)
		nvme_remove_io_tag_set(&dev->ctrl);
	dev->ctrl.tagset = NULL;

/* nvme_pci_free_ctrl() */
	put_device(dev->dev);
	kfree(dev->queues);
	kfree(dev);

/* nvme_reset_work() */
	bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
	int result;

	if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) {
		dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
			 dev->ctrl.state);
		result = -ENODEV;
		goto out;
	}

	/*
	 * If we're called to reset a live controller first shut it down
	 * before moving on.
	 */
	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
		nvme_dev_disable(dev, false);
	nvme_sync_queues(&dev->ctrl);

	mutex_lock(&dev->shutdown_lock);
	result = nvme_pci_enable(dev);
	if (result)
		goto out_unlock;
	nvme_unquiesce_admin_queue(&dev->ctrl);
	mutex_unlock(&dev->shutdown_lock);

	/*
	 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
	 * initializing procedure here.
	 */
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
		dev_warn(dev->ctrl.device,
			 "failed to mark controller CONNECTING\n");
		result = -EBUSY;
		goto out;
	}

	result = nvme_init_ctrl_finish(&dev->ctrl, was_suspend);
	if (result)
		goto out;

	if (nvme_ctrl_meta_sgl_supported(&dev->ctrl))
		dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS;
	else
		dev->ctrl.max_integrity_segments = 1;

	if (dev->online_queues > 1) {
		nvme_dbbuf_set(dev);
		nvme_unquiesce_io_queues(&dev->ctrl);
		nvme_wait_freeze(&dev->ctrl);
		if (!nvme_pci_update_nr_queues(dev))
			goto out;
		nvme_unfreeze(&dev->ctrl);
	} else {
		dev_warn(dev->ctrl.device, "IO queues lost\n");
		nvme_mark_namespaces_dead(&dev->ctrl);
		nvme_unquiesce_io_queues(&dev->ctrl);
		nvme_remove_namespaces(&dev->ctrl);
		nvme_free_tagset(dev);
	}

	/*
	 * If the controller is still alive tell everyone to stop waiting.
	 */
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
		dev_warn(dev->ctrl.device,
			 "failed to mark controller live state\n");
		result = -ENODEV;
		goto out;
	}

	nvme_start_ctrl(&dev->ctrl);
	return;

out_unlock:
	mutex_unlock(&dev->shutdown_lock);
out:
	dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n",
		 result);
	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
	nvme_dev_disable(dev, true);
	nvme_sync_queues(&dev->ctrl);
	nvme_mark_namespaces_dead(&dev->ctrl);
	nvme_unquiesce_io_queues(&dev->ctrl);
	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
/* nvme_pci_reg_read32() */
	*val = readl(to_nvme_dev(ctrl)->bar + off);

/* nvme_pci_reg_write32() */
	writel(val, to_nvme_dev(ctrl)->bar + off);

/* nvme_pci_reg_read64() */
	*val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);

/* nvme_pci_get_address() */
	struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);

	return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));

/* nvme_pci_print_device_info() */
	struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
	struct nvme_subsystem *subsys = ctrl->subsys;

	dev_err(ctrl->device,
		"VID:DID %04x:%04x model:%.*s firmware:%.*s\n",
		pdev->vendor, pdev->device,
		nvme_strlen(subsys->model, sizeof(subsys->model)),
		subsys->model, nvme_strlen(subsys->firmware_rev,
					   sizeof(subsys->firmware_rev)),
		subsys->firmware_rev);

/* nvme_pci_supports_pci_p2pdma() */
	return dma_pci_p2pdma_supported(dev->dev);

/* nvme_dev_map() */
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_request_mem_regions(pdev, "nvme"))
		return -ENODEV;

	if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
		goto release;

	return 0;
release:
	pci_release_mem_regions(pdev);
	return -ENODEV;
/* check_vendor_combination_bug() */
	if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
		/*
		 * Several Samsung devices seem to drop off the PCIe bus
		 * randomly when APST is on and uses the deepest sleep state,
		 * but only on two Dell laptops.
		 */
		if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
		    (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
		     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
			return NVME_QUIRK_NO_DEEPEST_PS;
	} else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
		/*
		 * Samsung SSD 960 EVO drops off the PCIe bus after system
		 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
		 * within few minutes after bootup on a Coffee Lake board -
		 * ASUS PRIME Z370-A
		 */
		if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
		    (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
		     dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
			return NVME_QUIRK_NO_APST;
	} else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 ||
		    pdev->device == 0xa808 || pdev->device == 0xa809)) ||
		   (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) {
		/*
		 * Forcing to use host managed nvme power settings for
		 * lowest idle power with quick resume latency on
		 * Samsung and Toshiba SSDs based on suspend behavior
		 * on Dell XPS 9320 platform.
		 */
		if (dmi_match(DMI_PRODUCT_NAME, "XPS 9320"))
			return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
	} else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 ||
		   pdev->device == 0x500f)) {
		/*
		 * Exclude some Kingston NV1 and A2000 devices from
		 * NVME_QUIRK_SIMPLE_SUSPEND; a full suspend saves a lot of
		 * energy with s2idle sleep on some TUXEDO platforms.
		 */
	} else if (pdev->vendor == 0x144d && pdev->device == 0xa80d) {
		/*
		 * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND
		 * because of high power consumption (> 2 Watt) in s2idle
		 * sleep. Only some boards with Intel CPU are affected.
		 */
		if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
		    dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
		    dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
		    dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"))
			return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
	}

	/*
	 * NVMe SSD drops off the PCIe bus after system idle for a while on
	 * a Lenovo N60z board.
	 */
	if (dmi_match(DMI_BOARD_NAME, "LXKT-ZXEG-N6"))
		return NVME_QUIRK_NO_APST;
/* nvme_pci_alloc_dev() */
	unsigned long quirks = id->driver_data;
	int node = dev_to_node(&pdev->dev);
	struct nvme_dev *dev;
	int ret = -ENOMEM;

	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
	if (!dev)
		return ERR_PTR(-ENOMEM);
	INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
	mutex_init(&dev->shutdown_lock);

	dev->nr_write_queues = write_queues;
	dev->nr_poll_queues = poll_queues;
	dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1;
	dev->queues = kcalloc_node(dev->nr_allocated_queues,
			sizeof(struct nvme_queue), GFP_KERNEL, node);
	if (!dev->queues)
		goto out_free_dev;

	dev->dev = get_device(&pdev->dev);

	quirks |= check_vendor_combination_bug(pdev);
	if (!noacpi &&
	    !(quirks & NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND) &&
	    acpi_storage_d3(&pdev->dev)) {
		/*
		 * Some systems use a bios work around to ask for D3 on
		 * platforms that support kernel managed suspend.
		 */
		dev_info(&pdev->dev,
			 "platform quirk: setting simple suspend\n");
		quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
	}
	ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
			     quirks);
	if (ret)
		goto out_put_device;

	if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	else
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1);
	dma_set_max_seg_size(&pdev->dev, 0xffffffff);

	/*
	 * Limit the max command size to prevent iod->sg allocations going
	 * over a single page.
	 */
	dev->ctrl.max_hw_sectors = min_t(u32,
		NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9);
	dev->ctrl.max_segments = NVME_MAX_SEGS;
	dev->ctrl.max_integrity_segments = 1;
	return dev;

out_put_device:
	put_device(dev->dev);
	kfree(dev->queues);
out_free_dev:
	kfree(dev);
	return ERR_PTR(ret);

/* nvme_probe() */
	int result = -ENOMEM;

	result = nvme_add_ctrl(&dev->ctrl);
	if (result)
		goto out_put_ctrl;

	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));

	result = nvme_alloc_admin_tag_set(&dev->ctrl, &dev->admin_tagset,
				&nvme_mq_admin_ops, sizeof(struct nvme_iod));
	if (result)
		goto out_disable;

	/*
	 * Mark the controller as connecting before sending admin commands to
	 * allow the timeout handler to do the right thing.
	 */
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
		dev_warn(dev->ctrl.device,
			 "failed to mark controller CONNECTING\n");
		result = -EBUSY;
		goto out_disable;
	}

	result = nvme_init_ctrl_finish(&dev->ctrl, false);
	if (result)
		goto out_disable;

	if (nvme_ctrl_meta_sgl_supported(&dev->ctrl))
		dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS;
	else
		dev->ctrl.max_integrity_segments = 1;

	if (dev->online_queues > 1) {
		nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
				nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
		nvme_dbbuf_set(dev);
	}

	if (!dev->ctrl.tagset)
		dev_warn(dev->ctrl.device, "IO queues not created\n");

	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
		dev_warn(dev->ctrl.device,
			 "failed to mark controller live state\n");
		result = -ENODEV;
		goto out_disable;
	}

	nvme_start_ctrl(&dev->ctrl);
	nvme_put_ctrl(&dev->ctrl);
	flush_work(&dev->ctrl.scan_work);
	return 0;

out_disable:
	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
	nvme_dev_disable(dev, true);
	mempool_destroy(dev->iod_mempool);
	mempool_destroy(dev->iod_meta_mempool);
	nvme_uninit_ctrl(&dev->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&dev->ctrl);
	return result;
/* nvme_reset_prepare() */
	/*
	 * We don't need to check the return value from waiting for the reset
	 * state as shutdown side effects help with avoiding races
	 * with ->remove().
	 */
	nvme_disable_prepare_reset(dev, false);
	nvme_sync_queues(&dev->ctrl);

/* nvme_reset_done() */
	if (!nvme_try_sched_reset(&dev->ctrl))
		flush_work(&dev->ctrl.reset_work);

/* nvme_remove() */
	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);

	if (!pci_device_is_present(pdev)) {
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
		nvme_dev_disable(dev, true);
	}

	flush_work(&dev->ctrl.reset_work);
	nvme_stop_ctrl(&dev->ctrl);
	nvme_remove_namespaces(&dev->ctrl);
	mempool_destroy(dev->iod_mempool);
	mempool_destroy(dev->iod_meta_mempool);
	nvme_uninit_ctrl(&dev->ctrl);

/* nvme_resume() */
	struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
	struct nvme_ctrl *ctrl = &ndev->ctrl;

	if (ndev->last_ps == U32_MAX ||
	    nvme_set_power_state(ctrl, ndev->last_ps) != 0)
		goto reset;
	if (ctrl->hmpre && nvme_setup_host_mem(ndev))
		goto reset;

	return 0;
reset:
	return nvme_try_sched_reset(ctrl);

/* nvme_suspend() */
	struct nvme_ctrl *ctrl = &ndev->ctrl;
	int ret = -EBUSY;

	ndev->last_ps = U32_MAX;

	/*
	 * Use host managed nvme power settings for lowest idle power if
	 * possible. But if the firmware is involved after the suspend or the
	 * device does not support any non-default power states, shut down
	 * the device fully; likewise if ASPM is disabled, let the PCI bus
	 * layer put the device into D3 to take the PCIe link down, so as to
	 * allow the platform to achieve its minimum low-power state.
	 */
	if (pm_suspend_via_firmware() || !ctrl->npss ||
	    !pcie_aspm_enabled(pdev) ||
	    (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
		return nvme_disable_prepare_reset(ndev, true);

	/*
	 * Host memory access may not be successful in a system suspend state,
	 * but the specification allows the controller to access memory in a
	 * non-operational power state.
	 */
	if (ndev->hmb) {
		ret = nvme_set_host_mem(ndev, 0);
		if (ret < 0)
			goto unfreeze;
	}

	ret = nvme_get_power_state(ctrl, &ndev->last_ps);
	if (ret < 0)
		goto unfreeze;

	ret = nvme_set_power_state(ctrl, ctrl->npss);
	if (ret) {
		/*
		 * Clearing npss forces a controller reset on resume. The
		 * correct value will be rediscovered then.
		 */
		ret = 0;
		ctrl->npss = 0;
	}

/* nvme_simple_resume() */
	return nvme_try_sched_reset(&ndev->ctrl);

/* nvme_error_detected() */
	dev_warn(dev->ctrl.device,
		 "frozen state error detected, reset controller\n");
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
		nvme_dev_disable(dev, true);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	dev_warn(dev->ctrl.device,
		 "failure state error detected, request disconnect\n");

/* nvme_slot_reset() */
	dev_info(dev->ctrl.device, "restart after slot reset\n");
	pci_restore_state(pdev);
	if (!nvme_try_sched_reset(&dev->ctrl))
		nvme_unquiesce_io_queues(&dev->ctrl);
	return PCI_ERS_RESULT_RECOVERED;

/* nvme_error_resume() */
	flush_work(&dev->ctrl.reset_work);
/* nvme_id_table[] */
	{ PCI_DEVICE(0x1cc1, 0x33f8),	/* ADATA IM2P33F8ABR1 1 TB */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x10ec, 0x5763),	/* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1e4b, 0x1602),	/* HS-SSD-FUTURE 2048G */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	/*
	 * Fix for the Apple controller found in the MacBook8,1 and
	 * some MacBook7,1 to avoid controller resets and data loss.
	 */