Lines matching "0" and "x1c58" in drivers/nvme/host/pci.c (Linux NVMe PCIe host driver)
91 "this size. Use 0 to disable SGLs.");
111 if (ret != 0 || n > blk_mq_num_possible_queues(0)) in io_queue_count_set()
241 #define NVMEQ_ENABLED 0
255 IOD_ABORTED = 1U << 0,
310 memset(dev->dbbuf_dbs, 0, mem_size); in nvme_dbbuf_dma_alloc()
311 memset(dev->dbbuf_eis, 0, mem_size); in nvme_dbbuf_dma_alloc()
386 if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { in nvme_dbbuf_set()
443 NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE, 0, numa_node); in nvme_setup_descriptor_pools()
451 NVME_SMALL_POOL_SIZE, small_align, 0, numa_node); in nvme_setup_descriptor_pools()
465 for (i = 0; i < nr_node_ids; i++) { in nvme_release_descriptor_pools()
481 tags = qid ? dev->tagset.tags[qid - 1] : dev->admin_tagset.tags[0]; in nvme_init_hctx_common()
489 return 0; in nvme_init_hctx_common()
495 WARN_ON(hctx_idx != 0); in nvme_admin_init_hctx()
496 return nvme_init_hctx_common(hctx, data, 0); in nvme_admin_init_hctx()
513 return 0; in nvme_pci_init_request()
522 return 0; in queue_irq_offset()
531 for (i = 0, qoff = 0; i < set->nr_maps; i++) { in nvme_pci_map_queues()
563 next_tail = 0; in nvme_write_sq_db()
580 nvmeq->sq_tail = 0; in nvme_sq_copy_cmd()
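The hits in nvme_write_sq_db() and nvme_sq_copy_cmd() above show the submission-queue tail wrapping to slot 0 when it reaches the end of the ring. A minimal sketch of that wrap, with hypothetical names (not the driver's own helper):

	/* Sketch: advance an SQ tail index around a q_depth-entry ring. */
	static inline u16 sq_tail_advance(u16 tail, u16 q_depth)
	{
		if (++tail == q_depth)	/* one past the last slot... */
			tail = 0;	/* ...wraps to the ring's start */
		return tail;
	}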
673 dma_pool_free(nvme_dma_pool(nvmeq, iod), iod->descriptors[0], in nvme_free_descriptors()
678 for (i = 0; i < iod->nr_descriptors; i++) { in nvme_free_descriptors()
694 for (i = 0; i < iod->nr_dma_vecs; i++) in nvme_free_prps()
714 for (i = 0; i < len / sizeof(*sg_list); i++) in nvme_free_sgls()
764 nvme_free_sgls(req, iod->descriptors[0], in nvme_unmap_data()
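nvme_free_sgls() above iterates a table of SGL data descriptors (the len / sizeof(*sg_list) loop). For orientation, the 16-byte descriptor layout it walks, as defined in include/linux/nvme.h:

	struct nvme_sgl_desc {
		__le64	addr;
		__le32	length;
		__u8	rsvd[3];
		__u8	type;
	};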
797 dma_addr_t prp1_dma, prp2_dma = 0; in nvme_pci_setup_data_prp()
806 iod->dma_vecs[0].addr = iter->addr; in nvme_pci_setup_data_prp()
807 iod->dma_vecs[0].len = iter->len; in nvme_pci_setup_data_prp()
855 i = 0; in nvme_pci_setup_data_prp()
893 prp_list[0] = old_prp_list[i - 1]; in nvme_pci_setup_data_prp()
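The nvme_pci_setup_data_prp() hits above include PRP-list chaining: when a list page fills, its last slot is repurposed as a pointer to a newly allocated list, and the displaced data pointer is carried over as entry 0 of the new list, which is why prp_list[0] is seeded from old_prp_list[i - 1]. A hedged sketch of that hand-off, with hypothetical names:

	/* Sketch: chain a full PRP list page to a freshly allocated one. */
	static __le64 *chain_prp_list(__le64 *old_list, int last,
				      __le64 *new_list, dma_addr_t new_list_dma)
	{
		new_list[0] = old_list[last];		/* carry displaced data pointer */
		old_list[last] = cpu_to_le64(new_list_dma); /* last slot -> next list */
		return new_list;
	}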
941 unsigned int mapped = 0; in nvme_pci_setup_data_sgl()
992 dma_addr = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0); in nvme_pci_setup_data_simple()
1007 iod->cmd.common.dptr.prp2 = 0; in nvme_pci_setup_data_simple()
1054 int i = 0; in nvme_pci_setup_meta_iter()
1124 iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0); in nvme_pci_setup_meta_mptr()
1147 iod->flags = 0; in nvme_prep_rq()
1148 iod->nr_descriptors = 0; in nvme_prep_rq()
1149 iod->total_len = 0; in nvme_prep_rq()
1150 iod->meta_total_len = 0; in nvme_prep_rq()
1300 return nvmeq->dev->admin_tagset.tags[0]; in nvme_queue_tagset()
1344 nvmeq->cq_head = 0; in nvme_update_cq_head()
1417 return 0; in nvme_poll()
1429 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_pci_submit_async_event()
1444 int ret = 0; in nvme_pci_subsystem_reset()
1483 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_delete_queue()
1506 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_alloc_cq()
1535 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_alloc_sq()
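adapter_alloc_cq() and adapter_alloc_sq() above submit Create I/O Queue admin commands synchronously. A hedged sketch of how the completion-queue variant is filled in (field names from the kernel's struct nvme_command; values illustrative):

	struct nvme_command c = { };

	c.create_cq.opcode     = nvme_admin_create_cq;
	c.create_cq.prp1       = cpu_to_le64(cq_dma_addr);	/* CQ ring address */
	c.create_cq.cqid       = cpu_to_le16(qid);
	c.create_cq.qsize      = cpu_to_le16(q_depth - 1);	/* 0's based size */
	c.create_cq.cq_flags   = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED);
	c.create_cq.irq_vector = cpu_to_le16(vector);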
1553 "Abort status: 0x%x", nvme_req(req)->status); in abort_endio()
1594 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", in nvme_warn_reset()
1598 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", in nvme_warn_reset()
1601 if (csts != ~0) in nvme_warn_reset()
1607 …"Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off pcie_port_pm=off\" and report a bug\n"); in nvme_warn_reset()
1702 if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { in nvme_timeout()
1797 for (i = dev->ctrl.queue_count - 1; i > 0; i--) in nvme_suspend_io_queues()
1811 for (i = dev->ctrl.queue_count - 1; i > 0; i--) { in nvme_reap_pending_cqes()
1855 return 0; in nvme_alloc_sq_cmds()
1866 return 0; in nvme_alloc_sq_cmds()
1874 return 0; in nvme_alloc_queue()
1889 nvmeq->cq_head = 0; in nvme_alloc_queue()
1895 return 0; in nvme_alloc_queue()
1922 nvmeq->sq_tail = 0; in nvme_init_queue()
1923 nvmeq->last_sq_tail = 0; in nvme_init_queue()
1924 nvmeq->cq_head = 0; in nvme_init_queue()
1927 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); in nvme_init_queue()
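nvme_init_queue() above resets the ring indices and memsets the CQ entries to zero; completions are then detected by the phase bit (bit 0 of the CQE status) flipping relative to the driver's expected phase, so zeroed entries are never mistaken for new completions. A minimal sketch of that check, with hypothetical names:

	/* Sketch: a CQE is new when its phase bit matches the expected phase. */
	static inline bool cqe_is_new(const struct nvme_completion *cqe, u16 phase)
	{
		return (le16_to_cpu(READ_ONCE(cqe->status)) & 1) == phase;
	}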
1952 return 0; in nvme_setup_io_queues_trylock()
1959 u16 vector = 0; in nvme_create_queue()
1968 vector = dev->num_vecs == 1 ? 0 : qid; in nvme_create_queue()
1977 if (result < 0) in nvme_create_queue()
1990 if (result < 0) in nvme_create_queue()
2050 return 0; in nvme_remap_bar()
2051 if (size > pci_resource_len(pdev, 0)) in nvme_remap_bar()
2055 dev->bar = ioremap(pci_resource_start(pdev, 0), size); in nvme_remap_bar()
2057 dev->bar_mapped_size = 0; in nvme_remap_bar()
2063 return 0; in nvme_remap_bar()
2072 result = nvme_remap_bar(dev, db_bar_size(dev, 0)); in nvme_pci_configure_admin_queue()
2073 if (result < 0) in nvme_pci_configure_admin_queue()
2076 dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? in nvme_pci_configure_admin_queue()
2077 NVME_CAP_NSSRC(dev->ctrl.cap) : 0; in nvme_pci_configure_admin_queue()
2091 if (result < 0) { in nvme_pci_configure_admin_queue()
2102 if (result < 0) in nvme_pci_configure_admin_queue()
2107 if (result < 0) in nvme_pci_configure_admin_queue()
2114 result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); in nvme_pci_configure_admin_queue()
2120 nvmeq = &dev->queues[0]; in nvme_pci_configure_admin_queue()
2132 nvmeq->cq_vector = 0; in nvme_pci_configure_admin_queue()
2133 nvme_init_queue(nvmeq, 0); in nvme_pci_configure_admin_queue()
2147 int ret = 0; in nvme_create_io_queues()
2178 return ret >= 0 ? 0 : ret; in nvme_create_io_queues()
2244 hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC); in nvme_map_cmb()
2271 ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in nvme_set_host_mem()
2286 for (i = 0; i < dev->nr_host_mem_descs; i++) { in nvme_free_host_mem_multi()
2310 dev->host_mem_descs_size = 0; in nvme_free_host_mem()
2311 dev->nr_host_mem_descs = 0; in nvme_free_host_mem()
2317 DMA_BIDIRECTIONAL, GFP_KERNEL, 0); in nvme_alloc_host_mem_single()
2334 dev->host_mem_descs[0].addr = in nvme_alloc_host_mem_single()
2336 dev->host_mem_descs[0].size = cpu_to_le32(size / NVME_CTRL_PAGE_SIZE); in nvme_alloc_host_mem_single()
2337 return 0; in nvme_alloc_host_mem_single()
2346 int i = 0; in nvme_alloc_host_mem_multi()
2367 for (size = 0; size < preferred && i < max_entries; size += len) { in nvme_alloc_host_mem_multi()
2390 return 0; in nvme_alloc_host_mem_multi()
2412 if (dma_merge_boundary && (PAGE_SIZE & dma_merge_boundary) == 0) { in nvme_alloc_host_mem()
2414 return 0; in nvme_alloc_host_mem()
2421 return 0; in nvme_alloc_host_mem()
2438 return 0; in nvme_setup_host_mem()
2446 return 0; in nvme_setup_host_mem()
2463 return 0; /* controller must work without HMB */ in nvme_setup_host_mem()
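The host-memory-buffer path above (nvme_alloc_host_mem_single/multi, nvme_set_host_mem) donates host DRAM to DRAM-less controllers via Set Features. Each chunk is described with the spec-defined descriptor below (from include/linux/nvme.h); its size field counts controller pages, which is why nvme_alloc_host_mem_single() stores size / NVME_CTRL_PAGE_SIZE:

	struct nvme_host_mem_buf_desc {
		__le64	addr;
		__le32	size;	/* in units of NVME_CTRL_PAGE_SIZE */
		__u32	rsvd;
	};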
2484 return sysfs_emit(buf, "cmbloc : 0x%08x\ncmbsz : 0x%08x\n", in cmb_show()
2522 if (kstrtobool(buf, &new) < 0) in hmb_store()
2531 ret = nvme_set_host_mem(ndev, 0); in hmb_store()
2536 if (ret < 0) in hmb_store()
2554 return 0; in nvme_pci_attrs_are_visible()
2557 return 0; in nvme_pci_attrs_are_visible()
2600 * If only one interrupt is available or 'write_queues' == 0, combine in nvme_calc_irq_sets()
2603 * If 'write_queues' > 0, ensure it leaves room for at least one read in nvme_calc_irq_sets()
2608 nr_read_queues = 0; in nvme_calc_irq_sets()
2610 nr_read_queues = 0; in nvme_calc_irq_sets()
2647 dev->io_queues[HCTX_TYPE_READ] = 0; in nvme_setup_irqs()
2671 return blk_mq_num_possible_queues(0) + dev->nr_write_queues + in nvme_max_io_queues()
2677 struct nvme_queue *adminq = &dev->queues[0]; in nvme_setup_io_queues()
2692 if (result < 0) in nvme_setup_io_queues()
2695 if (nr_io_queues == 0) in nvme_setup_io_queues()
2696 return 0; in nvme_setup_io_queues()
2709 pci_free_irq(pdev, 0, adminq); in nvme_setup_io_queues()
2714 if (result > 0) { in nvme_setup_io_queues()
2737 pci_free_irq(pdev, 0, adminq); in nvme_setup_io_queues()
2746 if (result <= 0) { in nvme_setup_io_queues()
2784 return 0; in nvme_setup_io_queues()
2833 return 0; in nvme_delete_queue()
2838 int nr_queues = dev->online_queues - 1, sent = 0; in __nvme_delete_io_queues()
2843 while (nr_queues > 0) { in __nvme_delete_io_queues()
2854 if (timeout == 0) in __nvme_delete_io_queues()
2928 if (result < 0) in nvme_pci_enable()
2951 (pdev->device == 0xa821 || pdev->device == 0xa822) && in nvme_pci_enable()
2952 NVME_CAP_MQES(dev->ctrl.cap) == 0) { in nvme_pci_enable()
2968 dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ in nvme_pci_enable()
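The sqsize assignment above reflects NVMe's 0's based size encoding: a field value of N means N + 1 entries. A worked example with an illustrative CAP value:

	/* CAP.MQES = 1023 (0's based) => 1024 usable queue entries. */
	u32 q_depth = NVME_CAP_MQES(cap) + 1;	/* 1024 entries in memory */
	u16 sqsize  = q_depth - 1;		/* 1023 back on the wire  */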
3028 if (!dead && dev->ctrl.queue_count > 0) { in nvme_dev_disable()
3031 nvme_poll_irqdisable(&dev->queues[0]); in nvme_dev_disable()
3034 nvme_suspend_queue(dev, 0); in nvme_dev_disable()
3061 return 0; in nvme_disable_prepare_reset()
3074 return 0; in nvme_pci_alloc_iod_mempool()
3147 if (result < 0) in nvme_reset_work()
3210 return 0; in nvme_pci_reg_read32()
3216 return 0; in nvme_pci_reg_write32()
3222 return 0; in nvme_pci_reg_read64()
3279 return 0; in nvme_dev_map()
3287 if (pdev->vendor == 0x144d && pdev->device == 0xa802) { in check_vendor_combination_bug()
3300 } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { in check_vendor_combination_bug()
3311 } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 || in check_vendor_combination_bug()
3312 pdev->device == 0xa808 || pdev->device == 0xa809)) || in check_vendor_combination_bug()
3313 (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) { in check_vendor_combination_bug()
3323 } else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 || in check_vendor_combination_bug()
3324 pdev->device == 0x500f)) { in check_vendor_combination_bug()
3335 } else if (pdev->vendor == 0x144d && pdev->device == 0xa80d) { in check_vendor_combination_bug()
3359 return 0; in check_vendor_combination_bug()
3409 dma_set_max_seg_size(&pdev->dev, 0xffffffff); in nvme_pci_alloc_dev()
3485 if (result < 0) in nvme_probe()
3515 return 0; in nvme_probe()
3523 nvme_free_queues(dev, 0); in nvme_probe()
3587 nvme_free_queues(dev, 0); in nvme_remove()
3597 return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps); in nvme_get_power_state()
3602 return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL); in nvme_set_power_state()
3611 nvme_set_power_state(ctrl, ndev->last_ps) != 0) in nvme_resume()
3616 return 0; in nvme_resume()
3661 ret = nvme_set_host_mem(ndev, 0); in nvme_suspend()
3662 if (ret < 0) in nvme_suspend()
3667 if (ret < 0) in nvme_suspend()
3678 if (ret < 0) in nvme_suspend()
3690 ctrl->npss = 0; in nvme_suspend()
3779 { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */
3782 { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */
3785 { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */
3789 { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */
3791 { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
3796 { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
3798 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
3802 { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
3804 { PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
3806 { PCI_DEVICE(0x126f, 0x1001), /* Silicon Motion generic */
3809 { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
3812 { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
3815 { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
3818 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
3820 { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
3822 { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
3824 { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
3826 { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
3830 { PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */
3832 { PCI_DEVICE(0x15b7, 0x5009), /* Sandisk SN550 */
3835 { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */
3837 { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
3840 { PCI_DEVICE(0x1987, 0x5019), /* Phison E19 */
3842 { PCI_DEVICE(0x1987, 0x5021), /* Phison E21 */
3844 { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
3847 { PCI_DEVICE(0x1cc1, 0x33f8), /* ADATA IM2P33F8ABR1 1 TB */
3849 { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
3852 { PCI_DEVICE(0x10ec, 0x5763), /* ADATA SX6000PNP */
3854 { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
3857 { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */
3859 { PCI_DEVICE(0x1344, 0x6001), /* Micron Nitro NVMe */
3861 { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
3863 { PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */
3865 { PCI_DEVICE(0x1c5c, 0x1D59), /* SK Hynix BC901 */
3867 { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
3869 { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
3871 { PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */
3874 { PCI_DEVICE(0x144d, 0xa809), /* Samsung MZALQ256HBJD 256G */
3876 { PCI_DEVICE(0x144d, 0xa802), /* Samsung SM953 */
3878 { PCI_DEVICE(0x1cc4, 0x6303), /* UMIS RPJTJ512MGE1QDY 512G */
3880 { PCI_DEVICE(0x1cc4, 0x6302), /* UMIS RPJTJ256MGE1QDY 256G */
3882 { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */
3884 { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
3886 { PCI_DEVICE(0x2646, 0x5013), /* Kingston KC3000, Kingston FURY Renegade */
3888 { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */
3890 { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */
3892 { PCI_DEVICE(0x2646, 0x501A), /* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */
3894 { PCI_DEVICE(0x2646, 0x501B), /* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */
3896 { PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
3898 { PCI_DEVICE(0x1f40, 0x1202), /* Netac Technologies Co. NV3000 NVMe SSD */
3900 { PCI_DEVICE(0x1f40, 0x5236), /* Netac Technologies Co. NV7000 NVMe SSD */
3902 { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */
3904 { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */
3906 { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */
3908 { PCI_DEVICE(0x1e4B, 0x1602), /* MAXIO MAP1602 */
3910 { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */
3912 { PCI_DEVICE(0x1dbe, 0x5216), /* Acer/INNOGRIT FA100/5216 NVMe SSD */
3914 { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
3916 { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */
3918 { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
3920 { PCI_DEVICE(0x025e, 0xf1ac), /* SOLIDIGM P44 pro SSDPFKKW020X7 */
3922 { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
3924 { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
3926 { PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
3928 { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
3931 { PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
3933 { PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G */
3935 { PCI_DEVICE(0x10ec, 0x5765), /* TEAMGROUP MP33 2TB SSD */
3937 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
3939 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
3941 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
3943 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
3945 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
3947 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
3949 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
3956 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
3957 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
3963 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
3964 { 0, }
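The searched identifier 0x1c58 (the HGST/WDC vendor ID) matches the two adapter entries above; in the kernel tree both carry NVME_QUIRK_DELAY_BEFORE_CHK_RDY in their driver_data. nvme_id_table as a whole follows the standard sentinel-terminated pci_device_id pattern, sketched below with a hypothetical driver name:

	#include <linux/module.h>
	#include <linux/pci.h>

	static const struct pci_device_id demo_id_table[] = {
		{ PCI_DEVICE(0x1c58, 0x0003),	/* exact vendor:device match */
			.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
		/* class match catches any remaining NVMe-class controller */
		{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
		{ 0, }				/* zeroed sentinel terminates the table */
	};
	MODULE_DEVICE_TABLE(pci, demo_id_table);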