pci.c: side-by-side diff between commits 5f675231e456cb599b283f8361f01cf34b0617df (old) and e20ba6e1da029136ded295f33076483d65ddf50a (new)
1/*
2 * NVM Express device driver
3 * Copyright (c) 2011-2014, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *

--- 60 unchanged lines hidden ---

69 .set = io_queue_depth_set,
70 .get = param_get_int,
71};
72
73static int io_queue_depth = 1024;
74module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
75MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
76
1/*
2 * NVM Express device driver
3 * Copyright (c) 2011-2014, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *

--- 60 unchanged lines hidden (view full) ---

69 .set = io_queue_depth_set,
70 .get = param_get_int,
71};
72
73static int io_queue_depth = 1024;
74module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
75MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
76
77static int queue_count_set(const char *val, const struct kernel_param *kp);
78static const struct kernel_param_ops queue_count_ops = {
79 .set = queue_count_set,
80 .get = param_get_int,
81};
82
83static int write_queues;
84module_param_cb(write_queues, &queue_count_ops, &write_queues, 0644);
85MODULE_PARM_DESC(write_queues,
86 "Number of queues to use for writes. If not set, reads and writes "
87 "will share a queue set.");
88
89static int poll_queues = 0;
90module_param_cb(poll_queues, &queue_count_ops, &poll_queues, 0644);
91MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
92
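/*
 * Usage example: loading the driver with, say, "modprobe nvme
 * write_queues=4 poll_queues=2" (a hypothetical configuration) splits the
 * IO queues into a default (write) set, a read set and an IRQ-less polled
 * set; queue_count_set() below silently clamps either value to
 * num_possible_cpus(). Leaving both at their default of 0 keeps the
 * previous behaviour: one queue set shared by reads and writes, and no
 * polled queues.
 */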
77struct nvme_dev;
78struct nvme_queue;
79
80static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
81
82/*
83 * Represents an NVM Express device. Each nvme_dev is a PCI function.
84 */
85struct nvme_dev {
86 struct nvme_queue *queues;
87 struct blk_mq_tag_set tagset;
88 struct blk_mq_tag_set admin_tagset;
89 u32 __iomem *dbs;
90 struct device *dev;
91 struct dma_pool *prp_page_pool;
92 struct dma_pool *prp_small_pool;
93 unsigned online_queues;
94 unsigned max_qid;
93struct nvme_dev;
94struct nvme_queue;
95
96static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
97
98/*
99 * Represents an NVM Express device. Each nvme_dev is a PCI function.
100 */
101struct nvme_dev {
102 struct nvme_queue *queues;
103 struct blk_mq_tag_set tagset;
104 struct blk_mq_tag_set admin_tagset;
105 u32 __iomem *dbs;
106 struct device *dev;
107 struct dma_pool *prp_page_pool;
108 struct dma_pool *prp_small_pool;
109 unsigned online_queues;
110 unsigned max_qid;
111 unsigned io_queues[HCTX_MAX_TYPES];
95 unsigned int num_vecs;
96 int q_depth;
97 u32 db_stride;
98 void __iomem *bar;
99 unsigned long bar_mapped_size;
100 struct work_struct remove_work;
101 struct mutex shutdown_lock;
102 bool subsystem;

--- 26 unchanged lines hidden ---

129
130 ret = kstrtoint(val, 10, &n);
131 if (ret != 0 || n < 2)
132 return -EINVAL;
133
134 return param_set_int(val, kp);
135}
136
112 unsigned int num_vecs;
113 int q_depth;
114 u32 db_stride;
115 void __iomem *bar;
116 unsigned long bar_mapped_size;
117 struct work_struct remove_work;
118 struct mutex shutdown_lock;
119 bool subsystem;

--- 26 unchanged lines hidden ---

146
147 ret = kstrtoint(val, 10, &n);
148 if (ret != 0 || n < 2)
149 return -EINVAL;
150
151 return param_set_int(val, kp);
152}
153
154static int queue_count_set(const char *val, const struct kernel_param *kp)
155{
156 int n = 0, ret;
157
158 ret = kstrtoint(val, 10, &n);
159 if (n > num_possible_cpus())
160 n = num_possible_cpus();
161
162 return param_set_int(val, kp);
163}
164
137static inline unsigned int sq_idx(unsigned int qid, u32 stride)
138{
139 return qid * 2 * stride;
140}
141
142static inline unsigned int cq_idx(unsigned int qid, u32 stride)
143{
144 return (qid * 2 + 1) * stride;

--- 18 unchanged lines hidden ---

163 volatile struct nvme_completion *cqes;
164 struct blk_mq_tags **tags;
165 dma_addr_t sq_dma_addr;
166 dma_addr_t cq_dma_addr;
167 u32 __iomem *q_db;
168 u16 q_depth;
169 s16 cq_vector;
170 u16 sq_tail;
165static inline unsigned int sq_idx(unsigned int qid, u32 stride)
166{
167 return qid * 2 * stride;
168}
169
170static inline unsigned int cq_idx(unsigned int qid, u32 stride)
171{
172 return (qid * 2 + 1) * stride;

--- 18 unchanged lines hidden (view full) ---

191 volatile struct nvme_completion *cqes;
192 struct blk_mq_tags **tags;
193 dma_addr_t sq_dma_addr;
194 dma_addr_t cq_dma_addr;
195 u32 __iomem *q_db;
196 u16 q_depth;
197 s16 cq_vector;
198 u16 sq_tail;
199 u16 last_sq_tail;
171 u16 cq_head;
172 u16 last_cq_head;
173 u16 qid;
174 u8 cq_phase;
200 u16 cq_head;
201 u16 last_cq_head;
202 u16 qid;
203 u8 cq_phase;
204 u8 polled;
175 u32 *dbbuf_sq_db;
176 u32 *dbbuf_cq_db;
177 u32 *dbbuf_sq_ei;
178 u32 *dbbuf_cq_ei;
179};
180
181/*
182 * The nvme_iod describes the data in an I/O, including the list of PRP

--- 30 unchanged lines hidden ---

213 BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
214 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
215 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
216 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
217 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
218 BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
219}
220
205 u32 *dbbuf_sq_db;
206 u32 *dbbuf_cq_db;
207 u32 *dbbuf_sq_ei;
208 u32 *dbbuf_cq_ei;
209};
210
211/*
212 * The nvme_iod describes the data in an I/O, including the list of PRP

--- 30 unchanged lines hidden ---

243 BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
244 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
245 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
246 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
247 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
248 BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
249}
250
251static unsigned int max_io_queues(void)
252{
253 return num_possible_cpus() + write_queues + poll_queues;
254}
255
256static unsigned int max_queue_count(void)
257{
258 /* IO queues + admin queue */
259 return 1 + max_io_queues();
260}
261
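/*
 * Worked example for the two helpers above, assuming a hypothetical
 * 8-CPU system booted with write_queues=2 and poll_queues=2:
 * max_io_queues() = 8 + 2 + 2 = 12 and max_queue_count() = 13 (the +1 is
 * the admin queue). nvme_dbbuf_size() below is therefore sized from
 * max_queue_count() * 8 * stride rather than from the CPU count alone.
 */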
221static inline unsigned int nvme_dbbuf_size(u32 stride)
222{
262static inline unsigned int nvme_dbbuf_size(u32 stride)
263{
223 return ((num_possible_cpus() + 1) * 8 * stride);
264 return (max_queue_count() * 8 * stride);
224}
225
226static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
227{
228 unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
229
230 if (dev->dbbuf_dbs)
231 return 0;

--- 194 unchanged lines hidden ---

426
427 BUG_ON(!nvmeq);
428 iod->nvmeq = nvmeq;
429
430 nvme_req(req)->ctrl = &dev->ctrl;
431 return 0;
432}
433
265}
266
267static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
268{
269 unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
270
271 if (dev->dbbuf_dbs)
272 return 0;

--- 194 unchanged lines hidden ---

467
468 BUG_ON(!nvmeq);
469 iod->nvmeq = nvmeq;
470
471 nvme_req(req)->ctrl = &dev->ctrl;
472 return 0;
473}
474
475static int queue_irq_offset(struct nvme_dev *dev)
476{
477 /* if we have more than 1 vec, admin queue offsets us by 1 */
478 if (dev->num_vecs > 1)
479 return 1;
480
481 return 0;
482}
483
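/*
 * Vector layout implied by queue_irq_offset(): when more than one vector
 * was granted, vector 0 is dedicated to the admin queue and the first IO
 * queue's interrupt starts at vector 1, so the IO queue maps are offset
 * by one; with a single vector the admin and IO queues share vector 0 and
 * the offset is 0.
 */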
434static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
435{
436 struct nvme_dev *dev = set->driver_data;
484static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
485{
486 struct nvme_dev *dev = set->driver_data;
487 int i, qoff, offset;
437
488
438 return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev),
439 dev->num_vecs > 1 ? 1 /* admin queue */ : 0);
489 offset = queue_irq_offset(dev);
490 for (i = 0, qoff = 0; i < set->nr_maps; i++) {
491 struct blk_mq_queue_map *map = &set->map[i];
492
493 map->nr_queues = dev->io_queues[i];
494 if (!map->nr_queues) {
495 BUG_ON(i == HCTX_TYPE_DEFAULT);
496
497 /* shared set, reuse the default set's parameters */
498 map->nr_queues = dev->io_queues[HCTX_TYPE_DEFAULT];
499 qoff = 0;
500 offset = queue_irq_offset(dev);
501 }
502
503 /*
504 * The poll queue(s) don't have an IRQ (and hence no IRQ
505 * affinity), so use the regular blk-mq cpu mapping
506 */
507 map->queue_offset = qoff;
508 if (i != HCTX_TYPE_POLL)
509 blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
510 else
511 blk_mq_map_queues(map);
512 qoff += map->nr_queues;
513 offset += map->nr_queues;
514 }
515
516 return 0;
440}
441
517}
518
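/*
 * Example of the blk-mq maps produced above, assuming a hypothetical
 * split of default=2, read=4, poll=2 with queue_irq_offset() == 1:
 *
 *   HCTX_TYPE_DEFAULT: nr_queues=2, queue_offset=0, IRQ offset 1
 *   HCTX_TYPE_READ:    nr_queues=4, queue_offset=2, IRQ offset 3
 *   HCTX_TYPE_POLL:    nr_queues=2, queue_offset=6, CPU-mapped, no IRQ
 *
 * If a set other than the default set is empty, its map falls back to the
 * default set's queue count and offsets instead.
 */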
519/*
520 * Write sq tail if we are asked to, or if the next command would wrap.
521 */
522static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
523{
524 if (!write_sq) {
525 u16 next_tail = nvmeq->sq_tail + 1;
526
527 if (next_tail == nvmeq->q_depth)
528 next_tail = 0;
529 if (next_tail != nvmeq->last_sq_tail)
530 return;
531 }
532
533 if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
534 nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
535 writel(nvmeq->sq_tail, nvmeq->q_db);
536 nvmeq->last_sq_tail = nvmeq->sq_tail;
537}
538
442/**
443 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
444 * @nvmeq: The queue to use
445 * @cmd: The command to send
539/**
540 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
541 * @nvmeq: The queue to use
542 * @cmd: The command to send
543 * @write_sq: whether to write to the SQ doorbell
446 */
544 */
447static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
545static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
546 bool write_sq)
448{
449 spin_lock(&nvmeq->sq_lock);
547{
548 spin_lock(&nvmeq->sq_lock);
450
451 memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
549 memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
452
453 if (++nvmeq->sq_tail == nvmeq->q_depth)
454 nvmeq->sq_tail = 0;
550 if (++nvmeq->sq_tail == nvmeq->q_depth)
551 nvmeq->sq_tail = 0;
455 if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
456 nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
457 writel(nvmeq->sq_tail, nvmeq->q_db);
552 nvme_write_sq_db(nvmeq, write_sq);
458 spin_unlock(&nvmeq->sq_lock);
459}
460
553 spin_unlock(&nvmeq->sq_lock);
554}
555
556static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
557{
558 struct nvme_queue *nvmeq = hctx->driver_data;
559
560 spin_lock(&nvmeq->sq_lock);
561 if (nvmeq->sq_tail != nvmeq->last_sq_tail)
562 nvme_write_sq_db(nvmeq, true);
563 spin_unlock(&nvmeq->sq_lock);
564}
565
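/*
 * Doorbell batching in the helpers above: nvme_submit_cmd() now rings the
 * SQ doorbell only when the caller flags the last request of a batch
 * (write_sq, driven by bd->last in nvme_queue_rq()) or when one more
 * submission would catch up with the last doorbell value, and
 * nvme_commit_rqs() flushes any tail still unwritten when blk-mq ends a
 * batch early. For a hypothetical batch of 32 requests that is one MMIO
 * doorbell write instead of 32.
 */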
461static void **nvme_pci_iod_list(struct request *req)
462{
463 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
464 return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
465}
466
467static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
468{

--- 348 unchanged lines hidden ---

817 struct request *req = bd->rq;
818 struct nvme_command cmnd;
819 blk_status_t ret;
820
821 /*
822 * We should not need to do this, but we're still using this to
823 * ensure we can drain requests on a dying queue.
824 */
566static void **nvme_pci_iod_list(struct request *req)
567{
568 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
569 return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
570}
571
572static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
573{

--- 348 unchanged lines hidden ---

922 struct request *req = bd->rq;
923 struct nvme_command cmnd;
924 blk_status_t ret;
925
926 /*
927 * We should not need to do this, but we're still using this to
928 * ensure we can drain requests on a dying queue.
929 */
825 if (unlikely(nvmeq->cq_vector < 0))
930 if (unlikely(nvmeq->cq_vector < 0 && !nvmeq->polled))
826 return BLK_STS_IOERR;
827
828 ret = nvme_setup_cmd(ns, req, &cmnd);
829 if (ret)
830 return ret;
831
832 ret = nvme_init_iod(req, dev);
833 if (ret)
834 goto out_free_cmd;
835
836 if (blk_rq_nr_phys_segments(req)) {
837 ret = nvme_map_data(dev, req, &cmnd);
838 if (ret)
839 goto out_cleanup_iod;
840 }
841
842 blk_mq_start_request(req);
931 return BLK_STS_IOERR;
932
933 ret = nvme_setup_cmd(ns, req, &cmnd);
934 if (ret)
935 return ret;
936
937 ret = nvme_init_iod(req, dev);
938 if (ret)
939 goto out_free_cmd;
940
941 if (blk_rq_nr_phys_segments(req)) {
942 ret = nvme_map_data(dev, req, &cmnd);
943 if (ret)
944 goto out_cleanup_iod;
945 }
946
947 blk_mq_start_request(req);
843 nvme_submit_cmd(nvmeq, &cmnd);
948 nvme_submit_cmd(nvmeq, &cmnd, bd->last);
844 return BLK_STS_OK;
845out_cleanup_iod:
846 nvme_free_iod(dev, req);
847out_free_cmd:
848 nvme_cleanup_cmd(req);
849 return ret;
850}
851

--- 62 unchanged lines hidden ---

914static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
915{
916 if (++nvmeq->cq_head == nvmeq->q_depth) {
917 nvmeq->cq_head = 0;
918 nvmeq->cq_phase = !nvmeq->cq_phase;
919 }
920}
921
949 return BLK_STS_OK;
950out_cleanup_iod:
951 nvme_free_iod(dev, req);
952out_free_cmd:
953 nvme_cleanup_cmd(req);
954 return ret;
955}
956

--- 62 unchanged lines hidden ---

1019static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
1020{
1021 if (++nvmeq->cq_head == nvmeq->q_depth) {
1022 nvmeq->cq_head = 0;
1023 nvmeq->cq_phase = !nvmeq->cq_phase;
1024 }
1025}
1026
922static inline bool nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
923 u16 *end, int tag)
1027static inline int nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
1028 u16 *end, unsigned int tag)
924{
1029{
925 bool found = false;
1030 int found = 0;
926
927 *start = nvmeq->cq_head;
1031
1032 *start = nvmeq->cq_head;
928 while (!found && nvme_cqe_pending(nvmeq)) {
929 if (nvmeq->cqes[nvmeq->cq_head].command_id == tag)
930 found = true;
1033 while (nvme_cqe_pending(nvmeq)) {
1034 if (tag == -1U || nvmeq->cqes[nvmeq->cq_head].command_id == tag)
1035 found++;
931 nvme_update_cq_head(nvmeq);
932 }
933 *end = nvmeq->cq_head;
934
935 if (*start != *end)
936 nvme_ring_cq_doorbell(nvmeq);
937 return found;
938}

--- 25 unchanged lines hidden ---

964 if (nvme_cqe_pending(nvmeq))
965 return IRQ_WAKE_THREAD;
966 return IRQ_NONE;
967}
968
969static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
970{
971 u16 start, end;
1036 nvme_update_cq_head(nvmeq);
1037 }
1038 *end = nvmeq->cq_head;
1039
1040 if (*start != *end)
1041 nvme_ring_cq_doorbell(nvmeq);
1042 return found;
1043}
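/*
 * In the reworked nvme_process_cq() above, a tag of -1U acts as a
 * wildcard: every pending CQE is counted, which is what the poll paths
 * want, while a real tag still lets __nvme_poll() report whether that
 * specific command completed. The return type changes from bool to int so
 * callers now get a completion count instead of a single hit/miss flag.
 */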

--- 25 unchanged lines hidden ---

1069 if (nvme_cqe_pending(nvmeq))
1070 return IRQ_WAKE_THREAD;
1071 return IRQ_NONE;
1072}
1073
1074static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
1075{
1076 u16 start, end;
972 bool found;
1077 int found;
973
974 if (!nvme_cqe_pending(nvmeq))
975 return 0;
976
977 spin_lock_irq(&nvmeq->cq_lock);
978 found = nvme_process_cq(nvmeq, &start, &end, tag);
979 spin_unlock_irq(&nvmeq->cq_lock);
980
981 nvme_complete_cqes(nvmeq, start, end);
982 return found;
983}
984
1078
1079 if (!nvme_cqe_pending(nvmeq))
1080 return 0;
1081
1082 spin_lock_irq(&nvmeq->cq_lock);
1083 found = nvme_process_cq(nvmeq, &start, &end, tag);
1084 spin_unlock_irq(&nvmeq->cq_lock);
1085
1086 nvme_complete_cqes(nvmeq, start, end);
1087 return found;
1088}
1089
985static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
1090static int nvme_poll(struct blk_mq_hw_ctx *hctx)
986{
987 struct nvme_queue *nvmeq = hctx->driver_data;
988
1091{
1092 struct nvme_queue *nvmeq = hctx->driver_data;
1093
989 return __nvme_poll(nvmeq, tag);
1094 return __nvme_poll(nvmeq, -1);
990}
991
1095}
1096
1097static int nvme_poll_noirq(struct blk_mq_hw_ctx *hctx)
1098{
1099 struct nvme_queue *nvmeq = hctx->driver_data;
1100 u16 start, end;
1101 bool found;
1102
1103 if (!nvme_cqe_pending(nvmeq))
1104 return 0;
1105
1106 spin_lock(&nvmeq->cq_lock);
1107 found = nvme_process_cq(nvmeq, &start, &end, -1);
1108 spin_unlock(&nvmeq->cq_lock);
1109
1110 nvme_complete_cqes(nvmeq, start, end);
1111 return found;
1112}
1113
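/*
 * nvme_poll_noirq() takes cq_lock with plain spin_lock() rather than
 * spin_lock_irq(): the queues it serves are created without an interrupt
 * vector, so there is no completion IRQ that could preempt the polling
 * context and deadlock on the same lock.
 */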
992static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
993{
994 struct nvme_dev *dev = to_nvme_dev(ctrl);
995 struct nvme_queue *nvmeq = &dev->queues[0];
996 struct nvme_command c;
997
998 memset(&c, 0, sizeof(c));
999 c.common.opcode = nvme_admin_async_event;
1000 c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
1114static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
1115{
1116 struct nvme_dev *dev = to_nvme_dev(ctrl);
1117 struct nvme_queue *nvmeq = &dev->queues[0];
1118 struct nvme_command c;
1119
1120 memset(&c, 0, sizeof(c));
1121 c.common.opcode = nvme_admin_async_event;
1122 c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
1001 nvme_submit_cmd(nvmeq, &c);
1123 nvme_submit_cmd(nvmeq, &c, true);
1002}
1003
1004static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
1005{
1006 struct nvme_command c;
1007
1008 memset(&c, 0, sizeof(c));
1009 c.delete_queue.opcode = opcode;
1010 c.delete_queue.qid = cpu_to_le16(id);
1011
1012 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1013}
1014
1015static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
1016 struct nvme_queue *nvmeq, s16 vector)
1017{
1018 struct nvme_command c;
1124}
1125
1126static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
1127{
1128 struct nvme_command c;
1129
1130 memset(&c, 0, sizeof(c));
1131 c.delete_queue.opcode = opcode;
1132 c.delete_queue.qid = cpu_to_le16(id);
1133
1134 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1135}
1136
1137static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
1138 struct nvme_queue *nvmeq, s16 vector)
1139{
1140 struct nvme_command c;
1019 int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
1141 int flags = NVME_QUEUE_PHYS_CONTIG;
1020
1142
1143 if (vector != -1)
1144 flags |= NVME_CQ_IRQ_ENABLED;
1145
1021 /*
1022 * Note: we (ab)use the fact that the prp fields survive if no data
1023 * is attached to the request.
1024 */
1025 memset(&c, 0, sizeof(c));
1026 c.create_cq.opcode = nvme_admin_create_cq;
1027 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
1028 c.create_cq.cqid = cpu_to_le16(qid);
1029 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1030 c.create_cq.cq_flags = cpu_to_le16(flags);
1146 /*
1147 * Note: we (ab)use the fact that the prp fields survive if no data
1148 * is attached to the request.
1149 */
1150 memset(&c, 0, sizeof(c));
1151 c.create_cq.opcode = nvme_admin_create_cq;
1152 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
1153 c.create_cq.cqid = cpu_to_le16(qid);
1154 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1155 c.create_cq.cq_flags = cpu_to_le16(flags);
1031 c.create_cq.irq_vector = cpu_to_le16(vector);
1156 if (vector != -1)
1157 c.create_cq.irq_vector = cpu_to_le16(vector);
1158 else
1159 c.create_cq.irq_vector = 0;
1032
1033 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1034}
1035
1036static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
1037 struct nvme_queue *nvmeq)
1038{
1039 struct nvme_ctrl *ctrl = &dev->ctrl;

--- 225 unchanged lines hidden ---

1265 * nvme_suspend_queue - put queue into suspended state
1266 * @nvmeq: queue to suspend
1267 */
1268static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1269{
1270 int vector;
1271
1272 spin_lock_irq(&nvmeq->cq_lock);
1160
1161 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1162}
1163
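/*
 * A vector of -1 marks a polled queue throughout this change:
 * adapter_alloc_cq() then leaves NVME_CQ_IRQ_ENABLED out of cq_flags and
 * writes 0 into irq_vector, so the controller creates a completion queue
 * that never raises an interrupt.
 */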
1164static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
1165 struct nvme_queue *nvmeq)
1166{
1167 struct nvme_ctrl *ctrl = &dev->ctrl;

--- 225 unchanged lines hidden ---

1393 * nvme_suspend_queue - put queue into suspended state
1394 * @nvmeq: queue to suspend
1395 */
1396static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1397{
1398 int vector;
1399
1400 spin_lock_irq(&nvmeq->cq_lock);
1273 if (nvmeq->cq_vector == -1) {
1401 if (nvmeq->cq_vector == -1 && !nvmeq->polled) {
1274 spin_unlock_irq(&nvmeq->cq_lock);
1275 return 1;
1276 }
1277 vector = nvmeq->cq_vector;
1278 nvmeq->dev->online_queues--;
1279 nvmeq->cq_vector = -1;
1402 spin_unlock_irq(&nvmeq->cq_lock);
1403 return 1;
1404 }
1405 vector = nvmeq->cq_vector;
1406 nvmeq->dev->online_queues--;
1407 nvmeq->cq_vector = -1;
1408 nvmeq->polled = false;
1280 spin_unlock_irq(&nvmeq->cq_lock);
1281
1282 /*
1283 * Ensure that nvme_queue_rq() sees that ->cq_vector == -1 without
1284 * having to grab the lock.
1285 */
1286 mb();
1287
1288 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
1289 blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
1290
1409 spin_unlock_irq(&nvmeq->cq_lock);
1410
1411 /*
1412 * Ensure that nvme_queue_rq() sees that ->cq_vector == -1 and that
1413 * ->polled is false without having to grab the lock.
1414 */
1415 mb();
1416
1417 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
1418 blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
1419
1291 pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq);
1420 if (vector != -1)
1421 pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq);
1292
1293 return 0;
1294}
1295
1296static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
1297{
1298 struct nvme_queue *nvmeq = &dev->queues[0];
1299 u16 start, end;

--- 108 unchanged lines hidden ---

1408}
1409
1410static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
1411{
1412 struct nvme_dev *dev = nvmeq->dev;
1413
1414 spin_lock_irq(&nvmeq->cq_lock);
1415 nvmeq->sq_tail = 0;
1422
1423 return 0;
1424}
1425
1426static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
1427{
1428 struct nvme_queue *nvmeq = &dev->queues[0];
1429 u16 start, end;

--- 108 unchanged lines hidden ---

1538}
1539
1540static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
1541{
1542 struct nvme_dev *dev = nvmeq->dev;
1543
1544 spin_lock_irq(&nvmeq->cq_lock);
1545 nvmeq->sq_tail = 0;
1546 nvmeq->last_sq_tail = 0;
1416 nvmeq->cq_head = 0;
1417 nvmeq->cq_phase = 1;
1418 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1419 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
1420 nvme_dbbuf_init(dev, nvmeq, qid);
1421 dev->online_queues++;
1422 spin_unlock_irq(&nvmeq->cq_lock);
1423}
1424
1547 nvmeq->cq_head = 0;
1548 nvmeq->cq_phase = 1;
1549 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1550 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
1551 nvme_dbbuf_init(dev, nvmeq, qid);
1552 dev->online_queues++;
1553 spin_unlock_irq(&nvmeq->cq_lock);
1554}
1555
1425static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
1556static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
1426{
1427 struct nvme_dev *dev = nvmeq->dev;
1428 int result;
1429 s16 vector;
1430
1431 /*
1432 * A queue's vector matches the queue identifier unless the controller
1433 * has only one vector available.
1434 */
1557{
1558 struct nvme_dev *dev = nvmeq->dev;
1559 int result;
1560 s16 vector;
1561
1562 /*
1563 * A queue's vector matches the queue identifier unless the controller
1564 * has only one vector available.
1565 */
1435 vector = dev->num_vecs == 1 ? 0 : qid;
1566 if (!polled)
1567 vector = dev->num_vecs == 1 ? 0 : qid;
1568 else
1569 vector = -1;
1570
1436 result = adapter_alloc_cq(dev, qid, nvmeq, vector);
1437 if (result)
1438 return result;
1439
1440 result = adapter_alloc_sq(dev, qid, nvmeq);
1441 if (result < 0)
1442 return result;
1443 else if (result)
1444 goto release_cq;
1445
1446 /*
1447 * Set cq_vector after alloc cq/sq, otherwise nvme_suspend_queue will
1448 * invoke free_irq for it and cause a 'Trying to free already-free IRQ
1449 * xxx' warning if the create CQ/SQ command times out.
1450 */
1451 nvmeq->cq_vector = vector;
1571 result = adapter_alloc_cq(dev, qid, nvmeq, vector);
1572 if (result)
1573 return result;
1574
1575 result = adapter_alloc_sq(dev, qid, nvmeq);
1576 if (result < 0)
1577 return result;
1578 else if (result)
1579 goto release_cq;
1580
1581 /*
1582 * Set cq_vector after alloc cq/sq, otherwise nvme_suspend_queue will
1583 * invoke free_irq for it and cause a 'Trying to free already-free IRQ
1584 * xxx' warning if the create CQ/SQ command times out.
1585 */
1586 nvmeq->cq_vector = vector;
1587 nvmeq->polled = polled;
1452 nvme_init_queue(nvmeq, qid);
1588 nvme_init_queue(nvmeq, qid);
1453 result = queue_request_irq(nvmeq);
1454 if (result < 0)
1455 goto release_sq;
1456
1589
1590 if (vector != -1) {
1591 result = queue_request_irq(nvmeq);
1592 if (result < 0)
1593 goto release_sq;
1594 }
1595
1457 return result;
1458
1459release_sq:
1460 nvmeq->cq_vector = -1;
1596 return result;
1597
1598release_sq:
1599 nvmeq->cq_vector = -1;
1600 nvmeq->polled = false;
1461 dev->online_queues--;
1462 adapter_delete_sq(dev, qid);
1463release_cq:
1464 adapter_delete_cq(dev, qid);
1465 return result;
1466}
1467
1468static const struct blk_mq_ops nvme_mq_admin_ops = {
1469 .queue_rq = nvme_queue_rq,
1470 .complete = nvme_pci_complete_rq,
1471 .init_hctx = nvme_admin_init_hctx,
1472 .exit_hctx = nvme_admin_exit_hctx,
1473 .init_request = nvme_init_request,
1474 .timeout = nvme_timeout,
1475};
1476
1601 dev->online_queues--;
1602 adapter_delete_sq(dev, qid);
1603release_cq:
1604 adapter_delete_cq(dev, qid);
1605 return result;
1606}
1607
1608static const struct blk_mq_ops nvme_mq_admin_ops = {
1609 .queue_rq = nvme_queue_rq,
1610 .complete = nvme_pci_complete_rq,
1611 .init_hctx = nvme_admin_init_hctx,
1612 .exit_hctx = nvme_admin_exit_hctx,
1613 .init_request = nvme_init_request,
1614 .timeout = nvme_timeout,
1615};
1616
1617#define NVME_SHARED_MQ_OPS \
1618 .queue_rq = nvme_queue_rq, \
1619 .commit_rqs = nvme_commit_rqs, \
1620 .complete = nvme_pci_complete_rq, \
1621 .init_hctx = nvme_init_hctx, \
1622 .init_request = nvme_init_request, \
1623 .map_queues = nvme_pci_map_queues, \
1624 .timeout = nvme_timeout \
1625
1477static const struct blk_mq_ops nvme_mq_ops = {
1626static const struct blk_mq_ops nvme_mq_ops = {
1478 .queue_rq = nvme_queue_rq,
1479 .complete = nvme_pci_complete_rq,
1480 .init_hctx = nvme_init_hctx,
1481 .init_request = nvme_init_request,
1482 .map_queues = nvme_pci_map_queues,
1483 .timeout = nvme_timeout,
1484 .poll = nvme_poll,
1627 NVME_SHARED_MQ_OPS,
1628 .poll = nvme_poll,
1485};
1486
1629};
1630
1631static const struct blk_mq_ops nvme_mq_poll_noirq_ops = {
1632 NVME_SHARED_MQ_OPS,
1633 .poll = nvme_poll_noirq,
1634};
1635
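/*
 * NVME_SHARED_MQ_OPS keeps the two IO tag set ops tables above in sync;
 * they differ only in ->poll. nvme_mq_ops polls through __nvme_poll(),
 * which must disable interrupts around cq_lock because its queues also
 * complete from the IRQ handler, while nvme_mq_poll_noirq_ops uses
 * nvme_poll_noirq() for the dedicated interrupt-free poll queues.
 * nvme_dev_add() later selects between the two based on
 * dev->io_queues[HCTX_TYPE_POLL].
 */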
1487static void nvme_dev_remove_admin(struct nvme_dev *dev)
1488{
1489 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
1490 /*
1491 * If the controller was reset during removal, it's possible
1492 * user requests may be waiting on a stopped queue. Start the
1493 * queue to flush these to completion.
1494 */

--- 107 unchanged lines hidden ---

1602 return result;
1603 }
1604
1605 return result;
1606}
1607
1608static int nvme_create_io_queues(struct nvme_dev *dev)
1609{
1636static void nvme_dev_remove_admin(struct nvme_dev *dev)
1637{
1638 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
1639 /*
1640 * If the controller was reset during removal, it's possible
1641 * user requests may be waiting on a stopped queue. Start the
1642 * queue to flush these to completion.
1643 */

--- 107 unchanged lines hidden ---

1751 return result;
1752 }
1753
1754 return result;
1755}
1756
1757static int nvme_create_io_queues(struct nvme_dev *dev)
1758{
1610 unsigned i, max;
1759 unsigned i, max, rw_queues;
1611 int ret = 0;
1612
1613 for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
1614 if (nvme_alloc_queue(dev, i, dev->q_depth)) {
1615 ret = -ENOMEM;
1616 break;
1617 }
1618 }
1619
1620 max = min(dev->max_qid, dev->ctrl.queue_count - 1);
1760 int ret = 0;
1761
1762 for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
1763 if (nvme_alloc_queue(dev, i, dev->q_depth)) {
1764 ret = -ENOMEM;
1765 break;
1766 }
1767 }
1768
1769 max = min(dev->max_qid, dev->ctrl.queue_count - 1);
1770 if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) {
1771 rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] +
1772 dev->io_queues[HCTX_TYPE_READ];
1773 } else {
1774 rw_queues = max;
1775 }
1776
1621 for (i = dev->online_queues; i <= max; i++) {
1777 for (i = dev->online_queues; i <= max; i++) {
1622 ret = nvme_create_queue(&dev->queues[i], i);
1778 bool polled = i > rw_queues;
1779
1780 ret = nvme_create_queue(&dev->queues[i], i, polled);
1623 if (ret)
1624 break;
1625 }
1626
1627 /*
1628 * Ignore failing Create SQ/CQ commands, we can continue with fewer
1629 * than the desired number of queues, and even a controller without
1630 * I/O queues can still be used to issue admin commands. This might

--- 255 unchanged lines hidden ---

1886 }
1887
1888 ret = nvme_set_host_mem(dev, enable_bits);
1889 if (ret)
1890 nvme_free_host_mem(dev);
1891 return ret;
1892}
1893
1781 if (ret)
1782 break;
1783 }
1784
1785 /*
1786 * Ignore failing Create SQ/CQ commands, we can continue with fewer
1787 * than the desired number of queues, and even a controller without
1788 * I/O queues can still be used to issue admin commands. This might

--- 255 unchanged lines hidden ---

2044 }
2045
2046 ret = nvme_set_host_mem(dev, enable_bits);
2047 if (ret)
2048 nvme_free_host_mem(dev);
2049 return ret;
2050}
2051
2052static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int nr_io_queues)
2053{
2054 unsigned int this_w_queues = write_queues;
2055 unsigned int this_p_queues = poll_queues;
2056
2057 /*
2058 * Setup read/write queue split
2059 */
2060 if (nr_io_queues == 1) {
2061 dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
2062 dev->io_queues[HCTX_TYPE_READ] = 0;
2063 dev->io_queues[HCTX_TYPE_POLL] = 0;
2064 return;
2065 }
2066
2067 /*
2068 * Configure number of poll queues, if set
2069 */
2070 if (this_p_queues) {
2071 /*
2072 * We need at least one queue left. With just one queue, we'll
2073 * have a single shared read/write set.
2074 */
2075 if (this_p_queues >= nr_io_queues) {
2076 this_w_queues = 0;
2077 this_p_queues = nr_io_queues - 1;
2078 }
2079
2080 dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
2081 nr_io_queues -= this_p_queues;
2082 } else
2083 dev->io_queues[HCTX_TYPE_POLL] = 0;
2084
2085 /*
2086 * If 'write_queues' is set, ensure it leaves room for at least
2087 * one read queue
2088 */
2089 if (this_w_queues >= nr_io_queues)
2090 this_w_queues = nr_io_queues - 1;
2091
2092 /*
2093 * If 'write_queues' is set to zero, reads and writes will share
2094 * a queue set.
2095 */
2096 if (!this_w_queues) {
2097 dev->io_queues[HCTX_TYPE_DEFAULT] = nr_io_queues;
2098 dev->io_queues[HCTX_TYPE_READ] = 0;
2099 } else {
2100 dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues;
2101 dev->io_queues[HCTX_TYPE_READ] = nr_io_queues - this_w_queues;
2102 }
2103}
2104
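/*
 * Worked example for nvme_calc_io_queues(), assuming hypothetical module
 * parameters write_queues=2 and poll_queues=2 with nr_io_queues=8 granted
 * by the controller: the poll set takes 2 queues (6 remain), writes get a
 * default set of 2 and reads the remaining 4, i.e. io_queues[] ends up as
 * { [HCTX_TYPE_DEFAULT] = 2, [HCTX_TYPE_READ] = 4, [HCTX_TYPE_POLL] = 2 }.
 * With both parameters left at 0 this collapses to a single shared set:
 * { nr_io_queues, 0, 0 }.
 */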
2105static int nvme_setup_irqs(struct nvme_dev *dev, int nr_io_queues)
2106{
2107 struct pci_dev *pdev = to_pci_dev(dev->dev);
2108 int irq_sets[2];
2109 struct irq_affinity affd = {
2110 .pre_vectors = 1,
2111 .nr_sets = ARRAY_SIZE(irq_sets),
2112 .sets = irq_sets,
2113 };
2114 int result = 0;
2115
2116 /*
2117 * For irq sets, we have to ask for minvec == maxvec. This passes
2118 * any reduction back to us, so we can adjust our queue counts and
2119 * IRQ vector needs.
2120 */
2121 do {
2122 nvme_calc_io_queues(dev, nr_io_queues);
2123 irq_sets[0] = dev->io_queues[HCTX_TYPE_DEFAULT];
2124 irq_sets[1] = dev->io_queues[HCTX_TYPE_READ];
2125 if (!irq_sets[1])
2126 affd.nr_sets = 1;
2127
2128 /*
2129 * If we got a failure and we're down to asking for just
2130 * 1 + 1 queues, just ask for a single vector. We'll share
2131 * that between the single IO queue and the admin queue.
2132 */
2133 if (!(result < 0 && nr_io_queues == 1))
2134 nr_io_queues = irq_sets[0] + irq_sets[1] + 1;
2135
2136 result = pci_alloc_irq_vectors_affinity(pdev, nr_io_queues,
2137 nr_io_queues,
2138 PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
2139
2140 /*
2141 * Need to reduce our vec counts. If we get ENOSPC, the
2142 * platform should support multiple vecs, we just need
2143 * to decrease our ask. If we get EINVAL, the platform
2144 * likely does not. Back down to ask for just one vector.
2145 */
2146 if (result == -ENOSPC) {
2147 nr_io_queues--;
2148 if (!nr_io_queues)
2149 return result;
2150 continue;
2151 } else if (result == -EINVAL) {
2152 nr_io_queues = 1;
2153 continue;
2154 } else if (result <= 0)
2155 return -EIO;
2156 break;
2157 } while (1);
2158
2159 return result;
2160}
2161
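/*
 * nvme_setup_irqs() has to converge on a vector count the platform will
 * actually grant: it recomputes the default/read split, asks for exactly
 * irq_sets[0] + irq_sets[1] + 1 vectors (the +1 being the admin queue's
 * pre_vectors entry), retries with one IO queue fewer on -ENOSPC, and
 * falls back to a single shared vector on -EINVAL. Poll queues never
 * enter this calculation since they consume no interrupt vectors.
 */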
1894static int nvme_setup_io_queues(struct nvme_dev *dev)
1895{
1896 struct nvme_queue *adminq = &dev->queues[0];
1897 struct pci_dev *pdev = to_pci_dev(dev->dev);
1898 int result, nr_io_queues;
1899 unsigned long size;
1900
2162static int nvme_setup_io_queues(struct nvme_dev *dev)
2163{
2164 struct nvme_queue *adminq = &dev->queues[0];
2165 struct pci_dev *pdev = to_pci_dev(dev->dev);
2166 int result, nr_io_queues;
2167 unsigned long size;
2168
1901 struct irq_affinity affd = {
1902 .pre_vectors = 1
1903 };
1904
1905 nr_io_queues = num_possible_cpus();
2169 nr_io_queues = max_io_queues();
1906 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
1907 if (result < 0)
1908 return result;
1909
1910 if (nr_io_queues == 0)
1911 return 0;
1912
1913 if (dev->cmb_use_sqes) {

--- 18 unchanged lines hidden ---

1932 /* Deregister the admin queue's interrupt */
1933 pci_free_irq(pdev, 0, adminq);
1934
1935 /*
1936 * If we enabled MSI-X early because INTx is not available, disable it again before
1937 * setting up the full range we need.
1938 */
1939 pci_free_irq_vectors(pdev);
2170 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
2171 if (result < 0)
2172 return result;
2173
2174 if (nr_io_queues == 0)
2175 return 0;
2176
2177 if (dev->cmb_use_sqes) {

--- 18 unchanged lines hidden ---

2196 /* Deregister the admin queue's interrupt */
2197 pci_free_irq(pdev, 0, adminq);
2198
2199 /*
2200 * If we enabled MSI-X early because INTx is not available, disable it again before
2201 * setting up the full range we need.
2202 */
2203 pci_free_irq_vectors(pdev);
1940 result = pci_alloc_irq_vectors_affinity(pdev, 1, nr_io_queues + 1,
1941 PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
2204
2205 result = nvme_setup_irqs(dev, nr_io_queues);
1942 if (result <= 0)
1943 return -EIO;
2206 if (result <= 0)
2207 return -EIO;
2208
1944 dev->num_vecs = result;
2209 dev->num_vecs = result;
1945 dev->max_qid = max(result - 1, 1);
2210 result = max(result - 1, 1);
2211 dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
1946
2212
2213 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
2214 dev->io_queues[HCTX_TYPE_DEFAULT],
2215 dev->io_queues[HCTX_TYPE_READ],
2216 dev->io_queues[HCTX_TYPE_POLL]);
2217
1947 /*
1948 * Should investigate if there's a performance win from allocating
1949 * more queues than interrupt vectors; it might allow the submission
1950 * path to scale better, even if the receive path is limited by the
1951 * number of interrupts.
1952 */
1953
1954 result = queue_request_irq(adminq);

--- 83 unchanged lines hidden (view full) ---

2038/*
2039 * return error value only when tagset allocation failed
2040 */
2041static int nvme_dev_add(struct nvme_dev *dev)
2042{
2043 int ret;
2044
2045 if (!dev->ctrl.tagset) {
2218 /*
2219 * Should investigate if there's a performance win from allocating
2220 * more queues than interrupt vectors; it might allow the submission
2221 * path to scale better, even if the receive path is limited by the
2222 * number of interrupts.
2223 */
2224
2225 result = queue_request_irq(adminq);

--- 83 unchanged lines hidden (view full) ---

2309/*
2310 * return error value only when tagset allocation failed
2311 */
2312static int nvme_dev_add(struct nvme_dev *dev)
2313{
2314 int ret;
2315
2316 if (!dev->ctrl.tagset) {
2046 dev->tagset.ops = &nvme_mq_ops;
2317 if (!dev->io_queues[HCTX_TYPE_POLL])
2318 dev->tagset.ops = &nvme_mq_ops;
2319 else
2320 dev->tagset.ops = &nvme_mq_poll_noirq_ops;
2321
2047 dev->tagset.nr_hw_queues = dev->online_queues - 1;
2322 dev->tagset.nr_hw_queues = dev->online_queues - 1;
2323 dev->tagset.nr_maps = HCTX_MAX_TYPES;
2048 dev->tagset.timeout = NVME_IO_TIMEOUT;
2049 dev->tagset.numa_node = dev_to_node(dev->dev);
2050 dev->tagset.queue_depth =
2051 min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
2052 dev->tagset.cmd_size = nvme_pci_cmd_size(dev, false);
2053 if ((dev->ctrl.sgls & ((1 << 0) | (1 << 1))) && sgl_threshold) {
2054 dev->tagset.cmd_size = max(dev->tagset.cmd_size,
2055 nvme_pci_cmd_size(dev, true));

--- 430 unchanged lines hidden ---

2486 node = dev_to_node(&pdev->dev);
2487 if (node == NUMA_NO_NODE)
2488 set_dev_node(&pdev->dev, first_memory_node);
2489
2490 dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
2491 if (!dev)
2492 return -ENOMEM;
2493
2324 dev->tagset.timeout = NVME_IO_TIMEOUT;
2325 dev->tagset.numa_node = dev_to_node(dev->dev);
2326 dev->tagset.queue_depth =
2327 min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
2328 dev->tagset.cmd_size = nvme_pci_cmd_size(dev, false);
2329 if ((dev->ctrl.sgls & ((1 << 0) | (1 << 1))) && sgl_threshold) {
2330 dev->tagset.cmd_size = max(dev->tagset.cmd_size,
2331 nvme_pci_cmd_size(dev, true));

--- 430 unchanged lines hidden ---

2762 node = dev_to_node(&pdev->dev);
2763 if (node == NUMA_NO_NODE)
2764 set_dev_node(&pdev->dev, first_memory_node);
2765
2766 dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
2767 if (!dev)
2768 return -ENOMEM;
2769
2494 dev->queues = kcalloc_node(num_possible_cpus() + 1,
2495 sizeof(struct nvme_queue), GFP_KERNEL, node);
2770 dev->queues = kcalloc_node(max_queue_count(), sizeof(struct nvme_queue),
2771 GFP_KERNEL, node);
2496 if (!dev->queues)
2497 goto free;
2498
2499 dev->dev = get_device(&pdev->dev);
2500 pci_set_drvdata(pdev, dev);
2501
2502 result = nvme_dev_map(dev);
2503 if (result)

--- 252 unchanged lines hidden ---
2772 if (!dev->queues)
2773 goto free;
2774
2775 dev->dev = get_device(&pdev->dev);
2776 pci_set_drvdata(pdev, dev);
2777
2778 result = nvme_dev_map(dev);
2779 if (result)

--- 252 unchanged lines hidden ---