1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * NVM Express device driver
4 * Copyright (c) 2011-2014, Intel Corporation.
5 */
6
7 #include <linux/acpi.h>
8 #include <linux/async.h>
9 #include <linux/blkdev.h>
10 #include <linux/blk-mq-dma.h>
11 #include <linux/blk-integrity.h>
12 #include <linux/dmi.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/kstrtox.h>
17 #include <linux/memremap.h>
18 #include <linux/mm.h>
19 #include <linux/module.h>
20 #include <linux/mutex.h>
21 #include <linux/nodemask.h>
22 #include <linux/once.h>
23 #include <linux/pci.h>
24 #include <linux/suspend.h>
25 #include <linux/t10-pi.h>
26 #include <linux/types.h>
27 #include <linux/io-64-nonatomic-lo-hi.h>
28 #include <linux/io-64-nonatomic-hi-lo.h>
29 #include <linux/sed-opal.h>
30
31 #include "trace.h"
32 #include "nvme.h"
33
34 #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
35 #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
36
37 /* Optimisation for I/Os between 4k and 128k */
38 #define NVME_SMALL_POOL_SIZE 256
39
40 /*
41 * Arbitrary upper bound.
42 */
43 #define NVME_MAX_BYTES SZ_8M
44 #define NVME_MAX_NR_DESCRIPTORS 5
45
/*
 * For data SGLs we support a single descriptor's worth of SGL entries.
 * For PRPs, segments don't matter at all.
 */
50 #define NVME_MAX_SEGS \
51 (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
52
53 /*
54 * For metadata SGLs, only the small descriptor is supported, and the first
55 * entry is the segment descriptor, which for the data pointer sits in the SQE.
56 */
57 #define NVME_MAX_META_SEGS \
58 ((NVME_SMALL_POOL_SIZE / sizeof(struct nvme_sgl_desc)) - 1)
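/*
 * With the 256-byte small pool and 16-byte SGL descriptors this works out
 * to 15 usable metadata segments.
 */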
59
60 /*
61 * The last entry is used to link to the next descriptor.
62 */
63 #define PRPS_PER_PAGE \
64 (((NVME_CTRL_PAGE_SIZE / sizeof(__le64))) - 1)
65
66 /*
67 * I/O could be non-aligned both at the beginning and end.
68 */
69 #define MAX_PRP_RANGE \
70 (NVME_MAX_BYTES + 2 * (NVME_CTRL_PAGE_SIZE - 1))
71
72 static_assert(MAX_PRP_RANGE / NVME_CTRL_PAGE_SIZE <=
73 (1 /* prp1 */ + NVME_MAX_NR_DESCRIPTORS * PRPS_PER_PAGE));
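/*
 * Worked example, assuming the default 4KiB NVME_CTRL_PAGE_SIZE: an 8MiB
 * transfer that is misaligned at both ends covers at most 2049 controller
 * pages, while prp1 plus five chained PRP lists provide 1 + 5 * 511 = 2556
 * entries, so the assertion holds with room to spare.
 */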
74
75 struct quirk_entry {
76 u16 vendor_id;
77 u16 dev_id;
78 u32 enabled_quirks;
79 u32 disabled_quirks;
80 };
81
82 static int use_threaded_interrupts;
83 module_param(use_threaded_interrupts, int, 0444);
84
85 static bool use_cmb_sqes = true;
86 module_param(use_cmb_sqes, bool, 0444);
87 MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
88
89 static unsigned int max_host_mem_size_mb = 128;
90 module_param(max_host_mem_size_mb, uint, 0444);
91 MODULE_PARM_DESC(max_host_mem_size_mb,
92 "Maximum Host Memory Buffer (HMB) size per controller (in MiB)");
93
94 static unsigned int sgl_threshold = SZ_32K;
95 module_param(sgl_threshold, uint, 0644);
MODULE_PARM_DESC(sgl_threshold,
		"Use SGLs when average request segment size is larger than or equal to "
		"this size. Use 0 to disable SGLs.");
99
100 #define NVME_PCI_MIN_QUEUE_SIZE 2
101 #define NVME_PCI_MAX_QUEUE_SIZE 4095
102 static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
103 static const struct kernel_param_ops io_queue_depth_ops = {
104 .set = io_queue_depth_set,
105 .get = param_get_uint,
106 };
107
108 static unsigned int io_queue_depth = 1024;
109 module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2 and < 4096");
111
112 static struct quirk_entry *nvme_pci_quirk_list;
113 static unsigned int nvme_pci_quirk_count;
114
115 /* Helper to parse individual quirk names */
static int nvme_parse_quirk_names(char *quirk_str, struct quirk_entry *entry)
117 {
118 int i;
119 size_t field_len;
120 bool disabled, found;
121 char *p = quirk_str, *field;
122
123 while ((field = strsep(&p, ",")) && *field) {
124 disabled = false;
125 found = false;
126
127 if (*field == '^') {
128 /* Skip the '^' character */
129 disabled = true;
130 field++;
131 }
132
133 field_len = strlen(field);
134 for (i = 0; i < 32; i++) {
135 unsigned int bit = 1U << i;
136 char *q_name = nvme_quirk_name(bit);
137 size_t q_len = strlen(q_name);
138
139 if (!strcmp(q_name, "unknown"))
140 break;
141
142 if (!strcmp(q_name, field) &&
143 q_len == field_len) {
144 if (disabled)
145 entry->disabled_quirks |= bit;
146 else
147 entry->enabled_quirks |= bit;
148 found = true;
149 break;
150 }
151 }
152
153 if (!found) {
154 pr_err("nvme: unrecognized quirk %s\n", field);
155 return -EINVAL;
156 }
157 }
158 return 0;
159 }
160
161 /* Helper to parse a single VID:DID:quirk_names entry */
static int nvme_parse_quirk_entry(char *s, struct quirk_entry *entry)
163 {
164 char *field;
165
166 field = strsep(&s, ":");
167 if (!field || kstrtou16(field, 16, &entry->vendor_id))
168 return -EINVAL;
169
170 field = strsep(&s, ":");
171 if (!field || kstrtou16(field, 16, &entry->dev_id))
172 return -EINVAL;
173
174 field = strsep(&s, ":");
175 if (!field)
176 return -EINVAL;
177
178 return nvme_parse_quirk_names(field, entry);
179 }
180
static int quirks_param_set(const char *value, const struct kernel_param *kp)
182 {
183 int count, err, i;
184 struct quirk_entry *qlist;
185 char *field, *val, *sep_ptr;
186
187 err = param_set_copystring(value, kp);
188 if (err)
189 return err;
190
191 val = kstrdup(value, GFP_KERNEL);
192 if (!val)
193 return -ENOMEM;
194
195 if (!*val)
196 goto out_free_val;
197
198 count = 1;
199 for (i = 0; val[i]; i++) {
200 if (val[i] == '-')
201 count++;
202 }
203
204 qlist = kcalloc(count, sizeof(*qlist), GFP_KERNEL);
205 if (!qlist) {
206 err = -ENOMEM;
207 goto out_free_val;
208 }
209
210 i = 0;
211 sep_ptr = val;
212 while ((field = strsep(&sep_ptr, "-"))) {
213 if (nvme_parse_quirk_entry(field, &qlist[i])) {
214 pr_err("nvme: failed to parse quirk string %s\n",
215 value);
216 goto out_free_qlist;
217 }
218
219 i++;
220 }
221
222 kfree(nvme_pci_quirk_list);
223 nvme_pci_quirk_count = count;
224 nvme_pci_quirk_list = qlist;
225 goto out_free_val;
226
227 out_free_qlist:
228 kfree(qlist);
229 out_free_val:
230 kfree(val);
231 return err;
232 }
233
234 static char quirks_param[128];
235 static const struct kernel_param_ops quirks_param_ops = {
236 .set = quirks_param_set,
237 .get = param_get_string,
238 };
239
240 static struct kparam_string quirks_param_string = {
241 .maxlen = sizeof(quirks_param),
242 .string = quirks_param,
243 };
244
245 module_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0444);
246 MODULE_PARM_DESC(quirks, "Enable/disable NVMe quirks by specifying "
247 "quirks=VID:DID:quirk_names");
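/*
 * The value is a '-'-separated list of VID:DID:quirk_names entries: the
 * vendor and device IDs are parsed as hex, the quirk names are a
 * ','-separated list matched against nvme_quirk_name(), and a leading '^'
 * disables a quirk instead of enabling it.
 */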
248
static int io_queue_count_set(const char *val, const struct kernel_param *kp)
250 {
251 unsigned int n;
252 int ret;
253
254 ret = kstrtouint(val, 10, &n);
255 if (ret != 0 || n > blk_mq_num_possible_queues(0))
256 return -EINVAL;
257 return param_set_uint(val, kp);
258 }
259
260 static const struct kernel_param_ops io_queue_count_ops = {
261 .set = io_queue_count_set,
262 .get = param_get_uint,
263 };
264
265 static unsigned int write_queues;
266 module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644);
267 MODULE_PARM_DESC(write_queues,
268 "Number of queues to use for writes. If not set, reads and writes "
269 "will share a queue set.");
270
271 static unsigned int poll_queues;
272 module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644);
273 MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
274
275 static bool noacpi;
276 module_param(noacpi, bool, 0444);
277 MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");
278
279 struct nvme_dev;
280 struct nvme_queue;
281
282 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
283 static void nvme_delete_io_queues(struct nvme_dev *dev);
284 static void nvme_update_attrs(struct nvme_dev *dev);
285
286 struct nvme_descriptor_pools {
287 struct dma_pool *large;
288 struct dma_pool *small;
289 };
290
291 /*
292 * Represents an NVM Express device. Each nvme_dev is a PCI function.
293 */
294 struct nvme_dev {
295 struct nvme_queue *queues;
296 struct blk_mq_tag_set tagset;
297 struct blk_mq_tag_set admin_tagset;
298 u32 __iomem *dbs;
299 struct device *dev;
300 unsigned online_queues;
301 unsigned max_qid;
302 unsigned io_queues[HCTX_MAX_TYPES];
303 unsigned int num_vecs;
304 u32 q_depth;
305 int io_sqes;
306 u32 db_stride;
307 void __iomem *bar;
308 unsigned long bar_mapped_size;
309 struct mutex shutdown_lock;
310 bool subsystem;
311 u64 cmb_size;
312 bool cmb_use_sqes;
313 u32 cmbsz;
314 u32 cmbloc;
315 struct nvme_ctrl ctrl;
316 u32 last_ps;
317 bool hmb;
318 struct sg_table *hmb_sgt;
319 mempool_t *dmavec_mempool;
320
321 /* shadow doorbell buffer support: */
322 __le32 *dbbuf_dbs;
323 dma_addr_t dbbuf_dbs_dma_addr;
324 __le32 *dbbuf_eis;
325 dma_addr_t dbbuf_eis_dma_addr;
326
327 /* host memory buffer support: */
328 u64 host_mem_size;
329 u32 nr_host_mem_descs;
330 u32 host_mem_descs_size;
331 dma_addr_t host_mem_descs_dma;
332 struct nvme_host_mem_buf_desc *host_mem_descs;
333 void **host_mem_desc_bufs;
334 unsigned int nr_allocated_queues;
335 unsigned int nr_write_queues;
336 unsigned int nr_poll_queues;
337 struct nvme_descriptor_pools descriptor_pools[];
338 };
339
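/*
 * io_queue_depth is clamped to 2..4095: the NVMe spec requires at least two
 * entries per queue, and the upper bound presumably reflects a 4096-entry
 * queue with one slot kept unused so a full submission queue can be told
 * apart from an empty one (head == tail).
 */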
static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
341 {
342 return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE,
343 NVME_PCI_MAX_QUEUE_SIZE);
344 }
345
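/*
 * Doorbell register layout: each queue pair owns two doorbells, with the
 * submission queue tail doorbell at an even stride slot and the completion
 * queue head doorbell at the following odd slot.
 */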
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
347 {
348 return qid * 2 * stride;
349 }
350
static inline unsigned int cq_idx(unsigned int qid, u32 stride)
352 {
353 return (qid * 2 + 1) * stride;
354 }
355
static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
357 {
358 return container_of(ctrl, struct nvme_dev, ctrl);
359 }
360
361 /*
362 * An NVM Express queue. Each device has at least two (one for admin
363 * commands and one for I/O commands).
364 */
365 struct nvme_queue {
366 struct nvme_dev *dev;
367 struct nvme_descriptor_pools descriptor_pools;
368 spinlock_t sq_lock;
369 void *sq_cmds;
370 /* only used for poll queues: */
371 spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
372 struct nvme_completion *cqes;
373 dma_addr_t sq_dma_addr;
374 dma_addr_t cq_dma_addr;
375 u32 __iomem *q_db;
376 u32 q_depth;
377 u16 cq_vector;
378 u16 sq_tail;
379 u16 last_sq_tail;
380 u16 cq_head;
381 u16 qid;
382 u8 cq_phase;
383 u8 sqes;
384 unsigned long flags;
385 #define NVMEQ_ENABLED 0
386 #define NVMEQ_SQ_CMB 1
387 #define NVMEQ_DELETE_ERROR 2
388 #define NVMEQ_POLLED 3
389 __le32 *dbbuf_sq_db;
390 __le32 *dbbuf_cq_db;
391 __le32 *dbbuf_sq_ei;
392 __le32 *dbbuf_cq_ei;
393 struct completion delete_done;
394 };
395
396 /* bits for iod->flags */
397 enum nvme_iod_flags {
398 /* this command has been aborted by the timeout handler */
399 IOD_ABORTED = 1U << 0,
400
401 /* uses the small descriptor pool */
402 IOD_SMALL_DESCRIPTOR = 1U << 1,
403
404 /* single segment dma mapping */
405 IOD_SINGLE_SEGMENT = 1U << 2,
406
407 /* Data payload contains p2p memory */
408 IOD_DATA_P2P = 1U << 3,
409
410 /* Metadata contains p2p memory */
411 IOD_META_P2P = 1U << 4,
412
413 /* Data payload contains MMIO memory */
414 IOD_DATA_MMIO = 1U << 5,
415
416 /* Metadata contains MMIO memory */
417 IOD_META_MMIO = 1U << 6,
418
419 /* Metadata using non-coalesced MPTR */
420 IOD_SINGLE_META_SEGMENT = 1U << 7,
421 };
422
423 struct nvme_dma_vec {
424 dma_addr_t addr;
425 unsigned int len;
426 };
427
428 /*
429 * The nvme_iod describes the data in an I/O.
430 */
431 struct nvme_iod {
432 struct nvme_request req;
433 struct nvme_command cmd;
434 u8 flags;
435 u8 nr_descriptors;
436
437 size_t total_len;
438 struct dma_iova_state dma_state;
439 void *descriptors[NVME_MAX_NR_DESCRIPTORS];
440 struct nvme_dma_vec *dma_vecs;
441 unsigned int nr_dma_vecs;
442
443 dma_addr_t meta_dma;
444 size_t meta_total_len;
445 struct dma_iova_state meta_dma_state;
446 struct nvme_sgl_desc *meta_descriptor;
447 };
448
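/*
 * One shadow doorbell and one event index entry per queue pair: two 4-byte
 * doorbells per queue, scaled by the controller's doorbell stride.
 */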
static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
450 {
451 return dev->nr_allocated_queues * 8 * dev->db_stride;
452 }
453
static void nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
455 {
456 unsigned int mem_size = nvme_dbbuf_size(dev);
457
458 if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP))
459 return;
460
461 if (dev->dbbuf_dbs) {
462 /*
463 * Clear the dbbuf memory so the driver doesn't observe stale
464 * values from the previous instantiation.
465 */
466 memset(dev->dbbuf_dbs, 0, mem_size);
467 memset(dev->dbbuf_eis, 0, mem_size);
468 return;
469 }
470
471 dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
472 &dev->dbbuf_dbs_dma_addr,
473 GFP_KERNEL);
474 if (!dev->dbbuf_dbs)
475 goto fail;
476 dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
477 &dev->dbbuf_eis_dma_addr,
478 GFP_KERNEL);
479 if (!dev->dbbuf_eis)
480 goto fail_free_dbbuf_dbs;
481 return;
482
483 fail_free_dbbuf_dbs:
484 dma_free_coherent(dev->dev, mem_size, dev->dbbuf_dbs,
485 dev->dbbuf_dbs_dma_addr);
486 dev->dbbuf_dbs = NULL;
487 fail:
488 dev_warn(dev->dev, "unable to allocate dma for dbbuf\n");
489 }
490
static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
492 {
493 unsigned int mem_size = nvme_dbbuf_size(dev);
494
495 if (dev->dbbuf_dbs) {
496 dma_free_coherent(dev->dev, mem_size,
497 dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
498 dev->dbbuf_dbs = NULL;
499 }
500 if (dev->dbbuf_eis) {
501 dma_free_coherent(dev->dev, mem_size,
502 dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
503 dev->dbbuf_eis = NULL;
504 }
505 }
506
static void nvme_dbbuf_init(struct nvme_dev *dev,
		struct nvme_queue *nvmeq, int qid)
509 {
510 if (!dev->dbbuf_dbs || !qid)
511 return;
512
513 nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
514 nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
515 nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
516 nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
517 }
518
static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
520 {
521 if (!nvmeq->qid)
522 return;
523
524 nvmeq->dbbuf_sq_db = NULL;
525 nvmeq->dbbuf_cq_db = NULL;
526 nvmeq->dbbuf_sq_ei = NULL;
527 nvmeq->dbbuf_cq_ei = NULL;
528 }
529
static void nvme_dbbuf_set(struct nvme_dev *dev)
531 {
532 struct nvme_command c = { };
533 unsigned int i;
534
535 if (!dev->dbbuf_dbs)
536 return;
537
538 c.dbbuf.opcode = nvme_admin_dbbuf;
539 c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
540 c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
541
542 if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
543 dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
544 /* Free memory and continue on */
545 nvme_dbbuf_dma_free(dev);
546
547 for (i = 1; i < dev->online_queues; i++)
548 nvme_dbbuf_free(&dev->queues[i]);
549 }
550 }
551
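/*
 * The shadow doorbell protocol only requires an MMIO write once the new
 * doorbell value has passed the event index the controller last advertised;
 * the comparison below is done in wraparound-safe 16-bit arithmetic.
 */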
static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
553 {
554 return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
555 }
556
557 /* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
		volatile __le32 *dbbuf_ei)
560 {
561 if (dbbuf_db) {
562 u16 old_value, event_idx;
563
564 /*
565 * Ensure that the queue is written before updating
566 * the doorbell in memory
567 */
568 wmb();
569
570 old_value = le32_to_cpu(*dbbuf_db);
571 *dbbuf_db = cpu_to_le32(value);
572
573 /*
574 * Ensure that the doorbell is updated before reading the event
575 * index from memory. The controller needs to provide similar
576 * ordering to ensure the event index is updated before reading
577 * the doorbell.
578 */
579 mb();
580
581 event_idx = le32_to_cpu(*dbbuf_ei);
582 if (!nvme_dbbuf_need_event(event_idx, value, old_value))
583 return false;
584 }
585
586 return true;
587 }
588
static struct nvme_descriptor_pools *
nvme_setup_descriptor_pools(struct nvme_dev *dev, unsigned numa_node)
591 {
592 struct nvme_descriptor_pools *pools = &dev->descriptor_pools[numa_node];
593 size_t small_align = NVME_SMALL_POOL_SIZE;
594
595 if (pools->small)
596 return pools; /* already initialized */
597
598 pools->large = dma_pool_create_node("nvme descriptor page", dev->dev,
599 NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE, 0, numa_node);
600 if (!pools->large)
601 return ERR_PTR(-ENOMEM);
602
603 if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512)
604 small_align = 512;
605
606 pools->small = dma_pool_create_node("nvme descriptor small", dev->dev,
607 NVME_SMALL_POOL_SIZE, small_align, 0, numa_node);
608 if (!pools->small) {
609 dma_pool_destroy(pools->large);
610 pools->large = NULL;
611 return ERR_PTR(-ENOMEM);
612 }
613
614 return pools;
615 }
616
static void nvme_release_descriptor_pools(struct nvme_dev *dev)
618 {
619 unsigned i;
620
621 for (i = 0; i < nr_node_ids; i++) {
622 struct nvme_descriptor_pools *pools = &dev->descriptor_pools[i];
623
624 dma_pool_destroy(pools->large);
625 dma_pool_destroy(pools->small);
626 }
627 }
628
static int nvme_init_hctx_common(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned qid)
631 {
632 struct nvme_dev *dev = to_nvme_dev(data);
633 struct nvme_queue *nvmeq = &dev->queues[qid];
634 struct nvme_descriptor_pools *pools;
635 struct blk_mq_tags *tags;
636
637 tags = qid ? dev->tagset.tags[qid - 1] : dev->admin_tagset.tags[0];
638 WARN_ON(tags != hctx->tags);
639 pools = nvme_setup_descriptor_pools(dev, hctx->numa_node);
640 if (IS_ERR(pools))
641 return PTR_ERR(pools);
642
643 nvmeq->descriptor_pools = *pools;
644 hctx->driver_data = nvmeq;
645 return 0;
646 }
647
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
650 {
651 WARN_ON(hctx_idx != 0);
652 return nvme_init_hctx_common(hctx, data, 0);
653 }
654
static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
657 {
658 return nvme_init_hctx_common(hctx, data, hctx_idx + 1);
659 }
660
static int nvme_pci_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
664 {
665 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
666
667 nvme_req(req)->ctrl = set->driver_data;
668 nvme_req(req)->cmd = &iod->cmd;
669 return 0;
670 }
671
static int queue_irq_offset(struct nvme_dev *dev)
673 {
674 /* if we have more than 1 vec, admin queue offsets us by 1 */
675 if (dev->num_vecs > 1)
676 return 1;
677
678 return 0;
679 }
680
static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
682 {
683 struct nvme_dev *dev = to_nvme_dev(set->driver_data);
684 int i, qoff, offset;
685
686 offset = queue_irq_offset(dev);
687 for (i = 0, qoff = 0; i < set->nr_maps; i++) {
688 struct blk_mq_queue_map *map = &set->map[i];
689
690 map->nr_queues = dev->io_queues[i];
691 if (!map->nr_queues) {
692 BUG_ON(i == HCTX_TYPE_DEFAULT);
693 continue;
694 }
695
		/*
		 * The poll queue(s) don't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
700 map->queue_offset = qoff;
701 if (i != HCTX_TYPE_POLL && offset)
702 blk_mq_map_hw_queues(map, dev->dev, offset);
703 else
704 blk_mq_map_queues(map);
705 qoff += map->nr_queues;
706 offset += map->nr_queues;
707 }
708 }
709
710 /*
711 * Write sq tail if we are asked to, or if the next command would wrap.
712 */
static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
714 {
715 if (!write_sq) {
716 u16 next_tail = nvmeq->sq_tail + 1;
717
718 if (next_tail == nvmeq->q_depth)
719 next_tail = 0;
720 if (next_tail != nvmeq->last_sq_tail)
721 return;
722 }
723
724 if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
725 nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
726 writel(nvmeq->sq_tail, nvmeq->q_db);
727 nvmeq->last_sq_tail = nvmeq->sq_tail;
728 }
729
static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq,
		struct nvme_command *cmd)
732 {
733 memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
734 absolute_pointer(cmd), sizeof(*cmd));
735 if (++nvmeq->sq_tail == nvmeq->q_depth)
736 nvmeq->sq_tail = 0;
737 }
738
static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
740 {
741 struct nvme_queue *nvmeq = hctx->driver_data;
742
743 spin_lock(&nvmeq->sq_lock);
744 if (nvmeq->sq_tail != nvmeq->last_sq_tail)
745 nvme_write_sq_db(nvmeq, true);
746 spin_unlock(&nvmeq->sq_lock);
747 }
748
749 enum nvme_use_sgl {
750 SGL_UNSUPPORTED,
751 SGL_SUPPORTED,
752 SGL_FORCED,
753 };
754
static inline bool nvme_pci_metadata_use_sgls(struct request *req)
756 {
757 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
758 struct nvme_dev *dev = nvmeq->dev;
759
760 if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl))
761 return false;
762 return req->nr_integrity_segments > 1 ||
763 nvme_req(req)->flags & NVME_REQ_USERCMD;
764 }
765
static inline enum nvme_use_sgl nvme_pci_use_sgls(struct nvme_dev *dev,
		struct request *req)
768 {
769 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
770
771 if (nvmeq->qid && nvme_ctrl_sgl_supported(&dev->ctrl)) {
		/*
		 * When the controller is capable of using SGLs, there are
		 * several conditions under which we force their use:
		 *
		 * 1. A request containing page gaps within the controller's
		 *    mask cannot use the PRP format.
		 *
		 * 2. User commands use SGLs because that lets the device
		 *    validate the requested transfer lengths.
		 *
		 * 3. Multiple integrity segments must use SGLs as that's the
		 *    only way to describe such a command in NVMe.
		 */
785 if (req_phys_gap_mask(req) & (NVME_CTRL_PAGE_SIZE - 1) ||
786 nvme_req(req)->flags & NVME_REQ_USERCMD ||
787 req->nr_integrity_segments > 1)
788 return SGL_FORCED;
789 return SGL_SUPPORTED;
790 }
791
792 return SGL_UNSUPPORTED;
793 }
794
static unsigned int nvme_pci_avg_seg_size(struct request *req)
796 {
797 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
798 unsigned int nseg;
799
800 if (blk_rq_dma_map_coalesce(&iod->dma_state))
801 nseg = 1;
802 else
803 nseg = blk_rq_nr_phys_segments(req);
804 return DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
805 }
806
static inline struct dma_pool *nvme_dma_pool(struct nvme_queue *nvmeq,
		struct nvme_iod *iod)
809 {
810 if (iod->flags & IOD_SMALL_DESCRIPTOR)
811 return nvmeq->descriptor_pools.small;
812 return nvmeq->descriptor_pools.large;
813 }
814
static inline bool nvme_pci_cmd_use_meta_sgl(struct nvme_command *cmd)
816 {
817 return (cmd->common.flags & NVME_CMD_SGL_ALL) == NVME_CMD_SGL_METASEG;
818 }
819
static inline bool nvme_pci_cmd_use_sgl(struct nvme_command *cmd)
821 {
822 return cmd->common.flags &
823 (NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG);
824 }
825
static inline dma_addr_t nvme_pci_first_desc_dma_addr(struct nvme_command *cmd)
827 {
828 if (nvme_pci_cmd_use_sgl(cmd))
829 return le64_to_cpu(cmd->common.dptr.sgl.addr);
830 return le64_to_cpu(cmd->common.dptr.prp2);
831 }
832
static void nvme_free_descriptors(struct request *req)
834 {
835 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
836 const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
837 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
838 dma_addr_t dma_addr = nvme_pci_first_desc_dma_addr(&iod->cmd);
839 int i;
840
841 if (iod->nr_descriptors == 1) {
842 dma_pool_free(nvme_dma_pool(nvmeq, iod), iod->descriptors[0],
843 dma_addr);
844 return;
845 }
846
847 for (i = 0; i < iod->nr_descriptors; i++) {
848 __le64 *prp_list = iod->descriptors[i];
849 dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
850
851 dma_pool_free(nvmeq->descriptor_pools.large, prp_list,
852 dma_addr);
853 dma_addr = next_dma_addr;
854 }
855 }
856
static void nvme_free_prps(struct request *req, unsigned int attrs)
858 {
859 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
860 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
861 unsigned int i;
862
863 for (i = 0; i < iod->nr_dma_vecs; i++)
864 dma_unmap_phys(nvmeq->dev->dev, iod->dma_vecs[i].addr,
865 iod->dma_vecs[i].len, rq_dma_dir(req), attrs);
866 mempool_free(iod->dma_vecs, nvmeq->dev->dmavec_mempool);
867 }
868
static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge,
		struct nvme_sgl_desc *sg_list, unsigned int attrs)
871 {
872 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
873 enum dma_data_direction dir = rq_dma_dir(req);
874 unsigned int len = le32_to_cpu(sge->length);
875 struct device *dma_dev = nvmeq->dev->dev;
876 unsigned int i;
877
878 if (sge->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
879 dma_unmap_phys(dma_dev, le64_to_cpu(sge->addr), len, dir,
880 attrs);
881 return;
882 }
883
884 for (i = 0; i < len / sizeof(*sg_list); i++)
885 dma_unmap_phys(dma_dev, le64_to_cpu(sg_list[i].addr),
886 le32_to_cpu(sg_list[i].length), dir, attrs);
887 }
888
static void nvme_unmap_metadata(struct request *req)
890 {
891 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
892 enum pci_p2pdma_map_type map = PCI_P2PDMA_MAP_NONE;
893 enum dma_data_direction dir = rq_dma_dir(req);
894 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
895 struct device *dma_dev = nvmeq->dev->dev;
896 struct nvme_sgl_desc *sge = iod->meta_descriptor;
897 unsigned int attrs = 0;
898
899 if (iod->flags & IOD_SINGLE_META_SEGMENT) {
900 dma_unmap_page(dma_dev, iod->meta_dma,
901 rq_integrity_vec(req).bv_len,
902 rq_dma_dir(req));
903 return;
904 }
905
906 if (iod->flags & IOD_META_P2P)
907 map = PCI_P2PDMA_MAP_BUS_ADDR;
908 else if (iod->flags & IOD_META_MMIO) {
909 map = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
910 attrs |= DMA_ATTR_MMIO;
911 }
912
913 if (!blk_rq_dma_unmap(req, dma_dev, &iod->meta_dma_state,
914 iod->meta_total_len, map)) {
915 if (nvme_pci_cmd_use_meta_sgl(&iod->cmd))
916 nvme_free_sgls(req, sge, &sge[1], attrs);
917 else
918 dma_unmap_phys(dma_dev, iod->meta_dma,
919 iod->meta_total_len, dir, attrs);
920 }
921
922 if (iod->meta_descriptor)
923 dma_pool_free(nvmeq->descriptor_pools.small,
924 iod->meta_descriptor, iod->meta_dma);
925 }
926
static void nvme_unmap_data(struct request *req)
928 {
929 enum pci_p2pdma_map_type map = PCI_P2PDMA_MAP_NONE;
930 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
931 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
932 struct device *dma_dev = nvmeq->dev->dev;
933 unsigned int attrs = 0;
934
935 if (iod->flags & IOD_SINGLE_SEGMENT) {
936 static_assert(offsetof(union nvme_data_ptr, prp1) ==
937 offsetof(union nvme_data_ptr, sgl.addr));
938 dma_unmap_page(dma_dev, le64_to_cpu(iod->cmd.common.dptr.prp1),
939 iod->total_len, rq_dma_dir(req));
940 return;
941 }
942
943 if (iod->flags & IOD_DATA_P2P)
944 map = PCI_P2PDMA_MAP_BUS_ADDR;
945 else if (iod->flags & IOD_DATA_MMIO) {
946 map = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
947 attrs |= DMA_ATTR_MMIO;
948 }
949
950 if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len,
951 map)) {
952 if (nvme_pci_cmd_use_sgl(&iod->cmd))
953 nvme_free_sgls(req, &iod->cmd.common.dptr.sgl,
954 iod->descriptors[0], attrs);
955 else
956 nvme_free_prps(req, attrs);
957 }
958
959 if (iod->nr_descriptors)
960 nvme_free_descriptors(req);
961 }
962
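/*
 * Record each mapped segment so nvme_free_prps() can unmap it later.  This
 * is only needed when the mapping did not use a single IOVA range and the
 * DMA API actually requires an unmap on this device.
 */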
static bool nvme_pci_prp_save_mapping(struct request *req,
		struct device *dma_dev,
		struct blk_dma_iter *iter)
966 {
967 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
968
969 if (dma_use_iova(&iod->dma_state) || !dma_need_unmap(dma_dev))
970 return true;
971
972 if (!iod->nr_dma_vecs) {
973 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
974
975 iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
976 GFP_ATOMIC);
977 if (!iod->dma_vecs) {
978 iter->status = BLK_STS_RESOURCE;
979 return false;
980 }
981 }
982
983 iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
984 iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
985 iod->nr_dma_vecs++;
986 return true;
987 }
988
static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
		struct blk_dma_iter *iter)
991 {
992 if (iter->len)
993 return true;
994 if (!blk_rq_dma_map_iter_next(req, dma_dev, iter))
995 return false;
996 return nvme_pci_prp_save_mapping(req, dma_dev, iter);
997 }
998
static blk_status_t nvme_pci_setup_data_prp(struct request *req,
		struct blk_dma_iter *iter)
1001 {
1002 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1003 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1004 unsigned int length = blk_rq_payload_bytes(req);
1005 dma_addr_t prp1_dma, prp2_dma = 0;
1006 unsigned int prp_len, i;
1007 __le64 *prp_list;
1008
1009 if (!nvme_pci_prp_save_mapping(req, nvmeq->dev->dev, iter))
1010 return iter->status;
1011
1012 /*
1013 * PRP1 always points to the start of the DMA transfers.
1014 *
1015 * This is the only PRP (except for the list entries) that could be
1016 * non-aligned.
1017 */
1018 prp1_dma = iter->addr;
1019 prp_len = min(length, NVME_CTRL_PAGE_SIZE -
1020 (iter->addr & (NVME_CTRL_PAGE_SIZE - 1)));
1021 iod->total_len += prp_len;
1022 iter->addr += prp_len;
1023 iter->len -= prp_len;
1024 length -= prp_len;
1025 if (!length)
1026 goto done;
1027
1028 if (!nvme_pci_prp_iter_next(req, nvmeq->dev->dev, iter)) {
1029 if (WARN_ON_ONCE(!iter->status))
1030 goto bad_sgl;
1031 goto done;
1032 }
1033
1034 /*
1035 * PRP2 is usually a list, but can point to data if all data to be
1036 * transferred fits into PRP1 + PRP2:
1037 */
1038 if (length <= NVME_CTRL_PAGE_SIZE) {
1039 prp2_dma = iter->addr;
1040 iod->total_len += length;
1041 goto done;
1042 }
1043
1044 if (DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE) <=
1045 NVME_SMALL_POOL_SIZE / sizeof(__le64))
1046 iod->flags |= IOD_SMALL_DESCRIPTOR;
1047
1048 prp_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC,
1049 &prp2_dma);
1050 if (!prp_list) {
1051 iter->status = BLK_STS_RESOURCE;
1052 goto done;
1053 }
1054 iod->descriptors[iod->nr_descriptors++] = prp_list;
1055
1056 i = 0;
1057 for (;;) {
1058 prp_list[i++] = cpu_to_le64(iter->addr);
1059 prp_len = min(length, NVME_CTRL_PAGE_SIZE);
1060 if (WARN_ON_ONCE(iter->len < prp_len))
1061 goto bad_sgl;
1062
1063 iod->total_len += prp_len;
1064 iter->addr += prp_len;
1065 iter->len -= prp_len;
1066 length -= prp_len;
1067 if (!length)
1068 break;
1069
1070 if (!nvme_pci_prp_iter_next(req, nvmeq->dev->dev, iter)) {
1071 if (WARN_ON_ONCE(!iter->status))
1072 goto bad_sgl;
1073 goto done;
1074 }
1075
		/*
		 * If we've filled the entire descriptor, allocate a new one
		 * that is pointed to by the last entry in the previous PRP
		 * list. To accommodate that, move the last actual entry to
		 * the new descriptor.
		 */
1082 if (i == NVME_CTRL_PAGE_SIZE >> 3) {
1083 __le64 *old_prp_list = prp_list;
1084 dma_addr_t prp_list_dma;
1085
1086 prp_list = dma_pool_alloc(nvmeq->descriptor_pools.large,
1087 GFP_ATOMIC, &prp_list_dma);
1088 if (!prp_list) {
1089 iter->status = BLK_STS_RESOURCE;
1090 goto done;
1091 }
1092 iod->descriptors[iod->nr_descriptors++] = prp_list;
1093
1094 prp_list[0] = old_prp_list[i - 1];
1095 old_prp_list[i - 1] = cpu_to_le64(prp_list_dma);
1096 i = 1;
1097 }
1098 }
1099
1100 done:
	/*
	 * nvme_unmap_data uses the DPTR field in the SQE to tear down the
	 * mapping, so initialize it even for failures.
	 */
1105 iod->cmd.common.dptr.prp1 = cpu_to_le64(prp1_dma);
1106 iod->cmd.common.dptr.prp2 = cpu_to_le64(prp2_dma);
1107 if (unlikely(iter->status))
1108 nvme_unmap_data(req);
1109 return iter->status;
1110
1111 bad_sgl:
1112 dev_err_once(nvmeq->dev->dev,
1113 "Incorrectly formed request for payload:%d nents:%d\n",
1114 blk_rq_payload_bytes(req), blk_rq_nr_phys_segments(req));
1115 return BLK_STS_IOERR;
1116 }
1117
static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
		struct blk_dma_iter *iter)
1120 {
1121 sge->addr = cpu_to_le64(iter->addr);
1122 sge->length = cpu_to_le32(iter->len);
1123 sge->type = NVME_SGL_FMT_DATA_DESC << 4;
1124 }
1125
static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
		dma_addr_t dma_addr, int entries)
1128 {
1129 sge->addr = cpu_to_le64(dma_addr);
1130 sge->length = cpu_to_le32(entries * sizeof(*sge));
1131 sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
1132 }
1133
static blk_status_t nvme_pci_setup_data_sgl(struct request *req,
		struct blk_dma_iter *iter)
1136 {
1137 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1138 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1139 unsigned int entries = blk_rq_nr_phys_segments(req);
1140 struct nvme_sgl_desc *sg_list;
1141 dma_addr_t sgl_dma;
1142 unsigned int mapped = 0;
1143
1144 /* set the transfer type as SGL */
1145 iod->cmd.common.flags = NVME_CMD_SGL_METABUF;
1146
1147 if (entries == 1 || blk_rq_dma_map_coalesce(&iod->dma_state)) {
1148 nvme_pci_sgl_set_data(&iod->cmd.common.dptr.sgl, iter);
1149 iod->total_len += iter->len;
1150 return BLK_STS_OK;
1151 }
1152
1153 if (entries <= NVME_SMALL_POOL_SIZE / sizeof(*sg_list))
1154 iod->flags |= IOD_SMALL_DESCRIPTOR;
1155
1156 sg_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC,
1157 &sgl_dma);
1158 if (!sg_list)
1159 return BLK_STS_RESOURCE;
1160 iod->descriptors[iod->nr_descriptors++] = sg_list;
1161
1162 do {
1163 if (WARN_ON_ONCE(mapped == entries)) {
1164 iter->status = BLK_STS_IOERR;
1165 break;
1166 }
1167 nvme_pci_sgl_set_data(&sg_list[mapped++], iter);
1168 iod->total_len += iter->len;
1169 } while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, iter));
1170
1171 nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped);
1172 if (unlikely(iter->status))
1173 nvme_unmap_data(req);
1174 return iter->status;
1175 }
1176
static blk_status_t nvme_pci_setup_data_simple(struct request *req,
		enum nvme_use_sgl use_sgl)
1179 {
1180 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1181 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1182 struct bio_vec bv = req_bvec(req);
1183 unsigned int prp1_offset = bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
1184 bool prp_possible = prp1_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2;
1185 dma_addr_t dma_addr;
1186
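	/*
	 * A single bvec is describable by PRP1 (and optionally PRP2) only if
	 * it fits within two controller pages starting at its offset.  When
	 * that fails and SGLs are not usable, or for P2P pages, return
	 * BLK_STS_AGAIN so the caller falls back to the full DMA iterator.
	 */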
1187 if (!use_sgl && !prp_possible)
1188 return BLK_STS_AGAIN;
1189 if (is_pci_p2pdma_page(bv.bv_page))
1190 return BLK_STS_AGAIN;
1191
1192 dma_addr = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0);
1193 if (dma_mapping_error(nvmeq->dev->dev, dma_addr))
1194 return BLK_STS_RESOURCE;
1195 iod->total_len = bv.bv_len;
1196 iod->flags |= IOD_SINGLE_SEGMENT;
1197
1198 if (use_sgl == SGL_FORCED || !prp_possible) {
1199 iod->cmd.common.flags = NVME_CMD_SGL_METABUF;
1200 iod->cmd.common.dptr.sgl.addr = cpu_to_le64(dma_addr);
1201 iod->cmd.common.dptr.sgl.length = cpu_to_le32(bv.bv_len);
1202 iod->cmd.common.dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
1203 } else {
1204 unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - prp1_offset;
1205
1206 iod->cmd.common.dptr.prp1 = cpu_to_le64(dma_addr);
1207 iod->cmd.common.dptr.prp2 = 0;
1208 if (bv.bv_len > first_prp_len)
1209 iod->cmd.common.dptr.prp2 =
1210 cpu_to_le64(dma_addr + first_prp_len);
1211 }
1212
1213 return BLK_STS_OK;
1214 }
1215
static blk_status_t nvme_map_data(struct request *req)
1217 {
1218 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1219 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1220 struct nvme_dev *dev = nvmeq->dev;
1221 enum nvme_use_sgl use_sgl = nvme_pci_use_sgls(dev, req);
1222 struct blk_dma_iter iter;
1223 blk_status_t ret;
1224
	/*
	 * Try to skip the DMA iterator for single segment requests, as that
	 * significantly improves performance for small I/O sizes.
	 */
1229 if (blk_rq_nr_phys_segments(req) == 1) {
1230 ret = nvme_pci_setup_data_simple(req, use_sgl);
1231 if (ret != BLK_STS_AGAIN)
1232 return ret;
1233 }
1234
1235 if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter))
1236 return iter.status;
1237
1238 switch (iter.p2pdma.map) {
1239 case PCI_P2PDMA_MAP_BUS_ADDR:
1240 iod->flags |= IOD_DATA_P2P;
1241 break;
1242 case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
1243 iod->flags |= IOD_DATA_MMIO;
1244 break;
1245 case PCI_P2PDMA_MAP_NONE:
1246 break;
1247 default:
1248 return BLK_STS_RESOURCE;
1249 }
1250
1251 if (use_sgl == SGL_FORCED ||
1252 (use_sgl == SGL_SUPPORTED &&
1253 (sgl_threshold && nvme_pci_avg_seg_size(req) >= sgl_threshold)))
1254 return nvme_pci_setup_data_sgl(req, &iter);
1255 return nvme_pci_setup_data_prp(req, &iter);
1256 }
1257
static blk_status_t nvme_pci_setup_meta_iter(struct request *req)
1259 {
1260 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1261 unsigned int entries = req->nr_integrity_segments;
1262 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1263 struct nvme_dev *dev = nvmeq->dev;
1264 struct nvme_sgl_desc *sg_list;
1265 struct blk_dma_iter iter;
1266 dma_addr_t sgl_dma;
1267 int i = 0;
1268
1269 if (!blk_rq_integrity_dma_map_iter_start(req, dev->dev,
1270 &iod->meta_dma_state, &iter))
1271 return iter.status;
1272
1273 switch (iter.p2pdma.map) {
1274 case PCI_P2PDMA_MAP_BUS_ADDR:
1275 iod->flags |= IOD_META_P2P;
1276 break;
1277 case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
1278 iod->flags |= IOD_META_MMIO;
1279 break;
1280 case PCI_P2PDMA_MAP_NONE:
1281 break;
1282 default:
1283 return BLK_STS_RESOURCE;
1284 }
1285
1286 if (blk_rq_dma_map_coalesce(&iod->meta_dma_state))
1287 entries = 1;
1288
	/*
	 * The NVMe MPTR descriptor has an implicit length that the host and
	 * device must agree on to avoid data/memory corruption. We trust that
	 * the kernel allocated the buffer correctly based on the format's
	 * parameters, so use the more efficient MPTR to avoid extra dma pool
	 * allocations for the SGL indirection.
	 *
	 * But for user commands, we don't necessarily know what they do, so
	 * the driver can't validate the metadata buffer size. The SGL
	 * descriptor provides an explicit length, so we're relying on that
	 * mechanism to catch any misunderstandings between the application and
	 * device.
	 *
	 * P2P DMA also needs to use the blk_dma_iter method, so mptr setup
	 * leverages this routine when that happens.
	 */
1305 if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl) ||
1306 (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD))) {
1307 iod->cmd.common.metadata = cpu_to_le64(iter.addr);
1308 iod->meta_total_len = iter.len;
1309 iod->meta_dma = iter.addr;
1310 iod->meta_descriptor = NULL;
1311 return BLK_STS_OK;
1312 }
1313
1314 sg_list = dma_pool_alloc(nvmeq->descriptor_pools.small, GFP_ATOMIC,
1315 &sgl_dma);
1316 if (!sg_list)
1317 return BLK_STS_RESOURCE;
1318
1319 iod->meta_descriptor = sg_list;
1320 iod->meta_dma = sgl_dma;
1321 iod->cmd.common.flags = NVME_CMD_SGL_METASEG;
1322 iod->cmd.common.metadata = cpu_to_le64(sgl_dma);
1323 if (entries == 1) {
1324 iod->meta_total_len = iter.len;
1325 nvme_pci_sgl_set_data(sg_list, &iter);
1326 return BLK_STS_OK;
1327 }
1328
1329 sgl_dma += sizeof(*sg_list);
1330 do {
1331 nvme_pci_sgl_set_data(&sg_list[++i], &iter);
1332 iod->meta_total_len += iter.len;
1333 } while (blk_rq_integrity_dma_map_iter_next(req, dev->dev, &iter));
1334
1335 nvme_pci_sgl_set_seg(sg_list, sgl_dma, i);
1336 if (unlikely(iter.status))
1337 nvme_unmap_metadata(req);
1338 return iter.status;
1339 }
1340
static blk_status_t nvme_pci_setup_meta_mptr(struct request *req)
1342 {
1343 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1344 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1345 struct bio_vec bv = rq_integrity_vec(req);
1346
1347 if (is_pci_p2pdma_page(bv.bv_page))
1348 return nvme_pci_setup_meta_iter(req);
1349
1350 iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0);
1351 if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma))
1352 return BLK_STS_IOERR;
1353 iod->cmd.common.metadata = cpu_to_le64(iod->meta_dma);
1354 iod->flags |= IOD_SINGLE_META_SEGMENT;
1355 return BLK_STS_OK;
1356 }
1357
static blk_status_t nvme_map_metadata(struct request *req)
1359 {
1360 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1361
1362 if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) &&
1363 nvme_pci_metadata_use_sgls(req))
1364 return nvme_pci_setup_meta_iter(req);
1365 return nvme_pci_setup_meta_mptr(req);
1366 }
1367
static blk_status_t nvme_prep_rq(struct request *req)
1369 {
1370 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1371 blk_status_t ret;
1372
1373 iod->flags = 0;
1374 iod->nr_descriptors = 0;
1375 iod->total_len = 0;
1376 iod->meta_total_len = 0;
1377 iod->nr_dma_vecs = 0;
1378
1379 ret = nvme_setup_cmd(req->q->queuedata, req);
1380 if (ret)
1381 return ret;
1382
1383 if (blk_rq_nr_phys_segments(req)) {
1384 ret = nvme_map_data(req);
1385 if (ret)
1386 goto out_free_cmd;
1387 }
1388
1389 if (blk_integrity_rq(req)) {
1390 ret = nvme_map_metadata(req);
1391 if (ret)
1392 goto out_unmap_data;
1393 }
1394
1395 nvme_start_request(req);
1396 return BLK_STS_OK;
1397 out_unmap_data:
1398 if (blk_rq_nr_phys_segments(req))
1399 nvme_unmap_data(req);
1400 out_free_cmd:
1401 nvme_cleanup_cmd(req);
1402 return ret;
1403 }
1404
static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
1407 {
1408 struct nvme_queue *nvmeq = hctx->driver_data;
1409 struct nvme_dev *dev = nvmeq->dev;
1410 struct request *req = bd->rq;
1411 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1412 blk_status_t ret;
1413
1414 /*
1415 * We should not need to do this, but we're still using this to
1416 * ensure we can drain requests on a dying queue.
1417 */
1418 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
1419 return BLK_STS_IOERR;
1420
1421 if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
1422 return nvme_fail_nonready_command(&dev->ctrl, req);
1423
1424 ret = nvme_prep_rq(req);
1425 if (unlikely(ret))
1426 return ret;
1427 spin_lock(&nvmeq->sq_lock);
1428 nvme_sq_copy_cmd(nvmeq, &iod->cmd);
1429 nvme_write_sq_db(nvmeq, bd->last);
1430 spin_unlock(&nvmeq->sq_lock);
1431 return BLK_STS_OK;
1432 }
1433
static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct rq_list *rqlist)
1435 {
1436 struct request *req;
1437
1438 if (rq_list_empty(rqlist))
1439 return;
1440
1441 spin_lock(&nvmeq->sq_lock);
1442 while ((req = rq_list_pop(rqlist))) {
1443 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1444
1445 nvme_sq_copy_cmd(nvmeq, &iod->cmd);
1446 }
1447 nvme_write_sq_db(nvmeq, true);
1448 spin_unlock(&nvmeq->sq_lock);
1449 }
1450
static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
1452 {
1453 /*
1454 * We should not need to do this, but we're still using this to
1455 * ensure we can drain requests on a dying queue.
1456 */
1457 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
1458 return false;
1459 if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
1460 return false;
1461
1462 return nvme_prep_rq(req) == BLK_STS_OK;
1463 }
1464
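/*
 * Submit a plugged batch of requests.  Requests are grouped per nvme_queue:
 * whenever the next request targets a different queue, the commands gathered
 * so far are copied to that queue's SQ under a single doorbell write, and
 * anything that fails preparation is handed back for requeueing.
 */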
static void nvme_queue_rqs(struct rq_list *rqlist)
1466 {
1467 struct rq_list submit_list = { };
1468 struct rq_list requeue_list = { };
1469 struct nvme_queue *nvmeq = NULL;
1470 struct request *req;
1471
1472 while ((req = rq_list_pop(rqlist))) {
1473 if (nvmeq && nvmeq != req->mq_hctx->driver_data)
1474 nvme_submit_cmds(nvmeq, &submit_list);
1475 nvmeq = req->mq_hctx->driver_data;
1476
1477 if (nvme_prep_rq_batch(nvmeq, req))
1478 rq_list_add_tail(&submit_list, req);
1479 else
1480 rq_list_add_tail(&requeue_list, req);
1481 }
1482
1483 if (nvmeq)
1484 nvme_submit_cmds(nvmeq, &submit_list);
1485 *rqlist = requeue_list;
1486 }
1487
static __always_inline void nvme_pci_unmap_rq(struct request *req)
1489 {
1490 if (blk_integrity_rq(req))
1491 nvme_unmap_metadata(req);
1492 if (blk_rq_nr_phys_segments(req))
1493 nvme_unmap_data(req);
1494 }
1495
static void nvme_pci_complete_rq(struct request *req)
1497 {
1498 nvme_pci_unmap_rq(req);
1499 nvme_complete_rq(req);
1500 }
1501
static void nvme_pci_complete_batch(struct io_comp_batch *iob)
1503 {
1504 nvme_complete_batch(iob, nvme_pci_unmap_rq);
1505 }
1506
1507 /* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
1509 {
1510 struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];
1511
1512 return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
1513 }
1514
static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
1516 {
1517 u16 head = nvmeq->cq_head;
1518
1519 if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
1520 nvmeq->dbbuf_cq_ei))
1521 writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
1522 }
1523
static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
1525 {
1526 if (!nvmeq->qid)
1527 return nvmeq->dev->admin_tagset.tags[0];
1528 return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
1529 }
1530
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
		struct io_comp_batch *iob, u16 idx)
1533 {
1534 struct nvme_completion *cqe = &nvmeq->cqes[idx];
1535 __u16 command_id = READ_ONCE(cqe->command_id);
1536 struct request *req;
1537
1538 /*
1539 * AEN requests are special as they don't time out and can
1540 * survive any kind of queue freeze and often don't respond to
1541 * aborts. We don't even bother to allocate a struct request
1542 * for them but rather special case them here.
1543 */
1544 if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
1545 nvme_complete_async_event(&nvmeq->dev->ctrl,
1546 cqe->status, &cqe->result);
1547 return;
1548 }
1549
1550 req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
1551 if (unlikely(!req)) {
1552 dev_warn(nvmeq->dev->ctrl.device,
1553 "invalid id %d completed on queue %d\n",
1554 command_id, le16_to_cpu(cqe->sq_id));
1555 return;
1556 }
1557
1558 trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
1559 if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
1560 !blk_mq_add_to_batch(req, iob,
1561 nvme_req(req)->status != NVME_SC_SUCCESS,
1562 nvme_pci_complete_batch))
1563 nvme_pci_complete_rq(req);
1564 }
1565
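/*
 * Advance the CQ head; when it wraps back to entry 0 the expected phase bit
 * is toggled so that entries left over from the previous pass are not
 * mistaken for new completions.
 */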
static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
1567 {
1568 u32 tmp = nvmeq->cq_head + 1;
1569
1570 if (tmp == nvmeq->q_depth) {
1571 nvmeq->cq_head = 0;
1572 nvmeq->cq_phase ^= 1;
1573 } else {
1574 nvmeq->cq_head = tmp;
1575 }
1576 }
1577
static inline bool nvme_poll_cq(struct nvme_queue *nvmeq,
		struct io_comp_batch *iob)
1580 {
1581 bool found = false;
1582
1583 while (nvme_cqe_pending(nvmeq)) {
1584 found = true;
1585 /*
1586 * load-load control dependency between phase and the rest of
1587 * the cqe requires a full read memory barrier
1588 */
1589 dma_rmb();
1590 nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head);
1591 nvme_update_cq_head(nvmeq);
1592 }
1593
1594 if (found)
1595 nvme_ring_cq_doorbell(nvmeq);
1596 return found;
1597 }
1598
static irqreturn_t nvme_irq(int irq, void *data)
1600 {
1601 struct nvme_queue *nvmeq = data;
1602 DEFINE_IO_COMP_BATCH(iob);
1603
1604 if (nvme_poll_cq(nvmeq, &iob)) {
1605 if (!rq_list_empty(&iob.req_list))
1606 nvme_pci_complete_batch(&iob);
1607 return IRQ_HANDLED;
1608 }
1609 return IRQ_NONE;
1610 }
1611
static irqreturn_t nvme_irq_check(int irq, void *data)
1613 {
1614 struct nvme_queue *nvmeq = data;
1615
1616 if (nvme_cqe_pending(nvmeq))
1617 return IRQ_WAKE_THREAD;
1618 return IRQ_NONE;
1619 }
1620
1621 /*
1622 * Poll for completions for any interrupt driven queue
1623 * Can be called from any context.
1624 */
static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
1626 {
1627 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
1628 int irq;
1629
1630 WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
1631
1632 irq = pci_irq_vector(pdev, nvmeq->cq_vector);
1633 disable_irq(irq);
1634 spin_lock(&nvmeq->cq_poll_lock);
1635 nvme_poll_cq(nvmeq, NULL);
1636 spin_unlock(&nvmeq->cq_poll_lock);
1637 enable_irq(irq);
1638 }
1639
static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
1641 {
1642 struct nvme_queue *nvmeq = hctx->driver_data;
1643 bool found;
1644
1645 if (!test_bit(NVMEQ_POLLED, &nvmeq->flags) ||
1646 !nvme_cqe_pending(nvmeq))
1647 return 0;
1648
1649 spin_lock(&nvmeq->cq_poll_lock);
1650 found = nvme_poll_cq(nvmeq, iob);
1651 spin_unlock(&nvmeq->cq_poll_lock);
1652
1653 return found;
1654 }
1655
static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
1657 {
1658 struct nvme_dev *dev = to_nvme_dev(ctrl);
1659 struct nvme_queue *nvmeq = &dev->queues[0];
1660 struct nvme_command c = { };
1661
1662 c.common.opcode = nvme_admin_async_event;
1663 c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
1664
1665 spin_lock(&nvmeq->sq_lock);
1666 nvme_sq_copy_cmd(nvmeq, &c);
1667 nvme_write_sq_db(nvmeq, true);
1668 spin_unlock(&nvmeq->sq_lock);
1669 }
1670
static int nvme_pci_subsystem_reset(struct nvme_ctrl *ctrl)
1672 {
1673 struct nvme_dev *dev = to_nvme_dev(ctrl);
1674 int ret = 0;
1675
1676 /*
1677 * Taking the shutdown_lock ensures the BAR mapping is not being
1678 * altered by reset_work. Holding this lock before the RESETTING state
1679 * change, if successful, also ensures nvme_remove won't be able to
1680 * proceed to iounmap until we're done.
1681 */
1682 mutex_lock(&dev->shutdown_lock);
1683 if (!dev->bar_mapped_size) {
1684 ret = -ENODEV;
1685 goto unlock;
1686 }
1687
1688 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
1689 ret = -EBUSY;
1690 goto unlock;
1691 }
1692
1693 writel(NVME_SUBSYS_RESET, dev->bar + NVME_REG_NSSR);
1694
1695 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) ||
1696 !nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
1697 goto unlock;
1698
	/*
	 * Read controller status to flush the previous write and trigger a
	 * PCIe read error.
	 */
1703 readl(dev->bar + NVME_REG_CSTS);
1704 unlock:
1705 mutex_unlock(&dev->shutdown_lock);
1706 return ret;
1707 }
1708
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
1710 {
1711 struct nvme_command c = { };
1712
1713 c.delete_queue.opcode = opcode;
1714 c.delete_queue.qid = cpu_to_le16(id);
1715
1716 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1717 }
1718
static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
		struct nvme_queue *nvmeq, s16 vector)
1721 {
1722 struct nvme_command c = { };
1723 int flags = NVME_QUEUE_PHYS_CONTIG;
1724
1725 if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
1726 flags |= NVME_CQ_IRQ_ENABLED;
1727
1728 /*
1729 * Note: we (ab)use the fact that the prp fields survive if no data
1730 * is attached to the request.
1731 */
1732 c.create_cq.opcode = nvme_admin_create_cq;
1733 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
1734 c.create_cq.cqid = cpu_to_le16(qid);
1735 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1736 c.create_cq.cq_flags = cpu_to_le16(flags);
1737 c.create_cq.irq_vector = cpu_to_le16(vector);
1738
1739 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1740 }
1741
static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
		struct nvme_queue *nvmeq)
1744 {
1745 struct nvme_ctrl *ctrl = &dev->ctrl;
1746 struct nvme_command c = { };
1747 int flags = NVME_QUEUE_PHYS_CONTIG;
1748
	/*
	 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
	 * set. Since the URGENT priority value is zero, that makes all
	 * queues URGENT.
	 */
1754 if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
1755 flags |= NVME_SQ_PRIO_MEDIUM;
1756
1757 /*
1758 * Note: we (ab)use the fact that the prp fields survive if no data
1759 * is attached to the request.
1760 */
1761 c.create_sq.opcode = nvme_admin_create_sq;
1762 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
1763 c.create_sq.sqid = cpu_to_le16(qid);
1764 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1765 c.create_sq.sq_flags = cpu_to_le16(flags);
1766 c.create_sq.cqid = cpu_to_le16(qid);
1767
1768 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1769 }
1770
static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
1772 {
1773 return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
1774 }
1775
static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
1777 {
1778 return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
1779 }
1780
static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error,
		const struct io_comp_batch *iob)
1783 {
1784 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1785
1786 dev_warn(nvmeq->dev->ctrl.device,
1787 "Abort status: 0x%x", nvme_req(req)->status);
1788 atomic_inc(&nvmeq->dev->ctrl.abort_limit);
1789 blk_mq_free_request(req);
1790 return RQ_END_IO_NONE;
1791 }
1792
static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
1794 {
	/* If true, indicates loss of adapter communication, possibly by an
	 * NVMe Subsystem reset.
	 */
1798 bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
1799
1800 /* If there is a reset/reinit ongoing, we shouldn't reset again. */
1801 switch (nvme_ctrl_state(&dev->ctrl)) {
1802 case NVME_CTRL_RESETTING:
1803 case NVME_CTRL_CONNECTING:
1804 return false;
1805 default:
1806 break;
1807 }
1808
1809 /* We shouldn't reset unless the controller is in a fatal error state
1810 * _or_ we have lost communication with it.
1811 */
1812 if (!(csts & NVME_CSTS_CFS) && !nssro)
1813 return false;
1814
1815 return true;
1816 }
1817
1818 static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
1819 {
1820 /* Read a config register to help see what died. */
1821 u16 pci_status;
1822 int result;
1823
1824 result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
1825 &pci_status);
1826 if (result == PCIBIOS_SUCCESSFUL)
1827 dev_warn(dev->ctrl.device,
1828 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
1829 csts, pci_status);
1830 else
1831 dev_warn(dev->ctrl.device,
1832 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
1833 csts, result);
1834
1835 if (csts != ~0)
1836 return;
1837
1838 dev_warn(dev->ctrl.device,
1839 "Does your device have a faulty power saving mode enabled?\n");
1840 dev_warn(dev->ctrl.device,
1841 "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off pcie_port_pm=off\" and report a bug\n");
1842 }
1843
1844 static enum blk_eh_timer_return nvme_timeout(struct request *req)
1845 {
1846 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1847 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1848 struct nvme_dev *dev = nvmeq->dev;
1849 struct request *abort_req;
1850 struct nvme_command cmd = { };
1851 struct pci_dev *pdev = to_pci_dev(dev->dev);
1852 u32 csts = readl(dev->bar + NVME_REG_CSTS);
1853 u8 opcode;
1854
1855 /*
1856 * Shutdown the device immediately if we see it is disconnected. This
1857 * unblocks PCIe error handling if the nvme driver is waiting in
1858 * error_resume for a device that has been removed. We can't unbind the
1859 * driver while the driver's error callback is waiting to complete, so
1860 * we're relying on a timeout to break that deadlock if a removal
1861 * occurs while reset work is running.
1862 */
1863 if (pci_dev_is_disconnected(pdev))
1864 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
1865 if (nvme_state_terminal(&dev->ctrl))
1866 goto disable;
1867
1868 /* If PCI error recovery process is happening, we cannot reset or
1869 * the recovery mechanism will surely fail.
1870 */
1871 mb();
1872 if (pci_channel_offline(pdev))
1873 return BLK_EH_RESET_TIMER;
1874
1875 /*
1876 * Reset immediately if the controller is failed
1877 */
1878 if (nvme_should_reset(dev, csts)) {
1879 nvme_warn_reset(dev, csts);
1880 goto disable;
1881 }
1882
1883 /*
1884 * Did we miss an interrupt?
1885 */
1886 if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
1887 nvme_poll(req->mq_hctx, NULL);
1888 else
1889 nvme_poll_irqdisable(nvmeq);
1890
1891 if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
1892 dev_warn(dev->ctrl.device,
1893 "I/O tag %d (%04x) QID %d timeout, completion polled\n",
1894 req->tag, nvme_cid(req), nvmeq->qid);
1895 return BLK_EH_DONE;
1896 }
1897
1898 /*
1899 * Shutdown immediately if controller times out while starting. The
1900 * reset work will see the pci device disabled when it gets the forced
1901 * cancellation error. All outstanding requests are completed on
1902 * shutdown, so we return BLK_EH_DONE.
1903 */
1904 switch (nvme_ctrl_state(&dev->ctrl)) {
1905 case NVME_CTRL_CONNECTING:
1906 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
1907 fallthrough;
1908 case NVME_CTRL_DELETING:
1909 dev_warn_ratelimited(dev->ctrl.device,
1910 "I/O tag %d (%04x) QID %d timeout, disable controller\n",
1911 req->tag, nvme_cid(req), nvmeq->qid);
1912 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1913 nvme_dev_disable(dev, true);
1914 return BLK_EH_DONE;
1915 case NVME_CTRL_RESETTING:
1916 return BLK_EH_RESET_TIMER;
1917 default:
1918 break;
1919 }
1920
1921 /*
1922 * Shutdown the controller immediately and schedule a reset if the
1923 * command was already aborted once before and still hasn't been
1924 * returned to the driver, or if this is the admin queue.
1925 */
1926 opcode = nvme_req(req)->cmd->common.opcode;
1927 if (!nvmeq->qid || (iod->flags & IOD_ABORTED)) {
1928 dev_warn(dev->ctrl.device,
1929 "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n",
1930 req->tag, nvme_cid(req), opcode,
1931 nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid);
1932 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1933 goto disable;
1934 }
1935
1936 if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
1937 atomic_inc(&dev->ctrl.abort_limit);
1938 return BLK_EH_RESET_TIMER;
1939 }
1940 iod->flags |= IOD_ABORTED;
1941
1942 cmd.abort.opcode = nvme_admin_abort_cmd;
1943 cmd.abort.cid = nvme_cid(req);
1944 cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
1945
1946 dev_warn(nvmeq->dev->ctrl.device,
1947 "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, aborting req_op:%s(%u) size:%u\n",
1948 req->tag, nvme_cid(req), opcode, nvme_get_opcode_str(opcode),
1949 nvmeq->qid, blk_op_str(req_op(req)), req_op(req),
1950 blk_rq_bytes(req));
1951
1952 abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
1953 BLK_MQ_REQ_NOWAIT);
1954 if (IS_ERR(abort_req)) {
1955 atomic_inc(&dev->ctrl.abort_limit);
1956 return BLK_EH_RESET_TIMER;
1957 }
1958 nvme_init_request(abort_req, &cmd);
1959
1960 abort_req->end_io = abort_endio;
1961 abort_req->end_io_data = NULL;
1962 blk_execute_rq_nowait(abort_req, false);
1963
1964 /*
1965 * The aborted req will be completed on receiving the abort req.
1966 * We enable the timer again. If hit twice, it'll cause a device reset,
1967 * as the device then is in a faulty state.
1968 */
1969 return BLK_EH_RESET_TIMER;
1970
1971 disable:
1972 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
1973 if (nvme_state_terminal(&dev->ctrl))
1974 nvme_dev_disable(dev, true);
1975 return BLK_EH_DONE;
1976 }
1977
1978 nvme_dev_disable(dev, false);
1979 if (nvme_try_sched_reset(&dev->ctrl))
1980 nvme_unquiesce_io_queues(&dev->ctrl);
1981 return BLK_EH_DONE;
1982 }
1983
1984 static void nvme_free_queue(struct nvme_queue *nvmeq)
1985 {
1986 dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
1987 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
1988 if (!nvmeq->sq_cmds)
1989 return;
1990
1991 if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
1992 pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
1993 nvmeq->sq_cmds, SQ_SIZE(nvmeq));
1994 } else {
1995 dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
1996 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
1997 }
1998 }
1999
2000 static void nvme_free_queues(struct nvme_dev *dev, int lowest)
2001 {
2002 int i;
2003
2004 for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
2005 dev->ctrl.queue_count--;
2006 nvme_free_queue(&dev->queues[i]);
2007 }
2008 }
2009
2010 static void nvme_suspend_queue(struct nvme_dev *dev, unsigned int qid)
2011 {
2012 struct nvme_queue *nvmeq = &dev->queues[qid];
2013
2014 if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
2015 return;
2016
2017 /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */
2018 mb();
2019
2020 nvmeq->dev->online_queues--;
2021 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
2022 nvme_quiesce_admin_queue(&nvmeq->dev->ctrl);
2023 if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
2024 pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq);
2025 }
2026
2027 static void nvme_suspend_io_queues(struct nvme_dev *dev)
2028 {
2029 int i;
2030
2031 for (i = dev->ctrl.queue_count - 1; i > 0; i--)
2032 nvme_suspend_queue(dev, i);
2033 }
2034
2035 /*
2036 * Called only on a device that has been disabled and after all other threads
2037 * that can check this device's completion queues have synced, except
2038 * nvme_poll(). This is the last chance for the driver to see a natural
2039 * completion before nvme_cancel_request() terminates all incomplete requests.
2040 */
2041 static void nvme_reap_pending_cqes(struct nvme_dev *dev)
2042 {
2043 int i;
2044
2045 for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
2046 spin_lock(&dev->queues[i].cq_poll_lock);
2047 nvme_poll_cq(&dev->queues[i], NULL);
2048 spin_unlock(&dev->queues[i].cq_poll_lock);
2049 }
2050 }
2051
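/*
 * Compute a queue depth that lets nr_io_queues submission queues fit into the
 * controller memory buffer. Returns -ENOMEM if the depth would have to shrink
 * below a useful minimum; the caller then falls back to queues in host memory
 * at the original depth.
 */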
2052 static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
2053 int entry_size)
2054 {
2055 int q_depth = dev->q_depth;
2056 unsigned q_size_aligned = roundup(q_depth * entry_size,
2057 NVME_CTRL_PAGE_SIZE);
2058
2059 if (q_size_aligned * nr_io_queues > dev->cmb_size) {
2060 u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
2061
2062 mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE);
2063 q_depth = div_u64(mem_per_q, entry_size);
2064
2065 /*
2066 * Ensure the reduced q_depth is above some threshold where it
2067 * would be better to map queues in system memory with the
2068 * original depth
2069 */
2070 if (q_depth < 64)
2071 return -ENOMEM;
2072 }
2073
2074 return q_depth;
2075 }
2076
2077 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
2078 int qid)
2079 {
2080 struct pci_dev *pdev = to_pci_dev(dev->dev);
2081
2082 if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
2083 nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
2084 if (nvmeq->sq_cmds) {
2085 nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
2086 nvmeq->sq_cmds);
2087 if (nvmeq->sq_dma_addr) {
2088 set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
2089 return 0;
2090 }
2091
2092 pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
2093 }
2094 }
2095
2096 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
2097 &nvmeq->sq_dma_addr, GFP_KERNEL);
2098 if (!nvmeq->sq_cmds)
2099 return -ENOMEM;
2100 return 0;
2101 }
2102
2103 static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
2104 {
2105 struct nvme_queue *nvmeq = &dev->queues[qid];
2106
2107 if (dev->ctrl.queue_count > qid)
2108 return 0;
2109
2110 nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES;
2111 nvmeq->q_depth = depth;
2112 nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
2113 &nvmeq->cq_dma_addr, GFP_KERNEL);
2114 if (!nvmeq->cqes)
2115 goto free_nvmeq;
2116
2117 if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
2118 goto free_cqdma;
2119
2120 nvmeq->dev = dev;
2121 spin_lock_init(&nvmeq->sq_lock);
2122 spin_lock_init(&nvmeq->cq_poll_lock);
2123 nvmeq->cq_head = 0;
2124 nvmeq->cq_phase = 1;
2125 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
2126 nvmeq->qid = qid;
2127 dev->ctrl.queue_count++;
2128
2129 return 0;
2130
2131 free_cqdma:
2132 dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
2133 nvmeq->cq_dma_addr);
2134 free_nvmeq:
2135 return -ENOMEM;
2136 }
2137
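/*
 * Request the interrupt for a queue. With use_threaded_interrupts the hard
 * handler (nvme_irq_check) only checks whether a completion is pending and
 * the actual processing runs in the threaded handler (nvme_irq).
 */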
2138 static int queue_request_irq(struct nvme_queue *nvmeq)
2139 {
2140 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
2141 int nr = nvmeq->dev->ctrl.instance;
2142
2143 if (use_threaded_interrupts) {
2144 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
2145 nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
2146 } else {
2147 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
2148 NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
2149 }
2150 }
2151
2152 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
2153 {
2154 struct nvme_dev *dev = nvmeq->dev;
2155
2156 nvmeq->sq_tail = 0;
2157 nvmeq->last_sq_tail = 0;
2158 nvmeq->cq_head = 0;
2159 nvmeq->cq_phase = 1;
2160 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
2161 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
2162 nvme_dbbuf_init(dev, nvmeq, qid);
2163 dev->online_queues++;
2164 wmb(); /* ensure the first interrupt sees the initialization */
2165 }
2166
2167 /*
2168 * Try getting shutdown_lock while setting up IO queues.
2169 */
2170 static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
2171 {
2172 /*
2173 * Give up if the lock is being held by nvme_dev_disable.
2174 */
2175 if (!mutex_trylock(&dev->shutdown_lock))
2176 return -ENODEV;
2177
2178 /*
2179 * Controller is in wrong state, fail early.
2180 */
2181 if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) {
2182 mutex_unlock(&dev->shutdown_lock);
2183 return -ENODEV;
2184 }
2185
2186 return 0;
2187 }
2188
2189 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
2190 {
2191 struct nvme_dev *dev = nvmeq->dev;
2192 int result;
2193 u16 vector = 0;
2194
2195 clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
2196
2197 /*
2198 * A queue's vector matches the queue identifier unless the controller
2199 * has only one vector available.
2200 */
2201 if (!polled)
2202 vector = dev->num_vecs == 1 ? 0 : qid;
2203 else
2204 set_bit(NVMEQ_POLLED, &nvmeq->flags);
2205
2206 result = adapter_alloc_cq(dev, qid, nvmeq, vector);
2207 if (result)
2208 return result;
2209
2210 result = adapter_alloc_sq(dev, qid, nvmeq);
2211 if (result < 0)
2212 return result;
2213 if (result)
2214 goto release_cq;
2215
2216 nvmeq->cq_vector = vector;
2217
2218 result = nvme_setup_io_queues_trylock(dev);
2219 if (result)
2220 return result;
2221 nvme_init_queue(nvmeq, qid);
2222 if (!polled) {
2223 result = queue_request_irq(nvmeq);
2224 if (result < 0)
2225 goto release_sq;
2226 }
2227
2228 set_bit(NVMEQ_ENABLED, &nvmeq->flags);
2229 mutex_unlock(&dev->shutdown_lock);
2230 return result;
2231
2232 release_sq:
2233 dev->online_queues--;
2234 mutex_unlock(&dev->shutdown_lock);
2235 adapter_delete_sq(dev, qid);
2236 release_cq:
2237 adapter_delete_cq(dev, qid);
2238 return result;
2239 }
2240
2241 static const struct blk_mq_ops nvme_mq_admin_ops = {
2242 .queue_rq = nvme_queue_rq,
2243 .complete = nvme_pci_complete_rq,
2244 .init_hctx = nvme_admin_init_hctx,
2245 .init_request = nvme_pci_init_request,
2246 .timeout = nvme_timeout,
2247 };
2248
2249 static const struct blk_mq_ops nvme_mq_ops = {
2250 .queue_rq = nvme_queue_rq,
2251 .queue_rqs = nvme_queue_rqs,
2252 .complete = nvme_pci_complete_rq,
2253 .commit_rqs = nvme_commit_rqs,
2254 .init_hctx = nvme_init_hctx,
2255 .init_request = nvme_pci_init_request,
2256 .map_queues = nvme_pci_map_queues,
2257 .timeout = nvme_timeout,
2258 .poll = nvme_poll,
2259 };
2260
2261 static void nvme_dev_remove_admin(struct nvme_dev *dev)
2262 {
2263 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
2264 /*
2265 * If the controller was reset during removal, it's possible
2266 * user requests may be waiting on a stopped queue. Start the
2267 * queue to flush these to completion.
2268 */
2269 nvme_unquiesce_admin_queue(&dev->ctrl);
2270 nvme_remove_admin_tag_set(&dev->ctrl);
2271 }
2272 }
2273
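/*
 * Size of the register window needed to cover the doorbells for nr_io_queues
 * I/O queues: the admin queue plus each I/O queue uses one submission and one
 * completion doorbell, spaced (4 << CAP.DSTRD) bytes apart, starting at offset
 * NVME_REG_DBS (0x1000). For example, with db_stride == 1 and 8 I/O queues
 * this is 4096 + 9 * 2 * 4 = 4168 bytes.
 */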
2274 static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
2275 {
2276 return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
2277 }
2278
2279 static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
2280 {
2281 struct pci_dev *pdev = to_pci_dev(dev->dev);
2282
2283 if (size <= dev->bar_mapped_size)
2284 return 0;
2285 if (size > pci_resource_len(pdev, 0))
2286 return -ENOMEM;
2287 if (dev->bar)
2288 iounmap(dev->bar);
2289 dev->bar = ioremap(pci_resource_start(pdev, 0), size);
2290 if (!dev->bar) {
2291 dev->bar_mapped_size = 0;
2292 return -ENOMEM;
2293 }
2294 dev->bar_mapped_size = size;
2295 dev->dbs = dev->bar + NVME_REG_DBS;
2296
2297 return 0;
2298 }
2299
2300 static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
2301 {
2302 int result;
2303 u32 aqa;
2304 struct nvme_queue *nvmeq;
2305
2306 result = nvme_remap_bar(dev, db_bar_size(dev, 0));
2307 if (result < 0)
2308 return result;
2309
2310 dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
2311 NVME_CAP_NSSRC(dev->ctrl.cap) : 0;
2312
2313 if (dev->subsystem &&
2314 (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
2315 writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);
2316
2317 /*
2318 * If the device has been passed off to us in an enabled state, just
2319 * clear the enabled bit. The spec says we should set the 'shutdown
2320 * notification bits', but doing so may cause the device to complete
2321 * commands to the admin queue ... and we don't know what memory that
2322 * might be pointing at!
2323 */
2324 result = nvme_disable_ctrl(&dev->ctrl, false);
2325 if (result < 0) {
2326 struct pci_dev *pdev = to_pci_dev(dev->dev);
2327
2328 /*
2329 * The NVMe Controller Reset method did not get an expected
2330 * CSTS.RDY transition, so something with the device appears to
2331 * be stuck. Use the lower level and bigger hammer PCIe
2332 * Function Level Reset to attempt restoring the device to its
2333 * initial state, and try again.
2334 */
2335 result = pcie_reset_flr(pdev, false);
2336 if (result < 0)
2337 return result;
2338
2339 pci_restore_state(pdev);
2340 result = nvme_disable_ctrl(&dev->ctrl, false);
2341 if (result < 0)
2342 return result;
2343
2344 dev_info(dev->ctrl.device,
2345 "controller reset completed after pcie flr\n");
2346 }
2347
2348 result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
2349 if (result)
2350 return result;
2351
2352 dev->ctrl.numa_node = dev_to_node(dev->dev);
2353
2354 nvmeq = &dev->queues[0];
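/* AQA packs the 0's based admin SQ and CQ sizes into its low and high halves */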
2355 aqa = nvmeq->q_depth - 1;
2356 aqa |= aqa << 16;
2357
2358 writel(aqa, dev->bar + NVME_REG_AQA);
2359 lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
2360 lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
2361
2362 result = nvme_enable_ctrl(&dev->ctrl);
2363 if (result)
2364 return result;
2365
2366 nvmeq->cq_vector = 0;
2367 nvme_init_queue(nvmeq, 0);
2368 result = queue_request_irq(nvmeq);
2369 if (result) {
2370 dev->online_queues--;
2371 return result;
2372 }
2373
2374 set_bit(NVMEQ_ENABLED, &nvmeq->flags);
2375 return result;
2376 }
2377
2378 static int nvme_create_io_queues(struct nvme_dev *dev)
2379 {
2380 unsigned i, max, rw_queues;
2381 int ret = 0;
2382
2383 for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
2384 if (nvme_alloc_queue(dev, i, dev->q_depth)) {
2385 ret = -ENOMEM;
2386 break;
2387 }
2388 }
2389
2390 max = min(dev->max_qid, dev->ctrl.queue_count - 1);
2391 if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) {
2392 rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] +
2393 dev->io_queues[HCTX_TYPE_READ];
2394 } else {
2395 rw_queues = max;
2396 }
2397
2398 for (i = dev->online_queues; i <= max; i++) {
2399 bool polled = i > rw_queues;
2400
2401 ret = nvme_create_queue(&dev->queues[i], i, polled);
2402 if (ret)
2403 break;
2404 }
2405
2406 /*
2407 * Ignore failing Create SQ/CQ commands: we can continue with fewer
2408 * than the desired number of queues, and even a controller without
2409 * I/O queues can still be used to issue admin commands. This might
2410 * be useful to upgrade buggy firmware, for example.
2411 */
2412 return ret >= 0 ? 0 : ret;
2413 }
2414
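/*
 * CMBSZ.SZU selects the unit of CMBSZ.SZ in powers of 16 starting at 4 KiB,
 * i.e. 4 KiB << (4 * SZU): 4 KiB, 64 KiB, 1 MiB, 16 MiB, ...
 */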
2415 static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
2416 {
2417 u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
2418
2419 return 1ULL << (12 + 4 * szu);
2420 }
2421
2422 static u32 nvme_cmb_size(struct nvme_dev *dev)
2423 {
2424 return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
2425 }
2426
2427 static void nvme_map_cmb(struct nvme_dev *dev)
2428 {
2429 u64 size, offset;
2430 resource_size_t bar_size;
2431 struct pci_dev *pdev = to_pci_dev(dev->dev);
2432 int bar;
2433
2434 if (dev->cmb_size)
2435 return;
2436
2437 if (NVME_CAP_CMBS(dev->ctrl.cap))
2438 writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
2439
2440 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
2441 if (!dev->cmbsz)
2442 return;
2443 dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
2444
2445 size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
2446 offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
2447 bar = NVME_CMB_BIR(dev->cmbloc);
2448 bar_size = pci_resource_len(pdev, bar);
2449
2450 if (offset > bar_size)
2451 return;
2452
2453 /*
2454 * Controllers may support a CMB size larger than their BAR, for
2455 * example, due to being behind a bridge. Reduce the CMB to the
2456 * reported size of the BAR
2457 */
2458 size = min(size, bar_size - offset);
2459
2460 if (!IS_ALIGNED(size, memremap_compat_align()) ||
2461 !IS_ALIGNED(pci_resource_start(pdev, bar),
2462 memremap_compat_align()))
2463 return;
2464
2465 /*
2466 * Tell the controller about the host side address mapping the CMB,
2467 * and enable CMB decoding for the NVMe 1.4+ scheme:
2468 */
2469 if (NVME_CAP_CMBS(dev->ctrl.cap)) {
2470 hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
2471 (pci_bus_address(pdev, bar) + offset),
2472 dev->bar + NVME_REG_CMBMSC);
2473 }
2474
2475 if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
2476 dev_warn(dev->ctrl.device,
2477 "failed to register the CMB\n");
2478 hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC);
2479 return;
2480 }
2481
2482 dev->cmb_size = size;
2483 dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS);
2484
2485 if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
2486 (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
2487 pci_p2pmem_publish(pdev, true);
2488 }
2489
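/*
 * Program the Host Memory Buffer via Set Features: "bits" carries the
 * enable/return flags while the remaining dwords describe the size (in
 * controller pages) and the descriptor list address and entry count.
 */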
2490 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
2491 {
2492 u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT;
2493 u64 dma_addr = dev->host_mem_descs_dma;
2494 struct nvme_command c = { };
2495 int ret;
2496
2497 c.features.opcode = nvme_admin_set_features;
2498 c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
2499 c.features.dword11 = cpu_to_le32(bits);
2500 c.features.dword12 = cpu_to_le32(host_mem_size);
2501 c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr));
2502 c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr));
2503 c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs);
2504
2505 ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
2506 if (ret) {
2507 dev_warn(dev->ctrl.device,
2508 "failed to set host mem (err %d, flags %#x).\n",
2509 ret, bits);
2510 } else
2511 dev->hmb = bits & NVME_HOST_MEM_ENABLE;
2512
2513 return ret;
2514 }
2515
2516 static void nvme_free_host_mem_multi(struct nvme_dev *dev)
2517 {
2518 int i;
2519
2520 for (i = 0; i < dev->nr_host_mem_descs; i++) {
2521 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
2522 size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE;
2523
2524 dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
2525 le64_to_cpu(desc->addr),
2526 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
2527 }
2528
2529 kfree(dev->host_mem_desc_bufs);
2530 dev->host_mem_desc_bufs = NULL;
2531 }
2532
2533 static void nvme_free_host_mem(struct nvme_dev *dev)
2534 {
2535 if (dev->hmb_sgt)
2536 dma_free_noncontiguous(dev->dev, dev->host_mem_size,
2537 dev->hmb_sgt, DMA_BIDIRECTIONAL);
2538 else
2539 nvme_free_host_mem_multi(dev);
2540
2541 dma_free_coherent(dev->dev, dev->host_mem_descs_size,
2542 dev->host_mem_descs, dev->host_mem_descs_dma);
2543 dev->host_mem_descs = NULL;
2544 dev->host_mem_descs_size = 0;
2545 dev->nr_host_mem_descs = 0;
2546 }
2547
2548 static int nvme_alloc_host_mem_single(struct nvme_dev *dev, u64 size)
2549 {
2550 dev->hmb_sgt = dma_alloc_noncontiguous(dev->dev, size,
2551 DMA_BIDIRECTIONAL, GFP_KERNEL, 0);
2552 if (!dev->hmb_sgt)
2553 return -ENOMEM;
2554
2555 dev->host_mem_descs = dma_alloc_coherent(dev->dev,
2556 sizeof(*dev->host_mem_descs), &dev->host_mem_descs_dma,
2557 GFP_KERNEL);
2558 if (!dev->host_mem_descs) {
2559 dma_free_noncontiguous(dev->dev, size, dev->hmb_sgt,
2560 DMA_BIDIRECTIONAL);
2561 dev->hmb_sgt = NULL;
2562 return -ENOMEM;
2563 }
2564 dev->host_mem_size = size;
2565 dev->host_mem_descs_size = sizeof(*dev->host_mem_descs);
2566 dev->nr_host_mem_descs = 1;
2567
2568 dev->host_mem_descs[0].addr =
2569 cpu_to_le64(dev->hmb_sgt->sgl->dma_address);
2570 dev->host_mem_descs[0].size = cpu_to_le32(size / NVME_CTRL_PAGE_SIZE);
2571 return 0;
2572 }
2573
2574 static int nvme_alloc_host_mem_multi(struct nvme_dev *dev, u64 preferred,
2575 u32 chunk_size)
2576 {
2577 struct nvme_host_mem_buf_desc *descs;
2578 u32 max_entries, len, descs_size;
2579 dma_addr_t descs_dma;
2580 int i = 0;
2581 void **bufs;
2582 u64 size, tmp;
2583
2584 tmp = (preferred + chunk_size - 1);
2585 do_div(tmp, chunk_size);
2586 max_entries = tmp;
2587
2588 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
2589 max_entries = dev->ctrl.hmmaxd;
2590
2591 descs_size = max_entries * sizeof(*descs);
2592 descs = dma_alloc_coherent(dev->dev, descs_size, &descs_dma,
2593 GFP_KERNEL);
2594 if (!descs)
2595 goto out;
2596
2597 bufs = kzalloc_objs(*bufs, max_entries);
2598 if (!bufs)
2599 goto out_free_descs;
2600
2601 for (size = 0; size < preferred && i < max_entries; size += len) {
2602 dma_addr_t dma_addr;
2603
2604 len = min_t(u64, chunk_size, preferred - size);
2605 bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
2606 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
2607 if (!bufs[i])
2608 break;
2609
2610 descs[i].addr = cpu_to_le64(dma_addr);
2611 descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE);
2612 i++;
2613 }
2614
2615 if (!size)
2616 goto out_free_bufs;
2617
2618 dev->nr_host_mem_descs = i;
2619 dev->host_mem_size = size;
2620 dev->host_mem_descs = descs;
2621 dev->host_mem_descs_dma = descs_dma;
2622 dev->host_mem_descs_size = descs_size;
2623 dev->host_mem_desc_bufs = bufs;
2624 return 0;
2625
2626 out_free_bufs:
2627 kfree(bufs);
2628 out_free_descs:
2629 dma_free_coherent(dev->dev, descs_size, descs, descs_dma);
2630 out:
2631 dev->host_mem_descs = NULL;
2632 return -ENOMEM;
2633 }
2634
2635 static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
2636 {
2637 unsigned long dma_merge_boundary = dma_get_merge_boundary(dev->dev);
2638 u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
2639 u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
2640 u64 chunk_size;
2641
2642 /*
2643 * If there is an IOMMU that can merge pages, try a virtually
2644 * non-contiguous allocation for a single segment first.
2645 */
2646 if (dma_merge_boundary && (PAGE_SIZE & dma_merge_boundary) == 0) {
2647 if (!nvme_alloc_host_mem_single(dev, preferred))
2648 return 0;
2649 }
2650
2651 /* start big and work our way down */
2652 for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) {
2653 if (!nvme_alloc_host_mem_multi(dev, preferred, chunk_size)) {
2654 if (!min || dev->host_mem_size >= min)
2655 return 0;
2656 nvme_free_host_mem(dev);
2657 }
2658 }
2659
2660 return -ENOMEM;
2661 }
2662
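/*
 * Size the host memory buffer from the controller's preferred (HMPRE) and
 * minimum (HMMIN) values, both reported in 4 KiB units, clamped by the
 * max_host_mem_size_mb module parameter.
 */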
2663 static int nvme_setup_host_mem(struct nvme_dev *dev)
2664 {
2665 u64 max = (u64)max_host_mem_size_mb * SZ_1M;
2666 u64 preferred = (u64)dev->ctrl.hmpre * 4096;
2667 u64 min = (u64)dev->ctrl.hmmin * 4096;
2668 u32 enable_bits = NVME_HOST_MEM_ENABLE;
2669 int ret;
2670
2671 if (!dev->ctrl.hmpre)
2672 return 0;
2673
2674 preferred = min(preferred, max);
2675 if (min > max) {
2676 dev_warn(dev->ctrl.device,
2677 "min host memory (%lld MiB) above limit (%d MiB).\n",
2678 min >> ilog2(SZ_1M), max_host_mem_size_mb);
2679 nvme_free_host_mem(dev);
2680 return 0;
2681 }
2682
2683 /*
2684 * If we already have a buffer allocated check if we can reuse it.
2685 */
2686 if (dev->host_mem_descs) {
2687 if (dev->host_mem_size >= min)
2688 enable_bits |= NVME_HOST_MEM_RETURN;
2689 else
2690 nvme_free_host_mem(dev);
2691 }
2692
2693 if (!dev->host_mem_descs) {
2694 if (nvme_alloc_host_mem(dev, min, preferred)) {
2695 dev_warn(dev->ctrl.device,
2696 "failed to allocate host memory buffer.\n");
2697 return 0; /* controller must work without HMB */
2698 }
2699
2700 dev_info(dev->ctrl.device,
2701 "allocated %lld MiB host memory buffer (%u segment%s).\n",
2702 dev->host_mem_size >> ilog2(SZ_1M),
2703 dev->nr_host_mem_descs,
2704 str_plural(dev->nr_host_mem_descs));
2705 }
2706
2707 ret = nvme_set_host_mem(dev, enable_bits);
2708 if (ret)
2709 nvme_free_host_mem(dev);
2710 return ret;
2711 }
2712
2713 static ssize_t cmb_show(struct device *dev, struct device_attribute *attr,
2714 char *buf)
2715 {
2716 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
2717
2718 return sysfs_emit(buf, "cmbloc : 0x%08x\ncmbsz : 0x%08x\n",
2719 ndev->cmbloc, ndev->cmbsz);
2720 }
2721 static DEVICE_ATTR_RO(cmb);
2722
2723 static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr,
2724 char *buf)
2725 {
2726 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
2727
2728 return sysfs_emit(buf, "%u\n", ndev->cmbloc);
2729 }
2730 static DEVICE_ATTR_RO(cmbloc);
2731
2732 static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr,
2733 char *buf)
2734 {
2735 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
2736
2737 return sysfs_emit(buf, "%u\n", ndev->cmbsz);
2738 }
2739 static DEVICE_ATTR_RO(cmbsz);
2740
2741 static ssize_t hmb_show(struct device *dev, struct device_attribute *attr,
2742 char *buf)
2743 {
2744 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
2745
2746 return sysfs_emit(buf, "%d\n", ndev->hmb);
2747 }
2748
2749 static ssize_t hmb_store(struct device *dev, struct device_attribute *attr,
2750 const char *buf, size_t count)
2751 {
2752 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
2753 bool new;
2754 int ret;
2755
2756 if (kstrtobool(buf, &new) < 0)
2757 return -EINVAL;
2758
2759 if (new == ndev->hmb)
2760 return count;
2761
2762 if (new) {
2763 ret = nvme_setup_host_mem(ndev);
2764 } else {
2765 ret = nvme_set_host_mem(ndev, 0);
2766 if (!ret)
2767 nvme_free_host_mem(ndev);
2768 }
2769
2770 if (ret < 0)
2771 return ret;
2772
2773 return count;
2774 }
2775 static DEVICE_ATTR_RW(hmb);
2776
2777 static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj,
2778 struct attribute *a, int n)
2779 {
2780 struct nvme_ctrl *ctrl =
2781 dev_get_drvdata(container_of(kobj, struct device, kobj));
2782 struct nvme_dev *dev = to_nvme_dev(ctrl);
2783
2784 if (a == &dev_attr_cmb.attr ||
2785 a == &dev_attr_cmbloc.attr ||
2786 a == &dev_attr_cmbsz.attr) {
2787 if (!dev->cmbsz)
2788 return 0;
2789 }
2790 if (a == &dev_attr_hmb.attr && !ctrl->hmpre)
2791 return 0;
2792
2793 return a->mode;
2794 }
2795
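/*
 * PCI-transport specific sysfs attributes, exposed on the controller device,
 * e.g. /sys/class/nvme/nvme0/cmb and /sys/class/nvme/nvme0/hmb (instance
 * number shown for illustration only).
 */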
2796 static struct attribute *nvme_pci_attrs[] = {
2797 &dev_attr_cmb.attr,
2798 &dev_attr_cmbloc.attr,
2799 &dev_attr_cmbsz.attr,
2800 &dev_attr_hmb.attr,
2801 NULL,
2802 };
2803
2804 static const struct attribute_group nvme_pci_dev_attrs_group = {
2805 .attrs = nvme_pci_attrs,
2806 .is_visible = nvme_pci_attrs_are_visible,
2807 };
2808
2809 static const struct attribute_group *nvme_pci_dev_attr_groups[] = {
2810 &nvme_dev_attrs_group,
2811 &nvme_pci_dev_attrs_group,
2812 NULL,
2813 };
2814
2815 static void nvme_update_attrs(struct nvme_dev *dev)
2816 {
2817 sysfs_update_group(&dev->ctrl.device->kobj, &nvme_pci_dev_attrs_group);
2818 }
2819
2820 /*
2821 * nirqs is the number of interrupts available for write and read
2822 * queues. The core already reserved an interrupt for the admin queue.
2823 */
2824 static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
2825 {
2826 struct nvme_dev *dev = affd->priv;
2827 unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;
2828
2829 /*
2830 * If there is no interrupt available for queues, ensure that
2831 * the default queue is set to 1. The affinity set size is
2832 * also set to one, but the irq core ignores it for this case.
2833 *
2834 * If only one interrupt is available or 'write_queues' == 0, combine
2835 * write and read queues.
2836 *
2837 * If 'write_queues' > 0, ensure it leaves room for at least one read
2838 * queue.
2839 */
2840 if (!nrirqs) {
2841 nrirqs = 1;
2842 nr_read_queues = 0;
2843 } else if (nrirqs == 1 || !nr_write_queues) {
2844 nr_read_queues = 0;
2845 } else if (nr_write_queues >= nrirqs) {
2846 nr_read_queues = 1;
2847 } else {
2848 nr_read_queues = nrirqs - nr_write_queues;
2849 }
2850
2851 dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
2852 affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
2853 dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
2854 affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
2855 affd->nr_sets = nr_read_queues ? 2 : 1;
2856 }
2857
2858 static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
2859 {
2860 struct pci_dev *pdev = to_pci_dev(dev->dev);
2861 struct irq_affinity affd = {
2862 .pre_vectors = 1,
2863 .calc_sets = nvme_calc_irq_sets,
2864 .priv = dev,
2865 };
2866 unsigned int irq_queues, poll_queues;
2867 unsigned int flags = PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY;
2868
2869 /*
2870 * Poll queues don't need interrupts, but we need at least one I/O queue
2871 * left over for non-polled I/O.
2872 */
2873 poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1);
2874 dev->io_queues[HCTX_TYPE_POLL] = poll_queues;
2875
2876 /*
2877 * Initialize for the single interrupt case, will be updated in
2878 * nvme_calc_irq_sets().
2879 */
2880 dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
2881 dev->io_queues[HCTX_TYPE_READ] = 0;
2882
2883 /*
2884 * We need interrupts for the admin queue and each non-polled I/O queue,
2885 * but some Apple controllers require all queues to use the first
2886 * vector.
2887 */
2888 irq_queues = 1;
2889 if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
2890 irq_queues += (nr_io_queues - poll_queues);
2891 if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
2892 flags &= ~PCI_IRQ_MSI;
2893 return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, flags,
2894 &affd);
2895 }
2896
2897 static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
2898 {
2899 /*
2900 * If tags are shared with admin queue (Apple bug), then
2901 * make sure we only use one IO queue.
2902 */
2903 if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
2904 return 1;
2905 return blk_mq_num_possible_queues(0) + dev->nr_write_queues +
2906 dev->nr_poll_queues;
2907 }
2908
2909 static int nvme_setup_io_queues(struct nvme_dev *dev)
2910 {
2911 struct nvme_queue *adminq = &dev->queues[0];
2912 struct pci_dev *pdev = to_pci_dev(dev->dev);
2913 unsigned int nr_io_queues;
2914 unsigned long size;
2915 int result;
2916
2917 /*
2918 * Sample the module parameters once at reset time so that we have
2919 * stable values to work with.
2920 */
2921 dev->nr_write_queues = write_queues;
2922 dev->nr_poll_queues = poll_queues;
2923
2924 if (dev->ctrl.tagset) {
2925 /*
2926 * The set's maps are allocated only once at initialization
2927 * time. We can't add special queues later if their mq_map
2928 * wasn't preallocated.
2929 */
2930 if (dev->ctrl.tagset->nr_maps < 3)
2931 dev->nr_poll_queues = 0;
2932 if (dev->ctrl.tagset->nr_maps < 2)
2933 dev->nr_write_queues = 0;
2934 }
2935
2936 /*
2937 * The initial number of allocated queue slots may be too large if the
2938 * user reduced the special queue parameters. Cap the value to the
2939 * number we need for this round.
2940 */
2941 nr_io_queues = min(nvme_max_io_queues(dev),
2942 dev->nr_allocated_queues - 1);
2943 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
2944 if (result < 0)
2945 return result;
2946
2947 if (nr_io_queues == 0)
2948 return 0;
2949
2950 /*
2951 * Free IRQ resources as soon as the NVMEQ_ENABLED bit transitions
2952 * from set to unset. If there is a window between clearing the bit
2953 * and the vector actually being freed, pci_free_irq_vectors() racing
2954 * into that window will crash. Take the lock to avoid racing with
2955 * pci_free_irq_vectors() in the nvme_dev_disable() path.
2956 */
2957 result = nvme_setup_io_queues_trylock(dev);
2958 if (result)
2959 return result;
2960 if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
2961 pci_free_irq(pdev, 0, adminq);
2962
2963 if (dev->cmb_use_sqes) {
2964 result = nvme_cmb_qdepth(dev, nr_io_queues,
2965 sizeof(struct nvme_command));
2966 if (result > 0) {
2967 dev->q_depth = result;
2968 dev->ctrl.sqsize = result - 1;
2969 } else {
2970 dev->cmb_use_sqes = false;
2971 }
2972 }
2973
2974 do {
2975 size = db_bar_size(dev, nr_io_queues);
2976 result = nvme_remap_bar(dev, size);
2977 if (!result)
2978 break;
2979 if (!--nr_io_queues) {
2980 result = -ENOMEM;
2981 goto out_unlock;
2982 }
2983 } while (1);
2984 adminq->q_db = dev->dbs;
2985
2986 retry:
2987 /* Deregister the admin queue's interrupt */
2988 if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
2989 pci_free_irq(pdev, 0, adminq);
2990
2991 /*
2992 * If we enabled MSI-X early because INTx is not usable, disable it
2993 * again before setting up the full range of vectors we need.
2994 */
2995 pci_free_irq_vectors(pdev);
2996
2997 result = nvme_setup_irqs(dev, nr_io_queues);
2998 if (result <= 0) {
2999 result = -EIO;
3000 goto out_unlock;
3001 }
3002
3003 dev->num_vecs = result;
3004 result = max(result - 1, 1);
3005 dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
3006
3007 /*
3008 * Should investigate if there's a performance win from allocating
3009 * more queues than interrupt vectors; it might allow the submission
3010 * path to scale better, even if the receive path is limited by the
3011 * number of interrupts.
3012 */
3013 result = queue_request_irq(adminq);
3014 if (result)
3015 goto out_unlock;
3016 set_bit(NVMEQ_ENABLED, &adminq->flags);
3017 mutex_unlock(&dev->shutdown_lock);
3018
3019 result = nvme_create_io_queues(dev);
3020 if (result || dev->online_queues < 2)
3021 return result;
3022
3023 if (dev->online_queues - 1 < dev->max_qid) {
3024 nr_io_queues = dev->online_queues - 1;
3025 nvme_delete_io_queues(dev);
3026 result = nvme_setup_io_queues_trylock(dev);
3027 if (result)
3028 return result;
3029 nvme_suspend_io_queues(dev);
3030 goto retry;
3031 }
3032 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
3033 dev->io_queues[HCTX_TYPE_DEFAULT],
3034 dev->io_queues[HCTX_TYPE_READ],
3035 dev->io_queues[HCTX_TYPE_POLL]);
3036 return 0;
3037 out_unlock:
3038 mutex_unlock(&dev->shutdown_lock);
3039 return result;
3040 }
3041
3042 static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
3043 blk_status_t error,
3044 const struct io_comp_batch *iob)
3045 {
3046 struct nvme_queue *nvmeq = req->end_io_data;
3047
3048 blk_mq_free_request(req);
3049 complete(&nvmeq->delete_done);
3050 return RQ_END_IO_NONE;
3051 }
3052
3053 static enum rq_end_io_ret nvme_del_cq_end(struct request *req,
3054 blk_status_t error,
3055 const struct io_comp_batch *iob)
3056 {
3057 struct nvme_queue *nvmeq = req->end_io_data;
3058
3059 if (error)
3060 set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
3061
3062 return nvme_del_queue_end(req, error, iob);
3063 }
3064
3065 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
3066 {
3067 struct request_queue *q = nvmeq->dev->ctrl.admin_q;
3068 struct request *req;
3069 struct nvme_command cmd = { };
3070
3071 cmd.delete_queue.opcode = opcode;
3072 cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
3073
3074 req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT);
3075 if (IS_ERR(req))
3076 return PTR_ERR(req);
3077 nvme_init_request(req, &cmd);
3078
3079 if (opcode == nvme_admin_delete_cq)
3080 req->end_io = nvme_del_cq_end;
3081 else
3082 req->end_io = nvme_del_queue_end;
3083 req->end_io_data = nvmeq;
3084
3085 init_completion(&nvmeq->delete_done);
3086 blk_execute_rq_nowait(req, false);
3087 return 0;
3088 }
3089
3090 static bool __nvme_delete_io_queues(struct nvme_dev *dev, u8 opcode)
3091 {
3092 int nr_queues = dev->online_queues - 1, sent = 0;
3093 unsigned long timeout;
3094
3095 retry:
3096 timeout = NVME_ADMIN_TIMEOUT;
3097 while (nr_queues > 0) {
3098 if (nvme_delete_queue(&dev->queues[nr_queues], opcode))
3099 break;
3100 nr_queues--;
3101 sent++;
3102 }
3103 while (sent) {
3104 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent];
3105
3106 timeout = wait_for_completion_io_timeout(&nvmeq->delete_done,
3107 timeout);
3108 if (timeout == 0)
3109 return false;
3110
3111 sent--;
3112 if (nr_queues)
3113 goto retry;
3114 }
3115 return true;
3116 }
3117
3118 static void nvme_delete_io_queues(struct nvme_dev *dev)
3119 {
3120 if (__nvme_delete_io_queues(dev, nvme_admin_delete_sq))
3121 __nvme_delete_io_queues(dev, nvme_admin_delete_cq);
3122 }
3123
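/*
 * Number of blk-mq queue maps to allocate: default only, default + read, or
 * default + read + poll, depending on which special queue types are in use.
 */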
3124 static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev)
3125 {
3126 if (dev->io_queues[HCTX_TYPE_POLL])
3127 return 3;
3128 if (dev->io_queues[HCTX_TYPE_READ])
3129 return 2;
3130 return 1;
3131 }
3132
3133 static bool nvme_pci_update_nr_queues(struct nvme_dev *dev)
3134 {
3135 if (!dev->ctrl.tagset) {
3136 nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
3137 nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
3138 return true;
3139 }
3140
3141 /* Give up if we are racing with nvme_dev_disable() */
3142 if (!mutex_trylock(&dev->shutdown_lock))
3143 return false;
3144
3145 /* Check if nvme_dev_disable() has been executed already */
3146 if (!dev->online_queues) {
3147 mutex_unlock(&dev->shutdown_lock);
3148 return false;
3149 }
3150
3151 blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
3152 /* free previously allocated queues that are no longer usable */
3153 nvme_free_queues(dev, dev->online_queues);
3154 mutex_unlock(&dev->shutdown_lock);
3155 return true;
3156 }
3157
3158 static int nvme_pci_enable(struct nvme_dev *dev)
3159 {
3160 int result = -ENOMEM;
3161 struct pci_dev *pdev = to_pci_dev(dev->dev);
3162 unsigned int flags = PCI_IRQ_ALL_TYPES;
3163
3164 if (pci_enable_device_mem(pdev))
3165 return result;
3166
3167 pci_set_master(pdev);
3168
3169 if (readl(dev->bar + NVME_REG_CSTS) == -1) {
3170 dev_dbg(dev->ctrl.device, "reading CSTS register failed\n");
3171 result = -ENODEV;
3172 goto disable;
3173 }
3174
3175 /*
3176 * Some devices and/or platforms don't advertise or work with INTx
3177 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
3178 * adjust this later.
3179 */
3180 if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
3181 flags &= ~PCI_IRQ_MSI;
3182 result = pci_alloc_irq_vectors(pdev, 1, 1, flags);
3183 if (result < 0)
3184 goto disable;
3185
3186 dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
3187
3188 dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
3189 io_queue_depth);
3190 dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
3191 dev->dbs = dev->bar + 4096;
3192
3193 /*
3194 * Some Apple controllers require a non-standard SQE size.
3195 * Interestingly they also seem to ignore the CC:IOSQES register
3196 * so we don't bother updating it here.
3197 */
3198 if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES)
3199 dev->io_sqes = 7;
3200 else
3201 dev->io_sqes = NVME_NVM_IOSQES;
3202
3203 if (dev->ctrl.quirks & NVME_QUIRK_QDEPTH_ONE) {
3204 dev->q_depth = 2;
3205 } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
3206 (pdev->device == 0xa821 || pdev->device == 0xa822) &&
3207 NVME_CAP_MQES(dev->ctrl.cap) == 0) {
3208 dev->q_depth = 64;
3209 dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, "
3210 "set queue depth=%u\n", dev->q_depth);
3211 }
3212
3213 /*
3214 * Controllers with the shared tags quirk need the IO queue to be
3215 * big enough so that we get 32 tags for the admin queue
3216 */
3217 if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) &&
3218 (dev->q_depth < (NVME_AQ_DEPTH + 2))) {
3219 dev->q_depth = NVME_AQ_DEPTH + 2;
3220 dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n",
3221 dev->q_depth);
3222 }
3223 dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
3224
3225 nvme_map_cmb(dev);
3226
3227 pci_save_state(pdev);
3228
3229 result = nvme_pci_configure_admin_queue(dev);
3230 if (result)
3231 goto free_irq;
3232 return result;
3233
3234 free_irq:
3235 pci_free_irq_vectors(pdev);
3236 disable:
3237 pci_disable_device(pdev);
3238 return result;
3239 }
3240
3241 static void nvme_dev_unmap(struct nvme_dev *dev)
3242 {
3243 if (dev->bar)
3244 iounmap(dev->bar);
3245 pci_release_mem_regions(to_pci_dev(dev->dev));
3246 }
3247
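/*
 * Decide whether the controller is still reachable: the PCI function must be
 * enabled and present, the channel must not be in error recovery, and CSTS
 * must neither report a fatal status nor have dropped the ready bit.
 */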
3248 static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev)
3249 {
3250 struct pci_dev *pdev = to_pci_dev(dev->dev);
3251 u32 csts;
3252
3253 if (!pci_is_enabled(pdev) || !pci_device_is_present(pdev))
3254 return true;
3255 if (pdev->error_state != pci_channel_io_normal)
3256 return true;
3257
3258 csts = readl(dev->bar + NVME_REG_CSTS);
3259 return (csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY);
3260 }
3261
3262 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
3263 {
3264 enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl);
3265 struct pci_dev *pdev = to_pci_dev(dev->dev);
3266 bool dead;
3267
3268 mutex_lock(&dev->shutdown_lock);
3269 dead = nvme_pci_ctrl_is_dead(dev);
3270 if (state == NVME_CTRL_LIVE || state == NVME_CTRL_RESETTING) {
3271 if (pci_is_enabled(pdev))
3272 nvme_start_freeze(&dev->ctrl);
3273 /*
3274 * Give the controller a chance to complete all entered requests
3275 * if doing a safe shutdown.
3276 */
3277 if (!dead && shutdown)
3278 nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
3279 }
3280
3281 nvme_quiesce_io_queues(&dev->ctrl);
3282
3283 if (!dead && dev->ctrl.queue_count > 0) {
3284 nvme_delete_io_queues(dev);
3285 nvme_disable_ctrl(&dev->ctrl, shutdown);
3286 nvme_poll_irqdisable(&dev->queues[0]);
3287 }
3288 nvme_suspend_io_queues(dev);
3289 nvme_suspend_queue(dev, 0);
3290 pci_free_irq_vectors(pdev);
3291 if (pci_is_enabled(pdev))
3292 pci_disable_device(pdev);
3293 nvme_reap_pending_cqes(dev);
3294
3295 nvme_cancel_tagset(&dev->ctrl);
3296 nvme_cancel_admin_tagset(&dev->ctrl);
3297
3298 /*
3299 * The driver will not be starting up queues again if shutting down so
3300 * must flush all entered requests to their failed completion to avoid
3301 * deadlocking blk-mq hot-cpu notifier.
3302 */
3303 if (shutdown) {
3304 nvme_unquiesce_io_queues(&dev->ctrl);
3305 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
3306 nvme_unquiesce_admin_queue(&dev->ctrl);
3307 }
3308 mutex_unlock(&dev->shutdown_lock);
3309 }
3310
3311 static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
3312 {
3313 if (!nvme_wait_reset(&dev->ctrl))
3314 return -EBUSY;
3315 nvme_dev_disable(dev, shutdown);
3316 return 0;
3317 }
3318
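/*
 * Minimum-one-element mempool backing the per-request DMA vector array
 * (NVME_MAX_SEGS entries), so request mapping can still make forward progress
 * under memory pressure.
 */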
3319 static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
3320 {
3321 size_t alloc_size = sizeof(struct nvme_dma_vec) * NVME_MAX_SEGS;
3322
3323 dev->dmavec_mempool = mempool_create_node(1,
3324 mempool_kmalloc, mempool_kfree,
3325 (void *)alloc_size, GFP_KERNEL,
3326 dev_to_node(dev->dev));
3327 if (!dev->dmavec_mempool)
3328 return -ENOMEM;
3329 return 0;
3330 }
3331
3332 static void nvme_free_tagset(struct nvme_dev *dev)
3333 {
3334 if (dev->tagset.tags)
3335 nvme_remove_io_tag_set(&dev->ctrl);
3336 dev->ctrl.tagset = NULL;
3337 }
3338
3339 /* pairs with nvme_pci_alloc_dev */
3340 static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
3341 {
3342 struct nvme_dev *dev = to_nvme_dev(ctrl);
3343
3344 nvme_free_tagset(dev);
3345 put_device(dev->dev);
3346 kfree(dev->queues);
3347 kfree(dev);
3348 }
3349
3350 static void nvme_reset_work(struct work_struct *work)
3351 {
3352 struct nvme_dev *dev =
3353 container_of(work, struct nvme_dev, ctrl.reset_work);
3354 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
3355 int result;
3356
3357 if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) {
3358 dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
3359 dev->ctrl.state);
3360 result = -ENODEV;
3361 goto out;
3362 }
3363
3364 /*
3365 * If we're called to reset a live controller first shut it down before
3366 * moving on.
3367 */
3368 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
3369 nvme_dev_disable(dev, false);
3370 nvme_sync_queues(&dev->ctrl);
3371
3372 mutex_lock(&dev->shutdown_lock);
3373 result = nvme_pci_enable(dev);
3374 if (result)
3375 goto out_unlock;
3376 nvme_unquiesce_admin_queue(&dev->ctrl);
3377 mutex_unlock(&dev->shutdown_lock);
3378
3379 /*
3380 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
3381 * initializing procedure here.
3382 */
3383 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
3384 dev_warn(dev->ctrl.device,
3385 "failed to mark controller CONNECTING\n");
3386 result = -EBUSY;
3387 goto out;
3388 }
3389
3390 result = nvme_init_ctrl_finish(&dev->ctrl, was_suspend);
3391 if (result)
3392 goto out;
3393
3394 if (nvme_ctrl_meta_sgl_supported(&dev->ctrl))
3395 dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS;
3396 else
3397 dev->ctrl.max_integrity_segments = 1;
3398
3399 nvme_dbbuf_dma_alloc(dev);
3400
3401 result = nvme_setup_host_mem(dev);
3402 if (result < 0)
3403 goto out;
3404
3405 nvme_update_attrs(dev);
3406
3407 result = nvme_setup_io_queues(dev);
3408 if (result)
3409 goto out;
3410
3411 /*
3412 * Freeze and update the number of I/O queues as those might have
3413 * changed. If there are no I/O queues left after this reset, keep the
3414 * controller around but remove all namespaces.
3415 */
3416 if (dev->online_queues > 1) {
3417 nvme_dbbuf_set(dev);
3418 nvme_unquiesce_io_queues(&dev->ctrl);
3419 nvme_wait_freeze(&dev->ctrl);
3420 if (!nvme_pci_update_nr_queues(dev))
3421 goto out;
3422 nvme_unfreeze(&dev->ctrl);
3423 } else {
3424 dev_warn(dev->ctrl.device, "IO queues lost\n");
3425 nvme_mark_namespaces_dead(&dev->ctrl);
3426 nvme_unquiesce_io_queues(&dev->ctrl);
3427 nvme_remove_namespaces(&dev->ctrl);
3428 nvme_free_tagset(dev);
3429 }
3430
3431 /*
3432 * If only the admin queue is alive, keep it around for further
3433 * investigation or recovery.
3434 */
3435 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
3436 dev_warn(dev->ctrl.device,
3437 "failed to mark controller live state\n");
3438 result = -ENODEV;
3439 goto out;
3440 }
3441
3442 nvme_start_ctrl(&dev->ctrl);
3443 return;
3444
3445 out_unlock:
3446 mutex_unlock(&dev->shutdown_lock);
3447 out:
3448 /*
3449 * Set state to deleting now to avoid blocking nvme_wait_reset(), which
3450 * may be holding this pci_dev's device lock.
3451 */
3452 dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n",
3453 result);
3454 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
3455 nvme_dev_disable(dev, true);
3456 nvme_sync_queues(&dev->ctrl);
3457 nvme_mark_namespaces_dead(&dev->ctrl);
3458 nvme_unquiesce_io_queues(&dev->ctrl);
3459 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
3460 }
3461
3462 static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
3463 {
3464 *val = readl(to_nvme_dev(ctrl)->bar + off);
3465 return 0;
3466 }
3467
3468 static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
3469 {
3470 writel(val, to_nvme_dev(ctrl)->bar + off);
3471 return 0;
3472 }
3473
3474 static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
3475 {
3476 *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
3477 return 0;
3478 }
3479
3480 static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
3481 {
3482 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
3483
3484 return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
3485 }
3486
3487 static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
3488 {
3489 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
3490 struct nvme_subsystem *subsys = ctrl->subsys;
3491
3492 dev_err(ctrl->device,
3493 "VID:DID %04x:%04x model:%.*s firmware:%.*s\n",
3494 pdev->vendor, pdev->device,
3495 nvme_strlen(subsys->model, sizeof(subsys->model)),
3496 subsys->model, nvme_strlen(subsys->firmware_rev,
3497 sizeof(subsys->firmware_rev)),
3498 subsys->firmware_rev);
3499 }
3500
3501 static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl)
3502 {
3503 struct nvme_dev *dev = to_nvme_dev(ctrl);
3504
3505 return dma_pci_p2pdma_supported(dev->dev);
3506 }
3507
3508 static unsigned long nvme_pci_get_virt_boundary(struct nvme_ctrl *ctrl,
3509 bool is_admin)
3510 {
3511 if (!nvme_ctrl_sgl_supported(ctrl) || is_admin)
3512 return NVME_CTRL_PAGE_SIZE - 1;
3513 return 0;
3514 }
3515
3516 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
3517 .name = "pcie",
3518 .module = THIS_MODULE,
3519 .flags = NVME_F_METADATA_SUPPORTED,
3520 .dev_attr_groups = nvme_pci_dev_attr_groups,
3521 .reg_read32 = nvme_pci_reg_read32,
3522 .reg_write32 = nvme_pci_reg_write32,
3523 .reg_read64 = nvme_pci_reg_read64,
3524 .free_ctrl = nvme_pci_free_ctrl,
3525 .submit_async_event = nvme_pci_submit_async_event,
3526 .subsystem_reset = nvme_pci_subsystem_reset,
3527 .get_address = nvme_pci_get_address,
3528 .print_device_info = nvme_pci_print_device_info,
3529 .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma,
3530 .get_virt_boundary = nvme_pci_get_virt_boundary,
3531 };
3532
3533 static int nvme_dev_map(struct nvme_dev *dev)
3534 {
3535 struct pci_dev *pdev = to_pci_dev(dev->dev);
3536
3537 if (pci_request_mem_regions(pdev, "nvme"))
3538 return -ENODEV;
3539
3540 if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
3541 goto release;
3542
3543 return 0;
3544 release:
3545 pci_release_mem_regions(pdev);
3546 return -ENODEV;
3547 }
3548
3549 static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
3550 {
3551 if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
3552 /*
3553 * Several Samsung devices seem to drop off the PCIe bus
3554 * randomly when APST is on and uses the deepest sleep state.
3555 * This has been observed on a Samsung "SM951 NVMe SAMSUNG
3556 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
3557 * 950 PRO 256GB", but it seems to be restricted to two Dell
3558 * laptops.
3559 */
3560 if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
3561 (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
3562 dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
3563 return NVME_QUIRK_NO_DEEPEST_PS;
3564 } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
3565 /*
3566 * Samsung SSD 960 EVO drops off the PCIe bus after system
3567 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
3568 * within few minutes after bootup on a Coffee Lake board -
3569 * ASUS PRIME Z370-A
3570 */
3571 if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
3572 (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
3573 dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
3574 return NVME_QUIRK_NO_APST;
3575 } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 ||
3576 pdev->device == 0xa808 || pdev->device == 0xa809)) ||
3577 (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) {
3578 /*
3579 * Forcing to use host managed nvme power settings for
3580 * lowest idle power with quick resume latency on
3581 * Samsung and Toshiba SSDs based on suspend behavior
3582 * on Coffee Lake board for LENOVO C640
3583 */
3584 if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
3585 dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
3586 return NVME_QUIRK_SIMPLE_SUSPEND;
3587 } else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 ||
3588 pdev->device == 0x500f)) {
3589 /*
3590 * Exclude some Kingston NV1 and A2000 devices from
3591 * NVME_QUIRK_SIMPLE_SUSPEND. Do a full suspend to save a
3592 * lot of energy with s2idle sleep on some TUXEDO platforms.
3593 */
3594 if (dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
3595 dmi_match(DMI_BOARD_NAME, "NS5x_7xAU") ||
3596 dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") ||
3597 dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"))
3598 return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
3599 } else if (pdev->vendor == 0x144d && pdev->device == 0xa80d) {
3600 /*
3601 * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND
3602 * because of high power consumption (> 2 Watt) in s2idle
3603 * sleep. Only some boards with Intel CPU are affected.
3604 * (Note for testing: Samsung 990 Evo Plus has same PCI ID)
3605 */
3606 if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
3607 dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
3608 dmi_match(DMI_BOARD_NAME, "GXxMRXx") ||
3609 dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
3610 dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
3611 dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
3612 dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
3613 return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
3614 }
3615
3616 /*
3617 * NVMe SSD drops off the PCIe bus after system idle
3618 * for 10 hours on a Lenovo N60z board.
3619 */
3620 if (dmi_match(DMI_BOARD_NAME, "LXKT-ZXEG-N6"))
3621 return NVME_QUIRK_NO_APST;
3622
3623 return 0;
3624 }
3625
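/*
 * Match the device against the dynamically built quirk list so quirks can
 * be enabled or disabled per vendor/device ID without editing the static
 * PCI ID table.
 */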
static struct quirk_entry *detect_dynamic_quirks(struct pci_dev *pdev)
{
        int i;

        for (i = 0; i < nvme_pci_quirk_count; i++)
                if (pdev->vendor == nvme_pci_quirk_list[i].vendor_id &&
                    pdev->device == nvme_pci_quirk_list[i].dev_id)
                        return &nvme_pci_quirk_list[i];

        return NULL;
}

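/*
 * Allocate the nvme_dev for a newly probed PCI function and set up
 * everything that does not require talking to the device yet: quirk
 * detection, the per-node queue array, DMA masks and transfer limits,
 * and registration of the controller with the NVMe core.
 */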
static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
                const struct pci_device_id *id)
{
        unsigned long quirks = id->driver_data;
        int node = dev_to_node(&pdev->dev);
        struct nvme_dev *dev;
        struct quirk_entry *qentry;
        int ret = -ENOMEM;

        dev = kzalloc_node(struct_size(dev, descriptor_pools, nr_node_ids),
                           GFP_KERNEL, node);
        if (!dev)
                return ERR_PTR(-ENOMEM);
        INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
        mutex_init(&dev->shutdown_lock);

        dev->nr_write_queues = write_queues;
        dev->nr_poll_queues = poll_queues;
        dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1;
        dev->queues = kcalloc_node(dev->nr_allocated_queues,
                        sizeof(struct nvme_queue), GFP_KERNEL, node);
        if (!dev->queues)
                goto out_free_dev;

        dev->dev = get_device(&pdev->dev);

        quirks |= check_vendor_combination_bug(pdev);
        if (!noacpi &&
            !(quirks & NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND) &&
            acpi_storage_d3(&pdev->dev)) {
                /*
                 * Some systems use a BIOS workaround to ask for D3 on
                 * platforms that support kernel managed suspend.
                 */
                dev_info(&pdev->dev,
                         "platform quirk: setting simple suspend\n");
                quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
        }
        qentry = detect_dynamic_quirks(pdev);
        if (qentry) {
                quirks |= qentry->enabled_quirks;
                quirks &= ~qentry->disabled_quirks;
        }
        ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
                             quirks);
        if (ret)
                goto out_put_device;

        if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
                dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
        else
                dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1);
        dma_set_max_seg_size(&pdev->dev, 0xffffffff);

        /*
         * Limit the max command size to prevent iod->sg allocations going
         * over a single page.
         */
        dev->ctrl.max_hw_sectors = min_t(u32,
                NVME_MAX_BYTES >> SECTOR_SHIFT,
                dma_opt_mapping_size(&pdev->dev) >> 9);
        dev->ctrl.max_segments = NVME_MAX_SEGS;
        dev->ctrl.max_integrity_segments = 1;
        return dev;

out_put_device:
        put_device(dev->dev);
        kfree(dev->queues);
out_free_dev:
        kfree(dev);
        return ERR_PTR(ret);
}

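/*
 * Full probe sequence: allocate the controller, map and enable the device,
 * create the admin and I/O tag sets, move the controller to LIVE and start
 * namespace scanning.  Any failure unwinds through the labels at the end.
 */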
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct nvme_dev *dev;
        int result = -ENOMEM;

        dev = nvme_pci_alloc_dev(pdev, id);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        result = nvme_add_ctrl(&dev->ctrl);
        if (result)
                goto out_put_ctrl;

        result = nvme_dev_map(dev);
        if (result)
                goto out_uninit_ctrl;

        result = nvme_pci_alloc_iod_mempool(dev);
        if (result)
                goto out_dev_unmap;

        dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));

        result = nvme_pci_enable(dev);
        if (result)
                goto out_release_iod_mempool;

        result = nvme_alloc_admin_tag_set(&dev->ctrl, &dev->admin_tagset,
                                &nvme_mq_admin_ops, sizeof(struct nvme_iod));
        if (result)
                goto out_disable;

        /*
         * Mark the controller as connecting before sending admin commands to
         * allow the timeout handler to do the right thing.
         */
        if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
                dev_warn(dev->ctrl.device,
                        "failed to mark controller CONNECTING\n");
                result = -EBUSY;
                goto out_disable;
        }

        result = nvme_init_ctrl_finish(&dev->ctrl, false);
        if (result)
                goto out_disable;

        if (nvme_ctrl_meta_sgl_supported(&dev->ctrl))
                dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS;
        else
                dev->ctrl.max_integrity_segments = 1;

        nvme_dbbuf_dma_alloc(dev);

        result = nvme_setup_host_mem(dev);
        if (result < 0)
                goto out_disable;

        nvme_update_attrs(dev);

        result = nvme_setup_io_queues(dev);
        if (result)
                goto out_disable;

        if (dev->online_queues > 1) {
                nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
                                nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
                nvme_dbbuf_set(dev);
        }

        if (!dev->ctrl.tagset)
                dev_warn(dev->ctrl.device, "IO queues not created\n");

        if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
                dev_warn(dev->ctrl.device,
                        "failed to mark controller live state\n");
                result = -ENODEV;
                goto out_disable;
        }

        pci_set_drvdata(pdev, dev);

        nvme_start_ctrl(&dev->ctrl);
        nvme_put_ctrl(&dev->ctrl);
        flush_work(&dev->ctrl.scan_work);
        return 0;

out_disable:
        nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
        nvme_dev_disable(dev, true);
        nvme_free_host_mem(dev);
        nvme_dev_remove_admin(dev);
        nvme_dbbuf_dma_free(dev);
        nvme_free_queues(dev, 0);
out_release_iod_mempool:
        mempool_destroy(dev->dmavec_mempool);
out_dev_unmap:
        nvme_dev_unmap(dev);
out_uninit_ctrl:
        nvme_uninit_ctrl(&dev->ctrl);
out_put_ctrl:
        nvme_put_ctrl(&dev->ctrl);
        dev_err_probe(&pdev->dev, result, "probe failed\n");
        return result;
}

static void nvme_reset_prepare(struct pci_dev *pdev)
{
        struct nvme_dev *dev = pci_get_drvdata(pdev);

        /*
         * We don't need to check the return value from waiting for the reset
         * state as pci_dev device lock is held, making it impossible to race
         * with ->remove().
         */
        nvme_disable_prepare_reset(dev, false);
        nvme_sync_queues(&dev->ctrl);
}

static void nvme_reset_done(struct pci_dev *pdev)
{
        struct nvme_dev *dev = pci_get_drvdata(pdev);

        if (!nvme_try_sched_reset(&dev->ctrl))
                flush_work(&dev->ctrl.reset_work);
}

static void nvme_shutdown(struct pci_dev *pdev)
{
        struct nvme_dev *dev = pci_get_drvdata(pdev);

        nvme_disable_prepare_reset(dev, true);
}

/*
 * The driver's remove may be called on a device in a partially initialized
 * state. This function must not have any dependencies on the device state in
 * order to proceed.
 */
static void nvme_remove(struct pci_dev *pdev)
{
        struct nvme_dev *dev = pci_get_drvdata(pdev);

        nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
        pci_set_drvdata(pdev, NULL);

        if (!pci_device_is_present(pdev)) {
                nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
                nvme_dev_disable(dev, true);
        }

        flush_work(&dev->ctrl.reset_work);
        nvme_stop_ctrl(&dev->ctrl);
        nvme_remove_namespaces(&dev->ctrl);
        nvme_dev_disable(dev, true);
        nvme_free_host_mem(dev);
        nvme_dev_remove_admin(dev);
        nvme_dbbuf_dma_free(dev);
        nvme_free_queues(dev, 0);
        mempool_destroy(dev->dmavec_mempool);
        nvme_release_descriptor_pools(dev);
        nvme_dev_unmap(dev);
        nvme_uninit_ctrl(&dev->ctrl);
}

#ifdef CONFIG_PM_SLEEP
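/*
 * Thin wrappers around the Get/Set Features commands for the NVMe Power
 * Management feature, used by the suspend/resume handlers below.
 */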
static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps)
{
        return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps);
}

static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps)
{
        return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL);
}

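/*
 * Undo nvme_suspend(): restore the power state saved at suspend time and
 * re-enable the host memory buffer if the controller uses one.  Fall back
 * to a full controller reset if either step fails.
 */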
static int nvme_resume(struct device *dev)
{
        struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
        struct nvme_ctrl *ctrl = &ndev->ctrl;

        if (ndev->last_ps == U32_MAX ||
            nvme_set_power_state(ctrl, ndev->last_ps) != 0)
                goto reset;
        if (ctrl->hmpre && nvme_setup_host_mem(ndev))
                goto reset;

        return 0;
reset:
        return nvme_try_sched_reset(ctrl);
}

static int nvme_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct nvme_dev *ndev = pci_get_drvdata(pdev);
        struct nvme_ctrl *ctrl = &ndev->ctrl;
        int ret = -EBUSY;

        ndev->last_ps = U32_MAX;

        /*
         * The platform does not remove power for a kernel managed suspend so
         * use host managed nvme power settings for lowest idle power if
         * possible. This should have quicker resume latency than a full
         * device shutdown.  But if the firmware is involved after the suspend
         * or the device does not support any non-default power states, shut
         * down the device fully.
         *
         * If ASPM is not enabled for the device, shut down the device and
         * allow the PCI bus layer to put it into D3 in order to take the PCIe
         * link down, so as to allow the platform to achieve its minimum
         * low-power state (which may not be possible if the link is up).
         */
        if (pm_suspend_via_firmware() || !ctrl->npss ||
            !pcie_aspm_enabled(pdev) ||
            (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
                return nvme_disable_prepare_reset(ndev, true);

        nvme_start_freeze(ctrl);
        nvme_wait_freeze(ctrl);
        nvme_sync_queues(ctrl);

        if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
                goto unfreeze;

        /*
         * Host memory access may not be successful in a system suspend state,
         * but the specification allows the controller to access memory in a
         * non-operational power state.
         */
        if (ndev->hmb) {
                ret = nvme_set_host_mem(ndev, 0);
                if (ret < 0)
                        goto unfreeze;
        }

        ret = nvme_get_power_state(ctrl, &ndev->last_ps);
        if (ret < 0)
                goto unfreeze;

        /*
         * A saved state prevents pci pm from generically controlling the
         * device's power. If we're using protocol specific settings, we don't
         * want pci interfering.
         */
        pci_save_state(pdev);

        ret = nvme_set_power_state(ctrl, ctrl->npss);
        if (ret < 0)
                goto unfreeze;

        if (ret) {
                /* discard the saved state */
                pci_load_saved_state(pdev, NULL);

                /*
                 * Clearing npss forces a controller reset on resume. The
                 * correct value will be rediscovered then.
                 */
                ret = nvme_disable_prepare_reset(ndev, true);
                ctrl->npss = 0;
        }
unfreeze:
        nvme_unfreeze(ctrl);
        return ret;
}

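/*
 * Used for the hibernation transitions (freeze/thaw, poweroff/restore):
 * shut the controller down completely and schedule a reset on the way
 * back instead of negotiating NVMe power states.
 */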
static int nvme_simple_suspend(struct device *dev)
{
        struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));

        return nvme_disable_prepare_reset(ndev, true);
}

static int nvme_simple_resume(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct nvme_dev *ndev = pci_get_drvdata(pdev);

        return nvme_try_sched_reset(&ndev->ctrl);
}

static const struct dev_pm_ops nvme_dev_pm_ops = {
        .suspend        = nvme_suspend,
        .resume         = nvme_resume,
        .freeze         = nvme_simple_suspend,
        .thaw           = nvme_simple_resume,
        .poweroff       = nvme_simple_suspend,
        .restore        = nvme_simple_resume,
};
#endif /* CONFIG_PM_SLEEP */

static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
        struct nvme_dev *dev = pci_get_drvdata(pdev);

        /*
         * A frozen channel requires a reset. When detected, this method will
         * shut down the controller to quiesce. The controller will be
         * restarted after the slot reset through the driver's slot_reset
         * callback.
         */
        switch (state) {
        case pci_channel_io_normal:
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:
                dev_warn(dev->ctrl.device,
                        "frozen state error detected, reset controller\n");
                if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
                        nvme_dev_disable(dev, true);
                        return PCI_ERS_RESULT_DISCONNECT;
                }
                nvme_dev_disable(dev, false);
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
                dev_warn(dev->ctrl.device,
                        "failure state error detected, request disconnect\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
        return PCI_ERS_RESULT_NEED_RESET;
}

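/*
 * The PCI core has reset the slot: restore config space and schedule a
 * controller reset.  If the reset cannot be scheduled, unquiesce the I/O
 * queues so outstanding requests are not left stuck.
 */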
static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
{
        struct nvme_dev *dev = pci_get_drvdata(pdev);

        dev_info(dev->ctrl.device, "restart after slot reset\n");
        pci_restore_state(pdev);
        if (nvme_try_sched_reset(&dev->ctrl))
                nvme_unquiesce_io_queues(&dev->ctrl);
        return PCI_ERS_RESULT_RECOVERED;
}

static void nvme_error_resume(struct pci_dev *pdev)
{
        struct nvme_dev *dev = pci_get_drvdata(pdev);

        flush_work(&dev->ctrl.reset_work);
}

static const struct pci_error_handlers nvme_err_handler = {
        .error_detected = nvme_error_detected,
        .slot_reset     = nvme_slot_reset,
        .resume         = nvme_error_resume,
        .reset_prepare  = nvme_reset_prepare,
        .reset_done     = nvme_reset_done,
};

static const struct pci_device_id nvme_id_table[] = {
        { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */
                .driver_data = NVME_QUIRK_STRIPE_SIZE |
                               NVME_QUIRK_DEALLOCATE_ZEROES, },
        { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */
                .driver_data = NVME_QUIRK_STRIPE_SIZE |
                               NVME_QUIRK_DEALLOCATE_ZEROES, },
        { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */
                .driver_data = NVME_QUIRK_STRIPE_SIZE |
                               NVME_QUIRK_IGNORE_DEV_SUBNQN |
                               NVME_QUIRK_BOGUS_NID, },
        { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */
                .driver_data = NVME_QUIRK_STRIPE_SIZE, },
        { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
                               NVME_QUIRK_MEDIUM_PRIO_SQ |
                               NVME_QUIRK_NO_TEMP_THRESH_CHANGE |
                               NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
                .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
                .driver_data = NVME_QUIRK_IDENTIFY_CNS |
                               NVME_QUIRK_DISABLE_WRITE_ZEROES |
                               NVME_QUIRK_BOGUS_NID, },
        { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
                .driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, },
        { PCI_DEVICE(0x126f, 0x1001), /* Silicon Motion generic */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
                               NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
                .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
                               NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
                               NVME_QUIRK_NO_NS_DESC_LIST, },
        { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
        { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
        { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
        { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
        { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
                               NVME_QUIRK_DISABLE_WRITE_ZEROES |
                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */
                .driver_data = NVME_QUIRK_BROKEN_MSI },
        { PCI_DEVICE(0x15b7, 0x5009), /* Sandisk SN550 */
                .driver_data = NVME_QUIRK_BROKEN_MSI |
                               NVME_QUIRK_NO_DEEPEST_PS },
        { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
                .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
                               NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1987, 0x5019), /* Phison E19 */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x1987, 0x5021), /* Phison E21 */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
                .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE(0x1cc1, 0x33f8), /* ADATA IM2P33F8ABR1 1 TB */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
                .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
                               NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x10ec, 0x5763), /* ADATA SX6000PNP */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */
                .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN },
        { PCI_DEVICE(0x1344, 0x6001), /* Micron Nitro NVMe */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1c5c, 0x1D59), /* SK Hynix BC901 */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES |
                               NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x144d, 0xa809), /* Samsung MZALQ256HBJD 256G */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x144d, 0xa802), /* Samsung SM953 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1cc4, 0x6303), /* UMIS RPJTJ512MGE1QDY 512G */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x1cc4, 0x6302), /* UMIS RPJTJ256MGE1QDY 256G */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
        { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
        { PCI_DEVICE(0x2646, 0x5013), /* Kingston KC3000, Kingston FURY Renegade */
                .driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, },
        { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x2646, 0x501A), /* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x2646, 0x501B), /* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x1f40, 0x1202), /* Netac Technologies Co. NV3000 NVMe SSD */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1f40, 0x5236), /* Netac Technologies Co. NV7000 NVMe SSD */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1e4B, 0x1602), /* MAXIO MAP1602 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1dbe, 0x5216), /* Acer/INNOGRIT FA100/5216 NVMe SSD */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
        { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
        { PCI_DEVICE(0x1fa0, 0x2283), /* Wodposit WPBSNM8-256GTP */
                .driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, },
        { PCI_DEVICE(0x025e, 0xf1ac), /* SOLIDIGM P44 pro SSDPFKKW020X7 */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
        { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
                .driver_data = NVME_QUIRK_BOGUS_NID |
                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x10ec, 0x5765), /* TEAMGROUP MP33 2TB SSD */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
                /*
                 * Fix for the Apple controller found in the MacBook8,1 and
                 * some MacBook7,1 to avoid controller resets and data loss.
                 */
                .driver_data = NVME_QUIRK_SINGLE_VECTOR |
                               NVME_QUIRK_QDEPTH_ONE },
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
                .driver_data = NVME_QUIRK_SINGLE_VECTOR |
                               NVME_QUIRK_128_BYTES_SQES |
                               NVME_QUIRK_SHARED_TAGS |
                               NVME_QUIRK_SKIP_CID_GEN |
                               NVME_QUIRK_IDENTIFY_CNS },
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
        .name           = "nvme",
        .id_table       = nvme_id_table,
        .probe          = nvme_probe,
        .remove         = nvme_remove,
        .shutdown       = nvme_shutdown,
        .driver         = {
                .probe_type     = PROBE_PREFER_ASYNCHRONOUS,
#ifdef CONFIG_PM_SLEEP
                .pm             = &nvme_dev_pm_ops,
#endif
        },
        .sriov_configure = pci_sriov_configure_simple,
        .err_handler    = &nvme_err_handler,
};

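/*
 * The BUILD_BUG_ONs verify that the queue management commands still fit in
 * a single 64-byte submission queue entry and that the IRQ affinity code
 * supports at least the two interrupt sets (default and read queues) this
 * driver spreads vectors across.
 */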
static int __init nvme_init(void)
{
        BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
        BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);

        return pci_register_driver(&nvme_driver);
}

static void __exit nvme_exit(void)
{
        kfree(nvme_pci_quirk_list);
        pci_unregister_driver(&nvme_driver);
        flush_workqueue(nvme_wq);
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
MODULE_DESCRIPTION("NVMe host PCIe transport driver");
module_init(nvme_init);
module_exit(nvme_exit);