xref: /linux/drivers/nvme/host/pci.c (revision a028739a4330881a6a3b5aa4a39381bbcacf2f2f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVM Express device driver
4  * Copyright (c) 2011-2014, Intel Corporation.
5  */
6 
7 #include <linux/acpi.h>
8 #include <linux/async.h>
9 #include <linux/blkdev.h>
10 #include <linux/blk-mq-dma.h>
11 #include <linux/blk-integrity.h>
12 #include <linux/dmi.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/kstrtox.h>
17 #include <linux/memremap.h>
18 #include <linux/mm.h>
19 #include <linux/module.h>
20 #include <linux/mutex.h>
21 #include <linux/nodemask.h>
22 #include <linux/once.h>
23 #include <linux/pci.h>
24 #include <linux/suspend.h>
25 #include <linux/t10-pi.h>
26 #include <linux/types.h>
27 #include <linux/io-64-nonatomic-lo-hi.h>
28 #include <linux/io-64-nonatomic-hi-lo.h>
29 #include <linux/sed-opal.h>
30 
31 #include "trace.h"
32 #include "nvme.h"
33 
34 #define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
35 #define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))
36 
37 /* Optimisation for I/Os between 4k and 128k */
38 #define NVME_SMALL_POOL_SIZE	256
39 
40 /*
41  * Arbitrary upper bound.
42  */
43 #define NVME_MAX_BYTES		SZ_8M
44 #define NVME_MAX_NR_DESCRIPTORS	5
45 
46 /*
47  * For data SGLs we support a single descriptors worth of SGL entries.
48  * For PRPs, segments don't matter at all.
49  */
50 #define NVME_MAX_SEGS \
51 	(NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
52 
53 /*
54  * For metadata SGLs, only the small descriptor is supported, and the first
55  * entry is the segment descriptor, which for the data pointer sits in the SQE.
56  */
57 #define NVME_MAX_META_SEGS \
58 	((NVME_SMALL_POOL_SIZE / sizeof(struct nvme_sgl_desc)) - 1)
59 
60 /*
61  * The last entry is used to link to the next descriptor.
62  */
63 #define PRPS_PER_PAGE \
64 	(((NVME_CTRL_PAGE_SIZE / sizeof(__le64))) - 1)
65 
66 /*
67  * I/O could be non-aligned both at the beginning and end.
68  */
69 #define MAX_PRP_RANGE \
70 	(NVME_MAX_BYTES + 2 * (NVME_CTRL_PAGE_SIZE - 1))
71 
72 static_assert(MAX_PRP_RANGE / NVME_CTRL_PAGE_SIZE <=
73 	(1 /* prp1 */ + NVME_MAX_NR_DESCRIPTORS * PRPS_PER_PAGE));
74 
75 struct quirk_entry {
76 	u16 vendor_id;
77 	u16 dev_id;
78 	u32 enabled_quirks;
79 	u32 disabled_quirks;
80 };
81 
82 static int use_threaded_interrupts;
83 module_param(use_threaded_interrupts, int, 0444);
84 
85 static bool use_cmb_sqes = true;
86 module_param(use_cmb_sqes, bool, 0444);
87 MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
88 
89 static unsigned int max_host_mem_size_mb = 128;
90 module_param(max_host_mem_size_mb, uint, 0444);
91 MODULE_PARM_DESC(max_host_mem_size_mb,
92 	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");
93 
94 static unsigned int sgl_threshold = SZ_32K;
95 module_param(sgl_threshold, uint, 0644);
96 MODULE_PARM_DESC(sgl_threshold,
97 		"Use SGLs when average request segment size is larger or equal to "
98 		"this size. Use 0 to disable SGLs.");
99 
100 #define NVME_PCI_MIN_QUEUE_SIZE 2
101 #define NVME_PCI_MAX_QUEUE_SIZE 4095
102 static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
103 static const struct kernel_param_ops io_queue_depth_ops = {
104 	.set = io_queue_depth_set,
105 	.get = param_get_uint,
106 };
107 
108 static unsigned int io_queue_depth = 1024;
109 module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
110 MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2 and < 4096");
111 
112 static struct quirk_entry *nvme_pci_quirk_list;
113 static unsigned int nvme_pci_quirk_count;
114 
115 /* Helper to parse individual quirk names */
nvme_parse_quirk_names(char * quirk_str,struct quirk_entry * entry)116 static int nvme_parse_quirk_names(char *quirk_str, struct quirk_entry *entry)
117 {
118 	int i;
119 	size_t field_len;
120 	bool disabled, found;
121 	char *p = quirk_str, *field;
122 
123 	while ((field = strsep(&p, ",")) && *field) {
124 		disabled = false;
125 		found = false;
126 
127 		if (*field == '^') {
128 			/* Skip the '^' character */
129 			disabled = true;
130 			field++;
131 		}
132 
133 		field_len = strlen(field);
134 		for (i = 0; i < 32; i++) {
135 			unsigned int bit = 1U << i;
136 			char *q_name = nvme_quirk_name(bit);
137 			size_t q_len = strlen(q_name);
138 
139 			if (!strcmp(q_name, "unknown"))
140 				break;
141 
142 			if (!strcmp(q_name, field) &&
143 				    q_len == field_len) {
144 				if (disabled)
145 					entry->disabled_quirks |= bit;
146 				else
147 					entry->enabled_quirks |= bit;
148 				found = true;
149 				break;
150 			}
151 		}
152 
153 		if (!found) {
154 			pr_err("nvme: unrecognized quirk %s\n", field);
155 			return -EINVAL;
156 		}
157 	}
158 	return 0;
159 }
160 
161 /* Helper to parse a single VID:DID:quirk_names entry */
nvme_parse_quirk_entry(char * s,struct quirk_entry * entry)162 static int nvme_parse_quirk_entry(char *s, struct quirk_entry *entry)
163 {
164 	char *field;
165 
166 	field = strsep(&s, ":");
167 	if (!field || kstrtou16(field, 16, &entry->vendor_id))
168 		return -EINVAL;
169 
170 	field = strsep(&s, ":");
171 	if (!field || kstrtou16(field, 16, &entry->dev_id))
172 		return -EINVAL;
173 
174 	field = strsep(&s, ":");
175 	if (!field)
176 		return -EINVAL;
177 
178 	return nvme_parse_quirk_names(field, entry);
179 }
180 
quirks_param_set(const char * value,const struct kernel_param * kp)181 static int quirks_param_set(const char *value, const struct kernel_param *kp)
182 {
183 	int count, err, i;
184 	struct quirk_entry *qlist;
185 	char *field, *val, *sep_ptr;
186 
187 	err = param_set_copystring(value, kp);
188 	if (err)
189 		return err;
190 
191 	val = kstrdup(value, GFP_KERNEL);
192 	if (!val)
193 		return -ENOMEM;
194 
195 	if (!*val)
196 		goto out_free_val;
197 
198 	count = 1;
199 	for (i = 0; val[i]; i++) {
200 		if (val[i] == '-')
201 			count++;
202 	}
203 
204 	qlist = kcalloc(count, sizeof(*qlist), GFP_KERNEL);
205 	if (!qlist) {
206 		err = -ENOMEM;
207 		goto out_free_val;
208 	}
209 
210 	i = 0;
211 	sep_ptr = val;
212 	while ((field = strsep(&sep_ptr, "-"))) {
213 		if (nvme_parse_quirk_entry(field, &qlist[i])) {
214 			pr_err("nvme: failed to parse quirk string %s\n",
215 				value);
216 			goto out_free_qlist;
217 		}
218 
219 		i++;
220 	}
221 
222 	kfree(nvme_pci_quirk_list);
223 	nvme_pci_quirk_count = count;
224 	nvme_pci_quirk_list  = qlist;
225 	goto out_free_val;
226 
227 out_free_qlist:
228 	kfree(qlist);
229 out_free_val:
230 	kfree(val);
231 	return err;
232 }
233 
234 static char quirks_param[128];
235 static const struct kernel_param_ops quirks_param_ops = {
236 	.set = quirks_param_set,
237 	.get = param_get_string,
238 };
239 
240 static struct kparam_string quirks_param_string = {
241 	.maxlen = sizeof(quirks_param),
242 	.string = quirks_param,
243 };
244 
245 module_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0444);
246 MODULE_PARM_DESC(quirks, "Enable/disable NVMe quirks by specifying "
247 						"quirks=VID:DID:quirk_names");
248 
io_queue_count_set(const char * val,const struct kernel_param * kp)249 static int io_queue_count_set(const char *val, const struct kernel_param *kp)
250 {
251 	unsigned int n;
252 	int ret;
253 
254 	ret = kstrtouint(val, 10, &n);
255 	if (ret != 0 || n > blk_mq_num_possible_queues(0))
256 		return -EINVAL;
257 	return param_set_uint(val, kp);
258 }
259 
260 static const struct kernel_param_ops io_queue_count_ops = {
261 	.set = io_queue_count_set,
262 	.get = param_get_uint,
263 };
264 
265 static unsigned int write_queues;
266 module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644);
267 MODULE_PARM_DESC(write_queues,
268 	"Number of queues to use for writes. If not set, reads and writes "
269 	"will share a queue set.");
270 
271 static unsigned int poll_queues;
272 module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644);
273 MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
274 
275 static bool noacpi;
276 module_param(noacpi, bool, 0444);
277 MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");
278 
279 struct nvme_dev;
280 struct nvme_queue;
281 
282 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
283 static void nvme_delete_io_queues(struct nvme_dev *dev);
284 static void nvme_update_attrs(struct nvme_dev *dev);
285 
286 struct nvme_descriptor_pools {
287 	struct dma_pool *large;
288 	struct dma_pool *small;
289 };
290 
291 /*
292  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
293  */
294 struct nvme_dev {
295 	struct nvme_queue *queues;
296 	struct blk_mq_tag_set tagset;
297 	struct blk_mq_tag_set admin_tagset;
298 	u32 __iomem *dbs;
299 	struct device *dev;
300 	unsigned online_queues;
301 	unsigned max_qid;
302 	unsigned io_queues[HCTX_MAX_TYPES];
303 	unsigned int num_vecs;
304 	u32 q_depth;
305 	int io_sqes;
306 	u32 db_stride;
307 	void __iomem *bar;
308 	unsigned long bar_mapped_size;
309 	struct mutex shutdown_lock;
310 	bool subsystem;
311 	u64 cmb_size;
312 	bool cmb_use_sqes;
313 	u32 cmbsz;
314 	u32 cmbloc;
315 	struct nvme_ctrl ctrl;
316 	u32 last_ps;
317 	bool hmb;
318 	struct sg_table *hmb_sgt;
319 	mempool_t *dmavec_mempool;
320 
321 	/* shadow doorbell buffer support: */
322 	__le32 *dbbuf_dbs;
323 	dma_addr_t dbbuf_dbs_dma_addr;
324 	__le32 *dbbuf_eis;
325 	dma_addr_t dbbuf_eis_dma_addr;
326 
327 	/* host memory buffer support: */
328 	u64 host_mem_size;
329 	u32 nr_host_mem_descs;
330 	u32 host_mem_descs_size;
331 	dma_addr_t host_mem_descs_dma;
332 	struct nvme_host_mem_buf_desc *host_mem_descs;
333 	void **host_mem_desc_bufs;
334 	unsigned int nr_allocated_queues;
335 	unsigned int nr_write_queues;
336 	unsigned int nr_poll_queues;
337 	struct nvme_descriptor_pools descriptor_pools[];
338 };
339 
io_queue_depth_set(const char * val,const struct kernel_param * kp)340 static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
341 {
342 	return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE,
343 			NVME_PCI_MAX_QUEUE_SIZE);
344 }
345 
sq_idx(unsigned int qid,u32 stride)346 static inline unsigned int sq_idx(unsigned int qid, u32 stride)
347 {
348 	return qid * 2 * stride;
349 }
350 
cq_idx(unsigned int qid,u32 stride)351 static inline unsigned int cq_idx(unsigned int qid, u32 stride)
352 {
353 	return (qid * 2 + 1) * stride;
354 }
355 
to_nvme_dev(struct nvme_ctrl * ctrl)356 static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
357 {
358 	return container_of(ctrl, struct nvme_dev, ctrl);
359 }
360 
361 /*
362  * An NVM Express queue.  Each device has at least two (one for admin
363  * commands and one for I/O commands).
364  */
365 struct nvme_queue {
366 	struct nvme_dev *dev;
367 	struct nvme_descriptor_pools descriptor_pools;
368 	spinlock_t sq_lock;
369 	void *sq_cmds;
370 	 /* only used for poll queues: */
371 	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
372 	struct nvme_completion *cqes;
373 	dma_addr_t sq_dma_addr;
374 	dma_addr_t cq_dma_addr;
375 	u32 __iomem *q_db;
376 	u32 q_depth;
377 	u16 cq_vector;
378 	u16 sq_tail;
379 	u16 last_sq_tail;
380 	u16 cq_head;
381 	u16 qid;
382 	u8 cq_phase;
383 	u8 sqes;
384 	unsigned long flags;
385 #define NVMEQ_ENABLED		0
386 #define NVMEQ_SQ_CMB		1
387 #define NVMEQ_DELETE_ERROR	2
388 #define NVMEQ_POLLED		3
389 	__le32 *dbbuf_sq_db;
390 	__le32 *dbbuf_cq_db;
391 	__le32 *dbbuf_sq_ei;
392 	__le32 *dbbuf_cq_ei;
393 	struct completion delete_done;
394 };
395 
396 /* bits for iod->flags */
397 enum nvme_iod_flags {
398 	/* this command has been aborted by the timeout handler */
399 	IOD_ABORTED		= 1U << 0,
400 
401 	/* uses the small descriptor pool */
402 	IOD_SMALL_DESCRIPTOR	= 1U << 1,
403 
404 	/* single segment dma mapping */
405 	IOD_SINGLE_SEGMENT	= 1U << 2,
406 
407 	/* Data payload contains p2p memory */
408 	IOD_DATA_P2P		= 1U << 3,
409 
410 	/* Metadata contains p2p memory */
411 	IOD_META_P2P		= 1U << 4,
412 
413 	/* Data payload contains MMIO memory */
414 	IOD_DATA_MMIO		= 1U << 5,
415 
416 	/* Metadata contains MMIO memory */
417 	IOD_META_MMIO		= 1U << 6,
418 
419 	/* Metadata using non-coalesced MPTR */
420 	IOD_SINGLE_META_SEGMENT	= 1U << 7,
421 };
422 
423 struct nvme_dma_vec {
424 	dma_addr_t addr;
425 	unsigned int len;
426 };
427 
428 /*
429  * The nvme_iod describes the data in an I/O.
430  */
431 struct nvme_iod {
432 	struct nvme_request req;
433 	struct nvme_command cmd;
434 	u8 flags;
435 	u8 nr_descriptors;
436 
437 	size_t total_len;
438 	struct dma_iova_state dma_state;
439 	void *descriptors[NVME_MAX_NR_DESCRIPTORS];
440 	struct nvme_dma_vec *dma_vecs;
441 	unsigned int nr_dma_vecs;
442 
443 	dma_addr_t meta_dma;
444 	size_t meta_total_len;
445 	struct dma_iova_state meta_dma_state;
446 	struct nvme_sgl_desc *meta_descriptor;
447 };
448 
nvme_dbbuf_size(struct nvme_dev * dev)449 static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
450 {
451 	return dev->nr_allocated_queues * 8 * dev->db_stride;
452 }
453 
nvme_dbbuf_dma_alloc(struct nvme_dev * dev)454 static void nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
455 {
456 	unsigned int mem_size = nvme_dbbuf_size(dev);
457 
458 	if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP))
459 		return;
460 
461 	if (dev->dbbuf_dbs) {
462 		/*
463 		 * Clear the dbbuf memory so the driver doesn't observe stale
464 		 * values from the previous instantiation.
465 		 */
466 		memset(dev->dbbuf_dbs, 0, mem_size);
467 		memset(dev->dbbuf_eis, 0, mem_size);
468 		return;
469 	}
470 
471 	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
472 					    &dev->dbbuf_dbs_dma_addr,
473 					    GFP_KERNEL);
474 	if (!dev->dbbuf_dbs)
475 		goto fail;
476 	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
477 					    &dev->dbbuf_eis_dma_addr,
478 					    GFP_KERNEL);
479 	if (!dev->dbbuf_eis)
480 		goto fail_free_dbbuf_dbs;
481 	return;
482 
483 fail_free_dbbuf_dbs:
484 	dma_free_coherent(dev->dev, mem_size, dev->dbbuf_dbs,
485 			  dev->dbbuf_dbs_dma_addr);
486 	dev->dbbuf_dbs = NULL;
487 fail:
488 	dev_warn(dev->dev, "unable to allocate dma for dbbuf\n");
489 }
490 
nvme_dbbuf_dma_free(struct nvme_dev * dev)491 static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
492 {
493 	unsigned int mem_size = nvme_dbbuf_size(dev);
494 
495 	if (dev->dbbuf_dbs) {
496 		dma_free_coherent(dev->dev, mem_size,
497 				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
498 		dev->dbbuf_dbs = NULL;
499 	}
500 	if (dev->dbbuf_eis) {
501 		dma_free_coherent(dev->dev, mem_size,
502 				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
503 		dev->dbbuf_eis = NULL;
504 	}
505 }
506 
nvme_dbbuf_init(struct nvme_dev * dev,struct nvme_queue * nvmeq,int qid)507 static void nvme_dbbuf_init(struct nvme_dev *dev,
508 			    struct nvme_queue *nvmeq, int qid)
509 {
510 	if (!dev->dbbuf_dbs || !qid)
511 		return;
512 
513 	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
514 	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
515 	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
516 	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
517 }
518 
nvme_dbbuf_free(struct nvme_queue * nvmeq)519 static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
520 {
521 	if (!nvmeq->qid)
522 		return;
523 
524 	nvmeq->dbbuf_sq_db = NULL;
525 	nvmeq->dbbuf_cq_db = NULL;
526 	nvmeq->dbbuf_sq_ei = NULL;
527 	nvmeq->dbbuf_cq_ei = NULL;
528 }
529 
nvme_dbbuf_set(struct nvme_dev * dev)530 static void nvme_dbbuf_set(struct nvme_dev *dev)
531 {
532 	struct nvme_command c = { };
533 	unsigned int i;
534 
535 	if (!dev->dbbuf_dbs)
536 		return;
537 
538 	c.dbbuf.opcode = nvme_admin_dbbuf;
539 	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
540 	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
541 
542 	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
543 		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
544 		/* Free memory and continue on */
545 		nvme_dbbuf_dma_free(dev);
546 
547 		for (i = 1; i <= dev->online_queues; i++)
548 			nvme_dbbuf_free(&dev->queues[i]);
549 	}
550 }
551 
nvme_dbbuf_need_event(u16 event_idx,u16 new_idx,u16 old)552 static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
553 {
554 	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
555 }
556 
557 /* Update dbbuf and return true if an MMIO is required */
nvme_dbbuf_update_and_check_event(u16 value,__le32 * dbbuf_db,volatile __le32 * dbbuf_ei)558 static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
559 					      volatile __le32 *dbbuf_ei)
560 {
561 	if (dbbuf_db) {
562 		u16 old_value, event_idx;
563 
564 		/*
565 		 * Ensure that the queue is written before updating
566 		 * the doorbell in memory
567 		 */
568 		wmb();
569 
570 		old_value = le32_to_cpu(*dbbuf_db);
571 		*dbbuf_db = cpu_to_le32(value);
572 
573 		/*
574 		 * Ensure that the doorbell is updated before reading the event
575 		 * index from memory.  The controller needs to provide similar
576 		 * ordering to ensure the event index is updated before reading
577 		 * the doorbell.
578 		 */
579 		mb();
580 
581 		event_idx = le32_to_cpu(*dbbuf_ei);
582 		if (!nvme_dbbuf_need_event(event_idx, value, old_value))
583 			return false;
584 	}
585 
586 	return true;
587 }
588 
589 static struct nvme_descriptor_pools *
nvme_setup_descriptor_pools(struct nvme_dev * dev,unsigned numa_node)590 nvme_setup_descriptor_pools(struct nvme_dev *dev, unsigned numa_node)
591 {
592 	struct nvme_descriptor_pools *pools = &dev->descriptor_pools[numa_node];
593 	size_t small_align = NVME_SMALL_POOL_SIZE;
594 
595 	if (pools->small)
596 		return pools; /* already initialized */
597 
598 	pools->large = dma_pool_create_node("nvme descriptor page", dev->dev,
599 			NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE, 0, numa_node);
600 	if (!pools->large)
601 		return ERR_PTR(-ENOMEM);
602 
603 	if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512)
604 		small_align = 512;
605 
606 	pools->small = dma_pool_create_node("nvme descriptor small", dev->dev,
607 			NVME_SMALL_POOL_SIZE, small_align, 0, numa_node);
608 	if (!pools->small) {
609 		dma_pool_destroy(pools->large);
610 		pools->large = NULL;
611 		return ERR_PTR(-ENOMEM);
612 	}
613 
614 	return pools;
615 }
616 
nvme_release_descriptor_pools(struct nvme_dev * dev)617 static void nvme_release_descriptor_pools(struct nvme_dev *dev)
618 {
619 	unsigned i;
620 
621 	for (i = 0; i < nr_node_ids; i++) {
622 		struct nvme_descriptor_pools *pools = &dev->descriptor_pools[i];
623 
624 		dma_pool_destroy(pools->large);
625 		dma_pool_destroy(pools->small);
626 	}
627 }
628 
nvme_init_hctx_common(struct blk_mq_hw_ctx * hctx,void * data,unsigned qid)629 static int nvme_init_hctx_common(struct blk_mq_hw_ctx *hctx, void *data,
630 		unsigned qid)
631 {
632 	struct nvme_dev *dev = to_nvme_dev(data);
633 	struct nvme_queue *nvmeq = &dev->queues[qid];
634 	struct nvme_descriptor_pools *pools;
635 	struct blk_mq_tags *tags;
636 
637 	tags = qid ? dev->tagset.tags[qid - 1] : dev->admin_tagset.tags[0];
638 	WARN_ON(tags != hctx->tags);
639 	pools = nvme_setup_descriptor_pools(dev, hctx->numa_node);
640 	if (IS_ERR(pools))
641 		return PTR_ERR(pools);
642 
643 	nvmeq->descriptor_pools = *pools;
644 	hctx->driver_data = nvmeq;
645 	return 0;
646 }
647 
nvme_admin_init_hctx(struct blk_mq_hw_ctx * hctx,void * data,unsigned int hctx_idx)648 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
649 				unsigned int hctx_idx)
650 {
651 	WARN_ON(hctx_idx != 0);
652 	return nvme_init_hctx_common(hctx, data, 0);
653 }
654 
nvme_init_hctx(struct blk_mq_hw_ctx * hctx,void * data,unsigned int hctx_idx)655 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
656 			     unsigned int hctx_idx)
657 {
658 	return nvme_init_hctx_common(hctx, data, hctx_idx + 1);
659 }
660 
nvme_pci_init_request(struct blk_mq_tag_set * set,struct request * req,unsigned int hctx_idx,unsigned int numa_node)661 static int nvme_pci_init_request(struct blk_mq_tag_set *set,
662 		struct request *req, unsigned int hctx_idx,
663 		unsigned int numa_node)
664 {
665 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
666 
667 	nvme_req(req)->ctrl = set->driver_data;
668 	nvme_req(req)->cmd = &iod->cmd;
669 	return 0;
670 }
671 
queue_irq_offset(struct nvme_dev * dev)672 static int queue_irq_offset(struct nvme_dev *dev)
673 {
674 	/* if we have more than 1 vec, admin queue offsets us by 1 */
675 	if (dev->num_vecs > 1)
676 		return 1;
677 
678 	return 0;
679 }
680 
nvme_pci_map_queues(struct blk_mq_tag_set * set)681 static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
682 {
683 	struct nvme_dev *dev = to_nvme_dev(set->driver_data);
684 	int i, qoff, offset;
685 
686 	offset = queue_irq_offset(dev);
687 	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
688 		struct blk_mq_queue_map *map = &set->map[i];
689 
690 		map->nr_queues = dev->io_queues[i];
691 		if (!map->nr_queues) {
692 			BUG_ON(i == HCTX_TYPE_DEFAULT);
693 			continue;
694 		}
695 
696 		/*
697 		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
698 		 * affinity), so use the regular blk-mq cpu mapping
699 		 */
700 		map->queue_offset = qoff;
701 		if (i != HCTX_TYPE_POLL && offset)
702 			blk_mq_map_hw_queues(map, dev->dev, offset);
703 		else
704 			blk_mq_map_queues(map);
705 		qoff += map->nr_queues;
706 		offset += map->nr_queues;
707 	}
708 }
709 
710 /*
711  * Write sq tail if we are asked to, or if the next command would wrap.
712  */
nvme_write_sq_db(struct nvme_queue * nvmeq,bool write_sq)713 static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
714 {
715 	if (!write_sq) {
716 		u16 next_tail = nvmeq->sq_tail + 1;
717 
718 		if (next_tail == nvmeq->q_depth)
719 			next_tail = 0;
720 		if (next_tail != nvmeq->last_sq_tail)
721 			return;
722 	}
723 
724 	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
725 			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
726 		writel(nvmeq->sq_tail, nvmeq->q_db);
727 	nvmeq->last_sq_tail = nvmeq->sq_tail;
728 }
729 
nvme_sq_copy_cmd(struct nvme_queue * nvmeq,struct nvme_command * cmd)730 static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq,
731 				    struct nvme_command *cmd)
732 {
733 	memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
734 		absolute_pointer(cmd), sizeof(*cmd));
735 	if (++nvmeq->sq_tail == nvmeq->q_depth)
736 		nvmeq->sq_tail = 0;
737 }
738 
nvme_commit_rqs(struct blk_mq_hw_ctx * hctx)739 static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
740 {
741 	struct nvme_queue *nvmeq = hctx->driver_data;
742 
743 	spin_lock(&nvmeq->sq_lock);
744 	if (nvmeq->sq_tail != nvmeq->last_sq_tail)
745 		nvme_write_sq_db(nvmeq, true);
746 	spin_unlock(&nvmeq->sq_lock);
747 }
748 
749 enum nvme_use_sgl {
750 	SGL_UNSUPPORTED,
751 	SGL_SUPPORTED,
752 	SGL_FORCED,
753 };
754 
nvme_pci_metadata_use_sgls(struct request * req)755 static inline bool nvme_pci_metadata_use_sgls(struct request *req)
756 {
757 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
758 	struct nvme_dev *dev = nvmeq->dev;
759 
760 	if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl))
761 		return false;
762 	return req->nr_integrity_segments > 1 ||
763 		nvme_req(req)->flags & NVME_REQ_USERCMD;
764 }
765 
nvme_pci_use_sgls(struct nvme_dev * dev,struct request * req)766 static inline enum nvme_use_sgl nvme_pci_use_sgls(struct nvme_dev *dev,
767 		struct request *req)
768 {
769 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
770 
771 	if (nvmeq->qid && nvme_ctrl_sgl_supported(&dev->ctrl)) {
772 		/*
773 		 * When the controller is capable of using SGL, there are
774 		 * several conditions that we force to use it:
775 		 *
776 		 * 1. A request containing page gaps within the controller's
777 		 *    mask can not use the PRP format.
778 		 *
779 		 * 2. User commands use SGL because that lets the device
780 		 *    validate the requested transfer lengths.
781 		 *
782 		 * 3. Multiple integrity segments must use SGL as that's the
783 		 *    only way to describe such a command in NVMe.
784 		 */
785 		if (req_phys_gap_mask(req) & (NVME_CTRL_PAGE_SIZE - 1) ||
786 		    nvme_req(req)->flags & NVME_REQ_USERCMD ||
787 		    req->nr_integrity_segments > 1)
788 			return SGL_FORCED;
789 		return SGL_SUPPORTED;
790 	}
791 
792 	return SGL_UNSUPPORTED;
793 }
794 
nvme_pci_avg_seg_size(struct request * req)795 static unsigned int nvme_pci_avg_seg_size(struct request *req)
796 {
797 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
798 	unsigned int nseg;
799 
800 	if (blk_rq_dma_map_coalesce(&iod->dma_state))
801 		nseg = 1;
802 	else
803 		nseg = blk_rq_nr_phys_segments(req);
804 	return DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
805 }
806 
nvme_dma_pool(struct nvme_queue * nvmeq,struct nvme_iod * iod)807 static inline struct dma_pool *nvme_dma_pool(struct nvme_queue *nvmeq,
808 		struct nvme_iod *iod)
809 {
810 	if (iod->flags & IOD_SMALL_DESCRIPTOR)
811 		return nvmeq->descriptor_pools.small;
812 	return nvmeq->descriptor_pools.large;
813 }
814 
nvme_pci_cmd_use_meta_sgl(struct nvme_command * cmd)815 static inline bool nvme_pci_cmd_use_meta_sgl(struct nvme_command *cmd)
816 {
817 	return (cmd->common.flags & NVME_CMD_SGL_ALL) == NVME_CMD_SGL_METASEG;
818 }
819 
nvme_pci_cmd_use_sgl(struct nvme_command * cmd)820 static inline bool nvme_pci_cmd_use_sgl(struct nvme_command *cmd)
821 {
822 	return cmd->common.flags &
823 		(NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG);
824 }
825 
nvme_pci_first_desc_dma_addr(struct nvme_command * cmd)826 static inline dma_addr_t nvme_pci_first_desc_dma_addr(struct nvme_command *cmd)
827 {
828 	if (nvme_pci_cmd_use_sgl(cmd))
829 		return le64_to_cpu(cmd->common.dptr.sgl.addr);
830 	return le64_to_cpu(cmd->common.dptr.prp2);
831 }
832 
nvme_free_descriptors(struct request * req)833 static void nvme_free_descriptors(struct request *req)
834 {
835 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
836 	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
837 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
838 	dma_addr_t dma_addr = nvme_pci_first_desc_dma_addr(&iod->cmd);
839 	int i;
840 
841 	if (iod->nr_descriptors == 1) {
842 		dma_pool_free(nvme_dma_pool(nvmeq, iod), iod->descriptors[0],
843 				dma_addr);
844 		return;
845 	}
846 
847 	for (i = 0; i < iod->nr_descriptors; i++) {
848 		__le64 *prp_list = iod->descriptors[i];
849 		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
850 
851 		dma_pool_free(nvmeq->descriptor_pools.large, prp_list,
852 				dma_addr);
853 		dma_addr = next_dma_addr;
854 	}
855 }
856 
nvme_free_prps(struct request * req,unsigned int attrs)857 static void nvme_free_prps(struct request *req, unsigned int attrs)
858 {
859 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
860 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
861 	unsigned int i;
862 
863 	for (i = 0; i < iod->nr_dma_vecs; i++)
864 		dma_unmap_phys(nvmeq->dev->dev, iod->dma_vecs[i].addr,
865 			       iod->dma_vecs[i].len, rq_dma_dir(req), attrs);
866 	mempool_free(iod->dma_vecs, nvmeq->dev->dmavec_mempool);
867 }
868 
nvme_free_sgls(struct request * req,struct nvme_sgl_desc * sge,struct nvme_sgl_desc * sg_list,unsigned int attrs)869 static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge,
870 		struct nvme_sgl_desc *sg_list, unsigned int attrs)
871 {
872 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
873 	enum dma_data_direction dir = rq_dma_dir(req);
874 	unsigned int len = le32_to_cpu(sge->length);
875 	struct device *dma_dev = nvmeq->dev->dev;
876 	unsigned int i;
877 
878 	if (sge->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
879 		dma_unmap_phys(dma_dev, le64_to_cpu(sge->addr), len, dir,
880 			       attrs);
881 		return;
882 	}
883 
884 	for (i = 0; i < len / sizeof(*sg_list); i++)
885 		dma_unmap_phys(dma_dev, le64_to_cpu(sg_list[i].addr),
886 			le32_to_cpu(sg_list[i].length), dir, attrs);
887 }
888 
nvme_unmap_metadata(struct request * req)889 static void nvme_unmap_metadata(struct request *req)
890 {
891 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
892 	enum pci_p2pdma_map_type map = PCI_P2PDMA_MAP_NONE;
893 	enum dma_data_direction dir = rq_dma_dir(req);
894 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
895 	struct device *dma_dev = nvmeq->dev->dev;
896 	struct nvme_sgl_desc *sge = iod->meta_descriptor;
897 	unsigned int attrs = 0;
898 
899 	if (iod->flags & IOD_SINGLE_META_SEGMENT) {
900 		dma_unmap_page(dma_dev, iod->meta_dma,
901 			       rq_integrity_vec(req).bv_len,
902 			       rq_dma_dir(req));
903 		return;
904 	}
905 
906 	if (iod->flags & IOD_META_P2P)
907 		map = PCI_P2PDMA_MAP_BUS_ADDR;
908 	else if (iod->flags & IOD_META_MMIO) {
909 		map = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
910 		attrs |= DMA_ATTR_MMIO;
911 	}
912 
913 	if (!blk_rq_dma_unmap(req, dma_dev, &iod->meta_dma_state,
914 			      iod->meta_total_len, map)) {
915 		if (nvme_pci_cmd_use_meta_sgl(&iod->cmd))
916 			nvme_free_sgls(req, sge, &sge[1], attrs);
917 		else
918 			dma_unmap_phys(dma_dev, iod->meta_dma,
919 				       iod->meta_total_len, dir, attrs);
920 	}
921 
922 	if (iod->meta_descriptor)
923 		dma_pool_free(nvmeq->descriptor_pools.small,
924 			      iod->meta_descriptor, iod->meta_dma);
925 }
926 
nvme_unmap_data(struct request * req)927 static void nvme_unmap_data(struct request *req)
928 {
929 	enum pci_p2pdma_map_type map = PCI_P2PDMA_MAP_NONE;
930 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
931 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
932 	struct device *dma_dev = nvmeq->dev->dev;
933 	unsigned int attrs = 0;
934 
935 	if (iod->flags & IOD_SINGLE_SEGMENT) {
936 		static_assert(offsetof(union nvme_data_ptr, prp1) ==
937 				offsetof(union nvme_data_ptr, sgl.addr));
938 		dma_unmap_page(dma_dev, le64_to_cpu(iod->cmd.common.dptr.prp1),
939 				iod->total_len, rq_dma_dir(req));
940 		return;
941 	}
942 
943 	if (iod->flags & IOD_DATA_P2P)
944 		map = PCI_P2PDMA_MAP_BUS_ADDR;
945 	else if (iod->flags & IOD_DATA_MMIO) {
946 		map = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
947 		attrs |= DMA_ATTR_MMIO;
948 	}
949 
950 	if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len,
951 			      map)) {
952 		if (nvme_pci_cmd_use_sgl(&iod->cmd))
953 			nvme_free_sgls(req, &iod->cmd.common.dptr.sgl,
954 			               iod->descriptors[0], attrs);
955 		else
956 			nvme_free_prps(req, attrs);
957 	}
958 
959 	if (iod->nr_descriptors)
960 		nvme_free_descriptors(req);
961 }
962 
nvme_pci_prp_save_mapping(struct request * req,struct device * dma_dev,struct blk_dma_iter * iter)963 static bool nvme_pci_prp_save_mapping(struct request *req,
964 				      struct device *dma_dev,
965 				      struct blk_dma_iter *iter)
966 {
967 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
968 
969 	if (dma_use_iova(&iod->dma_state) || !dma_need_unmap(dma_dev))
970 		return true;
971 
972 	if (!iod->nr_dma_vecs) {
973 		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
974 
975 		iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
976 				GFP_ATOMIC);
977 		if (!iod->dma_vecs) {
978 			iter->status = BLK_STS_RESOURCE;
979 			return false;
980 		}
981 	}
982 
983 	iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
984 	iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
985 	iod->nr_dma_vecs++;
986 	return true;
987 }
988 
nvme_pci_prp_iter_next(struct request * req,struct device * dma_dev,struct blk_dma_iter * iter)989 static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
990 		struct blk_dma_iter *iter)
991 {
992 	if (iter->len)
993 		return true;
994 	if (!blk_rq_dma_map_iter_next(req, dma_dev, iter))
995 		return false;
996 	return nvme_pci_prp_save_mapping(req, dma_dev, iter);
997 }
998 
nvme_pci_setup_data_prp(struct request * req,struct blk_dma_iter * iter)999 static blk_status_t nvme_pci_setup_data_prp(struct request *req,
1000 		struct blk_dma_iter *iter)
1001 {
1002 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1003 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1004 	unsigned int length = blk_rq_payload_bytes(req);
1005 	dma_addr_t prp1_dma, prp2_dma = 0;
1006 	unsigned int prp_len, i;
1007 	__le64 *prp_list;
1008 
1009 	if (!nvme_pci_prp_save_mapping(req, nvmeq->dev->dev, iter))
1010 		return iter->status;
1011 
1012 	/*
1013 	 * PRP1 always points to the start of the DMA transfers.
1014 	 *
1015 	 * This is the only PRP (except for the list entries) that could be
1016 	 * non-aligned.
1017 	 */
1018 	prp1_dma = iter->addr;
1019 	prp_len = min(length, NVME_CTRL_PAGE_SIZE -
1020 			(iter->addr & (NVME_CTRL_PAGE_SIZE - 1)));
1021 	iod->total_len += prp_len;
1022 	iter->addr += prp_len;
1023 	iter->len -= prp_len;
1024 	length -= prp_len;
1025 	if (!length)
1026 		goto done;
1027 
1028 	if (!nvme_pci_prp_iter_next(req, nvmeq->dev->dev, iter)) {
1029 		if (WARN_ON_ONCE(!iter->status))
1030 			goto bad_sgl;
1031 		goto done;
1032 	}
1033 
1034 	/*
1035 	 * PRP2 is usually a list, but can point to data if all data to be
1036 	 * transferred fits into PRP1 + PRP2:
1037 	 */
1038 	if (length <= NVME_CTRL_PAGE_SIZE) {
1039 		prp2_dma = iter->addr;
1040 		iod->total_len += length;
1041 		goto done;
1042 	}
1043 
1044 	if (DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE) <=
1045 	    NVME_SMALL_POOL_SIZE / sizeof(__le64))
1046 		iod->flags |= IOD_SMALL_DESCRIPTOR;
1047 
1048 	prp_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC,
1049 			&prp2_dma);
1050 	if (!prp_list) {
1051 		iter->status = BLK_STS_RESOURCE;
1052 		goto done;
1053 	}
1054 	iod->descriptors[iod->nr_descriptors++] = prp_list;
1055 
1056 	i = 0;
1057 	for (;;) {
1058 		prp_list[i++] = cpu_to_le64(iter->addr);
1059 		prp_len = min(length, NVME_CTRL_PAGE_SIZE);
1060 		if (WARN_ON_ONCE(iter->len < prp_len))
1061 			goto bad_sgl;
1062 
1063 		iod->total_len += prp_len;
1064 		iter->addr += prp_len;
1065 		iter->len -= prp_len;
1066 		length -= prp_len;
1067 		if (!length)
1068 			break;
1069 
1070 		if (!nvme_pci_prp_iter_next(req, nvmeq->dev->dev, iter)) {
1071 			if (WARN_ON_ONCE(!iter->status))
1072 				goto bad_sgl;
1073 			goto done;
1074 		}
1075 
1076 		/*
1077 		 * If we've filled the entire descriptor, allocate a new that is
1078 		 * pointed to be the last entry in the previous PRP list.  To
1079 		 * accommodate for that move the last actual entry to the new
1080 		 * descriptor.
1081 		 */
1082 		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
1083 			__le64 *old_prp_list = prp_list;
1084 			dma_addr_t prp_list_dma;
1085 
1086 			prp_list = dma_pool_alloc(nvmeq->descriptor_pools.large,
1087 					GFP_ATOMIC, &prp_list_dma);
1088 			if (!prp_list) {
1089 				iter->status = BLK_STS_RESOURCE;
1090 				goto done;
1091 			}
1092 			iod->descriptors[iod->nr_descriptors++] = prp_list;
1093 
1094 			prp_list[0] = old_prp_list[i - 1];
1095 			old_prp_list[i - 1] = cpu_to_le64(prp_list_dma);
1096 			i = 1;
1097 		}
1098 	}
1099 
1100 done:
1101 	/*
1102 	 * nvme_unmap_data uses the DPT field in the SQE to tear down the
1103 	 * mapping, so initialize it even for failures.
1104 	 */
1105 	iod->cmd.common.dptr.prp1 = cpu_to_le64(prp1_dma);
1106 	iod->cmd.common.dptr.prp2 = cpu_to_le64(prp2_dma);
1107 	if (unlikely(iter->status))
1108 		nvme_unmap_data(req);
1109 	return iter->status;
1110 
1111 bad_sgl:
1112 	dev_err_once(nvmeq->dev->dev,
1113 		"Incorrectly formed request for payload:%d nents:%d\n",
1114 		blk_rq_payload_bytes(req), blk_rq_nr_phys_segments(req));
1115 	return BLK_STS_IOERR;
1116 }
1117 
nvme_pci_sgl_set_data(struct nvme_sgl_desc * sge,struct blk_dma_iter * iter)1118 static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
1119 		struct blk_dma_iter *iter)
1120 {
1121 	sge->addr = cpu_to_le64(iter->addr);
1122 	sge->length = cpu_to_le32(iter->len);
1123 	sge->type = NVME_SGL_FMT_DATA_DESC << 4;
1124 }
1125 
nvme_pci_sgl_set_seg(struct nvme_sgl_desc * sge,dma_addr_t dma_addr,int entries)1126 static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
1127 		dma_addr_t dma_addr, int entries)
1128 {
1129 	sge->addr = cpu_to_le64(dma_addr);
1130 	sge->length = cpu_to_le32(entries * sizeof(*sge));
1131 	sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
1132 }
1133 
nvme_pci_setup_data_sgl(struct request * req,struct blk_dma_iter * iter)1134 static blk_status_t nvme_pci_setup_data_sgl(struct request *req,
1135 		struct blk_dma_iter *iter)
1136 {
1137 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1138 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1139 	unsigned int entries = blk_rq_nr_phys_segments(req);
1140 	struct nvme_sgl_desc *sg_list;
1141 	dma_addr_t sgl_dma;
1142 	unsigned int mapped = 0;
1143 
1144 	/* set the transfer type as SGL */
1145 	iod->cmd.common.flags = NVME_CMD_SGL_METABUF;
1146 
1147 	if (entries == 1 || blk_rq_dma_map_coalesce(&iod->dma_state)) {
1148 		nvme_pci_sgl_set_data(&iod->cmd.common.dptr.sgl, iter);
1149 		iod->total_len += iter->len;
1150 		return BLK_STS_OK;
1151 	}
1152 
1153 	if (entries <= NVME_SMALL_POOL_SIZE / sizeof(*sg_list))
1154 		iod->flags |= IOD_SMALL_DESCRIPTOR;
1155 
1156 	sg_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC,
1157 			&sgl_dma);
1158 	if (!sg_list)
1159 		return BLK_STS_RESOURCE;
1160 	iod->descriptors[iod->nr_descriptors++] = sg_list;
1161 
1162 	do {
1163 		if (WARN_ON_ONCE(mapped == entries)) {
1164 			iter->status = BLK_STS_IOERR;
1165 			break;
1166 		}
1167 		nvme_pci_sgl_set_data(&sg_list[mapped++], iter);
1168 		iod->total_len += iter->len;
1169 	} while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, iter));
1170 
1171 	nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped);
1172 	if (unlikely(iter->status))
1173 		nvme_unmap_data(req);
1174 	return iter->status;
1175 }
1176 
nvme_pci_setup_data_simple(struct request * req,enum nvme_use_sgl use_sgl)1177 static blk_status_t nvme_pci_setup_data_simple(struct request *req,
1178 		enum nvme_use_sgl use_sgl)
1179 {
1180 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1181 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1182 	struct bio_vec bv = req_bvec(req);
1183 	unsigned int prp1_offset = bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
1184 	bool prp_possible = prp1_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2;
1185 	dma_addr_t dma_addr;
1186 
1187 	if (!use_sgl && !prp_possible)
1188 		return BLK_STS_AGAIN;
1189 	if (is_pci_p2pdma_page(bv.bv_page))
1190 		return BLK_STS_AGAIN;
1191 
1192 	dma_addr = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0);
1193 	if (dma_mapping_error(nvmeq->dev->dev, dma_addr))
1194 		return BLK_STS_RESOURCE;
1195 	iod->total_len = bv.bv_len;
1196 	iod->flags |= IOD_SINGLE_SEGMENT;
1197 
1198 	if (use_sgl == SGL_FORCED || !prp_possible) {
1199 		iod->cmd.common.flags = NVME_CMD_SGL_METABUF;
1200 		iod->cmd.common.dptr.sgl.addr = cpu_to_le64(dma_addr);
1201 		iod->cmd.common.dptr.sgl.length = cpu_to_le32(bv.bv_len);
1202 		iod->cmd.common.dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
1203 	} else {
1204 		unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - prp1_offset;
1205 
1206 		iod->cmd.common.dptr.prp1 = cpu_to_le64(dma_addr);
1207 		iod->cmd.common.dptr.prp2 = 0;
1208 		if (bv.bv_len > first_prp_len)
1209 			iod->cmd.common.dptr.prp2 =
1210 				cpu_to_le64(dma_addr + first_prp_len);
1211 	}
1212 
1213 	return BLK_STS_OK;
1214 }
1215 
nvme_map_data(struct request * req)1216 static blk_status_t nvme_map_data(struct request *req)
1217 {
1218 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1219 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1220 	struct nvme_dev *dev = nvmeq->dev;
1221 	enum nvme_use_sgl use_sgl = nvme_pci_use_sgls(dev, req);
1222 	struct blk_dma_iter iter;
1223 	blk_status_t ret;
1224 
1225 	/*
1226 	 * Try to skip the DMA iterator for single segment requests, as that
1227 	 * significantly improves performances for small I/O sizes.
1228 	 */
1229 	if (blk_rq_nr_phys_segments(req) == 1) {
1230 		ret = nvme_pci_setup_data_simple(req, use_sgl);
1231 		if (ret != BLK_STS_AGAIN)
1232 			return ret;
1233 	}
1234 
1235 	if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter))
1236 		return iter.status;
1237 
1238 	switch (iter.p2pdma.map) {
1239 	case PCI_P2PDMA_MAP_BUS_ADDR:
1240 		iod->flags |= IOD_DATA_P2P;
1241 		break;
1242 	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
1243 		iod->flags |= IOD_DATA_MMIO;
1244 		break;
1245 	case PCI_P2PDMA_MAP_NONE:
1246 		break;
1247 	default:
1248 		return BLK_STS_RESOURCE;
1249 	}
1250 
1251 	if (use_sgl == SGL_FORCED ||
1252 	    (use_sgl == SGL_SUPPORTED &&
1253 	     (sgl_threshold && nvme_pci_avg_seg_size(req) >= sgl_threshold)))
1254 		return nvme_pci_setup_data_sgl(req, &iter);
1255 	return nvme_pci_setup_data_prp(req, &iter);
1256 }
1257 
nvme_pci_setup_meta_iter(struct request * req)1258 static blk_status_t nvme_pci_setup_meta_iter(struct request *req)
1259 {
1260 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1261 	unsigned int entries = req->nr_integrity_segments;
1262 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1263 	struct nvme_dev *dev = nvmeq->dev;
1264 	struct nvme_sgl_desc *sg_list;
1265 	struct blk_dma_iter iter;
1266 	dma_addr_t sgl_dma;
1267 	int i = 0;
1268 
1269 	if (!blk_rq_integrity_dma_map_iter_start(req, dev->dev,
1270 						&iod->meta_dma_state, &iter))
1271 		return iter.status;
1272 
1273 	switch (iter.p2pdma.map) {
1274 	case PCI_P2PDMA_MAP_BUS_ADDR:
1275 		iod->flags |= IOD_META_P2P;
1276 		break;
1277 	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
1278 		iod->flags |= IOD_META_MMIO;
1279 		break;
1280 	case PCI_P2PDMA_MAP_NONE:
1281 		break;
1282 	default:
1283 		return BLK_STS_RESOURCE;
1284 	}
1285 
1286 	if (blk_rq_dma_map_coalesce(&iod->meta_dma_state))
1287 		entries = 1;
1288 
1289 	/*
1290 	 * The NVMe MPTR descriptor has an implicit length that the host and
1291 	 * device must agree on to avoid data/memory corruption. We trust the
1292 	 * kernel allocated correctly based on the format's parameters, so use
1293 	 * the more efficient MPTR to avoid extra dma pool allocations for the
1294 	 * SGL indirection.
1295 	 *
1296 	 * But for user commands, we don't necessarily know what they do, so
1297 	 * the driver can't validate the metadata buffer size. The SGL
1298 	 * descriptor provides an explicit length, so we're relying on that
1299 	 * mechanism to catch any misunderstandings between the application and
1300 	 * device.
1301 	 *
1302 	 * P2P DMA also needs to use the blk_dma_iter method, so mptr setup
1303 	 * leverages this routine when that happens.
1304 	 */
1305 	if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl) ||
1306 	    (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD))) {
1307 		iod->cmd.common.metadata = cpu_to_le64(iter.addr);
1308 		iod->meta_total_len = iter.len;
1309 		iod->meta_dma = iter.addr;
1310 		iod->meta_descriptor = NULL;
1311 		return BLK_STS_OK;
1312 	}
1313 
1314 	sg_list = dma_pool_alloc(nvmeq->descriptor_pools.small, GFP_ATOMIC,
1315 			&sgl_dma);
1316 	if (!sg_list)
1317 		return BLK_STS_RESOURCE;
1318 
1319 	iod->meta_descriptor = sg_list;
1320 	iod->meta_dma = sgl_dma;
1321 	iod->cmd.common.flags = NVME_CMD_SGL_METASEG;
1322 	iod->cmd.common.metadata = cpu_to_le64(sgl_dma);
1323 	if (entries == 1) {
1324 		iod->meta_total_len = iter.len;
1325 		nvme_pci_sgl_set_data(sg_list, &iter);
1326 		return BLK_STS_OK;
1327 	}
1328 
1329 	sgl_dma += sizeof(*sg_list);
1330 	do {
1331 		nvme_pci_sgl_set_data(&sg_list[++i], &iter);
1332 		iod->meta_total_len += iter.len;
1333 	} while (blk_rq_integrity_dma_map_iter_next(req, dev->dev, &iter));
1334 
1335 	nvme_pci_sgl_set_seg(sg_list, sgl_dma, i);
1336 	if (unlikely(iter.status))
1337 		nvme_unmap_metadata(req);
1338 	return iter.status;
1339 }
1340 
nvme_pci_setup_meta_mptr(struct request * req)1341 static blk_status_t nvme_pci_setup_meta_mptr(struct request *req)
1342 {
1343 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1344 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1345 	struct bio_vec bv = rq_integrity_vec(req);
1346 
1347 	if (is_pci_p2pdma_page(bv.bv_page))
1348 		return nvme_pci_setup_meta_iter(req);
1349 
1350 	iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0);
1351 	if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma))
1352 		return BLK_STS_IOERR;
1353 	iod->cmd.common.metadata = cpu_to_le64(iod->meta_dma);
1354 	iod->flags |= IOD_SINGLE_META_SEGMENT;
1355 	return BLK_STS_OK;
1356 }
1357 
nvme_map_metadata(struct request * req)1358 static blk_status_t nvme_map_metadata(struct request *req)
1359 {
1360 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1361 
1362 	if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) &&
1363 	    nvme_pci_metadata_use_sgls(req))
1364 		return nvme_pci_setup_meta_iter(req);
1365 	return nvme_pci_setup_meta_mptr(req);
1366 }
1367 
nvme_prep_rq(struct request * req)1368 static blk_status_t nvme_prep_rq(struct request *req)
1369 {
1370 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1371 	blk_status_t ret;
1372 
1373 	iod->flags = 0;
1374 	iod->nr_descriptors = 0;
1375 	iod->total_len = 0;
1376 	iod->meta_total_len = 0;
1377 	iod->nr_dma_vecs = 0;
1378 
1379 	ret = nvme_setup_cmd(req->q->queuedata, req);
1380 	if (ret)
1381 		return ret;
1382 
1383 	if (blk_rq_nr_phys_segments(req)) {
1384 		ret = nvme_map_data(req);
1385 		if (ret)
1386 			goto out_free_cmd;
1387 	}
1388 
1389 	if (blk_integrity_rq(req)) {
1390 		ret = nvme_map_metadata(req);
1391 		if (ret)
1392 			goto out_unmap_data;
1393 	}
1394 
1395 	nvme_start_request(req);
1396 	return BLK_STS_OK;
1397 out_unmap_data:
1398 	if (blk_rq_nr_phys_segments(req))
1399 		nvme_unmap_data(req);
1400 out_free_cmd:
1401 	nvme_cleanup_cmd(req);
1402 	return ret;
1403 }
1404 
nvme_queue_rq(struct blk_mq_hw_ctx * hctx,const struct blk_mq_queue_data * bd)1405 static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
1406 			 const struct blk_mq_queue_data *bd)
1407 {
1408 	struct nvme_queue *nvmeq = hctx->driver_data;
1409 	struct nvme_dev *dev = nvmeq->dev;
1410 	struct request *req = bd->rq;
1411 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1412 	blk_status_t ret;
1413 
1414 	/*
1415 	 * We should not need to do this, but we're still using this to
1416 	 * ensure we can drain requests on a dying queue.
1417 	 */
1418 	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
1419 		return BLK_STS_IOERR;
1420 
1421 	if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
1422 		return nvme_fail_nonready_command(&dev->ctrl, req);
1423 
1424 	ret = nvme_prep_rq(req);
1425 	if (unlikely(ret))
1426 		return ret;
1427 	spin_lock(&nvmeq->sq_lock);
1428 	nvme_sq_copy_cmd(nvmeq, &iod->cmd);
1429 	nvme_write_sq_db(nvmeq, bd->last);
1430 	spin_unlock(&nvmeq->sq_lock);
1431 	return BLK_STS_OK;
1432 }
1433 
nvme_submit_cmds(struct nvme_queue * nvmeq,struct rq_list * rqlist)1434 static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct rq_list *rqlist)
1435 {
1436 	struct request *req;
1437 
1438 	if (rq_list_empty(rqlist))
1439 		return;
1440 
1441 	spin_lock(&nvmeq->sq_lock);
1442 	while ((req = rq_list_pop(rqlist))) {
1443 		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1444 
1445 		nvme_sq_copy_cmd(nvmeq, &iod->cmd);
1446 	}
1447 	nvme_write_sq_db(nvmeq, true);
1448 	spin_unlock(&nvmeq->sq_lock);
1449 }
1450 
nvme_prep_rq_batch(struct nvme_queue * nvmeq,struct request * req)1451 static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
1452 {
1453 	/*
1454 	 * We should not need to do this, but we're still using this to
1455 	 * ensure we can drain requests on a dying queue.
1456 	 */
1457 	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
1458 		return false;
1459 	if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
1460 		return false;
1461 
1462 	return nvme_prep_rq(req) == BLK_STS_OK;
1463 }
1464 
nvme_queue_rqs(struct rq_list * rqlist)1465 static void nvme_queue_rqs(struct rq_list *rqlist)
1466 {
1467 	struct rq_list submit_list = { };
1468 	struct rq_list requeue_list = { };
1469 	struct nvme_queue *nvmeq = NULL;
1470 	struct request *req;
1471 
1472 	while ((req = rq_list_pop(rqlist))) {
1473 		if (nvmeq && nvmeq != req->mq_hctx->driver_data)
1474 			nvme_submit_cmds(nvmeq, &submit_list);
1475 		nvmeq = req->mq_hctx->driver_data;
1476 
1477 		if (nvme_prep_rq_batch(nvmeq, req))
1478 			rq_list_add_tail(&submit_list, req);
1479 		else
1480 			rq_list_add_tail(&requeue_list, req);
1481 	}
1482 
1483 	if (nvmeq)
1484 		nvme_submit_cmds(nvmeq, &submit_list);
1485 	*rqlist = requeue_list;
1486 }
1487 
nvme_pci_unmap_rq(struct request * req)1488 static __always_inline void nvme_pci_unmap_rq(struct request *req)
1489 {
1490 	if (blk_integrity_rq(req))
1491 		nvme_unmap_metadata(req);
1492 	if (blk_rq_nr_phys_segments(req))
1493 		nvme_unmap_data(req);
1494 }
1495 
nvme_pci_complete_rq(struct request * req)1496 static void nvme_pci_complete_rq(struct request *req)
1497 {
1498 	nvme_pci_unmap_rq(req);
1499 	nvme_complete_rq(req);
1500 }
1501 
nvme_pci_complete_batch(struct io_comp_batch * iob)1502 static void nvme_pci_complete_batch(struct io_comp_batch *iob)
1503 {
1504 	nvme_complete_batch(iob, nvme_pci_unmap_rq);
1505 }
1506 
1507 /* We read the CQE phase first to check if the rest of the entry is valid */
nvme_cqe_pending(struct nvme_queue * nvmeq)1508 static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
1509 {
1510 	struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];
1511 
1512 	return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
1513 }
1514 
nvme_ring_cq_doorbell(struct nvme_queue * nvmeq)1515 static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
1516 {
1517 	u16 head = nvmeq->cq_head;
1518 
1519 	if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
1520 					      nvmeq->dbbuf_cq_ei))
1521 		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
1522 }
1523 
nvme_queue_tagset(struct nvme_queue * nvmeq)1524 static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
1525 {
1526 	if (!nvmeq->qid)
1527 		return nvmeq->dev->admin_tagset.tags[0];
1528 	return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
1529 }
1530 
nvme_handle_cqe(struct nvme_queue * nvmeq,struct io_comp_batch * iob,u16 idx)1531 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
1532 				   struct io_comp_batch *iob, u16 idx)
1533 {
1534 	struct nvme_completion *cqe = &nvmeq->cqes[idx];
1535 	__u16 command_id = READ_ONCE(cqe->command_id);
1536 	struct request *req;
1537 
1538 	/*
1539 	 * AEN requests are special as they don't time out and can
1540 	 * survive any kind of queue freeze and often don't respond to
1541 	 * aborts.  We don't even bother to allocate a struct request
1542 	 * for them but rather special case them here.
1543 	 */
1544 	if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
1545 		nvme_complete_async_event(&nvmeq->dev->ctrl,
1546 				cqe->status, &cqe->result);
1547 		return;
1548 	}
1549 
1550 	req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
1551 	if (unlikely(!req)) {
1552 		dev_warn(nvmeq->dev->ctrl.device,
1553 			"invalid id %d completed on queue %d\n",
1554 			command_id, le16_to_cpu(cqe->sq_id));
1555 		return;
1556 	}
1557 
1558 	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
1559 	if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
1560 	    !blk_mq_add_to_batch(req, iob,
1561 				 nvme_req(req)->status != NVME_SC_SUCCESS,
1562 				 nvme_pci_complete_batch))
1563 		nvme_pci_complete_rq(req);
1564 }
1565 
nvme_update_cq_head(struct nvme_queue * nvmeq)1566 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
1567 {
1568 	u32 tmp = nvmeq->cq_head + 1;
1569 
1570 	if (tmp == nvmeq->q_depth) {
1571 		nvmeq->cq_head = 0;
1572 		nvmeq->cq_phase ^= 1;
1573 	} else {
1574 		nvmeq->cq_head = tmp;
1575 	}
1576 }
1577 
nvme_poll_cq(struct nvme_queue * nvmeq,struct io_comp_batch * iob)1578 static inline bool nvme_poll_cq(struct nvme_queue *nvmeq,
1579 			        struct io_comp_batch *iob)
1580 {
1581 	bool found = false;
1582 
1583 	while (nvme_cqe_pending(nvmeq)) {
1584 		found = true;
1585 		/*
1586 		 * load-load control dependency between phase and the rest of
1587 		 * the cqe requires a full read memory barrier
1588 		 */
1589 		dma_rmb();
1590 		nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head);
1591 		nvme_update_cq_head(nvmeq);
1592 	}
1593 
1594 	if (found)
1595 		nvme_ring_cq_doorbell(nvmeq);
1596 	return found;
1597 }
1598 
nvme_irq(int irq,void * data)1599 static irqreturn_t nvme_irq(int irq, void *data)
1600 {
1601 	struct nvme_queue *nvmeq = data;
1602 	DEFINE_IO_COMP_BATCH(iob);
1603 
1604 	if (nvme_poll_cq(nvmeq, &iob)) {
1605 		if (!rq_list_empty(&iob.req_list))
1606 			nvme_pci_complete_batch(&iob);
1607 		return IRQ_HANDLED;
1608 	}
1609 	return IRQ_NONE;
1610 }
1611 
nvme_irq_check(int irq,void * data)1612 static irqreturn_t nvme_irq_check(int irq, void *data)
1613 {
1614 	struct nvme_queue *nvmeq = data;
1615 
1616 	if (nvme_cqe_pending(nvmeq))
1617 		return IRQ_WAKE_THREAD;
1618 	return IRQ_NONE;
1619 }
1620 
1621 /*
1622  * Poll for completions for any interrupt driven queue
1623  * Can be called from any context.
1624  */
nvme_poll_irqdisable(struct nvme_queue * nvmeq)1625 static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
1626 {
1627 	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
1628 
1629 	WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
1630 
1631 	disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
1632 	spin_lock(&nvmeq->cq_poll_lock);
1633 	nvme_poll_cq(nvmeq, NULL);
1634 	spin_unlock(&nvmeq->cq_poll_lock);
1635 	enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
1636 }
1637 
nvme_poll(struct blk_mq_hw_ctx * hctx,struct io_comp_batch * iob)1638 static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
1639 {
1640 	struct nvme_queue *nvmeq = hctx->driver_data;
1641 	bool found;
1642 
1643 	if (!test_bit(NVMEQ_POLLED, &nvmeq->flags) ||
1644 	    !nvme_cqe_pending(nvmeq))
1645 		return 0;
1646 
1647 	spin_lock(&nvmeq->cq_poll_lock);
1648 	found = nvme_poll_cq(nvmeq, iob);
1649 	spin_unlock(&nvmeq->cq_poll_lock);
1650 
1651 	return found;
1652 }
1653 
nvme_pci_submit_async_event(struct nvme_ctrl * ctrl)1654 static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
1655 {
1656 	struct nvme_dev *dev = to_nvme_dev(ctrl);
1657 	struct nvme_queue *nvmeq = &dev->queues[0];
1658 	struct nvme_command c = { };
1659 
1660 	c.common.opcode = nvme_admin_async_event;
1661 	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
1662 
1663 	spin_lock(&nvmeq->sq_lock);
1664 	nvme_sq_copy_cmd(nvmeq, &c);
1665 	nvme_write_sq_db(nvmeq, true);
1666 	spin_unlock(&nvmeq->sq_lock);
1667 }
1668 
nvme_pci_subsystem_reset(struct nvme_ctrl * ctrl)1669 static int nvme_pci_subsystem_reset(struct nvme_ctrl *ctrl)
1670 {
1671 	struct nvme_dev *dev = to_nvme_dev(ctrl);
1672 	int ret = 0;
1673 
1674 	/*
1675 	 * Taking the shutdown_lock ensures the BAR mapping is not being
1676 	 * altered by reset_work. Holding this lock before the RESETTING state
1677 	 * change, if successful, also ensures nvme_remove won't be able to
1678 	 * proceed to iounmap until we're done.
1679 	 */
1680 	mutex_lock(&dev->shutdown_lock);
1681 	if (!dev->bar_mapped_size) {
1682 		ret = -ENODEV;
1683 		goto unlock;
1684 	}
1685 
1686 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
1687 		ret = -EBUSY;
1688 		goto unlock;
1689 	}
1690 
1691 	writel(NVME_SUBSYS_RESET, dev->bar + NVME_REG_NSSR);
1692 
1693 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) ||
1694 	    !nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
1695 		goto unlock;
1696 
1697 	/*
1698 	 * Read controller status to flush the previous write and trigger a
1699 	 * pcie read error.
1700 	 */
1701 	readl(dev->bar + NVME_REG_CSTS);
1702 unlock:
1703 	mutex_unlock(&dev->shutdown_lock);
1704 	return ret;
1705 }
1706 
adapter_delete_queue(struct nvme_dev * dev,u8 opcode,u16 id)1707 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
1708 {
1709 	struct nvme_command c = { };
1710 
1711 	c.delete_queue.opcode = opcode;
1712 	c.delete_queue.qid = cpu_to_le16(id);
1713 
1714 	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1715 }
1716 
adapter_alloc_cq(struct nvme_dev * dev,u16 qid,struct nvme_queue * nvmeq,s16 vector)1717 static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
1718 		struct nvme_queue *nvmeq, s16 vector)
1719 {
1720 	struct nvme_command c = { };
1721 	int flags = NVME_QUEUE_PHYS_CONTIG;
1722 
1723 	if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
1724 		flags |= NVME_CQ_IRQ_ENABLED;
1725 
1726 	/*
1727 	 * Note: we (ab)use the fact that the prp fields survive if no data
1728 	 * is attached to the request.
1729 	 */
1730 	c.create_cq.opcode = nvme_admin_create_cq;
1731 	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
1732 	c.create_cq.cqid = cpu_to_le16(qid);
1733 	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1734 	c.create_cq.cq_flags = cpu_to_le16(flags);
1735 	c.create_cq.irq_vector = cpu_to_le16(vector);
1736 
1737 	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1738 }
1739 
adapter_alloc_sq(struct nvme_dev * dev,u16 qid,struct nvme_queue * nvmeq)1740 static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
1741 						struct nvme_queue *nvmeq)
1742 {
1743 	struct nvme_ctrl *ctrl = &dev->ctrl;
1744 	struct nvme_command c = { };
1745 	int flags = NVME_QUEUE_PHYS_CONTIG;
1746 
1747 	/*
1748 	 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
1749 	 * set. Since URGENT priority is zeroes, it makes all queues
1750 	 * URGENT.
1751 	 */
1752 	if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
1753 		flags |= NVME_SQ_PRIO_MEDIUM;
1754 
1755 	/*
1756 	 * Note: we (ab)use the fact that the prp fields survive if no data
1757 	 * is attached to the request.
1758 	 */
1759 	c.create_sq.opcode = nvme_admin_create_sq;
1760 	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
1761 	c.create_sq.sqid = cpu_to_le16(qid);
1762 	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1763 	c.create_sq.sq_flags = cpu_to_le16(flags);
1764 	c.create_sq.cqid = cpu_to_le16(qid);
1765 
1766 	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1767 }
1768 
adapter_delete_cq(struct nvme_dev * dev,u16 cqid)1769 static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
1770 {
1771 	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
1772 }
1773 
adapter_delete_sq(struct nvme_dev * dev,u16 sqid)1774 static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
1775 {
1776 	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
1777 }
1778 
abort_endio(struct request * req,blk_status_t error,const struct io_comp_batch * iob)1779 static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error,
1780 				      const struct io_comp_batch *iob)
1781 {
1782 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1783 
1784 	dev_warn(nvmeq->dev->ctrl.device,
1785 		 "Abort status: 0x%x", nvme_req(req)->status);
1786 	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
1787 	blk_mq_free_request(req);
1788 	return RQ_END_IO_NONE;
1789 }
1790 
nvme_should_reset(struct nvme_dev * dev,u32 csts)1791 static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
1792 {
1793 	/* If true, indicates loss of adapter communication, possibly by a
1794 	 * NVMe Subsystem reset.
1795 	 */
1796 	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
1797 
1798 	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
1799 	switch (nvme_ctrl_state(&dev->ctrl)) {
1800 	case NVME_CTRL_RESETTING:
1801 	case NVME_CTRL_CONNECTING:
1802 		return false;
1803 	default:
1804 		break;
1805 	}
1806 
1807 	/* We shouldn't reset unless the controller is on fatal error state
1808 	 * _or_ if we lost the communication with it.
1809 	 */
1810 	if (!(csts & NVME_CSTS_CFS) && !nssro)
1811 		return false;
1812 
1813 	return true;
1814 }
1815 
nvme_warn_reset(struct nvme_dev * dev,u32 csts)1816 static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
1817 {
1818 	/* Read a config register to help see what died. */
1819 	u16 pci_status;
1820 	int result;
1821 
1822 	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
1823 				      &pci_status);
1824 	if (result == PCIBIOS_SUCCESSFUL)
1825 		dev_warn(dev->ctrl.device,
1826 			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
1827 			 csts, pci_status);
1828 	else
1829 		dev_warn(dev->ctrl.device,
1830 			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
1831 			 csts, result);
1832 
1833 	if (csts != ~0)
1834 		return;
1835 
1836 	dev_warn(dev->ctrl.device,
1837 		 "Does your device have a faulty power saving mode enabled?\n");
1838 	dev_warn(dev->ctrl.device,
1839 		 "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off pcie_port_pm=off\" and report a bug\n");
1840 }
1841 
nvme_timeout(struct request * req)1842 static enum blk_eh_timer_return nvme_timeout(struct request *req)
1843 {
1844 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1845 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1846 	struct nvme_dev *dev = nvmeq->dev;
1847 	struct request *abort_req;
1848 	struct nvme_command cmd = { };
1849 	struct pci_dev *pdev = to_pci_dev(dev->dev);
1850 	u32 csts = readl(dev->bar + NVME_REG_CSTS);
1851 	u8 opcode;
1852 
1853 	/*
1854 	 * Shutdown the device immediately if we see it is disconnected. This
1855 	 * unblocks PCIe error handling if the nvme driver is waiting in
1856 	 * error_resume for a device that has been removed. We can't unbind the
1857 	 * driver while the driver's error callback is waiting to complete, so
1858 	 * we're relying on a timeout to break that deadlock if a removal
1859 	 * occurs while reset work is running.
1860 	 */
1861 	if (pci_dev_is_disconnected(pdev))
1862 		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
1863 	if (nvme_state_terminal(&dev->ctrl))
1864 		goto disable;
1865 
1866 	/* If PCI error recovery process is happening, we cannot reset or
1867 	 * the recovery mechanism will surely fail.
1868 	 */
1869 	mb();
1870 	if (pci_channel_offline(pdev))
1871 		return BLK_EH_RESET_TIMER;
1872 
1873 	/*
1874 	 * Reset immediately if the controller is failed
1875 	 */
1876 	if (nvme_should_reset(dev, csts)) {
1877 		nvme_warn_reset(dev, csts);
1878 		goto disable;
1879 	}
1880 
1881 	/*
1882 	 * Did we miss an interrupt?
1883 	 */
1884 	if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
1885 		nvme_poll(req->mq_hctx, NULL);
1886 	else
1887 		nvme_poll_irqdisable(nvmeq);
1888 
1889 	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
1890 		dev_warn(dev->ctrl.device,
1891 			 "I/O tag %d (%04x) QID %d timeout, completion polled\n",
1892 			 req->tag, nvme_cid(req), nvmeq->qid);
1893 		return BLK_EH_DONE;
1894 	}
1895 
1896 	/*
1897 	 * Shutdown immediately if controller times out while starting. The
1898 	 * reset work will see the pci device disabled when it gets the forced
1899 	 * cancellation error. All outstanding requests are completed on
1900 	 * shutdown, so we return BLK_EH_DONE.
1901 	 */
1902 	switch (nvme_ctrl_state(&dev->ctrl)) {
1903 	case NVME_CTRL_CONNECTING:
1904 		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
1905 		fallthrough;
1906 	case NVME_CTRL_DELETING:
1907 		dev_warn_ratelimited(dev->ctrl.device,
1908 			 "I/O tag %d (%04x) QID %d timeout, disable controller\n",
1909 			 req->tag, nvme_cid(req), nvmeq->qid);
1910 		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1911 		nvme_dev_disable(dev, true);
1912 		return BLK_EH_DONE;
1913 	case NVME_CTRL_RESETTING:
1914 		return BLK_EH_RESET_TIMER;
1915 	default:
1916 		break;
1917 	}
1918 
1919 	/*
1920 	 * Shutdown the controller immediately and schedule a reset if the
1921 	 * command was already aborted once before and still hasn't been
1922 	 * returned to the driver, or if this is the admin queue.
1923 	 */
1924 	opcode = nvme_req(req)->cmd->common.opcode;
1925 	if (!nvmeq->qid || (iod->flags & IOD_ABORTED)) {
1926 		dev_warn(dev->ctrl.device,
1927 			 "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n",
1928 			 req->tag, nvme_cid(req), opcode,
1929 			 nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid);
1930 		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1931 		goto disable;
1932 	}
1933 
1934 	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
1935 		atomic_inc(&dev->ctrl.abort_limit);
1936 		return BLK_EH_RESET_TIMER;
1937 	}
1938 	iod->flags |= IOD_ABORTED;
1939 
1940 	cmd.abort.opcode = nvme_admin_abort_cmd;
1941 	cmd.abort.cid = nvme_cid(req);
1942 	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
1943 
1944 	dev_warn(nvmeq->dev->ctrl.device,
1945 		 "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, aborting req_op:%s(%u) size:%u\n",
1946 		 req->tag, nvme_cid(req), opcode, nvme_get_opcode_str(opcode),
1947 		 nvmeq->qid, blk_op_str(req_op(req)), req_op(req),
1948 		 blk_rq_bytes(req));
1949 
1950 	abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
1951 					 BLK_MQ_REQ_NOWAIT);
1952 	if (IS_ERR(abort_req)) {
1953 		atomic_inc(&dev->ctrl.abort_limit);
1954 		return BLK_EH_RESET_TIMER;
1955 	}
1956 	nvme_init_request(abort_req, &cmd);
1957 
1958 	abort_req->end_io = abort_endio;
1959 	abort_req->end_io_data = NULL;
1960 	blk_execute_rq_nowait(abort_req, false);
1961 
1962 	/*
1963 	 * The aborted req will be completed on receiving the abort req.
1964 	 * We enable the timer again. If hit twice, it'll cause a device reset,
1965 	 * as the device then is in a faulty state.
1966 	 */
1967 	return BLK_EH_RESET_TIMER;
1968 
1969 disable:
1970 	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
1971 		if (nvme_state_terminal(&dev->ctrl))
1972 			nvme_dev_disable(dev, true);
1973 		return BLK_EH_DONE;
1974 	}
1975 
1976 	nvme_dev_disable(dev, false);
1977 	if (nvme_try_sched_reset(&dev->ctrl))
1978 		nvme_unquiesce_io_queues(&dev->ctrl);
1979 	return BLK_EH_DONE;
1980 }
1981 
nvme_free_queue(struct nvme_queue * nvmeq)1982 static void nvme_free_queue(struct nvme_queue *nvmeq)
1983 {
1984 	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
1985 				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
1986 	if (!nvmeq->sq_cmds)
1987 		return;
1988 
1989 	if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
1990 		pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
1991 				nvmeq->sq_cmds, SQ_SIZE(nvmeq));
1992 	} else {
1993 		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
1994 				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
1995 	}
1996 }
1997 
nvme_free_queues(struct nvme_dev * dev,int lowest)1998 static void nvme_free_queues(struct nvme_dev *dev, int lowest)
1999 {
2000 	int i;
2001 
2002 	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
2003 		dev->ctrl.queue_count--;
2004 		nvme_free_queue(&dev->queues[i]);
2005 	}
2006 }
2007 
nvme_suspend_queue(struct nvme_dev * dev,unsigned int qid)2008 static void nvme_suspend_queue(struct nvme_dev *dev, unsigned int qid)
2009 {
2010 	struct nvme_queue *nvmeq = &dev->queues[qid];
2011 
2012 	if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
2013 		return;
2014 
2015 	/* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */
2016 	mb();
2017 
2018 	nvmeq->dev->online_queues--;
2019 	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
2020 		nvme_quiesce_admin_queue(&nvmeq->dev->ctrl);
2021 	if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
2022 		pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq);
2023 }
2024 
nvme_suspend_io_queues(struct nvme_dev * dev)2025 static void nvme_suspend_io_queues(struct nvme_dev *dev)
2026 {
2027 	int i;
2028 
2029 	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
2030 		nvme_suspend_queue(dev, i);
2031 }
2032 
2033 /*
2034  * Called only on a device that has been disabled and after all other threads
2035  * that can check this device's completion queues have synced, except
2036  * nvme_poll(). This is the last chance for the driver to see a natural
2037  * completion before nvme_cancel_request() terminates all incomplete requests.
2038  */
nvme_reap_pending_cqes(struct nvme_dev * dev)2039 static void nvme_reap_pending_cqes(struct nvme_dev *dev)
2040 {
2041 	int i;
2042 
2043 	for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
2044 		spin_lock(&dev->queues[i].cq_poll_lock);
2045 		nvme_poll_cq(&dev->queues[i], NULL);
2046 		spin_unlock(&dev->queues[i].cq_poll_lock);
2047 	}
2048 }
2049 
nvme_cmb_qdepth(struct nvme_dev * dev,int nr_io_queues,int entry_size)2050 static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
2051 				int entry_size)
2052 {
2053 	int q_depth = dev->q_depth;
2054 	unsigned q_size_aligned = roundup(q_depth * entry_size,
2055 					  NVME_CTRL_PAGE_SIZE);
2056 
2057 	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
2058 		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
2059 
2060 		mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE);
2061 		q_depth = div_u64(mem_per_q, entry_size);
2062 
2063 		/*
2064 		 * Ensure the reduced q_depth is above some threshold where it
2065 		 * would be better to map queues in system memory with the
2066 		 * original depth
2067 		 */
2068 		if (q_depth < 64)
2069 			return -ENOMEM;
2070 	}
2071 
2072 	return q_depth;
2073 }
2074 
nvme_alloc_sq_cmds(struct nvme_dev * dev,struct nvme_queue * nvmeq,int qid)2075 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
2076 				int qid)
2077 {
2078 	struct pci_dev *pdev = to_pci_dev(dev->dev);
2079 
2080 	if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
2081 		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
2082 		if (nvmeq->sq_cmds) {
2083 			nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
2084 							nvmeq->sq_cmds);
2085 			if (nvmeq->sq_dma_addr) {
2086 				set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
2087 				return 0;
2088 			}
2089 
2090 			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
2091 		}
2092 	}
2093 
2094 	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
2095 				&nvmeq->sq_dma_addr, GFP_KERNEL);
2096 	if (!nvmeq->sq_cmds)
2097 		return -ENOMEM;
2098 	return 0;
2099 }
2100 
nvme_alloc_queue(struct nvme_dev * dev,int qid,int depth)2101 static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
2102 {
2103 	struct nvme_queue *nvmeq = &dev->queues[qid];
2104 
2105 	if (dev->ctrl.queue_count > qid)
2106 		return 0;
2107 
2108 	nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES;
2109 	nvmeq->q_depth = depth;
2110 	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
2111 					 &nvmeq->cq_dma_addr, GFP_KERNEL);
2112 	if (!nvmeq->cqes)
2113 		goto free_nvmeq;
2114 
2115 	if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
2116 		goto free_cqdma;
2117 
2118 	nvmeq->dev = dev;
2119 	spin_lock_init(&nvmeq->sq_lock);
2120 	spin_lock_init(&nvmeq->cq_poll_lock);
2121 	nvmeq->cq_head = 0;
2122 	nvmeq->cq_phase = 1;
2123 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
2124 	nvmeq->qid = qid;
2125 	dev->ctrl.queue_count++;
2126 
2127 	return 0;
2128 
2129  free_cqdma:
2130 	dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
2131 			  nvmeq->cq_dma_addr);
2132  free_nvmeq:
2133 	return -ENOMEM;
2134 }
2135 
queue_request_irq(struct nvme_queue * nvmeq)2136 static int queue_request_irq(struct nvme_queue *nvmeq)
2137 {
2138 	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
2139 	int nr = nvmeq->dev->ctrl.instance;
2140 
2141 	if (use_threaded_interrupts) {
2142 		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
2143 				nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
2144 	} else {
2145 		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
2146 				NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
2147 	}
2148 }
2149 
nvme_init_queue(struct nvme_queue * nvmeq,u16 qid)2150 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
2151 {
2152 	struct nvme_dev *dev = nvmeq->dev;
2153 
2154 	nvmeq->sq_tail = 0;
2155 	nvmeq->last_sq_tail = 0;
2156 	nvmeq->cq_head = 0;
2157 	nvmeq->cq_phase = 1;
2158 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
2159 	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
2160 	nvme_dbbuf_init(dev, nvmeq, qid);
2161 	dev->online_queues++;
2162 	wmb(); /* ensure the first interrupt sees the initialization */
2163 }
2164 
2165 /*
2166  * Try getting shutdown_lock while setting up IO queues.
2167  */
nvme_setup_io_queues_trylock(struct nvme_dev * dev)2168 static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
2169 {
2170 	/*
2171 	 * Give up if the lock is being held by nvme_dev_disable.
2172 	 */
2173 	if (!mutex_trylock(&dev->shutdown_lock))
2174 		return -ENODEV;
2175 
2176 	/*
2177 	 * Controller is in wrong state, fail early.
2178 	 */
2179 	if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) {
2180 		mutex_unlock(&dev->shutdown_lock);
2181 		return -ENODEV;
2182 	}
2183 
2184 	return 0;
2185 }
2186 
nvme_create_queue(struct nvme_queue * nvmeq,int qid,bool polled)2187 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
2188 {
2189 	struct nvme_dev *dev = nvmeq->dev;
2190 	int result;
2191 	u16 vector = 0;
2192 
2193 	clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
2194 
2195 	/*
2196 	 * A queue's vector matches the queue identifier unless the controller
2197 	 * has only one vector available.
2198 	 */
2199 	if (!polled)
2200 		vector = dev->num_vecs == 1 ? 0 : qid;
2201 	else
2202 		set_bit(NVMEQ_POLLED, &nvmeq->flags);
2203 
2204 	result = adapter_alloc_cq(dev, qid, nvmeq, vector);
2205 	if (result)
2206 		return result;
2207 
2208 	result = adapter_alloc_sq(dev, qid, nvmeq);
2209 	if (result < 0)
2210 		return result;
2211 	if (result)
2212 		goto release_cq;
2213 
2214 	nvmeq->cq_vector = vector;
2215 
2216 	result = nvme_setup_io_queues_trylock(dev);
2217 	if (result)
2218 		return result;
2219 	nvme_init_queue(nvmeq, qid);
2220 	if (!polled) {
2221 		result = queue_request_irq(nvmeq);
2222 		if (result < 0)
2223 			goto release_sq;
2224 	}
2225 
2226 	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
2227 	mutex_unlock(&dev->shutdown_lock);
2228 	return result;
2229 
2230 release_sq:
2231 	dev->online_queues--;
2232 	mutex_unlock(&dev->shutdown_lock);
2233 	adapter_delete_sq(dev, qid);
2234 release_cq:
2235 	adapter_delete_cq(dev, qid);
2236 	return result;
2237 }
2238 
2239 static const struct blk_mq_ops nvme_mq_admin_ops = {
2240 	.queue_rq	= nvme_queue_rq,
2241 	.complete	= nvme_pci_complete_rq,
2242 	.init_hctx	= nvme_admin_init_hctx,
2243 	.init_request	= nvme_pci_init_request,
2244 	.timeout	= nvme_timeout,
2245 };
2246 
2247 static const struct blk_mq_ops nvme_mq_ops = {
2248 	.queue_rq	= nvme_queue_rq,
2249 	.queue_rqs	= nvme_queue_rqs,
2250 	.complete	= nvme_pci_complete_rq,
2251 	.commit_rqs	= nvme_commit_rqs,
2252 	.init_hctx	= nvme_init_hctx,
2253 	.init_request	= nvme_pci_init_request,
2254 	.map_queues	= nvme_pci_map_queues,
2255 	.timeout	= nvme_timeout,
2256 	.poll		= nvme_poll,
2257 };
2258 
nvme_dev_remove_admin(struct nvme_dev * dev)2259 static void nvme_dev_remove_admin(struct nvme_dev *dev)
2260 {
2261 	if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
2262 		/*
2263 		 * If the controller was reset during removal, it's possible
2264 		 * user requests may be waiting on a stopped queue. Start the
2265 		 * queue to flush these to completion.
2266 		 */
2267 		nvme_unquiesce_admin_queue(&dev->ctrl);
2268 		nvme_remove_admin_tag_set(&dev->ctrl);
2269 	}
2270 }
2271 
db_bar_size(struct nvme_dev * dev,unsigned nr_io_queues)2272 static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
2273 {
2274 	return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
2275 }
2276 
nvme_remap_bar(struct nvme_dev * dev,unsigned long size)2277 static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
2278 {
2279 	struct pci_dev *pdev = to_pci_dev(dev->dev);
2280 
2281 	if (size <= dev->bar_mapped_size)
2282 		return 0;
2283 	if (size > pci_resource_len(pdev, 0))
2284 		return -ENOMEM;
2285 	if (dev->bar)
2286 		iounmap(dev->bar);
2287 	dev->bar = ioremap(pci_resource_start(pdev, 0), size);
2288 	if (!dev->bar) {
2289 		dev->bar_mapped_size = 0;
2290 		return -ENOMEM;
2291 	}
2292 	dev->bar_mapped_size = size;
2293 	dev->dbs = dev->bar + NVME_REG_DBS;
2294 
2295 	return 0;
2296 }
2297 
nvme_pci_configure_admin_queue(struct nvme_dev * dev)2298 static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
2299 {
2300 	int result;
2301 	u32 aqa;
2302 	struct nvme_queue *nvmeq;
2303 
2304 	result = nvme_remap_bar(dev, db_bar_size(dev, 0));
2305 	if (result < 0)
2306 		return result;
2307 
2308 	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
2309 				NVME_CAP_NSSRC(dev->ctrl.cap) : 0;
2310 
2311 	if (dev->subsystem &&
2312 	    (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
2313 		writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);
2314 
2315 	/*
2316 	 * If the device has been passed off to us in an enabled state, just
2317 	 * clear the enabled bit.  The spec says we should set the 'shutdown
2318 	 * notification bits', but doing so may cause the device to complete
2319 	 * commands to the admin queue ... and we don't know what memory that
2320 	 * might be pointing at!
2321 	 */
2322 	result = nvme_disable_ctrl(&dev->ctrl, false);
2323 	if (result < 0) {
2324 		struct pci_dev *pdev = to_pci_dev(dev->dev);
2325 
2326 		/*
2327 		 * The NVMe Controller Reset method did not get an expected
2328 		 * CSTS.RDY transition, so something with the device appears to
2329 		 * be stuck. Use the lower level and bigger hammer PCIe
2330 		 * Function Level Reset to attempt restoring the device to its
2331 		 * initial state, and try again.
2332 		 */
2333 		result = pcie_reset_flr(pdev, false);
2334 		if (result < 0)
2335 			return result;
2336 
2337 		pci_restore_state(pdev);
2338 		result = nvme_disable_ctrl(&dev->ctrl, false);
2339 		if (result < 0)
2340 			return result;
2341 
2342 		dev_info(dev->ctrl.device,
2343 			"controller reset completed after pcie flr\n");
2344 	}
2345 
2346 	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
2347 	if (result)
2348 		return result;
2349 
2350 	dev->ctrl.numa_node = dev_to_node(dev->dev);
2351 
2352 	nvmeq = &dev->queues[0];
2353 	aqa = nvmeq->q_depth - 1;
2354 	aqa |= aqa << 16;
2355 
2356 	writel(aqa, dev->bar + NVME_REG_AQA);
2357 	lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
2358 	lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
2359 
2360 	result = nvme_enable_ctrl(&dev->ctrl);
2361 	if (result)
2362 		return result;
2363 
2364 	nvmeq->cq_vector = 0;
2365 	nvme_init_queue(nvmeq, 0);
2366 	result = queue_request_irq(nvmeq);
2367 	if (result) {
2368 		dev->online_queues--;
2369 		return result;
2370 	}
2371 
2372 	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
2373 	return result;
2374 }
2375 
nvme_create_io_queues(struct nvme_dev * dev)2376 static int nvme_create_io_queues(struct nvme_dev *dev)
2377 {
2378 	unsigned i, max, rw_queues;
2379 	int ret = 0;
2380 
2381 	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
2382 		if (nvme_alloc_queue(dev, i, dev->q_depth)) {
2383 			ret = -ENOMEM;
2384 			break;
2385 		}
2386 	}
2387 
2388 	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
2389 	if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) {
2390 		rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] +
2391 				dev->io_queues[HCTX_TYPE_READ];
2392 	} else {
2393 		rw_queues = max;
2394 	}
2395 
2396 	for (i = dev->online_queues; i <= max; i++) {
2397 		bool polled = i > rw_queues;
2398 
2399 		ret = nvme_create_queue(&dev->queues[i], i, polled);
2400 		if (ret)
2401 			break;
2402 	}
2403 
2404 	/*
2405 	 * Ignore failing Create SQ/CQ commands, we can continue with less
2406 	 * than the desired amount of queues, and even a controller without
2407 	 * I/O queues can still be used to issue admin commands.  This might
2408 	 * be useful to upgrade a buggy firmware for example.
2409 	 */
2410 	return ret >= 0 ? 0 : ret;
2411 }
2412 
nvme_cmb_size_unit(struct nvme_dev * dev)2413 static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
2414 {
2415 	u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
2416 
2417 	return 1ULL << (12 + 4 * szu);
2418 }
2419 
nvme_cmb_size(struct nvme_dev * dev)2420 static u32 nvme_cmb_size(struct nvme_dev *dev)
2421 {
2422 	return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
2423 }
2424 
nvme_map_cmb(struct nvme_dev * dev)2425 static void nvme_map_cmb(struct nvme_dev *dev)
2426 {
2427 	u64 size, offset;
2428 	resource_size_t bar_size;
2429 	struct pci_dev *pdev = to_pci_dev(dev->dev);
2430 	int bar;
2431 
2432 	if (dev->cmb_size)
2433 		return;
2434 
2435 	if (NVME_CAP_CMBS(dev->ctrl.cap))
2436 		writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
2437 
2438 	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
2439 	if (!dev->cmbsz)
2440 		return;
2441 	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
2442 
2443 	size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
2444 	offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
2445 	bar = NVME_CMB_BIR(dev->cmbloc);
2446 	bar_size = pci_resource_len(pdev, bar);
2447 
2448 	if (offset > bar_size)
2449 		return;
2450 
2451 	/*
2452 	 * Controllers may support a CMB size larger than their BAR, for
2453 	 * example, due to being behind a bridge. Reduce the CMB to the
2454 	 * reported size of the BAR
2455 	 */
2456 	size = min(size, bar_size - offset);
2457 
2458 	if (!IS_ALIGNED(size, memremap_compat_align()) ||
2459 	    !IS_ALIGNED(pci_resource_start(pdev, bar),
2460 			memremap_compat_align()))
2461 		return;
2462 
2463 	/*
2464 	 * Tell the controller about the host side address mapping the CMB,
2465 	 * and enable CMB decoding for the NVMe 1.4+ scheme:
2466 	 */
2467 	if (NVME_CAP_CMBS(dev->ctrl.cap)) {
2468 		hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
2469 			     (pci_bus_address(pdev, bar) + offset),
2470 			     dev->bar + NVME_REG_CMBMSC);
2471 	}
2472 
2473 	if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
2474 		dev_warn(dev->ctrl.device,
2475 			 "failed to register the CMB\n");
2476 		hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC);
2477 		return;
2478 	}
2479 
2480 	dev->cmb_size = size;
2481 	dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS);
2482 
2483 	if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
2484 			(NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
2485 		pci_p2pmem_publish(pdev, true);
2486 }
2487 
nvme_set_host_mem(struct nvme_dev * dev,u32 bits)2488 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
2489 {
2490 	u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT;
2491 	u64 dma_addr = dev->host_mem_descs_dma;
2492 	struct nvme_command c = { };
2493 	int ret;
2494 
2495 	c.features.opcode	= nvme_admin_set_features;
2496 	c.features.fid		= cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
2497 	c.features.dword11	= cpu_to_le32(bits);
2498 	c.features.dword12	= cpu_to_le32(host_mem_size);
2499 	c.features.dword13	= cpu_to_le32(lower_32_bits(dma_addr));
2500 	c.features.dword14	= cpu_to_le32(upper_32_bits(dma_addr));
2501 	c.features.dword15	= cpu_to_le32(dev->nr_host_mem_descs);
2502 
2503 	ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
2504 	if (ret) {
2505 		dev_warn(dev->ctrl.device,
2506 			 "failed to set host mem (err %d, flags %#x).\n",
2507 			 ret, bits);
2508 	} else
2509 		dev->hmb = bits & NVME_HOST_MEM_ENABLE;
2510 
2511 	return ret;
2512 }
2513 
nvme_free_host_mem_multi(struct nvme_dev * dev)2514 static void nvme_free_host_mem_multi(struct nvme_dev *dev)
2515 {
2516 	int i;
2517 
2518 	for (i = 0; i < dev->nr_host_mem_descs; i++) {
2519 		struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
2520 		size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE;
2521 
2522 		dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
2523 			       le64_to_cpu(desc->addr),
2524 			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
2525 	}
2526 
2527 	kfree(dev->host_mem_desc_bufs);
2528 	dev->host_mem_desc_bufs = NULL;
2529 }
2530 
nvme_free_host_mem(struct nvme_dev * dev)2531 static void nvme_free_host_mem(struct nvme_dev *dev)
2532 {
2533 	if (dev->hmb_sgt)
2534 		dma_free_noncontiguous(dev->dev, dev->host_mem_size,
2535 				dev->hmb_sgt, DMA_BIDIRECTIONAL);
2536 	else
2537 		nvme_free_host_mem_multi(dev);
2538 
2539 	dma_free_coherent(dev->dev, dev->host_mem_descs_size,
2540 			dev->host_mem_descs, dev->host_mem_descs_dma);
2541 	dev->host_mem_descs = NULL;
2542 	dev->host_mem_descs_size = 0;
2543 	dev->nr_host_mem_descs = 0;
2544 }
2545 
nvme_alloc_host_mem_single(struct nvme_dev * dev,u64 size)2546 static int nvme_alloc_host_mem_single(struct nvme_dev *dev, u64 size)
2547 {
2548 	dev->hmb_sgt = dma_alloc_noncontiguous(dev->dev, size,
2549 				DMA_BIDIRECTIONAL, GFP_KERNEL, 0);
2550 	if (!dev->hmb_sgt)
2551 		return -ENOMEM;
2552 
2553 	dev->host_mem_descs = dma_alloc_coherent(dev->dev,
2554 			sizeof(*dev->host_mem_descs), &dev->host_mem_descs_dma,
2555 			GFP_KERNEL);
2556 	if (!dev->host_mem_descs) {
2557 		dma_free_noncontiguous(dev->dev, size, dev->hmb_sgt,
2558 				DMA_BIDIRECTIONAL);
2559 		dev->hmb_sgt = NULL;
2560 		return -ENOMEM;
2561 	}
2562 	dev->host_mem_size = size;
2563 	dev->host_mem_descs_size = sizeof(*dev->host_mem_descs);
2564 	dev->nr_host_mem_descs = 1;
2565 
2566 	dev->host_mem_descs[0].addr =
2567 		cpu_to_le64(dev->hmb_sgt->sgl->dma_address);
2568 	dev->host_mem_descs[0].size = cpu_to_le32(size / NVME_CTRL_PAGE_SIZE);
2569 	return 0;
2570 }
2571 
nvme_alloc_host_mem_multi(struct nvme_dev * dev,u64 preferred,u32 chunk_size)2572 static int nvme_alloc_host_mem_multi(struct nvme_dev *dev, u64 preferred,
2573 		u32 chunk_size)
2574 {
2575 	struct nvme_host_mem_buf_desc *descs;
2576 	u32 max_entries, len, descs_size;
2577 	dma_addr_t descs_dma;
2578 	int i = 0;
2579 	void **bufs;
2580 	u64 size, tmp;
2581 
2582 	tmp = (preferred + chunk_size - 1);
2583 	do_div(tmp, chunk_size);
2584 	max_entries = tmp;
2585 
2586 	if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
2587 		max_entries = dev->ctrl.hmmaxd;
2588 
2589 	descs_size = max_entries * sizeof(*descs);
2590 	descs = dma_alloc_coherent(dev->dev, descs_size, &descs_dma,
2591 			GFP_KERNEL);
2592 	if (!descs)
2593 		goto out;
2594 
2595 	bufs = kzalloc_objs(*bufs, max_entries);
2596 	if (!bufs)
2597 		goto out_free_descs;
2598 
2599 	for (size = 0; size < preferred && i < max_entries; size += len) {
2600 		dma_addr_t dma_addr;
2601 
2602 		len = min_t(u64, chunk_size, preferred - size);
2603 		bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
2604 				DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
2605 		if (!bufs[i])
2606 			break;
2607 
2608 		descs[i].addr = cpu_to_le64(dma_addr);
2609 		descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE);
2610 		i++;
2611 	}
2612 
2613 	if (!size)
2614 		goto out_free_bufs;
2615 
2616 	dev->nr_host_mem_descs = i;
2617 	dev->host_mem_size = size;
2618 	dev->host_mem_descs = descs;
2619 	dev->host_mem_descs_dma = descs_dma;
2620 	dev->host_mem_descs_size = descs_size;
2621 	dev->host_mem_desc_bufs = bufs;
2622 	return 0;
2623 
2624 out_free_bufs:
2625 	kfree(bufs);
2626 out_free_descs:
2627 	dma_free_coherent(dev->dev, descs_size, descs, descs_dma);
2628 out:
2629 	dev->host_mem_descs = NULL;
2630 	return -ENOMEM;
2631 }
2632 
nvme_alloc_host_mem(struct nvme_dev * dev,u64 min,u64 preferred)2633 static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
2634 {
2635 	unsigned long dma_merge_boundary = dma_get_merge_boundary(dev->dev);
2636 	u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
2637 	u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
2638 	u64 chunk_size;
2639 
2640 	/*
2641 	 * If there is an IOMMU that can merge pages, try a virtually
2642 	 * non-contiguous allocation for a single segment first.
2643 	 */
2644 	if (dma_merge_boundary && (PAGE_SIZE & dma_merge_boundary) == 0) {
2645 		if (!nvme_alloc_host_mem_single(dev, preferred))
2646 			return 0;
2647 	}
2648 
2649 	/* start big and work our way down */
2650 	for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) {
2651 		if (!nvme_alloc_host_mem_multi(dev, preferred, chunk_size)) {
2652 			if (!min || dev->host_mem_size >= min)
2653 				return 0;
2654 			nvme_free_host_mem(dev);
2655 		}
2656 	}
2657 
2658 	return -ENOMEM;
2659 }
2660 
nvme_setup_host_mem(struct nvme_dev * dev)2661 static int nvme_setup_host_mem(struct nvme_dev *dev)
2662 {
2663 	u64 max = (u64)max_host_mem_size_mb * SZ_1M;
2664 	u64 preferred = (u64)dev->ctrl.hmpre * 4096;
2665 	u64 min = (u64)dev->ctrl.hmmin * 4096;
2666 	u32 enable_bits = NVME_HOST_MEM_ENABLE;
2667 	int ret;
2668 
2669 	if (!dev->ctrl.hmpre)
2670 		return 0;
2671 
2672 	preferred = min(preferred, max);
2673 	if (min > max) {
2674 		dev_warn(dev->ctrl.device,
2675 			"min host memory (%lld MiB) above limit (%d MiB).\n",
2676 			min >> ilog2(SZ_1M), max_host_mem_size_mb);
2677 		nvme_free_host_mem(dev);
2678 		return 0;
2679 	}
2680 
2681 	/*
2682 	 * If we already have a buffer allocated check if we can reuse it.
2683 	 */
2684 	if (dev->host_mem_descs) {
2685 		if (dev->host_mem_size >= min)
2686 			enable_bits |= NVME_HOST_MEM_RETURN;
2687 		else
2688 			nvme_free_host_mem(dev);
2689 	}
2690 
2691 	if (!dev->host_mem_descs) {
2692 		if (nvme_alloc_host_mem(dev, min, preferred)) {
2693 			dev_warn(dev->ctrl.device,
2694 				"failed to allocate host memory buffer.\n");
2695 			return 0; /* controller must work without HMB */
2696 		}
2697 
2698 		dev_info(dev->ctrl.device,
2699 			"allocated %lld MiB host memory buffer (%u segment%s).\n",
2700 			dev->host_mem_size >> ilog2(SZ_1M),
2701 			dev->nr_host_mem_descs,
2702 			str_plural(dev->nr_host_mem_descs));
2703 	}
2704 
2705 	ret = nvme_set_host_mem(dev, enable_bits);
2706 	if (ret)
2707 		nvme_free_host_mem(dev);
2708 	return ret;
2709 }
2710 
cmb_show(struct device * dev,struct device_attribute * attr,char * buf)2711 static ssize_t cmb_show(struct device *dev, struct device_attribute *attr,
2712 		char *buf)
2713 {
2714 	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
2715 
2716 	return sysfs_emit(buf, "cmbloc : 0x%08x\ncmbsz  : 0x%08x\n",
2717 		       ndev->cmbloc, ndev->cmbsz);
2718 }
2719 static DEVICE_ATTR_RO(cmb);
2720 
cmbloc_show(struct device * dev,struct device_attribute * attr,char * buf)2721 static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr,
2722 		char *buf)
2723 {
2724 	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
2725 
2726 	return sysfs_emit(buf, "%u\n", ndev->cmbloc);
2727 }
2728 static DEVICE_ATTR_RO(cmbloc);
2729 
cmbsz_show(struct device * dev,struct device_attribute * attr,char * buf)2730 static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr,
2731 		char *buf)
2732 {
2733 	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
2734 
2735 	return sysfs_emit(buf, "%u\n", ndev->cmbsz);
2736 }
2737 static DEVICE_ATTR_RO(cmbsz);
2738 
hmb_show(struct device * dev,struct device_attribute * attr,char * buf)2739 static ssize_t hmb_show(struct device *dev, struct device_attribute *attr,
2740 			char *buf)
2741 {
2742 	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
2743 
2744 	return sysfs_emit(buf, "%d\n", ndev->hmb);
2745 }
2746 
hmb_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2747 static ssize_t hmb_store(struct device *dev, struct device_attribute *attr,
2748 			 const char *buf, size_t count)
2749 {
2750 	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
2751 	bool new;
2752 	int ret;
2753 
2754 	if (kstrtobool(buf, &new) < 0)
2755 		return -EINVAL;
2756 
2757 	if (new == ndev->hmb)
2758 		return count;
2759 
2760 	if (new) {
2761 		ret = nvme_setup_host_mem(ndev);
2762 	} else {
2763 		ret = nvme_set_host_mem(ndev, 0);
2764 		if (!ret)
2765 			nvme_free_host_mem(ndev);
2766 	}
2767 
2768 	if (ret < 0)
2769 		return ret;
2770 
2771 	return count;
2772 }
2773 static DEVICE_ATTR_RW(hmb);
2774 
nvme_pci_attrs_are_visible(struct kobject * kobj,struct attribute * a,int n)2775 static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj,
2776 		struct attribute *a, int n)
2777 {
2778 	struct nvme_ctrl *ctrl =
2779 		dev_get_drvdata(container_of(kobj, struct device, kobj));
2780 	struct nvme_dev *dev = to_nvme_dev(ctrl);
2781 
2782 	if (a == &dev_attr_cmb.attr ||
2783 	    a == &dev_attr_cmbloc.attr ||
2784 	    a == &dev_attr_cmbsz.attr) {
2785 	    	if (!dev->cmbsz)
2786 			return 0;
2787 	}
2788 	if (a == &dev_attr_hmb.attr && !ctrl->hmpre)
2789 		return 0;
2790 
2791 	return a->mode;
2792 }
2793 
2794 static struct attribute *nvme_pci_attrs[] = {
2795 	&dev_attr_cmb.attr,
2796 	&dev_attr_cmbloc.attr,
2797 	&dev_attr_cmbsz.attr,
2798 	&dev_attr_hmb.attr,
2799 	NULL,
2800 };
2801 
2802 static const struct attribute_group nvme_pci_dev_attrs_group = {
2803 	.attrs		= nvme_pci_attrs,
2804 	.is_visible	= nvme_pci_attrs_are_visible,
2805 };
2806 
2807 static const struct attribute_group *nvme_pci_dev_attr_groups[] = {
2808 	&nvme_dev_attrs_group,
2809 	&nvme_pci_dev_attrs_group,
2810 	NULL,
2811 };
2812 
nvme_update_attrs(struct nvme_dev * dev)2813 static void nvme_update_attrs(struct nvme_dev *dev)
2814 {
2815 	sysfs_update_group(&dev->ctrl.device->kobj, &nvme_pci_dev_attrs_group);
2816 }
2817 
2818 /*
2819  * nirqs is the number of interrupts available for write and read
2820  * queues. The core already reserved an interrupt for the admin queue.
2821  */
nvme_calc_irq_sets(struct irq_affinity * affd,unsigned int nrirqs)2822 static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
2823 {
2824 	struct nvme_dev *dev = affd->priv;
2825 	unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;
2826 
2827 	/*
2828 	 * If there is no interrupt available for queues, ensure that
2829 	 * the default queue is set to 1. The affinity set size is
2830 	 * also set to one, but the irq core ignores it for this case.
2831 	 *
2832 	 * If only one interrupt is available or 'write_queue' == 0, combine
2833 	 * write and read queues.
2834 	 *
2835 	 * If 'write_queues' > 0, ensure it leaves room for at least one read
2836 	 * queue.
2837 	 */
2838 	if (!nrirqs) {
2839 		nrirqs = 1;
2840 		nr_read_queues = 0;
2841 	} else if (nrirqs == 1 || !nr_write_queues) {
2842 		nr_read_queues = 0;
2843 	} else if (nr_write_queues >= nrirqs) {
2844 		nr_read_queues = 1;
2845 	} else {
2846 		nr_read_queues = nrirqs - nr_write_queues;
2847 	}
2848 
2849 	dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
2850 	affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
2851 	dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
2852 	affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
2853 	affd->nr_sets = nr_read_queues ? 2 : 1;
2854 }
2855 
nvme_setup_irqs(struct nvme_dev * dev,unsigned int nr_io_queues)2856 static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
2857 {
2858 	struct pci_dev *pdev = to_pci_dev(dev->dev);
2859 	struct irq_affinity affd = {
2860 		.pre_vectors	= 1,
2861 		.calc_sets	= nvme_calc_irq_sets,
2862 		.priv		= dev,
2863 	};
2864 	unsigned int irq_queues, poll_queues;
2865 	unsigned int flags = PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY;
2866 
2867 	/*
2868 	 * Poll queues don't need interrupts, but we need at least one I/O queue
2869 	 * left over for non-polled I/O.
2870 	 */
2871 	poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1);
2872 	dev->io_queues[HCTX_TYPE_POLL] = poll_queues;
2873 
2874 	/*
2875 	 * Initialize for the single interrupt case, will be updated in
2876 	 * nvme_calc_irq_sets().
2877 	 */
2878 	dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
2879 	dev->io_queues[HCTX_TYPE_READ] = 0;
2880 
2881 	/*
2882 	 * We need interrupts for the admin queue and each non-polled I/O queue,
2883 	 * but some Apple controllers require all queues to use the first
2884 	 * vector.
2885 	 */
2886 	irq_queues = 1;
2887 	if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
2888 		irq_queues += (nr_io_queues - poll_queues);
2889 	if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
2890 		flags &= ~PCI_IRQ_MSI;
2891 	return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, flags,
2892 					      &affd);
2893 }
2894 
nvme_max_io_queues(struct nvme_dev * dev)2895 static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
2896 {
2897 	/*
2898 	 * If tags are shared with admin queue (Apple bug), then
2899 	 * make sure we only use one IO queue.
2900 	 */
2901 	if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
2902 		return 1;
2903 	return blk_mq_num_possible_queues(0) + dev->nr_write_queues +
2904 		dev->nr_poll_queues;
2905 }
2906 
nvme_setup_io_queues(struct nvme_dev * dev)2907 static int nvme_setup_io_queues(struct nvme_dev *dev)
2908 {
2909 	struct nvme_queue *adminq = &dev->queues[0];
2910 	struct pci_dev *pdev = to_pci_dev(dev->dev);
2911 	unsigned int nr_io_queues;
2912 	unsigned long size;
2913 	int result;
2914 
2915 	/*
2916 	 * Sample the module parameters once at reset time so that we have
2917 	 * stable values to work with.
2918 	 */
2919 	dev->nr_write_queues = write_queues;
2920 	dev->nr_poll_queues = poll_queues;
2921 
2922 	if (dev->ctrl.tagset) {
2923 		/*
2924 		 * The set's maps are allocated only once at initialization
2925 		 * time. We can't add special queues later if their mq_map
2926 		 * wasn't preallocated.
2927 		 */
2928 		if (dev->ctrl.tagset->nr_maps < 3)
2929 			dev->nr_poll_queues = 0;
2930 		if (dev->ctrl.tagset->nr_maps < 2)
2931 			dev->nr_write_queues = 0;
2932 	}
2933 
2934 	/*
2935 	 * The initial number of allocated queue slots may be too large if the
2936 	 * user reduced the special queue parameters. Cap the value to the
2937 	 * number we need for this round.
2938 	 */
2939 	nr_io_queues = min(nvme_max_io_queues(dev),
2940 			   dev->nr_allocated_queues - 1);
2941 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
2942 	if (result < 0)
2943 		return result;
2944 
2945 	if (nr_io_queues == 0)
2946 		return 0;
2947 
2948 	/*
2949 	 * Free IRQ resources as soon as NVMEQ_ENABLED bit transitions
2950 	 * from set to unset. If there is a window to it is truely freed,
2951 	 * pci_free_irq_vectors() jumping into this window will crash.
2952 	 * And take lock to avoid racing with pci_free_irq_vectors() in
2953 	 * nvme_dev_disable() path.
2954 	 */
2955 	result = nvme_setup_io_queues_trylock(dev);
2956 	if (result)
2957 		return result;
2958 	if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
2959 		pci_free_irq(pdev, 0, adminq);
2960 
2961 	if (dev->cmb_use_sqes) {
2962 		result = nvme_cmb_qdepth(dev, nr_io_queues,
2963 				sizeof(struct nvme_command));
2964 		if (result > 0) {
2965 			dev->q_depth = result;
2966 			dev->ctrl.sqsize = result - 1;
2967 		} else {
2968 			dev->cmb_use_sqes = false;
2969 		}
2970 	}
2971 
2972 	do {
2973 		size = db_bar_size(dev, nr_io_queues);
2974 		result = nvme_remap_bar(dev, size);
2975 		if (!result)
2976 			break;
2977 		if (!--nr_io_queues) {
2978 			result = -ENOMEM;
2979 			goto out_unlock;
2980 		}
2981 	} while (1);
2982 	adminq->q_db = dev->dbs;
2983 
2984  retry:
2985 	/* Deregister the admin queue's interrupt */
2986 	if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
2987 		pci_free_irq(pdev, 0, adminq);
2988 
2989 	/*
2990 	 * If we enable msix early due to not intx, disable it again before
2991 	 * setting up the full range we need.
2992 	 */
2993 	pci_free_irq_vectors(pdev);
2994 
2995 	result = nvme_setup_irqs(dev, nr_io_queues);
2996 	if (result <= 0) {
2997 		result = -EIO;
2998 		goto out_unlock;
2999 	}
3000 
3001 	dev->num_vecs = result;
3002 	result = max(result - 1, 1);
3003 	dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
3004 
3005 	/*
3006 	 * Should investigate if there's a performance win from allocating
3007 	 * more queues than interrupt vectors; it might allow the submission
3008 	 * path to scale better, even if the receive path is limited by the
3009 	 * number of interrupts.
3010 	 */
3011 	result = queue_request_irq(adminq);
3012 	if (result)
3013 		goto out_unlock;
3014 	set_bit(NVMEQ_ENABLED, &adminq->flags);
3015 	mutex_unlock(&dev->shutdown_lock);
3016 
3017 	result = nvme_create_io_queues(dev);
3018 	if (result || dev->online_queues < 2)
3019 		return result;
3020 
3021 	if (dev->online_queues - 1 < dev->max_qid) {
3022 		nr_io_queues = dev->online_queues - 1;
3023 		nvme_delete_io_queues(dev);
3024 		result = nvme_setup_io_queues_trylock(dev);
3025 		if (result)
3026 			return result;
3027 		nvme_suspend_io_queues(dev);
3028 		goto retry;
3029 	}
3030 	dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
3031 					dev->io_queues[HCTX_TYPE_DEFAULT],
3032 					dev->io_queues[HCTX_TYPE_READ],
3033 					dev->io_queues[HCTX_TYPE_POLL]);
3034 	return 0;
3035 out_unlock:
3036 	mutex_unlock(&dev->shutdown_lock);
3037 	return result;
3038 }
3039 
nvme_del_queue_end(struct request * req,blk_status_t error,const struct io_comp_batch * iob)3040 static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
3041 					     blk_status_t error,
3042 					     const struct io_comp_batch *iob)
3043 {
3044 	struct nvme_queue *nvmeq = req->end_io_data;
3045 
3046 	blk_mq_free_request(req);
3047 	complete(&nvmeq->delete_done);
3048 	return RQ_END_IO_NONE;
3049 }
3050 
nvme_del_cq_end(struct request * req,blk_status_t error,const struct io_comp_batch * iob)3051 static enum rq_end_io_ret nvme_del_cq_end(struct request *req,
3052 					  blk_status_t error,
3053 					  const struct io_comp_batch *iob)
3054 {
3055 	struct nvme_queue *nvmeq = req->end_io_data;
3056 
3057 	if (error)
3058 		set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
3059 
3060 	return nvme_del_queue_end(req, error, iob);
3061 }
3062 
nvme_delete_queue(struct nvme_queue * nvmeq,u8 opcode)3063 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
3064 {
3065 	struct request_queue *q = nvmeq->dev->ctrl.admin_q;
3066 	struct request *req;
3067 	struct nvme_command cmd = { };
3068 
3069 	cmd.delete_queue.opcode = opcode;
3070 	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
3071 
3072 	req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT);
3073 	if (IS_ERR(req))
3074 		return PTR_ERR(req);
3075 	nvme_init_request(req, &cmd);
3076 
3077 	if (opcode == nvme_admin_delete_cq)
3078 		req->end_io = nvme_del_cq_end;
3079 	else
3080 		req->end_io = nvme_del_queue_end;
3081 	req->end_io_data = nvmeq;
3082 
3083 	init_completion(&nvmeq->delete_done);
3084 	blk_execute_rq_nowait(req, false);
3085 	return 0;
3086 }
3087 
__nvme_delete_io_queues(struct nvme_dev * dev,u8 opcode)3088 static bool __nvme_delete_io_queues(struct nvme_dev *dev, u8 opcode)
3089 {
3090 	int nr_queues = dev->online_queues - 1, sent = 0;
3091 	unsigned long timeout;
3092 
3093  retry:
3094 	timeout = NVME_ADMIN_TIMEOUT;
3095 	while (nr_queues > 0) {
3096 		if (nvme_delete_queue(&dev->queues[nr_queues], opcode))
3097 			break;
3098 		nr_queues--;
3099 		sent++;
3100 	}
3101 	while (sent) {
3102 		struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent];
3103 
3104 		timeout = wait_for_completion_io_timeout(&nvmeq->delete_done,
3105 				timeout);
3106 		if (timeout == 0)
3107 			return false;
3108 
3109 		sent--;
3110 		if (nr_queues)
3111 			goto retry;
3112 	}
3113 	return true;
3114 }
3115 
nvme_delete_io_queues(struct nvme_dev * dev)3116 static void nvme_delete_io_queues(struct nvme_dev *dev)
3117 {
3118 	if (__nvme_delete_io_queues(dev, nvme_admin_delete_sq))
3119 		__nvme_delete_io_queues(dev, nvme_admin_delete_cq);
3120 }
3121 
nvme_pci_nr_maps(struct nvme_dev * dev)3122 static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev)
3123 {
3124 	if (dev->io_queues[HCTX_TYPE_POLL])
3125 		return 3;
3126 	if (dev->io_queues[HCTX_TYPE_READ])
3127 		return 2;
3128 	return 1;
3129 }
3130 
nvme_pci_update_nr_queues(struct nvme_dev * dev)3131 static bool nvme_pci_update_nr_queues(struct nvme_dev *dev)
3132 {
3133 	if (!dev->ctrl.tagset) {
3134 		nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
3135 				nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
3136 		return true;
3137 	}
3138 
3139 	/* Give up if we are racing with nvme_dev_disable() */
3140 	if (!mutex_trylock(&dev->shutdown_lock))
3141 		return false;
3142 
3143 	/* Check if nvme_dev_disable() has been executed already */
3144 	if (!dev->online_queues) {
3145 		mutex_unlock(&dev->shutdown_lock);
3146 		return false;
3147 	}
3148 
3149 	blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
3150 	/* free previously allocated queues that are no longer usable */
3151 	nvme_free_queues(dev, dev->online_queues);
3152 	mutex_unlock(&dev->shutdown_lock);
3153 	return true;
3154 }
3155 
nvme_pci_enable(struct nvme_dev * dev)3156 static int nvme_pci_enable(struct nvme_dev *dev)
3157 {
3158 	int result = -ENOMEM;
3159 	struct pci_dev *pdev = to_pci_dev(dev->dev);
3160 	unsigned int flags = PCI_IRQ_ALL_TYPES;
3161 
3162 	if (pci_enable_device_mem(pdev))
3163 		return result;
3164 
3165 	pci_set_master(pdev);
3166 
3167 	if (readl(dev->bar + NVME_REG_CSTS) == -1) {
3168 		dev_dbg(dev->ctrl.device, "reading CSTS register failed\n");
3169 		result = -ENODEV;
3170 		goto disable;
3171 	}
3172 
3173 	/*
3174 	 * Some devices and/or platforms don't advertise or work with INTx
3175 	 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
3176 	 * adjust this later.
3177 	 */
3178 	if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
3179 		flags &= ~PCI_IRQ_MSI;
3180 	result = pci_alloc_irq_vectors(pdev, 1, 1, flags);
3181 	if (result < 0)
3182 		goto disable;
3183 
3184 	dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
3185 
3186 	dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
3187 				io_queue_depth);
3188 	dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
3189 	dev->dbs = dev->bar + 4096;
3190 
3191 	/*
3192 	 * Some Apple controllers require a non-standard SQE size.
3193 	 * Interestingly they also seem to ignore the CC:IOSQES register
3194 	 * so we don't bother updating it here.
3195 	 */
3196 	if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES)
3197 		dev->io_sqes = 7;
3198 	else
3199 		dev->io_sqes = NVME_NVM_IOSQES;
3200 
3201 	if (dev->ctrl.quirks & NVME_QUIRK_QDEPTH_ONE) {
3202 		dev->q_depth = 2;
3203 	} else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
3204 		   (pdev->device == 0xa821 || pdev->device == 0xa822) &&
3205 		   NVME_CAP_MQES(dev->ctrl.cap) == 0) {
3206 		dev->q_depth = 64;
3207 		dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, "
3208                         "set queue depth=%u\n", dev->q_depth);
3209 	}
3210 
3211 	/*
3212 	 * Controllers with the shared tags quirk need the IO queue to be
3213 	 * big enough so that we get 32 tags for the admin queue
3214 	 */
3215 	if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) &&
3216 	    (dev->q_depth < (NVME_AQ_DEPTH + 2))) {
3217 		dev->q_depth = NVME_AQ_DEPTH + 2;
3218 		dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n",
3219 			 dev->q_depth);
3220 	}
3221 	dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
3222 
3223 	nvme_map_cmb(dev);
3224 
3225 	pci_save_state(pdev);
3226 
3227 	result = nvme_pci_configure_admin_queue(dev);
3228 	if (result)
3229 		goto free_irq;
3230 	return result;
3231 
3232  free_irq:
3233 	pci_free_irq_vectors(pdev);
3234  disable:
3235 	pci_disable_device(pdev);
3236 	return result;
3237 }
3238 
nvme_dev_unmap(struct nvme_dev * dev)3239 static void nvme_dev_unmap(struct nvme_dev *dev)
3240 {
3241 	if (dev->bar)
3242 		iounmap(dev->bar);
3243 	pci_release_mem_regions(to_pci_dev(dev->dev));
3244 }
3245 
nvme_pci_ctrl_is_dead(struct nvme_dev * dev)3246 static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev)
3247 {
3248 	struct pci_dev *pdev = to_pci_dev(dev->dev);
3249 	u32 csts;
3250 
3251 	if (!pci_is_enabled(pdev) || !pci_device_is_present(pdev))
3252 		return true;
3253 	if (pdev->error_state != pci_channel_io_normal)
3254 		return true;
3255 
3256 	csts = readl(dev->bar + NVME_REG_CSTS);
3257 	return (csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY);
3258 }
3259 
nvme_dev_disable(struct nvme_dev * dev,bool shutdown)3260 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
3261 {
3262 	enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl);
3263 	struct pci_dev *pdev = to_pci_dev(dev->dev);
3264 	bool dead;
3265 
3266 	mutex_lock(&dev->shutdown_lock);
3267 	dead = nvme_pci_ctrl_is_dead(dev);
3268 	if (state == NVME_CTRL_LIVE || state == NVME_CTRL_RESETTING) {
3269 		if (pci_is_enabled(pdev))
3270 			nvme_start_freeze(&dev->ctrl);
3271 		/*
3272 		 * Give the controller a chance to complete all entered requests
3273 		 * if doing a safe shutdown.
3274 		 */
3275 		if (!dead && shutdown)
3276 			nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
3277 	}
3278 
3279 	nvme_quiesce_io_queues(&dev->ctrl);
3280 
3281 	if (!dead && dev->ctrl.queue_count > 0) {
3282 		nvme_delete_io_queues(dev);
3283 		nvme_disable_ctrl(&dev->ctrl, shutdown);
3284 		nvme_poll_irqdisable(&dev->queues[0]);
3285 	}
3286 	nvme_suspend_io_queues(dev);
3287 	nvme_suspend_queue(dev, 0);
3288 	pci_free_irq_vectors(pdev);
3289 	if (pci_is_enabled(pdev))
3290 		pci_disable_device(pdev);
3291 	nvme_reap_pending_cqes(dev);
3292 
3293 	nvme_cancel_tagset(&dev->ctrl);
3294 	nvme_cancel_admin_tagset(&dev->ctrl);
3295 
3296 	/*
3297 	 * The driver will not be starting up queues again if shutting down so
3298 	 * must flush all entered requests to their failed completion to avoid
3299 	 * deadlocking blk-mq hot-cpu notifier.
3300 	 */
3301 	if (shutdown) {
3302 		nvme_unquiesce_io_queues(&dev->ctrl);
3303 		if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
3304 			nvme_unquiesce_admin_queue(&dev->ctrl);
3305 	}
3306 	mutex_unlock(&dev->shutdown_lock);
3307 }
3308 
nvme_disable_prepare_reset(struct nvme_dev * dev,bool shutdown)3309 static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
3310 {
3311 	if (!nvme_wait_reset(&dev->ctrl))
3312 		return -EBUSY;
3313 	nvme_dev_disable(dev, shutdown);
3314 	return 0;
3315 }
3316 
nvme_pci_alloc_iod_mempool(struct nvme_dev * dev)3317 static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
3318 {
3319 	size_t alloc_size = sizeof(struct nvme_dma_vec) * NVME_MAX_SEGS;
3320 
3321 	dev->dmavec_mempool = mempool_create_node(1,
3322 			mempool_kmalloc, mempool_kfree,
3323 			(void *)alloc_size, GFP_KERNEL,
3324 			dev_to_node(dev->dev));
3325 	if (!dev->dmavec_mempool)
3326 		return -ENOMEM;
3327 	return 0;
3328 }
3329 
nvme_free_tagset(struct nvme_dev * dev)3330 static void nvme_free_tagset(struct nvme_dev *dev)
3331 {
3332 	if (dev->tagset.tags)
3333 		nvme_remove_io_tag_set(&dev->ctrl);
3334 	dev->ctrl.tagset = NULL;
3335 }
3336 
3337 /* pairs with nvme_pci_alloc_dev */
nvme_pci_free_ctrl(struct nvme_ctrl * ctrl)3338 static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
3339 {
3340 	struct nvme_dev *dev = to_nvme_dev(ctrl);
3341 
3342 	nvme_free_tagset(dev);
3343 	put_device(dev->dev);
3344 	kfree(dev->queues);
3345 	kfree(dev);
3346 }
3347 
nvme_reset_work(struct work_struct * work)3348 static void nvme_reset_work(struct work_struct *work)
3349 {
3350 	struct nvme_dev *dev =
3351 		container_of(work, struct nvme_dev, ctrl.reset_work);
3352 	bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
3353 	int result;
3354 
3355 	if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) {
3356 		dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
3357 			 dev->ctrl.state);
3358 		result = -ENODEV;
3359 		goto out;
3360 	}
3361 
3362 	/*
3363 	 * If we're called to reset a live controller first shut it down before
3364 	 * moving on.
3365 	 */
3366 	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
3367 		nvme_dev_disable(dev, false);
3368 	nvme_sync_queues(&dev->ctrl);
3369 
3370 	mutex_lock(&dev->shutdown_lock);
3371 	result = nvme_pci_enable(dev);
3372 	if (result)
3373 		goto out_unlock;
3374 	nvme_unquiesce_admin_queue(&dev->ctrl);
3375 	mutex_unlock(&dev->shutdown_lock);
3376 
3377 	/*
3378 	 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
3379 	 * initializing procedure here.
3380 	 */
3381 	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
3382 		dev_warn(dev->ctrl.device,
3383 			"failed to mark controller CONNECTING\n");
3384 		result = -EBUSY;
3385 		goto out;
3386 	}
3387 
3388 	result = nvme_init_ctrl_finish(&dev->ctrl, was_suspend);
3389 	if (result)
3390 		goto out;
3391 
3392 	if (nvme_ctrl_meta_sgl_supported(&dev->ctrl))
3393 		dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS;
3394 	else
3395 		dev->ctrl.max_integrity_segments = 1;
3396 
3397 	nvme_dbbuf_dma_alloc(dev);
3398 
3399 	result = nvme_setup_host_mem(dev);
3400 	if (result < 0)
3401 		goto out;
3402 
3403 	nvme_update_attrs(dev);
3404 
3405 	result = nvme_setup_io_queues(dev);
3406 	if (result)
3407 		goto out;
3408 
3409 	/*
3410 	 * Freeze and update the number of I/O queues as those might have
3411 	 * changed.  If there are no I/O queues left after this reset, keep the
3412 	 * controller around but remove all namespaces.
3413 	 */
3414 	if (dev->online_queues > 1) {
3415 		nvme_dbbuf_set(dev);
3416 		nvme_unquiesce_io_queues(&dev->ctrl);
3417 		nvme_wait_freeze(&dev->ctrl);
3418 		if (!nvme_pci_update_nr_queues(dev))
3419 			goto out;
3420 		nvme_unfreeze(&dev->ctrl);
3421 	} else {
3422 		dev_warn(dev->ctrl.device, "IO queues lost\n");
3423 		nvme_mark_namespaces_dead(&dev->ctrl);
3424 		nvme_unquiesce_io_queues(&dev->ctrl);
3425 		nvme_remove_namespaces(&dev->ctrl);
3426 		nvme_free_tagset(dev);
3427 	}
3428 
3429 	/*
3430 	 * If only admin queue live, keep it to do further investigation or
3431 	 * recovery.
3432 	 */
3433 	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
3434 		dev_warn(dev->ctrl.device,
3435 			"failed to mark controller live state\n");
3436 		result = -ENODEV;
3437 		goto out;
3438 	}
3439 
3440 	nvme_start_ctrl(&dev->ctrl);
3441 	return;
3442 
3443  out_unlock:
3444 	mutex_unlock(&dev->shutdown_lock);
3445  out:
3446 	/*
3447 	 * Set state to deleting now to avoid blocking nvme_wait_reset(), which
3448 	 * may be holding this pci_dev's device lock.
3449 	 */
3450 	dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n",
3451 		 result);
3452 	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
3453 	nvme_dev_disable(dev, true);
3454 	nvme_sync_queues(&dev->ctrl);
3455 	nvme_mark_namespaces_dead(&dev->ctrl);
3456 	nvme_unquiesce_io_queues(&dev->ctrl);
3457 	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
3458 }
3459 
nvme_pci_reg_read32(struct nvme_ctrl * ctrl,u32 off,u32 * val)3460 static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
3461 {
3462 	*val = readl(to_nvme_dev(ctrl)->bar + off);
3463 	return 0;
3464 }
3465 
nvme_pci_reg_write32(struct nvme_ctrl * ctrl,u32 off,u32 val)3466 static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
3467 {
3468 	writel(val, to_nvme_dev(ctrl)->bar + off);
3469 	return 0;
3470 }
3471 
nvme_pci_reg_read64(struct nvme_ctrl * ctrl,u32 off,u64 * val)3472 static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
3473 {
3474 	*val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
3475 	return 0;
3476 }
3477 
nvme_pci_get_address(struct nvme_ctrl * ctrl,char * buf,int size)3478 static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
3479 {
3480 	struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
3481 
3482 	return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
3483 }
3484 
nvme_pci_print_device_info(struct nvme_ctrl * ctrl)3485 static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
3486 {
3487 	struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
3488 	struct nvme_subsystem *subsys = ctrl->subsys;
3489 
3490 	dev_err(ctrl->device,
3491 		"VID:DID %04x:%04x model:%.*s firmware:%.*s\n",
3492 		pdev->vendor, pdev->device,
3493 		nvme_strlen(subsys->model, sizeof(subsys->model)),
3494 		subsys->model, nvme_strlen(subsys->firmware_rev,
3495 					   sizeof(subsys->firmware_rev)),
3496 		subsys->firmware_rev);
3497 }
3498 
nvme_pci_supports_pci_p2pdma(struct nvme_ctrl * ctrl)3499 static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl)
3500 {
3501 	struct nvme_dev *dev = to_nvme_dev(ctrl);
3502 
3503 	return dma_pci_p2pdma_supported(dev->dev);
3504 }
3505 
nvme_pci_get_virt_boundary(struct nvme_ctrl * ctrl,bool is_admin)3506 static unsigned long nvme_pci_get_virt_boundary(struct nvme_ctrl *ctrl,
3507 						bool is_admin)
3508 {
3509 	if (!nvme_ctrl_sgl_supported(ctrl) || is_admin)
3510 		return NVME_CTRL_PAGE_SIZE - 1;
3511 	return 0;
3512 }
3513 
3514 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
3515 	.name			= "pcie",
3516 	.module			= THIS_MODULE,
3517 	.flags			= NVME_F_METADATA_SUPPORTED,
3518 	.dev_attr_groups	= nvme_pci_dev_attr_groups,
3519 	.reg_read32		= nvme_pci_reg_read32,
3520 	.reg_write32		= nvme_pci_reg_write32,
3521 	.reg_read64		= nvme_pci_reg_read64,
3522 	.free_ctrl		= nvme_pci_free_ctrl,
3523 	.submit_async_event	= nvme_pci_submit_async_event,
3524 	.subsystem_reset	= nvme_pci_subsystem_reset,
3525 	.get_address		= nvme_pci_get_address,
3526 	.print_device_info	= nvme_pci_print_device_info,
3527 	.supports_pci_p2pdma	= nvme_pci_supports_pci_p2pdma,
3528 	.get_virt_boundary	= nvme_pci_get_virt_boundary,
3529 };
3530 
nvme_dev_map(struct nvme_dev * dev)3531 static int nvme_dev_map(struct nvme_dev *dev)
3532 {
3533 	struct pci_dev *pdev = to_pci_dev(dev->dev);
3534 
3535 	if (pci_request_mem_regions(pdev, "nvme"))
3536 		return -ENODEV;
3537 
3538 	if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
3539 		goto release;
3540 
3541 	return 0;
3542   release:
3543 	pci_release_mem_regions(pdev);
3544 	return -ENODEV;
3545 }
3546 
check_vendor_combination_bug(struct pci_dev * pdev)3547 static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
3548 {
3549 	if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
3550 		/*
3551 		 * Several Samsung devices seem to drop off the PCIe bus
3552 		 * randomly when APST is on and uses the deepest sleep state.
3553 		 * This has been observed on a Samsung "SM951 NVMe SAMSUNG
3554 		 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
3555 		 * 950 PRO 256GB", but it seems to be restricted to two Dell
3556 		 * laptops.
3557 		 */
3558 		if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
3559 		    (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
3560 		     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
3561 			return NVME_QUIRK_NO_DEEPEST_PS;
3562 	} else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
3563 		/*
3564 		 * Samsung SSD 960 EVO drops off the PCIe bus after system
3565 		 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
3566 		 * within few minutes after bootup on a Coffee Lake board -
3567 		 * ASUS PRIME Z370-A
3568 		 */
3569 		if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
3570 		    (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
3571 		     dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
3572 			return NVME_QUIRK_NO_APST;
3573 	} else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 ||
3574 		    pdev->device == 0xa808 || pdev->device == 0xa809)) ||
3575 		   (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) {
3576 		/*
3577 		 * Forcing to use host managed nvme power settings for
3578 		 * lowest idle power with quick resume latency on
3579 		 * Samsung and Toshiba SSDs based on suspend behavior
3580 		 * on Coffee Lake board for LENOVO C640
3581 		 */
3582 		if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
3583 		     dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
3584 			return NVME_QUIRK_SIMPLE_SUSPEND;
3585 	} else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 ||
3586 		   pdev->device == 0x500f)) {
3587 		/*
3588 		 * Exclude some Kingston NV1 and A2000 devices from
3589 		 * NVME_QUIRK_SIMPLE_SUSPEND. Do a full suspend to save a
3590 		 * lot of energy with s2idle sleep on some TUXEDO platforms.
3591 		 */
3592 		if (dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
3593 		    dmi_match(DMI_BOARD_NAME, "NS5x_7xAU") ||
3594 		    dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") ||
3595 		    dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"))
3596 			return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
3597 	} else if (pdev->vendor == 0x144d && pdev->device == 0xa80d) {
3598 		/*
3599 		 * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND
3600 		 * because of high power consumption (> 2 Watt) in s2idle
3601 		 * sleep. Only some boards with Intel CPU are affected.
3602 		 * (Note for testing: Samsung 990 Evo Plus has same PCI ID)
3603 		 */
3604 		if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
3605 		    dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
3606 		    dmi_match(DMI_BOARD_NAME, "GXxMRXx") ||
3607 		    dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
3608 		    dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
3609 		    dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
3610 		    dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
3611 			return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
3612 	}
3613 
3614 	/*
3615 	 * NVMe SSD drops off the PCIe bus after system idle
3616 	 * for 10 hours on a Lenovo N60z board.
3617 	 */
3618 	if (dmi_match(DMI_BOARD_NAME, "LXKT-ZXEG-N6"))
3619 		return NVME_QUIRK_NO_APST;
3620 
3621 	return 0;
3622 }
3623 
detect_dynamic_quirks(struct pci_dev * pdev)3624 static struct quirk_entry *detect_dynamic_quirks(struct pci_dev *pdev)
3625 {
3626 	int i;
3627 
3628 	for (i = 0; i < nvme_pci_quirk_count; i++)
3629 		if (pdev->vendor == nvme_pci_quirk_list[i].vendor_id &&
3630 		    pdev->device == nvme_pci_quirk_list[i].dev_id)
3631 			return &nvme_pci_quirk_list[i];
3632 
3633 	return NULL;
3634 }
3635 
nvme_pci_alloc_dev(struct pci_dev * pdev,const struct pci_device_id * id)3636 static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
3637 		const struct pci_device_id *id)
3638 {
3639 	unsigned long quirks = id->driver_data;
3640 	int node = dev_to_node(&pdev->dev);
3641 	struct nvme_dev *dev;
3642 	struct quirk_entry *qentry;
3643 	int ret = -ENOMEM;
3644 
3645 	dev = kzalloc_node(struct_size(dev, descriptor_pools, nr_node_ids),
3646 			GFP_KERNEL, node);
3647 	if (!dev)
3648 		return ERR_PTR(-ENOMEM);
3649 	INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
3650 	mutex_init(&dev->shutdown_lock);
3651 
3652 	dev->nr_write_queues = write_queues;
3653 	dev->nr_poll_queues = poll_queues;
3654 	dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1;
3655 	dev->queues = kcalloc_node(dev->nr_allocated_queues,
3656 			sizeof(struct nvme_queue), GFP_KERNEL, node);
3657 	if (!dev->queues)
3658 		goto out_free_dev;
3659 
3660 	dev->dev = get_device(&pdev->dev);
3661 
3662 	quirks |= check_vendor_combination_bug(pdev);
3663 	if (!noacpi &&
3664 	    !(quirks & NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND) &&
3665 	    acpi_storage_d3(&pdev->dev)) {
3666 		/*
3667 		 * Some systems use a bios work around to ask for D3 on
3668 		 * platforms that support kernel managed suspend.
3669 		 */
3670 		dev_info(&pdev->dev,
3671 			 "platform quirk: setting simple suspend\n");
3672 		quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
3673 	}
3674 	qentry = detect_dynamic_quirks(pdev);
3675 	if (qentry) {
3676 		quirks |= qentry->enabled_quirks;
3677 		quirks &= ~qentry->disabled_quirks;
3678 	}
3679 	ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
3680 			     quirks);
3681 	if (ret)
3682 		goto out_put_device;
3683 
3684 	if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
3685 		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
3686 	else
3687 		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3688 	dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1);
3689 	dma_set_max_seg_size(&pdev->dev, 0xffffffff);
3690 
3691 	/*
3692 	 * Limit the max command size to prevent iod->sg allocations going
3693 	 * over a single page.
3694 	 */
3695 	dev->ctrl.max_hw_sectors = min_t(u32,
3696 			NVME_MAX_BYTES >> SECTOR_SHIFT,
3697 			dma_opt_mapping_size(&pdev->dev) >> 9);
3698 	dev->ctrl.max_segments = NVME_MAX_SEGS;
3699 	dev->ctrl.max_integrity_segments = 1;
3700 	return dev;
3701 
3702 out_put_device:
3703 	put_device(dev->dev);
3704 	kfree(dev->queues);
3705 out_free_dev:
3706 	kfree(dev);
3707 	return ERR_PTR(ret);
3708 }
3709 
nvme_probe(struct pci_dev * pdev,const struct pci_device_id * id)3710 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3711 {
3712 	struct nvme_dev *dev;
3713 	int result = -ENOMEM;
3714 
3715 	dev = nvme_pci_alloc_dev(pdev, id);
3716 	if (IS_ERR(dev))
3717 		return PTR_ERR(dev);
3718 
3719 	result = nvme_add_ctrl(&dev->ctrl);
3720 	if (result)
3721 		goto out_put_ctrl;
3722 
3723 	result = nvme_dev_map(dev);
3724 	if (result)
3725 		goto out_uninit_ctrl;
3726 
3727 	result = nvme_pci_alloc_iod_mempool(dev);
3728 	if (result)
3729 		goto out_dev_unmap;
3730 
3731 	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
3732 
3733 	result = nvme_pci_enable(dev);
3734 	if (result)
3735 		goto out_release_iod_mempool;
3736 
3737 	result = nvme_alloc_admin_tag_set(&dev->ctrl, &dev->admin_tagset,
3738 				&nvme_mq_admin_ops, sizeof(struct nvme_iod));
3739 	if (result)
3740 		goto out_disable;
3741 
3742 	/*
3743 	 * Mark the controller as connecting before sending admin commands to
3744 	 * allow the timeout handler to do the right thing.
3745 	 */
3746 	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
3747 		dev_warn(dev->ctrl.device,
3748 			"failed to mark controller CONNECTING\n");
3749 		result = -EBUSY;
3750 		goto out_disable;
3751 	}
3752 
3753 	result = nvme_init_ctrl_finish(&dev->ctrl, false);
3754 	if (result)
3755 		goto out_disable;
3756 
3757 	if (nvme_ctrl_meta_sgl_supported(&dev->ctrl))
3758 		dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS;
3759 	else
3760 		dev->ctrl.max_integrity_segments = 1;
3761 
3762 	nvme_dbbuf_dma_alloc(dev);
3763 
3764 	result = nvme_setup_host_mem(dev);
3765 	if (result < 0)
3766 		goto out_disable;
3767 
3768 	nvme_update_attrs(dev);
3769 
3770 	result = nvme_setup_io_queues(dev);
3771 	if (result)
3772 		goto out_disable;
3773 
3774 	if (dev->online_queues > 1) {
3775 		nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
3776 				nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
3777 		nvme_dbbuf_set(dev);
3778 	}
3779 
3780 	if (!dev->ctrl.tagset)
3781 		dev_warn(dev->ctrl.device, "IO queues not created\n");
3782 
3783 	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
3784 		dev_warn(dev->ctrl.device,
3785 			"failed to mark controller live state\n");
3786 		result = -ENODEV;
3787 		goto out_disable;
3788 	}
3789 
3790 	pci_set_drvdata(pdev, dev);
3791 
3792 	nvme_start_ctrl(&dev->ctrl);
3793 	nvme_put_ctrl(&dev->ctrl);
3794 	flush_work(&dev->ctrl.scan_work);
3795 	return 0;
3796 
3797 out_disable:
3798 	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
3799 	nvme_dev_disable(dev, true);
3800 	nvme_free_host_mem(dev);
3801 	nvme_dev_remove_admin(dev);
3802 	nvme_dbbuf_dma_free(dev);
3803 	nvme_free_queues(dev, 0);
3804 out_release_iod_mempool:
3805 	mempool_destroy(dev->dmavec_mempool);
3806 out_dev_unmap:
3807 	nvme_dev_unmap(dev);
3808 out_uninit_ctrl:
3809 	nvme_uninit_ctrl(&dev->ctrl);
3810 out_put_ctrl:
3811 	nvme_put_ctrl(&dev->ctrl);
3812 	dev_err_probe(&pdev->dev, result, "probe failed\n");
3813 	return result;
3814 }
3815 
nvme_reset_prepare(struct pci_dev * pdev)3816 static void nvme_reset_prepare(struct pci_dev *pdev)
3817 {
3818 	struct nvme_dev *dev = pci_get_drvdata(pdev);
3819 
3820 	/*
3821 	 * We don't need to check the return value from waiting for the reset
3822 	 * state as pci_dev device lock is held, making it impossible to race
3823 	 * with ->remove().
3824 	 */
3825 	nvme_disable_prepare_reset(dev, false);
3826 	nvme_sync_queues(&dev->ctrl);
3827 }
3828 
nvme_reset_done(struct pci_dev * pdev)3829 static void nvme_reset_done(struct pci_dev *pdev)
3830 {
3831 	struct nvme_dev *dev = pci_get_drvdata(pdev);
3832 
3833 	if (!nvme_try_sched_reset(&dev->ctrl))
3834 		flush_work(&dev->ctrl.reset_work);
3835 }
3836 
nvme_shutdown(struct pci_dev * pdev)3837 static void nvme_shutdown(struct pci_dev *pdev)
3838 {
3839 	struct nvme_dev *dev = pci_get_drvdata(pdev);
3840 
3841 	nvme_disable_prepare_reset(dev, true);
3842 }
3843 
3844 /*
3845  * The driver's remove may be called on a device in a partially initialized
3846  * state. This function must not have any dependencies on the device state in
3847  * order to proceed.
3848  */
nvme_remove(struct pci_dev * pdev)3849 static void nvme_remove(struct pci_dev *pdev)
3850 {
3851 	struct nvme_dev *dev = pci_get_drvdata(pdev);
3852 
3853 	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
3854 	pci_set_drvdata(pdev, NULL);
3855 
3856 	if (!pci_device_is_present(pdev)) {
3857 		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
3858 		nvme_dev_disable(dev, true);
3859 	}
3860 
3861 	flush_work(&dev->ctrl.reset_work);
3862 	nvme_stop_ctrl(&dev->ctrl);
3863 	nvme_remove_namespaces(&dev->ctrl);
3864 	nvme_dev_disable(dev, true);
3865 	nvme_free_host_mem(dev);
3866 	nvme_dev_remove_admin(dev);
3867 	nvme_dbbuf_dma_free(dev);
3868 	nvme_free_queues(dev, 0);
3869 	mempool_destroy(dev->dmavec_mempool);
3870 	nvme_release_descriptor_pools(dev);
3871 	nvme_dev_unmap(dev);
3872 	nvme_uninit_ctrl(&dev->ctrl);
3873 }
3874 
3875 #ifdef CONFIG_PM_SLEEP
nvme_get_power_state(struct nvme_ctrl * ctrl,u32 * ps)3876 static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps)
3877 {
3878 	return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps);
3879 }
3880 
nvme_set_power_state(struct nvme_ctrl * ctrl,u32 ps)3881 static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps)
3882 {
3883 	return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL);
3884 }
3885 
nvme_resume(struct device * dev)3886 static int nvme_resume(struct device *dev)
3887 {
3888 	struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
3889 	struct nvme_ctrl *ctrl = &ndev->ctrl;
3890 
3891 	if (ndev->last_ps == U32_MAX ||
3892 	    nvme_set_power_state(ctrl, ndev->last_ps) != 0)
3893 		goto reset;
3894 	if (ctrl->hmpre && nvme_setup_host_mem(ndev))
3895 		goto reset;
3896 
3897 	return 0;
3898 reset:
3899 	return nvme_try_sched_reset(ctrl);
3900 }
3901 
nvme_suspend(struct device * dev)3902 static int nvme_suspend(struct device *dev)
3903 {
3904 	struct pci_dev *pdev = to_pci_dev(dev);
3905 	struct nvme_dev *ndev = pci_get_drvdata(pdev);
3906 	struct nvme_ctrl *ctrl = &ndev->ctrl;
3907 	int ret = -EBUSY;
3908 
3909 	ndev->last_ps = U32_MAX;
3910 
3911 	/*
3912 	 * The platform does not remove power for a kernel managed suspend so
3913 	 * use host managed nvme power settings for lowest idle power if
3914 	 * possible. This should have quicker resume latency than a full device
3915 	 * shutdown.  But if the firmware is involved after the suspend or the
3916 	 * device does not support any non-default power states, shut down the
3917 	 * device fully.
3918 	 *
3919 	 * If ASPM is not enabled for the device, shut down the device and allow
3920 	 * the PCI bus layer to put it into D3 in order to take the PCIe link
3921 	 * down, so as to allow the platform to achieve its minimum low-power
3922 	 * state (which may not be possible if the link is up).
3923 	 */
3924 	if (pm_suspend_via_firmware() || !ctrl->npss ||
3925 	    !pcie_aspm_enabled(pdev) ||
3926 	    (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
3927 		return nvme_disable_prepare_reset(ndev, true);
3928 
3929 	nvme_start_freeze(ctrl);
3930 	nvme_wait_freeze(ctrl);
3931 	nvme_sync_queues(ctrl);
3932 
3933 	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
3934 		goto unfreeze;
3935 
3936 	/*
3937 	 * Host memory access may not be successful in a system suspend state,
3938 	 * but the specification allows the controller to access memory in a
3939 	 * non-operational power state.
3940 	 */
3941 	if (ndev->hmb) {
3942 		ret = nvme_set_host_mem(ndev, 0);
3943 		if (ret < 0)
3944 			goto unfreeze;
3945 	}
3946 
3947 	ret = nvme_get_power_state(ctrl, &ndev->last_ps);
3948 	if (ret < 0)
3949 		goto unfreeze;
3950 
3951 	/*
3952 	 * A saved state prevents pci pm from generically controlling the
3953 	 * device's power. If we're using protocol specific settings, we don't
3954 	 * want pci interfering.
3955 	 */
3956 	pci_save_state(pdev);
3957 
3958 	ret = nvme_set_power_state(ctrl, ctrl->npss);
3959 	if (ret < 0)
3960 		goto unfreeze;
3961 
3962 	if (ret) {
3963 		/* discard the saved state */
3964 		pci_load_saved_state(pdev, NULL);
3965 
3966 		/*
3967 		 * Clearing npss forces a controller reset on resume. The
3968 		 * correct value will be rediscovered then.
3969 		 */
3970 		ret = nvme_disable_prepare_reset(ndev, true);
3971 		ctrl->npss = 0;
3972 	}
3973 unfreeze:
3974 	nvme_unfreeze(ctrl);
3975 	return ret;
3976 }
3977 
nvme_simple_suspend(struct device * dev)3978 static int nvme_simple_suspend(struct device *dev)
3979 {
3980 	struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
3981 
3982 	return nvme_disable_prepare_reset(ndev, true);
3983 }
3984 
nvme_simple_resume(struct device * dev)3985 static int nvme_simple_resume(struct device *dev)
3986 {
3987 	struct pci_dev *pdev = to_pci_dev(dev);
3988 	struct nvme_dev *ndev = pci_get_drvdata(pdev);
3989 
3990 	return nvme_try_sched_reset(&ndev->ctrl);
3991 }
3992 
3993 static const struct dev_pm_ops nvme_dev_pm_ops = {
3994 	.suspend	= nvme_suspend,
3995 	.resume		= nvme_resume,
3996 	.freeze		= nvme_simple_suspend,
3997 	.thaw		= nvme_simple_resume,
3998 	.poweroff	= nvme_simple_suspend,
3999 	.restore	= nvme_simple_resume,
4000 };
4001 #endif /* CONFIG_PM_SLEEP */
4002 
nvme_error_detected(struct pci_dev * pdev,pci_channel_state_t state)4003 static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
4004 						pci_channel_state_t state)
4005 {
4006 	struct nvme_dev *dev = pci_get_drvdata(pdev);
4007 
4008 	/*
4009 	 * A frozen channel requires a reset. When detected, this method will
4010 	 * shutdown the controller to quiesce. The controller will be restarted
4011 	 * after the slot reset through driver's slot_reset callback.
4012 	 */
4013 	switch (state) {
4014 	case pci_channel_io_normal:
4015 		return PCI_ERS_RESULT_CAN_RECOVER;
4016 	case pci_channel_io_frozen:
4017 		dev_warn(dev->ctrl.device,
4018 			"frozen state error detected, reset controller\n");
4019 		if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
4020 			nvme_dev_disable(dev, true);
4021 			return PCI_ERS_RESULT_DISCONNECT;
4022 		}
4023 		nvme_dev_disable(dev, false);
4024 		return PCI_ERS_RESULT_NEED_RESET;
4025 	case pci_channel_io_perm_failure:
4026 		dev_warn(dev->ctrl.device,
4027 			"failure state error detected, request disconnect\n");
4028 		return PCI_ERS_RESULT_DISCONNECT;
4029 	}
4030 	return PCI_ERS_RESULT_NEED_RESET;
4031 }
4032 
nvme_slot_reset(struct pci_dev * pdev)4033 static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
4034 {
4035 	struct nvme_dev *dev = pci_get_drvdata(pdev);
4036 
4037 	dev_info(dev->ctrl.device, "restart after slot reset\n");
4038 	pci_restore_state(pdev);
4039 	if (nvme_try_sched_reset(&dev->ctrl))
4040 		nvme_unquiesce_io_queues(&dev->ctrl);
4041 	return PCI_ERS_RESULT_RECOVERED;
4042 }
4043 
nvme_error_resume(struct pci_dev * pdev)4044 static void nvme_error_resume(struct pci_dev *pdev)
4045 {
4046 	struct nvme_dev *dev = pci_get_drvdata(pdev);
4047 
4048 	flush_work(&dev->ctrl.reset_work);
4049 }
4050 
4051 static const struct pci_error_handlers nvme_err_handler = {
4052 	.error_detected	= nvme_error_detected,
4053 	.slot_reset	= nvme_slot_reset,
4054 	.resume		= nvme_error_resume,
4055 	.reset_prepare	= nvme_reset_prepare,
4056 	.reset_done	= nvme_reset_done,
4057 };
4058 
4059 static const struct pci_device_id nvme_id_table[] = {
4060 	{ PCI_VDEVICE(INTEL, 0x0953),	/* Intel 750/P3500/P3600/P3700 */
4061 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
4062 				NVME_QUIRK_DEALLOCATE_ZEROES, },
4063 	{ PCI_VDEVICE(INTEL, 0x0a53),	/* Intel P3520 */
4064 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
4065 				NVME_QUIRK_DEALLOCATE_ZEROES, },
4066 	{ PCI_VDEVICE(INTEL, 0x0a54),	/* Intel P4500/P4600 */
4067 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
4068 				NVME_QUIRK_IGNORE_DEV_SUBNQN |
4069 				NVME_QUIRK_BOGUS_NID, },
4070 	{ PCI_VDEVICE(INTEL, 0x0a55),	/* Dell Express Flash P4600 */
4071 		.driver_data = NVME_QUIRK_STRIPE_SIZE, },
4072 	{ PCI_VDEVICE(INTEL, 0xf1a5),	/* Intel 600P/P3100 */
4073 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
4074 				NVME_QUIRK_MEDIUM_PRIO_SQ |
4075 				NVME_QUIRK_NO_TEMP_THRESH_CHANGE |
4076 				NVME_QUIRK_DISABLE_WRITE_ZEROES, },
4077 	{ PCI_VDEVICE(INTEL, 0xf1a6),	/* Intel 760p/Pro 7600p */
4078 		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
4079 	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
4080 		.driver_data = NVME_QUIRK_IDENTIFY_CNS |
4081 				NVME_QUIRK_DISABLE_WRITE_ZEROES |
4082 				NVME_QUIRK_BOGUS_NID, },
4083 	{ PCI_VDEVICE(REDHAT, 0x0010),	/* Qemu emulated controller */
4084 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4085 	{ PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
4086 		.driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, },
4087 	{ PCI_DEVICE(0x126f, 0x1001),	/* Silicon Motion generic */
4088 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
4089 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
4090 	{ PCI_DEVICE(0x126f, 0x2262),	/* Silicon Motion generic */
4091 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
4092 				NVME_QUIRK_BOGUS_NID, },
4093 	{ PCI_DEVICE(0x126f, 0x2263),	/* Silicon Motion unidentified */
4094 		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
4095 				NVME_QUIRK_BOGUS_NID, },
4096 	{ PCI_DEVICE(0x1bb1, 0x0100),   /* Seagate Nytro Flash Storage */
4097 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
4098 				NVME_QUIRK_NO_NS_DESC_LIST, },
4099 	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
4100 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
4101 	{ PCI_DEVICE(0x1c58, 0x0023),	/* WDC SN200 adapter */
4102 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
4103 	{ PCI_DEVICE(0x1c5f, 0x0540),	/* Memblaze Pblaze4 adapter */
4104 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
4105 	{ PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */
4106 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
4107 	{ PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
4108 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
4109 				NVME_QUIRK_DISABLE_WRITE_ZEROES|
4110 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
4111 	{ PCI_DEVICE(0x15b7, 0x5008),   /* Sandisk SN530 */
4112 		.driver_data = NVME_QUIRK_BROKEN_MSI },
4113 	{ PCI_DEVICE(0x15b7, 0x5009),   /* Sandisk SN550 */
4114 		.driver_data = NVME_QUIRK_BROKEN_MSI |
4115 				NVME_QUIRK_NO_DEEPEST_PS },
4116 	{ PCI_DEVICE(0x1987, 0x5012),	/* Phison E12 */
4117 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4118 	{ PCI_DEVICE(0x1987, 0x5016),	/* Phison E16 */
4119 		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
4120 				NVME_QUIRK_BOGUS_NID, },
4121 	{ PCI_DEVICE(0x1987, 0x5019),  /* phison E19 */
4122 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
4123 	{ PCI_DEVICE(0x1987, 0x5021),   /* Phison E21 */
4124 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
4125 	{ PCI_DEVICE(0x1b4b, 0x1092),	/* Lexar 256 GB SSD */
4126 		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
4127 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
4128 	{ PCI_DEVICE(0x1cc1, 0x33f8),   /* ADATA IM2P33F8ABR1 1 TB */
4129 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4130 	{ PCI_DEVICE(0x10ec, 0x5762),   /* ADATA SX6000LNP */
4131 		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
4132 				NVME_QUIRK_BOGUS_NID, },
4133 	{ PCI_DEVICE(0x10ec, 0x5763),  /* ADATA SX6000PNP */
4134 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4135 	{ PCI_DEVICE(0x1cc1, 0x8201),   /* ADATA SX8200PNP 512GB */
4136 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
4137 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
4138 	 { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */
4139 		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN },
4140 	 { PCI_DEVICE(0x1344, 0x6001),   /* Micron Nitro NVMe */
4141 		 .driver_data = NVME_QUIRK_BOGUS_NID, },
4142 	{ PCI_DEVICE(0x1c5c, 0x1504),   /* SK Hynix PC400 */
4143 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
4144 	{ PCI_DEVICE(0x1c5c, 0x174a),   /* SK Hynix P31 SSD */
4145 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4146 	{ PCI_DEVICE(0x1c5c, 0x1D59),   /* SK Hynix BC901 */
4147 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
4148 	{ PCI_DEVICE(0x15b7, 0x2001),   /*  Sandisk Skyhawk */
4149 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
4150 	{ PCI_DEVICE(0x1d97, 0x2263),   /* SPCC */
4151 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
4152 	{ PCI_DEVICE(0x144d, 0xa80b),   /* Samsung PM9B1 256G and 512G */
4153 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES |
4154 				NVME_QUIRK_BOGUS_NID, },
4155 	{ PCI_DEVICE(0x144d, 0xa809),   /* Samsung MZALQ256HBJD 256G */
4156 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
4157 	{ PCI_DEVICE(0x144d, 0xa802),   /* Samsung SM953 */
4158 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4159 	{ PCI_DEVICE(0x1cc4, 0x6303),   /* UMIS RPJTJ512MGE1QDY 512G */
4160 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
4161 	{ PCI_DEVICE(0x1cc4, 0x6302),   /* UMIS RPJTJ256MGE1QDY 256G */
4162 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
4163 	{ PCI_DEVICE(0x2646, 0x2262),   /* KINGSTON SKC2000 NVMe SSD */
4164 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
4165 	{ PCI_DEVICE(0x2646, 0x2263),   /* KINGSTON A2000 NVMe SSD  */
4166 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
4167 	{ PCI_DEVICE(0x2646, 0x5013),   /* Kingston KC3000, Kingston FURY Renegade */
4168 		.driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, },
4169 	{ PCI_DEVICE(0x2646, 0x5018),   /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */
4170 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
4171 	{ PCI_DEVICE(0x2646, 0x5016),   /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */
4172 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
4173 	{ PCI_DEVICE(0x2646, 0x501A),   /* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */
4174 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
4175 	{ PCI_DEVICE(0x2646, 0x501B),   /* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */
4176 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
4177 	{ PCI_DEVICE(0x2646, 0x501E),   /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
4178 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
4179 	{ PCI_DEVICE(0x1f40, 0x1202),   /* Netac Technologies Co. NV3000 NVMe SSD */
4180 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4181 	{ PCI_DEVICE(0x1f40, 0x5236),   /* Netac Technologies Co. NV7000 NVMe SSD */
4182 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4183 	{ PCI_DEVICE(0x1e4B, 0x1001),   /* MAXIO MAP1001 */
4184 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4185 	{ PCI_DEVICE(0x1e4B, 0x1002),   /* MAXIO MAP1002 */
4186 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4187 	{ PCI_DEVICE(0x1e4B, 0x1202),   /* MAXIO MAP1202 */
4188 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4189 	{ PCI_DEVICE(0x1e4B, 0x1602),   /* MAXIO MAP1602 */
4190 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4191 	{ PCI_DEVICE(0x1cc1, 0x5350),   /* ADATA XPG GAMMIX S50 */
4192 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4193 	{ PCI_DEVICE(0x1dbe, 0x5216),   /* Acer/INNOGRIT FA100/5216 NVMe SSD */
4194 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4195 	{ PCI_DEVICE(0x1dbe, 0x5236),   /* ADATA XPG GAMMIX S70 */
4196 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4197 	{ PCI_DEVICE(0x1e49, 0x0021),   /* ZHITAI TiPro5000 NVMe SSD */
4198 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
4199 	{ PCI_DEVICE(0x1e49, 0x0041),   /* ZHITAI TiPro7000 NVMe SSD */
4200 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
4201 	{ PCI_DEVICE(0x1fa0, 0x2283),   /* Wodposit WPBSNM8-256GTP */
4202 		.driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, },
4203 	{ PCI_DEVICE(0x025e, 0xf1ac),   /* SOLIDIGM  P44 pro SSDPFKKW020X7  */
4204 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
4205 	{ PCI_DEVICE(0xc0a9, 0x540a),   /* Crucial P2 */
4206 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4207 	{ PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
4208 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4209 	{ PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
4210 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4211 	{ PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
4212 		.driver_data = NVME_QUIRK_BOGUS_NID |
4213 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
4214 	{ PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
4215 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4216 	{ PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G  */
4217 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4218 	{ PCI_DEVICE(0x10ec, 0x5765), /* TEAMGROUP MP33 2TB SSD */
4219 		.driver_data = NVME_QUIRK_BOGUS_NID, },
4220 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
4221 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
4222 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
4223 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
4224 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
4225 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
4226 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
4227 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
4228 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
4229 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
4230 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
4231 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
4232 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
4233 		/*
4234 		 * Fix for the Apple controller found in the MacBook8,1 and
4235 		 * some MacBook7,1 to avoid controller resets and data loss.
4236 		 */
4237 		.driver_data = NVME_QUIRK_SINGLE_VECTOR |
4238 				NVME_QUIRK_QDEPTH_ONE },
4239 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
4240 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
4241 		.driver_data = NVME_QUIRK_SINGLE_VECTOR |
4242 				NVME_QUIRK_128_BYTES_SQES |
4243 				NVME_QUIRK_SHARED_TAGS |
4244 				NVME_QUIRK_SKIP_CID_GEN |
4245 				NVME_QUIRK_IDENTIFY_CNS },
4246 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
4247 	{ 0, }
4248 };
4249 MODULE_DEVICE_TABLE(pci, nvme_id_table);
4250 
4251 static struct pci_driver nvme_driver = {
4252 	.name		= "nvme",
4253 	.id_table	= nvme_id_table,
4254 	.probe		= nvme_probe,
4255 	.remove		= nvme_remove,
4256 	.shutdown	= nvme_shutdown,
4257 	.driver		= {
4258 		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
4259 #ifdef CONFIG_PM_SLEEP
4260 		.pm		= &nvme_dev_pm_ops,
4261 #endif
4262 	},
4263 	.sriov_configure = pci_sriov_configure_simple,
4264 	.err_handler	= &nvme_err_handler,
4265 };
4266 
nvme_init(void)4267 static int __init nvme_init(void)
4268 {
4269 	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
4270 	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
4271 	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
4272 	BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
4273 
4274 	return pci_register_driver(&nvme_driver);
4275 }
4276 
nvme_exit(void)4277 static void __exit nvme_exit(void)
4278 {
4279 	kfree(nvme_pci_quirk_list);
4280 	pci_unregister_driver(&nvme_driver);
4281 	flush_workqueue(nvme_wq);
4282 }
4283 
4284 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
4285 MODULE_LICENSE("GPL");
4286 MODULE_VERSION("1.0");
4287 MODULE_DESCRIPTION("NVMe host PCIe transport driver");
4288 module_init(nvme_init);
4289 module_exit(nvme_exit);
4290