1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVM Express device driver
4  * Copyright (c) 2011-2014, Intel Corporation.
5  */
6 
7 #include <linux/blkdev.h>
8 #include <linux/blk-mq.h>
9 #include <linux/compat.h>
10 #include <linux/delay.h>
11 #include <linux/errno.h>
12 #include <linux/hdreg.h>
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/backing-dev.h>
16 #include <linux/list_sort.h>
17 #include <linux/slab.h>
18 #include <linux/types.h>
19 #include <linux/pr.h>
20 #include <linux/ptrace.h>
21 #include <linux/nvme_ioctl.h>
22 #include <linux/t10-pi.h>
23 #include <linux/pm_qos.h>
24 #include <asm/unaligned.h>
25 
26 #include "nvme.h"
27 #include "fabrics.h"
28 
29 #define CREATE_TRACE_POINTS
30 #include "trace.h"
31 
32 #define NVME_MINORS		(1U << MINORBITS)
33 
34 unsigned int admin_timeout = 60;
35 module_param(admin_timeout, uint, 0644);
36 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
37 EXPORT_SYMBOL_GPL(admin_timeout);
38 
39 unsigned int nvme_io_timeout = 30;
40 module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
41 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
42 EXPORT_SYMBOL_GPL(nvme_io_timeout);
43 
44 static unsigned char shutdown_timeout = 5;
45 module_param(shutdown_timeout, byte, 0644);
46 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
47 
48 static u8 nvme_max_retries = 5;
49 module_param_named(max_retries, nvme_max_retries, byte, 0644);
50 MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
51 
52 static unsigned long default_ps_max_latency_us = 100000;
53 module_param(default_ps_max_latency_us, ulong, 0644);
54 MODULE_PARM_DESC(default_ps_max_latency_us,
55 		 "max power saving latency for new devices; use PM QOS to change per device");
56 
57 static bool force_apst;
58 module_param(force_apst, bool, 0644);
59 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
60 
61 static bool streams;
62 module_param(streams, bool, 0644);
63 MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
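/*
 * All of the above parameters use 0644 permissions, so they can be given
 * at load time (e.g. "modprobe nvme_core io_timeout=60") or adjusted at
 * runtime via /sys/module/nvme_core/parameters/.
 */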
64 
65 /*
66  * nvme_wq - hosts nvme related works that are not reset or delete
67  * nvme_reset_wq - hosts nvme reset works
68  * nvme_delete_wq - hosts nvme delete works
69  *
70  * nvme_wq will host works such as scan, aen handling, fw activation,
71  * keep-alive, periodic reconnects, etc. nvme_reset_wq
72  * runs reset works, which also flush works hosted on nvme_wq for
73  * serialization purposes. nvme_delete_wq hosts controller deletion
74  * works, which flush reset works for serialization.
75  */
76 struct workqueue_struct *nvme_wq;
77 EXPORT_SYMBOL_GPL(nvme_wq);
78 
79 struct workqueue_struct *nvme_reset_wq;
80 EXPORT_SYMBOL_GPL(nvme_reset_wq);
81 
82 struct workqueue_struct *nvme_delete_wq;
83 EXPORT_SYMBOL_GPL(nvme_delete_wq);
84 
85 static LIST_HEAD(nvme_subsystems);
86 static DEFINE_MUTEX(nvme_subsystems_lock);
87 
88 static DEFINE_IDA(nvme_instance_ida);
89 static dev_t nvme_chr_devt;
90 static struct class *nvme_class;
91 static struct class *nvme_subsys_class;
92 
93 static int nvme_revalidate_disk(struct gendisk *disk);
94 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
95 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
96 					   unsigned nsid);
97 
98 static void nvme_set_queue_dying(struct nvme_ns *ns)
99 {
100 	/*
101 	 * Revalidating a dead namespace sets capacity to 0. This will end
102 	 * buffered writers dirtying pages that can't be synced.
103 	 */
104 	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
105 		return;
106 	blk_set_queue_dying(ns->queue);
107 	/* Forcibly unquiesce queues to avoid blocking dispatch */
108 	blk_mq_unquiesce_queue(ns->queue);
109 	/*
110 	 * Revalidate after unblocking dispatchers that may be holding bd_mutex
111 	 */
112 	revalidate_disk(ns->disk);
113 }
114 
115 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
116 {
117 	/*
118 	 * Only queue new scan work when admin and IO queues are both alive
119 	 */
120 	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
121 		queue_work(nvme_wq, &ctrl->scan_work);
122 }
123 
124 /*
125  * Use this function to proceed with scheduling reset_work for a controller
126  * that had previously been set to the resetting state. This is intended for
127  * code paths that can't be interrupted by other reset attempts. A hot removal
128  * may prevent this from succeeding.
129  */
130 int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
131 {
132 	if (ctrl->state != NVME_CTRL_RESETTING)
133 		return -EBUSY;
134 	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
135 		return -EBUSY;
136 	return 0;
137 }
138 EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
139 
140 int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
141 {
142 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
143 		return -EBUSY;
144 	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
145 		return -EBUSY;
146 	return 0;
147 }
148 EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
149 
150 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
151 {
152 	int ret;
153 
154 	ret = nvme_reset_ctrl(ctrl);
155 	if (!ret) {
156 		flush_work(&ctrl->reset_work);
157 		if (ctrl->state != NVME_CTRL_LIVE)
158 			ret = -ENETRESET;
159 	}
160 
161 	return ret;
162 }
163 EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
164 
165 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
166 {
167 	dev_info(ctrl->device,
168 		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);
169 
170 	flush_work(&ctrl->reset_work);
171 	nvme_stop_ctrl(ctrl);
172 	nvme_remove_namespaces(ctrl);
173 	ctrl->ops->delete_ctrl(ctrl);
174 	nvme_uninit_ctrl(ctrl);
175 }
176 
177 static void nvme_delete_ctrl_work(struct work_struct *work)
178 {
179 	struct nvme_ctrl *ctrl =
180 		container_of(work, struct nvme_ctrl, delete_work);
181 
182 	nvme_do_delete_ctrl(ctrl);
183 }
184 
185 int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
186 {
187 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
188 		return -EBUSY;
189 	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
190 		return -EBUSY;
191 	return 0;
192 }
193 EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
194 
195 static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
196 {
197 	/*
198 	 * Keep a reference until nvme_do_delete_ctrl() completes,
199 	 * since ->delete_ctrl can free the controller.
200 	 */
201 	nvme_get_ctrl(ctrl);
202 	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
203 		nvme_do_delete_ctrl(ctrl);
204 	nvme_put_ctrl(ctrl);
205 }
206 
207 static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
208 {
209 	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
210 }
211 
212 static blk_status_t nvme_error_status(u16 status)
213 {
214 	switch (status & 0x7ff) {
215 	case NVME_SC_SUCCESS:
216 		return BLK_STS_OK;
217 	case NVME_SC_CAP_EXCEEDED:
218 		return BLK_STS_NOSPC;
219 	case NVME_SC_LBA_RANGE:
220 	case NVME_SC_CMD_INTERRUPTED:
221 	case NVME_SC_NS_NOT_READY:
222 		return BLK_STS_TARGET;
223 	case NVME_SC_BAD_ATTRIBUTES:
224 	case NVME_SC_ONCS_NOT_SUPPORTED:
225 	case NVME_SC_INVALID_OPCODE:
226 	case NVME_SC_INVALID_FIELD:
227 	case NVME_SC_INVALID_NS:
228 		return BLK_STS_NOTSUPP;
229 	case NVME_SC_WRITE_FAULT:
230 	case NVME_SC_READ_ERROR:
231 	case NVME_SC_UNWRITTEN_BLOCK:
232 	case NVME_SC_ACCESS_DENIED:
233 	case NVME_SC_READ_ONLY:
234 	case NVME_SC_COMPARE_FAILED:
235 		return BLK_STS_MEDIUM;
236 	case NVME_SC_GUARD_CHECK:
237 	case NVME_SC_APPTAG_CHECK:
238 	case NVME_SC_REFTAG_CHECK:
239 	case NVME_SC_INVALID_PI:
240 		return BLK_STS_PROTECTION;
241 	case NVME_SC_RESERVATION_CONFLICT:
242 		return BLK_STS_NEXUS;
243 	case NVME_SC_HOST_PATH_ERROR:
244 		return BLK_STS_TRANSPORT;
245 	default:
246 		return BLK_STS_IOERR;
247 	}
248 }
249 
250 static inline bool nvme_req_needs_retry(struct request *req)
251 {
252 	if (blk_noretry_request(req))
253 		return false;
254 	if (nvme_req(req)->status & NVME_SC_DNR)
255 		return false;
256 	if (nvme_req(req)->retries >= nvme_max_retries)
257 		return false;
258 	return true;
259 }
260 
261 static void nvme_retry_req(struct request *req)
262 {
263 	struct nvme_ns *ns = req->q->queuedata;
264 	unsigned long delay = 0;
265 	u16 crd;
266 
267 	/* The CRD field sits in status bits 12:11, so the mask and shift result must be <= 3 */
268 	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
269 	if (ns && crd)
270 		delay = ns->ctrl->crdt[crd - 1] * 100;
271 
272 	nvme_req(req)->retries++;
273 	blk_mq_requeue_request(req, false);
274 	blk_mq_delay_kick_requeue_list(req->q, delay);
275 }
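/*
 * Worked example with hypothetical values: a status with CRD = 2 selects
 * ctrl->crdt[1]; if that Command Retry Delay Time is 5 (in 100 ms units
 * per the spec), the requeue above is delayed by 5 * 100 = 500 ms.
 */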
276 
277 void nvme_complete_rq(struct request *req)
278 {
279 	blk_status_t status = nvme_error_status(nvme_req(req)->status);
280 
281 	trace_nvme_complete_rq(req);
282 
283 	nvme_cleanup_cmd(req);
284 
285 	if (nvme_req(req)->ctrl->kas)
286 		nvme_req(req)->ctrl->comp_seen = true;
287 
288 	if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
289 		if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
290 			return;
291 
292 		if (!blk_queue_dying(req->q)) {
293 			nvme_retry_req(req);
294 			return;
295 		}
296 	}
297 
298 	nvme_trace_bio_complete(req, status);
299 	blk_mq_end_request(req, status);
300 }
301 EXPORT_SYMBOL_GPL(nvme_complete_rq);
302 
303 bool nvme_cancel_request(struct request *req, void *data, bool reserved)
304 {
305 	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
306 				"Cancelling I/O %d", req->tag);
307 
308 	/* don't abort an already completed request */
309 	if (blk_mq_request_completed(req))
310 		return true;
311 
312 	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
313 	blk_mq_complete_request(req);
314 	return true;
315 }
316 EXPORT_SYMBOL_GPL(nvme_cancel_request);
317 
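/*
 * Summary of the transition table implemented below (old -> new states
 * for which this function returns true):
 *
 *	NEW        -> LIVE, RESETTING, CONNECTING
 *	LIVE       -> RESETTING, DELETING
 *	RESETTING  -> LIVE, CONNECTING, DELETING
 *	CONNECTING -> LIVE, DELETING
 *	DELETING   -> DEAD
 *
 * Any other requested transition leaves the state unchanged.
 */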
318 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
319 		enum nvme_ctrl_state new_state)
320 {
321 	enum nvme_ctrl_state old_state;
322 	unsigned long flags;
323 	bool changed = false;
324 
325 	spin_lock_irqsave(&ctrl->lock, flags);
326 
327 	old_state = ctrl->state;
328 	switch (new_state) {
329 	case NVME_CTRL_LIVE:
330 		switch (old_state) {
331 		case NVME_CTRL_NEW:
332 		case NVME_CTRL_RESETTING:
333 		case NVME_CTRL_CONNECTING:
334 			changed = true;
335 			/* FALLTHRU */
336 		default:
337 			break;
338 		}
339 		break;
340 	case NVME_CTRL_RESETTING:
341 		switch (old_state) {
342 		case NVME_CTRL_NEW:
343 		case NVME_CTRL_LIVE:
344 			changed = true;
345 			/* FALLTHRU */
346 		default:
347 			break;
348 		}
349 		break;
350 	case NVME_CTRL_CONNECTING:
351 		switch (old_state) {
352 		case NVME_CTRL_NEW:
353 		case NVME_CTRL_RESETTING:
354 			changed = true;
355 			/* FALLTHRU */
356 		default:
357 			break;
358 		}
359 		break;
360 	case NVME_CTRL_DELETING:
361 		switch (old_state) {
362 		case NVME_CTRL_LIVE:
363 		case NVME_CTRL_RESETTING:
364 		case NVME_CTRL_CONNECTING:
365 			changed = true;
366 			/* FALLTHRU */
367 		default:
368 			break;
369 		}
370 		break;
371 	case NVME_CTRL_DEAD:
372 		switch (old_state) {
373 		case NVME_CTRL_DELETING:
374 			changed = true;
375 			/* FALLTHRU */
376 		default:
377 			break;
378 		}
379 		break;
380 	default:
381 		break;
382 	}
383 
384 	if (changed) {
385 		ctrl->state = new_state;
386 		wake_up_all(&ctrl->state_wq);
387 	}
388 
389 	spin_unlock_irqrestore(&ctrl->lock, flags);
390 	if (changed && ctrl->state == NVME_CTRL_LIVE)
391 		nvme_kick_requeue_lists(ctrl);
392 	return changed;
393 }
394 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
395 
396 /*
397  * Returns true for sink states that can't ever transition back to live.
398  */
399 static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
400 {
401 	switch (ctrl->state) {
402 	case NVME_CTRL_NEW:
403 	case NVME_CTRL_LIVE:
404 	case NVME_CTRL_RESETTING:
405 	case NVME_CTRL_CONNECTING:
406 		return false;
407 	case NVME_CTRL_DELETING:
408 	case NVME_CTRL_DEAD:
409 		return true;
410 	default:
411 		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
412 		return true;
413 	}
414 }
415 
416 /*
417  * Waits for the controller state to be resetting, or returns false if it is
418  * not possible to ever transition to that state.
419  */
420 bool nvme_wait_reset(struct nvme_ctrl *ctrl)
421 {
422 	wait_event(ctrl->state_wq,
423 		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
424 		   nvme_state_terminal(ctrl));
425 	return ctrl->state == NVME_CTRL_RESETTING;
426 }
427 EXPORT_SYMBOL_GPL(nvme_wait_reset);
428 
429 static void nvme_free_ns_head(struct kref *ref)
430 {
431 	struct nvme_ns_head *head =
432 		container_of(ref, struct nvme_ns_head, ref);
433 
434 	nvme_mpath_remove_disk(head);
435 	ida_simple_remove(&head->subsys->ns_ida, head->instance);
436 	cleanup_srcu_struct(&head->srcu);
437 	nvme_put_subsystem(head->subsys);
438 	kfree(head);
439 }
440 
441 static void nvme_put_ns_head(struct nvme_ns_head *head)
442 {
443 	kref_put(&head->ref, nvme_free_ns_head);
444 }
445 
446 static void nvme_free_ns(struct kref *kref)
447 {
448 	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
449 
450 	if (ns->ndev)
451 		nvme_nvm_unregister(ns);
452 
453 	put_disk(ns->disk);
454 	nvme_put_ns_head(ns->head);
455 	nvme_put_ctrl(ns->ctrl);
456 	kfree(ns);
457 }
458 
459 static void nvme_put_ns(struct nvme_ns *ns)
460 {
461 	kref_put(&ns->kref, nvme_free_ns);
462 }
463 
464 static inline void nvme_clear_nvme_request(struct request *req)
465 {
466 	if (!(req->rq_flags & RQF_DONTPREP)) {
467 		nvme_req(req)->retries = 0;
468 		nvme_req(req)->flags = 0;
469 		req->rq_flags |= RQF_DONTPREP;
470 	}
471 }
472 
473 struct request *nvme_alloc_request(struct request_queue *q,
474 		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
475 {
476 	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
477 	struct request *req;
478 
479 	if (qid == NVME_QID_ANY) {
480 		req = blk_mq_alloc_request(q, op, flags);
481 	} else {
482 		req = blk_mq_alloc_request_hctx(q, op, flags,
483 				qid ? qid - 1 : 0);
484 	}
485 	if (IS_ERR(req))
486 		return req;
487 
488 	req->cmd_flags |= REQ_FAILFAST_DRIVER;
489 	nvme_clear_nvme_request(req);
490 	nvme_req(req)->cmd = cmd;
491 
492 	return req;
493 }
494 EXPORT_SYMBOL_GPL(nvme_alloc_request);
495 
496 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
497 {
498 	struct nvme_command c;
499 
500 	memset(&c, 0, sizeof(c));
501 
502 	c.directive.opcode = nvme_admin_directive_send;
503 	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
504 	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
505 	c.directive.dtype = NVME_DIR_IDENTIFY;
506 	c.directive.tdtype = NVME_DIR_STREAMS;
507 	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;
508 
509 	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
510 }
511 
512 static int nvme_disable_streams(struct nvme_ctrl *ctrl)
513 {
514 	return nvme_toggle_streams(ctrl, false);
515 }
516 
517 static int nvme_enable_streams(struct nvme_ctrl *ctrl)
518 {
519 	return nvme_toggle_streams(ctrl, true);
520 }
521 
522 static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
523 				  struct streams_directive_params *s, u32 nsid)
524 {
525 	struct nvme_command c;
526 
527 	memset(&c, 0, sizeof(c));
528 	memset(s, 0, sizeof(*s));
529 
530 	c.directive.opcode = nvme_admin_directive_recv;
531 	c.directive.nsid = cpu_to_le32(nsid);
532 	c.directive.numd = cpu_to_le32(nvme_bytes_to_numd(sizeof(*s)));
533 	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
534 	c.directive.dtype = NVME_DIR_STREAMS;
535 
536 	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
537 }
538 
539 static int nvme_configure_directives(struct nvme_ctrl *ctrl)
540 {
541 	struct streams_directive_params s;
542 	int ret;
543 
544 	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
545 		return 0;
546 	if (!streams)
547 		return 0;
548 
549 	ret = nvme_enable_streams(ctrl);
550 	if (ret)
551 		return ret;
552 
553 	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
554 	if (ret)
555 		return ret;
556 
557 	ctrl->nssa = le16_to_cpu(s.nssa);
558 	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
559 		dev_info(ctrl->device, "too few streams (%u) available\n",
560 					ctrl->nssa);
561 		nvme_disable_streams(ctrl);
562 		return 0;
563 	}
564 
565 	ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
566 	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
567 	return 0;
568 }
569 
570 /*
571  * Check if 'req' has a write hint associated with it. If it does, assign
572  * a valid namespace stream to the write.
573  */
574 static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
575 				     struct request *req, u16 *control,
576 				     u32 *dsmgmt)
577 {
578 	enum rw_hint streamid = req->write_hint;
579 
580 	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
581 		streamid = 0;
582 	else {
583 		streamid--;
584 		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
585 			return;
586 
587 		*control |= NVME_RW_DTYPE_STREAMS;
588 		*dsmgmt |= streamid << 16;
589 	}
590 
591 	if (streamid < ARRAY_SIZE(req->q->write_hints))
592 		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
593 }
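/*
 * The decrement above maps the rw_hint enum onto stream IDs: hints
 * WRITE_LIFE_SHORT..WRITE_LIFE_EXTREME (values 2..5) become streams 1..4,
 * while "no hint" falls through as pseudo-stream 0, which is only used
 * for the per-queue write_hints accounting.
 */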
594 
595 static inline void nvme_setup_flush(struct nvme_ns *ns,
596 		struct nvme_command *cmnd)
597 {
598 	cmnd->common.opcode = nvme_cmd_flush;
599 	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
600 }
601 
602 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
603 		struct nvme_command *cmnd)
604 {
605 	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
606 	struct nvme_dsm_range *range;
607 	struct bio *bio;
608 
609 	/*
610 	 * Some devices do not consider the DSM 'Number of Ranges' field when
611 	 * determining how much data to DMA. Always allocate memory for maximum
612 	 * number of segments to prevent the device from reading past the buffer.
613 	 */
614 	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;
615 
616 	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
617 	if (!range) {
618 		/*
619 		 * If we fail to allocate our range, fall back to the controller
620 		 * discard page. If that's also busy, it's safe to return
621 		 * busy, as we know we can make progress once that's freed.
622 		 */
623 		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
624 			return BLK_STS_RESOURCE;
625 
626 		range = page_address(ns->ctrl->discard_page);
627 	}
628 
629 	__rq_for_each_bio(bio, req) {
630 		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
631 		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
632 
633 		if (n < segments) {
634 			range[n].cattr = cpu_to_le32(0);
635 			range[n].nlb = cpu_to_le32(nlb);
636 			range[n].slba = cpu_to_le64(slba);
637 		}
638 		n++;
639 	}
640 
641 	if (WARN_ON_ONCE(n != segments)) {
642 		if (virt_to_page(range) == ns->ctrl->discard_page)
643 			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
644 		else
645 			kfree(range);
646 		return BLK_STS_IOERR;
647 	}
648 
649 	cmnd->dsm.opcode = nvme_cmd_dsm;
650 	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
651 	cmnd->dsm.nr = cpu_to_le32(segments - 1);
652 	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
653 
654 	req->special_vec.bv_page = virt_to_page(range);
655 	req->special_vec.bv_offset = offset_in_page(range);
656 	req->special_vec.bv_len = alloc_size;
657 	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
658 
659 	return BLK_STS_OK;
660 }
661 
662 static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
663 		struct request *req, struct nvme_command *cmnd)
664 {
665 	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
666 		return nvme_setup_discard(ns, req, cmnd);
667 
668 	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
669 	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
670 	cmnd->write_zeroes.slba =
671 		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
672 	cmnd->write_zeroes.length =
673 		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
674 	cmnd->write_zeroes.control = 0;
675 	return BLK_STS_OK;
676 }
677 
678 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
679 		struct request *req, struct nvme_command *cmnd)
680 {
681 	struct nvme_ctrl *ctrl = ns->ctrl;
682 	u16 control = 0;
683 	u32 dsmgmt = 0;
684 
685 	if (req->cmd_flags & REQ_FUA)
686 		control |= NVME_RW_FUA;
687 	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
688 		control |= NVME_RW_LR;
689 
690 	if (req->cmd_flags & REQ_RAHEAD)
691 		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
692 
693 	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
694 	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
695 	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
696 	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
697 
698 	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
699 		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
700 
701 	if (ns->ms) {
702 		/*
703 		 * If formatted with metadata, the block layer always provides a
704 		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
705 		 * we enable the PRACT bit for protection information or set the
706 		 * namespace capacity to zero to prevent any I/O.
707 		 */
708 		if (!blk_integrity_rq(req)) {
709 			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
710 				return BLK_STS_NOTSUPP;
711 			control |= NVME_RW_PRINFO_PRACT;
712 		}
713 
714 		switch (ns->pi_type) {
715 		case NVME_NS_DPS_PI_TYPE3:
716 			control |= NVME_RW_PRINFO_PRCHK_GUARD;
717 			break;
718 		case NVME_NS_DPS_PI_TYPE1:
719 		case NVME_NS_DPS_PI_TYPE2:
720 			control |= NVME_RW_PRINFO_PRCHK_GUARD |
721 					NVME_RW_PRINFO_PRCHK_REF;
722 			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
723 			break;
724 		}
725 	}
726 
727 	cmnd->rw.control = cpu_to_le16(control);
728 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
729 	return 0;
730 }
731 
732 void nvme_cleanup_cmd(struct request *req)
733 {
734 	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
735 		struct nvme_ns *ns = req->rq_disk->private_data;
736 		struct page *page = req->special_vec.bv_page;
737 
738 		if (page == ns->ctrl->discard_page)
739 			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
740 		else
741 			kfree(page_address(page) + req->special_vec.bv_offset);
742 	}
743 }
744 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
745 
746 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
747 		struct nvme_command *cmd)
748 {
749 	blk_status_t ret = BLK_STS_OK;
750 
751 	nvme_clear_nvme_request(req);
752 
753 	memset(cmd, 0, sizeof(*cmd));
754 	switch (req_op(req)) {
755 	case REQ_OP_DRV_IN:
756 	case REQ_OP_DRV_OUT:
757 		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
758 		break;
759 	case REQ_OP_FLUSH:
760 		nvme_setup_flush(ns, cmd);
761 		break;
762 	case REQ_OP_WRITE_ZEROES:
763 		ret = nvme_setup_write_zeroes(ns, req, cmd);
764 		break;
765 	case REQ_OP_DISCARD:
766 		ret = nvme_setup_discard(ns, req, cmd);
767 		break;
768 	case REQ_OP_READ:
769 	case REQ_OP_WRITE:
770 		ret = nvme_setup_rw(ns, req, cmd);
771 		break;
772 	default:
773 		WARN_ON_ONCE(1);
774 		return BLK_STS_IOERR;
775 	}
776 
777 	cmd->common.command_id = req->tag;
778 	trace_nvme_setup_cmd(req, cmd);
779 	return ret;
780 }
781 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
782 
783 static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
784 {
785 	struct completion *waiting = rq->end_io_data;
786 
787 	rq->end_io_data = NULL;
788 	complete(waiting);
789 }
790 
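/*
 * Synchronous execution without interrupts: the request is flagged
 * REQ_HIPRI and the caller spins in blk_poll() until nvme_end_sync_rq()
 * fires the on-stack completion, which requires QUEUE_FLAG_POLL support.
 */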
791 static void nvme_execute_rq_polled(struct request_queue *q,
792 		struct gendisk *bd_disk, struct request *rq, int at_head)
793 {
794 	DECLARE_COMPLETION_ONSTACK(wait);
795 
796 	WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));
797 
798 	rq->cmd_flags |= REQ_HIPRI;
799 	rq->end_io_data = &wait;
800 	blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);
801 
802 	while (!completion_done(&wait)) {
803 		blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
804 		cond_resched();
805 	}
806 }
807 
808 /*
809  * Returns 0 on success.  If the result is negative, it's a Linux error code;
810  * if the result is positive, it's an NVM Express status code
811  */
812 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
813 		union nvme_result *result, void *buffer, unsigned bufflen,
814 		unsigned timeout, int qid, int at_head,
815 		blk_mq_req_flags_t flags, bool poll)
816 {
817 	struct request *req;
818 	int ret;
819 
820 	req = nvme_alloc_request(q, cmd, flags, qid);
821 	if (IS_ERR(req))
822 		return PTR_ERR(req);
823 
824 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
825 
826 	if (buffer && bufflen) {
827 		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
828 		if (ret)
829 			goto out;
830 	}
831 
832 	if (poll)
833 		nvme_execute_rq_polled(req->q, NULL, req, at_head);
834 	else
835 		blk_execute_rq(req->q, NULL, req, at_head);
836 	if (result)
837 		*result = nvme_req(req)->result;
838 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
839 		ret = -EINTR;
840 	else
841 		ret = nvme_req(req)->status;
842  out:
843 	blk_mq_free_request(req);
844 	return ret;
845 }
846 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
847 
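/*
 * Hypothetical caller sketch for the return-value convention documented
 * above (names and error policy are illustrative only):
 *
 *	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, buf, len);
 *	if (ret < 0)
 *		return ret;	// Linux errno, e.g. allocation failure
 *	if (ret > 0)
 *		return -EIO;	// NVMe status code, possibly with DNR set
 */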
848 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
849 		void *buffer, unsigned bufflen)
850 {
851 	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
852 			NVME_QID_ANY, 0, 0, false);
853 }
854 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
855 
856 static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
857 		unsigned len, u32 seed, bool write)
858 {
859 	struct bio_integrity_payload *bip;
860 	int ret = -ENOMEM;
861 	void *buf;
862 
863 	buf = kmalloc(len, GFP_KERNEL);
864 	if (!buf)
865 		goto out;
866 
867 	ret = -EFAULT;
868 	if (write && copy_from_user(buf, ubuf, len))
869 		goto out_free_meta;
870 
871 	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
872 	if (IS_ERR(bip)) {
873 		ret = PTR_ERR(bip);
874 		goto out_free_meta;
875 	}
876 
877 	bip->bip_iter.bi_size = len;
878 	bip->bip_iter.bi_sector = seed;
879 	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
880 			offset_in_page(buf));
881 	if (ret == len)
882 		return buf;
883 	ret = -ENOMEM;
884 out_free_meta:
885 	kfree(buf);
886 out:
887 	return ERR_PTR(ret);
888 }
889 
890 static int nvme_submit_user_cmd(struct request_queue *q,
891 		struct nvme_command *cmd, void __user *ubuffer,
892 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
893 		u32 meta_seed, u64 *result, unsigned timeout)
894 {
895 	bool write = nvme_is_write(cmd);
896 	struct nvme_ns *ns = q->queuedata;
897 	struct gendisk *disk = ns ? ns->disk : NULL;
898 	struct request *req;
899 	struct bio *bio = NULL;
900 	void *meta = NULL;
901 	int ret;
902 
903 	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
904 	if (IS_ERR(req))
905 		return PTR_ERR(req);
906 
907 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
908 	nvme_req(req)->flags |= NVME_REQ_USERCMD;
909 
910 	if (ubuffer && bufflen) {
911 		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
912 				GFP_KERNEL);
913 		if (ret)
914 			goto out;
915 		bio = req->bio;
916 		bio->bi_disk = disk;
917 		if (disk && meta_buffer && meta_len) {
918 			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
919 					meta_seed, write);
920 			if (IS_ERR(meta)) {
921 				ret = PTR_ERR(meta);
922 				goto out_unmap;
923 			}
924 			req->cmd_flags |= REQ_INTEGRITY;
925 		}
926 	}
927 
928 	blk_execute_rq(req->q, disk, req, 0);
929 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
930 		ret = -EINTR;
931 	else
932 		ret = nvme_req(req)->status;
933 	if (result)
934 		*result = le64_to_cpu(nvme_req(req)->result.u64);
935 	if (meta && !ret && !write) {
936 		if (copy_to_user(meta_buffer, meta, meta_len))
937 			ret = -EFAULT;
938 	}
939 	kfree(meta);
940  out_unmap:
941 	if (bio)
942 		blk_rq_unmap_user(bio);
943  out:
944 	blk_mq_free_request(req);
945 	return ret;
946 }
947 
948 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
949 {
950 	struct nvme_ctrl *ctrl = rq->end_io_data;
951 	unsigned long flags;
952 	bool startka = false;
953 
954 	blk_mq_free_request(rq);
955 
956 	if (status) {
957 		dev_err(ctrl->device,
958 			"failed nvme_keep_alive_end_io error=%d\n",
959 				status);
960 		return;
961 	}
962 
963 	ctrl->comp_seen = false;
964 	spin_lock_irqsave(&ctrl->lock, flags);
965 	if (ctrl->state == NVME_CTRL_LIVE ||
966 	    ctrl->state == NVME_CTRL_CONNECTING)
967 		startka = true;
968 	spin_unlock_irqrestore(&ctrl->lock, flags);
969 	if (startka)
970 		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
971 }
972 
973 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
974 {
975 	struct request *rq;
976 
977 	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
978 			NVME_QID_ANY);
979 	if (IS_ERR(rq))
980 		return PTR_ERR(rq);
981 
982 	rq->timeout = ctrl->kato * HZ;
983 	rq->end_io_data = ctrl;
984 
985 	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
986 
987 	return 0;
988 }
989 
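/*
 * With TBKAS (Traffic Based Keep Alive Support, a CTRATT bit) the
 * controller treats any command completion as a sign of life, so when
 * traffic was observed in the last interval (comp_seen) we only rearm
 * the timer instead of sending an actual Keep Alive command.
 */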
990 static void nvme_keep_alive_work(struct work_struct *work)
991 {
992 	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
993 			struct nvme_ctrl, ka_work);
994 	bool comp_seen = ctrl->comp_seen;
995 
996 	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
997 		dev_dbg(ctrl->device,
998 			"reschedule traffic based keep-alive timer\n");
999 		ctrl->comp_seen = false;
1000 		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
1001 		return;
1002 	}
1003 
1004 	if (nvme_keep_alive(ctrl)) {
1005 		/* allocation failure, reset the controller */
1006 		dev_err(ctrl->device, "keep-alive failed\n");
1007 		nvme_reset_ctrl(ctrl);
1008 		return;
1009 	}
1010 }
1011 
1012 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
1013 {
1014 	if (unlikely(ctrl->kato == 0))
1015 		return;
1016 
1017 	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
1018 }
1019 
1020 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
1021 {
1022 	if (unlikely(ctrl->kato == 0))
1023 		return;
1024 
1025 	cancel_delayed_work_sync(&ctrl->ka_work);
1026 }
1027 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
1028 
1029 /*
1030  * In NVMe 1.0 the CNS field was just a binary controller or namespace
1031  * flag, thus sending any new CNS opcodes has a big chance of not working.
1032  * Qemu unfortunately had that bug after reporting a 1.1 version compliance
1033  * (but not for any later version).
1034  */
1035 static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
1036 {
1037 	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
1038 		return ctrl->vs < NVME_VS(1, 2, 0);
1039 	return ctrl->vs < NVME_VS(1, 1, 0);
1040 }
1041 
1042 static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
1043 {
1044 	struct nvme_command c = { };
1045 	int error;
1046 
1047 	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1048 	c.identify.opcode = nvme_admin_identify;
1049 	c.identify.cns = NVME_ID_CNS_CTRL;
1050 
1051 	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
1052 	if (!*id)
1053 		return -ENOMEM;
1054 
1055 	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
1056 			sizeof(struct nvme_id_ctrl));
1057 	if (error)
1058 		kfree(*id);
1059 	return error;
1060 }
1061 
1062 static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1063 		struct nvme_ns_id_desc *cur)
1064 {
1065 	const char *warn_str = "ctrl returned bogus length:";
1066 	void *data = cur;
1067 
1068 	switch (cur->nidt) {
1069 	case NVME_NIDT_EUI64:
1070 		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
1071 			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
1072 				 warn_str, cur->nidl);
1073 			return -1;
1074 		}
1075 		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
1076 		return NVME_NIDT_EUI64_LEN;
1077 	case NVME_NIDT_NGUID:
1078 		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
1079 			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
1080 				 warn_str, cur->nidl);
1081 			return -1;
1082 		}
1083 		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
1084 		return NVME_NIDT_NGUID_LEN;
1085 	case NVME_NIDT_UUID:
1086 		if (cur->nidl != NVME_NIDT_UUID_LEN) {
1087 			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
1088 				 warn_str, cur->nidl);
1089 			return -1;
1090 		}
1091 		uuid_copy(&ids->uuid, data + sizeof(*cur));
1092 		return NVME_NIDT_UUID_LEN;
1093 	default:
1094 		/* Skip unknown types */
1095 		return cur->nidl;
1096 	}
1097 }
1098 
1099 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
1100 		struct nvme_ns_ids *ids)
1101 {
1102 	struct nvme_command c = { };
1103 	int status;
1104 	void *data;
1105 	int pos;
1106 	int len;
1107 
1108 	c.identify.opcode = nvme_admin_identify;
1109 	c.identify.nsid = cpu_to_le32(nsid);
1110 	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
1111 
1112 	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
1113 	if (!data)
1114 		return -ENOMEM;
1115 
1116 	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1117 				      NVME_IDENTIFY_DATA_SIZE);
1118 	if (status) {
1119 		dev_warn(ctrl->device,
1120 			"Identify Descriptors failed (%d)\n", status);
1121 		 /*
1122 		  * Don't treat an error as fatal, as we potentially already
1123 		  * have an NGUID or EUI-64.
1124 		  */
1125 		if (status > 0 && !(status & NVME_SC_DNR))
1126 			status = 0;
1127 		goto free_data;
1128 	}
1129 
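	/*
	 * The returned buffer holds a list of Namespace Identification
	 * Descriptors: each entry carries a type (nidt), a length (nidl)
	 * and nidl bytes of payload; a zero nidl terminates the list.
	 */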
1130 	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
1131 		struct nvme_ns_id_desc *cur = data + pos;
1132 
1133 		if (cur->nidl == 0)
1134 			break;
1135 
1136 		len = nvme_process_ns_desc(ctrl, ids, cur);
1137 		if (len < 0)
1138 			goto free_data;
1139 
1140 		len += sizeof(*cur);
1141 	}
1142 free_data:
1143 	kfree(data);
1144 	return status;
1145 }
1146 
1147 static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
1148 {
1149 	struct nvme_command c = { };
1150 
1151 	c.identify.opcode = nvme_admin_identify;
1152 	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
1153 	c.identify.nsid = cpu_to_le32(nsid);
1154 	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
1155 				    NVME_IDENTIFY_DATA_SIZE);
1156 }
1157 
1158 static int nvme_identify_ns(struct nvme_ctrl *ctrl,
1159 		unsigned nsid, struct nvme_id_ns **id)
1160 {
1161 	struct nvme_command c = { };
1162 	int error;
1163 
1164 	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1165 	c.identify.opcode = nvme_admin_identify;
1166 	c.identify.nsid = cpu_to_le32(nsid);
1167 	c.identify.cns = NVME_ID_CNS_NS;
1168 
1169 	*id = kmalloc(sizeof(**id), GFP_KERNEL);
1170 	if (!*id)
1171 		return -ENOMEM;
1172 
1173 	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
1174 	if (error) {
1175 		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
1176 		kfree(*id);
1177 	}
1178 
1179 	return error;
1180 }
1181 
1182 static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
1183 		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
1184 {
1185 	union nvme_result res = { 0 };
1186 	struct nvme_command c;
1187 	int ret;
1188 
1189 	memset(&c, 0, sizeof(c));
1190 	c.features.opcode = op;
1191 	c.features.fid = cpu_to_le32(fid);
1192 	c.features.dword11 = cpu_to_le32(dword11);
1193 
1194 	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
1195 			buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
1196 	if (ret >= 0 && result)
1197 		*result = le32_to_cpu(res.u32);
1198 	return ret;
1199 }
1200 
1201 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
1202 		      unsigned int dword11, void *buffer, size_t buflen,
1203 		      u32 *result)
1204 {
1205 	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
1206 			     buflen, result);
1207 }
1208 EXPORT_SYMBOL_GPL(nvme_set_features);
1209 
1210 int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
1211 		      unsigned int dword11, void *buffer, size_t buflen,
1212 		      u32 *result)
1213 {
1214 	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
1215 			     buflen, result);
1216 }
1217 EXPORT_SYMBOL_GPL(nvme_get_features);
1218 
1219 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
1220 {
1221 	u32 q_count = (*count - 1) | ((*count - 1) << 16);
1222 	u32 result;
1223 	int status, nr_io_queues;
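	/*
	 * The Number of Queues feature is 0's based: dword11 carries the
	 * requested submission queue count - 1 in bits 15:0 and the
	 * completion queue count - 1 in bits 31:16, and the allocated
	 * counts come back in the same encoding (hence the min(...) + 1).
	 */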
1224 
1225 	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
1226 			&result);
1227 	if (status < 0)
1228 		return status;
1229 
1230 	/*
1231 	 * Degraded controllers might return an error when setting the queue
1232 	 * count.  We still want to be able to bring them online and offer
1233 	 * access to the admin queue, as that might be only way to fix them up.
1234 	 */
1235 	if (status > 0) {
1236 		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
1237 		*count = 0;
1238 	} else {
1239 		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
1240 		*count = min(*count, nr_io_queues);
1241 	}
1242 
1243 	return 0;
1244 }
1245 EXPORT_SYMBOL_GPL(nvme_set_queue_count);
1246 
1247 #define NVME_AEN_SUPPORTED \
1248 	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
1249 	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
1250 
1251 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
1252 {
1253 	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
1254 	int status;
1255 
1256 	if (!supported_aens)
1257 		return;
1258 
1259 	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
1260 			NULL, 0, &result);
1261 	if (status)
1262 		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1263 			 supported_aens);
1264 
1265 	queue_work(nvme_wq, &ctrl->async_event_work);
1266 }
1267 
1268 /*
1269  * Convert integer values from ioctl structures to user pointers, silently
1270  * ignoring the upper bits in the compat case to match behaviour of 32-bit
1271  * kernels.
1272  */
1273 static void __user *nvme_to_user_ptr(uintptr_t ptrval)
1274 {
1275 	if (in_compat_syscall())
1276 		ptrval = (compat_uptr_t)ptrval;
1277 	return (void __user *)ptrval;
1278 }
1279 
1280 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1281 {
1282 	struct nvme_user_io io;
1283 	struct nvme_command c;
1284 	unsigned length, meta_len;
1285 	void __user *metadata;
1286 
1287 	if (copy_from_user(&io, uio, sizeof(io)))
1288 		return -EFAULT;
1289 	if (io.flags)
1290 		return -EINVAL;
1291 
1292 	switch (io.opcode) {
1293 	case nvme_cmd_write:
1294 	case nvme_cmd_read:
1295 	case nvme_cmd_compare:
1296 		break;
1297 	default:
1298 		return -EINVAL;
1299 	}
1300 
1301 	length = (io.nblocks + 1) << ns->lba_shift;
1302 	meta_len = (io.nblocks + 1) * ns->ms;
1303 	metadata = nvme_to_user_ptr(io.metadata);
1304 
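	/*
	 * Extended-LBA formats (ns->ext) interleave the metadata with the
	 * data and transfer both in one buffer; otherwise a separate,
	 * 4-byte-aligned user metadata pointer must be supplied.
	 */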
1305 	if (ns->ext) {
1306 		length += meta_len;
1307 		meta_len = 0;
1308 	} else if (meta_len) {
1309 		if ((io.metadata & 3) || !io.metadata)
1310 			return -EINVAL;
1311 	}
1312 
1313 	memset(&c, 0, sizeof(c));
1314 	c.rw.opcode = io.opcode;
1315 	c.rw.flags = io.flags;
1316 	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
1317 	c.rw.slba = cpu_to_le64(io.slba);
1318 	c.rw.length = cpu_to_le16(io.nblocks);
1319 	c.rw.control = cpu_to_le16(io.control);
1320 	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
1321 	c.rw.reftag = cpu_to_le32(io.reftag);
1322 	c.rw.apptag = cpu_to_le16(io.apptag);
1323 	c.rw.appmask = cpu_to_le16(io.appmask);
1324 
1325 	return nvme_submit_user_cmd(ns->queue, &c,
1326 			nvme_to_user_ptr(io.addr), length,
1327 			metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
1328 }
1329 
1330 static u32 nvme_known_admin_effects(u8 opcode)
1331 {
1332 	switch (opcode) {
1333 	case nvme_admin_format_nvm:
1334 		return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
1335 					NVME_CMD_EFFECTS_CSE_MASK;
1336 	case nvme_admin_sanitize_nvm:
1337 		return NVME_CMD_EFFECTS_CSE_MASK;
1338 	default:
1339 		break;
1340 	}
1341 	return 0;
1342 }
1343 
1344 static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1345 								u8 opcode)
1346 {
1347 	u32 effects = 0;
1348 
1349 	if (ns) {
1350 		if (ctrl->effects)
1351 			effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
1352 		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
1353 			dev_warn(ctrl->device,
1354 				 "IO command:%02x has unhandled effects:%08x\n",
1355 				 opcode, effects);
1356 		return 0;
1357 	}
1358 
1359 	if (ctrl->effects)
1360 		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
1361 	effects |= nvme_known_admin_effects(opcode);
1362 
1363 	/*
1364 	 * For simplicity, IO to all namespaces is quiesced even if the command
1365 	 * effects say only one namespace is affected.
1366 	 */
1367 	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1368 		mutex_lock(&ctrl->scan_lock);
1369 		mutex_lock(&ctrl->subsys->lock);
1370 		nvme_mpath_start_freeze(ctrl->subsys);
1371 		nvme_mpath_wait_freeze(ctrl->subsys);
1372 		nvme_start_freeze(ctrl);
1373 		nvme_wait_freeze(ctrl);
1374 	}
1375 	return effects;
1376 }
1377 
1378 static void nvme_update_formats(struct nvme_ctrl *ctrl)
1379 {
1380 	struct nvme_ns *ns;
1381 
1382 	down_read(&ctrl->namespaces_rwsem);
1383 	list_for_each_entry(ns, &ctrl->namespaces, list)
1384 		if (ns->disk && nvme_revalidate_disk(ns->disk))
1385 			nvme_set_queue_dying(ns);
1386 	up_read(&ctrl->namespaces_rwsem);
1387 }
1388 
1389 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
1390 {
1391 	/*
1392 	 * Revalidate LBA changes prior to unfreezing. This is necessary to
1393 	 * prevent memory corruption if a logical block size was changed by
1394 	 * this command.
1395 	 */
1396 	if (effects & NVME_CMD_EFFECTS_LBCC)
1397 		nvme_update_formats(ctrl);
1398 	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1399 		nvme_unfreeze(ctrl);
1400 		nvme_mpath_unfreeze(ctrl->subsys);
1401 		mutex_unlock(&ctrl->subsys->lock);
1402 		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
1403 		mutex_unlock(&ctrl->scan_lock);
1404 	}
1405 	if (effects & NVME_CMD_EFFECTS_CCC)
1406 		nvme_init_identify(ctrl);
1407 	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
1408 		nvme_queue_scan(ctrl);
1409 		flush_work(&ctrl->scan_work);
1410 	}
1411 }
1412 
1413 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1414 			struct nvme_passthru_cmd __user *ucmd)
1415 {
1416 	struct nvme_passthru_cmd cmd;
1417 	struct nvme_command c;
1418 	unsigned timeout = 0;
1419 	u32 effects;
1420 	u64 result;
1421 	int status;
1422 
1423 	if (!capable(CAP_SYS_ADMIN))
1424 		return -EACCES;
1425 	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
1426 		return -EFAULT;
1427 	if (cmd.flags)
1428 		return -EINVAL;
1429 
1430 	memset(&c, 0, sizeof(c));
1431 	c.common.opcode = cmd.opcode;
1432 	c.common.flags = cmd.flags;
1433 	c.common.nsid = cpu_to_le32(cmd.nsid);
1434 	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
1435 	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1436 	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
1437 	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
1438 	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
1439 	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
1440 	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
1441 	c.common.cdw15 = cpu_to_le32(cmd.cdw15);
1442 
1443 	if (cmd.timeout_ms)
1444 		timeout = msecs_to_jiffies(cmd.timeout_ms);
1445 
1446 	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
1447 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
1448 			nvme_to_user_ptr(cmd.addr), cmd.data_len,
1449 			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
1450 			0, &result, timeout);
1451 	nvme_passthru_end(ctrl, effects);
1452 
1453 	if (status >= 0) {
1454 		if (put_user(result, &ucmd->result))
1455 			return -EFAULT;
1456 	}
1457 
1458 	return status;
1459 }
1460 
1461 static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1462 			struct nvme_passthru_cmd64 __user *ucmd)
1463 {
1464 	struct nvme_passthru_cmd64 cmd;
1465 	struct nvme_command c;
1466 	unsigned timeout = 0;
1467 	u32 effects;
1468 	int status;
1469 
1470 	if (!capable(CAP_SYS_ADMIN))
1471 		return -EACCES;
1472 	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
1473 		return -EFAULT;
1474 	if (cmd.flags)
1475 		return -EINVAL;
1476 
1477 	memset(&c, 0, sizeof(c));
1478 	c.common.opcode = cmd.opcode;
1479 	c.common.flags = cmd.flags;
1480 	c.common.nsid = cpu_to_le32(cmd.nsid);
1481 	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
1482 	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1483 	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
1484 	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
1485 	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
1486 	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
1487 	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
1488 	c.common.cdw15 = cpu_to_le32(cmd.cdw15);
1489 
1490 	if (cmd.timeout_ms)
1491 		timeout = msecs_to_jiffies(cmd.timeout_ms);
1492 
1493 	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
1494 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
1495 			nvme_to_user_ptr(cmd.addr), cmd.data_len,
1496 			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
1497 			0, &cmd.result, timeout);
1498 	nvme_passthru_end(ctrl, effects);
1499 
1500 	if (status >= 0) {
1501 		if (put_user(cmd.result, &ucmd->result))
1502 			return -EFAULT;
1503 	}
1504 
1505 	return status;
1506 }
1507 
1508 /*
1509  * Issue ioctl requests on the first available path.  Note that unlike normal
1510  * block layer requests we will not retry failed requests on another controller.
1511  */
1512 static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
1513 		struct nvme_ns_head **head, int *srcu_idx)
1514 {
1515 #ifdef CONFIG_NVME_MULTIPATH
1516 	if (disk->fops == &nvme_ns_head_ops) {
1517 		struct nvme_ns *ns;
1518 
1519 		*head = disk->private_data;
1520 		*srcu_idx = srcu_read_lock(&(*head)->srcu);
1521 		ns = nvme_find_path(*head);
1522 		if (!ns)
1523 			srcu_read_unlock(&(*head)->srcu, *srcu_idx);
1524 		return ns;
1525 	}
1526 #endif
1527 	*head = NULL;
1528 	*srcu_idx = -1;
1529 	return disk->private_data;
1530 }
1531 
1532 static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
1533 {
1534 	if (head)
1535 		srcu_read_unlock(&head->srcu, idx);
1536 }
1537 
1538 static bool is_ctrl_ioctl(unsigned int cmd)
1539 {
1540 	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
1541 		return true;
1542 	if (is_sed_ioctl(cmd))
1543 		return true;
1544 	return false;
1545 }
1546 
1547 static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
1548 				  void __user *argp,
1549 				  struct nvme_ns_head *head,
1550 				  int srcu_idx)
1551 {
1552 	struct nvme_ctrl *ctrl = ns->ctrl;
1553 	int ret;
1554 
1555 	nvme_get_ctrl(ns->ctrl);
1556 	nvme_put_ns_from_disk(head, srcu_idx);
1557 
1558 	switch (cmd) {
1559 	case NVME_IOCTL_ADMIN_CMD:
1560 		ret = nvme_user_cmd(ctrl, NULL, argp);
1561 		break;
1562 	case NVME_IOCTL_ADMIN64_CMD:
1563 		ret = nvme_user_cmd64(ctrl, NULL, argp);
1564 		break;
1565 	default:
1566 		ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
1567 		break;
1568 	}
1569 	nvme_put_ctrl(ctrl);
1570 	return ret;
1571 }
1572 
1573 static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
1574 		unsigned int cmd, unsigned long arg)
1575 {
1576 	struct nvme_ns_head *head = NULL;
1577 	void __user *argp = (void __user *)arg;
1578 	struct nvme_ns *ns;
1579 	int srcu_idx, ret;
1580 
1581 	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
1582 	if (unlikely(!ns))
1583 		return -EWOULDBLOCK;
1584 
1585 	/*
1586 	 * Handle ioctls that apply to the controller instead of the namespace
1587 	 * separately and drop the ns SRCU reference early.  This avoids a
1588 	 * deadlock when deleting namespaces using the passthrough interface.
1589 	 */
1590 	if (is_ctrl_ioctl(cmd))
1591 		return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
1592 
1593 	switch (cmd) {
1594 	case NVME_IOCTL_ID:
1595 		force_successful_syscall_return();
1596 		ret = ns->head->ns_id;
1597 		break;
1598 	case NVME_IOCTL_IO_CMD:
1599 		ret = nvme_user_cmd(ns->ctrl, ns, argp);
1600 		break;
1601 	case NVME_IOCTL_SUBMIT_IO:
1602 		ret = nvme_submit_io(ns, argp);
1603 		break;
1604 	case NVME_IOCTL_IO64_CMD:
1605 		ret = nvme_user_cmd64(ns->ctrl, ns, argp);
1606 		break;
1607 	default:
1608 		if (ns->ndev)
1609 			ret = nvme_nvm_ioctl(ns, cmd, arg);
1610 		else
1611 			ret = -ENOTTY;
1612 	}
1613 
1614 	nvme_put_ns_from_disk(head, srcu_idx);
1615 	return ret;
1616 }
1617 
1618 #ifdef CONFIG_COMPAT
1619 struct nvme_user_io32 {
1620 	__u8	opcode;
1621 	__u8	flags;
1622 	__u16	control;
1623 	__u16	nblocks;
1624 	__u16	rsvd;
1625 	__u64	metadata;
1626 	__u64	addr;
1627 	__u64	slba;
1628 	__u32	dsmgmt;
1629 	__u32	reftag;
1630 	__u16	apptag;
1631 	__u16	appmask;
1632 } __attribute__((__packed__));
1633 
1634 #define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)
1635 
1636 static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
1637 		unsigned int cmd, unsigned long arg)
1638 {
1639 	/*
1640 	 * NVME_IOCTL_SUBMIT_IO has a different value in 32-bit programs and
1641 	 * in the 64-bit kernel because sizeof(struct nvme_user_io), which
1642 	 * is used to define NVME_IOCTL_SUBMIT_IO, differs between 32-bit
1643 	 * and 64-bit compilers (44 vs 48 bytes, since __u64 may only be
1644 	 * 4-byte aligned on 32-bit targets such as i386).
1645 	 * NVME_IOCTL_SUBMIT_IO32 lets the 64-bit kernel handle
1646 	 * NVME_IOCTL_SUBMIT_IO as issued by 32-bit programs.
1647 	 * All other ioctl numbers are the same between 32-bit and 64-bit
1648 	 * builds, so nothing needs to be done for them.
1649 	 */
1650 	if (cmd == NVME_IOCTL_SUBMIT_IO32)
1651 		return nvme_ioctl(bdev, mode, NVME_IOCTL_SUBMIT_IO, arg);
1652 
1653 	return nvme_ioctl(bdev, mode, cmd, arg);
1654 }
1655 #else
1656 #define nvme_compat_ioctl	NULL
1657 #endif /* CONFIG_COMPAT */
1658 
1659 static int nvme_open(struct block_device *bdev, fmode_t mode)
1660 {
1661 	struct nvme_ns *ns = bdev->bd_disk->private_data;
1662 
1663 #ifdef CONFIG_NVME_MULTIPATH
1664 	/* should never be called due to GENHD_FL_HIDDEN */
1665 	if (WARN_ON_ONCE(ns->head->disk))
1666 		goto fail;
1667 #endif
1668 	if (!kref_get_unless_zero(&ns->kref))
1669 		goto fail;
1670 	if (!try_module_get(ns->ctrl->ops->module))
1671 		goto fail_put_ns;
1672 
1673 	return 0;
1674 
1675 fail_put_ns:
1676 	nvme_put_ns(ns);
1677 fail:
1678 	return -ENXIO;
1679 }
1680 
1681 static void nvme_release(struct gendisk *disk, fmode_t mode)
1682 {
1683 	struct nvme_ns *ns = disk->private_data;
1684 
1685 	module_put(ns->ctrl->ops->module);
1686 	nvme_put_ns(ns);
1687 }
1688 
1689 static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1690 {
1691 	/* some standard values */
1692 	geo->heads = 1 << 6;
1693 	geo->sectors = 1 << 5;
1694 	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
1695 	return 0;
1696 }
1697 
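/*
 * T10 PI note: Type 1 and Type 2 namespaces get guard and reference tag
 * verification (t10_pi_type1_crc), whereas Type 3 defines no usable
 * reference tag, so only the guard tag is checked for it.
 */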
1698 #ifdef CONFIG_BLK_DEV_INTEGRITY
1699 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
1700 {
1701 	struct blk_integrity integrity;
1702 
1703 	memset(&integrity, 0, sizeof(integrity));
1704 	switch (pi_type) {
1705 	case NVME_NS_DPS_PI_TYPE3:
1706 		integrity.profile = &t10_pi_type3_crc;
1707 		integrity.tag_size = sizeof(u16) + sizeof(u32);
1708 		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1709 		break;
1710 	case NVME_NS_DPS_PI_TYPE1:
1711 	case NVME_NS_DPS_PI_TYPE2:
1712 		integrity.profile = &t10_pi_type1_crc;
1713 		integrity.tag_size = sizeof(u16);
1714 		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1715 		break;
1716 	default:
1717 		integrity.profile = NULL;
1718 		break;
1719 	}
1720 	integrity.tuple_size = ms;
1721 	blk_integrity_register(disk, &integrity);
1722 	blk_queue_max_integrity_segments(disk->queue, 1);
1723 }
1724 #else
1725 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
1726 {
1727 }
1728 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1729 
1730 static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
1731 {
1732 	struct nvme_ctrl *ctrl = ns->ctrl;
1733 	struct request_queue *queue = disk->queue;
1734 	u32 size = queue_logical_block_size(queue);
1735 
1736 	if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
1737 		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
1738 		return;
1739 	}
1740 
1741 	if (ctrl->nr_streams && ns->sws && ns->sgs)
1742 		size *= ns->sws * ns->sgs;
1743 
1744 	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
1745 			NVME_DSM_MAX_RANGES);
1746 
1747 	queue->limits.discard_alignment = 0;
1748 	queue->limits.discard_granularity = size;
1749 
1750 	/* If discard is already enabled, don't reset queue limits */
1751 	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
1752 		return;
1753 
1754 	blk_queue_max_discard_sectors(queue, UINT_MAX);
1755 	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
1756 
1757 	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
1758 		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
1759 }
1760 
1761 static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
1762 {
1763 	u64 max_blocks;
1764 
1765 	if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
1766 	    (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
1767 		return;
1768 	/*
1769 	 * Even though the NVMe spec explicitly states that MDTS is not
1770 	 * applicable to Write Zeroes ("The restriction does not apply to
1771 	 * commands that do not transfer data between the host and the
1772 	 * controller (e.g., Write Uncorrectable or Write Zeroes command)"),
1773 	 * be cautious and use the controller's max_hw_sectors value, which
1774 	 * is configured from the controller's MDTS field in
1775 	 * nvme_init_identify() if available, to cap the maximum number of
1776 	 * sectors for Write Zeroes commands.
1777 	 */
1778 	if (ns->ctrl->max_hw_sectors == UINT_MAX)
1779 		max_blocks = (u64)USHRT_MAX + 1;
1780 	else
1781 		max_blocks = ns->ctrl->max_hw_sectors + 1;
1782 
1783 	blk_queue_max_write_zeroes_sectors(disk->queue,
1784 					   nvme_lba_to_sect(ns, max_blocks));
1785 }
1786 
1787 static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
1788 		struct nvme_id_ns *id, struct nvme_ns_ids *ids)
1789 {
1790 	memset(ids, 0, sizeof(*ids));
1791 
1792 	if (ctrl->vs >= NVME_VS(1, 1, 0))
1793 		memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
1794 	if (ctrl->vs >= NVME_VS(1, 2, 0))
1795 		memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
1796 	if (ctrl->vs >= NVME_VS(1, 3, 0))
1797 		return nvme_identify_ns_descs(ctrl, nsid, ids);
1798 	return 0;
1799 }
1800 
1801 static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
1802 {
1803 	return !uuid_is_null(&ids->uuid) ||
1804 		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
1805 		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
1806 }
1807 
1808 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
1809 {
1810 	return uuid_equal(&a->uuid, &b->uuid) &&
1811 		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
1812 		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
1813 }
1814 
1815 static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1816 				 u32 *phys_bs, u32 *io_opt)
1817 {
1818 	struct streams_directive_params s;
1819 	int ret;
1820 
1821 	if (!ctrl->nr_streams)
1822 		return 0;
1823 
1824 	ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
1825 	if (ret)
1826 		return ret;
1827 
1828 	ns->sws = le32_to_cpu(s.sws);
1829 	ns->sgs = le16_to_cpu(s.sgs);
1830 
1831 	if (ns->sws) {
1832 		*phys_bs = ns->sws * (1 << ns->lba_shift);
1833 		if (ns->sgs)
1834 			*io_opt = *phys_bs * ns->sgs;
1835 	}
1836 
1837 	return 0;
1838 }
1839 
1840 static void nvme_update_disk_info(struct gendisk *disk,
1841 		struct nvme_ns *ns, struct nvme_id_ns *id)
1842 {
1843 	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
1844 	unsigned short bs = 1 << ns->lba_shift;
1845 	u32 atomic_bs, phys_bs, io_opt;
1846 
1847 	if (ns->lba_shift > PAGE_SHIFT) {
1848 		/* unsupported block size, set capacity to 0 later */
1849 		bs = (1 << 9);
1850 	}
1851 	blk_mq_freeze_queue(disk->queue);
1852 	blk_integrity_unregister(disk);
1853 
1854 	atomic_bs = phys_bs = io_opt = bs;
1855 	nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt);
1856 	if (id->nabo == 0) {
1857 		/*
1858 		 * Bit 1 indicates whether NAWUPF is defined for this namespace
1859 		 * and whether it should be used instead of AWUPF. If NAWUPF ==
1860 		 * 0 then AWUPF must be used instead.
1861 		 */
1862 		if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
1863 			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
1864 		else
1865 			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
1866 	}
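	/*
	 * Example with hypothetical values: for 512-byte LBAs and
	 * NAWUPF = 7, atomic_bs = (1 + 7) * 512 = 4096, i.e. 4 KiB writes
	 * are power-fail atomic.
	 */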
1867 
1868 	if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
1869 		/* NPWG = Namespace Preferred Write Granularity */
1870 		phys_bs = bs * (1 + le16_to_cpu(id->npwg));
1871 		/* NOWS = Namespace Optimal Write Size */
1872 		io_opt = bs * (1 + le16_to_cpu(id->nows));
1873 	}
1874 
1875 	blk_queue_logical_block_size(disk->queue, bs);
1876 	/*
1877 	 * Linux filesystems assume writing a single physical block is
1878 	 * an atomic operation. Hence limit the physical block size to the
1879 	 * value of the Atomic Write Unit Power Fail parameter.
1880 	 */
1881 	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
1882 	blk_queue_io_min(disk->queue, phys_bs);
1883 	blk_queue_io_opt(disk->queue, io_opt);
1884 
1885 	if (ns->ms && !ns->ext &&
1886 	    (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1887 		nvme_init_integrity(disk, ns->ms, ns->pi_type);
1888 	if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
1889 	    ns->lba_shift > PAGE_SHIFT)
1890 		capacity = 0;
1891 
1892 	set_capacity_revalidate_and_notify(disk, capacity, false);
1893 
1894 	nvme_config_discard(disk, ns);
1895 	nvme_config_write_zeroes(disk, ns);
1896 
1897 	if (id->nsattr & NVME_NS_ATTR_RO)
1898 		set_disk_ro(disk, true);
1899 	else
1900 		set_disk_ro(disk, false);
1901 
1902 	blk_mq_unfreeze_queue(disk->queue);
1903 }
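
/*
 * Worked example for the atomic write limit (assumed values): with
 * 512-byte LBAs and NAWUPF == 7 (0's based, i.e. 8 LBAs),
 * atomic_bs = (1 + 7) * 512 = 4096 bytes, so the physical block size
 * reported to the block layer is min(phys_bs, 4096) and a single-block
 * filesystem write stays within the power-fail atomic unit.
 */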
1904 
1905 static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
1906 {
1907 	struct nvme_ns *ns = disk->private_data;
1908 	u32 iob;
1909 
1910 	/*
1911 	 * If identify namespace failed, use a default 512 byte block size so
1912 	 * the block layer can operate before read/write fails for 0 capacity.
1913 	 */
1914 	ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
1915 	if (ns->lba_shift == 0)
1916 		ns->lba_shift = 9;
1917 
1918 	if ((ns->ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
1919 	    is_power_of_2(ns->ctrl->max_hw_sectors))
1920 		iob = ns->ctrl->max_hw_sectors;
1921 	else
1922 		iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
1923 
1924 	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
1925 	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
1926 	/* the PI implementation requires metadata equal to the T10 PI tuple size */
1927 	if (ns->ms == sizeof(struct t10_pi_tuple))
1928 		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
1929 	else
1930 		ns->pi_type = 0;
1931 
1932 	if (iob)
1933 		blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(iob));
1934 	nvme_update_disk_info(disk, ns, id);
1935 #ifdef CONFIG_NVME_MULTIPATH
1936 	if (ns->head->disk) {
1937 		nvme_update_disk_info(ns->head->disk, ns, id);
1938 		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
1939 		revalidate_disk(ns->head->disk);
1940 	}
1941 #endif
1942 }
1943 
1944 static int nvme_revalidate_disk(struct gendisk *disk)
1945 {
1946 	struct nvme_ns *ns = disk->private_data;
1947 	struct nvme_ctrl *ctrl = ns->ctrl;
1948 	struct nvme_id_ns *id;
1949 	struct nvme_ns_ids ids;
1950 	int ret = 0;
1951 
1952 	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
1953 		set_capacity(disk, 0);
1954 		return -ENODEV;
1955 	}
1956 
1957 	ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id);
1958 	if (ret)
1959 		goto out;
1960 
1961 	if (id->ncap == 0) {
1962 		ret = -ENODEV;
1963 		goto free_id;
1964 	}
1965 
1966 	ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
1967 	if (ret)
1968 		goto free_id;
1969 
1970 	if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
1971 		dev_err(ctrl->device,
1972 			"identifiers changed for nsid %d\n", ns->head->ns_id);
1973 		ret = -ENODEV;
1974 		goto free_id;
1975 	}
1976 
1977 	__nvme_revalidate_disk(disk, id);
1978 free_id:
1979 	kfree(id);
1980 out:
1981 	/*
1982 	 * Only fail the function if we got a fatal error back from the
1983 	 * device, otherwise ignore the error and just move on.
1984 	 */
1985 	if (ret == -ENOMEM || (ret > 0 && !(ret & NVME_SC_DNR)))
1986 		ret = 0;
1987 	else if (ret > 0)
1988 		ret = blk_status_to_errno(nvme_error_status(ret));
1989 	return ret;
1990 }
1991 
1992 static char nvme_pr_type(enum pr_type type)
1993 {
1994 	switch (type) {
1995 	case PR_WRITE_EXCLUSIVE:
1996 		return 1;
1997 	case PR_EXCLUSIVE_ACCESS:
1998 		return 2;
1999 	case PR_WRITE_EXCLUSIVE_REG_ONLY:
2000 		return 3;
2001 	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
2002 		return 4;
2003 	case PR_WRITE_EXCLUSIVE_ALL_REGS:
2004 		return 5;
2005 	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
2006 		return 6;
2007 	default:
2008 		return 0;
2009 	}
2010 };
2011 
2012 static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
2013 				u64 key, u64 sa_key, u8 op)
2014 {
2015 	struct nvme_ns_head *head = NULL;
2016 	struct nvme_ns *ns;
2017 	struct nvme_command c;
2018 	int srcu_idx, ret;
2019 	u8 data[16] = { 0, };
2020 
2021 	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
2022 	if (unlikely(!ns))
2023 		return -EWOULDBLOCK;
2024 
2025 	put_unaligned_le64(key, &data[0]);
2026 	put_unaligned_le64(sa_key, &data[8]);
2027 
2028 	memset(&c, 0, sizeof(c));
2029 	c.common.opcode = op;
2030 	c.common.nsid = cpu_to_le32(ns->head->ns_id);
2031 	c.common.cdw10 = cpu_to_le32(cdw10);
2032 
2033 	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
2034 	nvme_put_ns_from_disk(head, srcu_idx);
2035 	return ret;
2036 }
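
/*
 * The 16-byte payload built above follows the NVMe reservation layout:
 * bytes 0-7 hold the current reservation key and bytes 8-15 the second
 * key (the new key for Register, the preempt key for Acquire), which is
 * why every caller can pass (key, sa_key) straight through.
 */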
2037 
2038 static int nvme_pr_register(struct block_device *bdev, u64 old,
2039 		u64 new, unsigned flags)
2040 {
2041 	u32 cdw10;
2042 
2043 	if (flags & ~PR_FL_IGNORE_KEY)
2044 		return -EOPNOTSUPP;
2045 
2046 	cdw10 = old ? 2 : 0;
2047 	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
2048 	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
2049 	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
2050 }
2051 
2052 static int nvme_pr_reserve(struct block_device *bdev, u64 key,
2053 		enum pr_type type, unsigned flags)
2054 {
2055 	u32 cdw10;
2056 
2057 	if (flags & ~PR_FL_IGNORE_KEY)
2058 		return -EOPNOTSUPP;
2059 
2060 	cdw10 = nvme_pr_type(type) << 8;
2061 	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
2062 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
2063 }
2064 
2065 static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
2066 		enum pr_type type, bool abort)
2067 {
2068 	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
2069 	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
2070 }
2071 
2072 static int nvme_pr_clear(struct block_device *bdev, u64 key)
2073 {
2074 	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
2075 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
2076 }
2077 
2078 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
2079 {
2080 	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
2081 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
2082 }
2083 
2084 static const struct pr_ops nvme_pr_ops = {
2085 	.pr_register	= nvme_pr_register,
2086 	.pr_reserve	= nvme_pr_reserve,
2087 	.pr_release	= nvme_pr_release,
2088 	.pr_preempt	= nvme_pr_preempt,
2089 	.pr_clear	= nvme_pr_clear,
2090 };
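
/*
 * Userspace reaches these callbacks through the generic block layer
 * persistent reservation ioctls from <linux/pr.h>. A minimal sketch
 * (device path and keys are made-up examples, error handling omitted):
 *
 *	struct pr_registration reg = { .old_key = 0, .new_key = 0xabcd };
 *	struct pr_reservation rsv = { .key = 0xabcd,
 *				      .type = PR_WRITE_EXCLUSIVE };
 *	int fd = open("/dev/nvme0n1", O_RDWR);
 *
 *	ioctl(fd, IOC_PR_REGISTER, &reg);	// ends up in nvme_pr_register()
 *	ioctl(fd, IOC_PR_RESERVE, &rsv);	// ends up in nvme_pr_reserve()
 */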
2091 
2092 #ifdef CONFIG_BLK_SED_OPAL
2093 int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
2094 		bool send)
2095 {
2096 	struct nvme_ctrl *ctrl = data;
2097 	struct nvme_command cmd;
2098 
2099 	memset(&cmd, 0, sizeof(cmd));
2100 	if (send)
2101 		cmd.common.opcode = nvme_admin_security_send;
2102 	else
2103 		cmd.common.opcode = nvme_admin_security_recv;
2104 	cmd.common.nsid = 0;
2105 	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
2106 	cmd.common.cdw11 = cpu_to_le32(len);
2107 
2108 	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
2109 				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
2110 }
2111 EXPORT_SYMBOL_GPL(nvme_sec_submit);
2112 #endif /* CONFIG_BLK_SED_OPAL */
2113 
2114 static const struct block_device_operations nvme_fops = {
2115 	.owner		= THIS_MODULE,
2116 	.ioctl		= nvme_ioctl,
2117 	.compat_ioctl	= nvme_compat_ioctl,
2118 	.open		= nvme_open,
2119 	.release	= nvme_release,
2120 	.getgeo		= nvme_getgeo,
2121 	.revalidate_disk = nvme_revalidate_disk,
2122 	.pr_ops		= &nvme_pr_ops,
2123 };
2124 
2125 #ifdef CONFIG_NVME_MULTIPATH
2126 static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
2127 {
2128 	struct nvme_ns_head *head = bdev->bd_disk->private_data;
2129 
2130 	if (!kref_get_unless_zero(&head->ref))
2131 		return -ENXIO;
2132 	return 0;
2133 }
2134 
2135 static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
2136 {
2137 	nvme_put_ns_head(disk->private_data);
2138 }
2139 
2140 const struct block_device_operations nvme_ns_head_ops = {
2141 	.owner		= THIS_MODULE,
2142 	.open		= nvme_ns_head_open,
2143 	.release	= nvme_ns_head_release,
2144 	.ioctl		= nvme_ioctl,
2145 	.compat_ioctl	= nvme_compat_ioctl,
2146 	.getgeo		= nvme_getgeo,
2147 	.pr_ops		= &nvme_pr_ops,
2148 };
2149 #endif /* CONFIG_NVME_MULTIPATH */
2150 
2151 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
2152 {
2153 	unsigned long timeout =
2154 		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
2155 	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
2156 	int ret;
2157 
2158 	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2159 		if (csts == ~0)
2160 			return -ENODEV;
2161 		if ((csts & NVME_CSTS_RDY) == bit)
2162 			break;
2163 
2164 		usleep_range(1000, 2000);
2165 		if (fatal_signal_pending(current))
2166 			return -EINTR;
2167 		if (time_after(jiffies, timeout)) {
2168 			dev_err(ctrl->device,
2169 				"Device not ready; aborting %s, CSTS=0x%x\n",
2170 				enabled ? "initialisation" : "reset", csts);
2171 			return -ENODEV;
2172 		}
2173 	}
2174 
2175 	return ret;
2176 }
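
/*
 * Timeout arithmetic: CAP.TO is reported in 500ms units, so a controller
 * with CAP.TO == 30 gives the loop above a budget of (30 + 1) * HZ / 2
 * jiffies, i.e. 15.5 seconds, before it reports "Device not ready".
 */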
2177 
2178 /*
2179  * If the device has been passed off to us in an enabled state, just clear
2180  * the enabled bit.  The spec says we should set the 'shutdown notification
2181  * bits', but doing so may cause the device to complete commands to the
2182  * admin queue ... and we don't know what memory that might be pointing at!
2183  */
2184 int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
2185 {
2186 	int ret;
2187 
2188 	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2189 	ctrl->ctrl_config &= ~NVME_CC_ENABLE;
2190 
2191 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2192 	if (ret)
2193 		return ret;
2194 
2195 	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
2196 		msleep(NVME_QUIRK_DELAY_AMOUNT);
2197 
2198 	return nvme_wait_ready(ctrl, ctrl->cap, false);
2199 }
2200 EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
2201 
2202 int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
2203 {
2204 	/*
2205 	 * Default to a 4K page size, with the intention to update this
2206 	 * path in the future to accommodate architectures with differing
2207 	 * kernel and IO page sizes.
2208 	 */
2209 	unsigned dev_page_min, page_shift = 12;
2210 	int ret;
2211 
2212 	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2213 	if (ret) {
2214 		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
2215 		return ret;
2216 	}
2217 	dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2218 
2219 	if (page_shift < dev_page_min) {
2220 		dev_err(ctrl->device,
2221 			"Minimum device page size %u too large for host (%u)\n",
2222 			1 << dev_page_min, 1 << page_shift);
2223 		return -ENODEV;
2224 	}
2225 
2226 	ctrl->page_size = 1 << page_shift;
2227 
2228 	ctrl->ctrl_config = NVME_CC_CSS_NVM;
2229 	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
2230 	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2231 	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
2232 	ctrl->ctrl_config |= NVME_CC_ENABLE;
2233 
2234 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2235 	if (ret)
2236 		return ret;
2237 	return nvme_wait_ready(ctrl, ctrl->cap, true);
2238 }
2239 EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
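
/*
 * CC composition example: with the default 4KiB host page size,
 * CC.MPS = page_shift - 12 = 0 (MPS encodes a 2^(12 + MPS) byte page),
 * so any controller with MPSMIN == 0 is accepted; a hypothetical device
 * with an 8KiB minimum page (MPSMIN == 1) fails the check with -ENODEV.
 */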
2240 
2241 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
2242 {
2243 	unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
2244 	u32 csts;
2245 	int ret;
2246 
2247 	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2248 	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
2249 
2250 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2251 	if (ret)
2252 		return ret;
2253 
2254 	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2255 		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
2256 			break;
2257 
2258 		msleep(100);
2259 		if (fatal_signal_pending(current))
2260 			return -EINTR;
2261 		if (time_after(jiffies, timeout)) {
2262 			dev_err(ctrl->device,
2263 				"Device shutdown incomplete; abort shutdown\n");
2264 			return -ENODEV;
2265 		}
2266 	}
2267 
2268 	return ret;
2269 }
2270 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
2271 
2272 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
2273 		struct request_queue *q)
2274 {
2275 	bool vwc = false;
2276 
2277 	if (ctrl->max_hw_sectors) {
2278 		u32 max_segments =
2279 			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
2280 
2281 		max_segments = min_not_zero(max_segments, ctrl->max_segments);
2282 		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
2283 		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
2284 	}
2285 	blk_queue_virt_boundary(q, ctrl->page_size - 1);
2286 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
2287 		vwc = true;
2288 	blk_queue_write_cache(q, vwc, vwc);
2289 }
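
/*
 * Segment count example (assumed values): with a 4KiB controller page
 * (ctrl->page_size >> 9 == 8) and max_hw_sectors == 256 (128KiB),
 * max_segments = 256 / 8 + 1 = 33 -- one segment per controller page
 * plus one to cover an unaligned first page.
 */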
2290 
2291 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
2292 {
2293 	__le64 ts;
2294 	int ret;
2295 
2296 	if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
2297 		return 0;
2298 
2299 	ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
2300 	ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
2301 			NULL);
2302 	if (ret)
2303 		dev_warn_once(ctrl->device,
2304 			"could not set timestamp (%d)\n", ret);
2305 	return ret;
2306 }
2307 
2308 static int nvme_configure_acre(struct nvme_ctrl *ctrl)
2309 {
2310 	struct nvme_feat_host_behavior *host;
2311 	int ret;
2312 
2313 	/* Don't bother enabling the feature if retry delay is not reported */
2314 	if (!ctrl->crdt[0])
2315 		return 0;
2316 
2317 	host = kzalloc(sizeof(*host), GFP_KERNEL);
2318 	if (!host)
2319 		return 0;
2320 
2321 	host->acre = NVME_ENABLE_ACRE;
2322 	ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
2323 				host, sizeof(*host), NULL);
2324 	kfree(host);
2325 	return ret;
2326 }
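
/*
 * Background: with ACRE (Advanced Command Retry Enable) the controller
 * may ask the host to delay a command retry by one of the CRDT1-3 times
 * from Identify Controller (100ms units); if crdt[0] is zero there is
 * no delay to honour, hence the early return above.
 */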
2327 
2328 static int nvme_configure_apst(struct nvme_ctrl *ctrl)
2329 {
2330 	/*
2331 	 * APST (Autonomous Power State Transition) lets us program a
2332 	 * table of power state transitions that the controller will
2333 	 * perform automatically.  We configure it with a simple
2334 	 * heuristic: we are willing to spend at most 2% of the time
2335 	 * transitioning between power states.  Therefore, when running
2336 	 * in any given state, we will enter the next lower-power
2337 	 * non-operational state after waiting 50 * (enlat + exlat)
2338 	 * microseconds, as long as that state's exit latency is under
2339 	 * the requested maximum latency.
2340 	 *
2341 	 * We will not autonomously enter any non-operational state for
2342 	 * which the total latency exceeds ps_max_latency_us.  Users
2343 	 * can set ps_max_latency_us to zero to turn off APST.
2344 	 */
2345 
2346 	unsigned apste;
2347 	struct nvme_feat_auto_pst *table;
2348 	u64 max_lat_us = 0;
2349 	int max_ps = -1;
2350 	int ret;
2351 
2352 	/*
2353 	 * If APST isn't supported or if we haven't been initialized yet,
2354 	 * then don't do anything.
2355 	 */
2356 	if (!ctrl->apsta)
2357 		return 0;
2358 
2359 	if (ctrl->npss > 31) {
2360 		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2361 		return 0;
2362 	}
2363 
2364 	table = kzalloc(sizeof(*table), GFP_KERNEL);
2365 	if (!table)
2366 		return 0;
2367 
2368 	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
2369 		/* Turn off APST. */
2370 		apste = 0;
2371 		dev_dbg(ctrl->device, "APST disabled\n");
2372 	} else {
2373 		__le64 target = cpu_to_le64(0);
2374 		int state;
2375 
2376 		/*
2377 		 * Walk through all states from lowest- to highest-power.
2378 		 * According to the spec, lower-numbered states use more
2379 		 * power.  NPSS, despite the name, is the index of the
2380 		 * lowest-power state, not the number of states.
2381 		 */
2382 		for (state = (int)ctrl->npss; state >= 0; state--) {
2383 			u64 total_latency_us, exit_latency_us, transition_ms;
2384 
2385 			if (target)
2386 				table->entries[state] = target;
2387 
2388 			/*
2389 			 * Don't allow transitions to the deepest state
2390 			 * if it's quirked off.
2391 			 */
2392 			if (state == ctrl->npss &&
2393 			    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2394 				continue;
2395 
2396 			/*
2397 			 * Is this state a useful non-operational state for
2398 			 * higher-power states to autonomously transition to?
2399 			 */
2400 			if (!(ctrl->psd[state].flags &
2401 			      NVME_PS_FLAGS_NON_OP_STATE))
2402 				continue;
2403 
2404 			exit_latency_us =
2405 				(u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2406 			if (exit_latency_us > ctrl->ps_max_latency_us)
2407 				continue;
2408 
2409 			total_latency_us =
2410 				exit_latency_us +
2411 				le32_to_cpu(ctrl->psd[state].entry_lat);
2412 
2413 			/*
2414 			 * This state is good.  Use it as the APST idle
2415 			 * target for higher power states.
2416 			 */
2417 			transition_ms = total_latency_us + 19;
2418 			do_div(transition_ms, 20);
2419 			if (transition_ms > (1 << 24) - 1)
2420 				transition_ms = (1 << 24) - 1;
2421 
2422 			target = cpu_to_le64((state << 3) |
2423 					     (transition_ms << 8));
2424 
2425 			if (max_ps == -1)
2426 				max_ps = state;
2427 
2428 			if (total_latency_us > max_lat_us)
2429 				max_lat_us = total_latency_us;
2430 		}
2431 
2432 		apste = 1;
2433 
2434 		if (max_ps == -1) {
2435 			dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2436 		} else {
2437 			dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2438 				max_ps, max_lat_us, (int)sizeof(*table), table);
2439 		}
2440 	}
2441 
2442 	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
2443 				table, sizeof(*table), NULL);
2444 	if (ret)
2445 		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
2446 
2447 	kfree(table);
2448 	return ret;
2449 }
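
/*
 * Encoding example (assumed values): a non-operational state 4 with
 * enlat + exlat == 1000us gets transition_ms = (1000 + 19) / 20 = 50
 * (the 2% heuristic above), and its table entry becomes
 * (4 << 3) | (50 << 8) -- idle transition power state in bits 7:3,
 * idle time prior to transition, in milliseconds, in bits 31:8.
 */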
2450 
2451 static void nvme_set_latency_tolerance(struct device *dev, s32 val)
2452 {
2453 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2454 	u64 latency;
2455 
2456 	switch (val) {
2457 	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
2458 	case PM_QOS_LATENCY_ANY:
2459 		latency = U64_MAX;
2460 		break;
2461 
2462 	default:
2463 		latency = val;
2464 	}
2465 
2466 	if (ctrl->ps_max_latency_us != latency) {
2467 		ctrl->ps_max_latency_us = latency;
2468 		nvme_configure_apst(ctrl);
2469 	}
2470 }
2471 
2472 struct nvme_core_quirk_entry {
2473 	/*
2474 	 * NVMe model and firmware strings are padded with spaces.  For
2475 	 * simplicity, strings in the quirk table are padded with NULLs
2476 	 * instead.
2477 	 */
2478 	u16 vid;
2479 	const char *mn;
2480 	const char *fr;
2481 	unsigned long quirks;
2482 };
2483 
2484 static const struct nvme_core_quirk_entry core_quirks[] = {
2485 	{
2486 		/*
2487 		 * This Toshiba device seems to die using any APST states.  See:
2488 		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
2489 		 */
2490 		.vid = 0x1179,
2491 		.mn = "THNSF5256GPUK TOSHIBA",
2492 		.quirks = NVME_QUIRK_NO_APST,
2493 	},
2494 	{
2495 		/*
2496 		 * This LiteON CL1-3D*-Q11 firmware version has a race
2497 		 * condition associated with actions related to suspend to idle.
2498 		 * LiteON has resolved the problem in later firmware.
2499 		 */
2500 		.vid = 0x14a4,
2501 		.fr = "22301111",
2502 		.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2503 	}
2504 };
2505 
2506 /* match is null-terminated but idstr is space-padded. */
2507 static bool string_matches(const char *idstr, const char *match, size_t len)
2508 {
2509 	size_t matchlen;
2510 
2511 	if (!match)
2512 		return true;
2513 
2514 	matchlen = strlen(match);
2515 	WARN_ON_ONCE(matchlen > len);
2516 
2517 	if (memcmp(idstr, match, matchlen))
2518 		return false;
2519 
2520 	for (; matchlen < len; matchlen++)
2521 		if (idstr[matchlen] != ' ')
2522 			return false;
2523 
2524 	return true;
2525 }
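
/*
 * Matching example: a quirk entry of "22301111" matches an Identify
 * firmware revision of "22301111" followed only by trailing spaces, but
 * not "22301111A" -- the tail scan above rejects any non-space byte.
 */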
2526 
2527 static bool quirk_matches(const struct nvme_id_ctrl *id,
2528 			  const struct nvme_core_quirk_entry *q)
2529 {
2530 	return q->vid == le16_to_cpu(id->vid) &&
2531 		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
2532 		string_matches(id->fr, q->fr, sizeof(id->fr));
2533 }
2534 
2535 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
2536 		struct nvme_id_ctrl *id)
2537 {
2538 	size_t nqnlen;
2539 	int off;
2540 
2541 	if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
2542 		nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
2543 		if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
2544 			strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2545 			return;
2546 		}
2547 
2548 		if (ctrl->vs >= NVME_VS(1, 2, 1))
2549 			dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2550 	}
2551 
2552 	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
2553 	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2554 			"nqn.2014.08.org.nvmexpress:%04x%04x",
2555 			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2556 	memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2557 	off += sizeof(id->sn);
2558 	memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
2559 	off += sizeof(id->mn);
2560 	memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
2561 }
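
/*
 * Example of a generated NQN (assumed IDs): VID 0x8086 and SSVID 0x8086
 * produce the prefix "nqn.2014.08.org.nvmexpress:80868086", followed by
 * the 20-byte serial number and 40-byte model number copied verbatim,
 * space padding included.
 */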
2562 
2563 static void nvme_release_subsystem(struct device *dev)
2564 {
2565 	struct nvme_subsystem *subsys =
2566 		container_of(dev, struct nvme_subsystem, dev);
2567 
2568 	if (subsys->instance >= 0)
2569 		ida_simple_remove(&nvme_instance_ida, subsys->instance);
2570 	kfree(subsys);
2571 }
2572 
2573 static void nvme_destroy_subsystem(struct kref *ref)
2574 {
2575 	struct nvme_subsystem *subsys =
2576 			container_of(ref, struct nvme_subsystem, ref);
2577 
2578 	mutex_lock(&nvme_subsystems_lock);
2579 	list_del(&subsys->entry);
2580 	mutex_unlock(&nvme_subsystems_lock);
2581 
2582 	ida_destroy(&subsys->ns_ida);
2583 	device_del(&subsys->dev);
2584 	put_device(&subsys->dev);
2585 }
2586 
2587 static void nvme_put_subsystem(struct nvme_subsystem *subsys)
2588 {
2589 	kref_put(&subsys->ref, nvme_destroy_subsystem);
2590 }
2591 
2592 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
2593 {
2594 	struct nvme_subsystem *subsys;
2595 
2596 	lockdep_assert_held(&nvme_subsystems_lock);
2597 
2598 	/*
2599 	 * Fail matches for discovery subsystems. This results
2600 	 * in each discovery controller being bound to a unique subsystem.
2601 	 * This avoids issues with validating controller values
2602 	 * that can only be true when there is a single unique subsystem.
2603 	 * There may be multiple and completely independent entities
2604 	 * that provide discovery controllers.
2605 	 */
2606 	if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
2607 		return NULL;
2608 
2609 	list_for_each_entry(subsys, &nvme_subsystems, entry) {
2610 		if (strcmp(subsys->subnqn, subsysnqn))
2611 			continue;
2612 		if (!kref_get_unless_zero(&subsys->ref))
2613 			continue;
2614 		return subsys;
2615 	}
2616 
2617 	return NULL;
2618 }
2619 
2620 #define SUBSYS_ATTR_RO(_name, _mode, _show)			\
2621 	struct device_attribute subsys_attr_##_name = \
2622 		__ATTR(_name, _mode, _show, NULL)
2623 
2624 static ssize_t nvme_subsys_show_nqn(struct device *dev,
2625 				    struct device_attribute *attr,
2626 				    char *buf)
2627 {
2628 	struct nvme_subsystem *subsys =
2629 		container_of(dev, struct nvme_subsystem, dev);
2630 
2631 	return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
2632 }
2633 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
2634 
2635 #define nvme_subsys_show_str_function(field)				\
2636 static ssize_t subsys_##field##_show(struct device *dev,		\
2637 			    struct device_attribute *attr, char *buf)	\
2638 {									\
2639 	struct nvme_subsystem *subsys =					\
2640 		container_of(dev, struct nvme_subsystem, dev);		\
2641 	return sprintf(buf, "%.*s\n",					\
2642 		       (int)sizeof(subsys->field), subsys->field);	\
2643 }									\
2644 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
2645 
2646 nvme_subsys_show_str_function(model);
2647 nvme_subsys_show_str_function(serial);
2648 nvme_subsys_show_str_function(firmware_rev);
2649 
2650 static struct attribute *nvme_subsys_attrs[] = {
2651 	&subsys_attr_model.attr,
2652 	&subsys_attr_serial.attr,
2653 	&subsys_attr_firmware_rev.attr,
2654 	&subsys_attr_subsysnqn.attr,
2655 #ifdef CONFIG_NVME_MULTIPATH
2656 	&subsys_attr_iopolicy.attr,
2657 #endif
2658 	NULL,
2659 };
2660 
2661 static struct attribute_group nvme_subsys_attrs_group = {
2662 	.attrs = nvme_subsys_attrs,
2663 };
2664 
2665 static const struct attribute_group *nvme_subsys_attrs_groups[] = {
2666 	&nvme_subsys_attrs_group,
2667 	NULL,
2668 };
2669 
2670 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
2671 		struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2672 {
2673 	struct nvme_ctrl *tmp;
2674 
2675 	lockdep_assert_held(&nvme_subsystems_lock);
2676 
2677 	list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
2678 		if (nvme_state_terminal(tmp))
2679 			continue;
2680 
2681 		if (tmp->cntlid == ctrl->cntlid) {
2682 			dev_err(ctrl->device,
2683 				"Duplicate cntlid %u with %s, rejecting\n",
2684 				ctrl->cntlid, dev_name(tmp->device));
2685 			return false;
2686 		}
2687 
2688 		if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
2689 		    (ctrl->opts && ctrl->opts->discovery_nqn))
2690 			continue;
2691 
2692 		dev_err(ctrl->device,
2693 			"Subsystem does not support multiple controllers\n");
2694 		return false;
2695 	}
2696 
2697 	return true;
2698 }
2699 
2700 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2701 {
2702 	struct nvme_subsystem *subsys, *found;
2703 	int ret;
2704 
2705 	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
2706 	if (!subsys)
2707 		return -ENOMEM;
2708 
2709 	subsys->instance = -1;
2710 	mutex_init(&subsys->lock);
2711 	kref_init(&subsys->ref);
2712 	INIT_LIST_HEAD(&subsys->ctrls);
2713 	INIT_LIST_HEAD(&subsys->nsheads);
2714 	nvme_init_subnqn(subsys, ctrl, id);
2715 	memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
2716 	memcpy(subsys->model, id->mn, sizeof(subsys->model));
2717 	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
2718 	subsys->vendor_id = le16_to_cpu(id->vid);
2719 	subsys->cmic = id->cmic;
2720 	subsys->awupf = le16_to_cpu(id->awupf);
2721 #ifdef CONFIG_NVME_MULTIPATH
2722 	subsys->iopolicy = NVME_IOPOLICY_NUMA;
2723 #endif
2724 
2725 	subsys->dev.class = nvme_subsys_class;
2726 	subsys->dev.release = nvme_release_subsystem;
2727 	subsys->dev.groups = nvme_subsys_attrs_groups;
2728 	dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
2729 	device_initialize(&subsys->dev);
2730 
2731 	mutex_lock(&nvme_subsystems_lock);
2732 	found = __nvme_find_get_subsystem(subsys->subnqn);
2733 	if (found) {
2734 		put_device(&subsys->dev);
2735 		subsys = found;
2736 
2737 		if (!nvme_validate_cntlid(subsys, ctrl, id)) {
2738 			ret = -EINVAL;
2739 			goto out_put_subsystem;
2740 		}
2741 	} else {
2742 		ret = device_add(&subsys->dev);
2743 		if (ret) {
2744 			dev_err(ctrl->device,
2745 				"failed to register subsystem device.\n");
2746 			put_device(&subsys->dev);
2747 			goto out_unlock;
2748 		}
2749 		ida_init(&subsys->ns_ida);
2750 		list_add_tail(&subsys->entry, &nvme_subsystems);
2751 	}
2752 
2753 	ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
2754 				dev_name(ctrl->device));
2755 	if (ret) {
2756 		dev_err(ctrl->device,
2757 			"failed to create sysfs link from subsystem.\n");
2758 		goto out_put_subsystem;
2759 	}
2760 
2761 	if (!found)
2762 		subsys->instance = ctrl->instance;
2763 	ctrl->subsys = subsys;
2764 	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
2765 	mutex_unlock(&nvme_subsystems_lock);
2766 	return 0;
2767 
2768 out_put_subsystem:
2769 	nvme_put_subsystem(subsys);
2770 out_unlock:
2771 	mutex_unlock(&nvme_subsystems_lock);
2772 	return ret;
2773 }
2774 
2775 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
2776 		void *log, size_t size, u64 offset)
2777 {
2778 	struct nvme_command c = { };
2779 	u32 dwlen = nvme_bytes_to_numd(size);
2780 
2781 	c.get_log_page.opcode = nvme_admin_get_log_page;
2782 	c.get_log_page.nsid = cpu_to_le32(nsid);
2783 	c.get_log_page.lid = log_page;
2784 	c.get_log_page.lsp = lsp;
2785 	c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
2786 	c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
2787 	c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
2788 	c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
2789 
2790 	return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
2791 }
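
/*
 * Length encoding example: nvme_bytes_to_numd() returns the 0's based
 * dword count, so a 4096-byte log gives dwlen == 1023 (numdl = 1023,
 * numdu = 0) while a 1MiB log gives dwlen == 0x3ffff, split as
 * numdl = 0xffff and numdu = 0x3.
 */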
2792 
2793 static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
2794 {
2795 	int ret;
2796 
2797 	if (!ctrl->effects)
2798 		ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
2799 
2800 	if (!ctrl->effects)
2801 		return 0;
2802 
2803 	ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0,
2804 			ctrl->effects, sizeof(*ctrl->effects), 0);
2805 	if (ret) {
2806 		kfree(ctrl->effects);
2807 		ctrl->effects = NULL;
2808 	}
2809 	return ret;
2810 }
2811 
2812 /*
2813  * Initialize the cached copies of the Identify data and various controller
2814  * registers in our nvme_ctrl structure.  This should be called as soon as
2815  * the admin queue is fully up and running.
2816  */
2817 int nvme_init_identify(struct nvme_ctrl *ctrl)
2818 {
2819 	struct nvme_id_ctrl *id;
2820 	int ret, page_shift;
2821 	u32 max_hw_sectors;
2822 	bool prev_apst_enabled;
2823 
2824 	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
2825 	if (ret) {
2826 		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
2827 		return ret;
2828 	}
2829 	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2830 	ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
2831 
2832 	if (ctrl->vs >= NVME_VS(1, 1, 0))
2833 		ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
2834 
2835 	ret = nvme_identify_ctrl(ctrl, &id);
2836 	if (ret) {
2837 		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
2838 		return -EIO;
2839 	}
2840 
2841 	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
2842 		ret = nvme_get_effects_log(ctrl);
2843 		if (ret < 0)
2844 			goto out_free;
2845 	}
2846 
2847 	if (!(ctrl->ops->flags & NVME_F_FABRICS))
2848 		ctrl->cntlid = le16_to_cpu(id->cntlid);
2849 
2850 	if (!ctrl->identified) {
2851 		int i;
2852 
2853 		ret = nvme_init_subsystem(ctrl, id);
2854 		if (ret)
2855 			goto out_free;
2856 
2857 		/*
2858 		 * Check for quirks.  Quirk can depend on firmware version,
2859 		 * so, in principle, the set of quirks present can change
2860 		 * across a reset.  As a possible future enhancement, we
2861 		 * could re-scan for quirks every time we reinitialize
2862 		 * the device, but we'd have to make sure that the driver
2863 		 * behaves intelligently if the quirks change.
2864 		 */
2865 		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
2866 			if (quirk_matches(id, &core_quirks[i]))
2867 				ctrl->quirks |= core_quirks[i].quirks;
2868 		}
2869 	}
2870 
2871 	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
2872 		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
2873 		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
2874 	}
2875 
2876 	ctrl->crdt[0] = le16_to_cpu(id->crdt1);
2877 	ctrl->crdt[1] = le16_to_cpu(id->crdt2);
2878 	ctrl->crdt[2] = le16_to_cpu(id->crdt3);
2879 
2880 	ctrl->oacs = le16_to_cpu(id->oacs);
2881 	ctrl->oncs = le16_to_cpu(id->oncs);
2882 	ctrl->mtfa = le16_to_cpu(id->mtfa);
2883 	ctrl->oaes = le32_to_cpu(id->oaes);
2884 	ctrl->wctemp = le16_to_cpu(id->wctemp);
2885 	ctrl->cctemp = le16_to_cpu(id->cctemp);
2886 
2887 	atomic_set(&ctrl->abort_limit, id->acl + 1);
2888 	ctrl->vwc = id->vwc;
2889 	if (id->mdts)
2890 		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
2891 	else
2892 		max_hw_sectors = UINT_MAX;
2893 	ctrl->max_hw_sectors =
2894 		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
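
	/*
	 * MDTS example (assumed values): with MPSMIN == 0 (page_shift == 12)
	 * a controller reporting id->mdts == 5 allows transfers of up to
	 * 2^5 * 4KiB = 128KiB, i.e. max_hw_sectors = 1 << (5 + 12 - 9) = 256
	 * 512-byte sectors.
	 */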
2895 
2896 	nvme_set_queue_limits(ctrl, ctrl->admin_q);
2897 	ctrl->sgls = le32_to_cpu(id->sgls);
2898 	ctrl->kas = le16_to_cpu(id->kas);
2899 	ctrl->max_namespaces = le32_to_cpu(id->mnan);
2900 	ctrl->ctratt = le32_to_cpu(id->ctratt);
2901 
2902 	if (id->rtd3e) {
2903 		/* us -> s */
2904 		u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000;
2905 
2906 		ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
2907 						 shutdown_timeout, 60);
2908 
2909 		if (ctrl->shutdown_timeout != shutdown_timeout)
2910 			dev_info(ctrl->device,
2911 				 "Shutdown timeout set to %u seconds\n",
2912 				 ctrl->shutdown_timeout);
2913 	} else
2914 		ctrl->shutdown_timeout = shutdown_timeout;
2915 
2916 	ctrl->npss = id->npss;
2917 	ctrl->apsta = id->apsta;
2918 	prev_apst_enabled = ctrl->apst_enabled;
2919 	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
2920 		if (force_apst && id->apsta) {
2921 			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
2922 			ctrl->apst_enabled = true;
2923 		} else {
2924 			ctrl->apst_enabled = false;
2925 		}
2926 	} else {
2927 		ctrl->apst_enabled = id->apsta;
2928 	}
2929 	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
2930 
2931 	if (ctrl->ops->flags & NVME_F_FABRICS) {
2932 		ctrl->icdoff = le16_to_cpu(id->icdoff);
2933 		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
2934 		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
2935 		ctrl->maxcmd = le16_to_cpu(id->maxcmd);
2936 
2937 		/*
2938 		 * In fabrics we need to verify the cntlid matches the
2939 		 * admin connect
2940 		 */
2941 		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
2942 			dev_err(ctrl->device,
2943 				"Mismatching cntlid: Connect %u vs Identify "
2944 				"%u, rejecting\n",
2945 				ctrl->cntlid, le16_to_cpu(id->cntlid));
2946 			ret = -EINVAL;
2947 			goto out_free;
2948 		}
2949 
2950 		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
2951 			dev_err(ctrl->device,
2952 				"keep-alive support is mandatory for fabrics\n");
2953 			ret = -EINVAL;
2954 			goto out_free;
2955 		}
2956 	} else {
2957 		ctrl->hmpre = le32_to_cpu(id->hmpre);
2958 		ctrl->hmmin = le32_to_cpu(id->hmmin);
2959 		ctrl->hmminds = le32_to_cpu(id->hmminds);
2960 		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
2961 	}
2962 
2963 	ret = nvme_mpath_init(ctrl, id);
2964 	kfree(id);
2965 
2966 	if (ret < 0)
2967 		return ret;
2968 
2969 	if (ctrl->apst_enabled && !prev_apst_enabled)
2970 		dev_pm_qos_expose_latency_tolerance(ctrl->device);
2971 	else if (!ctrl->apst_enabled && prev_apst_enabled)
2972 		dev_pm_qos_hide_latency_tolerance(ctrl->device);
2973 
2974 	ret = nvme_configure_apst(ctrl);
2975 	if (ret < 0)
2976 		return ret;
2977 
2978 	ret = nvme_configure_timestamp(ctrl);
2979 	if (ret < 0)
2980 		return ret;
2981 
2982 	ret = nvme_configure_directives(ctrl);
2983 	if (ret < 0)
2984 		return ret;
2985 
2986 	ret = nvme_configure_acre(ctrl);
2987 	if (ret < 0)
2988 		return ret;
2989 
2990 	if (!ctrl->identified)
2991 		nvme_hwmon_init(ctrl);
2992 
2993 	ctrl->identified = true;
2994 
2995 	return 0;
2996 
2997 out_free:
2998 	kfree(id);
2999 	return ret;
3000 }
3001 EXPORT_SYMBOL_GPL(nvme_init_identify);
3002 
3003 static int nvme_dev_open(struct inode *inode, struct file *file)
3004 {
3005 	struct nvme_ctrl *ctrl =
3006 		container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3007 
3008 	switch (ctrl->state) {
3009 	case NVME_CTRL_LIVE:
3010 		break;
3011 	default:
3012 		return -EWOULDBLOCK;
3013 	}
3014 
3015 	file->private_data = ctrl;
3016 	return 0;
3017 }
3018 
3019 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
3020 {
3021 	struct nvme_ns *ns;
3022 	int ret;
3023 
3024 	down_read(&ctrl->namespaces_rwsem);
3025 	if (list_empty(&ctrl->namespaces)) {
3026 		ret = -ENOTTY;
3027 		goto out_unlock;
3028 	}
3029 
3030 	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
3031 	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
3032 		dev_warn(ctrl->device,
3033 			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
3034 		ret = -EINVAL;
3035 		goto out_unlock;
3036 	}
3037 
3038 	dev_warn(ctrl->device,
3039 		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
3040 	kref_get(&ns->kref);
3041 	up_read(&ctrl->namespaces_rwsem);
3042 
3043 	ret = nvme_user_cmd(ctrl, ns, argp);
3044 	nvme_put_ns(ns);
3045 	return ret;
3046 
3047 out_unlock:
3048 	up_read(&ctrl->namespaces_rwsem);
3049 	return ret;
3050 }
3051 
3052 static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
3053 		unsigned long arg)
3054 {
3055 	struct nvme_ctrl *ctrl = file->private_data;
3056 	void __user *argp = (void __user *)arg;
3057 
3058 	switch (cmd) {
3059 	case NVME_IOCTL_ADMIN_CMD:
3060 		return nvme_user_cmd(ctrl, NULL, argp);
3061 	case NVME_IOCTL_ADMIN64_CMD:
3062 		return nvme_user_cmd64(ctrl, NULL, argp);
3063 	case NVME_IOCTL_IO_CMD:
3064 		return nvme_dev_user_cmd(ctrl, argp);
3065 	case NVME_IOCTL_RESET:
3066 		dev_warn(ctrl->device, "resetting controller\n");
3067 		return nvme_reset_ctrl_sync(ctrl);
3068 	case NVME_IOCTL_SUBSYS_RESET:
3069 		return nvme_reset_subsystem(ctrl);
3070 	case NVME_IOCTL_RESCAN:
3071 		nvme_queue_scan(ctrl);
3072 		return 0;
3073 	default:
3074 		return -ENOTTY;
3075 	}
3076 }
3077 
3078 static const struct file_operations nvme_dev_fops = {
3079 	.owner		= THIS_MODULE,
3080 	.open		= nvme_dev_open,
3081 	.unlocked_ioctl	= nvme_dev_ioctl,
3082 	.compat_ioctl	= compat_ptr_ioctl,
3083 };
3084 
3085 static ssize_t nvme_sysfs_reset(struct device *dev,
3086 				struct device_attribute *attr, const char *buf,
3087 				size_t count)
3088 {
3089 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3090 	int ret;
3091 
3092 	ret = nvme_reset_ctrl_sync(ctrl);
3093 	if (ret < 0)
3094 		return ret;
3095 	return count;
3096 }
3097 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
3098 
3099 static ssize_t nvme_sysfs_rescan(struct device *dev,
3100 				struct device_attribute *attr, const char *buf,
3101 				size_t count)
3102 {
3103 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3104 
3105 	nvme_queue_scan(ctrl);
3106 	return count;
3107 }
3108 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
3109 
3110 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
3111 {
3112 	struct gendisk *disk = dev_to_disk(dev);
3113 
3114 	if (disk->fops == &nvme_fops)
3115 		return nvme_get_ns_from_dev(dev)->head;
3116 	else
3117 		return disk->private_data;
3118 }
3119 
3120 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
3121 		char *buf)
3122 {
3123 	struct nvme_ns_head *head = dev_to_ns_head(dev);
3124 	struct nvme_ns_ids *ids = &head->ids;
3125 	struct nvme_subsystem *subsys = head->subsys;
3126 	int serial_len = sizeof(subsys->serial);
3127 	int model_len = sizeof(subsys->model);
3128 
3129 	if (!uuid_is_null(&ids->uuid))
3130 		return sprintf(buf, "uuid.%pU\n", &ids->uuid);
3131 
3132 	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3133 		return sprintf(buf, "eui.%16phN\n", ids->nguid);
3134 
3135 	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3136 		return sprintf(buf, "eui.%8phN\n", ids->eui64);
3137 
3138 	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
3139 				  subsys->serial[serial_len - 1] == '\0'))
3140 		serial_len--;
3141 	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
3142 				 subsys->model[model_len - 1] == '\0'))
3143 		model_len--;
3144 
3145 	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
3146 		serial_len, subsys->serial, model_len, subsys->model,
3147 		head->ns_id);
3148 }
3149 static DEVICE_ATTR_RO(wwid);
3150 
3151 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
3152 		char *buf)
3153 {
3154 	return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
3155 }
3156 static DEVICE_ATTR_RO(nguid);
3157 
3158 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
3159 		char *buf)
3160 {
3161 	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3162 
3163 	/* For backward compatibility expose the NGUID to userspace if
3164 	 * we have no UUID set.
3165 	 */
3166 	if (uuid_is_null(&ids->uuid)) {
3167 		printk_ratelimited(KERN_WARNING
3168 				   "No UUID available providing old NGUID\n");
3169 		return sprintf(buf, "%pU\n", ids->nguid);
3170 	}
3171 	return sprintf(buf, "%pU\n", &ids->uuid);
3172 }
3173 static DEVICE_ATTR_RO(uuid);
3174 
3175 static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
3176 		char *buf)
3177 {
3178 	return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
3179 }
3180 static DEVICE_ATTR_RO(eui);
3181 
3182 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
3183 		char *buf)
3184 {
3185 	return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
3186 }
3187 static DEVICE_ATTR_RO(nsid);
3188 
3189 static struct attribute *nvme_ns_id_attrs[] = {
3190 	&dev_attr_wwid.attr,
3191 	&dev_attr_uuid.attr,
3192 	&dev_attr_nguid.attr,
3193 	&dev_attr_eui.attr,
3194 	&dev_attr_nsid.attr,
3195 #ifdef CONFIG_NVME_MULTIPATH
3196 	&dev_attr_ana_grpid.attr,
3197 	&dev_attr_ana_state.attr,
3198 #endif
3199 	NULL,
3200 };
3201 
3202 static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
3203 		struct attribute *a, int n)
3204 {
3205 	struct device *dev = container_of(kobj, struct device, kobj);
3206 	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3207 
3208 	if (a == &dev_attr_uuid.attr) {
3209 		if (uuid_is_null(&ids->uuid) &&
3210 		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3211 			return 0;
3212 	}
3213 	if (a == &dev_attr_nguid.attr) {
3214 		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3215 			return 0;
3216 	}
3217 	if (a == &dev_attr_eui.attr) {
3218 		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3219 			return 0;
3220 	}
3221 #ifdef CONFIG_NVME_MULTIPATH
3222 	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
3223 		if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */
3224 			return 0;
3225 		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
3226 			return 0;
3227 	}
3228 #endif
3229 	return a->mode;
3230 }
3231 
3232 static const struct attribute_group nvme_ns_id_attr_group = {
3233 	.attrs		= nvme_ns_id_attrs,
3234 	.is_visible	= nvme_ns_id_attrs_are_visible,
3235 };
3236 
3237 const struct attribute_group *nvme_ns_id_attr_groups[] = {
3238 	&nvme_ns_id_attr_group,
3239 #ifdef CONFIG_NVM
3240 	&nvme_nvm_attr_group,
3241 #endif
3242 	NULL,
3243 };
3244 
3245 #define nvme_show_str_function(field)						\
3246 static ssize_t  field##_show(struct device *dev,				\
3247 			    struct device_attribute *attr, char *buf)		\
3248 {										\
3249         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
3250         return sprintf(buf, "%.*s\n",						\
3251 		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);		\
3252 }										\
3253 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3254 
3255 nvme_show_str_function(model);
3256 nvme_show_str_function(serial);
3257 nvme_show_str_function(firmware_rev);
3258 
3259 #define nvme_show_int_function(field)						\
3260 static ssize_t  field##_show(struct device *dev,				\
3261 			    struct device_attribute *attr, char *buf)		\
3262 {										\
3263         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
3264         return sprintf(buf, "%d\n", ctrl->field);	\
3265 }										\
3266 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3267 
3268 nvme_show_int_function(cntlid);
3269 nvme_show_int_function(numa_node);
3270 nvme_show_int_function(queue_count);
3271 nvme_show_int_function(sqsize);
3272 
3273 static ssize_t nvme_sysfs_delete(struct device *dev,
3274 				struct device_attribute *attr, const char *buf,
3275 				size_t count)
3276 {
3277 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3278 
3279 	/* Can't delete non-created controllers */
3280 	if (!ctrl->created)
3281 		return -EBUSY;
3282 
3283 	if (device_remove_file_self(dev, attr))
3284 		nvme_delete_ctrl_sync(ctrl);
3285 	return count;
3286 }
3287 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
3288 
3289 static ssize_t nvme_sysfs_show_transport(struct device *dev,
3290 					 struct device_attribute *attr,
3291 					 char *buf)
3292 {
3293 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3294 
3295 	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
3296 }
3297 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
3298 
3299 static ssize_t nvme_sysfs_show_state(struct device *dev,
3300 				     struct device_attribute *attr,
3301 				     char *buf)
3302 {
3303 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3304 	static const char *const state_name[] = {
3305 		[NVME_CTRL_NEW]		= "new",
3306 		[NVME_CTRL_LIVE]	= "live",
3307 		[NVME_CTRL_RESETTING]	= "resetting",
3308 		[NVME_CTRL_CONNECTING]	= "connecting",
3309 		[NVME_CTRL_DELETING]	= "deleting",
3310 		[NVME_CTRL_DEAD]	= "dead",
3311 	};
3312 
3313 	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
3314 	    state_name[ctrl->state])
3315 		return sprintf(buf, "%s\n", state_name[ctrl->state]);
3316 
3317 	return sprintf(buf, "unknown state\n");
3318 }
3319 
3320 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
3321 
3322 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
3323 					 struct device_attribute *attr,
3324 					 char *buf)
3325 {
3326 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3327 
3328 	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
3329 }
3330 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
3331 
3332 static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
3333 					struct device_attribute *attr,
3334 					char *buf)
3335 {
3336 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3337 
3338 	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn);
3339 }
3340 static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
3341 
3342 static ssize_t nvme_sysfs_show_hostid(struct device *dev,
3343 					struct device_attribute *attr,
3344 					char *buf)
3345 {
3346 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3347 
3348 	return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id);
3349 }
3350 static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
3351 
3352 static ssize_t nvme_sysfs_show_address(struct device *dev,
3353 					 struct device_attribute *attr,
3354 					 char *buf)
3355 {
3356 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3357 
3358 	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
3359 }
3360 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
3361 
3362 static struct attribute *nvme_dev_attrs[] = {
3363 	&dev_attr_reset_controller.attr,
3364 	&dev_attr_rescan_controller.attr,
3365 	&dev_attr_model.attr,
3366 	&dev_attr_serial.attr,
3367 	&dev_attr_firmware_rev.attr,
3368 	&dev_attr_cntlid.attr,
3369 	&dev_attr_delete_controller.attr,
3370 	&dev_attr_transport.attr,
3371 	&dev_attr_subsysnqn.attr,
3372 	&dev_attr_address.attr,
3373 	&dev_attr_state.attr,
3374 	&dev_attr_numa_node.attr,
3375 	&dev_attr_queue_count.attr,
3376 	&dev_attr_sqsize.attr,
3377 	&dev_attr_hostnqn.attr,
3378 	&dev_attr_hostid.attr,
3379 	NULL
3380 };
3381 
3382 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
3383 		struct attribute *a, int n)
3384 {
3385 	struct device *dev = container_of(kobj, struct device, kobj);
3386 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3387 
3388 	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
3389 		return 0;
3390 	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
3391 		return 0;
3392 	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
3393 		return 0;
3394 	if (a == &dev_attr_hostid.attr && !ctrl->opts)
3395 		return 0;
3396 
3397 	return a->mode;
3398 }
3399 
3400 static struct attribute_group nvme_dev_attrs_group = {
3401 	.attrs		= nvme_dev_attrs,
3402 	.is_visible	= nvme_dev_attrs_are_visible,
3403 };
3404 
3405 static const struct attribute_group *nvme_dev_attr_groups[] = {
3406 	&nvme_dev_attrs_group,
3407 	NULL,
3408 };
3409 
3410 static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
3411 		unsigned nsid)
3412 {
3413 	struct nvme_ns_head *h;
3414 
3415 	lockdep_assert_held(&subsys->lock);
3416 
3417 	list_for_each_entry(h, &subsys->nsheads, entry) {
3418 		if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
3419 			return h;
3420 	}
3421 
3422 	return NULL;
3423 }
3424 
3425 static int __nvme_check_ids(struct nvme_subsystem *subsys,
3426 		struct nvme_ns_head *new)
3427 {
3428 	struct nvme_ns_head *h;
3429 
3430 	lockdep_assert_held(&subsys->lock);
3431 
3432 	list_for_each_entry(h, &subsys->nsheads, entry) {
3433 		if (nvme_ns_ids_valid(&new->ids) &&
3434 		    nvme_ns_ids_equal(&new->ids, &h->ids))
3435 			return -EINVAL;
3436 	}
3437 
3438 	return 0;
3439 }
3440 
3441 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
3442 		unsigned nsid, struct nvme_ns_ids *ids)
3443 {
3444 	struct nvme_ns_head *head;
3445 	size_t size = sizeof(*head);
3446 	int ret = -ENOMEM;
3447 
3448 #ifdef CONFIG_NVME_MULTIPATH
3449 	size += num_possible_nodes() * sizeof(struct nvme_ns *);
3450 #endif
3451 
3452 	head = kzalloc(size, GFP_KERNEL);
3453 	if (!head)
3454 		goto out;
3455 	ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
3456 	if (ret < 0)
3457 		goto out_free_head;
3458 	head->instance = ret;
3459 	INIT_LIST_HEAD(&head->list);
3460 	ret = init_srcu_struct(&head->srcu);
3461 	if (ret)
3462 		goto out_ida_remove;
3463 	head->subsys = ctrl->subsys;
3464 	head->ns_id = nsid;
3465 	head->ids = *ids;
3466 	kref_init(&head->ref);
3467 
3468 	ret = __nvme_check_ids(ctrl->subsys, head);
3469 	if (ret) {
3470 		dev_err(ctrl->device,
3471 			"duplicate IDs for nsid %d\n", nsid);
3472 		goto out_cleanup_srcu;
3473 	}
3474 
3475 	ret = nvme_mpath_alloc_disk(ctrl, head);
3476 	if (ret)
3477 		goto out_cleanup_srcu;
3478 
3479 	list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3480 
3481 	kref_get(&ctrl->subsys->ref);
3482 
3483 	return head;
3484 out_cleanup_srcu:
3485 	cleanup_srcu_struct(&head->srcu);
3486 out_ida_remove:
3487 	ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
3488 out_free_head:
3489 	kfree(head);
3490 out:
3491 	if (ret > 0)
3492 		ret = blk_status_to_errno(nvme_error_status(ret));
3493 	return ERR_PTR(ret);
3494 }
3495 
3496 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
3497 		struct nvme_id_ns *id)
3498 {
3499 	struct nvme_ctrl *ctrl = ns->ctrl;
3500 	bool is_shared = id->nmic & NVME_NS_NMIC_SHARED;
3501 	struct nvme_ns_head *head = NULL;
3502 	struct nvme_ns_ids ids;
3503 	int ret = 0;
3504 
3505 	ret = nvme_report_ns_ids(ctrl, nsid, id, &ids);
3506 	if (ret) {
3507 		if (ret < 0)
3508 			return ret;
3509 		return blk_status_to_errno(nvme_error_status(ret));
3510 	}
3511 
3512 	mutex_lock(&ctrl->subsys->lock);
3513 	head = nvme_find_ns_head(ctrl->subsys, nsid);
3514 	if (!head) {
3515 		head = nvme_alloc_ns_head(ctrl, nsid, &ids);
3516 		if (IS_ERR(head)) {
3517 			ret = PTR_ERR(head);
3518 			goto out_unlock;
3519 		}
3520 		head->shared = is_shared;
3521 	} else {
3522 		ret = -EINVAL;
3523 		if (!is_shared || !head->shared) {
3524 			dev_err(ctrl->device,
3525 				"Duplicate unshared namespace %d\n", nsid);
3526 			goto out_put_ns_head;
3527 		}
3528 		if (!nvme_ns_ids_equal(&head->ids, &ids)) {
3529 			dev_err(ctrl->device,
3530 				"IDs don't match for shared namespace %d\n",
3531 					nsid);
3532 			goto out_put_ns_head;
3533 		}
3534 	}
3535 
3536 	list_add_tail(&ns->siblings, &head->list);
3537 	ns->head = head;
3538 	mutex_unlock(&ctrl->subsys->lock);
3539 	return 0;
3540 
3541 out_put_ns_head:
3542 	nvme_put_ns_head(head);
3543 out_unlock:
3544 	mutex_unlock(&ctrl->subsys->lock);
3545 	return ret;
3546 }
3547 
3548 static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
3549 {
3550 	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
3551 	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
3552 
3553 	return nsa->head->ns_id - nsb->head->ns_id;
3554 }
3555 
3556 static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3557 {
3558 	struct nvme_ns *ns, *ret = NULL;
3559 
3560 	down_read(&ctrl->namespaces_rwsem);
3561 	list_for_each_entry(ns, &ctrl->namespaces, list) {
3562 		if (ns->head->ns_id == nsid) {
3563 			if (!kref_get_unless_zero(&ns->kref))
3564 				continue;
3565 			ret = ns;
3566 			break;
3567 		}
3568 		if (ns->head->ns_id > nsid)
3569 			break;
3570 	}
3571 	up_read(&ctrl->namespaces_rwsem);
3572 	return ret;
3573 }
3574 
3575 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3576 {
3577 	struct nvme_ns *ns;
3578 	struct gendisk *disk;
3579 	struct nvme_id_ns *id;
3580 	char disk_name[DISK_NAME_LEN];
3581 	int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret;
3582 
3583 	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
3584 	if (!ns)
3585 		return;
3586 
3587 	ns->queue = blk_mq_init_queue(ctrl->tagset);
3588 	if (IS_ERR(ns->queue))
3589 		goto out_free_ns;
3590 
3591 	if (ctrl->opts && ctrl->opts->data_digest)
3592 		ns->queue->backing_dev_info->capabilities
3593 			|= BDI_CAP_STABLE_WRITES;
3594 
3595 	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3596 	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
3597 		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
3598 
3599 	ns->queue->queuedata = ns;
3600 	ns->ctrl = ctrl;
3601 
3602 	kref_init(&ns->kref);
3603 	ns->lba_shift = 9; /* set to a default value of 512 bytes until the disk is validated */
3604 
3605 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
3606 	nvme_set_queue_limits(ctrl, ns->queue);
3607 
3608 	ret = nvme_identify_ns(ctrl, nsid, &id);
3609 	if (ret)
3610 		goto out_free_queue;
3611 
3612 	if (id->ncap == 0)	/* no namespace (legacy quirk) */
3613 		goto out_free_id;
3614 
3615 	ret = nvme_init_ns_head(ns, nsid, id);
3616 	if (ret)
3617 		goto out_free_id;
3618 	nvme_set_disk_name(disk_name, ns, ctrl, &flags);
3619 
3620 	disk = alloc_disk_node(0, node);
3621 	if (!disk)
3622 		goto out_unlink_ns;
3623 
3624 	disk->fops = &nvme_fops;
3625 	disk->private_data = ns;
3626 	disk->queue = ns->queue;
3627 	disk->flags = flags;
3628 	memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
3629 	ns->disk = disk;
3630 
3631 	__nvme_revalidate_disk(disk, id);
3632 
3633 	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
3634 		ret = nvme_nvm_register(ns, disk_name, node);
3635 		if (ret) {
3636 			dev_warn(ctrl->device, "LightNVM init failure\n");
3637 			goto out_put_disk;
3638 		}
3639 	}
3640 
3641 	down_write(&ctrl->namespaces_rwsem);
3642 	list_add_tail(&ns->list, &ctrl->namespaces);
3643 	up_write(&ctrl->namespaces_rwsem);
3644 
3645 	nvme_get_ctrl(ctrl);
3646 
3647 	device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
3648 
3649 	nvme_mpath_add_disk(ns, id);
3650 	nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
3651 	kfree(id);
3652 
3653 	return;
3654  out_put_disk:
3655 	/* prevent double queue cleanup */
3656 	ns->disk->queue = NULL;
3657 	put_disk(ns->disk);
3658  out_unlink_ns:
3659 	mutex_lock(&ctrl->subsys->lock);
3660 	list_del_rcu(&ns->siblings);
3661 	if (list_empty(&ns->head->list))
3662 		list_del_init(&ns->head->entry);
3663 	mutex_unlock(&ctrl->subsys->lock);
3664 	nvme_put_ns_head(ns->head);
3665  out_free_id:
3666 	kfree(id);
3667  out_free_queue:
3668 	blk_cleanup_queue(ns->queue);
3669  out_free_ns:
3670 	kfree(ns);
3671 }
3672 
3673 static void nvme_ns_remove(struct nvme_ns *ns)
3674 {
3675 	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
3676 		return;
3677 
3678 	nvme_fault_inject_fini(&ns->fault_inject);
3679 
3680 	mutex_lock(&ns->ctrl->subsys->lock);
3681 	list_del_rcu(&ns->siblings);
3682 	if (list_empty(&ns->head->list))
3683 		list_del_init(&ns->head->entry);
3684 	mutex_unlock(&ns->ctrl->subsys->lock);
3685 
3686 	synchronize_rcu(); /* guarantee not available in head->list */
3687 	nvme_mpath_clear_current_path(ns);
3688 	synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
3689 
3690 	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
3691 		del_gendisk(ns->disk);
3692 		blk_cleanup_queue(ns->queue);
3693 		if (blk_get_integrity(ns->disk))
3694 			blk_integrity_unregister(ns->disk);
3695 	}
3696 
3697 	down_write(&ns->ctrl->namespaces_rwsem);
3698 	list_del_init(&ns->list);
3699 	up_write(&ns->ctrl->namespaces_rwsem);
3700 
3701 	nvme_mpath_check_last_path(ns);
3702 	nvme_put_ns(ns);
3703 }
3704 
3705 static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
3706 {
3707 	struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);
3708 
3709 	if (ns) {
3710 		nvme_ns_remove(ns);
3711 		nvme_put_ns(ns);
3712 	}
3713 }
3714 
3715 static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3716 {
3717 	struct nvme_ns *ns;
3718 
3719 	ns = nvme_find_get_ns(ctrl, nsid);
3720 	if (ns) {
3721 		if (ns->disk && revalidate_disk(ns->disk))
3722 			nvme_ns_remove(ns);
3723 		nvme_put_ns(ns);
3724 	} else
3725 		nvme_alloc_ns(ctrl, nsid);
3726 }
3727 
3728 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
3729 					unsigned nsid)
3730 {
3731 	struct nvme_ns *ns, *next;
3732 	LIST_HEAD(rm_list);
3733 
3734 	down_write(&ctrl->namespaces_rwsem);
3735 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
3736 		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
3737 			list_move_tail(&ns->list, &rm_list);
3738 	}
3739 	up_write(&ctrl->namespaces_rwsem);
3740 
3741 	list_for_each_entry_safe(ns, next, &rm_list, list)
3742 		nvme_ns_remove(ns);
3744 }
3745 
3746 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
3747 {
3748 	const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
3749 	__le32 *ns_list;
3750 	u32 prev = 0;
3751 	int ret = 0, i;
3752 
3753 	if (nvme_ctrl_limited_cns(ctrl))
3754 		return -EOPNOTSUPP;
3755 
3756 	ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
3757 	if (!ns_list)
3758 		return -ENOMEM;
3759 
3760 	for (;;) {
3761 		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
3762 		if (ret)
3763 			goto free;
3764 
3765 		for (i = 0; i < nr_entries; i++) {
3766 			u32 nsid = le32_to_cpu(ns_list[i]);
3767 
3768 			if (!nsid)	/* end of the list? */
3769 				goto out;
3770 			nvme_validate_ns(ctrl, nsid);
3771 			while (++prev < nsid)
3772 				nvme_ns_remove_by_nsid(ctrl, prev);
3773 		}
3774 	}
3775  out:
3776 	nvme_remove_invalid_namespaces(ctrl, prev);
3777  free:
3778 	kfree(ns_list);
3779 	return ret;
3780 }
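
/*
 * Worked example for the list-based scan above (annotation only).  Each
 * Identify "Active Namespace ID list" page holds up to
 * NVME_IDENTIFY_DATA_SIZE / sizeof(__le32) == 4096 / 4 == 1024 NSIDs,
 * ascending, all greater than 'prev', zero-terminated.  For a controller
 * whose active NSIDs are {1, 4, 5}:
 *
 *	page for prev=0 returns [1, 4, 5, 0, ...]
 *	  nsid=1: validate ns 1, prev = 1
 *	  nsid=4: validate ns 4, then remove stale nsids 2 and 3, prev = 4
 *	  nsid=5: validate ns 5, prev = 5
 *	  nsid=0: end of list
 *	nvme_remove_invalid_namespaces(ctrl, 5) then drops anything with
 *	nsid > 5 (or marked NVME_NS_DEAD).
 */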
3781 
3782 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
3783 {
3784 	struct nvme_id_ctrl *id;
3785 	u32 nn, i;
3786 
3787 	if (nvme_identify_ctrl(ctrl, &id))
3788 		return;
3789 	nn = le32_to_cpu(id->nn);
3790 	kfree(id);
3791 
3792 	for (i = 1; i <= nn; i++)
3793 		nvme_validate_ns(ctrl, i);
3794 
3795 	nvme_remove_invalid_namespaces(ctrl, nn);
3796 }
3797 
3798 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
3799 {
3800 	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
3801 	__le32 *log;
3802 	int error;
3803 
3804 	log = kzalloc(log_size, GFP_KERNEL);
3805 	if (!log)
3806 		return;
3807 
3808 	/*
3809 	 * We need to read the log to clear the AEN, but we don't want to rely
3810 	 * on it for the changed namespace information as userspace could have
3811 	 * raced with us in reading the log page, which could cause us to miss
3812 	 * updates.
3813 	 */
3814 	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log,
3815 			log_size, 0);
3816 	if (error)
3817 		dev_warn(ctrl->device,
3818 			"reading changed ns log failed: %d\n", error);
3819 
3820 	kfree(log);
3821 }
3822 
3823 static void nvme_scan_work(struct work_struct *work)
3824 {
3825 	struct nvme_ctrl *ctrl =
3826 		container_of(work, struct nvme_ctrl, scan_work);
3827 
3828 	/* No tagset on a live ctrl means IO queues could not be created */
3829 	if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
3830 		return;
3831 
3832 	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
3833 		dev_info(ctrl->device, "rescanning namespaces.\n");
3834 		nvme_clear_changed_ns_log(ctrl);
3835 	}
3836 
3837 	mutex_lock(&ctrl->scan_lock);
3838 	if (nvme_scan_ns_list(ctrl) != 0)
3839 		nvme_scan_ns_sequential(ctrl);
3840 	mutex_unlock(&ctrl->scan_lock);
3841 
3842 	down_write(&ctrl->namespaces_rwsem);
3843 	list_sort(NULL, &ctrl->namespaces, ns_cmp);
3844 	up_write(&ctrl->namespaces_rwsem);
3845 }
3846 
3847 /*
3848  * This function iterates the namespace list unlocked to allow recovery from
3849  * controller failure. It is up to the caller to ensure the namespace list is
3850  * not modified by scan work while this function is executing.
3851  */
3852 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
3853 {
3854 	struct nvme_ns *ns, *next;
3855 	LIST_HEAD(ns_list);
3856 
3857 	/*
3858 	 * Make sure to requeue I/O to all namespaces: such requests
3859 	 * might result from the scan itself and must complete for
3860 	 * scan_work to make progress.
3861 	 */
3862 	nvme_mpath_clear_ctrl_paths(ctrl);
3863 
3864 	/* prevent racing with ns scanning */
3865 	flush_work(&ctrl->scan_work);
3866 
3867 	/*
3868 	 * The dead state indicates that the controller was not gracefully
3869 	 * disconnected. In that case, we won't be able to flush any data while
3870 	 * removing the namespaces' disks; fail all the queues now to avoid
3871 	 * potentially having to clean up the failed sync later.
3872 	 */
3873 	if (ctrl->state == NVME_CTRL_DEAD)
3874 		nvme_kill_queues(ctrl);
3875 
3876 	down_write(&ctrl->namespaces_rwsem);
3877 	list_splice_init(&ctrl->namespaces, &ns_list);
3878 	up_write(&ctrl->namespaces_rwsem);
3879 
3880 	list_for_each_entry_safe(ns, next, &ns_list, list)
3881 		nvme_ns_remove(ns);
3882 }
3883 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
3884 
3885 static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
3886 {
3887 	struct nvme_ctrl *ctrl =
3888 		container_of(dev, struct nvme_ctrl, ctrl_device);
3889 	struct nvmf_ctrl_options *opts = ctrl->opts;
3890 	int ret;
3891 
3892 	ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
3893 	if (ret)
3894 		return ret;
3895 
3896 	if (opts) {
3897 		ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
3898 		if (ret)
3899 			return ret;
3900 
3901 		ret = add_uevent_var(env, "NVME_TRSVCID=%s",
3902 				opts->trsvcid ?: "none");
3903 		if (ret)
3904 			return ret;
3905 
3906 		ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
3907 				opts->host_traddr ?: "none");
3908 	}
3909 	return ret;
3910 }
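
/*
 * Example environment generated above for a fabrics controller (values
 * purely illustrative):
 *
 *	NVME_TRTYPE=tcp
 *	NVME_TRADDR=192.168.1.10
 *	NVME_TRSVCID=4420
 *	NVME_HOST_TRADDR=none
 *
 * PCIe controllers have no ctrl->opts, so only NVME_TRTYPE is emitted.
 */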
3911 
3912 static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
3913 {
3914 	char *envp[2] = { NULL, NULL };
3915 	u32 aen_result = ctrl->aen_result;
3916 
3917 	ctrl->aen_result = 0;
3918 	if (!aen_result)
3919 		return;
3920 
3921 	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
3922 	if (!envp[0])
3923 		return;
3924 	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
3925 	kfree(envp[0]);
3926 }
3927 
3928 static void nvme_async_event_work(struct work_struct *work)
3929 {
3930 	struct nvme_ctrl *ctrl =
3931 		container_of(work, struct nvme_ctrl, async_event_work);
3932 
3933 	nvme_aen_uevent(ctrl);
3934 	ctrl->ops->submit_async_event(ctrl);
3935 }
3936 
3937 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
3938 {
3940 	u32 csts;
3941 
3942 	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
3943 		return false;
3944 
3945 	if (csts == ~0)
3946 		return false;
3947 
3948 	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
3949 }
3950 
3951 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
3952 {
3953 	struct nvme_fw_slot_info_log *log;
3954 
3955 	log = kmalloc(sizeof(*log), GFP_KERNEL);
3956 	if (!log)
3957 		return;
3958 
3959 	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
3960 			sizeof(*log), 0))
3961 		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
3962 	kfree(log);
3963 }
3964 
3965 static void nvme_fw_act_work(struct work_struct *work)
3966 {
3967 	struct nvme_ctrl *ctrl = container_of(work,
3968 				struct nvme_ctrl, fw_act_work);
3969 	unsigned long fw_act_timeout;
3970 
3971 	if (ctrl->mtfa)
3972 		fw_act_timeout = jiffies +
3973 				msecs_to_jiffies(ctrl->mtfa * 100);
3974 	else
3975 		fw_act_timeout = jiffies +
3976 				msecs_to_jiffies(admin_timeout * 1000);
3977 
3978 	nvme_stop_queues(ctrl);
3979 	while (nvme_ctrl_pp_status(ctrl)) {
3980 		if (time_after(jiffies, fw_act_timeout)) {
3981 			dev_warn(ctrl->device,
3982 				"Fw activation timeout, reset controller\n");
3983 			nvme_try_sched_reset(ctrl);
3984 			return;
3985 		}
3986 		msleep(100);
3987 	}
3988 
3989 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
3990 		return;
3991 
3992 	nvme_start_queues(ctrl);
3993 	/* read FW slot information to clear the AER */
3994 	nvme_get_fw_slot_info(ctrl);
3995 }
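
/*
 * Timeout arithmetic for the wait loop above (annotation only): MTFA
 * from Identify Controller is in units of 100 milliseconds, hence the
 * "* 100" before msecs_to_jiffies().  A controller reporting mtfa == 30
 * gets 30 * 100 ms == 3 seconds to finish activation; mtfa == 0 falls
 * back to admin_timeout, 60 seconds by default.
 */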
3996 
3997 static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
3998 {
3999 	u32 aer_notice_type = (result & 0xff00) >> 8;
4000 
4001 	trace_nvme_async_event(ctrl, aer_notice_type);
4002 
4003 	switch (aer_notice_type) {
4004 	case NVME_AER_NOTICE_NS_CHANGED:
4005 		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
4006 		nvme_queue_scan(ctrl);
4007 		break;
4008 	case NVME_AER_NOTICE_FW_ACT_STARTING:
4009 		/*
4010 		 * We are (ab)using the RESETTING state to prevent subsequent
4011 		 * recovery actions from interfering with the controller's
4012 		 * firmware activation.
4013 		 */
4014 		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
4015 			queue_work(nvme_wq, &ctrl->fw_act_work);
4016 		break;
4017 #ifdef CONFIG_NVME_MULTIPATH
4018 	case NVME_AER_NOTICE_ANA:
4019 		if (!ctrl->ana_log_buf)
4020 			break;
4021 		queue_work(nvme_wq, &ctrl->ana_work);
4022 		break;
4023 #endif
4024 	case NVME_AER_NOTICE_DISC_CHANGED:
4025 		ctrl->aen_result = result;
4026 		break;
4027 	default:
4028 		dev_warn(ctrl->device, "async event result %08x\n", result);
4029 	}
4030 }
4031 
4032 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
4033 		volatile union nvme_result *res)
4034 {
4035 	u32 result = le32_to_cpu(res->u32);
4036 	u32 aer_type = result & 0x07;
4037 
4038 	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
4039 		return;
4040 
4041 	switch (aer_type) {
4042 	case NVME_AER_NOTICE:
4043 		nvme_handle_aen_notice(ctrl, result);
4044 		break;
4045 	case NVME_AER_ERROR:
4046 	case NVME_AER_SMART:
4047 	case NVME_AER_CSS:
4048 	case NVME_AER_VS:
4049 		trace_nvme_async_event(ctrl, aer_type);
4050 		ctrl->aen_result = result;
4051 		break;
4052 	default:
4053 		break;
4054 	}
4055 	queue_work(nvme_wq, &ctrl->async_event_work);
4056 }
4057 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
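
/*
 * Decoding example for the completion result above (annotation only).
 * Dword 0 of the AER completion packs: bits 2:0 event type, bits 15:8
 * event information, bits 23:16 associated log page.  A "namespace
 * attribute changed" notice would typically arrive as 0x00040002:
 *
 *	aer_type        = 0x00040002 & 0x07          = 2 (NVME_AER_NOTICE)
 *	aer_notice_type = (0x00040002 & 0xff00) >> 8 = 0 (NVME_AER_NOTICE_NS_CHANGED)
 *	log page        = 0x04 (Changed Namespace List)
 */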
4058 
4059 void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
4060 {
4061 	nvme_mpath_stop(ctrl);
4062 	nvme_stop_keep_alive(ctrl);
4063 	flush_work(&ctrl->async_event_work);
4064 	cancel_work_sync(&ctrl->fw_act_work);
4065 }
4066 EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
4067 
4068 void nvme_start_ctrl(struct nvme_ctrl *ctrl)
4069 {
4070 	if (ctrl->kato)
4071 		nvme_start_keep_alive(ctrl);
4072 
4073 	nvme_enable_aen(ctrl);
4074 
4075 	if (ctrl->queue_count > 1) {
4076 		nvme_queue_scan(ctrl);
4077 		nvme_start_queues(ctrl);
4078 	}
4079 	ctrl->created = true;
4080 }
4081 EXPORT_SYMBOL_GPL(nvme_start_ctrl);
4082 
4083 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
4084 {
4085 	nvme_fault_inject_fini(&ctrl->fault_inject);
4086 	dev_pm_qos_hide_latency_tolerance(ctrl->device);
4087 	cdev_device_del(&ctrl->cdev, ctrl->device);
4088 	nvme_put_ctrl(ctrl);
4089 }
4090 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
4091 
4092 static void nvme_free_ctrl(struct device *dev)
4093 {
4094 	struct nvme_ctrl *ctrl =
4095 		container_of(dev, struct nvme_ctrl, ctrl_device);
4096 	struct nvme_subsystem *subsys = ctrl->subsys;
4097 
4098 	if (subsys && ctrl->instance != subsys->instance)
4099 		ida_simple_remove(&nvme_instance_ida, ctrl->instance);
4100 
4101 	kfree(ctrl->effects);
4102 	nvme_mpath_uninit(ctrl);
4103 	__free_page(ctrl->discard_page);
4104 
4105 	if (subsys) {
4106 		mutex_lock(&nvme_subsystems_lock);
4107 		list_del(&ctrl->subsys_entry);
4108 		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
4109 		mutex_unlock(&nvme_subsystems_lock);
4110 	}
4111 
4112 	ctrl->ops->free_ctrl(ctrl);
4113 
4114 	if (subsys)
4115 		nvme_put_subsystem(subsys);
4116 }
4117 
4118 /*
4119  * Initialize an NVMe controller structure.  This needs to be called during
4120  * the earliest initialization so that we have the initialized structure around
4121  * during probing.
4122  */
4123 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
4124 		const struct nvme_ctrl_ops *ops, unsigned long quirks)
4125 {
4126 	int ret;
4127 
4128 	ctrl->state = NVME_CTRL_NEW;
4129 	spin_lock_init(&ctrl->lock);
4130 	mutex_init(&ctrl->scan_lock);
4131 	INIT_LIST_HEAD(&ctrl->namespaces);
4132 	init_rwsem(&ctrl->namespaces_rwsem);
4133 	ctrl->dev = dev;
4134 	ctrl->ops = ops;
4135 	ctrl->quirks = quirks;
4136 	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
4137 	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
4138 	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
4139 	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
4140 	init_waitqueue_head(&ctrl->state_wq);
4141 
4142 	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
4143 	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
4144 	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
4145 
4146 	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
4147 			PAGE_SIZE);
4148 	ctrl->discard_page = alloc_page(GFP_KERNEL);
4149 	if (!ctrl->discard_page) {
4150 		ret = -ENOMEM;
4151 		goto out;
4152 	}
4153 
4154 	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
4155 	if (ret < 0)
4156 		goto out;
4157 	ctrl->instance = ret;
4158 
4159 	device_initialize(&ctrl->ctrl_device);
4160 	ctrl->device = &ctrl->ctrl_device;
4161 	ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
4162 	ctrl->device->class = nvme_class;
4163 	ctrl->device->parent = ctrl->dev;
4164 	ctrl->device->groups = nvme_dev_attr_groups;
4165 	ctrl->device->release = nvme_free_ctrl;
4166 	dev_set_drvdata(ctrl->device, ctrl);
4167 	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
4168 	if (ret)
4169 		goto out_release_instance;
4170 
4171 	nvme_get_ctrl(ctrl);
4172 	cdev_init(&ctrl->cdev, &nvme_dev_fops);
4173 	ctrl->cdev.owner = ops->module;
4174 	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
4175 	if (ret)
4176 		goto out_free_name;
4177 
4178 	/*
4179 	 * Initialize latency tolerance controls.  The sysfs files won't
4180 	 * be visible to userspace unless the device actually supports APST.
4181 	 */
4182 	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
4183 	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
4184 		min(default_ps_max_latency_us, (unsigned long)S32_MAX));
4185 
4186 	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
4187 
4188 	return 0;
4189 out_free_name:
4190 	nvme_put_ctrl(ctrl);
4191 	kfree_const(ctrl->device->kobj.name);
4192 out_release_instance:
4193 	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
4194 out:
4195 	if (ctrl->discard_page)
4196 		__free_page(ctrl->discard_page);
4197 	return ret;
4198 }
4199 EXPORT_SYMBOL_GPL(nvme_init_ctrl);
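
#if 0	/* Illustrative sketch, never built: how a transport driver might
	 * call nvme_init_ctrl() from its probe path.  example_ctrl,
	 * example_ctrl_ops and example_quirks are hypothetical
	 * placeholders, not part of any driver.
	 */
static int example_transport_probe(struct example_ctrl *ectrl,
		struct device *dev)
{
	int ret;

	/* Must run early so the instance, chardev and works exist. */
	ret = nvme_init_ctrl(&ectrl->ctrl, dev, &example_ctrl_ops,
			example_quirks);
	if (ret)
		return ret;

	/* ...transport-specific setup, then schedule the initial reset... */
	return 0;
}
#endif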
4200 
4201 /**
4202  * nvme_kill_queues(): Ends all namespace queues
4203  * @ctrl: the dead controller whose namespace queues need to be ended
4204  *
4205  * Call this function when the driver determines it is unable to get the
4206  * controller in a state capable of servicing IO.
4207  */
4208 void nvme_kill_queues(struct nvme_ctrl *ctrl)
4209 {
4210 	struct nvme_ns *ns;
4211 
4212 	down_read(&ctrl->namespaces_rwsem);
4213 
4214 	/* Forcibly unquiesce queues to avoid blocking dispatch */
4215 	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
4216 		blk_mq_unquiesce_queue(ctrl->admin_q);
4217 
4218 	list_for_each_entry(ns, &ctrl->namespaces, list)
4219 		nvme_set_queue_dying(ns);
4220 
4221 	up_read(&ctrl->namespaces_rwsem);
4222 }
4223 EXPORT_SYMBOL_GPL(nvme_kill_queues);
4224 
4225 void nvme_unfreeze(struct nvme_ctrl *ctrl)
4226 {
4227 	struct nvme_ns *ns;
4228 
4229 	down_read(&ctrl->namespaces_rwsem);
4230 	list_for_each_entry(ns, &ctrl->namespaces, list)
4231 		blk_mq_unfreeze_queue(ns->queue);
4232 	up_read(&ctrl->namespaces_rwsem);
4233 }
4234 EXPORT_SYMBOL_GPL(nvme_unfreeze);
4235 
4236 void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
4237 {
4238 	struct nvme_ns *ns;
4239 
4240 	down_read(&ctrl->namespaces_rwsem);
4241 	list_for_each_entry(ns, &ctrl->namespaces, list) {
4242 		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
4243 		if (timeout <= 0)
4244 			break;
4245 	}
4246 	up_read(&ctrl->namespaces_rwsem);
4247 }
4248 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
4249 
4250 void nvme_wait_freeze(struct nvme_ctrl *ctrl)
4251 {
4252 	struct nvme_ns *ns;
4253 
4254 	down_read(&ctrl->namespaces_rwsem);
4255 	list_for_each_entry(ns, &ctrl->namespaces, list)
4256 		blk_mq_freeze_queue_wait(ns->queue);
4257 	up_read(&ctrl->namespaces_rwsem);
4258 }
4259 EXPORT_SYMBOL_GPL(nvme_wait_freeze);
4260 
4261 void nvme_start_freeze(struct nvme_ctrl *ctrl)
4262 {
4263 	struct nvme_ns *ns;
4264 
4265 	down_read(&ctrl->namespaces_rwsem);
4266 	list_for_each_entry(ns, &ctrl->namespaces, list)
4267 		blk_freeze_queue_start(ns->queue);
4268 	up_read(&ctrl->namespaces_rwsem);
4269 }
4270 EXPORT_SYMBOL_GPL(nvme_start_freeze);
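
/*
 * Typical sequencing of the freeze helpers during a controller reset
 * (sketch of how a transport such as nvme-pci uses them; annotation
 * only):
 *
 *	nvme_start_freeze(ctrl);		start draining every ns queue
 *	nvme_wait_freeze_timeout(ctrl, tmo);	bounded wait while disabling
 *	...tear down and re-enable the controller...
 *	nvme_wait_freeze(ctrl);			queues now fully frozen
 *	...update queue counts and limits...
 *	nvme_unfreeze(ctrl);			resume I/O
 */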
4271 
4272 void nvme_stop_queues(struct nvme_ctrl *ctrl)
4273 {
4274 	struct nvme_ns *ns;
4275 
4276 	down_read(&ctrl->namespaces_rwsem);
4277 	list_for_each_entry(ns, &ctrl->namespaces, list)
4278 		blk_mq_quiesce_queue(ns->queue);
4279 	up_read(&ctrl->namespaces_rwsem);
4280 }
4281 EXPORT_SYMBOL_GPL(nvme_stop_queues);
4282 
4283 void nvme_start_queues(struct nvme_ctrl *ctrl)
4284 {
4285 	struct nvme_ns *ns;
4286 
4287 	down_read(&ctrl->namespaces_rwsem);
4288 	list_for_each_entry(ns, &ctrl->namespaces, list)
4289 		blk_mq_unquiesce_queue(ns->queue);
4290 	up_read(&ctrl->namespaces_rwsem);
4291 }
4292 EXPORT_SYMBOL_GPL(nvme_start_queues);
4293 
4295 void nvme_sync_queues(struct nvme_ctrl *ctrl)
4296 {
4297 	struct nvme_ns *ns;
4298 
4299 	down_read(&ctrl->namespaces_rwsem);
4300 	list_for_each_entry(ns, &ctrl->namespaces, list)
4301 		blk_sync_queue(ns->queue);
4302 	up_read(&ctrl->namespaces_rwsem);
4303 
4304 	if (ctrl->admin_q)
4305 		blk_sync_queue(ctrl->admin_q);
4306 }
4307 EXPORT_SYMBOL_GPL(nvme_sync_queues);
4308 
4309 /*
4310  * Check we didn't inadvertently grow the command structure sizes:
4311  */
4312 static inline void _nvme_check_size(void)
4313 {
4314 	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
4315 	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
4316 	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
4317 	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
4318 	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
4319 	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
4320 	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
4321 	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
4322 	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
4323 	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
4324 	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
4325 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
4326 	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
4327 	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
4328 	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
4329 	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
4330 	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
4331 }
4332 
4334 static int __init nvme_core_init(void)
4335 {
4336 	int result = -ENOMEM;
4337 
4338 	_nvme_check_size();
4339 
4340 	nvme_wq = alloc_workqueue("nvme-wq",
4341 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4342 	if (!nvme_wq)
4343 		goto out;
4344 
4345 	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
4346 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4347 	if (!nvme_reset_wq)
4348 		goto destroy_wq;
4349 
4350 	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
4351 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4352 	if (!nvme_delete_wq)
4353 		goto destroy_reset_wq;
4354 
4355 	result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
4356 	if (result < 0)
4357 		goto destroy_delete_wq;
4358 
4359 	nvme_class = class_create(THIS_MODULE, "nvme");
4360 	if (IS_ERR(nvme_class)) {
4361 		result = PTR_ERR(nvme_class);
4362 		goto unregister_chrdev;
4363 	}
4364 	nvme_class->dev_uevent = nvme_class_uevent;
4365 
4366 	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
4367 	if (IS_ERR(nvme_subsys_class)) {
4368 		result = PTR_ERR(nvme_subsys_class);
4369 		goto destroy_class;
4370 	}
4371 	return 0;
4372 
4373 destroy_class:
4374 	class_destroy(nvme_class);
4375 unregister_chrdev:
4376 	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
4377 destroy_delete_wq:
4378 	destroy_workqueue(nvme_delete_wq);
4379 destroy_reset_wq:
4380 	destroy_workqueue(nvme_reset_wq);
4381 destroy_wq:
4382 	destroy_workqueue(nvme_wq);
4383 out:
4384 	return result;
4385 }
4386 
4387 static void __exit nvme_core_exit(void)
4388 {
4389 	class_destroy(nvme_subsys_class);
4390 	class_destroy(nvme_class);
4391 	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
4392 	destroy_workqueue(nvme_delete_wq);
4393 	destroy_workqueue(nvme_reset_wq);
4394 	destroy_workqueue(nvme_wq);
4395 	ida_destroy(&nvme_instance_ida);
4396 }
4397 
4398 MODULE_LICENSE("GPL");
4399 MODULE_VERSION("1.0");
4400 module_init(nvme_core_init);
4401 module_exit(nvme_core_exit);
4402