xref: /linux/drivers/nvme/host/core.c (revision faabed295cccc2aba2b67f2e7b309f2892d55004)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVM Express device driver
4  * Copyright (c) 2011-2014, Intel Corporation.
5  */
6 
7 #include <linux/blkdev.h>
8 #include <linux/blk-mq.h>
9 #include <linux/compat.h>
10 #include <linux/delay.h>
11 #include <linux/errno.h>
12 #include <linux/hdreg.h>
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/backing-dev.h>
16 #include <linux/list_sort.h>
17 #include <linux/slab.h>
18 #include <linux/types.h>
19 #include <linux/pr.h>
20 #include <linux/ptrace.h>
21 #include <linux/nvme_ioctl.h>
22 #include <linux/pm_qos.h>
23 #include <asm/unaligned.h>
24 
25 #include "nvme.h"
26 #include "fabrics.h"
27 
28 #define CREATE_TRACE_POINTS
29 #include "trace.h"
30 
31 #define NVME_MINORS		(1U << MINORBITS)
32 
33 unsigned int admin_timeout = 60;
34 module_param(admin_timeout, uint, 0644);
35 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
36 EXPORT_SYMBOL_GPL(admin_timeout);
37 
38 unsigned int nvme_io_timeout = 30;
39 module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
40 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
41 EXPORT_SYMBOL_GPL(nvme_io_timeout);
42 
43 static unsigned char shutdown_timeout = 5;
44 module_param(shutdown_timeout, byte, 0644);
45 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
46 
47 static u8 nvme_max_retries = 5;
48 module_param_named(max_retries, nvme_max_retries, byte, 0644);
49 MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
50 
51 static unsigned long default_ps_max_latency_us = 100000;
52 module_param(default_ps_max_latency_us, ulong, 0644);
53 MODULE_PARM_DESC(default_ps_max_latency_us,
54 		 "max power saving latency for new devices; use PM QOS to change per device");
55 
56 static bool force_apst;
57 module_param(force_apst, bool, 0644);
58 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
59 
60 static bool streams;
61 module_param(streams, bool, 0644);
62 MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
63 
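/*
 * Usage note (a sketch, assuming the core driver is built as the nvme_core
 * module): the parameters above can be set at module load time or on the
 * kernel command line, e.g.
 *
 *	modprobe nvme_core io_timeout=60 max_retries=3
 *	nvme_core.default_ps_max_latency_us=0	(kernel command line)
 */
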
64 /*
65  * nvme_wq - hosts nvme related works that are not reset or delete
66  * nvme_reset_wq - hosts nvme reset works
67  * nvme_delete_wq - hosts nvme delete works
68  *
69  * nvme_wq will host works such as scan, aen handling, fw activation,
70  * keep-alive, periodic reconnects etc.  nvme_reset_wq runs reset works,
71  * which also flush works hosted on nvme_wq for serialization purposes.
72  * nvme_delete_wq hosts controller deletion works, which flush reset
73  * works for serialization.
74  */
75 struct workqueue_struct *nvme_wq;
76 EXPORT_SYMBOL_GPL(nvme_wq);
77 
78 struct workqueue_struct *nvme_reset_wq;
79 EXPORT_SYMBOL_GPL(nvme_reset_wq);
80 
81 struct workqueue_struct *nvme_delete_wq;
82 EXPORT_SYMBOL_GPL(nvme_delete_wq);
83 
84 static LIST_HEAD(nvme_subsystems);
85 static DEFINE_MUTEX(nvme_subsystems_lock);
86 
87 static DEFINE_IDA(nvme_instance_ida);
88 static dev_t nvme_chr_devt;
89 static struct class *nvme_class;
90 static struct class *nvme_subsys_class;
91 
92 static int nvme_revalidate_disk(struct gendisk *disk);
93 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
94 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
95 					   unsigned nsid);
96 
97 static void nvme_set_queue_dying(struct nvme_ns *ns)
98 {
99 	/*
100 	 * Revalidating a dead namespace sets capacity to 0. This will end
101 	 * buffered writers dirtying pages that can't be synced.
102 	 */
103 	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
104 		return;
105 	blk_set_queue_dying(ns->queue);
106 	/* Forcibly unquiesce queues to avoid blocking dispatch */
107 	blk_mq_unquiesce_queue(ns->queue);
108 	/*
109 	 * Revalidate after unblocking dispatchers that may be holding bd_mutex
110 	 */
111 	revalidate_disk(ns->disk);
112 }
113 
114 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
115 {
116 	/*
117 	 * Only queue new scan work when admin and IO queues are both alive
118 	 */
119 	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
120 		queue_work(nvme_wq, &ctrl->scan_work);
121 }
122 
123 /*
124  * Use this function to proceed with scheduling reset_work for a controller
125  * that had previously been set to the resetting state. This is intended for
126  * code paths that can't be interrupted by other reset attempts. A hot removal
127  * may prevent this from succeeding.
128  */
129 int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
130 {
131 	if (ctrl->state != NVME_CTRL_RESETTING)
132 		return -EBUSY;
133 	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
134 		return -EBUSY;
135 	return 0;
136 }
137 EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
138 
139 int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
140 {
141 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
142 		return -EBUSY;
143 	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
144 		return -EBUSY;
145 	return 0;
146 }
147 EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
148 
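/*
 * Illustrative sketch of how a transport driver typically requests a reset
 * after a fatal error; "dev" stands for a transport-specific structure
 * embedding a struct nvme_ctrl and is only a placeholder here:
 *
 *	if (nvme_reset_ctrl(&dev->ctrl))
 *		dev_warn(dev->ctrl.device, "failed to schedule reset\n");
 *
 * The actual teardown and re-initialization then happens in the transport's
 * reset_work running on nvme_reset_wq.
 */
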
149 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
150 {
151 	int ret;
152 
153 	ret = nvme_reset_ctrl(ctrl);
154 	if (!ret) {
155 		flush_work(&ctrl->reset_work);
156 		if (ctrl->state != NVME_CTRL_LIVE)
157 			ret = -ENETRESET;
158 	}
159 
160 	return ret;
161 }
162 EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
163 
164 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
165 {
166 	dev_info(ctrl->device,
167 		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);
168 
169 	flush_work(&ctrl->reset_work);
170 	nvme_stop_ctrl(ctrl);
171 	nvme_remove_namespaces(ctrl);
172 	ctrl->ops->delete_ctrl(ctrl);
173 	nvme_uninit_ctrl(ctrl);
174 }
175 
176 static void nvme_delete_ctrl_work(struct work_struct *work)
177 {
178 	struct nvme_ctrl *ctrl =
179 		container_of(work, struct nvme_ctrl, delete_work);
180 
181 	nvme_do_delete_ctrl(ctrl);
182 }
183 
184 int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
185 {
186 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
187 		return -EBUSY;
188 	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
189 		return -EBUSY;
190 	return 0;
191 }
192 EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
193 
194 static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
195 {
196 	/*
197 	 * Keep a reference until nvme_do_delete_ctrl() completes,
198 	 * since ->delete_ctrl can free the controller.
199 	 */
200 	nvme_get_ctrl(ctrl);
201 	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
202 		nvme_do_delete_ctrl(ctrl);
203 	nvme_put_ctrl(ctrl);
204 }
205 
206 static blk_status_t nvme_error_status(u16 status)
207 {
208 	switch (status & 0x7ff) {
209 	case NVME_SC_SUCCESS:
210 		return BLK_STS_OK;
211 	case NVME_SC_CAP_EXCEEDED:
212 		return BLK_STS_NOSPC;
213 	case NVME_SC_LBA_RANGE:
214 	case NVME_SC_CMD_INTERRUPTED:
215 	case NVME_SC_NS_NOT_READY:
216 		return BLK_STS_TARGET;
217 	case NVME_SC_BAD_ATTRIBUTES:
218 	case NVME_SC_ONCS_NOT_SUPPORTED:
219 	case NVME_SC_INVALID_OPCODE:
220 	case NVME_SC_INVALID_FIELD:
221 	case NVME_SC_INVALID_NS:
222 		return BLK_STS_NOTSUPP;
223 	case NVME_SC_WRITE_FAULT:
224 	case NVME_SC_READ_ERROR:
225 	case NVME_SC_UNWRITTEN_BLOCK:
226 	case NVME_SC_ACCESS_DENIED:
227 	case NVME_SC_READ_ONLY:
228 	case NVME_SC_COMPARE_FAILED:
229 		return BLK_STS_MEDIUM;
230 	case NVME_SC_GUARD_CHECK:
231 	case NVME_SC_APPTAG_CHECK:
232 	case NVME_SC_REFTAG_CHECK:
233 	case NVME_SC_INVALID_PI:
234 		return BLK_STS_PROTECTION;
235 	case NVME_SC_RESERVATION_CONFLICT:
236 		return BLK_STS_NEXUS;
237 	case NVME_SC_HOST_PATH_ERROR:
238 		return BLK_STS_TRANSPORT;
239 	default:
240 		return BLK_STS_IOERR;
241 	}
242 }
243 
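/*
 * A short worked example of the mapping above: the 0x7ff mask strips the
 * DNR, MORE and CRD bits, so a completion carrying NVME_SC_CAP_EXCEEDED
 * (with or without DNR) becomes BLK_STS_NOSPC, which the block layer in
 * turn typically surfaces to the submitter as -ENOSPC.  Anything not listed
 * falls through to the generic BLK_STS_IOERR.
 */
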
244 static inline bool nvme_req_needs_retry(struct request *req)
245 {
246 	if (blk_noretry_request(req))
247 		return false;
248 	if (nvme_req(req)->status & NVME_SC_DNR)
249 		return false;
250 	if (nvme_req(req)->retries >= nvme_max_retries)
251 		return false;
252 	return true;
253 }
254 
255 static void nvme_retry_req(struct request *req)
256 {
257 	struct nvme_ns *ns = req->q->queuedata;
258 	unsigned long delay = 0;
259 	u16 crd;
260 
261 	/* The mask and shift result must be <= 3 */
262 	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
263 	if (ns && crd)
264 		delay = ns->ctrl->crdt[crd - 1] * 100;
265 
266 	nvme_req(req)->retries++;
267 	blk_mq_requeue_request(req, false);
268 	blk_mq_delay_kick_requeue_list(req->q, delay);
269 }
270 
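/*
 * Worked example for the delay above: crd is the two-bit Command Retry
 * Delay index taken from the status field, and the crdt[] values advertised
 * by the controller are in units of 100 milliseconds.  So a completion with
 * crd == 2 on a controller reporting crdt[1] == 15 requeues the request
 * with a delay of 15 * 100 == 1500 milliseconds.
 */
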
271 void nvme_complete_rq(struct request *req)
272 {
273 	blk_status_t status = nvme_error_status(nvme_req(req)->status);
274 
275 	trace_nvme_complete_rq(req);
276 
277 	nvme_cleanup_cmd(req);
278 
279 	if (nvme_req(req)->ctrl->kas)
280 		nvme_req(req)->ctrl->comp_seen = true;
281 
282 	if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
283 		if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
284 			return;
285 
286 		if (!blk_queue_dying(req->q)) {
287 			nvme_retry_req(req);
288 			return;
289 		}
290 	}
291 
292 	nvme_trace_bio_complete(req, status);
293 	blk_mq_end_request(req, status);
294 }
295 EXPORT_SYMBOL_GPL(nvme_complete_rq);
296 
297 bool nvme_cancel_request(struct request *req, void *data, bool reserved)
298 {
299 	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
300 				"Cancelling I/O %d", req->tag);
301 
302 	/* don't abort an already completed request */
303 	if (blk_mq_request_completed(req))
304 		return true;
305 
306 	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
307 	blk_mq_force_complete_rq(req);
308 	return true;
309 }
310 EXPORT_SYMBOL_GPL(nvme_cancel_request);
311 
312 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
313 		enum nvme_ctrl_state new_state)
314 {
315 	enum nvme_ctrl_state old_state;
316 	unsigned long flags;
317 	bool changed = false;
318 
319 	spin_lock_irqsave(&ctrl->lock, flags);
320 
321 	old_state = ctrl->state;
322 	switch (new_state) {
323 	case NVME_CTRL_LIVE:
324 		switch (old_state) {
325 		case NVME_CTRL_NEW:
326 		case NVME_CTRL_RESETTING:
327 		case NVME_CTRL_CONNECTING:
328 			changed = true;
329 			/* FALLTHRU */
330 		default:
331 			break;
332 		}
333 		break;
334 	case NVME_CTRL_RESETTING:
335 		switch (old_state) {
336 		case NVME_CTRL_NEW:
337 		case NVME_CTRL_LIVE:
338 			changed = true;
339 			/* FALLTHRU */
340 		default:
341 			break;
342 		}
343 		break;
344 	case NVME_CTRL_CONNECTING:
345 		switch (old_state) {
346 		case NVME_CTRL_NEW:
347 		case NVME_CTRL_RESETTING:
348 			changed = true;
349 			/* FALLTHRU */
350 		default:
351 			break;
352 		}
353 		break;
354 	case NVME_CTRL_DELETING:
355 		switch (old_state) {
356 		case NVME_CTRL_LIVE:
357 		case NVME_CTRL_RESETTING:
358 		case NVME_CTRL_CONNECTING:
359 			changed = true;
360 			/* FALLTHRU */
361 		default:
362 			break;
363 		}
364 		break;
365 	case NVME_CTRL_DEAD:
366 		switch (old_state) {
367 		case NVME_CTRL_DELETING:
368 			changed = true;
369 			/* FALLTHRU */
370 		default:
371 			break;
372 		}
373 		break;
374 	default:
375 		break;
376 	}
377 
378 	if (changed) {
379 		ctrl->state = new_state;
380 		wake_up_all(&ctrl->state_wq);
381 	}
382 
383 	spin_unlock_irqrestore(&ctrl->lock, flags);
384 	if (changed && ctrl->state == NVME_CTRL_LIVE)
385 		nvme_kick_requeue_lists(ctrl);
386 	return changed;
387 }
388 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
389 
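/*
 * The switch above encodes the legal state transitions, in short:
 *
 *	NEW        -> LIVE, RESETTING, CONNECTING
 *	LIVE       -> RESETTING, DELETING
 *	RESETTING  -> LIVE, CONNECTING, DELETING
 *	CONNECTING -> LIVE, DELETING
 *	DELETING   -> DEAD
 *
 * Any other requested transition leaves the state untouched and the
 * function returns false.
 */
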
390 /*
391  * Returns true for sink states that can't ever transition back to live.
392  */
393 static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
394 {
395 	switch (ctrl->state) {
396 	case NVME_CTRL_NEW:
397 	case NVME_CTRL_LIVE:
398 	case NVME_CTRL_RESETTING:
399 	case NVME_CTRL_CONNECTING:
400 		return false;
401 	case NVME_CTRL_DELETING:
402 	case NVME_CTRL_DEAD:
403 		return true;
404 	default:
405 		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
406 		return true;
407 	}
408 }
409 
410 /*
411  * Waits for the controller state to be resetting, or returns false if it is
412  * not possible to ever transition to that state.
413  */
414 bool nvme_wait_reset(struct nvme_ctrl *ctrl)
415 {
416 	wait_event(ctrl->state_wq,
417 		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
418 		   nvme_state_terminal(ctrl));
419 	return ctrl->state == NVME_CTRL_RESETTING;
420 }
421 EXPORT_SYMBOL_GPL(nvme_wait_reset);
422 
423 static void nvme_free_ns_head(struct kref *ref)
424 {
425 	struct nvme_ns_head *head =
426 		container_of(ref, struct nvme_ns_head, ref);
427 
428 	nvme_mpath_remove_disk(head);
429 	ida_simple_remove(&head->subsys->ns_ida, head->instance);
430 	cleanup_srcu_struct(&head->srcu);
431 	nvme_put_subsystem(head->subsys);
432 	kfree(head);
433 }
434 
435 static void nvme_put_ns_head(struct nvme_ns_head *head)
436 {
437 	kref_put(&head->ref, nvme_free_ns_head);
438 }
439 
440 static void nvme_free_ns(struct kref *kref)
441 {
442 	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
443 
444 	if (ns->ndev)
445 		nvme_nvm_unregister(ns);
446 
447 	put_disk(ns->disk);
448 	nvme_put_ns_head(ns->head);
449 	nvme_put_ctrl(ns->ctrl);
450 	kfree(ns);
451 }
452 
453 static void nvme_put_ns(struct nvme_ns *ns)
454 {
455 	kref_put(&ns->kref, nvme_free_ns);
456 }
457 
458 static inline void nvme_clear_nvme_request(struct request *req)
459 {
460 	if (!(req->rq_flags & RQF_DONTPREP)) {
461 		nvme_req(req)->retries = 0;
462 		nvme_req(req)->flags = 0;
463 		req->rq_flags |= RQF_DONTPREP;
464 	}
465 }
466 
467 struct request *nvme_alloc_request(struct request_queue *q,
468 		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
469 {
470 	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
471 	struct request *req;
472 
473 	if (qid == NVME_QID_ANY) {
474 		req = blk_mq_alloc_request(q, op, flags);
475 	} else {
476 		req = blk_mq_alloc_request_hctx(q, op, flags,
477 				qid ? qid - 1 : 0);
478 	}
479 	if (IS_ERR(req))
480 		return req;
481 
482 	req->cmd_flags |= REQ_FAILFAST_DRIVER;
483 	nvme_clear_nvme_request(req);
484 	nvme_req(req)->cmd = cmd;
485 
486 	return req;
487 }
488 EXPORT_SYMBOL_GPL(nvme_alloc_request);
489 
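/*
 * Note on the qid argument above: NVME_QID_ANY lets the block layer pick
 * any hardware context, while an explicit qid pins the request to a
 * specific hardware context (qid - 1 for I/O queues).  The latter is how,
 * for example, the fabrics code issues a Connect command on the very queue
 * it is connecting.
 */
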
490 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
491 {
492 	struct nvme_command c;
493 
494 	memset(&c, 0, sizeof(c));
495 
496 	c.directive.opcode = nvme_admin_directive_send;
497 	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
498 	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
499 	c.directive.dtype = NVME_DIR_IDENTIFY;
500 	c.directive.tdtype = NVME_DIR_STREAMS;
501 	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;
502 
503 	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
504 }
505 
506 static int nvme_disable_streams(struct nvme_ctrl *ctrl)
507 {
508 	return nvme_toggle_streams(ctrl, false);
509 }
510 
511 static int nvme_enable_streams(struct nvme_ctrl *ctrl)
512 {
513 	return nvme_toggle_streams(ctrl, true);
514 }
515 
516 static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
517 				  struct streams_directive_params *s, u32 nsid)
518 {
519 	struct nvme_command c;
520 
521 	memset(&c, 0, sizeof(c));
522 	memset(s, 0, sizeof(*s));
523 
524 	c.directive.opcode = nvme_admin_directive_recv;
525 	c.directive.nsid = cpu_to_le32(nsid);
526 	c.directive.numd = cpu_to_le32(nvme_bytes_to_numd(sizeof(*s)));
527 	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
528 	c.directive.dtype = NVME_DIR_STREAMS;
529 
530 	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
531 }
532 
533 static int nvme_configure_directives(struct nvme_ctrl *ctrl)
534 {
535 	struct streams_directive_params s;
536 	int ret;
537 
538 	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
539 		return 0;
540 	if (!streams)
541 		return 0;
542 
543 	ret = nvme_enable_streams(ctrl);
544 	if (ret)
545 		return ret;
546 
547 	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
548 	if (ret)
549 		goto out_disable_stream;
550 
551 	ctrl->nssa = le16_to_cpu(s.nssa);
552 	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
553 		dev_info(ctrl->device, "too few streams (%u) available\n",
554 					ctrl->nssa);
555 		goto out_disable_stream;
556 	}
557 
558 	ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
559 	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
560 	return 0;
561 
562 out_disable_stream:
563 	nvme_disable_streams(ctrl);
564 	return ret;
565 }
566 
567 /*
568  * Check if 'req' has a write hint associated with it. If it does, assign
569  * a valid namespace stream to the write.
570  */
571 static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
572 				     struct request *req, u16 *control,
573 				     u32 *dsmgmt)
574 {
575 	enum rw_hint streamid = req->write_hint;
576 
577 	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
578 		streamid = 0;
579 	else {
580 		streamid--;
581 		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
582 			return;
583 
584 		*control |= NVME_RW_DTYPE_STREAMS;
585 		*dsmgmt |= streamid << 16;
586 	}
587 
588 	if (streamid < ARRAY_SIZE(req->q->write_hints))
589 		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
590 }
591 
592 static inline void nvme_setup_flush(struct nvme_ns *ns,
593 		struct nvme_command *cmnd)
594 {
595 	cmnd->common.opcode = nvme_cmd_flush;
596 	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
597 }
598 
599 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
600 		struct nvme_command *cmnd)
601 {
602 	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
603 	struct nvme_dsm_range *range;
604 	struct bio *bio;
605 
606 	/*
607 	 * Some devices do not consider the DSM 'Number of Ranges' field when
608 	 * determining how much data to DMA. Always allocate memory for the max
609 	 * number of segments to prevent the device reading beyond the buffer.
610 	 */
611 	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;
612 
613 	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
614 	if (!range) {
615 		/*
616 		 * If we fail to allocate our range, fall back to the controller
617 		 * discard page. If that's also busy, it's safe to return
618 		 * busy, as we know we can make progress once that's freed.
619 		 */
620 		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
621 			return BLK_STS_RESOURCE;
622 
623 		range = page_address(ns->ctrl->discard_page);
624 	}
625 
626 	__rq_for_each_bio(bio, req) {
627 		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
628 		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
629 
630 		if (n < segments) {
631 			range[n].cattr = cpu_to_le32(0);
632 			range[n].nlb = cpu_to_le32(nlb);
633 			range[n].slba = cpu_to_le64(slba);
634 		}
635 		n++;
636 	}
637 
638 	if (WARN_ON_ONCE(n != segments)) {
639 		if (virt_to_page(range) == ns->ctrl->discard_page)
640 			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
641 		else
642 			kfree(range);
643 		return BLK_STS_IOERR;
644 	}
645 
646 	cmnd->dsm.opcode = nvme_cmd_dsm;
647 	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
648 	cmnd->dsm.nr = cpu_to_le32(segments - 1);
649 	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
650 
651 	req->special_vec.bv_page = virt_to_page(range);
652 	req->special_vec.bv_offset = offset_in_page(range);
653 	req->special_vec.bv_len = alloc_size;
654 	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
655 
656 	return BLK_STS_OK;
657 }
658 
659 static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
660 		struct request *req, struct nvme_command *cmnd)
661 {
662 	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
663 		return nvme_setup_discard(ns, req, cmnd);
664 
665 	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
666 	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
667 	cmnd->write_zeroes.slba =
668 		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
669 	cmnd->write_zeroes.length =
670 		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
671 	cmnd->write_zeroes.control = 0;
672 	return BLK_STS_OK;
673 }
674 
675 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
676 		struct request *req, struct nvme_command *cmnd)
677 {
678 	struct nvme_ctrl *ctrl = ns->ctrl;
679 	u16 control = 0;
680 	u32 dsmgmt = 0;
681 
682 	if (req->cmd_flags & REQ_FUA)
683 		control |= NVME_RW_FUA;
684 	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
685 		control |= NVME_RW_LR;
686 
687 	if (req->cmd_flags & REQ_RAHEAD)
688 		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
689 
690 	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
691 	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
692 	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
693 	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
694 
695 	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
696 		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
697 
698 	if (ns->ms) {
699 		/*
700 		 * If formatted with metadata, the block layer always provides a
701 		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
702 		 * we enable the PRACT bit for protection information or set the
703 		 * namespace capacity to zero to prevent any I/O.
704 		 */
705 		if (!blk_integrity_rq(req)) {
706 			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
707 				return BLK_STS_NOTSUPP;
708 			control |= NVME_RW_PRINFO_PRACT;
709 		}
710 
711 		switch (ns->pi_type) {
712 		case NVME_NS_DPS_PI_TYPE3:
713 			control |= NVME_RW_PRINFO_PRCHK_GUARD;
714 			break;
715 		case NVME_NS_DPS_PI_TYPE1:
716 		case NVME_NS_DPS_PI_TYPE2:
717 			control |= NVME_RW_PRINFO_PRCHK_GUARD |
718 					NVME_RW_PRINFO_PRCHK_REF;
719 			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
720 			break;
721 		}
722 	}
723 
724 	cmnd->rw.control = cpu_to_le16(control);
725 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
726 	return 0;
727 }
728 
729 void nvme_cleanup_cmd(struct request *req)
730 {
731 	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
732 		struct nvme_ns *ns = req->rq_disk->private_data;
733 		struct page *page = req->special_vec.bv_page;
734 
735 		if (page == ns->ctrl->discard_page)
736 			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
737 		else
738 			kfree(page_address(page) + req->special_vec.bv_offset);
739 	}
740 }
741 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
742 
743 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
744 		struct nvme_command *cmd)
745 {
746 	blk_status_t ret = BLK_STS_OK;
747 
748 	nvme_clear_nvme_request(req);
749 
750 	memset(cmd, 0, sizeof(*cmd));
751 	switch (req_op(req)) {
752 	case REQ_OP_DRV_IN:
753 	case REQ_OP_DRV_OUT:
754 		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
755 		break;
756 	case REQ_OP_FLUSH:
757 		nvme_setup_flush(ns, cmd);
758 		break;
759 	case REQ_OP_WRITE_ZEROES:
760 		ret = nvme_setup_write_zeroes(ns, req, cmd);
761 		break;
762 	case REQ_OP_DISCARD:
763 		ret = nvme_setup_discard(ns, req, cmd);
764 		break;
765 	case REQ_OP_READ:
766 	case REQ_OP_WRITE:
767 		ret = nvme_setup_rw(ns, req, cmd);
768 		break;
769 	default:
770 		WARN_ON_ONCE(1);
771 		return BLK_STS_IOERR;
772 	}
773 
774 	cmd->common.command_id = req->tag;
775 	trace_nvme_setup_cmd(req, cmd);
776 	return ret;
777 }
778 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
779 
780 static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
781 {
782 	struct completion *waiting = rq->end_io_data;
783 
784 	rq->end_io_data = NULL;
785 	complete(waiting);
786 }
787 
788 static void nvme_execute_rq_polled(struct request_queue *q,
789 		struct gendisk *bd_disk, struct request *rq, int at_head)
790 {
791 	DECLARE_COMPLETION_ONSTACK(wait);
792 
793 	WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));
794 
795 	rq->cmd_flags |= REQ_HIPRI;
796 	rq->end_io_data = &wait;
797 	blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);
798 
799 	while (!completion_done(&wait)) {
800 		blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
801 		cond_resched();
802 	}
803 }
804 
805 /*
806  * Returns 0 on success.  If the result is negative, it's a Linux error code;
807  * if the result is positive, it's an NVM Express status code
808  */
809 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
810 		union nvme_result *result, void *buffer, unsigned bufflen,
811 		unsigned timeout, int qid, int at_head,
812 		blk_mq_req_flags_t flags, bool poll)
813 {
814 	struct request *req;
815 	int ret;
816 
817 	req = nvme_alloc_request(q, cmd, flags, qid);
818 	if (IS_ERR(req))
819 		return PTR_ERR(req);
820 
821 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
822 
823 	if (buffer && bufflen) {
824 		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
825 		if (ret)
826 			goto out;
827 	}
828 
829 	if (poll)
830 		nvme_execute_rq_polled(req->q, NULL, req, at_head);
831 	else
832 		blk_execute_rq(req->q, NULL, req, at_head);
833 	if (result)
834 		*result = nvme_req(req)->result;
835 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
836 		ret = -EINTR;
837 	else
838 		ret = nvme_req(req)->status;
839  out:
840 	blk_mq_free_request(req);
841 	return ret;
842 }
843 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
844 
845 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
846 		void *buffer, unsigned bufflen)
847 {
848 	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
849 			NVME_QID_ANY, 0, 0, false);
850 }
851 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
852 
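/*
 * A minimal calling sketch: build a zeroed struct nvme_command, fill in the
 * fields of the command of interest and submit it on the right queue, e.g.
 *
 *	struct nvme_command c = { };
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = NVME_ID_CNS_CTRL;
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
 *
 * which is essentially what nvme_identify_ctrl() below does.  A positive
 * return value is the NVMe status code reported by the device.
 */
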
853 static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
854 		unsigned len, u32 seed, bool write)
855 {
856 	struct bio_integrity_payload *bip;
857 	int ret = -ENOMEM;
858 	void *buf;
859 
860 	buf = kmalloc(len, GFP_KERNEL);
861 	if (!buf)
862 		goto out;
863 
864 	ret = -EFAULT;
865 	if (write && copy_from_user(buf, ubuf, len))
866 		goto out_free_meta;
867 
868 	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
869 	if (IS_ERR(bip)) {
870 		ret = PTR_ERR(bip);
871 		goto out_free_meta;
872 	}
873 
874 	bip->bip_iter.bi_size = len;
875 	bip->bip_iter.bi_sector = seed;
876 	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
877 			offset_in_page(buf));
878 	if (ret == len)
879 		return buf;
880 	ret = -ENOMEM;
881 out_free_meta:
882 	kfree(buf);
883 out:
884 	return ERR_PTR(ret);
885 }
886 
887 static int nvme_submit_user_cmd(struct request_queue *q,
888 		struct nvme_command *cmd, void __user *ubuffer,
889 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
890 		u32 meta_seed, u64 *result, unsigned timeout)
891 {
892 	bool write = nvme_is_write(cmd);
893 	struct nvme_ns *ns = q->queuedata;
894 	struct gendisk *disk = ns ? ns->disk : NULL;
895 	struct request *req;
896 	struct bio *bio = NULL;
897 	void *meta = NULL;
898 	int ret;
899 
900 	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
901 	if (IS_ERR(req))
902 		return PTR_ERR(req);
903 
904 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
905 	nvme_req(req)->flags |= NVME_REQ_USERCMD;
906 
907 	if (ubuffer && bufflen) {
908 		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
909 				GFP_KERNEL);
910 		if (ret)
911 			goto out;
912 		bio = req->bio;
913 		bio->bi_disk = disk;
914 		if (disk && meta_buffer && meta_len) {
915 			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
916 					meta_seed, write);
917 			if (IS_ERR(meta)) {
918 				ret = PTR_ERR(meta);
919 				goto out_unmap;
920 			}
921 			req->cmd_flags |= REQ_INTEGRITY;
922 		}
923 	}
924 
925 	blk_execute_rq(req->q, disk, req, 0);
926 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
927 		ret = -EINTR;
928 	else
929 		ret = nvme_req(req)->status;
930 	if (result)
931 		*result = le64_to_cpu(nvme_req(req)->result.u64);
932 	if (meta && !ret && !write) {
933 		if (copy_to_user(meta_buffer, meta, meta_len))
934 			ret = -EFAULT;
935 	}
936 	kfree(meta);
937  out_unmap:
938 	if (bio)
939 		blk_rq_unmap_user(bio);
940  out:
941 	blk_mq_free_request(req);
942 	return ret;
943 }
944 
945 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
946 {
947 	struct nvme_ctrl *ctrl = rq->end_io_data;
948 	unsigned long flags;
949 	bool startka = false;
950 
951 	blk_mq_free_request(rq);
952 
953 	if (status) {
954 		dev_err(ctrl->device,
955 			"failed nvme_keep_alive_end_io error=%d\n",
956 				status);
957 		return;
958 	}
959 
960 	ctrl->comp_seen = false;
961 	spin_lock_irqsave(&ctrl->lock, flags);
962 	if (ctrl->state == NVME_CTRL_LIVE ||
963 	    ctrl->state == NVME_CTRL_CONNECTING)
964 		startka = true;
965 	spin_unlock_irqrestore(&ctrl->lock, flags);
966 	if (startka)
967 		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
968 }
969 
970 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
971 {
972 	struct request *rq;
973 
974 	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
975 			NVME_QID_ANY);
976 	if (IS_ERR(rq))
977 		return PTR_ERR(rq);
978 
979 	rq->timeout = ctrl->kato * HZ;
980 	rq->end_io_data = ctrl;
981 
982 	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
983 
984 	return 0;
985 }
986 
987 static void nvme_keep_alive_work(struct work_struct *work)
988 {
989 	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
990 			struct nvme_ctrl, ka_work);
991 	bool comp_seen = ctrl->comp_seen;
992 
993 	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
994 		dev_dbg(ctrl->device,
995 			"reschedule traffic based keep-alive timer\n");
996 		ctrl->comp_seen = false;
997 		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
998 		return;
999 	}
1000 
1001 	if (nvme_keep_alive(ctrl)) {
1002 		/* allocation failure, reset the controller */
1003 		dev_err(ctrl->device, "keep-alive failed\n");
1004 		nvme_reset_ctrl(ctrl);
1005 		return;
1006 	}
1007 }
1008 
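/*
 * In other words: with Traffic Based Keep Alive (TBKAS) advertised in
 * ctratt, any command completion seen since the last run (comp_seen) counts
 * as proof of liveness and the timer is simply re-armed; an explicit Keep
 * Alive command is only sent once the link has been idle for a whole kato
 * interval.
 */
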
1009 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
1010 {
1011 	if (unlikely(ctrl->kato == 0))
1012 		return;
1013 
1014 	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
1015 }
1016 
1017 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
1018 {
1019 	if (unlikely(ctrl->kato == 0))
1020 		return;
1021 
1022 	cancel_delayed_work_sync(&ctrl->ka_work);
1023 }
1024 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
1025 
1026 /*
1027  * In NVMe 1.0 the CNS field was just a binary controller or namespace
1028  * flag, so sending any newer CNS value is likely to fail.  Qemu
1029  * unfortunately had that bug while reporting 1.1 version compliance
1030  * (but not for any later version).
1031  */
1032 static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
1033 {
1034 	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
1035 		return ctrl->vs < NVME_VS(1, 2, 0);
1036 	return ctrl->vs < NVME_VS(1, 1, 0);
1037 }
1038 
1039 static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
1040 {
1041 	struct nvme_command c = { };
1042 	int error;
1043 
1044 	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1045 	c.identify.opcode = nvme_admin_identify;
1046 	c.identify.cns = NVME_ID_CNS_CTRL;
1047 
1048 	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
1049 	if (!*id)
1050 		return -ENOMEM;
1051 
1052 	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
1053 			sizeof(struct nvme_id_ctrl));
1054 	if (error)
1055 		kfree(*id);
1056 	return error;
1057 }
1058 
1059 static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1060 		struct nvme_ns_id_desc *cur)
1061 {
1062 	const char *warn_str = "ctrl returned bogus length:";
1063 	void *data = cur;
1064 
1065 	switch (cur->nidt) {
1066 	case NVME_NIDT_EUI64:
1067 		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
1068 			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
1069 				 warn_str, cur->nidl);
1070 			return -1;
1071 		}
1072 		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
1073 		return NVME_NIDT_EUI64_LEN;
1074 	case NVME_NIDT_NGUID:
1075 		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
1076 			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
1077 				 warn_str, cur->nidl);
1078 			return -1;
1079 		}
1080 		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
1081 		return NVME_NIDT_NGUID_LEN;
1082 	case NVME_NIDT_UUID:
1083 		if (cur->nidl != NVME_NIDT_UUID_LEN) {
1084 			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
1085 				 warn_str, cur->nidl);
1086 			return -1;
1087 		}
1088 		uuid_copy(&ids->uuid, data + sizeof(*cur));
1089 		return NVME_NIDT_UUID_LEN;
1090 	default:
1091 		/* Skip unknown types */
1092 		return cur->nidl;
1093 	}
1094 }
1095 
1096 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
1097 		struct nvme_ns_ids *ids)
1098 {
1099 	struct nvme_command c = { };
1100 	int status;
1101 	void *data;
1102 	int pos;
1103 	int len;
1104 
1105 	c.identify.opcode = nvme_admin_identify;
1106 	c.identify.nsid = cpu_to_le32(nsid);
1107 	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
1108 
1109 	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
1110 	if (!data)
1111 		return -ENOMEM;
1112 
1113 	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1114 				      NVME_IDENTIFY_DATA_SIZE);
1115 	if (status) {
1116 		dev_warn(ctrl->device,
1117 			"Identify Descriptors failed (%d)\n", status);
1118 		 /*
1119 		  * Don't treat non-retryable errors as fatal, as we potentially
1120 		  * already have a NGUID or EUI-64.  If we failed with DNR set,
1121 		  * we want to silently ignore the error as we can still
1122 		  * identify the device, but if the status does not have DNR
1123 		  * set, we want to propagate the error back specifically for
1124 		  * the disk revalidation flow to make sure we don't abandon
1125 		  * the device just because of a temporary retryable error
1126 		  * (such as a path or transport error).
1127 		  */
1128 		if (status > 0 && (status & NVME_SC_DNR))
1129 			status = 0;
1130 		goto free_data;
1131 	}
1132 
1133 	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
1134 		struct nvme_ns_id_desc *cur = data + pos;
1135 
1136 		if (cur->nidl == 0)
1137 			break;
1138 
1139 		len = nvme_process_ns_desc(ctrl, ids, cur);
1140 		if (len < 0)
1141 			goto free_data;
1142 
1143 		len += sizeof(*cur);
1144 	}
1145 free_data:
1146 	kfree(data);
1147 	return status;
1148 }
1149 
1150 static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
1151 {
1152 	struct nvme_command c = { };
1153 
1154 	c.identify.opcode = nvme_admin_identify;
1155 	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
1156 	c.identify.nsid = cpu_to_le32(nsid);
1157 	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
1158 				    NVME_IDENTIFY_DATA_SIZE);
1159 }
1160 
1161 static int nvme_identify_ns(struct nvme_ctrl *ctrl,
1162 		unsigned nsid, struct nvme_id_ns **id)
1163 {
1164 	struct nvme_command c = { };
1165 	int error;
1166 
1167 	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1168 	c.identify.opcode = nvme_admin_identify;
1169 	c.identify.nsid = cpu_to_le32(nsid);
1170 	c.identify.cns = NVME_ID_CNS_NS;
1171 
1172 	*id = kmalloc(sizeof(**id), GFP_KERNEL);
1173 	if (!*id)
1174 		return -ENOMEM;
1175 
1176 	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
1177 	if (error) {
1178 		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
1179 		kfree(*id);
1180 	}
1181 
1182 	return error;
1183 }
1184 
1185 static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
1186 		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
1187 {
1188 	union nvme_result res = { 0 };
1189 	struct nvme_command c;
1190 	int ret;
1191 
1192 	memset(&c, 0, sizeof(c));
1193 	c.features.opcode = op;
1194 	c.features.fid = cpu_to_le32(fid);
1195 	c.features.dword11 = cpu_to_le32(dword11);
1196 
1197 	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
1198 			buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
1199 	if (ret >= 0 && result)
1200 		*result = le32_to_cpu(res.u32);
1201 	return ret;
1202 }
1203 
1204 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
1205 		      unsigned int dword11, void *buffer, size_t buflen,
1206 		      u32 *result)
1207 {
1208 	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
1209 			     buflen, result);
1210 }
1211 EXPORT_SYMBOL_GPL(nvme_set_features);
1212 
1213 int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
1214 		      unsigned int dword11, void *buffer, size_t buflen,
1215 		      u32 *result)
1216 {
1217 	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
1218 			     buflen, result);
1219 }
1220 EXPORT_SYMBOL_GPL(nvme_get_features);
1221 
1222 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
1223 {
1224 	u32 q_count = (*count - 1) | ((*count - 1) << 16);
1225 	u32 result;
1226 	int status, nr_io_queues;
1227 
1228 	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
1229 			&result);
1230 	if (status < 0)
1231 		return status;
1232 
1233 	/*
1234 	 * Degraded controllers might return an error when setting the queue
1235 	 * count.  We still want to be able to bring them online and offer
1236 	 * access to the admin queue, as that might be the only way to fix them up.
1237 	 */
1238 	if (status > 0) {
1239 		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
1240 		*count = 0;
1241 	} else {
1242 		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
1243 		*count = min(*count, nr_io_queues);
1244 	}
1245 
1246 	return 0;
1247 }
1248 EXPORT_SYMBOL_GPL(nvme_set_queue_count);
1249 
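/*
 * Worked example for the encoding above: a caller asking for *count == 8
 * queues sends q_count == 0x00070007 (seven, zero's based, in both 16-bit
 * halves of the feature value).  If the controller answers with
 * result == 0x000f0007, the usable count is min(0x0007, 0x000f) + 1 == 8.
 */
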
1250 #define NVME_AEN_SUPPORTED \
1251 	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
1252 	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
1253 
1254 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
1255 {
1256 	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
1257 	int status;
1258 
1259 	if (!supported_aens)
1260 		return;
1261 
1262 	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
1263 			NULL, 0, &result);
1264 	if (status)
1265 		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1266 			 supported_aens);
1267 
1268 	queue_work(nvme_wq, &ctrl->async_event_work);
1269 }
1270 
1271 /*
1272  * Convert integer values from ioctl structures to user pointers, silently
1273  * ignoring the upper bits in the compat case to match behaviour of 32-bit
1274  * kernels.
1275  */
1276 static void __user *nvme_to_user_ptr(uintptr_t ptrval)
1277 {
1278 	if (in_compat_syscall())
1279 		ptrval = (compat_uptr_t)ptrval;
1280 	return (void __user *)ptrval;
1281 }
1282 
1283 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1284 {
1285 	struct nvme_user_io io;
1286 	struct nvme_command c;
1287 	unsigned length, meta_len;
1288 	void __user *metadata;
1289 
1290 	if (copy_from_user(&io, uio, sizeof(io)))
1291 		return -EFAULT;
1292 	if (io.flags)
1293 		return -EINVAL;
1294 
1295 	switch (io.opcode) {
1296 	case nvme_cmd_write:
1297 	case nvme_cmd_read:
1298 	case nvme_cmd_compare:
1299 		break;
1300 	default:
1301 		return -EINVAL;
1302 	}
1303 
1304 	length = (io.nblocks + 1) << ns->lba_shift;
1305 	meta_len = (io.nblocks + 1) * ns->ms;
1306 	metadata = nvme_to_user_ptr(io.metadata);
1307 
1308 	if (ns->features & NVME_NS_EXT_LBAS) {
1309 		length += meta_len;
1310 		meta_len = 0;
1311 	} else if (meta_len) {
1312 		if ((io.metadata & 3) || !io.metadata)
1313 			return -EINVAL;
1314 	}
1315 
1316 	memset(&c, 0, sizeof(c));
1317 	c.rw.opcode = io.opcode;
1318 	c.rw.flags = io.flags;
1319 	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
1320 	c.rw.slba = cpu_to_le64(io.slba);
1321 	c.rw.length = cpu_to_le16(io.nblocks);
1322 	c.rw.control = cpu_to_le16(io.control);
1323 	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
1324 	c.rw.reftag = cpu_to_le32(io.reftag);
1325 	c.rw.apptag = cpu_to_le16(io.apptag);
1326 	c.rw.appmask = cpu_to_le16(io.appmask);
1327 
1328 	return nvme_submit_user_cmd(ns->queue, &c,
1329 			nvme_to_user_ptr(io.addr), length,
1330 			metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
1331 }
1332 
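/*
 * User-space view (an illustrative sketch, error handling omitted): a tool
 * holding an open namespace block device such as /dev/nvme0n1 can read a
 * single logical block with
 *
 *	struct nvme_user_io io = { 0 };
 *
 *	io.opcode  = 0x02;			// nvme_cmd_read
 *	io.addr    = (__u64)(uintptr_t)buf;
 *	io.slba    = 0;
 *	io.nblocks = 0;				// zero's based: one block
 *	ret = ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io);
 *
 * where "fd" and "buf" are placeholders for the caller's descriptor and
 * data buffer.
 */
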
1333 static u32 nvme_known_admin_effects(u8 opcode)
1334 {
1335 	switch (opcode) {
1336 	case nvme_admin_format_nvm:
1337 		return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
1338 					NVME_CMD_EFFECTS_CSE_MASK;
1339 	case nvme_admin_sanitize_nvm:
1340 		return NVME_CMD_EFFECTS_CSE_MASK;
1341 	default:
1342 		break;
1343 	}
1344 	return 0;
1345 }
1346 
1347 static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1348 								u8 opcode)
1349 {
1350 	u32 effects = 0;
1351 
1352 	if (ns) {
1353 		if (ctrl->effects)
1354 			effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
1355 		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
1356 			dev_warn(ctrl->device,
1357 				 "IO command:%02x has unhandled effects:%08x\n",
1358 				 opcode, effects);
1359 		return 0;
1360 	}
1361 
1362 	if (ctrl->effects)
1363 		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
1364 	effects |= nvme_known_admin_effects(opcode);
1365 
1366 	/*
1367 	 * For simplicity, IO to all namespaces is quiesced even if the command
1368 	 * effects say only one namespace is affected.
1369 	 */
1370 	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1371 		mutex_lock(&ctrl->scan_lock);
1372 		mutex_lock(&ctrl->subsys->lock);
1373 		nvme_mpath_start_freeze(ctrl->subsys);
1374 		nvme_mpath_wait_freeze(ctrl->subsys);
1375 		nvme_start_freeze(ctrl);
1376 		nvme_wait_freeze(ctrl);
1377 	}
1378 	return effects;
1379 }
1380 
1381 static void nvme_update_formats(struct nvme_ctrl *ctrl)
1382 {
1383 	struct nvme_ns *ns;
1384 
1385 	down_read(&ctrl->namespaces_rwsem);
1386 	list_for_each_entry(ns, &ctrl->namespaces, list)
1387 		if (ns->disk && nvme_revalidate_disk(ns->disk))
1388 			nvme_set_queue_dying(ns);
1389 	up_read(&ctrl->namespaces_rwsem);
1390 }
1391 
1392 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
1393 {
1394 	/*
1395 	 * Revalidate LBA changes prior to unfreezing. This is necessary to
1396 	 * prevent memory corruption if a logical block size was changed by
1397 	 * this command.
1398 	 */
1399 	if (effects & NVME_CMD_EFFECTS_LBCC)
1400 		nvme_update_formats(ctrl);
1401 	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1402 		nvme_unfreeze(ctrl);
1403 		nvme_mpath_unfreeze(ctrl->subsys);
1404 		mutex_unlock(&ctrl->subsys->lock);
1405 		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
1406 		mutex_unlock(&ctrl->scan_lock);
1407 	}
1408 	if (effects & NVME_CMD_EFFECTS_CCC)
1409 		nvme_init_identify(ctrl);
1410 	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
1411 		nvme_queue_scan(ctrl);
1412 		flush_work(&ctrl->scan_work);
1413 	}
1414 }
1415 
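/*
 * nvme_passthru_start() and nvme_passthru_end() are used as a bracketing
 * pair around a user passthrough command: start returns the command's
 * effects (freezing I/O first when the command may change LBA formats or
 * has CSE set), and end unfreezes and re-runs identify/scan work as those
 * effects require.
 */
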
1416 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1417 			struct nvme_passthru_cmd __user *ucmd)
1418 {
1419 	struct nvme_passthru_cmd cmd;
1420 	struct nvme_command c;
1421 	unsigned timeout = 0;
1422 	u32 effects;
1423 	u64 result;
1424 	int status;
1425 
1426 	if (!capable(CAP_SYS_ADMIN))
1427 		return -EACCES;
1428 	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
1429 		return -EFAULT;
1430 	if (cmd.flags)
1431 		return -EINVAL;
1432 
1433 	memset(&c, 0, sizeof(c));
1434 	c.common.opcode = cmd.opcode;
1435 	c.common.flags = cmd.flags;
1436 	c.common.nsid = cpu_to_le32(cmd.nsid);
1437 	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
1438 	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1439 	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
1440 	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
1441 	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
1442 	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
1443 	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
1444 	c.common.cdw15 = cpu_to_le32(cmd.cdw15);
1445 
1446 	if (cmd.timeout_ms)
1447 		timeout = msecs_to_jiffies(cmd.timeout_ms);
1448 
1449 	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
1450 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
1451 			nvme_to_user_ptr(cmd.addr), cmd.data_len,
1452 			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
1453 			0, &result, timeout);
1454 	nvme_passthru_end(ctrl, effects);
1455 
1456 	if (status >= 0) {
1457 		if (put_user(result, &ucmd->result))
1458 			return -EFAULT;
1459 	}
1460 
1461 	return status;
1462 }
1463 
1464 static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1465 			struct nvme_passthru_cmd64 __user *ucmd)
1466 {
1467 	struct nvme_passthru_cmd64 cmd;
1468 	struct nvme_command c;
1469 	unsigned timeout = 0;
1470 	u32 effects;
1471 	int status;
1472 
1473 	if (!capable(CAP_SYS_ADMIN))
1474 		return -EACCES;
1475 	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
1476 		return -EFAULT;
1477 	if (cmd.flags)
1478 		return -EINVAL;
1479 
1480 	memset(&c, 0, sizeof(c));
1481 	c.common.opcode = cmd.opcode;
1482 	c.common.flags = cmd.flags;
1483 	c.common.nsid = cpu_to_le32(cmd.nsid);
1484 	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
1485 	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1486 	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
1487 	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
1488 	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
1489 	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
1490 	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
1491 	c.common.cdw15 = cpu_to_le32(cmd.cdw15);
1492 
1493 	if (cmd.timeout_ms)
1494 		timeout = msecs_to_jiffies(cmd.timeout_ms);
1495 
1496 	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
1497 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
1498 			nvme_to_user_ptr(cmd.addr), cmd.data_len,
1499 			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
1500 			0, &cmd.result, timeout);
1501 	nvme_passthru_end(ctrl, effects);
1502 
1503 	if (status >= 0) {
1504 		if (put_user(cmd.result, &ucmd->result))
1505 			return -EFAULT;
1506 	}
1507 
1508 	return status;
1509 }
1510 
1511 /*
1512  * Issue ioctl requests on the first available path.  Note that unlike normal
1513  * block layer requests we will not retry failed requests on another controller.
1514  */
1515 static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
1516 		struct nvme_ns_head **head, int *srcu_idx)
1517 {
1518 #ifdef CONFIG_NVME_MULTIPATH
1519 	if (disk->fops == &nvme_ns_head_ops) {
1520 		struct nvme_ns *ns;
1521 
1522 		*head = disk->private_data;
1523 		*srcu_idx = srcu_read_lock(&(*head)->srcu);
1524 		ns = nvme_find_path(*head);
1525 		if (!ns)
1526 			srcu_read_unlock(&(*head)->srcu, *srcu_idx);
1527 		return ns;
1528 	}
1529 #endif
1530 	*head = NULL;
1531 	*srcu_idx = -1;
1532 	return disk->private_data;
1533 }
1534 
1535 static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
1536 {
1537 	if (head)
1538 		srcu_read_unlock(&head->srcu, idx);
1539 }
1540 
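/*
 * The two helpers above form a get/put pair: nvme_get_ns_from_disk() takes
 * the head's SRCU read lock in the multipath case and
 * nvme_put_ns_from_disk() drops it again; for non-multipath disks head is
 * NULL and the put is a no-op.
 */
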
1541 static bool is_ctrl_ioctl(unsigned int cmd)
1542 {
1543 	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
1544 		return true;
1545 	if (is_sed_ioctl(cmd))
1546 		return true;
1547 	return false;
1548 }
1549 
1550 static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
1551 				  void __user *argp,
1552 				  struct nvme_ns_head *head,
1553 				  int srcu_idx)
1554 {
1555 	struct nvme_ctrl *ctrl = ns->ctrl;
1556 	int ret;
1557 
1558 	nvme_get_ctrl(ns->ctrl);
1559 	nvme_put_ns_from_disk(head, srcu_idx);
1560 
1561 	switch (cmd) {
1562 	case NVME_IOCTL_ADMIN_CMD:
1563 		ret = nvme_user_cmd(ctrl, NULL, argp);
1564 		break;
1565 	case NVME_IOCTL_ADMIN64_CMD:
1566 		ret = nvme_user_cmd64(ctrl, NULL, argp);
1567 		break;
1568 	default:
1569 		ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
1570 		break;
1571 	}
1572 	nvme_put_ctrl(ctrl);
1573 	return ret;
1574 }
1575 
1576 static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
1577 		unsigned int cmd, unsigned long arg)
1578 {
1579 	struct nvme_ns_head *head = NULL;
1580 	void __user *argp = (void __user *)arg;
1581 	struct nvme_ns *ns;
1582 	int srcu_idx, ret;
1583 
1584 	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
1585 	if (unlikely(!ns))
1586 		return -EWOULDBLOCK;
1587 
1588 	/*
1589 	 * Handle ioctls that apply to the controller instead of the namespace
1590 	 * separately and drop the ns SRCU reference early.  This avoids a
1591 	 * deadlock when deleting namespaces using the passthrough interface.
1592 	 */
1593 	if (is_ctrl_ioctl(cmd))
1594 		return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
1595 
1596 	switch (cmd) {
1597 	case NVME_IOCTL_ID:
1598 		force_successful_syscall_return();
1599 		ret = ns->head->ns_id;
1600 		break;
1601 	case NVME_IOCTL_IO_CMD:
1602 		ret = nvme_user_cmd(ns->ctrl, ns, argp);
1603 		break;
1604 	case NVME_IOCTL_SUBMIT_IO:
1605 		ret = nvme_submit_io(ns, argp);
1606 		break;
1607 	case NVME_IOCTL_IO64_CMD:
1608 		ret = nvme_user_cmd64(ns->ctrl, ns, argp);
1609 		break;
1610 	default:
1611 		if (ns->ndev)
1612 			ret = nvme_nvm_ioctl(ns, cmd, arg);
1613 		else
1614 			ret = -ENOTTY;
1615 	}
1616 
1617 	nvme_put_ns_from_disk(head, srcu_idx);
1618 	return ret;
1619 }
1620 
1621 #ifdef CONFIG_COMPAT
1622 struct nvme_user_io32 {
1623 	__u8	opcode;
1624 	__u8	flags;
1625 	__u16	control;
1626 	__u16	nblocks;
1627 	__u16	rsvd;
1628 	__u64	metadata;
1629 	__u64	addr;
1630 	__u64	slba;
1631 	__u32	dsmgmt;
1632 	__u32	reftag;
1633 	__u16	apptag;
1634 	__u16	appmask;
1635 } __attribute__((__packed__));
1636 
1637 #define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)
1638 
1639 static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
1640 		unsigned int cmd, unsigned long arg)
1641 {
1642 	/*
1643 	 * NVME_IOCTL_SUBMIT_IO has a different value for 32 bit programs
1644 	 * and 64 bit kernels.
1645 	 * The cause is that sizeof(struct nvme_user_io),
1646 	 * which is used to define NVME_IOCTL_SUBMIT_IO,
1647 	 * is not the same for 32 bit and 64 bit compilers.
1648 	 * NVME_IOCTL_SUBMIT_IO32 lets a 64 bit kernel handle
1649 	 * NVME_IOCTL_SUBMIT_IO as issued by 32 bit programs.
1650 	 * All other ioctl numbers are the same for 32 bit and 64 bit,
1651 	 * so no translation is needed for them.
1652 	 */
1653 	if (cmd == NVME_IOCTL_SUBMIT_IO32)
1654 		return nvme_ioctl(bdev, mode, NVME_IOCTL_SUBMIT_IO, arg);
1655 
1656 	return nvme_ioctl(bdev, mode, cmd, arg);
1657 }
1658 #else
1659 #define nvme_compat_ioctl	NULL
1660 #endif /* CONFIG_COMPAT */
1661 
1662 static int nvme_open(struct block_device *bdev, fmode_t mode)
1663 {
1664 	struct nvme_ns *ns = bdev->bd_disk->private_data;
1665 
1666 #ifdef CONFIG_NVME_MULTIPATH
1667 	/* should never be called due to GENHD_FL_HIDDEN */
1668 	if (WARN_ON_ONCE(ns->head->disk))
1669 		goto fail;
1670 #endif
1671 	if (!kref_get_unless_zero(&ns->kref))
1672 		goto fail;
1673 	if (!try_module_get(ns->ctrl->ops->module))
1674 		goto fail_put_ns;
1675 
1676 	return 0;
1677 
1678 fail_put_ns:
1679 	nvme_put_ns(ns);
1680 fail:
1681 	return -ENXIO;
1682 }
1683 
1684 static void nvme_release(struct gendisk *disk, fmode_t mode)
1685 {
1686 	struct nvme_ns *ns = disk->private_data;
1687 
1688 	module_put(ns->ctrl->ops->module);
1689 	nvme_put_ns(ns);
1690 }
1691 
1692 static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1693 {
1694 	/* some standard values */
1695 	geo->heads = 1 << 6;
1696 	geo->sectors = 1 << 5;
1697 	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
1698 	return 0;
1699 }
1700 
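/*
 * The made-up geometry above is self-consistent: 64 heads * 32 sectors per
 * track gives 2048 sectors per cylinder, which is why the cylinder count is
 * the capacity (in 512-byte sectors) shifted right by 11.
 */
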
1701 #ifdef CONFIG_BLK_DEV_INTEGRITY
1702 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
1703 				u32 max_integrity_segments)
1704 {
1705 	struct blk_integrity integrity;
1706 
1707 	memset(&integrity, 0, sizeof(integrity));
1708 	switch (pi_type) {
1709 	case NVME_NS_DPS_PI_TYPE3:
1710 		integrity.profile = &t10_pi_type3_crc;
1711 		integrity.tag_size = sizeof(u16) + sizeof(u32);
1712 		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1713 		break;
1714 	case NVME_NS_DPS_PI_TYPE1:
1715 	case NVME_NS_DPS_PI_TYPE2:
1716 		integrity.profile = &t10_pi_type1_crc;
1717 		integrity.tag_size = sizeof(u16);
1718 		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1719 		break;
1720 	default:
1721 		integrity.profile = NULL;
1722 		break;
1723 	}
1724 	integrity.tuple_size = ms;
1725 	blk_integrity_register(disk, &integrity);
1726 	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
1727 }
1728 #else
1729 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
1730 				u32 max_integrity_segments)
1731 {
1732 }
1733 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1734 
1735 static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
1736 {
1737 	struct nvme_ctrl *ctrl = ns->ctrl;
1738 	struct request_queue *queue = disk->queue;
1739 	u32 size = queue_logical_block_size(queue);
1740 
1741 	if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
1742 		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
1743 		return;
1744 	}
1745 
1746 	if (ctrl->nr_streams && ns->sws && ns->sgs)
1747 		size *= ns->sws * ns->sgs;
1748 
1749 	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
1750 			NVME_DSM_MAX_RANGES);
1751 
1752 	queue->limits.discard_alignment = 0;
1753 	queue->limits.discard_granularity = size;
1754 
1755 	/* If discard is already enabled, don't reset queue limits */
1756 	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
1757 		return;
1758 
1759 	blk_queue_max_discard_sectors(queue, UINT_MAX);
1760 	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
1761 
1762 	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
1763 		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
1764 }
1765 
1766 static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
1767 {
1768 	u64 max_blocks;
1769 
1770 	if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
1771 	    (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
1772 		return;
1773 	/*
1774 	 * Even though the NVMe spec explicitly states that MDTS is not
1775 	 * applicable to Write Zeroes ("The restriction does not apply to
1776 	 * commands that do not transfer data between the host and the
1777 	 * controller (e.g., Write Uncorrectable or Write Zeroes command)"),
1778 	 * be more cautious and use the controller's max_hw_sectors value,
1779 	 * which is configured from the controller's MDTS field in
1780 	 * nvme_init_identify() if available, to cap the maximum sectors
1781 	 * for Write Zeroes.
1782 	 */
1783 	if (ns->ctrl->max_hw_sectors == UINT_MAX)
1784 		max_blocks = (u64)USHRT_MAX + 1;
1785 	else
1786 		max_blocks = ns->ctrl->max_hw_sectors + 1;
1787 
1788 	blk_queue_max_write_zeroes_sectors(disk->queue,
1789 					   nvme_lba_to_sect(ns, max_blocks));
1790 }
1791 
1792 static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
1793 		struct nvme_id_ns *id, struct nvme_ns_ids *ids)
1794 {
1795 	memset(ids, 0, sizeof(*ids));
1796 
1797 	if (ctrl->vs >= NVME_VS(1, 1, 0))
1798 		memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
1799 	if (ctrl->vs >= NVME_VS(1, 2, 0))
1800 		memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
1801 	if (ctrl->vs >= NVME_VS(1, 3, 0))
1802 		return nvme_identify_ns_descs(ctrl, nsid, ids);
1803 	return 0;
1804 }
1805 
1806 static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
1807 {
1808 	return !uuid_is_null(&ids->uuid) ||
1809 		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
1810 		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
1811 }
1812 
1813 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
1814 {
1815 	return uuid_equal(&a->uuid, &b->uuid) &&
1816 		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
1817 		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
1818 }
1819 
1820 static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1821 				 u32 *phys_bs, u32 *io_opt)
1822 {
1823 	struct streams_directive_params s;
1824 	int ret;
1825 
1826 	if (!ctrl->nr_streams)
1827 		return 0;
1828 
1829 	ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
1830 	if (ret)
1831 		return ret;
1832 
1833 	ns->sws = le32_to_cpu(s.sws);
1834 	ns->sgs = le16_to_cpu(s.sgs);
1835 
1836 	if (ns->sws) {
1837 		*phys_bs = ns->sws * (1 << ns->lba_shift);
1838 		if (ns->sgs)
1839 			*io_opt = *phys_bs * ns->sgs;
1840 	}
1841 
1842 	return 0;
1843 }
1844 
1845 static void nvme_update_disk_info(struct gendisk *disk,
1846 		struct nvme_ns *ns, struct nvme_id_ns *id)
1847 {
1848 	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
1849 	unsigned short bs = 1 << ns->lba_shift;
1850 	u32 atomic_bs, phys_bs, io_opt = 0;
1851 
1852 	if (ns->lba_shift > PAGE_SHIFT) {
1853 		/* unsupported block size, set capacity to 0 later */
1854 		bs = (1 << 9);
1855 	}
1856 	blk_mq_freeze_queue(disk->queue);
1857 	blk_integrity_unregister(disk);
1858 
1859 	atomic_bs = phys_bs = bs;
1860 	nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt);
1861 	if (id->nabo == 0) {
1862 		/*
1863 		 * Bit 1 indicates whether NAWUPF is defined for this namespace
1864 		 * and whether it should be used instead of AWUPF. If NAWUPF ==
1865 		 * 0 then AWUPF must be used instead.
1866 		 */
1867 		if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
1868 			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
1869 		else
1870 			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
1871 	}
1872 
1873 	if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
1874 		/* NPWG = Namespace Preferred Write Granularity */
1875 		phys_bs = bs * (1 + le16_to_cpu(id->npwg));
1876 		/* NOWS = Namespace Optimal Write Size */
1877 		io_opt = bs * (1 + le16_to_cpu(id->nows));
1878 	}
1879 
1880 	blk_queue_logical_block_size(disk->queue, bs);
1881 	/*
1882 	 * Linux filesystems assume writing a single physical block is
1883 	 * an atomic operation. Hence limit the physical block size to the
1884 	 * value of the Atomic Write Unit Power Fail parameter.
1885 	 */
1886 	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
1887 	blk_queue_io_min(disk->queue, phys_bs);
1888 	blk_queue_io_opt(disk->queue, io_opt);
1889 
1890 	/*
1891 	 * The block layer can't support LBA sizes larger than the page size
1892 	 * yet, so catch this early and don't allow block I/O.
1893 	 */
1894 	if (ns->lba_shift > PAGE_SHIFT)
1895 		capacity = 0;
1896 
1897 	/*
1898 	 * Register a metadata profile for PI, or the plain non-integrity NVMe
1899 	 * metadata masquerading as Type 0 if supported, otherwise reject block
1900 	 * I/O to namespaces with metadata, except when the namespace supports
1901 	 * PI, as the controller can strip/insert the metadata in that case.
1902 	 */
1903 	if (ns->ms) {
1904 		if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
1905 		    (ns->features & NVME_NS_METADATA_SUPPORTED))
1906 			nvme_init_integrity(disk, ns->ms, ns->pi_type,
1907 					    ns->ctrl->max_integrity_segments);
1908 		else if (!nvme_ns_has_pi(ns))
1909 			capacity = 0;
1910 	}
1911 
1912 	set_capacity_revalidate_and_notify(disk, capacity, false);
1913 
1914 	nvme_config_discard(disk, ns);
1915 	nvme_config_write_zeroes(disk, ns);
1916 
1917 	if (id->nsattr & NVME_NS_ATTR_RO)
1918 		set_disk_ro(disk, true);
1919 	else
1920 		set_disk_ro(disk, false);
1921 
1922 	blk_mq_unfreeze_queue(disk->queue);
1923 }
1924 
1925 static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
1926 {
1927 	struct nvme_ns *ns = disk->private_data;
1928 	struct nvme_ctrl *ctrl = ns->ctrl;
1929 	u32 iob;
1930 
1931 	/*
1932 	 * If identify namespace failed, use default 512 byte block size so
1933 	 * If identify namespace failed, use the default 512 byte block size
1934 	 * so the block layer can be used before read/write fails for 0 capacity.
1935 	ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
1936 	if (ns->lba_shift == 0)
1937 		ns->lba_shift = 9;
1938 
1939 	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
1940 	    is_power_of_2(ctrl->max_hw_sectors))
1941 		iob = ctrl->max_hw_sectors;
1942 	else
1943 		iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
1944 
1945 	ns->features = 0;
1946 	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
1947 	/* the PI implementation requires metadata equal to the T10 PI tuple size */
1948 	if (ns->ms == sizeof(struct t10_pi_tuple))
1949 		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
1950 	else
1951 		ns->pi_type = 0;
1952 
1953 	if (ns->ms) {
1954 		/*
1955 		 * For PCIe only the separate metadata pointer is supported,
1956 		 * as the block layer supplies metadata in a separate bio_vec
1957 		 * chain. For Fabrics, only metadata as part of extended data
1958 		 * LBA is supported on the wire per the Fabrics specification,
1959 		 * but the HBA/HCA will do the remapping from the separate
1960 		 * metadata buffers for us.
1961 		 */
1962 		if (id->flbas & NVME_NS_FLBAS_META_EXT) {
1963 			ns->features |= NVME_NS_EXT_LBAS;
1964 			if ((ctrl->ops->flags & NVME_F_FABRICS) &&
1965 			    (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED) &&
1966 			    ctrl->max_integrity_segments)
1967 				ns->features |= NVME_NS_METADATA_SUPPORTED;
1968 		} else {
1969 			if (WARN_ON_ONCE(ctrl->ops->flags & NVME_F_FABRICS))
1970 				return -EINVAL;
1971 			if (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
1972 				ns->features |= NVME_NS_METADATA_SUPPORTED;
1973 		}
1974 	}
1975 
1976 	if (iob)
1977 		blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(iob));
1978 	nvme_update_disk_info(disk, ns, id);
1979 #ifdef CONFIG_NVME_MULTIPATH
1980 	if (ns->head->disk) {
1981 		nvme_update_disk_info(ns->head->disk, ns, id);
1982 		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
1983 	}
1984 #endif
1985 	return 0;
1986 }
1987 
1988 static int nvme_revalidate_disk(struct gendisk *disk)
1989 {
1990 	struct nvme_ns *ns = disk->private_data;
1991 	struct nvme_ctrl *ctrl = ns->ctrl;
1992 	struct nvme_id_ns *id;
1993 	struct nvme_ns_ids ids;
1994 	int ret = 0;
1995 
1996 	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
1997 		set_capacity(disk, 0);
1998 		return -ENODEV;
1999 	}
2000 
2001 	ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id);
2002 	if (ret)
2003 		goto out;
2004 
2005 	if (id->ncap == 0) {
2006 		ret = -ENODEV;
2007 		goto free_id;
2008 	}
2009 
2010 	ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
2011 	if (ret)
2012 		goto free_id;
2013 
2014 	if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
2015 		dev_err(ctrl->device,
2016 			"identifiers changed for nsid %d\n", ns->head->ns_id);
2017 		ret = -ENODEV;
2018 		goto free_id;
2019 	}
2020 
2021 	ret = __nvme_revalidate_disk(disk, id);
2022 free_id:
2023 	kfree(id);
2024 out:
2025 	/*
2026 	 * Only fail the function if we got a fatal error back from the
2027 	 * device, otherwise ignore the error and just move on.
2028 	 */
2029 	if (ret == -ENOMEM || (ret > 0 && !(ret & NVME_SC_DNR)))
2030 		ret = 0;
2031 	else if (ret > 0)
2032 		ret = blk_status_to_errno(nvme_error_status(ret));
2033 	return ret;
2034 }
2035 
2036 static char nvme_pr_type(enum pr_type type)
2037 {
2038 	switch (type) {
2039 	case PR_WRITE_EXCLUSIVE:
2040 		return 1;
2041 	case PR_EXCLUSIVE_ACCESS:
2042 		return 2;
2043 	case PR_WRITE_EXCLUSIVE_REG_ONLY:
2044 		return 3;
2045 	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
2046 		return 4;
2047 	case PR_WRITE_EXCLUSIVE_ALL_REGS:
2048 		return 5;
2049 	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
2050 		return 6;
2051 	default:
2052 		return 0;
2053 	}
2054 };
2055 
2056 static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
2057 				u64 key, u64 sa_key, u8 op)
2058 {
2059 	struct nvme_ns_head *head = NULL;
2060 	struct nvme_ns *ns;
2061 	struct nvme_command c;
2062 	int srcu_idx, ret;
2063 	u8 data[16] = { 0, };
2064 
2065 	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
2066 	if (unlikely(!ns))
2067 		return -EWOULDBLOCK;
2068 
2069 	put_unaligned_le64(key, &data[0]);
2070 	put_unaligned_le64(sa_key, &data[8]);
2071 
2072 	memset(&c, 0, sizeof(c));
2073 	c.common.opcode = op;
2074 	c.common.nsid = cpu_to_le32(ns->head->ns_id);
2075 	c.common.cdw10 = cpu_to_le32(cdw10);
2076 
2077 	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
2078 	nvme_put_ns_from_disk(head, srcu_idx);
2079 	return ret;
2080 }
2081 
2082 static int nvme_pr_register(struct block_device *bdev, u64 old,
2083 		u64 new, unsigned flags)
2084 {
2085 	u32 cdw10;
2086 
2087 	if (flags & ~PR_FL_IGNORE_KEY)
2088 		return -EOPNOTSUPP;
2089 
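	/*
	 * CDW10 layout for Reservation Register: RREGA in bits 02:00
	 * (0 = register, 2 = replace), IEKEY in bit 03, and CPTPL in bits
	 * 31:30, where 11b asks for the registration to persist through
	 * power loss.
	 */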
2090 	cdw10 = old ? 2 : 0;
2091 	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
2092 	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
2093 	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
2094 }
2095 
2096 static int nvme_pr_reserve(struct block_device *bdev, u64 key,
2097 		enum pr_type type, unsigned flags)
2098 {
2099 	u32 cdw10;
2100 
2101 	if (flags & ~PR_FL_IGNORE_KEY)
2102 		return -EOPNOTSUPP;
2103 
2104 	cdw10 = nvme_pr_type(type) << 8;
2105 	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
2106 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
2107 }
2108 
2109 static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
2110 		enum pr_type type, bool abort)
2111 {
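	/* RACQA: 1 = preempt, 2 = preempt and abort; RTYPE goes in bits 15:08 */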
2112 	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
2113 	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
2114 }
2115 
2116 static int nvme_pr_clear(struct block_device *bdev, u64 key)
2117 {
2118 	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
2119 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
2120 }
2121 
2122 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
2123 {
2124 	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
2125 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
2126 }
2127 
2128 static const struct pr_ops nvme_pr_ops = {
2129 	.pr_register	= nvme_pr_register,
2130 	.pr_reserve	= nvme_pr_reserve,
2131 	.pr_release	= nvme_pr_release,
2132 	.pr_preempt	= nvme_pr_preempt,
2133 	.pr_clear	= nvme_pr_clear,
2134 };
2135 
2136 #ifdef CONFIG_BLK_SED_OPAL
2137 int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
2138 		bool send)
2139 {
2140 	struct nvme_ctrl *ctrl = data;
2141 	struct nvme_command cmd;
2142 
2143 	memset(&cmd, 0, sizeof(cmd));
2144 	if (send)
2145 		cmd.common.opcode = nvme_admin_security_send;
2146 	else
2147 		cmd.common.opcode = nvme_admin_security_recv;
2148 	cmd.common.nsid = 0;
2149 	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
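	/*
	 * SECP goes in CDW10 bits 31:24 and SPSP in bits 23:08; CDW11 holds
	 * the transfer length in bytes.
	 */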
2150 	cmd.common.cdw11 = cpu_to_le32(len);
2151 
2152 	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
2153 				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
2154 }
2155 EXPORT_SYMBOL_GPL(nvme_sec_submit);
2156 #endif /* CONFIG_BLK_SED_OPAL */
2157 
2158 static const struct block_device_operations nvme_fops = {
2159 	.owner		= THIS_MODULE,
2160 	.ioctl		= nvme_ioctl,
2161 	.compat_ioctl	= nvme_compat_ioctl,
2162 	.open		= nvme_open,
2163 	.release	= nvme_release,
2164 	.getgeo		= nvme_getgeo,
2165 	.revalidate_disk = nvme_revalidate_disk,
2166 	.pr_ops		= &nvme_pr_ops,
2167 };
2168 
2169 #ifdef CONFIG_NVME_MULTIPATH
2170 static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
2171 {
2172 	struct nvme_ns_head *head = bdev->bd_disk->private_data;
2173 
2174 	if (!kref_get_unless_zero(&head->ref))
2175 		return -ENXIO;
2176 	return 0;
2177 }
2178 
2179 static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
2180 {
2181 	nvme_put_ns_head(disk->private_data);
2182 }
2183 
2184 const struct block_device_operations nvme_ns_head_ops = {
2185 	.owner		= THIS_MODULE,
2186 	.open		= nvme_ns_head_open,
2187 	.release	= nvme_ns_head_release,
2188 	.ioctl		= nvme_ioctl,
2189 	.compat_ioctl	= nvme_compat_ioctl,
2190 	.getgeo		= nvme_getgeo,
2191 	.pr_ops		= &nvme_pr_ops,
2192 };
2193 #endif /* CONFIG_NVME_MULTIPATH */
2194 
2195 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
2196 {
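	/* CAP.TO is in 500 ms units, so wait up to (CAP.TO + 1) * 500 ms. */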
2197 	unsigned long timeout =
2198 		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
2199 	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
2200 	int ret;
2201 
2202 	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2203 		if (csts == ~0)
2204 			return -ENODEV;
2205 		if ((csts & NVME_CSTS_RDY) == bit)
2206 			break;
2207 
2208 		usleep_range(1000, 2000);
2209 		if (fatal_signal_pending(current))
2210 			return -EINTR;
2211 		if (time_after(jiffies, timeout)) {
2212 			dev_err(ctrl->device,
2213 				"Device not ready; aborting %s, CSTS=0x%x\n",
2214 				enabled ? "initialisation" : "reset", csts);
2215 			return -ENODEV;
2216 		}
2217 	}
2218 
2219 	return ret;
2220 }
2221 
2222 /*
2223  * If the device has been passed off to us in an enabled state, just clear
2224  * the enabled bit.  The spec says we should set the 'shutdown notification
2225  * bits', but doing so may cause the device to complete commands to the
2226  * admin queue ... and we don't know what memory that might be pointing at!
2227  */
2228 int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
2229 {
2230 	int ret;
2231 
2232 	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2233 	ctrl->ctrl_config &= ~NVME_CC_ENABLE;
2234 
2235 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2236 	if (ret)
2237 		return ret;
2238 
2239 	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
2240 		msleep(NVME_QUIRK_DELAY_AMOUNT);
2241 
2242 	return nvme_wait_ready(ctrl, ctrl->cap, false);
2243 }
2244 EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
2245 
2246 int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
2247 {
2248 	/*
2249 	 * Default to a 4K page size, with the intention to update this
2250 	 * path in the future to accommodate architectures with differing
2251 	 * kernel and IO page sizes.
2252 	 */
2253 	unsigned dev_page_min, page_shift = 12;
2254 	int ret;
2255 
2256 	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2257 	if (ret) {
2258 		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
2259 		return ret;
2260 	}
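	/*
	 * CAP.MPSMIN is an exponent relative to 4K: the minimum device page
	 * size is 2 ^ (12 + MPSMIN) bytes.
	 */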
2261 	dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2262 
2263 	if (page_shift < dev_page_min) {
2264 		dev_err(ctrl->device,
2265 			"Minimum device page size %u too large for host (%u)\n",
2266 			1 << dev_page_min, 1 << page_shift);
2267 		return -ENODEV;
2268 	}
2269 
2270 	ctrl->page_size = 1 << page_shift;
2271 
2272 	ctrl->ctrl_config = NVME_CC_CSS_NVM;
2273 	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
2274 	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2275 	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
2276 	ctrl->ctrl_config |= NVME_CC_ENABLE;
2277 
2278 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2279 	if (ret)
2280 		return ret;
2281 	return nvme_wait_ready(ctrl, ctrl->cap, true);
2282 }
2283 EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
2284 
2285 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
2286 {
2287 	unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
2288 	u32 csts;
2289 	int ret;
2290 
2291 	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2292 	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
2293 
2294 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2295 	if (ret)
2296 		return ret;
2297 
2298 	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2299 		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
2300 			break;
2301 
2302 		msleep(100);
2303 		if (fatal_signal_pending(current))
2304 			return -EINTR;
2305 		if (time_after(jiffies, timeout)) {
2306 			dev_err(ctrl->device,
2307 				"Device shutdown incomplete; abort shutdown\n");
2308 			return -ENODEV;
2309 		}
2310 	}
2311 
2312 	return ret;
2313 }
2314 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
2315 
2316 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
2317 		struct request_queue *q)
2318 {
2319 	bool vwc = false;
2320 
2321 	if (ctrl->max_hw_sectors) {
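		/*
		 * max_hw_sectors is in 512 byte units; dividing by the number
		 * of sectors per controller page gives the page count of a
		 * maximal transfer.  The extra segment presumably covers a
		 * transfer that does not start on a page boundary.
		 */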
2322 		u32 max_segments =
2323 			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
2324 
2325 		max_segments = min_not_zero(max_segments, ctrl->max_segments);
2326 		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
2327 		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
2328 	}
2329 	blk_queue_virt_boundary(q, ctrl->page_size - 1);
2330 	blk_queue_dma_alignment(q, 7);
2331 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
2332 		vwc = true;
2333 	blk_queue_write_cache(q, vwc, vwc);
2334 }
2335 
2336 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
2337 {
2338 	__le64 ts;
2339 	int ret;
2340 
2341 	if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
2342 		return 0;
2343 
2344 	ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
2345 	ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
2346 			NULL);
2347 	if (ret)
2348 		dev_warn_once(ctrl->device,
2349 			"could not set timestamp (%d)\n", ret);
2350 	return ret;
2351 }
2352 
2353 static int nvme_configure_acre(struct nvme_ctrl *ctrl)
2354 {
2355 	struct nvme_feat_host_behavior *host;
2356 	int ret;
2357 
2358 	/* Don't bother enabling the feature if retry delay is not reported */
2359 	if (!ctrl->crdt[0])
2360 		return 0;
2361 
2362 	host = kzalloc(sizeof(*host), GFP_KERNEL);
2363 	if (!host)
2364 		return 0;
2365 
2366 	host->acre = NVME_ENABLE_ACRE;
2367 	ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
2368 				host, sizeof(*host), NULL);
2369 	kfree(host);
2370 	return ret;
2371 }
2372 
2373 static int nvme_configure_apst(struct nvme_ctrl *ctrl)
2374 {
2375 	/*
2376 	 * APST (Autonomous Power State Transition) lets us program a
2377 	 * table of power state transitions that the controller will
2378 	 * perform automatically.  We configure it with a simple
2379 	 * heuristic: we are willing to spend at most 2% of the time
2380 	 * transitioning between power states.  Therefore, when running
2381 	 * in any given state, we will enter the next lower-power
2382 	 * non-operational state after waiting 50 * (enlat + exlat)
2383 	 * microseconds, as long as that state's exit latency is under
2384 	 * the requested maximum latency.
2385 	 *
2386 	 * We will not autonomously enter any non-operational state for
2387 	 * which the total latency exceeds ps_max_latency_us.  Users
2388 	 * can set ps_max_latency_us to zero to turn off APST.
2389 	 */
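	/*
	 * For example, a non-operational state with enlat + exlat = 10 ms
	 * gets an idle timer of 50 * 10 ms = 500 ms, so at most roughly 2%
	 * of the time is spent entering and leaving that state.
	 */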
2390 
2391 	unsigned apste;
2392 	struct nvme_feat_auto_pst *table;
2393 	u64 max_lat_us = 0;
2394 	int max_ps = -1;
2395 	int ret;
2396 
2397 	/*
2398 	 * If APST isn't supported or if we haven't been initialized yet,
2399 	 * then don't do anything.
2400 	 */
2401 	if (!ctrl->apsta)
2402 		return 0;
2403 
2404 	if (ctrl->npss > 31) {
2405 		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2406 		return 0;
2407 	}
2408 
2409 	table = kzalloc(sizeof(*table), GFP_KERNEL);
2410 	if (!table)
2411 		return 0;
2412 
2413 	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
2414 		/* Turn off APST. */
2415 		apste = 0;
2416 		dev_dbg(ctrl->device, "APST disabled\n");
2417 	} else {
2418 		__le64 target = cpu_to_le64(0);
2419 		int state;
2420 
2421 		/*
2422 		 * Walk through all states from lowest- to highest-power.
2423 		 * According to the spec, lower-numbered states use more
2424 		 * power.  NPSS, despite the name, is the index of the
2425 		 * lowest-power state, not the number of states.
2426 		 */
2427 		for (state = (int)ctrl->npss; state >= 0; state--) {
2428 			u64 total_latency_us, exit_latency_us, transition_ms;
2429 
2430 			if (target)
2431 				table->entries[state] = target;
2432 
2433 			/*
2434 			 * Don't allow transitions to the deepest state
2435 			 * if it's quirked off.
2436 			 */
2437 			if (state == ctrl->npss &&
2438 			    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2439 				continue;
2440 
2441 			/*
2442 			 * Is this state a useful non-operational state for
2443 			 * higher-power states to autonomously transition to?
2444 			 */
2445 			if (!(ctrl->psd[state].flags &
2446 			      NVME_PS_FLAGS_NON_OP_STATE))
2447 				continue;
2448 
2449 			exit_latency_us =
2450 				(u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2451 			if (exit_latency_us > ctrl->ps_max_latency_us)
2452 				continue;
2453 
2454 			total_latency_us =
2455 				exit_latency_us +
2456 				le32_to_cpu(ctrl->psd[state].entry_lat);
2457 
2458 			/*
2459 			 * This state is good.  Use it as the APST idle
2460 			 * target for higher power states.
2461 			 */
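			/*
			 * 50 * total_latency_us in microseconds equals
			 * total_latency_us / 20 in milliseconds, rounded up.
			 */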
2462 			transition_ms = total_latency_us + 19;
2463 			do_div(transition_ms, 20);
2464 			if (transition_ms > (1 << 24) - 1)
2465 				transition_ms = (1 << 24) - 1;
2466 
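			/*
			 * APST table entry layout: bits 07:03 hold the idle
			 * transition power state (ITPS) and bits 31:08 the
			 * idle time prior to transition (ITPT) in ms.
			 */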
2467 			target = cpu_to_le64((state << 3) |
2468 					     (transition_ms << 8));
2469 
2470 			if (max_ps == -1)
2471 				max_ps = state;
2472 
2473 			if (total_latency_us > max_lat_us)
2474 				max_lat_us = total_latency_us;
2475 		}
2476 
2477 		apste = 1;
2478 
2479 		if (max_ps == -1) {
2480 			dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2481 		} else {
2482 			dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2483 				max_ps, max_lat_us, (int)sizeof(*table), table);
2484 		}
2485 	}
2486 
2487 	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
2488 				table, sizeof(*table), NULL);
2489 	if (ret)
2490 		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
2491 
2492 	kfree(table);
2493 	return ret;
2494 }
2495 
2496 static void nvme_set_latency_tolerance(struct device *dev, s32 val)
2497 {
2498 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2499 	u64 latency;
2500 
2501 	switch (val) {
2502 	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
2503 	case PM_QOS_LATENCY_ANY:
2504 		latency = U64_MAX;
2505 		break;
2506 
2507 	default:
2508 		latency = val;
2509 	}
2510 
2511 	if (ctrl->ps_max_latency_us != latency) {
2512 		ctrl->ps_max_latency_us = latency;
2513 		nvme_configure_apst(ctrl);
2514 	}
2515 }
2516 
2517 struct nvme_core_quirk_entry {
2518 	/*
2519 	 * NVMe model and firmware strings are padded with spaces.  For
2520 	 * simplicity, strings in the quirk table are padded with NULLs
2521 	 * instead.
2522 	 */
2523 	u16 vid;
2524 	const char *mn;
2525 	const char *fr;
2526 	unsigned long quirks;
2527 };
2528 
2529 static const struct nvme_core_quirk_entry core_quirks[] = {
2530 	{
2531 		/*
2532 		 * This Toshiba device seems to die using any APST states.  See:
2533 		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
2534 		 */
2535 		.vid = 0x1179,
2536 		.mn = "THNSF5256GPUK TOSHIBA",
2537 		.quirks = NVME_QUIRK_NO_APST,
2538 	},
2539 	{
2540 		/*
2541 		 * This LiteON CL1-3D*-Q11 firmware version has a race
2542 		 * condition associated with actions related to suspend to idle.
2543 		 * LiteON has resolved the problem in future firmware.
2544 		 */
2545 		.vid = 0x14a4,
2546 		.fr = "22301111",
2547 		.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2548 	}
2549 };
2550 
2551 /* match is null-terminated but idstr is space-padded. */
2552 static bool string_matches(const char *idstr, const char *match, size_t len)
2553 {
2554 	size_t matchlen;
2555 
2556 	if (!match)
2557 		return true;
2558 
2559 	matchlen = strlen(match);
2560 	WARN_ON_ONCE(matchlen > len);
2561 
2562 	if (memcmp(idstr, match, matchlen))
2563 		return false;
2564 
2565 	for (; matchlen < len; matchlen++)
2566 		if (idstr[matchlen] != ' ')
2567 			return false;
2568 
2569 	return true;
2570 }
2571 
2572 static bool quirk_matches(const struct nvme_id_ctrl *id,
2573 			  const struct nvme_core_quirk_entry *q)
2574 {
2575 	return q->vid == le16_to_cpu(id->vid) &&
2576 		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
2577 		string_matches(id->fr, q->fr, sizeof(id->fr));
2578 }
2579 
2580 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
2581 		struct nvme_id_ctrl *id)
2582 {
2583 	size_t nqnlen;
2584 	int off;
2585 
2586 	if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
2587 		nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
2588 		if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
2589 			strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2590 			return;
2591 		}
2592 
2593 		if (ctrl->vs >= NVME_VS(1, 2, 1))
2594 			dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2595 	}
2596 
2597 	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
2598 	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2599 			"nqn.2014.08.org.nvmexpress:%04x%04x",
2600 			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2601 	memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2602 	off += sizeof(id->sn);
2603 	memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
2604 	off += sizeof(id->mn);
2605 	memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
2606 }
2607 
2608 static void nvme_release_subsystem(struct device *dev)
2609 {
2610 	struct nvme_subsystem *subsys =
2611 		container_of(dev, struct nvme_subsystem, dev);
2612 
2613 	if (subsys->instance >= 0)
2614 		ida_simple_remove(&nvme_instance_ida, subsys->instance);
2615 	kfree(subsys);
2616 }
2617 
2618 static void nvme_destroy_subsystem(struct kref *ref)
2619 {
2620 	struct nvme_subsystem *subsys =
2621 			container_of(ref, struct nvme_subsystem, ref);
2622 
2623 	mutex_lock(&nvme_subsystems_lock);
2624 	list_del(&subsys->entry);
2625 	mutex_unlock(&nvme_subsystems_lock);
2626 
2627 	ida_destroy(&subsys->ns_ida);
2628 	device_del(&subsys->dev);
2629 	put_device(&subsys->dev);
2630 }
2631 
2632 static void nvme_put_subsystem(struct nvme_subsystem *subsys)
2633 {
2634 	kref_put(&subsys->ref, nvme_destroy_subsystem);
2635 }
2636 
2637 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
2638 {
2639 	struct nvme_subsystem *subsys;
2640 
2641 	lockdep_assert_held(&nvme_subsystems_lock);
2642 
2643 	/*
2644 	 * Fail matches for discovery subsystems. This results
2645 	 * in each discovery controller being bound to a unique subsystem.
2646 	 * This avoids issues with validating controller values
2647 	 * that can only be true when there is a single unique subsystem.
2648 	 * There may be multiple and completely independent entities
2649 	 * that provide discovery controllers.
2650 	 */
2651 	if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
2652 		return NULL;
2653 
2654 	list_for_each_entry(subsys, &nvme_subsystems, entry) {
2655 		if (strcmp(subsys->subnqn, subsysnqn))
2656 			continue;
2657 		if (!kref_get_unless_zero(&subsys->ref))
2658 			continue;
2659 		return subsys;
2660 	}
2661 
2662 	return NULL;
2663 }
2664 
2665 #define SUBSYS_ATTR_RO(_name, _mode, _show)			\
2666 	struct device_attribute subsys_attr_##_name = \
2667 		__ATTR(_name, _mode, _show, NULL)
2668 
2669 static ssize_t nvme_subsys_show_nqn(struct device *dev,
2670 				    struct device_attribute *attr,
2671 				    char *buf)
2672 {
2673 	struct nvme_subsystem *subsys =
2674 		container_of(dev, struct nvme_subsystem, dev);
2675 
2676 	return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
2677 }
2678 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
2679 
2680 #define nvme_subsys_show_str_function(field)				\
2681 static ssize_t subsys_##field##_show(struct device *dev,		\
2682 			    struct device_attribute *attr, char *buf)	\
2683 {									\
2684 	struct nvme_subsystem *subsys =					\
2685 		container_of(dev, struct nvme_subsystem, dev);		\
2686 	return sprintf(buf, "%.*s\n",					\
2687 		       (int)sizeof(subsys->field), subsys->field);	\
2688 }									\
2689 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
2690 
2691 nvme_subsys_show_str_function(model);
2692 nvme_subsys_show_str_function(serial);
2693 nvme_subsys_show_str_function(firmware_rev);
2694 
2695 static struct attribute *nvme_subsys_attrs[] = {
2696 	&subsys_attr_model.attr,
2697 	&subsys_attr_serial.attr,
2698 	&subsys_attr_firmware_rev.attr,
2699 	&subsys_attr_subsysnqn.attr,
2700 #ifdef CONFIG_NVME_MULTIPATH
2701 	&subsys_attr_iopolicy.attr,
2702 #endif
2703 	NULL,
2704 };
2705 
2706 static struct attribute_group nvme_subsys_attrs_group = {
2707 	.attrs = nvme_subsys_attrs,
2708 };
2709 
2710 static const struct attribute_group *nvme_subsys_attrs_groups[] = {
2711 	&nvme_subsys_attrs_group,
2712 	NULL,
2713 };
2714 
2715 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
2716 		struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2717 {
2718 	struct nvme_ctrl *tmp;
2719 
2720 	lockdep_assert_held(&nvme_subsystems_lock);
2721 
2722 	list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
2723 		if (nvme_state_terminal(tmp))
2724 			continue;
2725 
2726 		if (tmp->cntlid == ctrl->cntlid) {
2727 			dev_err(ctrl->device,
2728 				"Duplicate cntlid %u with %s, rejecting\n",
2729 				ctrl->cntlid, dev_name(tmp->device));
2730 			return false;
2731 		}
2732 
2733 		if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
2734 		    (ctrl->opts && ctrl->opts->discovery_nqn))
2735 			continue;
2736 
2737 		dev_err(ctrl->device,
2738 			"Subsystem does not support multiple controllers\n");
2739 		return false;
2740 	}
2741 
2742 	return true;
2743 }
2744 
2745 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2746 {
2747 	struct nvme_subsystem *subsys, *found;
2748 	int ret;
2749 
2750 	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
2751 	if (!subsys)
2752 		return -ENOMEM;
2753 
2754 	subsys->instance = -1;
2755 	mutex_init(&subsys->lock);
2756 	kref_init(&subsys->ref);
2757 	INIT_LIST_HEAD(&subsys->ctrls);
2758 	INIT_LIST_HEAD(&subsys->nsheads);
2759 	nvme_init_subnqn(subsys, ctrl, id);
2760 	memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
2761 	memcpy(subsys->model, id->mn, sizeof(subsys->model));
2762 	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
2763 	subsys->vendor_id = le16_to_cpu(id->vid);
2764 	subsys->cmic = id->cmic;
2765 	subsys->awupf = le16_to_cpu(id->awupf);
2766 #ifdef CONFIG_NVME_MULTIPATH
2767 	subsys->iopolicy = NVME_IOPOLICY_NUMA;
2768 #endif
2769 
2770 	subsys->dev.class = nvme_subsys_class;
2771 	subsys->dev.release = nvme_release_subsystem;
2772 	subsys->dev.groups = nvme_subsys_attrs_groups;
2773 	dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
2774 	device_initialize(&subsys->dev);
2775 
2776 	mutex_lock(&nvme_subsystems_lock);
2777 	found = __nvme_find_get_subsystem(subsys->subnqn);
2778 	if (found) {
2779 		put_device(&subsys->dev);
2780 		subsys = found;
2781 
2782 		if (!nvme_validate_cntlid(subsys, ctrl, id)) {
2783 			ret = -EINVAL;
2784 			goto out_put_subsystem;
2785 		}
2786 	} else {
2787 		ret = device_add(&subsys->dev);
2788 		if (ret) {
2789 			dev_err(ctrl->device,
2790 				"failed to register subsystem device.\n");
2791 			put_device(&subsys->dev);
2792 			goto out_unlock;
2793 		}
2794 		ida_init(&subsys->ns_ida);
2795 		list_add_tail(&subsys->entry, &nvme_subsystems);
2796 	}
2797 
2798 	ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
2799 				dev_name(ctrl->device));
2800 	if (ret) {
2801 		dev_err(ctrl->device,
2802 			"failed to create sysfs link from subsystem.\n");
2803 		goto out_put_subsystem;
2804 	}
2805 
2806 	if (!found)
2807 		subsys->instance = ctrl->instance;
2808 	ctrl->subsys = subsys;
2809 	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
2810 	mutex_unlock(&nvme_subsystems_lock);
2811 	return 0;
2812 
2813 out_put_subsystem:
2814 	nvme_put_subsystem(subsys);
2815 out_unlock:
2816 	mutex_unlock(&nvme_subsystems_lock);
2817 	return ret;
2818 }
2819 
2820 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
2821 		void *log, size_t size, u64 offset)
2822 {
2823 	struct nvme_command c = { };
2824 	u32 dwlen = nvme_bytes_to_numd(size);
2825 
2826 	c.get_log_page.opcode = nvme_admin_get_log_page;
2827 	c.get_log_page.nsid = cpu_to_le32(nsid);
2828 	c.get_log_page.lid = log_page;
2829 	c.get_log_page.lsp = lsp;
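	/* NUMD, the 0's based dword count, is split across NUMDL and NUMDU */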
2830 	c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
2831 	c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
2832 	c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
2833 	c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
2834 
2835 	return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
2836 }
2837 
2838 static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
2839 {
2840 	int ret;
2841 
2842 	if (!ctrl->effects)
2843 		ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
2844 
2845 	if (!ctrl->effects)
2846 		return 0;
2847 
2848 	ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0,
2849 			ctrl->effects, sizeof(*ctrl->effects), 0);
2850 	if (ret) {
2851 		kfree(ctrl->effects);
2852 		ctrl->effects = NULL;
2853 	}
2854 	return ret;
2855 }
2856 
2857 /*
2858  * Initialize the cached copies of the Identify data and various controller
2859  * registers in our nvme_ctrl structure.  This should be called as soon as
2860  * the admin queue is fully up and running.
2861  */
2862 int nvme_init_identify(struct nvme_ctrl *ctrl)
2863 {
2864 	struct nvme_id_ctrl *id;
2865 	int ret, page_shift;
2866 	u32 max_hw_sectors;
2867 	bool prev_apst_enabled;
2868 
2869 	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
2870 	if (ret) {
2871 		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
2872 		return ret;
2873 	}
2874 	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2875 	ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
2876 
2877 	if (ctrl->vs >= NVME_VS(1, 1, 0))
2878 		ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
2879 
2880 	ret = nvme_identify_ctrl(ctrl, &id);
2881 	if (ret) {
2882 		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
2883 		return -EIO;
2884 	}
2885 
2886 	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
2887 		ret = nvme_get_effects_log(ctrl);
2888 		if (ret < 0)
2889 			goto out_free;
2890 	}
2891 
2892 	if (!(ctrl->ops->flags & NVME_F_FABRICS))
2893 		ctrl->cntlid = le16_to_cpu(id->cntlid);
2894 
2895 	if (!ctrl->identified) {
2896 		int i;
2897 
2898 		ret = nvme_init_subsystem(ctrl, id);
2899 		if (ret)
2900 			goto out_free;
2901 
2902 		/*
2903 		 * Check for quirks.  Quirks can depend on firmware version,
2904 		 * so, in principle, the set of quirks present can change
2905 		 * across a reset.  As a possible future enhancement, we
2906 		 * could re-scan for quirks every time we reinitialize
2907 		 * the device, but we'd have to make sure that the driver
2908 		 * behaves intelligently if the quirks change.
2909 		 */
2910 		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
2911 			if (quirk_matches(id, &core_quirks[i]))
2912 				ctrl->quirks |= core_quirks[i].quirks;
2913 		}
2914 	}
2915 
2916 	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
2917 		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
2918 		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
2919 	}
2920 
2921 	ctrl->crdt[0] = le16_to_cpu(id->crdt1);
2922 	ctrl->crdt[1] = le16_to_cpu(id->crdt2);
2923 	ctrl->crdt[2] = le16_to_cpu(id->crdt3);
2924 
2925 	ctrl->oacs = le16_to_cpu(id->oacs);
2926 	ctrl->oncs = le16_to_cpu(id->oncs);
2927 	ctrl->mtfa = le16_to_cpu(id->mtfa);
2928 	ctrl->oaes = le32_to_cpu(id->oaes);
2929 	ctrl->wctemp = le16_to_cpu(id->wctemp);
2930 	ctrl->cctemp = le16_to_cpu(id->cctemp);
2931 
2932 	atomic_set(&ctrl->abort_limit, id->acl + 1);
2933 	ctrl->vwc = id->vwc;
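	/*
	 * MDTS is a power of two in units of the minimum memory page size,
	 * e.g. MDTS = 5 with 4K pages gives 1 << (5 + 12 - 9) = 256 sectors
	 * (128K).
	 */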
2934 	if (id->mdts)
2935 		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
2936 	else
2937 		max_hw_sectors = UINT_MAX;
2938 	ctrl->max_hw_sectors =
2939 		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
2940 
2941 	nvme_set_queue_limits(ctrl, ctrl->admin_q);
2942 	ctrl->sgls = le32_to_cpu(id->sgls);
2943 	ctrl->kas = le16_to_cpu(id->kas);
2944 	ctrl->max_namespaces = le32_to_cpu(id->mnan);
2945 	ctrl->ctratt = le32_to_cpu(id->ctratt);
2946 
2947 	if (id->rtd3e) {
2948 		/* us -> s */
2949 		u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000;
2950 
2951 		ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
2952 						 shutdown_timeout, 60);
2953 
2954 		if (ctrl->shutdown_timeout != shutdown_timeout)
2955 			dev_info(ctrl->device,
2956 				 "Shutdown timeout set to %u seconds\n",
2957 				 ctrl->shutdown_timeout);
2958 	} else
2959 		ctrl->shutdown_timeout = shutdown_timeout;
2960 
2961 	ctrl->npss = id->npss;
2962 	ctrl->apsta = id->apsta;
2963 	prev_apst_enabled = ctrl->apst_enabled;
2964 	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
2965 		if (force_apst && id->apsta) {
2966 			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
2967 			ctrl->apst_enabled = true;
2968 		} else {
2969 			ctrl->apst_enabled = false;
2970 		}
2971 	} else {
2972 		ctrl->apst_enabled = id->apsta;
2973 	}
2974 	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
2975 
2976 	if (ctrl->ops->flags & NVME_F_FABRICS) {
2977 		ctrl->icdoff = le16_to_cpu(id->icdoff);
2978 		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
2979 		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
2980 		ctrl->maxcmd = le16_to_cpu(id->maxcmd);
2981 
2982 		/*
2983 		 * In fabrics we need to verify that the cntlid matches the
2984 		 * one reported during the admin connect.
2985 		 */
2986 		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
2987 			dev_err(ctrl->device,
2988 				"Mismatching cntlid: Connect %u vs Identify "
2989 				"%u, rejecting\n",
2990 				ctrl->cntlid, le16_to_cpu(id->cntlid));
2991 			ret = -EINVAL;
2992 			goto out_free;
2993 		}
2994 
2995 		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
2996 			dev_err(ctrl->device,
2997 				"keep-alive support is mandatory for fabrics\n");
2998 			ret = -EINVAL;
2999 			goto out_free;
3000 		}
3001 	} else {
3002 		ctrl->hmpre = le32_to_cpu(id->hmpre);
3003 		ctrl->hmmin = le32_to_cpu(id->hmmin);
3004 		ctrl->hmminds = le32_to_cpu(id->hmminds);
3005 		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
3006 	}
3007 
3008 	ret = nvme_mpath_init(ctrl, id);
3009 	kfree(id);
3010 
3011 	if (ret < 0)
3012 		return ret;
3013 
3014 	if (ctrl->apst_enabled && !prev_apst_enabled)
3015 		dev_pm_qos_expose_latency_tolerance(ctrl->device);
3016 	else if (!ctrl->apst_enabled && prev_apst_enabled)
3017 		dev_pm_qos_hide_latency_tolerance(ctrl->device);
3018 
3019 	ret = nvme_configure_apst(ctrl);
3020 	if (ret < 0)
3021 		return ret;
3022 
3023 	ret = nvme_configure_timestamp(ctrl);
3024 	if (ret < 0)
3025 		return ret;
3026 
3027 	ret = nvme_configure_directives(ctrl);
3028 	if (ret < 0)
3029 		return ret;
3030 
3031 	ret = nvme_configure_acre(ctrl);
3032 	if (ret < 0)
3033 		return ret;
3034 
3035 	if (!ctrl->identified)
3036 		nvme_hwmon_init(ctrl);
3037 
3038 	ctrl->identified = true;
3039 
3040 	return 0;
3041 
3042 out_free:
3043 	kfree(id);
3044 	return ret;
3045 }
3046 EXPORT_SYMBOL_GPL(nvme_init_identify);
3047 
3048 static int nvme_dev_open(struct inode *inode, struct file *file)
3049 {
3050 	struct nvme_ctrl *ctrl =
3051 		container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3052 
3053 	switch (ctrl->state) {
3054 	case NVME_CTRL_LIVE:
3055 		break;
3056 	default:
3057 		return -EWOULDBLOCK;
3058 	}
3059 
3060 	file->private_data = ctrl;
3061 	return 0;
3062 }
3063 
3064 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
3065 {
3066 	struct nvme_ns *ns;
3067 	int ret;
3068 
3069 	down_read(&ctrl->namespaces_rwsem);
3070 	if (list_empty(&ctrl->namespaces)) {
3071 		ret = -ENOTTY;
3072 		goto out_unlock;
3073 	}
3074 
3075 	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
3076 	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
3077 		dev_warn(ctrl->device,
3078 			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
3079 		ret = -EINVAL;
3080 		goto out_unlock;
3081 	}
3082 
3083 	dev_warn(ctrl->device,
3084 		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
3085 	kref_get(&ns->kref);
3086 	up_read(&ctrl->namespaces_rwsem);
3087 
3088 	ret = nvme_user_cmd(ctrl, ns, argp);
3089 	nvme_put_ns(ns);
3090 	return ret;
3091 
3092 out_unlock:
3093 	up_read(&ctrl->namespaces_rwsem);
3094 	return ret;
3095 }
3096 
3097 static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
3098 		unsigned long arg)
3099 {
3100 	struct nvme_ctrl *ctrl = file->private_data;
3101 	void __user *argp = (void __user *)arg;
3102 
3103 	switch (cmd) {
3104 	case NVME_IOCTL_ADMIN_CMD:
3105 		return nvme_user_cmd(ctrl, NULL, argp);
3106 	case NVME_IOCTL_ADMIN64_CMD:
3107 		return nvme_user_cmd64(ctrl, NULL, argp);
3108 	case NVME_IOCTL_IO_CMD:
3109 		return nvme_dev_user_cmd(ctrl, argp);
3110 	case NVME_IOCTL_RESET:
3111 		dev_warn(ctrl->device, "resetting controller\n");
3112 		return nvme_reset_ctrl_sync(ctrl);
3113 	case NVME_IOCTL_SUBSYS_RESET:
3114 		return nvme_reset_subsystem(ctrl);
3115 	case NVME_IOCTL_RESCAN:
3116 		nvme_queue_scan(ctrl);
3117 		return 0;
3118 	default:
3119 		return -ENOTTY;
3120 	}
3121 }
3122 
3123 static const struct file_operations nvme_dev_fops = {
3124 	.owner		= THIS_MODULE,
3125 	.open		= nvme_dev_open,
3126 	.unlocked_ioctl	= nvme_dev_ioctl,
3127 	.compat_ioctl	= compat_ptr_ioctl,
3128 };
3129 
3130 static ssize_t nvme_sysfs_reset(struct device *dev,
3131 				struct device_attribute *attr, const char *buf,
3132 				size_t count)
3133 {
3134 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3135 	int ret;
3136 
3137 	ret = nvme_reset_ctrl_sync(ctrl);
3138 	if (ret < 0)
3139 		return ret;
3140 	return count;
3141 }
3142 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
3143 
3144 static ssize_t nvme_sysfs_rescan(struct device *dev,
3145 				struct device_attribute *attr, const char *buf,
3146 				size_t count)
3147 {
3148 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3149 
3150 	nvme_queue_scan(ctrl);
3151 	return count;
3152 }
3153 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
3154 
3155 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
3156 {
3157 	struct gendisk *disk = dev_to_disk(dev);
3158 
3159 	if (disk->fops == &nvme_fops)
3160 		return nvme_get_ns_from_dev(dev)->head;
3161 	else
3162 		return disk->private_data;
3163 }
3164 
3165 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
3166 		char *buf)
3167 {
3168 	struct nvme_ns_head *head = dev_to_ns_head(dev);
3169 	struct nvme_ns_ids *ids = &head->ids;
3170 	struct nvme_subsystem *subsys = head->subsys;
3171 	int serial_len = sizeof(subsys->serial);
3172 	int model_len = sizeof(subsys->model);
3173 
3174 	if (!uuid_is_null(&ids->uuid))
3175 		return sprintf(buf, "uuid.%pU\n", &ids->uuid);
3176 
3177 	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3178 		return sprintf(buf, "eui.%16phN\n", ids->nguid);
3179 
3180 	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3181 		return sprintf(buf, "eui.%8phN\n", ids->eui64);
3182 
3183 	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
3184 				  subsys->serial[serial_len - 1] == '\0'))
3185 		serial_len--;
3186 	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
3187 				 subsys->model[model_len - 1] == '\0'))
3188 		model_len--;
3189 
3190 	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
3191 		serial_len, subsys->serial, model_len, subsys->model,
3192 		head->ns_id);
3193 }
3194 static DEVICE_ATTR_RO(wwid);
3195 
3196 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
3197 		char *buf)
3198 {
3199 	return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
3200 }
3201 static DEVICE_ATTR_RO(nguid);
3202 
3203 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
3204 		char *buf)
3205 {
3206 	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3207 
3208 	/* For backward compatibility, expose the NGUID to userspace if
3209 	 * we have no UUID set.
3210 	 */
3211 	if (uuid_is_null(&ids->uuid)) {
3212 		printk_ratelimited(KERN_WARNING
3213 				   "No UUID available providing old NGUID\n");
3214 		return sprintf(buf, "%pU\n", ids->nguid);
3215 	}
3216 	return sprintf(buf, "%pU\n", &ids->uuid);
3217 }
3218 static DEVICE_ATTR_RO(uuid);
3219 
3220 static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
3221 		char *buf)
3222 {
3223 	return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
3224 }
3225 static DEVICE_ATTR_RO(eui);
3226 
3227 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
3228 		char *buf)
3229 {
3230 	return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
3231 }
3232 static DEVICE_ATTR_RO(nsid);
3233 
3234 static struct attribute *nvme_ns_id_attrs[] = {
3235 	&dev_attr_wwid.attr,
3236 	&dev_attr_uuid.attr,
3237 	&dev_attr_nguid.attr,
3238 	&dev_attr_eui.attr,
3239 	&dev_attr_nsid.attr,
3240 #ifdef CONFIG_NVME_MULTIPATH
3241 	&dev_attr_ana_grpid.attr,
3242 	&dev_attr_ana_state.attr,
3243 #endif
3244 	NULL,
3245 };
3246 
3247 static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
3248 		struct attribute *a, int n)
3249 {
3250 	struct device *dev = container_of(kobj, struct device, kobj);
3251 	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3252 
3253 	if (a == &dev_attr_uuid.attr) {
3254 		if (uuid_is_null(&ids->uuid) &&
3255 		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3256 			return 0;
3257 	}
3258 	if (a == &dev_attr_nguid.attr) {
3259 		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3260 			return 0;
3261 	}
3262 	if (a == &dev_attr_eui.attr) {
3263 		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3264 			return 0;
3265 	}
3266 #ifdef CONFIG_NVME_MULTIPATH
3267 	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
3268 		if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */
3269 			return 0;
3270 		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
3271 			return 0;
3272 	}
3273 #endif
3274 	return a->mode;
3275 }
3276 
3277 static const struct attribute_group nvme_ns_id_attr_group = {
3278 	.attrs		= nvme_ns_id_attrs,
3279 	.is_visible	= nvme_ns_id_attrs_are_visible,
3280 };
3281 
3282 const struct attribute_group *nvme_ns_id_attr_groups[] = {
3283 	&nvme_ns_id_attr_group,
3284 #ifdef CONFIG_NVM
3285 	&nvme_nvm_attr_group,
3286 #endif
3287 	NULL,
3288 };
3289 
3290 #define nvme_show_str_function(field)						\
3291 static ssize_t  field##_show(struct device *dev,				\
3292 			    struct device_attribute *attr, char *buf)		\
3293 {										\
3294         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
3295         return sprintf(buf, "%.*s\n",						\
3296 		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);		\
3297 }										\
3298 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3299 
3300 nvme_show_str_function(model);
3301 nvme_show_str_function(serial);
3302 nvme_show_str_function(firmware_rev);
3303 
3304 #define nvme_show_int_function(field)						\
3305 static ssize_t  field##_show(struct device *dev,				\
3306 			    struct device_attribute *attr, char *buf)		\
3307 {										\
3308         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
3309         return sprintf(buf, "%d\n", ctrl->field);	\
3310 }										\
3311 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3312 
3313 nvme_show_int_function(cntlid);
3314 nvme_show_int_function(numa_node);
3315 nvme_show_int_function(queue_count);
3316 nvme_show_int_function(sqsize);
3317 
3318 static ssize_t nvme_sysfs_delete(struct device *dev,
3319 				struct device_attribute *attr, const char *buf,
3320 				size_t count)
3321 {
3322 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3323 
3324 	/* Can't delete non-created controllers */
3325 	if (!ctrl->created)
3326 		return -EBUSY;
3327 
3328 	if (device_remove_file_self(dev, attr))
3329 		nvme_delete_ctrl_sync(ctrl);
3330 	return count;
3331 }
3332 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
3333 
3334 static ssize_t nvme_sysfs_show_transport(struct device *dev,
3335 					 struct device_attribute *attr,
3336 					 char *buf)
3337 {
3338 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3339 
3340 	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
3341 }
3342 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
3343 
3344 static ssize_t nvme_sysfs_show_state(struct device *dev,
3345 				     struct device_attribute *attr,
3346 				     char *buf)
3347 {
3348 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3349 	static const char *const state_name[] = {
3350 		[NVME_CTRL_NEW]		= "new",
3351 		[NVME_CTRL_LIVE]	= "live",
3352 		[NVME_CTRL_RESETTING]	= "resetting",
3353 		[NVME_CTRL_CONNECTING]	= "connecting",
3354 		[NVME_CTRL_DELETING]	= "deleting",
3355 		[NVME_CTRL_DEAD]	= "dead",
3356 	};
3357 
3358 	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
3359 	    state_name[ctrl->state])
3360 		return sprintf(buf, "%s\n", state_name[ctrl->state]);
3361 
3362 	return sprintf(buf, "unknown state\n");
3363 }
3364 
3365 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
3366 
3367 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
3368 					 struct device_attribute *attr,
3369 					 char *buf)
3370 {
3371 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3372 
3373 	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
3374 }
3375 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
3376 
3377 static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
3378 					struct device_attribute *attr,
3379 					char *buf)
3380 {
3381 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3382 
3383 	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn);
3384 }
3385 static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
3386 
3387 static ssize_t nvme_sysfs_show_hostid(struct device *dev,
3388 					struct device_attribute *attr,
3389 					char *buf)
3390 {
3391 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3392 
3393 	return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id);
3394 }
3395 static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
3396 
3397 static ssize_t nvme_sysfs_show_address(struct device *dev,
3398 					 struct device_attribute *attr,
3399 					 char *buf)
3400 {
3401 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3402 
3403 	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
3404 }
3405 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
3406 
3407 static struct attribute *nvme_dev_attrs[] = {
3408 	&dev_attr_reset_controller.attr,
3409 	&dev_attr_rescan_controller.attr,
3410 	&dev_attr_model.attr,
3411 	&dev_attr_serial.attr,
3412 	&dev_attr_firmware_rev.attr,
3413 	&dev_attr_cntlid.attr,
3414 	&dev_attr_delete_controller.attr,
3415 	&dev_attr_transport.attr,
3416 	&dev_attr_subsysnqn.attr,
3417 	&dev_attr_address.attr,
3418 	&dev_attr_state.attr,
3419 	&dev_attr_numa_node.attr,
3420 	&dev_attr_queue_count.attr,
3421 	&dev_attr_sqsize.attr,
3422 	&dev_attr_hostnqn.attr,
3423 	&dev_attr_hostid.attr,
3424 	NULL
3425 };
3426 
3427 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
3428 		struct attribute *a, int n)
3429 {
3430 	struct device *dev = container_of(kobj, struct device, kobj);
3431 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3432 
3433 	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
3434 		return 0;
3435 	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
3436 		return 0;
3437 	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
3438 		return 0;
3439 	if (a == &dev_attr_hostid.attr && !ctrl->opts)
3440 		return 0;
3441 
3442 	return a->mode;
3443 }
3444 
3445 static struct attribute_group nvme_dev_attrs_group = {
3446 	.attrs		= nvme_dev_attrs,
3447 	.is_visible	= nvme_dev_attrs_are_visible,
3448 };
3449 
3450 static const struct attribute_group *nvme_dev_attr_groups[] = {
3451 	&nvme_dev_attrs_group,
3452 	NULL,
3453 };
3454 
3455 static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
3456 		unsigned nsid)
3457 {
3458 	struct nvme_ns_head *h;
3459 
3460 	lockdep_assert_held(&subsys->lock);
3461 
3462 	list_for_each_entry(h, &subsys->nsheads, entry) {
3463 		if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
3464 			return h;
3465 	}
3466 
3467 	return NULL;
3468 }
3469 
3470 static int __nvme_check_ids(struct nvme_subsystem *subsys,
3471 		struct nvme_ns_head *new)
3472 {
3473 	struct nvme_ns_head *h;
3474 
3475 	lockdep_assert_held(&subsys->lock);
3476 
3477 	list_for_each_entry(h, &subsys->nsheads, entry) {
3478 		if (nvme_ns_ids_valid(&new->ids) &&
3479 		    nvme_ns_ids_equal(&new->ids, &h->ids))
3480 			return -EINVAL;
3481 	}
3482 
3483 	return 0;
3484 }
3485 
3486 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
3487 		unsigned nsid, struct nvme_ns_ids *ids)
3488 {
3489 	struct nvme_ns_head *head;
3490 	size_t size = sizeof(*head);
3491 	int ret = -ENOMEM;
3492 
3493 #ifdef CONFIG_NVME_MULTIPATH
3494 	size += num_possible_nodes() * sizeof(struct nvme_ns *);
3495 #endif
3496 
3497 	head = kzalloc(size, GFP_KERNEL);
3498 	if (!head)
3499 		goto out;
3500 	ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
3501 	if (ret < 0)
3502 		goto out_free_head;
3503 	head->instance = ret;
3504 	INIT_LIST_HEAD(&head->list);
3505 	ret = init_srcu_struct(&head->srcu);
3506 	if (ret)
3507 		goto out_ida_remove;
3508 	head->subsys = ctrl->subsys;
3509 	head->ns_id = nsid;
3510 	head->ids = *ids;
3511 	kref_init(&head->ref);
3512 
3513 	ret = __nvme_check_ids(ctrl->subsys, head);
3514 	if (ret) {
3515 		dev_err(ctrl->device,
3516 			"duplicate IDs for nsid %d\n", nsid);
3517 		goto out_cleanup_srcu;
3518 	}
3519 
3520 	ret = nvme_mpath_alloc_disk(ctrl, head);
3521 	if (ret)
3522 		goto out_cleanup_srcu;
3523 
3524 	list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3525 
3526 	kref_get(&ctrl->subsys->ref);
3527 
3528 	return head;
3529 out_cleanup_srcu:
3530 	cleanup_srcu_struct(&head->srcu);
3531 out_ida_remove:
3532 	ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
3533 out_free_head:
3534 	kfree(head);
3535 out:
3536 	if (ret > 0)
3537 		ret = blk_status_to_errno(nvme_error_status(ret));
3538 	return ERR_PTR(ret);
3539 }
3540 
3541 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
3542 		struct nvme_id_ns *id)
3543 {
3544 	struct nvme_ctrl *ctrl = ns->ctrl;
3545 	bool is_shared = id->nmic & NVME_NS_NMIC_SHARED;
3546 	struct nvme_ns_head *head = NULL;
3547 	struct nvme_ns_ids ids;
3548 	int ret = 0;
3549 
3550 	ret = nvme_report_ns_ids(ctrl, nsid, id, &ids);
3551 	if (ret) {
3552 		if (ret < 0)
3553 			return ret;
3554 		return blk_status_to_errno(nvme_error_status(ret));
3555 	}
3556 
3557 	mutex_lock(&ctrl->subsys->lock);
3558 	head = nvme_find_ns_head(ctrl->subsys, nsid);
3559 	if (!head) {
3560 		head = nvme_alloc_ns_head(ctrl, nsid, &ids);
3561 		if (IS_ERR(head)) {
3562 			ret = PTR_ERR(head);
3563 			goto out_unlock;
3564 		}
3565 		head->shared = is_shared;
3566 	} else {
3567 		ret = -EINVAL;
3568 		if (!is_shared || !head->shared) {
3569 			dev_err(ctrl->device,
3570 				"Duplicate unshared namespace %d\n", nsid);
3571 			goto out_put_ns_head;
3572 		}
3573 		if (!nvme_ns_ids_equal(&head->ids, &ids)) {
3574 			dev_err(ctrl->device,
3575 				"IDs don't match for shared namespace %d\n",
3576 					nsid);
3577 			goto out_put_ns_head;
3578 		}
3579 	}
3580 
3581 	list_add_tail(&ns->siblings, &head->list);
3582 	ns->head = head;
3583 	mutex_unlock(&ctrl->subsys->lock);
3584 	return 0;
3585 
3586 out_put_ns_head:
3587 	nvme_put_ns_head(head);
3588 out_unlock:
3589 	mutex_unlock(&ctrl->subsys->lock);
3590 	return ret;
3591 }
3592 
3593 static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
3594 {
3595 	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
3596 	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
3597 
3598 	return nsa->head->ns_id - nsb->head->ns_id;
3599 }
3600 
3601 static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3602 {
3603 	struct nvme_ns *ns, *ret = NULL;
3604 
3605 	down_read(&ctrl->namespaces_rwsem);
3606 	list_for_each_entry(ns, &ctrl->namespaces, list) {
3607 		if (ns->head->ns_id == nsid) {
3608 			if (!kref_get_unless_zero(&ns->kref))
3609 				continue;
3610 			ret = ns;
3611 			break;
3612 		}
3613 		if (ns->head->ns_id > nsid)
3614 			break;
3615 	}
3616 	up_read(&ctrl->namespaces_rwsem);
3617 	return ret;
3618 }
3619 
3620 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3621 {
3622 	struct nvme_ns *ns;
3623 	struct gendisk *disk;
3624 	struct nvme_id_ns *id;
3625 	char disk_name[DISK_NAME_LEN];
3626 	int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret;
3627 
3628 	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
3629 	if (!ns)
3630 		return;
3631 
3632 	ns->queue = blk_mq_init_queue(ctrl->tagset);
3633 	if (IS_ERR(ns->queue))
3634 		goto out_free_ns;
3635 
3636 	if (ctrl->opts && ctrl->opts->data_digest)
3637 		ns->queue->backing_dev_info->capabilities
3638 			|= BDI_CAP_STABLE_WRITES;
3639 
3640 	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3641 	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
3642 		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
3643 
3644 	ns->queue->queuedata = ns;
3645 	ns->ctrl = ctrl;
3646 
3647 	kref_init(&ns->kref);
3648 	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
3649 	ns->lba_shift = 9; /* default to a 512 byte block size until the disk is validated */
3650 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
3651 	nvme_set_queue_limits(ctrl, ns->queue);
3652 
3653 	ret = nvme_identify_ns(ctrl, nsid, &id);
3654 	if (ret)
3655 		goto out_free_queue;
3656 
3657 	if (id->ncap == 0)	/* no namespace (legacy quirk) */
3658 		goto out_free_id;
3659 
3660 	ret = nvme_init_ns_head(ns, nsid, id);
3661 	if (ret)
3662 		goto out_free_id;
3663 	nvme_set_disk_name(disk_name, ns, ctrl, &flags);
3664 
3665 	disk = alloc_disk_node(0, node);
3666 	if (!disk)
3667 		goto out_unlink_ns;
3668 
3669 	disk->fops = &nvme_fops;
3670 	disk->private_data = ns;
3671 	disk->queue = ns->queue;
3672 	disk->flags = flags;
3673 	memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
3674 	ns->disk = disk;
3675 
3676 	if (__nvme_revalidate_disk(disk, id))
3677 		goto out_put_disk;
3678 
3679 	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
3680 		ret = nvme_nvm_register(ns, disk_name, node);
3681 		if (ret) {
3682 			dev_warn(ctrl->device, "LightNVM init failure\n");
3683 			goto out_put_disk;
3684 		}
3685 	}
3686 
3687 	down_write(&ctrl->namespaces_rwsem);
3688 	list_add_tail(&ns->list, &ctrl->namespaces);
3689 	up_write(&ctrl->namespaces_rwsem);
3690 
3691 	nvme_get_ctrl(ctrl);
3692 
3693 	device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
3694 
3695 	nvme_mpath_add_disk(ns, id);
3696 	nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
3697 	kfree(id);
3698 
3699 	return;
3700  out_put_disk:
3701 	/* prevent double queue cleanup */
3702 	ns->disk->queue = NULL;
3703 	put_disk(ns->disk);
3704  out_unlink_ns:
3705 	mutex_lock(&ctrl->subsys->lock);
3706 	list_del_rcu(&ns->siblings);
3707 	if (list_empty(&ns->head->list))
3708 		list_del_init(&ns->head->entry);
3709 	mutex_unlock(&ctrl->subsys->lock);
3710 	nvme_put_ns_head(ns->head);
3711  out_free_id:
3712 	kfree(id);
3713  out_free_queue:
3714 	blk_cleanup_queue(ns->queue);
3715  out_free_ns:
3716 	kfree(ns);
3717 }
3718 
3719 static void nvme_ns_remove(struct nvme_ns *ns)
3720 {
3721 	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
3722 		return;
3723 
3724 	nvme_fault_inject_fini(&ns->fault_inject);
3725 
3726 	mutex_lock(&ns->ctrl->subsys->lock);
3727 	list_del_rcu(&ns->siblings);
3728 	if (list_empty(&ns->head->list))
3729 		list_del_init(&ns->head->entry);
3730 	mutex_unlock(&ns->ctrl->subsys->lock);
3731 
3732 	synchronize_rcu(); /* guarantee not available in head->list */
3733 	nvme_mpath_clear_current_path(ns);
3734 	synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
3735 
3736 	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
3737 		del_gendisk(ns->disk);
3738 		blk_cleanup_queue(ns->queue);
3739 		if (blk_get_integrity(ns->disk))
3740 			blk_integrity_unregister(ns->disk);
3741 	}
3742 
3743 	down_write(&ns->ctrl->namespaces_rwsem);
3744 	list_del_init(&ns->list);
3745 	up_write(&ns->ctrl->namespaces_rwsem);
3746 
3747 	nvme_mpath_check_last_path(ns);
3748 	nvme_put_ns(ns);
3749 }
3750 
3751 static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
3752 {
3753 	struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);
3754 
3755 	if (ns) {
3756 		nvme_ns_remove(ns);
3757 		nvme_put_ns(ns);
3758 	}
3759 }
3760 
3761 static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3762 {
3763 	struct nvme_ns *ns;
3764 
3765 	ns = nvme_find_get_ns(ctrl, nsid);
3766 	if (ns) {
3767 		if (ns->disk && revalidate_disk(ns->disk))
3768 			nvme_ns_remove(ns);
3769 		nvme_put_ns(ns);
3770 	} else
3771 		nvme_alloc_ns(ctrl, nsid);
3772 }
3773 
3774 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
3775 					unsigned nsid)
3776 {
3777 	struct nvme_ns *ns, *next;
3778 	LIST_HEAD(rm_list);
3779 
3780 	down_write(&ctrl->namespaces_rwsem);
3781 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
3782 		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
3783 			list_move_tail(&ns->list, &rm_list);
3784 	}
3785 	up_write(&ctrl->namespaces_rwsem);
3786 
3787 	list_for_each_entry_safe(ns, next, &rm_list, list)
3788 		nvme_ns_remove(ns);
3790 }
3791 
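/*
 * Editorial note: nvme_scan_ns_list() pages through the Identify "Active
 * Namespace ID list" (CNS 0x02), NVME_IDENTIFY_DATA_SIZE / 4 entries at a
 * time, each page reporting active NSIDs greater than 'prev'.  Every
 * reported NSID is (re)validated, NSIDs skipped between two reported ones
 * are torn down via nvme_ns_remove_by_nsid(), and once the list terminates
 * with a zero entry everything above the last reported NSID is removed by
 * nvme_remove_invalid_namespaces().  For example, a page of {1, 4, 0} keeps
 * namespaces 1 and 4 and removes 2 and 3.
 */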
3792 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
3793 {
3794 	const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
3795 	__le32 *ns_list;
3796 	u32 prev = 0;
3797 	int ret = 0, i;
3798 
3799 	if (nvme_ctrl_limited_cns(ctrl))
3800 		return -EOPNOTSUPP;
3801 
3802 	ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
3803 	if (!ns_list)
3804 		return -ENOMEM;
3805 
3806 	for (;;) {
3807 		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
3808 		if (ret)
3809 			goto free;
3810 
3811 		for (i = 0; i < nr_entries; i++) {
3812 			u32 nsid = le32_to_cpu(ns_list[i]);
3813 
3814 			if (!nsid)	/* end of the list? */
3815 				goto out;
3816 			nvme_validate_ns(ctrl, nsid);
3817 			while (++prev < nsid)
3818 				nvme_ns_remove_by_nsid(ctrl, prev);
3819 		}
3820 	}
3821  out:
3822 	nvme_remove_invalid_namespaces(ctrl, prev);
3823  free:
3824 	kfree(ns_list);
3825 	return ret;
3826 }
3827 
3828 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
3829 {
3830 	struct nvme_id_ctrl *id;
3831 	u32 nn, i;
3832 
3833 	if (nvme_identify_ctrl(ctrl, &id))
3834 		return;
3835 	nn = le32_to_cpu(id->nn);
3836 	kfree(id);
3837 
3838 	for (i = 1; i <= nn; i++)
3839 		nvme_validate_ns(ctrl, i);
3840 
3841 	nvme_remove_invalid_namespaces(ctrl, nn);
3842 }
3843 
3844 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
3845 {
3846 	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
3847 	__le32 *log;
3848 	int error;
3849 
3850 	log = kzalloc(log_size, GFP_KERNEL);
3851 	if (!log)
3852 		return;
3853 
3854 	/*
3855 	 * We need to read the log to clear the AEN, but we don't want to rely
3856 	 * on it for the changed namespace information as userspace could have
3857 	 * raced with us in reading the log page, which could cause us to miss
3858 	 * updates.
3859 	 */
3860 	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log,
3861 			log_size, 0);
3862 	if (error)
3863 		dev_warn(ctrl->device,
3864 			"reading changed ns log failed: %d\n", error);
3865 
3866 	kfree(log);
3867 }
3868 
3869 static void nvme_scan_work(struct work_struct *work)
3870 {
3871 	struct nvme_ctrl *ctrl =
3872 		container_of(work, struct nvme_ctrl, scan_work);
3873 
3874 	/* No tagset on a live ctrl means IO queues could not be created */
3875 	if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
3876 		return;
3877 
3878 	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
3879 		dev_info(ctrl->device, "rescanning namespaces.\n");
3880 		nvme_clear_changed_ns_log(ctrl);
3881 	}
3882 
3883 	mutex_lock(&ctrl->scan_lock);
3884 	if (nvme_scan_ns_list(ctrl) != 0)
3885 		nvme_scan_ns_sequential(ctrl);
3886 	mutex_unlock(&ctrl->scan_lock);
3887 
3888 	down_write(&ctrl->namespaces_rwsem);
3889 	list_sort(NULL, &ctrl->namespaces, ns_cmp);
3890 	up_write(&ctrl->namespaces_rwsem);
3891 }
3892 
3893 /*
3894  * This function iterates the namespace list unlocked to allow recovery from
3895  * controller failure. It is up to the caller to ensure the namespace list is
3896  * not modified by scan work while this function is executing.
3897  */
3898 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
3899 {
3900 	struct nvme_ns *ns, *next;
3901 	LIST_HEAD(ns_list);
3902 
3903 	/*
3904 	 * Make sure to requeue I/O to all namespaces: some of it may have
3905 	 * been issued by the scan itself and must complete for scan_work
3906 	 * to make progress.
3907 	 */
3908 	nvme_mpath_clear_ctrl_paths(ctrl);
3909 
3910 	/* prevent racing with ns scanning */
3911 	flush_work(&ctrl->scan_work);
3912 
3913 	/*
3914 	 * The dead state indicates the controller was not gracefully
3915 	 * disconnected. In that case, we won't be able to flush any data while
3916 	 * removing the namespaces' disks; fail all the queues now to avoid
3917 	 * potentially having to clean up the failed sync later.
3918 	 */
3919 	if (ctrl->state == NVME_CTRL_DEAD)
3920 		nvme_kill_queues(ctrl);
3921 
3922 	down_write(&ctrl->namespaces_rwsem);
3923 	list_splice_init(&ctrl->namespaces, &ns_list);
3924 	up_write(&ctrl->namespaces_rwsem);
3925 
3926 	list_for_each_entry_safe(ns, next, &ns_list, list)
3927 		nvme_ns_remove(ns);
3928 }
3929 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
3930 
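/*
 * Editorial note: for a fabrics controller this adds variables such as
 * NVME_TRTYPE=tcp, NVME_TRADDR=<address>, NVME_TRSVCID=4420 and
 * NVME_HOST_TRADDR=none to the uevent environment; a controller without
 * nvmf options (e.g. PCIe) only reports NVME_TRTYPE.
 */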
3931 static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
3932 {
3933 	struct nvme_ctrl *ctrl =
3934 		container_of(dev, struct nvme_ctrl, ctrl_device);
3935 	struct nvmf_ctrl_options *opts = ctrl->opts;
3936 	int ret;
3937 
3938 	ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
3939 	if (ret)
3940 		return ret;
3941 
3942 	if (opts) {
3943 		ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
3944 		if (ret)
3945 			return ret;
3946 
3947 		ret = add_uevent_var(env, "NVME_TRSVCID=%s",
3948 				opts->trsvcid ?: "none");
3949 		if (ret)
3950 			return ret;
3951 
3952 		ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
3953 				opts->host_traddr ?: "none");
3954 	}
3955 	return ret;
3956 }
3957 
3958 static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
3959 {
3960 	char *envp[2] = { NULL, NULL };
3961 	u32 aen_result = ctrl->aen_result;
3962 
3963 	ctrl->aen_result = 0;
3964 	if (!aen_result)
3965 		return;
3966 
3967 	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
3968 	if (!envp[0])
3969 		return;
3970 	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
3971 	kfree(envp[0]);
3972 }
3973 
3974 static void nvme_async_event_work(struct work_struct *work)
3975 {
3976 	struct nvme_ctrl *ctrl =
3977 		container_of(work, struct nvme_ctrl, async_event_work);
3978 
3979 	nvme_aen_uevent(ctrl);
3980 	ctrl->ops->submit_async_event(ctrl);
3981 }
3982 
3983 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
3984 {
3986 	u32 csts;
3987 
3988 	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
3989 		return false;
3990 
3991 	if (csts == ~0)
3992 		return false;
3993 
3994 	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
3995 }
3996 
3997 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
3998 {
3999 	struct nvme_fw_slot_info_log *log;
4000 
4001 	log = kmalloc(sizeof(*log), GFP_KERNEL);
4002 	if (!log)
4003 		return;
4004 
4005 	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
4006 			sizeof(*log), 0))
4007 		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
4008 	kfree(log);
4009 }
4010 
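/*
 * Editorial note: MTFA from Identify Controller is reported in units of
 * 100 ms, so a controller advertising mtfa == 20 gets a 2 second activation
 * window below before we give up and schedule a reset; controllers that do
 * not report MTFA fall back to the admin command timeout.
 */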
4011 static void nvme_fw_act_work(struct work_struct *work)
4012 {
4013 	struct nvme_ctrl *ctrl = container_of(work,
4014 				struct nvme_ctrl, fw_act_work);
4015 	unsigned long fw_act_timeout;
4016 
4017 	if (ctrl->mtfa)
4018 		fw_act_timeout = jiffies +
4019 				msecs_to_jiffies(ctrl->mtfa * 100);
4020 	else
4021 		fw_act_timeout = jiffies +
4022 				msecs_to_jiffies(admin_timeout * 1000);
4023 
4024 	nvme_stop_queues(ctrl);
4025 	while (nvme_ctrl_pp_status(ctrl)) {
4026 		if (time_after(jiffies, fw_act_timeout)) {
4027 			dev_warn(ctrl->device,
4028 				"Fw activation timeout, reset controller\n");
4029 			nvme_try_sched_reset(ctrl);
4030 			return;
4031 		}
4032 		msleep(100);
4033 	}
4034 
4035 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
4036 		return;
4037 
4038 	nvme_start_queues(ctrl);
4039 	/* read FW slot information to clear the AER */
4040 	nvme_get_fw_slot_info(ctrl);
4041 }
4042 
4043 static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
4044 {
4045 	u32 aer_notice_type = (result & 0xff00) >> 8;
4046 
4047 	trace_nvme_async_event(ctrl, aer_notice_type);
4048 
4049 	switch (aer_notice_type) {
4050 	case NVME_AER_NOTICE_NS_CHANGED:
4051 		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
4052 		nvme_queue_scan(ctrl);
4053 		break;
4054 	case NVME_AER_NOTICE_FW_ACT_STARTING:
4055 		/*
4056 		 * We are (ab)using the RESETTING state to prevent subsequent
4057 		 * recovery actions from interfering with the controller's
4058 		 * firmware activation.
4059 		 */
4060 		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
4061 			queue_work(nvme_wq, &ctrl->fw_act_work);
4062 		break;
4063 #ifdef CONFIG_NVME_MULTIPATH
4064 	case NVME_AER_NOTICE_ANA:
4065 		if (!ctrl->ana_log_buf)
4066 			break;
4067 		queue_work(nvme_wq, &ctrl->ana_work);
4068 		break;
4069 #endif
4070 	case NVME_AER_NOTICE_DISC_CHANGED:
4071 		ctrl->aen_result = result;
4072 		break;
4073 	default:
4074 		dev_warn(ctrl->device, "async event result %08x\n", result);
4075 	}
4076 }
4077 
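/*
 * Editorial note: transport drivers call this from their completion path
 * when the reserved Asynchronous Event Request command completes.  Notice
 * events are routed to the scan, firmware-activation and ANA machinery,
 * other event types are stashed in ctrl->aen_result for the AEN uevent,
 * and async_event_work is queued so a fresh AER re-arms the controller.
 */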
4078 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
4079 		volatile union nvme_result *res)
4080 {
4081 	u32 result = le32_to_cpu(res->u32);
4082 	u32 aer_type = result & 0x07;
4083 
4084 	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
4085 		return;
4086 
4087 	switch (aer_type) {
4088 	case NVME_AER_NOTICE:
4089 		nvme_handle_aen_notice(ctrl, result);
4090 		break;
4091 	case NVME_AER_ERROR:
4092 	case NVME_AER_SMART:
4093 	case NVME_AER_CSS:
4094 	case NVME_AER_VS:
4095 		trace_nvme_async_event(ctrl, aer_type);
4096 		ctrl->aen_result = result;
4097 		break;
4098 	default:
4099 		break;
4100 	}
4101 	queue_work(nvme_wq, &ctrl->async_event_work);
4102 }
4103 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
4104 
4105 void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
4106 {
4107 	nvme_mpath_stop(ctrl);
4108 	nvme_stop_keep_alive(ctrl);
4109 	flush_work(&ctrl->async_event_work);
4110 	cancel_work_sync(&ctrl->fw_act_work);
4111 }
4112 EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
4113 
4114 void nvme_start_ctrl(struct nvme_ctrl *ctrl)
4115 {
4116 	if (ctrl->kato)
4117 		nvme_start_keep_alive(ctrl);
4118 
4119 	nvme_enable_aen(ctrl);
4120 
4121 	if (ctrl->queue_count > 1) {
4122 		nvme_queue_scan(ctrl);
4123 		nvme_start_queues(ctrl);
4124 	}
4125 	ctrl->created = true;
4126 }
4127 EXPORT_SYMBOL_GPL(nvme_start_ctrl);
4128 
4129 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
4130 {
4131 	nvme_fault_inject_fini(&ctrl->fault_inject);
4132 	dev_pm_qos_hide_latency_tolerance(ctrl->device);
4133 	cdev_device_del(&ctrl->cdev, ctrl->device);
4134 	nvme_put_ctrl(ctrl);
4135 }
4136 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
4137 
4138 static void nvme_free_ctrl(struct device *dev)
4139 {
4140 	struct nvme_ctrl *ctrl =
4141 		container_of(dev, struct nvme_ctrl, ctrl_device);
4142 	struct nvme_subsystem *subsys = ctrl->subsys;
4143 
4144 	if (subsys && ctrl->instance != subsys->instance)
4145 		ida_simple_remove(&nvme_instance_ida, ctrl->instance);
4146 
4147 	kfree(ctrl->effects);
4148 	nvme_mpath_uninit(ctrl);
4149 	__free_page(ctrl->discard_page);
4150 
4151 	if (subsys) {
4152 		mutex_lock(&nvme_subsystems_lock);
4153 		list_del(&ctrl->subsys_entry);
4154 		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
4155 		mutex_unlock(&nvme_subsystems_lock);
4156 	}
4157 
4158 	ctrl->ops->free_ctrl(ctrl);
4159 
4160 	if (subsys)
4161 		nvme_put_subsystem(subsys);
4162 }
4163 
4164 /*
4165  * Initialize an NVMe controller structure.  This needs to be called during
4166  * earliest initialization so that we have the initialized structures around
4167  * during probing.
4168  */
4169 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
4170 		const struct nvme_ctrl_ops *ops, unsigned long quirks)
4171 {
4172 	int ret;
4173 
4174 	ctrl->state = NVME_CTRL_NEW;
4175 	spin_lock_init(&ctrl->lock);
4176 	mutex_init(&ctrl->scan_lock);
4177 	INIT_LIST_HEAD(&ctrl->namespaces);
4178 	init_rwsem(&ctrl->namespaces_rwsem);
4179 	ctrl->dev = dev;
4180 	ctrl->ops = ops;
4181 	ctrl->quirks = quirks;
4182 	ctrl->numa_node = NUMA_NO_NODE;
4183 	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
4184 	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
4185 	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
4186 	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
4187 	init_waitqueue_head(&ctrl->state_wq);
4188 
4189 	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
4190 	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
4191 	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
4192 
4193 	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
4194 			PAGE_SIZE);
4195 	ctrl->discard_page = alloc_page(GFP_KERNEL);
4196 	if (!ctrl->discard_page) {
4197 		ret = -ENOMEM;
4198 		goto out;
4199 	}
4200 
4201 	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
4202 	if (ret < 0)
4203 		goto out;
4204 	ctrl->instance = ret;
4205 
4206 	device_initialize(&ctrl->ctrl_device);
4207 	ctrl->device = &ctrl->ctrl_device;
4208 	ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
4209 	ctrl->device->class = nvme_class;
4210 	ctrl->device->parent = ctrl->dev;
4211 	ctrl->device->groups = nvme_dev_attr_groups;
4212 	ctrl->device->release = nvme_free_ctrl;
4213 	dev_set_drvdata(ctrl->device, ctrl);
4214 	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
4215 	if (ret)
4216 		goto out_release_instance;
4217 
4218 	nvme_get_ctrl(ctrl);
4219 	cdev_init(&ctrl->cdev, &nvme_dev_fops);
4220 	ctrl->cdev.owner = ops->module;
4221 	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
4222 	if (ret)
4223 		goto out_free_name;
4224 
4225 	/*
4226 	 * Initialize latency tolerance controls.  The sysfs files won't
4227 	 * be visible to userspace unless the device actually supports APST.
4228 	 */
4229 	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
4230 	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
4231 		min(default_ps_max_latency_us, (unsigned long)S32_MAX));
4232 
4233 	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
4234 
4235 	return 0;
4236 out_free_name:
4237 	nvme_put_ctrl(ctrl);
4238 	kfree_const(ctrl->device->kobj.name);
4239 out_release_instance:
4240 	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
4241 out:
4242 	if (ctrl->discard_page)
4243 		__free_page(ctrl->discard_page);
4244 	return ret;
4245 }
4246 EXPORT_SYMBOL_GPL(nvme_init_ctrl);
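
/*
 * Editorial sketch, not part of the original driver: the rough call order a
 * transport follows around nvme_init_ctrl().  The function name and the
 * bare 'ops' argument are invented for illustration; real transports pass
 * their own nvme_ctrl_ops and quirks.  Teardown roughly mirrors this via
 * nvme_stop_ctrl(), nvme_remove_namespaces(), the transport's ->delete_ctrl()
 * and finally nvme_uninit_ctrl().
 */
static int __maybe_unused nvme_example_setup_ctrl(struct nvme_ctrl *ctrl,
		struct device *dev, const struct nvme_ctrl_ops *ops)
{
	int ret;

	ret = nvme_init_ctrl(ctrl, dev, ops, 0);
	if (ret)
		return ret;

	/* ... transport-specific admin/I/O queue setup happens here ... */

	nvme_start_ctrl(ctrl);
	return 0;
}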
4247 
4248 /**
4249  * nvme_kill_queues(): Ends all namespace queues
4250  * @ctrl: the dead controller that needs to end
4251  *
4252  * Call this function when the driver determines it is unable to get the
4253  * controller in a state capable of servicing IO.
4254  */
4255 void nvme_kill_queues(struct nvme_ctrl *ctrl)
4256 {
4257 	struct nvme_ns *ns;
4258 
4259 	down_read(&ctrl->namespaces_rwsem);
4260 
4261 	/* Forcibly unquiesce queues to avoid blocking dispatch */
4262 	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
4263 		blk_mq_unquiesce_queue(ctrl->admin_q);
4264 
4265 	list_for_each_entry(ns, &ctrl->namespaces, list)
4266 		nvme_set_queue_dying(ns);
4267 
4268 	up_read(&ctrl->namespaces_rwsem);
4269 }
4270 EXPORT_SYMBOL_GPL(nvme_kill_queues);
4271 
4272 void nvme_unfreeze(struct nvme_ctrl *ctrl)
4273 {
4274 	struct nvme_ns *ns;
4275 
4276 	down_read(&ctrl->namespaces_rwsem);
4277 	list_for_each_entry(ns, &ctrl->namespaces, list)
4278 		blk_mq_unfreeze_queue(ns->queue);
4279 	up_read(&ctrl->namespaces_rwsem);
4280 }
4281 EXPORT_SYMBOL_GPL(nvme_unfreeze);
4282 
4283 void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
4284 {
4285 	struct nvme_ns *ns;
4286 
4287 	down_read(&ctrl->namespaces_rwsem);
4288 	list_for_each_entry(ns, &ctrl->namespaces, list) {
4289 		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
4290 		if (timeout <= 0)
4291 			break;
4292 	}
4293 	up_read(&ctrl->namespaces_rwsem);
4294 }
4295 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
4296 
4297 void nvme_wait_freeze(struct nvme_ctrl *ctrl)
4298 {
4299 	struct nvme_ns *ns;
4300 
4301 	down_read(&ctrl->namespaces_rwsem);
4302 	list_for_each_entry(ns, &ctrl->namespaces, list)
4303 		blk_mq_freeze_queue_wait(ns->queue);
4304 	up_read(&ctrl->namespaces_rwsem);
4305 }
4306 EXPORT_SYMBOL_GPL(nvme_wait_freeze);
4307 
4308 void nvme_start_freeze(struct nvme_ctrl *ctrl)
4309 {
4310 	struct nvme_ns *ns;
4311 
4312 	down_read(&ctrl->namespaces_rwsem);
4313 	list_for_each_entry(ns, &ctrl->namespaces, list)
4314 		blk_freeze_queue_start(ns->queue);
4315 	up_read(&ctrl->namespaces_rwsem);
4316 }
4317 EXPORT_SYMBOL_GPL(nvme_start_freeze);
4318 
4319 void nvme_stop_queues(struct nvme_ctrl *ctrl)
4320 {
4321 	struct nvme_ns *ns;
4322 
4323 	down_read(&ctrl->namespaces_rwsem);
4324 	list_for_each_entry(ns, &ctrl->namespaces, list)
4325 		blk_mq_quiesce_queue(ns->queue);
4326 	up_read(&ctrl->namespaces_rwsem);
4327 }
4328 EXPORT_SYMBOL_GPL(nvme_stop_queues);
4329 
4330 void nvme_start_queues(struct nvme_ctrl *ctrl)
4331 {
4332 	struct nvme_ns *ns;
4333 
4334 	down_read(&ctrl->namespaces_rwsem);
4335 	list_for_each_entry(ns, &ctrl->namespaces, list)
4336 		blk_mq_unquiesce_queue(ns->queue);
4337 	up_read(&ctrl->namespaces_rwsem);
4338 }
4339 EXPORT_SYMBOL_GPL(nvme_start_queues);
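
/*
 * Editorial sketch, not part of the original driver: one common way the
 * freeze/quiesce helpers above are paired across an I/O queue reset,
 * loosely modelled on how the PCIe transport uses them.  The function name
 * is invented and the exact ordering is transport-specific.
 */
static void __maybe_unused nvme_example_reset_io_queues(struct nvme_ctrl *ctrl)
{
	/* Gate new submissions, then quiesce dispatch of queued requests. */
	nvme_start_freeze(ctrl);
	nvme_stop_queues(ctrl);

	/* ... tear down and re-create the transport's I/O queues ... */

	/* Resume dispatch and wait for everything already started to drain. */
	nvme_start_queues(ctrl);
	nvme_wait_freeze(ctrl);

	/* Re-open the queues for new submissions. */
	nvme_unfreeze(ctrl);
}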
4340 
4342 void nvme_sync_queues(struct nvme_ctrl *ctrl)
4343 {
4344 	struct nvme_ns *ns;
4345 
4346 	down_read(&ctrl->namespaces_rwsem);
4347 	list_for_each_entry(ns, &ctrl->namespaces, list)
4348 		blk_sync_queue(ns->queue);
4349 	up_read(&ctrl->namespaces_rwsem);
4350 
4351 	if (ctrl->admin_q)
4352 		blk_sync_queue(ctrl->admin_q);
4353 }
4354 EXPORT_SYMBOL_GPL(nvme_sync_queues);
4355 
4356 /*
4357  * Check we didn't inadvertently grow the command structure sizes:
4358  */
4359 static inline void _nvme_check_size(void)
4360 {
4361 	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
4362 	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
4363 	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
4364 	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
4365 	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
4366 	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
4367 	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
4368 	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
4369 	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
4370 	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
4371 	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
4372 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
4373 	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
4374 	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
4375 	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
4376 	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
4377 	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
4378 }
4379 
4381 static int __init nvme_core_init(void)
4382 {
4383 	int result = -ENOMEM;
4384 
4385 	_nvme_check_size();
4386 
4387 	nvme_wq = alloc_workqueue("nvme-wq",
4388 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4389 	if (!nvme_wq)
4390 		goto out;
4391 
4392 	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
4393 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4394 	if (!nvme_reset_wq)
4395 		goto destroy_wq;
4396 
4397 	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
4398 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4399 	if (!nvme_delete_wq)
4400 		goto destroy_reset_wq;
4401 
4402 	result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
4403 	if (result < 0)
4404 		goto destroy_delete_wq;
4405 
4406 	nvme_class = class_create(THIS_MODULE, "nvme");
4407 	if (IS_ERR(nvme_class)) {
4408 		result = PTR_ERR(nvme_class);
4409 		goto unregister_chrdev;
4410 	}
4411 	nvme_class->dev_uevent = nvme_class_uevent;
4412 
4413 	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
4414 	if (IS_ERR(nvme_subsys_class)) {
4415 		result = PTR_ERR(nvme_subsys_class);
4416 		goto destroy_class;
4417 	}
4418 	return 0;
4419 
4420 destroy_class:
4421 	class_destroy(nvme_class);
4422 unregister_chrdev:
4423 	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
4424 destroy_delete_wq:
4425 	destroy_workqueue(nvme_delete_wq);
4426 destroy_reset_wq:
4427 	destroy_workqueue(nvme_reset_wq);
4428 destroy_wq:
4429 	destroy_workqueue(nvme_wq);
4430 out:
4431 	return result;
4432 }
4433 
4434 static void __exit nvme_core_exit(void)
4435 {
4436 	class_destroy(nvme_subsys_class);
4437 	class_destroy(nvme_class);
4438 	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
4439 	destroy_workqueue(nvme_delete_wq);
4440 	destroy_workqueue(nvme_reset_wq);
4441 	destroy_workqueue(nvme_wq);
4442 	ida_destroy(&nvme_instance_ida);
4443 }
4444 
4445 MODULE_LICENSE("GPL");
4446 MODULE_VERSION("1.0");
4447 module_init(nvme_core_init);
4448 module_exit(nvme_core_exit);
4449